repo_name | path | text
|---|---|---|
vicharl/containerdns
|
kdns/src/netdev.h
|
#ifndef _DNSNETDEV_H_
#define _DNSNETDEV_H_
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_mempool.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include "metrics.h"
#define NETIF_MAX_PKT_BURST (32)
struct netif_queue_stats {
uint64_t pkts_rcv; /* Total number of received packets */
uint64_t pkts_2kni; /* Total number of received packets forwarded to KNI */
uint64_t pkts_icmp; /* Total number of received ICMP packets */
uint64_t dns_pkts_rcv; /* Total number of successfully received DNS packets. */
uint64_t dns_pkts_snd; /* Total number of successfully transmitted DNS packets. */
uint64_t pkt_dropped; /* Total number of packets dropped by software. */
uint64_t pkt_len_err; /* Total number of packets with length errors. */
uint64_t dns_lens_rcv; /* Total length (bytes) of received DNS packets. */
uint64_t dns_lens_snd; /* Total length (bytes) of transmitted DNS packets. */
uint64_t dns_fwd_rcv_udp; /* Total number of forwarded queries received over UDP */
uint64_t dns_fwd_snd_udp; /* Total number of forwarded responses sent over UDP */
uint64_t dns_fwd_lost_udp; /* Total number of forwarded responses lost over UDP */
uint64_t dns_fwd_rcv_tcp; /* Total number of forwarded queries received over TCP */
uint64_t dns_fwd_snd_tcp; /* Total number of forwarded responses sent over TCP */
uint64_t dns_fwd_lost_tcp; /* Total number of forwarded responses lost over TCP */
uint64_t dns_pkts_rcv_tcp; /* Total number of DNS packets received over TCP */
uint64_t dns_pkts_snd_tcp; /* Total number of DNS packets sent over TCP */
metrics_metrics_st metrics;
} __rte_cache_aligned;
/* RX/TX queue conf for lcore */
struct netif_queue_conf {
uint16_t port_id;
uint16_t rx_queue_id;
uint16_t tx_queue_id;
struct netif_queue_stats stats;
uint16_t tx_len;
struct rte_mbuf *tx_mbufs[NETIF_MAX_PKT_BURST];
uint16_t kni_len;
struct rte_mbuf *kni_mbufs[NETIF_MAX_PKT_BURST];
} __rte_cache_aligned;
struct net_device {
uint16_t max_rx_queues;
uint16_t max_tx_queues;
uint16_t max_rx_desc;
uint16_t max_tx_desc;
struct ether_addr hwaddr;
struct netif_queue_conf l_netif_queue_conf[RTE_MAX_LCORE];
};
int netdev_mode_parse(const char *entry);
struct netif_queue_conf *netif_queue_conf_get(uint16_t lcore_id);
int kdns_netdev_init(void);
void kni_egress(struct rte_mbuf **mbufs, uint16_t nb_mbufs);
int kni_ingress(struct rte_mbuf **mbufs, uint16_t nb_mbufs);
void netif_statsdata_get(struct netif_queue_stats *sta);
void netif_statsdata_reset(void);
void netif_statsdata_metrics_reset(void);
void init_dns_packet_header(struct ether_hdr *eth_hdr, struct ipv4_hdr *ipv4_hdr, struct udp_hdr *udp_hdr, uint16_t data_len);
#endif
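/*
 * Illustrative usage sketch (added for this listing, not part of the
 * original header): a minimal per-lcore receive loop built on the API
 * declared above. rte_eth_rx_burst() is the standard DPDK 17.02 receive
 * call; how kdns classifies packets afterwards is only hinted at in the
 * comment. Guarded out so it is clearly an example, not shipped code.
 */
#ifdef NETDEV_USAGE_EXAMPLE
#include <rte_ethdev.h>
#include <rte_lcore.h>

static void rx_loop_example(void)
{
    unsigned lcore_id = rte_lcore_id();
    struct netif_queue_conf *conf = netif_queue_conf_get(lcore_id);
    struct rte_mbuf *pkts[NETIF_MAX_PKT_BURST];

    for (;;) {
        /* Pull up to one burst from this lcore's dedicated RX queue. */
        uint16_t nb_rx = rte_eth_rx_burst(conf->port_id, conf->rx_queue_id,
                                          pkts, NETIF_MAX_PKT_BURST);
        conf->stats.pkts_rcv += nb_rx;
        /* ... parse DNS queries, queue non-DNS traffic for kni_egress() ... */
    }
}
#endif /* NETDEV_USAGE_EXAMPLE */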
|
vicharl/containerdns
|
kdns/core/zone.h
|
/*
* zone.h -- zone compiler.
*
* Copyright (c) 2001-2006, NLnet Labs.
*
* Modified Work Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef _ZONEC_H_
#define _ZONEC_H_
#include "domain_store.h"
#define MAXTOKENSLEN 512 /* Maximum number of tokens per entry */
#define B64BUFSIZE 65535 /* Buffer size for b64 conversion */
#define ROOT (const uint8_t *)"\001"
#define NSEC_WINDOW_COUNT 256
#define NSEC_WINDOW_BITS_COUNT 256
#define NSEC_WINDOW_BITS_SIZE (NSEC_WINDOW_BITS_COUNT / 8)
#define IPSECKEY_NOGATEWAY 0 /* RFC 4025 */
#define IPSECKEY_IP4 1
#define IPSECKEY_IP6 2
#define IPSECKEY_DNAME 3
#define LINEBUFSZ 1024
struct lexdata {
size_t len; /* holds the label length */
char *str; /* holds the data */
};
#define DEFAULT_TTL 3600
int zrdatacmp(uint16_t type, rr_type *a, rr_type *b);
uint16_t *zparser_conv_serial(const char *periodstr);
uint16_t *zparser_conv_short(const char *text);
uint16_t *zparser_conv_a(const char *text);
uint16_t *zparser_conv_aaaa(const char *text);
uint16_t *alloc_rdata_init(const void *data, size_t size);
#endif /* _ZONEC_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/ecore_hsi_init_tool.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_INIT_TOOL__
#define __ECORE_HSI_INIT_TOOL__
/**************************************/
/* Init Tool HSI constants and macros */
/**************************************/
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
enum init_modes {
MODE_BB_A0_DEPRECATED,
MODE_BB_B0,
MODE_K2,
MODE_ASIC,
MODE_EMUL_REDUCED,
MODE_EMUL_FULL,
MODE_FPGA,
MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
MODE_PORTS_PER_ENG_1,
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_E5,
MAX_INIT_MODES
};
enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
enum init_split_types {
SPLIT_TYPE_NONE,
SPLIT_TYPE_PORT,
SPLIT_TYPE_PF,
SPLIT_TYPE_PORT_PF,
SPLIT_TYPE_VF,
MAX_INIT_SPLIT_TYPES
};
struct fw_asserts_ram_section {
/* The offset of the section in the RAM in RAM lines (64-bit units) */
__le16 section_ram_line_offset;
/* The size of the section in RAM lines (64-bit units) */
__le16 section_ram_line_size;
/* The offset of the asserts list within the section in dwords */
u8 list_dword_offset;
/* The size of an assert list element in dwords */
u8 list_element_dword_size;
u8 list_num_elements /* The number of elements in the asserts list */;
/* The offset of the next list index field within the section in dwords */
u8 list_next_index_dword_offset;
};
struct fw_ver_num {
u8 major /* Firmware major version number */;
u8 minor /* Firmware minor version number */;
u8 rev /* Firmware revision version number */;
/* Firmware engineering version number (for bootleg versions) */
u8 eng;
};
struct fw_ver_info {
__le16 tools_ver /* Tools version number */;
u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
u8 reserved1;
struct fw_ver_num num /* FW version number */;
__le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
__le32 reserved2;
};
struct fw_info {
struct fw_ver_info ver /* FW version information */;
/* Info regarding the FW asserts section in the Storm RAM */
struct fw_asserts_ram_section fw_asserts_section;
};
struct fw_info_location {
/* GRC address where the fw_info struct is located. */
__le32 grc_addr;
/* Size of the fw_info structure (that's located at the grc_addr). */
__le32 size;
};
/*
* Binary buffer header
*/
struct bin_buffer_hdr {
/* buffer offset in bytes from the beginning of the binary file */
__le32 offset;
__le32 length /* buffer length in bytes */;
};
/*
* binary init buffer types
*/
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */,
BIN_BUF_INIT_CMD /* init commands */,
BIN_BUF_INIT_VAL /* init data */,
BIN_BUF_INIT_MODE_TREE /* init modes tree */,
BIN_BUF_INIT_IRO /* internal RAM offsets */,
MAX_BIN_INIT_BUFFER_TYPE
};
/*
* init array header: raw
*/
struct init_array_raw_hdr {
__le32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
/* init array params */
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
/*
* init array header: standard
*/
struct init_array_standard_hdr {
__le32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
/* Init array size (in dwords) */
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
/*
* init array header: zipped
*/
struct init_array_zipped_hdr {
__le32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
/* Init array zipped size (in bytes) */
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
/*
* init array header: pattern
*/
struct init_array_pattern_hdr {
__le32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
/* pattern size in dword */
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
/* pattern repetitions */
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
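/*
 * Illustrative sketch (not from the original header): the MASK/SHIFT
 * pairs above follow the usual pack/extract convention, where a field's
 * value is (data >> SHIFT) & MASK. HDR_GET_FIELD is a hypothetical
 * helper, and the __le32 data word is assumed already converted to host
 * byte order.
 */
#ifdef INIT_ARRAY_HDR_EXAMPLE
#include <stdint.h>

#define HDR_GET_FIELD(data, field) \
    (((data) >> field##_SHIFT) & field##_MASK)

static inline void decode_raw_hdr(uint32_t data)
{
    uint32_t type   = HDR_GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE);   /* bits 0..3  */
    uint32_t params = HDR_GET_FIELD(data, INIT_ARRAY_RAW_HDR_PARAMS); /* bits 4..31 */
    (void)type;
    (void)params;
}
#endif /* INIT_ARRAY_HDR_EXAMPLE */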
/*
* init array header union
*/
union init_array_hdr {
struct init_array_raw_hdr raw /* raw init array header */;
/* standard init array header */
struct init_array_standard_hdr standard;
struct init_array_zipped_hdr zipped /* zipped init array header */;
struct init_array_pattern_hdr pattern /* pattern init array header */;
};
/*
* init array types
*/
enum init_array_types {
INIT_ARR_STANDARD /* standard init array */,
INIT_ARR_ZIPPED /* zipped init array */,
INIT_ARR_PATTERN /* a repeated pattern */,
MAX_INIT_ARRAY_TYPES
};
/*
* init operation: callback
*/
struct init_callback_op {
__le32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_CALLBACK_OP_OP_MASK 0xF
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
__le16 callback_id /* Callback ID */;
__le16 block_id /* Blocks ID */;
};
/*
* init operation: delay
*/
struct init_delay_op {
__le32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_DELAY_OP_OP_MASK 0xF
#define INIT_DELAY_OP_OP_SHIFT 0
#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT 4
__le32 delay /* delay in us */;
};
/*
* init operation: if_mode
*/
struct init_if_mode_op {
__le32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_MODE_OP_OP_MASK 0xF
#define INIT_IF_MODE_OP_OP_SHIFT 0
#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
/* Commands to skip if the modes don't match */
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
__le16 reserved2;
/* offset (in bytes) in modes expression buffer */
__le16 modes_buf_offset;
};
/*
* init operation: if_phase
*/
struct init_if_phase_op {
__le32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
/* Indicates if DMAE is enabled in this phase */
#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
/* Commands to skip if the phases don't match */
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/*
* init mode operators
*/
enum init_mode_ops {
INIT_MODE_OP_NOT /* init mode not operator */,
INIT_MODE_OP_OR /* init mode or operator */,
INIT_MODE_OP_AND /* init mode and operator */,
MAX_INIT_MODE_OPS
};
/*
* init operation: raw
*/
struct init_raw_op {
__le32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_RAW_OP_OP_MASK 0xF
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
#define INIT_RAW_OP_PARAM1_SHIFT 4
__le32 param2 /* Init param 2 */;
};
/*
* init array params
*/
struct init_op_array_params {
__le16 size /* array size in dwords */;
__le16 offset /* array start offset in dwords */;
};
/*
* Write init operation arguments
*/
union init_write_args {
/* value to write, used when init source is INIT_SRC_INLINE */
__le32 inline_val;
/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
__le32 zeros_count;
/* array offset to write, used when init source is INIT_SRC_ARRAY */
__le32 array_offset;
/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
struct init_op_array_params runtime;
};
/*
* init operation: write
*/
struct init_write_op {
__le32 data;
/* init operation, from init_op_types enum */
#define INIT_WRITE_OP_OP_MASK 0xF
#define INIT_WRITE_OP_OP_SHIFT 0
/* init source type, taken from init_source_types enum */
#define INIT_WRITE_OP_SOURCE_MASK 0x7
#define INIT_WRITE_OP_SOURCE_SHIFT 4
#define INIT_WRITE_OP_RESERVED_MASK 0x1
#define INIT_WRITE_OP_RESERVED_SHIFT 7
/* indicates if the register is wide-bus */
#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
/* internal (absolute) GRC address, in dwords */
#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_WRITE_OP_ADDRESS_SHIFT 9
union init_write_args args /* Write init operation arguments */;
};
/*
* init operation: read
*/
struct init_read_op {
__le32 op_data;
/* init operation, from init_op_types enum */
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
/* polling type, from init_poll_types enum */
#define INIT_READ_OP_POLL_TYPE_MASK 0xF
#define INIT_READ_OP_POLL_TYPE_SHIFT 4
#define INIT_READ_OP_RESERVED_MASK 0x1
#define INIT_READ_OP_RESERVED_SHIFT 8
/* internal (absolute) GRC address, in dwords */
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
/* expected polling value, used only when polling is done */
__le32 expected_val;
};
/*
* Init operations union
*/
union init_op {
struct init_raw_op raw /* raw init operation */;
struct init_write_op write /* write init operation */;
struct init_read_op read /* read init operation */;
struct init_if_mode_op if_mode /* if_mode init operation */;
struct init_if_phase_op if_phase /* if_phase init operation */;
struct init_callback_op callback /* callback init operation */;
struct init_delay_op delay /* delay init operation */;
};
/*
* Init command operation types
*/
enum init_op_types {
INIT_OP_READ /* GRC read init command */,
INIT_OP_WRITE /* GRC write init command */,
/* Skip init commands if the init modes expression doesn't match */
INIT_OP_IF_MODE,
/* Skip init commands if the init phase doesn't match */
INIT_OP_IF_PHASE,
INIT_OP_DELAY /* delay init command */,
INIT_OP_CALLBACK /* callback init command */,
MAX_INIT_OP_TYPES
};
/*
* init polling types
*/
enum init_poll_types {
INIT_POLL_NONE /* No polling */,
INIT_POLL_EQ /* poll until the read value equals the expected value */,
INIT_POLL_OR /* poll with an OR comparison against the expected value */,
INIT_POLL_AND /* poll with an AND comparison against the expected value */,
MAX_INIT_POLL_TYPES
};
/*
* init source types
*/
enum init_source_types {
INIT_SRC_INLINE /* init value is included in the init command */,
INIT_SRC_ZEROS /* init value is all zeros */,
INIT_SRC_ARRAY /* init value is an array of values */,
INIT_SRC_RUNTIME /* init value is provided during runtime */,
MAX_INIT_SOURCE_TYPES
};
/*
* Internal RAM Offsets macro data
*/
struct iro {
__le32 base /* RAM field offset */;
__le16 m1 /* multiplier 1 */;
__le16 m2 /* multiplier 2 */;
__le16 m3 /* multiplier 3 */;
__le16 size /* RAM field size */;
};
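/*
 * Illustrative sketch (not part of the original header): internal RAM
 * offsets are typically computed from an IRO entry as
 * base + m1*id1 + m2*id2 + m3*id3, where id1..id3 are hypothetical
 * per-PF/per-queue indices. Fields are assumed converted to host order.
 */
#ifdef IRO_USAGE_EXAMPLE
#include <stdint.h>

static inline uint32_t iro_ram_offset(const struct iro *ent,
                                      uint16_t id1, uint16_t id2, uint16_t id3)
{
    /* Scale each index by its multiplier and add to the base offset. */
    return ent->base + (uint32_t)ent->m1 * id1
                     + (uint32_t)ent->m2 * id2
                     + (uint32_t)ent->m3 * id3;
}
#endif /* IRO_USAGE_EXAMPLE */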
#endif /* __ECORE_HSI_INIT_TOOL__ */
|
vicharl/containerdns
|
kdns/core/dns.h
|
/*
* dns.h -- DNS definitions.
*
* Copyright (c) 2001-2006, NLnet Labs.
*
* Modified Work Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef _DNS_H_
#define _DNS_H_
#include <stdint.h>
#include <assert.h>
#include <stdio.h>
#include "buffer.h"
typedef enum rr_section {
QUESTION_SECTION,
ANSWER_SECTION,
AUTHORITY_SECTION,
OPTIONAL_AUTHORITY_SECTION,
ADDITIONAL_SECTION,
RR_SECTION_COUNT
}rr_section_type;
/* Possible OPCODE values */
#define OPCODE_QUERY 0 /* a standard query (QUERY) */
#define OPCODE_IQUERY 1 /* an inverse query (IQUERY) */
#define OPCODE_STATUS 2 /* a server status request (STATUS) */
#define OPCODE_NOTIFY 4 /* NOTIFY */
#define OPCODE_UPDATE 5 /* Dynamic update */
/* Possible RCODE values */
#define RCODE_OK 0 /* No error condition */
#define RCODE_FORMAT 1 /* Format error */
#define RCODE_SERVFAIL 2 /* Server failure */
#define RCODE_NXDOMAIN 3 /* Name Error */
#define RCODE_IMPL 4 /* Not implemented */
#define RCODE_REFUSE 5 /* Refused */
#define RCODE_YXDOMAIN 6 /* name should not exist */
#define RCODE_YXRRSET 7 /* rrset should not exist */
#define RCODE_NXRRSET 8 /* rrset does not exist */
#define RCODE_NOTAUTH 9 /* server not authoritative */
#define RCODE_NOTZONE 10 /* name not inside zone */
/* RFC1035 */
#define CLASS_IN 1 /* Class IN */
#define CLASS_CS 2 /* Class CS */
#define CLASS_CH 3 /* Class CHAOS */
#define CLASS_HS 4 /* Class HS */
#define CLASS_NONE 254 /* Class NONE rfc2136 */
#define CLASS_ANY 255 /* Class ANY */
// type support
#define TYPE_A 1 /* a host address */
#define TYPE_CNAME 5 /* the canonical name for an alias */
#define TYPE_SOA 6 /* marks the start of a zone of authority */
#define TYPE_PTR 12 /* a domain name pointer */
#define TYPE_AAAA 28 /* ipv6 address */
#define TYPE_SRV 33 /* SRV record RFC2782 */
#define TYPE_SUPPORT_MAX 6
#define MAXLABELLEN 63
#define MAXDOMAINLEN 255
#define MAXRDATALEN 64 /* This is more than enough, think multiple TXT. */
#define MAX_RDLENGTH 65535
/* Maximum size of a single RR. */
#define MAX_RR_SIZE \
(MAXDOMAINLEN + sizeof(uint32_t) + 4*sizeof(uint16_t) + MAX_RDLENGTH)
/*
* The different types of RDATA wireformat data.
*/
enum rdata_wireformat
{
RDATA_WF_COMPRESSED_DNAME, /* Possibly compressed domain name. */
RDATA_WF_UNCOMPRESSED_DNAME, /* Uncompressed domain name. */
RDATA_WF_LITERAL_DNAME, /* Literal (not downcased) dname. */
RDATA_WF_BYTE, /* 8-bit integer. */
RDATA_WF_SHORT, /* 16-bit integer. */
RDATA_WF_LONG, /* 32-bit integer. */
RDATA_WF_TEXT, /* Text string. */
RDATA_WF_TEXTS, /* Text string sequence. */
RDATA_WF_A, /* 32-bit IPv4 address. */
RDATA_WF_AAAA, /* 128-bit IPv6 address. */
RDATA_WF_BINARY, /* Binary data (unknown length). */
RDATA_WF_BINARYWITHLENGTH, /* Binary data preceded by 1 byte length */
RDATA_WF_APL, /* APL data. */
RDATA_WF_IPSECGATEWAY, /* IPSECKEY gateway ip4, ip6 or dname. */
RDATA_WF_ILNP64, /* 64-bit uncompressed IPv6 address. */
RDATA_WF_EUI48, /* 48-bit address. */
RDATA_WF_EUI64, /* 64-bit address. */
RDATA_WF_LONG_TEXT /* Long (>255) text string. */
};
typedef enum rdata_wireformat rdata_wireformat_type;
typedef struct rrtype_descriptor
{
uint16_t type; /* RR type */
const char *name; /* Textual name. */
uint32_t minimum; /* Minimum number of RDATAs. */
uint32_t maximum; /* Maximum number of RDATAs. */
uint8_t wireformat[MAXRDATALEN]; /* rdata_wireformat_type */
}rrtype_descriptor_st;
rrtype_descriptor_st *rrtype_descriptor_by_type(uint16_t type);
/*
* Domain names stored in memory add some additional information to be
* able to quickly index and compare by label.
*/
typedef struct domain_name domain_name_st;
struct domain_name
{
uint8_t name_size;
uint8_t label_count;
};
const domain_name_st *
domain_name_make_no_malloc(const uint8_t *name, int normalize, domain_name_st *result);
/*
* Construct a new domain name based on NAME in wire format. NAME
* cannot contain compression pointers.
*
* Pre: NAME != NULL.
*/
const domain_name_st *domain_name_make(const uint8_t *name, int normalize);
/*
* Construct a new domain name based on the ASCII representation NAME.
* If ORIGIN is not NULL and NAME is not terminated by a "." the
* ORIGIN is appended to the result. NAME can contain escape
* sequences.
*
* Returns NULL on failure. Otherwise a newly allocated domain name
* is returned.
*
* Pre: name != NULL.
*/
const domain_name_st *domain_name_parse(const char *name);
/*
* parse ascii string to wireformat domain name (without compression ptrs)
* returns 0 on failure, the length of the wireformat on success.
* the result is stored in the wirefmt which must be at least MAXDOMAINLEN
* in size. On failure, the wirefmt can be altered.
*/
int domain_name_parse_wire(uint8_t* wirefmt, const char* name);
/*
* Copy the most significant LABEL_COUNT labels from dname.
*/
const domain_name_st *domain_name_partial_copy(
const domain_name_st *dname,
uint8_t label_count);
/*
* The origin of DNAME.
*/
const domain_name_st *domain_name_origin( const domain_name_st *dname);
/*
* Return true if LEFT is a subdomain of RIGHT.
*/
int domain_name_is_subdomain(const domain_name_st *left, const domain_name_st *right);
/*
* Offsets into NAME for each label starting with the most
* significant label (the root label, followed by the TLD,
* etc).
*/
static inline const uint8_t *
domain_name_label_offsets(const domain_name_st *dname)
{
return (const uint8_t *) ((const char *) dname + sizeof(domain_name_st));
}
/*
* The actual name in wire format (a sequence of label, each
* prefixed by a length byte, terminated by a zero length
* label).
*/
static inline const uint8_t *
domain_name_get(const domain_name_st *dname)
{
return (const uint8_t *) ((const char *) dname
+ sizeof(domain_name_st)
+ dname->label_count * sizeof(uint8_t));
}
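/*
 * Worked example (illustrative, not part of the original header): for
 * "example.com." the wire-format name is
 *     \007example\003com\000    (13 bytes, so name_size == 13)
 * and there are three labels (root, "com", "example"), so label_count == 3.
 * The label offsets, most significant first, are 12 (root), 8 ("com"),
 * 0 ("example"), giving the in-memory layout:
 *     [domain_name_st][offset bytes 12, 8, 0][13 wire-format bytes]
 */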
/*
* Return the label for DNAME specified by LABEL_INDEX. The first
* label (LABEL_INDEX == 0) is the root label, the next label is the
* TLD, etc.
*
* Pre: dname != NULL && label_index < dname->label_count.
*/
static inline const uint8_t *
domain_name_label(const domain_name_st *dname, uint8_t label)
{
uint8_t label_index;
assert(dname != NULL);
assert(label < dname->label_count);
label_index = domain_name_label_offsets(dname)[label];
assert(label_index < dname->name_size);
return domain_name_get(dname) + label_index;
}
/*
* Compare two domain names. The comparison defines a lexicographical
* ordering based on the domain name's labels, starting with the most
* significant label.
*
* Return < 0 if LEFT < RIGHT, 0 if LEFT == RIGHT, and > 0 if LEFT >
* RIGHT. The comparison is case sensitive.
*
* Pre: left != NULL && right != NULL
*/
int domain_name_compare(const domain_name_st *left, const domain_name_st *right);
/*
* Compare two labels. The comparison defines a lexicographical
* ordering based on the characters in the labels.
*
* Return < 0 if LEFT < RIGHT, 0 if LEFT == RIGHT, and > 0 if LEFT >
* RIGHT. The comparison is case sensitive.
*
* Pre: left != NULL && right != NULL
* label_is_normal(left) && label_is_normal(right)
*/
int label_compare(const uint8_t *left, const uint8_t *right);
/*
* Returns the number of labels that match in LEFT and RIGHT, starting
* with the most significant label. Because the root label always
* matches, the result will always be >= 1.
*
* Pre: left != NULL && right != NULL
*/
uint8_t domain_name_label_match_count(const domain_name_st *left,
const domain_name_st *right);
/*
* The total size (in bytes) allocated to store DNAME.
*
* Pre: dname != NULL
*/
static inline size_t
domain_name_total_size(const domain_name_st *dname)
{
return (sizeof(domain_name_st)
+ ((((size_t)dname->label_count) + ((size_t)dname->name_size))
* sizeof(uint8_t)));
}
/*
* Is LABEL a normal LABEL (not a pointer or reserved)?
*
* Pre: label != NULL;
*/
static inline int
label_is_normal(const uint8_t *label)
{
assert(label);
return (label[0] & 0xc0) == 0;
}
/*
* Is LABEL a pointer?
*
* Pre: label != NULL;
* A real label sequence ends with a 0x00 byte; a compression pointer occupies two bytes, and the top two bits of its first byte are 11.
*/
static inline int
label_is_pointer(const uint8_t *label)
{
assert(label);
return (label[0] & 0xc0) == 0xc0;
}
/*
* LABEL's pointer location.
*
* Pre: label != NULL && label_is_pointer(label)
*/
static inline uint16_t
label_pointer_location(const uint8_t *label)
{
assert(label);
assert(label_is_pointer(label));
return ((uint16_t) (label[0] & ~0xc0) << 8) | (uint16_t) label[1];
}
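/*
 * Worked example (illustrative, not part of the original header): in a
 * typical DNS response the answer's name is often the two bytes
 * 0xC0 0x0C, a compression pointer to offset 12, where the question
 * name begins immediately after the 12-byte DNS header.
 */
#ifdef LABEL_POINTER_EXAMPLE
static void label_pointer_example(void)
{
    static const uint8_t ptr[2] = { 0xC0, 0x0C };
    assert(label_is_pointer(ptr));
    assert(label_pointer_location(ptr) == 12);
}
#endif /* LABEL_POINTER_EXAMPLE */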
/*
* Length of LABEL.
*
* Pre: label != NULL && label_is_normal(label)
*/
static inline uint8_t
label_length(const uint8_t *label)
{
assert(label);
assert(label_is_normal(label));
return label[0];
}
/*
* The data of LABEL.
*
* Pre: label != NULL && label_is_normal(label)
*/
static inline const uint8_t *
labeldata(const uint8_t *label)
{
assert(label);
assert(label_is_normal(label));
return label + 1;
}
/*
* Is LABEL the root label?
*
* Pre: label != NULL
*/
static inline int
label_is_root(const uint8_t *label)
{
assert(label);
return label[0] == 0;
}
/*
* Is LABEL the wildcard label?
*
* Pre: label != NULL
*/
static inline int
label_is_wildcard(const uint8_t *label)
{
assert(label);
return label[0] == 1 && label[1] == '*';
}
/*
* The next label of LABEL.
*
* Pre: label != NULL
* label_is_normal(label)
* !label_is_root(label)
*/
static inline const uint8_t *
label_next(const uint8_t *label)
{
assert(label);
assert(label_is_normal(label));
assert(!label_is_root(label));
return label + label_length(label) + 1;
}
/*
* Convert DNAME to its string representation. The result points to a
* static buffer that is overwritten the next time this function is
* invoked.
*
* If ORIGIN is provided and DNAME is a subdomain of ORIGIN the dname
* will be represented relative to ORIGIN.
*
* Pre: dname != NULL
*/
const char *domain_name_to_string(const domain_name_st *dname,
const domain_name_st *origin);
/*
* Create a dname containing the single label specified by STR
* followed by the root label.
*/
const domain_name_st *domain_name_make_from_label(
const uint8_t *label,
const size_t length);
/*
* Concatenate two dnames.
*/
const domain_name_st *domain_name_concatenate(
const domain_name_st *left,
const domain_name_st *right);
/*
* Perform DNAME substitution on a name, replace src with dest.
* Name must be a subdomain of src. The returned name is a subdomain of dest.
* Returns NULL if the result domain name is too long.
*/
const domain_name_st *domain_name_replace(
const domain_name_st* name,
const domain_name_st* src,
const domain_name_st* dest);
/** check if two uncompressed dnames of the same total length are equal */
int domain_name_equal_nocase(uint8_t* a, uint8_t* b, uint16_t len);
#endif /* _DNS_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/main.c
|
#include <stdio.h>
#include <unistd.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_CYCLECOUNT] = "cycle-count",
[CPERF_TEST_TYPE_LATENCY] = "latency"
};
const char *cperf_op_type_strs[] = {
[CPERF_CIPHER_ONLY] = "cipher-only",
[CPERF_AUTH_ONLY] = "auth-only",
[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
[CPERF_AEAD] = "aead"
};
const struct cperf_test cperf_testmap[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = {
cperf_throughput_test_constructor,
cperf_throughput_test_runner,
cperf_throughput_test_destructor
},
[CPERF_TEST_TYPE_CYCLECOUNT] = { NULL },
[CPERF_TEST_TYPE_LATENCY] = {
cperf_latency_test_constructor,
cperf_latency_test_runner,
cperf_latency_test_destructor
}
};
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
if (enabled_cdev_count == 0) {
printf("No crypto devices type %s available\n",
opts->device_type);
return -EINVAL;
}
nb_lcores = rte_lcore_count() - 1;
if (enabled_cdev_count > nb_lcores) {
printf("Number of capable crypto devices (%d) "
"has to be less or equal to number of slave "
"cores (%d)\n", enabled_cdev_count, nb_lcores);
return -EINVAL;
}
for (cdev_id = 0; cdev_id < enabled_cdev_count &&
cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
struct rte_cryptodev_config conf = {
.nb_queue_pairs = 1,
.socket_id = SOCKET_ID_ANY,
.session_mp = {
.nb_objs = 2048,
.cache_size = 64
}
};
struct rte_cryptodev_qp_conf qp_conf = {
.nb_descriptors = 2048
};
ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
if (ret < 0) {
printf("Failed to configure cryptodev %u",
enabled_cdevs[cdev_id]);
return -EINVAL;
}
ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
&qp_conf, SOCKET_ID_ANY);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
"cryptodev %u", 0, cdev_id);
return -EINVAL;
}
ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
if (ret < 0) {
printf("Failed to start device %u: error %d\n",
enabled_cdevs[cdev_id], ret);
return -EPERM;
}
}
return enabled_cdev_count;
}
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
struct rte_cryptodev_sym_capability_idx cap_idx;
const struct rte_cryptodev_symmetric_capability *capability;
uint8_t i, cdev_id;
int ret;
for (i = 0; i < nb_cryptodevs; i++) {
cdev_id = enabled_cdevs[i];
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
cap_idx.algo.auth = opts->auth_algo;
capability = rte_cryptodev_sym_capability_get(cdev_id,
&cap_idx);
if (capability == NULL)
return -1;
ret = rte_cryptodev_sym_capability_check_auth(
capability,
opts->auth_key_sz,
opts->auth_digest_sz,
opts->auth_aad_sz);
if (ret != 0)
return ret;
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
opts->op_type == CPERF_AUTH_THEN_CIPHER) {
cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cap_idx.algo.cipher = opts->cipher_algo;
capability = rte_cryptodev_sym_capability_get(cdev_id,
&cap_idx);
if (capability == NULL)
return -1;
ret = rte_cryptodev_sym_capability_check_cipher(
capability,
opts->cipher_key_sz,
opts->cipher_iv_sz);
if (ret != 0)
return ret;
}
}
return 0;
}
static int
cperf_check_test_vector(struct cperf_options *opts,
struct cperf_test_vector *test_vec)
{
if (opts->op_type == CPERF_CIPHER_ONLY) {
if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
if (test_vec->plaintext.length != opts->buffer_sz)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
if (test_vec->ciphertext.length != opts->buffer_sz)
return -1;
if (test_vec->iv.data == NULL)
return -1;
if (test_vec->iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
if (test_vec->cipher_key.length != opts->cipher_key_sz)
return -1;
}
} else if (opts->op_type == CPERF_AUTH_ONLY) {
if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
if (test_vec->plaintext.length != opts->buffer_sz)
return -1;
if (test_vec->auth_key.data == NULL)
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
if (test_vec->digest.length != opts->auth_digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
opts->op_type == CPERF_AUTH_THEN_CIPHER) {
if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
if (test_vec->plaintext.length != opts->buffer_sz)
return -1;
} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
if (test_vec->plaintext.data == NULL)
return -1;
if (test_vec->plaintext.length != opts->buffer_sz)
return -1;
if (test_vec->ciphertext.data == NULL)
return -1;
if (test_vec->ciphertext.length != opts->buffer_sz)
return -1;
if (test_vec->iv.data == NULL)
return -1;
if (test_vec->iv.length != opts->cipher_iv_sz)
return -1;
if (test_vec->cipher_key.data == NULL)
return -1;
if (test_vec->cipher_key.length != opts->cipher_key_sz)
return -1;
}
if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
if (test_vec->auth_key.data == NULL)
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
if (test_vec->digest.length != opts->auth_digest_sz)
return -1;
}
} else if (opts->op_type == CPERF_AEAD) {
if (test_vec->plaintext.data == NULL)
return -1;
if (test_vec->plaintext.length != opts->buffer_sz)
return -1;
if (test_vec->aad.data == NULL)
return -1;
if (test_vec->aad.length != opts->auth_aad_sz)
return -1;
if (test_vec->digest.data == NULL)
return -1;
if (test_vec->digest.length != opts->auth_digest_sz)
return -1;
}
return 0;
}
int
main(int argc, char **argv)
{
struct cperf_options opts = {0};
struct cperf_test_vector *t_vec = NULL;
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
int nb_cryptodevs = 0;
uint8_t cdev_id, i;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
int ret;
uint32_t lcore_id;
/* Initialise DPDK EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
argc -= ret;
argv += ret;
cperf_options_default(&opts);
ret = cperf_options_parse(&opts, argc, argv);
if (ret) {
RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
goto err;
}
ret = cperf_options_check(&opts);
if (ret) {
RTE_LOG(ERR, USER1,
"Checking on or more user options failed\n");
goto err;
}
if (!opts.silent)
cperf_options_dump(&opts);
nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
nb_cryptodevs = 0;
goto err;
}
ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
nb_cryptodevs);
if (ret) {
RTE_LOG(ERR, USER1, "Crypto device type does not support "
"capabilities requested\n");
goto err;
}
if (opts.test_file != NULL) {
t_vec = cperf_test_vector_get_from_file(&opts);
if (t_vec == NULL) {
RTE_LOG(ERR, USER1,
"Failed to create test vector for"
" specified file\n");
goto err;
}
if (cperf_check_test_vector(&opts, t_vec)) {
RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
"\n");
goto err;
}
} else {
t_vec = cperf_test_vector_get_dummy(&opts);
if (t_vec == NULL) {
RTE_LOG(ERR, USER1,
"Failed to create test vector for"
" specified algorithms\n");
goto err;
}
}
ret = cperf_get_op_functions(&opts, &op_fns);
if (ret) {
RTE_LOG(ERR, USER1, "Failed to find function ops set for "
"specified algorithms combination\n");
goto err;
}
if (!opts.silent)
show_test_vector(t_vec);
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (i == nb_cryptodevs)
break;
cdev_id = enabled_cdevs[i];
ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
&opts, t_vec, &op_fns);
if (ctx[cdev_id] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
goto err;
}
i++;
}
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (i == nb_cryptodevs)
break;
cdev_id = enabled_cdevs[i];
rte_eal_remote_launch(cperf_testmap[opts.test].runner,
ctx[cdev_id], lcore_id);
i++;
}
rte_eal_mp_wait_lcore();
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (i == nb_cryptodevs)
break;
cdev_id = enabled_cdevs[i];
cperf_testmap[opts.test].destructor(ctx[cdev_id]);
i++;
}
free_test_vector(t_vec, &opts);
printf("\n");
return EXIT_SUCCESS;
err:
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (i == nb_cryptodevs)
break;
cdev_id = enabled_cdevs[i];
if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
cperf_testmap[opts.test].destructor(ctx[cdev_id]);
i++;
}
free_test_vector(t_vec, &opts);
printf("\n");
return EXIT_FAILURE;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/fm10k/fm10k_rxtx_vec.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <inttypes.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
#include <tmmintrin.h>
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static void
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
/* Handling the offload flags (olflags) field takes computation
* time when receiving packets. Therefore we provide a flag to disable
* the processing of the olflags field when they are not needed. This
* gives improved performance, at the cost of losing the offload info
* in the received packet
*/
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
/* Vlan present flag shift */
#define VP_SHIFT (2)
/* L3 type shift */
#define L3TYPE_SHIFT (4)
/* L4 type shift */
#define L4TYPE_SHIFT (7)
/* HBO flag shift */
#define HBOFLAG_SHIFT (10)
/* RXE flag shift */
#define RXEFLAG_SHIFT (13)
/* IPE/L4E flag shift */
#define L3L4EFLAG_SHIFT (14)
/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
#define CKSUM_SHIFT (1)
static inline void
fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag;
union {
uint16_t e[4];
uint64_t dword;
} vol;
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x000F, 0x000F, 0x000F, 0x000F);
/* mask for HBO and RXE flag flags */
const __m128i rxe_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x0001, 0x0001, 0x0001, 0x0001);
/* mask the lower byte of ol_flags */
const __m128i ol_flags_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x00FF, 0x00FF, 0x00FF, 0x00FF);
const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0);
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
/* Calculate RSS_hash and Vlan fields */
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
ptype0 = _mm_and_si128(ptype0, rsstype_msk);
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
eflag0 = vtag1;
cksumflag = vtag1;
vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT);
vtag1 = _mm_and_si128(vtag1, pkttype_msk);
vtag1 = _mm_or_si128(ptype0, vtag1);
/* Process err flags, simply set RECIP_ERR bit if HBO/IXE is set */
eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT);
eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT);
eflag0 = _mm_or_si128(eflag0, eflag1);
eflag0 = _mm_and_si128(eflag0, rxe_msk);
eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0);
vtag1 = _mm_or_si128(eflag0, vtag1);
/* Process L4/L3 checksum error flags */
cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT);
cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag);
/* clean the higher byte and shift back the flag bits */
cksumflag = _mm_and_si128(cksumflag, ol_flags_msk);
cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT);
vtag1 = _mm_or_si128(cksumflag, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
rx_pkts[0]->ol_flags = vol.e[0];
rx_pkts[1]->ol_flags = vol.e[1];
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
}
/* @note: When this function is changed, make corresponding change to
* fm10k_dev_supported_ptypes_get().
*/
static inline void
fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
__m128i l3l4type0, l3l4type1, l3type, l4type;
union {
uint16_t e[4];
uint64_t dword;
} vol;
/* L3 pkt type mask Bit4 to Bit6 */
const __m128i l3type_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x0070, 0x0070, 0x0070, 0x0070);
/* L4 pkt type mask Bit7 to Bit9 */
const __m128i l4type_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x0380, 0x0380, 0x0380, 0x0380);
/* convert RRC l3 type to mbuf format */
const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, RTE_PTYPE_L3_IPV6_EXT,
RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV4, 0);
/* Convert RRC l4 type to mbuf format. The l4type_flags values are
* pre-shifted right by 8 bits so they fit into the 8-bit shuffle
* table; they are shifted left again after the shuffle.
*/
const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
RTE_PTYPE_TUNNEL_GENEVE >> 8,
RTE_PTYPE_TUNNEL_NVGRE >> 8,
RTE_PTYPE_TUNNEL_VXLAN >> 8,
RTE_PTYPE_TUNNEL_GRE >> 8,
RTE_PTYPE_L4_UDP >> 8,
RTE_PTYPE_L4_TCP >> 8,
0);
l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]);
l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]);
l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1);
l3type = _mm_and_si128(l3l4type0, l3type_msk);
l4type = _mm_and_si128(l3l4type0, l4type_msk);
l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT);
l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT);
l3type = _mm_shuffle_epi8(l3type_flags, l3type);
/* l4type values were pre-shifted right by 8 bits; shift left to restore */
l4type = _mm_shuffle_epi8(l4type_flags, l4type);
l4type = _mm_slli_epi16(l4type, 8);
l3l4type0 = _mm_or_si128(l3type, l4type);
vol.dword = _mm_cvtsi128_si64(l3l4type0);
rx_pkts[0]->packet_type = vol.e[0];
rx_pkts[1]->packet_type = vol.e[1];
rx_pkts[2]->packet_type = vol.e[2];
rx_pkts[3]->packet_type = vol.e[3];
}
#else
#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
#endif
int __attribute__((cold))
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
/* without Rx ol_flags, no VP flag is reported */
if (rxmode->hw_vlan_extend != 0)
return -1;
#endif
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
/* no header split support */
if (rxmode->header_split == 1)
return -1;
return 0;
#else
RTE_SET_USED(dev);
return -1;
#endif
}
int __attribute__((cold))
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
/* data_off will be adjusted after a new mbuf is allocated, for
* 512-byte alignment.
*/
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
/* prevent compiler reordering: rearm_data covers previous fields */
rte_compiler_barrier();
p = (uintptr_t)&mb_def.rearm_data;
rxq->mbuf_initializer = *(uint64_t *)p;
return 0;
}
static inline void
fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
{
int i;
uint16_t rx_id;
volatile union fm10k_rx_desc *rxdp;
struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start];
struct rte_mbuf *mb0, *mb1;
__m128i head_off = _mm_set_epi64x(
RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1,
RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1);
__m128i dma_addr0, dma_addr1;
/* Rx buffers need to be aligned to 512 bytes */
const __m128i hba_msk = _mm_set_epi64x(0,
UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1);
rxdp = rxq->hw_ring + rxq->rxrearm_start;
/* Pull 'n' more MBUFs into the software ring */
if (rte_mempool_get_bulk(rxq->mp,
(void *)mb_alloc,
RTE_FM10K_RXQ_REARM_THRESH) < 0) {
dma_addr0 = _mm_setzero_si128();
/* Clean up all the HW/SW ring content */
for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) {
mb_alloc[i] = &rxq->fake_mbuf;
_mm_store_si128((__m128i *)&rxdp[i].q,
dma_addr0);
}
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
RTE_FM10K_RXQ_REARM_THRESH;
return;
}
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) {
__m128i vaddr0, vaddr1;
uintptr_t p0, p1;
mb0 = mb_alloc[0];
mb1 = mb_alloc[1];
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
* Though, RX will overwrite ol_flags that are coming next
* anyway. So overwrite whole 8 bytes with one load:
* 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
/* convert pa to dma_addr hdr/data */
dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
/* add headroom to pa values */
dma_addr0 = _mm_add_epi64(dma_addr0, head_off);
dma_addr1 = _mm_add_epi64(dma_addr1, head_off);
/* Do 512 byte alignment to satisfy HW requirement, in the
* meanwhile, set Header Buffer Address to zero.
*/
dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
/* flush desc with pa dma_addr */
_mm_store_si128((__m128i *)&rxdp++->q, dma_addr0);
_mm_store_si128((__m128i *)&rxdp++->q, dma_addr1);
/* enforce 512B alignment on default Rx virtual addresses */
mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
- (char *)mb0->buf_addr);
mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr
+ RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN)
- (char *)mb1->buf_addr);
}
rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH;
if (rxq->rxrearm_start >= rxq->nb_desc)
rxq->rxrearm_start = 0;
rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH;
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_desc - 1) : (rxq->rxrearm_start - 1));
/* Update the tail pointer on the NIC */
FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
}
void __attribute__((cold))
fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
{
const unsigned mask = rxq->nb_desc - 1;
unsigned i;
if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc)
return;
/* free all mbufs that are valid in the ring */
for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask)
rte_pktmbuf_free_seg(rxq->sw_ring[i]);
rxq->rxrearm_nb = rxq->nb_desc;
/* set all entries to NULL */
memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc);
}
static inline uint16_t
fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
{
volatile union fm10k_rx_desc *rxdp;
struct rte_mbuf **mbufp;
uint16_t nb_pkts_recd;
int pos;
struct fm10k_rx_queue *rxq = rx_queue;
uint64_t var;
__m128i shuf_msk;
__m128i dd_check, eop_check;
uint16_t next_dd;
next_dd = rxq->next_dd;
/* Just the act of getting into the function from the application is
* going to cost about 7 cycles
*/
rxdp = rxq->hw_ring + next_dd;
rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH)
fm10k_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
* there is actually a packet available
*/
if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
return 0;
/* Vector RX processes 4 packets at a time; strip the unaligned
* tail in case nb_pkts is not a multiple of 4.
*/
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
/* 4 packets DD mask */
dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
/* 4 packets EOP mask */
eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
/* mask to shuffle from desc. to mbuf */
shuf_msk = _mm_set_epi8(
7, 6, 5, 4, /* octet 4~7, 32bits rss */
15, 14, /* octet 14~15, low 16 bits vlan_macip */
13, 12, /* octet 12~13, 16 bits data_len */
0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
13, 12, /* octet 12~13, low 16 bits pkt_len */
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
*/
mbufp = &rxq->sw_ring[next_dd];
/* A. load 4 packet in one loop
* [A*. mask out 4 unused dirty field in desc]
* B. copy 4 mbuf point from swring to rx_pkts
* C. calc the number of DD bits among the 4 packets
* [C*. extract the end-of-packet bit, if requested]
* D. fill info. from desc to mbuf
*/
for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
pos += RTE_FM10K_DESCS_PER_LOOP,
rxdp += RTE_FM10K_DESCS_PER_LOOP) {
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
__m128i mbp1;
/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
#if defined(RTE_ARCH_X86_64)
__m128i mbp2;
#endif
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
#if defined(RTE_ARCH_X86_64)
/* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
#endif
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
/* A.1 load remaining pkt descs */
descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk);
pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk);
/* C.1 4=>2 filter staterr info only */
sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]);
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]);
/* set ol_flags with vlan packet type */
fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]);
/* D.1 pkt 1,2 convert format from desc to pktmbuf */
pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk);
pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk);
/* C.2 get 4 pkts staterr value */
zero = _mm_xor_si128(dd_check, dd_check);
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
pkt_mb4);
_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
pkt_mb3);
/* C* extract and record EOP bit */
if (split_packet) {
__m128i eop_shuf_mask = _mm_set_epi8(
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF,
0x04, 0x0C, 0x00, 0x08
);
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order, which doesn't matter
* when counting DD bits. However, for end-of-packet tracking
* we do care, so shuffle. This also compresses the 32-bit
* values to 8-bit
*/
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_FM10K_DESCS_PER_LOOP;
/* zero-out next pointers */
rx_pkts[pos]->next = NULL;
rx_pkts[pos + 1]->next = NULL;
rx_pkts[pos + 2]->next = NULL;
rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
staterr = _mm_and_si128(staterr, dd_check);
staterr = _mm_packs_epi32(staterr, zero);
/* D.3 copy final 1,2 data to rx_pkts */
_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
break;
}
/* Update our internal tail pointer */
rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd);
rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1));
rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
return nb_pkts_recd;
}
/* vPMD receive routine
*
* Notice:
* - don't support ol_flags for rss and csum err
*/
uint16_t
fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
static inline uint16_t
fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
{
struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /*finished pkts*/
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
unsigned pkt_idx, buf_idx;
for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
if (end != NULL) {
/* processing a split packet */
end->next = rx_bufs[buf_idx];
start->nb_segs++;
start->pkt_len += rx_bufs[buf_idx]->data_len;
end = end->next;
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
start->hash = end->hash;
start->ol_flags = end->ol_flags;
start->packet_type = end->packet_type;
#endif
pkts[pkt_idx++] = start;
start = end = NULL;
}
} else {
/* not processing a split packet */
if (!split_flags[buf_idx]) {
/* not a split packet, save and skip */
pkts[pkt_idx++] = rx_bufs[buf_idx];
continue;
}
end = start = rx_bufs[buf_idx];
}
}
/* save the partial packet for next time */
rxq->pkt_first_seg = start;
rxq->pkt_last_seg = end;
memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
return pkt_idx;
}
/*
* vPMD receive routine that reassembles scattered packets
*
* Notice:
* - don't support ol_flags for rss and csum err
* - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST
* numbers of DD bit
*/
uint16_t
fm10k_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct fm10k_rx_queue *rxq = rx_queue;
uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
unsigned i = 0;
/* split_flags can hold at most RTE_FM10K_MAX_RX_BURST entries */
nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST);
/* get some new buffers */
uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
split_flags);
if (nb_bufs == 0)
return 0;
/* happy day case, full burst + no packets to be joined */
const uint64_t *split_fl64 = (uint64_t *)split_flags;
if (rxq->pkt_first_seg == NULL &&
split_fl64[0] == 0 && split_fl64[1] == 0 &&
split_fl64[2] == 0 && split_fl64[3] == 0)
return nb_bufs;
/* reassemble any packets that need reassembly */
if (rxq->pkt_first_seg == NULL) {
/* find the first split flag, and only reassemble from there */
while (i < nb_bufs && !split_flags[i])
i++;
if (i == nb_bufs)
return nb_bufs;
}
return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
&split_flags[i]);
}
static const struct fm10k_txq_ops vec_txq_ops = {
.reset = fm10k_reset_tx_queue,
};
void __attribute__((cold))
fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
{
txq->ops = &vec_txq_ops;
}
int __attribute__((cold))
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
return -1;
if (txq->tx_ftag_en)
return -1;
return 0;
}
static inline void
vtx1(volatile struct fm10k_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
{
__m128i descriptor = _mm_set_epi64x(flags << 56 |
pkt->vlan_tci << 16 | pkt->data_len,
MBUF_DMA_ADDR(pkt));
_mm_store_si128((__m128i *)txdp, descriptor);
}
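/*
 * Illustrative layout note (added for this listing, not from the
 * original source): the high quadword built above packs flags into
 * bits 56..63, vlan_tci into bits 16..31 and data_len into bits 0..15,
 * matching the flags/vlan/buflen fields of the fm10k TX descriptor;
 * the low quadword carries the buffer's DMA address.
 */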
static inline void
vtx(volatile struct fm10k_tx_desc *txdp,
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
int i;
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
}
static inline int __attribute__((always_inline))
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
{
struct rte_mbuf **txep;
uint8_t flags;
uint32_t n;
uint32_t i;
int nb_free = 0;
struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
/* check DD bit on threshold descriptor */
flags = txq->hw_ring[txq->next_dd].flags;
if (!(flags & FM10K_TXD_FLAG_DONE))
return 0;
n = txq->rs_thresh;
/* First buffer to free from S/W ring is at index
* next_dd - (rs_thresh-1)
*/
txep = &txq->sw_ring[txq->next_dd - (n - 1)];
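/*
* Return freed mbufs to their mempools in batches: consecutive mbufs from
* the same pool are collected and released with one bulk put; a pool change
* flushes the pending batch first.
*/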
m = __rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
m = __rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
else {
rte_mempool_put_bulk(free[0]->pool,
(void *)free, nb_free);
free[0] = m;
nb_free = 1;
}
}
}
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
m = __rte_pktmbuf_prefree_seg(txep[i]);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
}
/* buffers were freed, update counters */
txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
if (txq->next_dd >= txq->nb_desc)
txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
return txq->rs_thresh;
}
static inline void __attribute__((always_inline))
tx_backlog_entry(struct rte_mbuf **txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
for (i = 0; i < (int)nb_pkts; ++i)
txep[i] = tx_pkts[i];
}
uint16_t
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
volatile struct fm10k_tx_desc *txdp;
struct rte_mbuf **txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = FM10K_TXD_FLAG_LAST;
uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST;
int i;
/* crossing the rs_thresh boundary is not allowed */
nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
if (txq->nb_free < txq->free_thresh)
fm10k_tx_free_bufs(txq);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
if (unlikely(nb_pkts == 0))
return 0;
tx_id = txq->next_free;
txdp = &txq->hw_ring[tx_id];
txep = &txq->sw_ring[tx_id];
txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
n = (uint16_t)(txq->nb_desc - tx_id);
if (nb_commit >= n) {
tx_backlog_entry(txep, tx_pkts, n);
for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
vtx1(txdp, *tx_pkts, flags);
vtx1(txdp, *tx_pkts++, rs);
nb_commit = (uint16_t)(nb_commit - n);
tx_id = 0;
txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
/* avoid reaching the end of the ring */
txdp = &(txq->hw_ring[tx_id]);
txep = &txq->sw_ring[tx_id];
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
vtx(txdp, tx_pkts, nb_commit, flags);
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->next_rs) {
txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS;
txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh);
}
txq->next_free = tx_id;
FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free);
return nb_pkts;
}
static void __attribute__((cold))
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
{
static const struct fm10k_tx_desc zeroed_desc = {0};
struct rte_mbuf **txe = txq->sw_ring;
uint16_t i;
/* Zero out HW ring memory */
for (i = 0; i < txq->nb_desc; i++)
txq->hw_ring[i] = zeroed_desc;
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_desc; i++)
txe[i] = NULL;
txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
txq->next_free = 0;
txq->nb_used = 0;
/* Always allow 1 descriptor to be un-allocated to avoid
* a H/W race condition
*/
txq->nb_free = (uint16_t)(txq->nb_desc - 1);
FM10K_PCI_REG_WRITE(txq->tail_ptr, 0);
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf_options_parsing.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <getopt.h>
#include <unistd.h>
#include <rte_malloc.h>
#include "cperf_options.h"
struct name_id_map {
const char *name;
uint32_t id;
};
static int
get_str_key_id_mapping(struct name_id_map *map, unsigned int map_len,
const char *str_key)
{
unsigned int i;
for (i = 0; i < map_len; i++) {
if (strcmp(str_key, map[i].name) == 0)
return map[i].id;
}
return -1;
}
static int
parse_cperf_test_type(struct cperf_options *opts, const char *arg)
{
struct name_id_map cperftest_namemap[] = {
{
cperf_test_type_strs[CPERF_TEST_TYPE_THROUGHPUT],
CPERF_TEST_TYPE_THROUGHPUT
},
{
cperf_test_type_strs[CPERF_TEST_TYPE_CYCLECOUNT],
CPERF_TEST_TYPE_CYCLECOUNT
},
{
cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
CPERF_TEST_TYPE_LATENCY
}
};
int id = get_str_key_id_mapping(
(struct name_id_map *)cperftest_namemap,
RTE_DIM(cperftest_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "failed to parse test type");
return -1;
}
opts->test = (enum cperf_perf_test_type)id;
return 0;
}
static int
parse_uint32_t(uint32_t *value, const char *arg)
{
char *end = NULL;
unsigned long n = strtoul(arg, &end, 10);
if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
return -1;
if (n > UINT32_MAX)
return -ERANGE;
*value = (uint32_t) n;
return 0;
}
static int
parse_uint16_t(uint16_t *value, const char *arg)
{
uint32_t val = 0;
int ret = parse_uint32_t(&val, arg);
if (ret < 0)
return ret;
if (val > UINT16_MAX)
return -ERANGE;
*value = (uint16_t) val;
return 0;
}
static int
parse_total_ops(struct cperf_options *opts, const char *arg)
{
int ret = parse_uint32_t(&opts->total_ops, arg);
if (ret)
RTE_LOG(ERR, USER1, "failed to parse total operations count\n");
if (opts->total_ops == 0) {
RTE_LOG(ERR, USER1,
"invalid total operations count number specified\n");
return -1;
}
return ret;
}
static int
parse_pool_sz(struct cperf_options *opts, const char *arg)
{
int ret = parse_uint32_t(&opts->pool_sz, arg);
if (ret)
RTE_LOG(ERR, USER1, "failed to parse pool size");
return ret;
}
static int
parse_burst_sz(struct cperf_options *opts, const char *arg)
{
int ret = parse_uint32_t(&opts->burst_sz, arg);
if (ret)
RTE_LOG(ERR, USER1, "failed to parse burst size");
return ret;
}
static int
parse_buffer_sz(struct cperf_options *opts, const char *arg)
{
uint32_t i, valid_buf_sz[] = {
32, 64, 128, 256, 384, 512, 768, 1024, 1280, 1536, 1792,
2048
};
if (parse_uint32_t(&opts->buffer_sz, arg)) {
RTE_LOG(ERR, USER1, "failed to parse buffer size");
return -1;
}
for (i = 0; i < RTE_DIM(valid_buf_sz); i++)
if (valid_buf_sz[i] == opts->buffer_sz)
return 0;
RTE_LOG(ERR, USER1, "invalid buffer size specified");
return -1;
}
static int
parse_segments_nb(struct cperf_options *opts, const char *arg)
{
int ret = parse_uint32_t(&opts->segments_nb, arg);
if (ret) {
RTE_LOG(ERR, USER1, "failed to parse segments number\n");
return -1;
}
if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
RTE_LOG(ERR, USER1, "invalid segments number specified\n");
return -1;
}
return 0;
}
static int
parse_device_type(struct cperf_options *opts, const char *arg)
{
if (strlen(arg) > (sizeof(opts->device_type) - 1))
return -1;
strncpy(opts->device_type, arg, sizeof(opts->device_type) - 1);
*(opts->device_type + sizeof(opts->device_type) - 1) = '\0';
return 0;
}
static int
parse_op_type(struct cperf_options *opts, const char *arg)
{
struct name_id_map optype_namemap[] = {
{
cperf_op_type_strs[CPERF_CIPHER_ONLY],
CPERF_CIPHER_ONLY
},
{
cperf_op_type_strs[CPERF_AUTH_ONLY],
CPERF_AUTH_ONLY
},
{
cperf_op_type_strs[CPERF_CIPHER_THEN_AUTH],
CPERF_CIPHER_THEN_AUTH
},
{
cperf_op_type_strs[CPERF_AUTH_THEN_CIPHER],
CPERF_AUTH_THEN_CIPHER
},
{
cperf_op_type_strs[CPERF_AEAD],
CPERF_AEAD
}
};
int id = get_str_key_id_mapping(optype_namemap,
RTE_DIM(optype_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "invalid opt type specified\n");
return -1;
}
opts->op_type = (enum cperf_op_type)id;
return 0;
}
static int
parse_sessionless(struct cperf_options *opts,
const char *arg __rte_unused)
{
opts->sessionless = 1;
return 0;
}
static int
parse_out_of_place(struct cperf_options *opts,
const char *arg __rte_unused)
{
opts->out_of_place = 1;
return 0;
}
static int
parse_verify(struct cperf_options *opts,
const char *arg __rte_unused)
{
opts->verify = 1;
return 0;
}
static int
parse_test_file(struct cperf_options *opts,
const char *arg)
{
opts->test_file = strdup(arg);
if (access(opts->test_file, F_OK) != -1)
return 0;
RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
return -1;
}
static int
parse_test_name(struct cperf_options *opts,
const char *arg)
{
char *test_name = (char *) rte_zmalloc(NULL,
sizeof(char) * (strlen(arg) + 3), 0);
snprintf(test_name, strlen(arg) + 3, "[%s]", arg);
opts->test_name = test_name;
return 0;
}
static int
parse_silent(struct cperf_options *opts,
const char *arg __rte_unused)
{
opts->silent = 1;
return 0;
}
static int
parse_cipher_algo(struct cperf_options *opts, const char *arg)
{
struct name_id_map cipher_algo_namemap[] = {
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_3DES_CBC],
RTE_CRYPTO_CIPHER_3DES_CBC
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_3DES_ECB],
RTE_CRYPTO_CIPHER_3DES_ECB
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_3DES_CTR],
RTE_CRYPTO_CIPHER_3DES_CTR
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_CBC],
RTE_CRYPTO_CIPHER_AES_CBC
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_CCM],
RTE_CRYPTO_CIPHER_AES_CCM
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_CTR],
RTE_CRYPTO_CIPHER_AES_CTR
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_ECB],
RTE_CRYPTO_CIPHER_AES_ECB
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_GCM],
RTE_CRYPTO_CIPHER_AES_GCM
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_F8],
RTE_CRYPTO_CIPHER_AES_F8
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_AES_XTS],
RTE_CRYPTO_CIPHER_AES_XTS
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_ARC4],
RTE_CRYPTO_CIPHER_ARC4
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_NULL],
RTE_CRYPTO_CIPHER_NULL
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_KASUMI_F8],
RTE_CRYPTO_CIPHER_KASUMI_F8
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_SNOW3G_UEA2],
RTE_CRYPTO_CIPHER_SNOW3G_UEA2
},
{
rte_crypto_cipher_algorithm_strings
[RTE_CRYPTO_CIPHER_ZUC_EEA3],
RTE_CRYPTO_CIPHER_ZUC_EEA3
},
};
int id = get_str_key_id_mapping(cipher_algo_namemap,
RTE_DIM(cipher_algo_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "Invalid cipher algorithm specified\n");
return -1;
}
opts->cipher_algo = (enum rte_crypto_cipher_algorithm)id;
return 0;
}
static int
parse_cipher_op(struct cperf_options *opts, const char *arg)
{
struct name_id_map cipher_op_namemap[] = {
{
rte_crypto_cipher_operation_strings
[RTE_CRYPTO_CIPHER_OP_ENCRYPT],
RTE_CRYPTO_CIPHER_OP_ENCRYPT },
{
rte_crypto_cipher_operation_strings
[RTE_CRYPTO_CIPHER_OP_DECRYPT],
RTE_CRYPTO_CIPHER_OP_DECRYPT
}
};
int id = get_str_key_id_mapping(cipher_op_namemap,
RTE_DIM(cipher_op_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "Invalid cipher operation specified\n");
return -1;
}
opts->cipher_op = (enum rte_crypto_cipher_operation)id;
return 0;
}
static int
parse_cipher_key_sz(struct cperf_options *opts, const char *arg)
{
return parse_uint16_t(&opts->cipher_key_sz, arg);
}
static int
parse_cipher_iv_sz(struct cperf_options *opts, const char *arg)
{
return parse_uint16_t(&opts->cipher_iv_sz, arg);
}
static int
parse_auth_algo(struct cperf_options *opts, const char *arg) {
struct name_id_map cipher_auth_namemap[] = {
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_CBC_MAC],
RTE_CRYPTO_AUTH_AES_CBC_MAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_CCM],
RTE_CRYPTO_AUTH_AES_CCM
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_CMAC],
RTE_CRYPTO_AUTH_AES_CMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_GCM],
RTE_CRYPTO_AUTH_AES_GCM
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_GMAC],
RTE_CRYPTO_AUTH_AES_GMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
RTE_CRYPTO_AUTH_AES_XCBC_MAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_MD5],
RTE_CRYPTO_AUTH_MD5
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_MD5_HMAC],
RTE_CRYPTO_AUTH_MD5_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA1],
RTE_CRYPTO_AUTH_SHA1
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA1_HMAC],
RTE_CRYPTO_AUTH_SHA1_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA224],
RTE_CRYPTO_AUTH_SHA224
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA224_HMAC],
RTE_CRYPTO_AUTH_SHA224_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA256],
RTE_CRYPTO_AUTH_SHA256
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA256_HMAC],
RTE_CRYPTO_AUTH_SHA256_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA384],
RTE_CRYPTO_AUTH_SHA384
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA384_HMAC],
RTE_CRYPTO_AUTH_SHA384_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA512],
RTE_CRYPTO_AUTH_SHA512
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SHA512_HMAC],
RTE_CRYPTO_AUTH_SHA512_HMAC
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_KASUMI_F9],
RTE_CRYPTO_AUTH_KASUMI_F9
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_SNOW3G_UIA2],
RTE_CRYPTO_AUTH_SNOW3G_UIA2
},
{
rte_crypto_auth_algorithm_strings
[RTE_CRYPTO_AUTH_ZUC_EIA3],
RTE_CRYPTO_AUTH_ZUC_EIA3
},
};
int id = get_str_key_id_mapping(cipher_auth_namemap,
RTE_DIM(cipher_auth_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "invalid authentication algorithm specified"
"\n");
return -1;
}
opts->auth_algo = (enum rte_crypto_auth_algorithm)id;
return 0;
}
static int
parse_auth_op(struct cperf_options *opts, const char *arg)
{
struct name_id_map auth_op_namemap[] = {
{
rte_crypto_auth_operation_strings
[RTE_CRYPTO_AUTH_OP_GENERATE],
RTE_CRYPTO_AUTH_OP_GENERATE },
{
rte_crypto_auth_operation_strings
[RTE_CRYPTO_AUTH_OP_VERIFY],
RTE_CRYPTO_AUTH_OP_VERIFY
}
};
int id = get_str_key_id_mapping(auth_op_namemap,
RTE_DIM(auth_op_namemap), arg);
if (id < 0) {
RTE_LOG(ERR, USER1, "invalid authentication operation specified"
"\n");
return -1;
}
opts->auth_op = (enum rte_crypto_auth_operation)id;
return 0;
}
static int
parse_auth_key_sz(struct cperf_options *opts, const char *arg)
{
return parse_uint16_t(&opts->auth_key_sz, arg);
}
static int
parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
{
return parse_uint16_t(&opts->auth_digest_sz, arg);
}
static int
parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
{
return parse_uint16_t(&opts->auth_aad_sz, arg);
}
static int
parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
{
opts->csv = 1;
opts->silent = 1;
return 0;
}
typedef int (*option_parser_t)(struct cperf_options *opts,
const char *arg);
struct long_opt_parser {
const char *lgopt_name;
option_parser_t parser_fn;
};
static struct option lgopts[] = {
{ CPERF_PTEST_TYPE, required_argument, 0, 0 },
{ CPERF_POOL_SIZE, required_argument, 0, 0 },
{ CPERF_TOTAL_OPS, required_argument, 0, 0 },
{ CPERF_BURST_SIZE, required_argument, 0, 0 },
{ CPERF_BUFFER_SIZE, required_argument, 0, 0 },
{ CPERF_SEGMENTS_NB, required_argument, 0, 0 },
{ CPERF_DEVTYPE, required_argument, 0, 0 },
{ CPERF_OPTYPE, required_argument, 0, 0 },
{ CPERF_SILENT, no_argument, 0, 0 },
{ CPERF_SESSIONLESS, no_argument, 0, 0 },
{ CPERF_OUT_OF_PLACE, no_argument, 0, 0 },
{ CPERF_VERIFY, no_argument, 0, 0 },
{ CPERF_TEST_FILE, required_argument, 0, 0 },
{ CPERF_TEST_NAME, required_argument, 0, 0 },
{ CPERF_CIPHER_ALGO, required_argument, 0, 0 },
{ CPERF_CIPHER_OP, required_argument, 0, 0 },
{ CPERF_CIPHER_KEY_SZ, required_argument, 0, 0 },
{ CPERF_CIPHER_IV_SZ, required_argument, 0, 0 },
{ CPERF_AUTH_ALGO, required_argument, 0, 0 },
{ CPERF_AUTH_OP, required_argument, 0, 0 },
{ CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
{ CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
{ CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
{ CPERF_CSV, no_argument, 0, 0},
{ NULL, 0, 0, 0 }
};
void
cperf_options_default(struct cperf_options *opts)
{
opts->test = CPERF_TEST_TYPE_THROUGHPUT;
opts->pool_sz = 8192;
opts->total_ops = 10000000;
opts->burst_sz = 32;
opts->buffer_sz = 64;
opts->segments_nb = 1;
strncpy(opts->device_type, "crypto_aesni_mb",
sizeof(opts->device_type));
opts->op_type = CPERF_CIPHER_THEN_AUTH;
opts->silent = 0;
opts->verify = 0;
opts->test_file = NULL;
opts->test_name = NULL;
opts->sessionless = 0;
opts->out_of_place = 0;
opts->csv = 0;
opts->cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
opts->cipher_op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
opts->cipher_key_sz = 16;
opts->cipher_iv_sz = 16;
opts->auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
opts->auth_key_sz = 64;
opts->auth_digest_sz = 12;
opts->auth_aad_sz = 0;
}
static int
cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{
struct long_opt_parser parsermap[] = {
{ CPERF_PTEST_TYPE, parse_cperf_test_type },
{ CPERF_SILENT, parse_silent },
{ CPERF_POOL_SIZE, parse_pool_sz },
{ CPERF_TOTAL_OPS, parse_total_ops },
{ CPERF_BURST_SIZE, parse_burst_sz },
{ CPERF_BUFFER_SIZE, parse_buffer_sz },
{ CPERF_SEGMENTS_NB, parse_segments_nb },
{ CPERF_DEVTYPE, parse_device_type },
{ CPERF_OPTYPE, parse_op_type },
{ CPERF_SESSIONLESS, parse_sessionless },
{ CPERF_OUT_OF_PLACE, parse_out_of_place },
{ CPERF_VERIFY, parse_verify },
{ CPERF_TEST_FILE, parse_test_file },
{ CPERF_TEST_NAME, parse_test_name },
{ CPERF_CIPHER_ALGO, parse_cipher_algo },
{ CPERF_CIPHER_OP, parse_cipher_op },
{ CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz },
{ CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz },
{ CPERF_AUTH_ALGO, parse_auth_algo },
{ CPERF_AUTH_OP, parse_auth_op },
{ CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
{ CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
{ CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
{ CPERF_CSV, parse_csv_friendly},
};
unsigned int i;
for (i = 0; i < RTE_DIM(parsermap); i++) {
if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
strlen(lgopts[opt_idx].name)) == 0)
return parsermap[i].parser_fn(opts, optarg);
}
return -EINVAL;
}
int
cperf_options_parse(struct cperf_options *options, int argc, char **argv)
{
int opt, retval, opt_idx;
while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
switch (opt) {
/* long options */
case 0:
retval = cperf_opts_parse_long(opt_idx, options);
if (retval != 0)
return retval;
break;
default:
return -EINVAL;
}
}
return 0;
}
int
cperf_options_check(struct cperf_options *options)
{
if (options->segments_nb > options->buffer_sz) {
RTE_LOG(ERR, USER1,
"Segments number greater than buffer size.\n");
return -EINVAL;
}
if (options->verify && options->test_file == NULL) {
RTE_LOG(ERR, USER1, "Define path to the file with test"
" vectors.\n");
return -EINVAL;
}
if (options->test_name != NULL && options->test_file == NULL) {
RTE_LOG(ERR, USER1, "Define path to the file with test"
" vectors.\n");
return -EINVAL;
}
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
options->test_file == NULL) {
RTE_LOG(ERR, USER1, "Define path to the file with test"
" vectors.\n");
return -EINVAL;
}
if (options->verify &&
options->total_ops > options->pool_sz) {
RTE_LOG(ERR, USER1, "Total number of ops must be less than or"
" equal to the pool size.\n");
return -EINVAL;
}
if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
options->auth_op !=
RTE_CRYPTO_AUTH_OP_GENERATE) {
RTE_LOG(ERR, USER1, "Option cipher then auth must use"
" options: encrypt and generate.\n");
return -EINVAL;
}
} else if (options->op_type == CPERF_AUTH_THEN_CIPHER) {
if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_DECRYPT &&
options->auth_op !=
RTE_CRYPTO_AUTH_OP_VERIFY) {
RTE_LOG(ERR, USER1, "Option auth then cipher must use"
" options: decrypt and verify.\n");
return -EINVAL;
}
} else if (options->op_type == CPERF_AEAD) {
if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
options->auth_op ==
RTE_CRYPTO_AUTH_OP_GENERATE) &&
!(options->cipher_op ==
RTE_CRYPTO_CIPHER_OP_DECRYPT &&
options->auth_op ==
RTE_CRYPTO_AUTH_OP_VERIFY)) {
RTE_LOG(ERR, USER1, "Use together options: encrypt and"
" generate or decrypt and verify.\n");
return -EINVAL;
}
}
return 0;
}
void
cperf_options_dump(struct cperf_options *opts)
{
printf("# Crypto Performance Application Options:\n");
printf("#\n");
printf("# cperf test: %s\n", cperf_test_type_strs[opts->test]);
printf("#\n");
printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
printf("# total number of ops: %u\n", opts->total_ops);
printf("# burst size: %u\n", opts->burst_sz);
printf("# buffer size: %u\n", opts->buffer_sz);
printf("# segments per buffer: %u\n", opts->segments_nb);
printf("#\n");
printf("# cryptodev type: %s\n", opts->device_type);
printf("#\n");
printf("# crypto operation: %s\n", cperf_op_type_strs[opts->op_type]);
printf("# verify operation: %s\n", opts->verify ? "yes" : "no");
printf("# sessionless: %s\n", opts->sessionless ? "yes" : "no");
printf("# out of place: %s\n", opts->out_of_place ? "yes" : "no");
printf("#\n");
if (opts->op_type == CPERF_AUTH_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
opts->op_type == CPERF_AUTH_THEN_CIPHER ||
opts->op_type == CPERF_AEAD) {
printf("# auth algorithm: %s\n",
rte_crypto_auth_algorithm_strings[opts->auth_algo]);
printf("# auth operation: %s\n",
rte_crypto_auth_operation_strings[opts->auth_op]);
printf("# auth key size: %u\n", opts->auth_key_sz);
printf("# auth digest size: %u\n", opts->auth_digest_sz);
printf("# auth aad size: %u\n", opts->auth_aad_sz);
printf("#\n");
}
if (opts->op_type == CPERF_CIPHER_ONLY ||
opts->op_type == CPERF_CIPHER_THEN_AUTH ||
opts->op_type == CPERF_AUTH_THEN_CIPHER ||
opts->op_type == CPERF_AEAD) {
printf("# cipher algorithm: %s\n",
rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
printf("# cipher operation: %s\n",
rte_crypto_cipher_operation_strings[opts->cipher_op]);
printf("# cipher key size: %u\n", opts->cipher_key_sz);
printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
printf("#\n");
}
}
|
vicharl/containerdns
|
kdns/src/tcp_process.c
|
/*
* tcp_process.c
*/
#define _GNU_SOURCE
#include <pthread.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <netdb.h>
#include <fcntl.h>
#include <stdio.h>
#include "netdev.h"
#include "util.h"
#include "dns-conf.h"
#include "kdns.h"
#include "forward.h"
#include "view_update.h"
#include "db_update.h"
#include "query.h"
#include "kdns-adap.h"
#include "tcp_process.h"
extern domain_fwd_ctrl g_fwd_ctrl;
rte_rwlock_t tcp_lock;
struct kdns tcp_kdns;
static struct query *tcp_query;
struct netif_queue_stats tcp_stats;
void tcp_statsdata_get(struct netif_queue_stats *sta) {
sta->dns_fwd_rcv_tcp = tcp_stats.dns_fwd_rcv_tcp;
sta->dns_fwd_snd_tcp = tcp_stats.dns_fwd_snd_tcp;
sta->dns_fwd_lost_tcp = tcp_stats.dns_fwd_lost_tcp;
sta->dns_pkts_rcv_tcp = tcp_stats.dns_pkts_rcv_tcp;
sta->dns_pkts_snd_tcp = tcp_stats.dns_pkts_snd_tcp;
}
void tcp_statsdata_reset(void) {
memset(&tcp_stats, 0, sizeof(tcp_stats));
}
int tcp_domian_databd_update(struct domin_info_update *update) {
rte_rwlock_write_lock(&tcp_lock);
int ret = domaindata_update(tcp_kdns.db, update);
rte_rwlock_write_unlock(&tcp_lock);
return ret;
}
static int tcp_process_query(char *snd_buf, ssize_t snd_len, char *rvc_buf, ssize_t rcv_len, dns_addr_t *id_addr, int timeout) {
int sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sock_fd == -1) {
log_msg(LOG_ERR, "tcp_process_query sock errno=%d, errinfo=%s\n", errno, strerror(errno));
return -1;
}
struct timeval tv = {timeout, 0};
if (setsockopt(sock_fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
log_msg(LOG_ERR, "tcp_process_query socket option SO_RCVTIMEO errno=%d, errinfo=%s\n", errno, strerror(errno));
close(sock_fd);
return -1;
}
if (setsockopt(sock_fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) < 0) {
log_msg(LOG_ERR, "tcp_process_query socket option SO_SNDTIMEO errno=%d, errinfo=%s\n", errno, strerror(errno));
close(sock_fd);
return -1;
}
int ret = connect(sock_fd, &id_addr->addr, id_addr->addrlen);
if (-1 == ret) {
log_msg(LOG_ERR, "tcp_process_query connect errno=%d, errinfo=%s\n", errno, strerror(errno));
close(sock_fd);
return -1;
}
ret = send(sock_fd, snd_buf, snd_len, 0);
if (ret <= 0) {
log_msg(LOG_ERR, "tcp_process_query send errno=%d, errinfo=%s\n", errno, strerror(errno));
close(sock_fd);
return -1;
}
ret = recv(sock_fd, rvc_buf, rcv_len, 0);
if (ret <= 0) {
log_msg(LOG_ERR, "tcp_process_query recv errno=%d, errinfo=%s\n", errno, strerror(errno));
close(sock_fd);
return -1;
}
close(sock_fd);
return ret;
}
static int tcp_process_forward(int sfd, char *buf, int buf_len, struct sockaddr_in *caddr, uint16_t id, uint16_t qtype, char *domain) {
(void)id;
int i = 0;
int rlen = 0;
int fwd_mode;
int fwd_timeout;
int servers_len;
dns_addr_t server_addrs[FWD_MAX_ADDRS];
char recv_buf[TCP_MAX_MESSAGE_LEN];
pthread_rwlock_rdlock(&__fwd_lock);
fwd_mode = g_fwd_ctrl.mode;
fwd_timeout = g_fwd_ctrl.timeout;
domain_fwd_addrs *fwd_addrs = fwd_addrs_find(domain, &g_fwd_ctrl);
servers_len = fwd_addrs->servers_len;
memcpy(&server_addrs, &fwd_addrs->server_addrs, sizeof(fwd_addrs->server_addrs));
pthread_rwlock_unlock(&__fwd_lock);
tcp_stats.dns_fwd_rcv_tcp++;
if (fwd_mode == FWD_MODE_TYPE_DISABLE) {
tcp_stats.dns_fwd_lost_tcp++;
return 0;
}
for (; i < servers_len; i++) {
rlen = tcp_process_query(buf, buf_len, recv_buf, sizeof(recv_buf), &server_addrs[i], fwd_timeout);
if (rlen > 0) {
break;
}
char ip_src_str[INET_ADDRSTRLEN] = {0};
char ip_dst_str[INET_ADDRSTRLEN] = {0};
inet_ntop(AF_INET, &caddr->sin_addr, ip_src_str, sizeof(ip_src_str));
inet_ntop(AF_INET, &((struct sockaddr_in *)&server_addrs[i].addr)->sin_addr, ip_dst_str, sizeof(ip_dst_str));
log_msg(LOG_ERR, "Failed to send tcp request: %s, type %d, to %s, from: %s, trycnt: %d\n",
domain, qtype, ip_dst_str, ip_src_str, i);
tcp_stats.dns_fwd_lost_tcp++;
}
if (rlen > 0) {
if (send(sfd, recv_buf, rlen, 0) == -1) {
tcp_stats.dns_fwd_lost_tcp++;
log_msg(LOG_ERR, "Failed to send tcp response: %s, type %d, to %s\n", domain, qtype, inet_ntoa(caddr->sin_addr));
return -1;
}
tcp_stats.dns_fwd_snd_tcp++;
}
return 0;
}
static int tcp_recv(int fd, char *buf, int len) {
int bytes_transmitted = 0;
while (bytes_transmitted < len) {
int recv_len = recv(fd, buf + bytes_transmitted, len - bytes_transmitted, 0);
if (recv_len == -1) {
log_msg(LOG_ERR, "call recv len %d error, ret=%d, errno=%d, errinfo=%s\n", len, recv_len, errno, strerror(errno));
return -1;
} else if (recv_len == 0) {
/* EOF */
return 0;
}
bytes_transmitted += recv_len;
}
return bytes_transmitted;
}
static void *thread_tcp_process(void *arg) {
char *ip = (char *)arg;
int sfd, cfd, slen;
socklen_t addr_len;
uint16_t flags_old;
struct sockaddr_in saddr, caddr;
char buf[TCP_MAX_MESSAGE_LEN];
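/* Delay before binding; presumably to give the KNI interface (the VIP this thread listens on) time to come up. */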
sleep(30);
sfd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sfd < 0) {
log_msg(LOG_ERR, "Failed to create tcp socket, errno=%d, errinfo=%s\n", errno, strerror(errno));
exit(1);
}
bzero(&saddr, sizeof(saddr));
saddr.sin_family = AF_INET;
saddr.sin_addr.s_addr = inet_addr(ip);
saddr.sin_port = htons(53);
if (bind(sfd, (struct sockaddr *)&saddr, sizeof(saddr)) == -1) {
log_msg(LOG_ERR, "Failed to bind tcp, ip %s, errno=%d, errinfo=%s\n", ip, errno, strerror(errno));
exit(1);
}
if (listen(sfd, 100) == -1) {
log_msg(LOG_ERR, "Failed to listen, ip %s, errno=%d, errinfo=%s\n", ip, errno, strerror(errno));
exit(1);
}
log_msg(LOG_INFO, "Accepting tcp querys, form %s...\n", ip);
while (1) {
addr_len = sizeof(struct sockaddr);
cfd = accept(sfd, (struct sockaddr *)&caddr, &addr_len);
if (cfd == -1) {
log_msg(LOG_ERR, "Failed to accept, ip %s, errno=%d, errinfo=%s\n", ip, errno, strerror(errno));
continue;
}
struct timeval tv = {2, 0};
if (setsockopt(cfd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
log_msg(LOG_ERR, "set socket option SO_RCVTIMEO errno=%d, errinfo=%s\n", errno, strerror(errno));
close(cfd);
continue;
}
if (setsockopt(cfd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) < 0) {
log_msg(LOG_ERR, "set socket option SO_SNDTIMEO errno=%d, errinfo=%s\n", errno, strerror(errno));
close(cfd);
continue;
}
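/*
* DNS over TCP prefixes every message with a two-byte length field
* (RFC 1035, section 4.2.2): read the length first, then that many bytes
* of message body.
*/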
while (1) {
int bytes_transmitted = 0;
bytes_transmitted = tcp_recv(cfd, buf, 2); //recv query len first
if (bytes_transmitted != 2) {
if (bytes_transmitted < 0) {
log_msg(LOG_ERR, "failed recv len %d from %s\n", 2, inet_ntoa(caddr.sin_addr));
}
close(cfd);
break;
}
/*
* Minimum query size is:
*
* Size of the header (12)
* + Root domain name (1)
* + Query class (2)
* + Query type (2)
*/
uint16_t tcp_query_len = 0;
memcpy(&tcp_query_len, buf, 2);
tcp_query_len = ntohs(tcp_query_len);
if (tcp_query_len < DNS_HEAD_SIZE + 1 + sizeof(uint16_t) + sizeof(uint16_t)) {
log_msg(LOG_ERR, "tcp query from %s packet size %d illegal, drop\n", inet_ntoa(caddr.sin_addr), tcp_query_len);
close(cfd);
break;
}
bytes_transmitted = tcp_recv(cfd, buf + 2, tcp_query_len);
if (bytes_transmitted != tcp_query_len) {
if (bytes_transmitted < 0) {
log_msg(LOG_ERR, "failed recv len %d from %s\n", tcp_query_len, inet_ntoa(caddr.sin_addr));
}
close(cfd);
break;
}
query_reset(tcp_query);
tcp_query->sip = *(uint32_t *)&caddr.sin_addr;
tcp_query->maxMsgLen = sizeof(buf);
tcp_query->packet->data = (uint8_t *)(buf + 2); //skip len
tcp_query->packet->position += 2 + tcp_query_len;
buffer_flip(tcp_query->packet);
memcpy(&flags_old, tcp_query->packet->data + 2, 2);
view_query_master_process(tcp_query);
rte_rwlock_read_lock(&tcp_lock);
if (query_process(tcp_query, &tcp_kdns) != QUERY_FAIL) {
buffer_flip(tcp_query->packet);
}
rte_rwlock_read_unlock(&tcp_lock);
if (GET_RCODE(tcp_query->packet) == RCODE_REFUSE) {
memcpy((buf + 2) + 2, &flags_old, 2);
tcp_process_forward(cfd, buf, tcp_query_len + 2, &caddr, GET_ID(tcp_query->packet), tcp_query->qtype,
(char *)domain_name_to_string(tcp_query->qname, NULL));
continue;
}
tcp_stats.dns_pkts_rcv_tcp++;
slen = buffer_remaining(tcp_query->packet);
if (slen > 0) {
uint16_t len = htons(slen);
memcpy(buf, &len, 2);
if (send(cfd, buf, slen + 2, 0) == -1) {
log_msg(LOG_ERR, "response query %s to %s, send error, errno=%d, errinfo=%s\n",
(char *)domain_name_to_string(tcp_query->qname, NULL), inet_ntoa(caddr.sin_addr), errno, strerror(errno));
}
tcp_stats.dns_pkts_snd_tcp++;
}
}
}
}
int tcp_process_init(void) {
char *ip = g_dns_cfg->netdev.kni_vip;
char *zones = g_dns_cfg->comm.zones;
rte_rwlock_init(&tcp_lock);
kdns_prepare_init(&tcp_kdns, &tcp_query, zones);
pthread_t *thread_id = (pthread_t *)xalloc(sizeof(pthread_t));
pthread_create(thread_id, NULL, thread_tcp_process, (void *)ip);
pthread_setname_np(*thread_id, "kdns_tcp_proc");
return 0;
}
int tcp_zones_reload(char *del_zones, char *add_zones) {
//log_msg(LOG_INFO, "tcp reload zones: del: %s, add: %s.\n", del_zones, add_zones);
rte_rwlock_write_lock(&tcp_lock);
int ret = kdns_zones_realod(&tcp_kdns, del_zones, add_zones);
rte_rwlock_write_unlock(&tcp_lock);
return ret;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/ecore_int_api.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__
#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX 0x0002
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
ECORE_INT_MODE_INTA,
ECORE_INT_MODE_MSIX,
ECORE_INT_MODE_MSI,
ECORE_INT_MODE_POLL,
};
#endif
struct ecore_sb_info {
struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
void OSAL_IOMEM *igu_addr;
u8 flags;
#define ECORE_SB_INFO_INIT 0x1
#define ECORE_SB_INFO_SETUP 0x2
#ifdef ECORE_CONFIG_DIRECT_HWFN
struct ecore_hwfn *p_hwfn;
#endif
struct ecore_dev *p_dev;
};
struct ecore_sb_cnt_info {
int sb_cnt;
int sb_iov_cnt;
int sb_free_blk;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
u32 prod = 0;
u16 rc = 0;
/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
}
OSAL_MMIOWB(sb_info->p_dev);
return rc;
}
/**
*
* @brief This function creates an update command for interrupts that is
* written to the IGU.
*
* @param sb_info - This is the structure allocated and
* initialized per status block. Assumption is
* that it was initialized using ecore_sb_init
* @param int_cmd - Enable/Disable/Nop
* @param upd_flg - whether igu consumer should be
* updated.
*
* @return OSAL_INLINE void
*/
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
struct igu_prod_cons_update igu_ack = { 0 };
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
(int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
(IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
#ifdef ECORE_CONFIG_DIRECT_HWFN
DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
igu_ack.sb_id_and_flags);
#else
DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
#endif
/* Both segments (interrupts & acks) are written to the same address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
OSAL_MMIOWB(sb_info->p_dev);
OSAL_BARRIER(sb_info->p_dev);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * addr,
int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr_relaxed(void *p_hwfn,
void OSAL_IOMEM * addr,
int size, u32 *data)
#endif
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
data[i]);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * addr,
int size, u32 *data)
{
__internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
int size, u32 *data)
{
__internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif
#endif
struct ecore_hwfn;
struct ecore_ptt;
enum ecore_coalescing_fsm {
ECORE_COAL_RX_STATE_MACHINE,
ECORE_COAL_TX_STATE_MACHINE
};
/**
* @brief ecore_int_cau_conf_pi - configure cau for a given
* status block
*
* @param p_hwfn
* @param p_ptt
* @param igu_sb_id
* @param pi_index
* @param state
* @param timeset
*/
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum ecore_coalescing_fsm coalescing_fsm,
u8 timeset);
/**
*
* @brief ecore_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
* @param p_ptt
* @param int_mode - interrupt mode to use
*/
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode);
/**
*
* @brief ecore_int_igu_disable_int - disable device interrupts
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
*
* @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
* register from igu.
*
* @param p_hwfn
*
* @return u64
*/
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
#define ECORE_SP_SB_ID 0xffff
/**
* @brief ecore_int_sb_init - Initializes the sb_info structure.
*
* once the structure is initialized it can be passed to sb related functions.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info points to an uninitialized (but
* allocated) sb_info structure
* @param sb_virt_addr
* @param sb_phy_addr
* @param sb_id the sb_id to be used (zero based in driver)
* should use ECORE_SP_SB_ID for SP Status block
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr, u16 sb_id);
/**
* @brief ecore_int_sb_setup - Setup the sb.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info initialized sb_info structure
*/
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
/**
* @brief ecore_int_sb_release - releases the sb_info structure.
*
* once the structure is released, its memory can be freed
*
* @param p_hwfn
* @param sb_info points to an allocated sb_info structure
* @param sb_id the sb_id to be used (zero based in driver)
* should never be equal to ECORE_SP_SB_ID
* (SP Status block)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *sb_info,
u16 sb_id);
/**
* @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
* default status block.
*
* @param p_hwfn - pointer to hwfn
*
*/
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
/**
* @brief ecore_int_get_num_sbs - get the number of status
* blocks configured for this function in the igu.
*
* @param p_hwfn
* @param p_sb_cnt_info
*
* @return
*/
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info);
/**
* @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
* release. The API need to be called after releasing all slowpath IRQs
* of the device.
*
* @param p_dev
*
*/
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
/**
* @brief ecore_int_attn_clr_enable - sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
* @param p_dev
* @param clr_enable
*
*/
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
#endif
|
vicharl/containerdns
|
kdns/core/util.h
|
/*
* util.h -- set of various support routines.
*
* Copyright (c) 2001-2006, NLnet Labs.
*
* Modified Work Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef _UTIL_H_
#define _UTIL_H_
#include <sys/time.h>
#include <stdarg.h>
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <sys/socket.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/if_ether.h>
struct rr;
struct buffer;
#define LOG_ERR 1
#define LOG_INFO 2
#define ALIGN_UP(n, alignment) \
(((n) + (alignment) - 1) & (~((alignment) - 1)))
#define PADDING(n, alignment) \
(ALIGN_UP((n), (alignment)) - (n))
#define ATTR_FORMAT(archetype, string_index, first_to_check) \
__attribute__ ((format (archetype, string_index, first_to_check)))
void log_open( char *ident);
void log_msg(int priority, const char *format, ...)
ATTR_FORMAT(printf, 2, 3);
int log_file_reload(char *filename);
void *xalloc(size_t size);
void *xalloc_zero(size_t size);
void *xalloc_array_zero(size_t num, size_t size);
void *xrealloc(void *ptr, size_t size);
uint32_t strtoserial(const char *nptr, const char **endptr);
size_t strlcpy(char *dst, const char *src, size_t siz);
/*
* Convert binary data to a string of hexadecimal characters.
*/
ssize_t hex_ntop(uint8_t const *src, size_t srclength, char *target,
size_t targsize);
ssize_t hex_pton(const char* src, uint8_t* target, size_t targsize);
/*
* Convert a single (hexadecimal) digit to its integer value.
*/
int hexdigit_to_int(char ch);
int linux_set_if_mac(const char *ifname, const unsigned char mac[ETH_ALEN]);
#endif /* _UTIL_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf_verify_parser.c
|
#include <stdio.h>
#include <rte_malloc.h>
#include "cperf_options.h"
#include "cperf_test_vectors.h"
#include "cperf_verify_parser.h"
int
free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
{
if (vector == NULL || opts == NULL)
return -1;
if (opts->test_file == NULL) {
if (vector->iv.data)
rte_free(vector->iv.data);
if (vector->aad.data)
rte_free(vector->aad.data);
if (vector->digest.data)
rte_free(vector->digest.data);
rte_free(vector);
} else {
if (vector->plaintext.data)
rte_free(vector->plaintext.data);
if (vector->cipher_key.data)
rte_free(vector->cipher_key.data);
if (vector->auth_key.data)
rte_free(vector->auth_key.data);
if (vector->iv.data)
rte_free(vector->iv.data);
if (vector->ciphertext.data)
rte_free(vector->ciphertext.data);
if (vector->aad.data)
rte_free(vector->aad.data);
if (vector->digest.data)
rte_free(vector->digest.data);
rte_free(vector);
}
return 0;
}
/* trim leading and trailing spaces */
static char *
trim(char *str)
{
char *start, *end;
for (start = str; *start; start++) {
if (!isspace((unsigned char) start[0]))
break;
}
for (end = start + strlen(start); end > start + 1; end--) {
if (!isspace((unsigned char) end[-1]))
break;
}
*end = 0;
/* Shift from "start" to the beginning of the string */
if (start > str)
memmove(str, start, (end - start) + 1);
return str;
}
/* tokenize test values separated by commas */
static int
parse_values(char *tokens, uint8_t **data, uint32_t *data_length)
{
uint8_t n_tokens;
uint32_t data_size = 32;
uint8_t *values;
char *tok, *error = NULL;
tok = strtok(tokens, VALUE_DELIMITER);
if (tok == NULL)
return -1;
values = (uint8_t *) rte_zmalloc(NULL, sizeof(uint8_t) * data_size, 0);
if (values == NULL)
return -1;
n_tokens = 0;
while (tok != NULL) {
uint8_t *values_extended = NULL;
if (n_tokens >= data_size) {
data_size *= 2;
values_extended = (uint8_t *) rte_realloc(values,
sizeof(uint8_t) * data_size, 0);
if (values_extended == NULL) {
rte_free(values);
return -1;
}
values = values_extended;
}
values[n_tokens] = (uint8_t) strtoul(tok, &error, 0);
if ((error == NULL) || (*error != '\0')) {
printf("Failed with convert '%s'\n", tok);
rte_free(values);
return -1;
}
tok = strtok(NULL, VALUE_DELIMITER);
if (tok == NULL)
break;
n_tokens++;
}
uint8_t *resize_values = (uint8_t *) rte_realloc(values,
sizeof(uint8_t) * (n_tokens + 1), 0);
if (resize_values == NULL) {
rte_free(values);
return -1;
}
*data = resize_values;
*data_length = n_tokens + 1;
return 0;
}
/* checks the type of key and assigns data */
static int
parse_entry(char *entry, struct cperf_test_vector *vector)
{
char *token, *key_token;
uint8_t *data = NULL;
int status;
uint32_t data_length;
/* get key */
token = strtok(entry, ENTRY_DELIMITER);
key_token = token;
/* get values for key */
token = strtok(NULL, ENTRY_DELIMITER);
if (token == NULL) {
printf("Expected 'key = values' but was '%.40s'..\n",
key_token);
return -1;
}
status = parse_values(token, &data, &data_length);
if (status)
return -1;
/* compare keys */
if (strstr(key_token, "plaintext")) {
if (vector->plaintext.data)
rte_free(vector->plaintext.data);
vector->plaintext.data = data;
vector->plaintext.length = data_length;
} else if (strstr(key_token, "cipher_key")) {
if (vector->cipher_key.data)
rte_free(vector->cipher_key.data);
vector->cipher_key.data = data;
vector->cipher_key.length = data_length;
} else if (strstr(key_token, "auth_key")) {
if (vector->auth_key.data)
rte_free(vector->auth_key.data);
vector->auth_key.data = data;
vector->auth_key.length = data_length;
} else if (strstr(key_token, "iv")) {
if (vector->iv.data)
rte_free(vector->iv.data);
vector->iv.data = data;
vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
vector->iv.length = data_length;
} else if (strstr(key_token, "ciphertext")) {
if (vector->ciphertext.data)
rte_free(vector->ciphertext.data);
vector->ciphertext.data = data;
vector->ciphertext.length = data_length;
} else if (strstr(key_token, "aad")) {
if (vector->aad.data)
rte_free(vector->aad.data);
vector->aad.data = data;
vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
vector->aad.length = data_length;
} else if (strstr(key_token, "digest")) {
if (vector->digest.data)
rte_free(vector->digest.data);
vector->digest.data = data;
vector->digest.phys_addr = rte_malloc_virt2phy(
vector->digest.data);
vector->digest.length = data_length;
} else {
printf("Not valid key: '%s'\n", trim(key_token));
return -1;
}
return 0;
}
/* search the file for keys and their values */
static int
parse_file(struct cperf_test_vector *v_vec, const char *path)
{
FILE *fp;
char *line = NULL, *entry = NULL;
ssize_t read;
size_t len = 0;
int status = 0;
fp = fopen(path, "r");
if (fp == NULL) {
printf("File %s does not exists\n", path);
return -1;
}
while ((read = getline(&line, &len, fp)) != -1) {
/* ignore comments and new lines */
if (line[0] == '#' || line[0] == '/' || line[0] == '\n'
|| line[0] == '\r' || line[0] == ' ')
continue;
trim(line);
/* buffer for multiline */
entry = (char *) rte_realloc(entry,
sizeof(char) * strlen(line) + 1, 0);
if (entry == NULL)
return -1;
memset(entry, 0, strlen(line) + 1);
strncpy(entry, line, strlen(line));
/* check if entry ends with , or = */
if (entry[strlen(entry) - 1] == ','
|| entry[strlen(entry) - 1] == '=') {
while ((read = getline(&line, &len, fp)) != -1) {
trim(line);
/* extend entry about length of new line */
char *entry_extended = (char *) rte_realloc(
entry, sizeof(char)
* (strlen(line) + strlen(entry))
+ 1, 0);
if (entry_extended == NULL)
goto err;
entry = entry_extended;
strncat(entry, line, strlen(line));
if (entry[strlen(entry) - 1] != ',')
break;
}
}
status = parse_entry(entry, v_vec);
if (status) {
printf("An error occurred while parsing!\n");
goto err;
}
}
fclose(fp);
free(line);
rte_free(entry);
return 0;
err:
if (fp)
fclose(fp);
if (line)
free(line);
if (entry)
rte_free(entry);
return -1;
}
struct cperf_test_vector*
cperf_test_vector_get_from_file(struct cperf_options *opts)
{
int status;
struct cperf_test_vector *test_vector = NULL;
if (opts == NULL || opts->test_file == NULL)
return test_vector;
test_vector = (struct cperf_test_vector *) rte_zmalloc(NULL,
sizeof(struct cperf_test_vector), 0);
if (test_vector == NULL)
return test_vector;
/* filling the vector with data from a file */
status = parse_file(test_vector, opts->test_file);
if (status) {
free_test_vector(test_vector, opts);
return NULL;
}
/* other values not included in the file */
test_vector->data.cipher_offset = 0;
test_vector->data.cipher_length = opts->buffer_sz;
test_vector->data.auth_offset = 0;
test_vector->data.auth_length = opts->buffer_sz;
return test_vector;
}
|
vicharl/containerdns
|
kdns/src/rate_limit.h
|
#ifndef _RATE_LIMIT_H_
#define _RATE_LIMIT_H_
typedef enum {
RATE_LIMIT_TYPE_ALL,
RATE_LIMIT_TYPE_FWD,
RATE_LIMIT_TYPE_EXCEEDED_LOG,
RATE_LIMIT_TYPE_MAX,
} rate_limit_type;
int rate_limit(uint32_t sip, rate_limit_type type, unsigned lcore_id);
int rate_limit_init(uint32_t all_per_second, uint32_t fwd_per_second, uint32_t client_num, unsigned lcore_id);
void rate_limit_uninit(unsigned lcore_id);
int rate_limit_reload(uint32_t all_per_second, uint32_t fwd_per_second, uint32_t client_num, unsigned lcore_id);
#endif /* _RATE_LIMIT_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf_test_latency.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include "cperf_test_latency.h"
#include "cperf_ops.h"
struct cperf_latency_results {
uint64_t ops_failed;
uint64_t enqd_tot;
uint64_t enqd_max;
uint64_t enqd_min;
uint64_t deqd_tot;
uint64_t deqd_max;
uint64_t deqd_min;
uint64_t cycles_tot;
uint64_t cycles_max;
uint64_t cycles_min;
uint64_t burst_num;
uint64_t num;
};
struct cperf_op_result {
uint64_t tsc_start;
uint64_t tsc_end;
enum rte_crypto_op_status status;
};
struct cperf_latency_ctx {
uint8_t dev_id;
uint16_t qp_id;
uint8_t lcore_id;
struct rte_mempool *pkt_mbuf_pool_in;
struct rte_mempool *pkt_mbuf_pool_out;
struct rte_mbuf **mbufs_in;
struct rte_mbuf **mbufs_out;
struct rte_mempool *crypto_op_pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
cperf_verify_crypto_op_t verify_op_output;
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
struct cperf_op_result *res;
struct cperf_latency_results results;
};
#define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
static void
cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
{
uint32_t i;
if (ctx) {
if (ctx->sess)
rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
if (ctx->mbufs_in) {
for (i = 0; i < mbuf_nb; i++)
rte_pktmbuf_free(ctx->mbufs_in[i]);
rte_free(ctx->mbufs_in);
}
if (ctx->mbufs_out) {
for (i = 0; i < mbuf_nb; i++) {
if (ctx->mbufs_out[i] != NULL)
rte_pktmbuf_free(ctx->mbufs_out[i]);
}
rte_free(ctx->mbufs_out);
}
if (ctx->pkt_mbuf_pool_in)
rte_mempool_free(ctx->pkt_mbuf_pool_in);
if (ctx->pkt_mbuf_pool_out)
rte_mempool_free(ctx->pkt_mbuf_pool_out);
if (ctx->crypto_op_pool)
rte_mempool_free(ctx->crypto_op_pool);
rte_free(ctx->res);
rte_free(ctx);
}
}
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
uint32_t segments_nb,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
uint32_t segment_sz = options->buffer_sz / segments_nb;
uint32_t last_sz = options->buffer_sz % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
test_vector->plaintext.data :
test_vector->ciphertext.data;
mbuf = rte_pktmbuf_alloc(mempool);
if (mbuf == NULL)
goto error;
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
memcpy(mbuf_data, test_data, segment_sz);
test_data += segment_sz;
segments_nb--;
while (segments_nb) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
if (m == NULL)
goto error;
rte_pktmbuf_chain(mbuf, m);
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
memcpy(mbuf_data, test_data, segment_sz);
test_data += segment_sz;
segments_nb--;
}
if (last_sz) {
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
if (mbuf_data == NULL)
goto error;
memcpy(mbuf_data, test_data, last_sz);
}
mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
options->auth_digest_sz);
if (mbuf_data == NULL)
goto error;
if (options->op_type == CPERF_AEAD) {
uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
if (aead == NULL)
goto error;
memcpy(aead, test_vector->aad.data, test_vector->aad.length);
}
return mbuf;
error:
if (mbuf != NULL)
rte_pktmbuf_free(mbuf);
return NULL;
}
void *
cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
const struct cperf_op_fns *op_fns)
{
struct cperf_latency_ctx *ctx = NULL;
unsigned int mbuf_idx = 0;
char pool_name[32] = "";
ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
if (ctx == NULL)
goto err;
ctx->dev_id = dev_id;
ctx->qp_id = qp_id;
ctx->populate_ops = op_fns->populate_ops;
ctx->options = options;
ctx->test_vector = test_vector;
ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
if (ctx->sess == NULL)
goto err;
snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
dev_id);
ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
options->pool_sz * options->segments_nb, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
(options->buffer_sz / options->segments_nb) +
(options->buffer_sz % options->segments_nb) +
options->auth_digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
goto err;
/* Generate mbufs_in with plaintext populated for test */
if (ctx->options->pool_sz % ctx->options->burst_sz)
goto err;
ctx->mbufs_in = rte_malloc(NULL,
(sizeof(struct rte_mbuf *) *
ctx->options->pool_sz), 0);
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
ctx->pkt_mbuf_pool_in, options->segments_nb,
options, test_vector);
if (ctx->mbufs_in[mbuf_idx] == NULL)
goto err;
}
if (options->out_of_place == 1) {
snprintf(pool_name, sizeof(pool_name),
"cperf_pool_out_cdev_%d",
dev_id);
ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
RTE_CACHE_LINE_ROUNDUP(
options->buffer_sz +
options->auth_digest_sz),
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
goto err;
}
ctx->mbufs_out = rte_malloc(NULL,
(sizeof(struct rte_mbuf *) *
ctx->options->pool_sz), 0);
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
if (options->out_of_place == 1) {
ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
ctx->pkt_mbuf_pool_out, 1,
options, test_vector);
if (ctx->mbufs_out[mbuf_idx] == NULL)
goto err;
} else {
ctx->mbufs_out[mbuf_idx] = NULL;
}
}
snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
dev_id);
ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
rte_socket_id());
if (ctx->crypto_op_pool == NULL)
goto err;
ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
ctx->options->total_ops, 0);
if (ctx->res == NULL)
goto err;
return ctx;
err:
cperf_latency_test_free(ctx, mbuf_idx);
return NULL;
}
static int
cperf_latency_test_verifier(struct rte_mbuf *mbuf,
const struct cperf_options *options,
const struct cperf_test_vector *vector)
{
const struct rte_mbuf *m;
uint32_t len;
uint16_t nb_segs;
uint8_t *data;
uint32_t cipher_offset, auth_offset;
uint8_t cipher, auth;
int res = 0;
m = mbuf;
nb_segs = m->nb_segs;
len = 0;
while (m && nb_segs != 0) {
len += m->data_len;
m = m->next;
nb_segs--;
}
data = rte_malloc(NULL, len, 0);
if (data == NULL)
return 1;
m = mbuf;
nb_segs = m->nb_segs;
len = 0;
while (m && nb_segs != 0) {
memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
m->data_len);
len += m->data_len;
m = m->next;
nb_segs--;
}
switch (options->op_type) {
case CPERF_CIPHER_ONLY:
cipher = 1;
cipher_offset = 0;
auth = 0;
auth_offset = 0;
break;
case CPERF_CIPHER_THEN_AUTH:
cipher = 1;
cipher_offset = 0;
auth = 1;
auth_offset = vector->plaintext.length;
break;
case CPERF_AUTH_ONLY:
cipher = 0;
cipher_offset = 0;
auth = 1;
auth_offset = vector->plaintext.length;
break;
case CPERF_AUTH_THEN_CIPHER:
cipher = 1;
cipher_offset = 0;
auth = 1;
auth_offset = vector->plaintext.length;
break;
case CPERF_AEAD:
cipher = 1;
cipher_offset = vector->aad.length;
auth = 1;
auth_offset = vector->aad.length + vector->plaintext.length;
break;
}
if (cipher == 1) {
if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
res += memcmp(data + cipher_offset,
vector->ciphertext.data,
vector->ciphertext.length);
else
res += memcmp(data + cipher_offset,
vector->plaintext.data,
vector->plaintext.length);
}
if (auth == 1) {
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
res += memcmp(data + auth_offset,
vector->digest.data,
vector->digest.length);
}
if (res != 0)
res = 1;
return res;
}
int
cperf_latency_test_runner(void *arg)
{
struct cperf_latency_ctx *ctx = arg;
struct cperf_op_result *pres;
if (ctx == NULL)
return 0;
struct rte_crypto_op *ops[ctx->options->burst_sz];
struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
uint64_t ops_enqd = 0, ops_deqd = 0;
uint16_t ops_unused = 0;
uint64_t m_idx = 0, b_idx = 0, i;
uint64_t tsc_val, tsc_end, tsc_start;
uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
uint32_t lcore = rte_lcore_id();
#ifdef CPERF_LINEARIZATION_ENABLE
struct rte_cryptodev_info dev_info;
int linearize = 0;
/* Check if source mbufs require coalescing */
if (ctx->options->segments_nb > 1) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
linearize = 1;
}
#endif /* CPERF_LINEARIZATION_ENABLE */
ctx->lcore_id = lcore;
/* Warm up the host CPU before starting the test */
for (i = 0; i < ctx->options->total_ops; i++)
rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
while (enqd_tot < ctx->options->total_ops) {
uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
<= ctx->options->total_ops) ?
ctx->options->burst_sz :
ctx->options->total_ops -
enqd_tot;
uint16_t ops_needed = burst_size - ops_unused;
/* Allocate crypto ops from pool */
if (ops_needed != rte_crypto_op_bulk_alloc(
ctx->crypto_op_pool,
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
ops, ops_needed))
return -1;
/* Setup crypto op, attach mbuf etc */
(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
&ctx->mbufs_out[m_idx],
ops_needed, ctx->sess, ctx->options,
ctx->test_vector);
tsc_start = rte_rdtsc_precise();
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
/* PMD doesn't support scatter-gather and source buffer
* is segmented.
* We need to linearize it before enqueuing.
*/
for (i = 0; i < burst_size; i++)
rte_pktmbuf_linearize(ops[i]->sym->m_src);
}
#endif /* CPERF_LINEARIZATION_ENABLE */
/* Enqueue burst of ops on crypto device */
ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
ops, burst_size);
/* Dequeue processed burst of ops from crypto device */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->burst_sz);
tsc_end = rte_rdtsc_precise();
for (i = 0; i < ops_needed; i++) {
ctx->res[tsc_idx].tsc_start = tsc_start;
ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
tsc_idx++;
}
/*
* Calculate number of ops not enqueued (mainly for hw
* accelerators whose ingress queue can fill up).
*/
ops_unused = burst_size - ops_enqd;
if (likely(ops_deqd)) {
/*
* free crypto ops so they can be reused. We don't free
* the mbufs here as we don't want to reuse them as
* the crypto operation will change the data and cause
* failures.
*/
for (i = 0; i < ops_deqd; i++) {
pres = (struct cperf_op_result *)
(ops_processed[i]->opaque_data);
pres->status = ops_processed[i]->status;
pres->tsc_end = tsc_end;
rte_crypto_op_free(ops_processed[i]);
}
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
deqd_min = min(ops_deqd, deqd_min);
}
enqd_tot += ops_enqd;
enqd_max = max(ops_enqd, enqd_max);
enqd_min = min(ops_enqd, enqd_min);
m_idx += ops_needed;
m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
0 : m_idx;
b_idx++;
}
/* Dequeue any operations still in the crypto device */
while (deqd_tot < ctx->options->total_ops) {
/* Sending 0 length burst to flush sw crypto device */
rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
/* dequeue burst */
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->burst_sz);
tsc_end = rte_rdtsc_precise();
if (ops_deqd != 0) {
for (i = 0; i < ops_deqd; i++) {
pres = (struct cperf_op_result *)
(ops_processed[i]->opaque_data);
pres->status = ops_processed[i]->status;
pres->tsc_end = tsc_end;
rte_crypto_op_free(ops_processed[i]);
}
deqd_tot += ops_deqd;
deqd_max = max(ops_deqd, deqd_max);
deqd_min = min(ops_deqd, deqd_min);
}
}
for (i = 0; i < tsc_idx; i++) {
tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
tsc_max = max(tsc_val, tsc_max);
tsc_min = min(tsc_val, tsc_min);
tsc_tot += tsc_val;
}
if (ctx->options->verify) {
struct rte_mbuf **mbufs;
if (ctx->options->out_of_place == 1)
mbufs = ctx->mbufs_out;
else
mbufs = ctx->mbufs_in;
for (i = 0; i < ctx->options->total_ops; i++) {
if (ctx->res[i].status != RTE_CRYPTO_OP_STATUS_SUCCESS
|| cperf_latency_test_verifier(mbufs[i],
ctx->options,
ctx->test_vector)) {
ctx->results.ops_failed++;
}
}
}
ctx->results.enqd_tot = enqd_tot;
ctx->results.enqd_max = enqd_max;
ctx->results.enqd_min = enqd_min;
ctx->results.deqd_tot = deqd_tot;
ctx->results.deqd_max = deqd_max;
ctx->results.deqd_min = deqd_min;
ctx->results.cycles_tot = tsc_tot;
ctx->results.cycles_max = tsc_max;
ctx->results.cycles_min = tsc_min;
ctx->results.burst_num = b_idx;
ctx->results.num = tsc_idx;
return 0;
}
void
cperf_latency_test_destructor(void *arg)
{
struct cperf_latency_ctx *ctx = arg;
uint64_t i;
if (ctx == NULL)
return;
static int only_once;
uint64_t etot, eavg, emax, emin;
uint64_t dtot, davg, dmax, dmin;
uint64_t ctot, cavg, cmax, cmin;
double ttot, tavg, tmax, tmin;
const uint64_t tunit = 1000000; /* us */
const uint64_t tsc_hz = rte_get_tsc_hz();
etot = ctx->results.enqd_tot;
eavg = ctx->results.enqd_tot / ctx->results.burst_num;
emax = ctx->results.enqd_max;
emin = ctx->results.enqd_min;
dtot = ctx->results.deqd_tot;
davg = ctx->results.deqd_tot / ctx->results.burst_num;
dmax = ctx->results.deqd_max;
dmin = ctx->results.deqd_min;
ctot = ctx->results.cycles_tot;
cavg = ctx->results.cycles_tot / ctx->results.num;
cmax = ctx->results.cycles_max;
cmin = ctx->results.cycles_min;
ttot = tunit*(double)(ctot) / tsc_hz;
tavg = tunit*(double)(cavg) / tsc_hz;
tmax = tunit*(double)(cmax) / tsc_hz;
tmin = tunit*(double)(cmin) / tsc_hz;
if (ctx->options->csv) {
if (!only_once)
printf("\n# lcore, Pakt Seq #, Packet Size, cycles,"
" time (us)");
for (i = 0; i < ctx->options->total_ops; i++) {
printf("\n%u;%"PRIu64";%"PRIu64";%.3f",
ctx->lcore_id, i + 1,
ctx->res[i].tsc_end - ctx->res[i].tsc_start,
tunit * (double) (ctx->res[i].tsc_end
- ctx->res[i].tsc_start)
/ tsc_hz);
}
only_once = 1;
} else {
printf("\n# Device %d on lcore %u\n", ctx->dev_id,
ctx->lcore_id);
printf("\n# total operations: %u", ctx->options->total_ops);
printf("\n# verified failed: %"PRIu64,
ctx->results.ops_failed);
printf("\n# burst number: %"PRIu64,
ctx->results.burst_num);
printf("\n#");
printf("\n# \t Total\t Average\t Maximum\t "
" Minimum");
printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
"%10"PRIu64, etot, eavg, emax, emin);
printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
"%10"PRIu64, dtot, davg, dmax, dmin);
printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
"%10"PRIu64, ctot, cavg, cmax, cmin);
printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f", ttot,
tavg, tmax, tmin);
printf("\n\n");
}
cperf_latency_test_free(ctx, ctx->options->pool_sz);
}
|
vicharl/containerdns
|
kdns/src/tcp_process.h
|
#ifndef _TCP_PROCESS_H_
#define _TCP_PROCESS_H_
#include <arpa/inet.h>
#include "db_update.h"
void tcp_statsdata_get(struct netif_queue_stats *sta);
void tcp_statsdata_reset(void);
int tcp_process_init(void);
int tcp_domian_databd_update(struct domin_info_update *update);
int tcp_zones_reload(char *del_zones, char *add_zones);
#endif /*_TCP_PROCESS_H_*/
|
vicharl/containerdns
|
kdns/deps/libmicrohttpd/src/microhttpd/response.c
|
/*
This file is part of libmicrohttpd
Copyright (C) 2007, 2009, 2010, 2016, 2017 <NAME> and <NAME>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file response.c
* @brief Methods for managing response objects
* @author <NAME>
* @author <NAME>
* @author Karlson2k (<NAME>)
*/
#define MHD_NO_DEPRECATION 1
#include "mhd_options.h"
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif /* HAVE_SYS_IOCTL_H */
#if defined(_WIN32) && ! defined(__CYGWIN__)
#include <windows.h>
#endif /* _WIN32 && !__CYGWIN__ */
#include "internal.h"
#include "response.h"
#include "mhd_limits.h"
#include "mhd_sockets.h"
#include "mhd_itc.h"
#include "mhd_str.h"
#include "connection.h"
#include "memorypool.h"
#include "mhd_compat.h"
#if defined(MHD_W32_MUTEX_)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif /* !WIN32_LEAN_AND_MEAN */
#include <windows.h>
#endif /* MHD_W32_MUTEX_ */
#if defined(_WIN32)
#include <io.h> /* for lseek(), read() */
#endif /* _WIN32 */
/**
* Add a header or footer line to the response.
*
* @param response response to add a header to
* @param kind header or footer
* @param header the header to add
* @param content value to add
* @return #MHD_NO on error (i.e. invalid header or content format).
*/
static int
add_response_entry (struct MHD_Response *response,
enum MHD_ValueKind kind,
const char *header,
const char *content)
{
struct MHD_HTTP_Header *hdr;
if ( (NULL == response) ||
(NULL == header) ||
(NULL == content) ||
(0 == header[0]) ||
(0 == content[0]) ||
(NULL != strchr (header, '\t')) ||
(NULL != strchr (header, '\r')) ||
(NULL != strchr (header, '\n')) ||
(NULL != strchr (content, '\t')) ||
(NULL != strchr (content, '\r')) ||
(NULL != strchr (content, '\n')) )
return MHD_NO;
if (NULL == (hdr = malloc (sizeof (struct MHD_HTTP_Header))))
return MHD_NO;
if (NULL == (hdr->header = strdup (header)))
{
free (hdr);
return MHD_NO;
}
if (NULL == (hdr->value = strdup (content)))
{
free (hdr->header);
free (hdr);
return MHD_NO;
}
hdr->kind = kind;
hdr->next = response->first_header;
response->first_header = hdr;
return MHD_YES;
}
/**
* Add a header line to the response.
*
* @param response response to add a header to
* @param header the header to add
* @param content value to add
* @return #MHD_NO on error (i.e. invalid header or content format).
* @ingroup response
*/
int
MHD_add_response_header (struct MHD_Response *response,
const char *header,
const char *content)
{
return add_response_entry (response,
MHD_HEADER_KIND,
header,
content);
}
/**
* Add a footer line to the response.
*
 * @param response response to add a footer to
 * @param footer the footer to add
 * @param content value to add
* @return #MHD_NO on error (i.e. invalid footer or content format).
* @ingroup response
*/
int
MHD_add_response_footer (struct MHD_Response *response,
const char *footer,
const char *content)
{
return add_response_entry (response,
MHD_FOOTER_KIND,
footer,
content);
}
/**
* Delete a header (or footer) line from the response.
*
* @param response response to remove a header from
* @param header the header to delete
* @param content value to delete
* @return #MHD_NO on error (no such header known)
* @ingroup response
*/
int
MHD_del_response_header (struct MHD_Response *response,
const char *header,
const char *content)
{
struct MHD_HTTP_Header *pos;
struct MHD_HTTP_Header *prev;
if ( (NULL == header) ||
(NULL == content) )
return MHD_NO;
prev = NULL;
pos = response->first_header;
while (NULL != pos)
{
if ((0 == strcmp (header,
pos->header)) &&
(0 == strcmp (content,
pos->value)))
{
free (pos->header);
free (pos->value);
if (NULL == prev)
response->first_header = pos->next;
else
prev->next = pos->next;
free (pos);
return MHD_YES;
}
prev = pos;
pos = pos->next;
}
return MHD_NO;
}
/**
* Get all of the headers (and footers) added to a response.
*
* @param response response to query
* @param iterator callback to call on each header;
* maybe NULL (then just count headers)
* @param iterator_cls extra argument to @a iterator
* @return number of entries iterated over
* @ingroup response
*/
int
MHD_get_response_headers (struct MHD_Response *response,
MHD_KeyValueIterator iterator,
void *iterator_cls)
{
int numHeaders = 0;
struct MHD_HTTP_Header *pos;
for (pos = response->first_header;
NULL != pos;
pos = pos->next)
{
numHeaders++;
if ((NULL != iterator) &&
(MHD_YES != iterator (iterator_cls,
pos->kind,
pos->header,
pos->value)))
break;
}
return numHeaders;
}
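/*
 * Illustrative usage sketch (not part of the original source): a minimal
 * MHD_KeyValueIterator that prints every header and footer attached to a
 * response via MHD_get_response_headers().  The function name is a
 * hypothetical choice made for this example only.
 */
#if 0 /* example only, never compiled */
static int
example_print_header (void *cls,
                      enum MHD_ValueKind kind,
                      const char *key,
                      const char *value)
{
  (void) cls;   /* no closure needed for this example */
  (void) kind;  /* both headers and footers are reported */
  printf ("%s: %s\n", key, value);
  return MHD_YES;               /* keep iterating */
}

/* Count (and print) all entries attached to 'response':
 *   int n = MHD_get_response_headers (response, &example_print_header, NULL);
 */
#endif /* example only */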
/**
* Get a particular header (or footer) from the response.
*
* @param response response to query
* @param key which header to get
* @return NULL if header does not exist
* @ingroup response
*/
const char *
MHD_get_response_header (struct MHD_Response *response,
const char *key)
{
struct MHD_HTTP_Header *pos;
if (NULL == key)
return NULL;
for (pos = response->first_header;
NULL != pos;
pos = pos->next)
{
if ( MHD_str_equal_caseless_ (pos->header, key) )
return pos->value;
}
return NULL;
}
/**
* Check whether response header contains particular token.
*
* Token could be surrounded by spaces and tabs and delimited by comma.
* Case-insensitive match used for header names and tokens.
*
* @param response the response to query
* @param key header name
* @param token the token to find
* @param token_len the length of token, not including optional
* terminating null-character.
* @return true if token is found in specified header,
* false otherwise
*/
bool
MHD_check_response_header_token_ci (const struct MHD_Response *response,
const char *key,
const char *token,
size_t token_len)
{
struct MHD_HTTP_Header *pos;
if ( (NULL == key) ||
('\0' == key[0]) ||
(NULL == token) ||
('\0' == token[0]) )
return false;
for (pos = response->first_header;
NULL != pos;
pos = pos->next)
{
if ( (pos->kind == MHD_HEADER_KIND) &&
MHD_str_equal_caseless_ (pos->header,
key) &&
MHD_str_has_token_caseless_ (pos->value,
token,
token_len) )
return true;
}
return false;
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response, #MHD_SIZE_UNKNOWN for unknown
* @param block_size preferred block size for querying crc (advisory only,
* MHD may still call @a crc using smaller chunks); this
* is essentially the buffer size used for IO, clients
* should pick a value that is appropriate for IO and
* memory performance requirements
* @param crc callback to use to obtain response data
* @param crc_cls extra argument to @a crc
* @param crfc callback to call to free @a crc_cls resources
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
struct MHD_Response *
MHD_create_response_from_callback (uint64_t size,
size_t block_size,
MHD_ContentReaderCallback crc,
void *crc_cls,
MHD_ContentReaderFreeCallback crfc)
{
struct MHD_Response *response;
if ((NULL == crc) || (0 == block_size))
return NULL;
if (NULL == (response = MHD_calloc_ (1, sizeof (struct MHD_Response) + block_size)))
return NULL;
response->fd = -1;
response->data = (void *) &response[1];
response->data_buffer_size = block_size;
if (! MHD_mutex_init_ (&response->mutex))
{
free (response);
return NULL;
}
response->crc = crc;
response->crfc = crfc;
response->crc_cls = crc_cls;
response->reference_count = 1;
response->total_size = size;
return response;
}
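/*
 * Illustrative usage sketch (not part of the original source): a minimal
 * content-reader callback paired with MHD_create_response_from_callback().
 * The callback name, the string payload and the 32 KiB block size are
 * hypothetical choices made for this example only.
 */
#if 0 /* example only, never compiled */
static ssize_t
example_string_reader (void *cls,
                       uint64_t pos,
                       char *buf,
                       size_t max)
{
  const char *src = cls;        /* assumption: NUL-terminated string */
  size_t len = strlen (src);

  if (pos >= len)
    return MHD_CONTENT_READER_END_OF_STREAM;
  if (max > len - pos)
    max = len - pos;
  memcpy (buf, &src[pos], max);
  return (ssize_t) max;
}

static struct MHD_Response *
example_make_callback_response (void)
{
  /* Total size is not known up front; 32 KiB is just a plausible IO
   * block size for the reader callback. */
  return MHD_create_response_from_callback (MHD_SIZE_UNKNOWN,
                                            32 * 1024,
                                            &example_string_reader,
                                            (void *) "hello, world",
                                            NULL);
}
#endif /* example only */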
/**
* Set special flags and options for a response.
*
* @param response the response to modify
* @param flags to set for the response
* @param ... #MHD_RO_END terminated list of options
* @return #MHD_YES on success, #MHD_NO on error
*/
int
MHD_set_response_options (struct MHD_Response *response,
enum MHD_ResponseFlags flags,
...)
{
va_list ap;
int ret;
enum MHD_ResponseOptions ro;
ret = MHD_YES;
response->flags = flags;
va_start (ap, flags);
while (MHD_RO_END != (ro = va_arg (ap, enum MHD_ResponseOptions)))
{
switch (ro)
{
default:
ret = MHD_NO;
break;
}
}
va_end (ap);
return ret;
}
/**
* Given a file descriptor, read data from the file
* to generate the response.
*
* @param cls pointer to the response
* @param pos offset in the file to access
* @param buf where to write the data
* @param max number of bytes to write at most
* @return number of bytes written
*/
static ssize_t
file_reader (void *cls,
uint64_t pos,
char *buf,
size_t max)
{
struct MHD_Response *response = cls;
#if !defined(_WIN32) || defined(__CYGWIN__)
ssize_t n;
#else /* _WIN32 && !__CYGWIN__ */
const HANDLE fh = (HANDLE) _get_osfhandle (response->fd);
#endif /* _WIN32 && !__CYGWIN__ */
const int64_t offset64 = (int64_t)(pos + response->fd_off);
if (offset64 < 0)
return MHD_CONTENT_READER_END_WITH_ERROR; /* seek to required position is not possible */
#if !defined(_WIN32) || defined(__CYGWIN__)
if (max > SSIZE_MAX)
max = SSIZE_MAX; /* Clamp to maximum return value. */
#if defined(HAVE_PREAD64)
n = pread64(response->fd, buf, max, offset64);
#elif defined(HAVE_PREAD)
if ( (sizeof(off_t) < sizeof (uint64_t)) &&
(offset64 > (uint64_t)INT32_MAX) )
return MHD_CONTENT_READER_END_WITH_ERROR; /* Read at required position is not possible. */
n = pread(response->fd, buf, max, (off_t) offset64);
#else /* ! HAVE_PREAD */
#if defined(HAVE_LSEEK64)
if (lseek64 (response->fd,
offset64,
SEEK_SET) != offset64)
return MHD_CONTENT_READER_END_WITH_ERROR; /* can't seek to required position */
#else /* ! HAVE_LSEEK64 */
if ( (sizeof(off_t) < sizeof (uint64_t)) &&
(offset64 > (uint64_t)INT32_MAX) )
return MHD_CONTENT_READER_END_WITH_ERROR; /* seek to required position is not possible */
if (lseek (response->fd,
(off_t) offset64,
SEEK_SET) != (off_t) offset64)
return MHD_CONTENT_READER_END_WITH_ERROR; /* can't seek to required position */
#endif /* ! HAVE_LSEEK64 */
n = read (response->fd,
buf,
max);
#endif /* ! HAVE_PREAD */
if (0 == n)
return MHD_CONTENT_READER_END_OF_STREAM;
if (n < 0)
return MHD_CONTENT_READER_END_WITH_ERROR;
return n;
#else /* _WIN32 && !__CYGWIN__ */
if (INVALID_HANDLE_VALUE == fh)
return MHD_CONTENT_READER_END_WITH_ERROR; /* Value of 'response->fd' is not valid. */
else
{
OVERLAPPED f_ol = {0, 0, {{0, 0}}, 0}; /* Initialize to zero. */
ULARGE_INTEGER pos_uli;
DWORD toRead = (max > INT32_MAX) ? INT32_MAX : (DWORD) max;
DWORD resRead;
pos_uli.QuadPart = (uint64_t) offset64; /* Simple transformation 64bit -> 2x32bit. */
f_ol.Offset = pos_uli.LowPart;
f_ol.OffsetHigh = pos_uli.HighPart;
if (! ReadFile(fh, (void*)buf, toRead, &resRead, &f_ol))
return MHD_CONTENT_READER_END_WITH_ERROR; /* Read error. */
if (0 == resRead)
return MHD_CONTENT_READER_END_OF_STREAM;
return (ssize_t) resRead;
}
#endif /* _WIN32 && !__CYGWIN__ */
}
/**
* Destroy file reader context. Closes the file
* descriptor.
*
* @param cls pointer to file descriptor
*/
static void
free_callback (void *cls)
{
struct MHD_Response *response = cls;
(void) close (response->fd);
response->fd = -1;
}
#undef MHD_create_response_from_fd_at_offset
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response
* @param fd file descriptor referring to a file on disk with the
* data; will be closed when response is destroyed;
* fd should be in 'blocking' mode
* @param offset offset to start reading from in the file;
* Be careful! `off_t` may have been compiled to be a
* 64-bit variable for MHD, in which case your application
* also has to be compiled using the same options! Read
* the MHD manual for more details.
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
struct MHD_Response *
MHD_create_response_from_fd_at_offset (size_t size,
int fd,
off_t offset)
{
return MHD_create_response_from_fd_at_offset64 (size,
fd,
offset);
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response;
* sizes larger than 2 GiB may be not supported by OS or
* MHD build; see ::MHD_FEATURE_LARGE_FILE
* @param fd file descriptor referring to a file on disk with the
* data; will be closed when response is destroyed;
* fd should be in 'blocking' mode
* @param offset offset to start reading from in the file;
* reading file beyond 2 GiB may be not supported by OS or
* MHD build; see ::MHD_FEATURE_LARGE_FILE
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
_MHD_EXTERN struct MHD_Response *
MHD_create_response_from_fd_at_offset64 (uint64_t size,
int fd,
uint64_t offset)
{
struct MHD_Response *response;
#if !defined(HAVE___LSEEKI64) && !defined(HAVE_LSEEK64)
if ( (sizeof(uint64_t) > sizeof(off_t)) &&
( (size > (uint64_t)INT32_MAX) ||
(offset > (uint64_t)INT32_MAX) ||
((size + offset) >= (uint64_t)INT32_MAX) ) )
return NULL;
#endif
if ( ((int64_t)size < 0) ||
((int64_t)offset < 0) ||
((int64_t)(size + offset) < 0) )
return NULL;
response = MHD_create_response_from_callback (size,
4 * 1024,
&file_reader,
NULL,
&free_callback);
if (NULL == response)
return NULL;
response->fd = fd;
response->fd_off = offset;
response->crc_cls = response;
return response;
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response
* @param fd file descriptor referring to a file on disk with the data
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
struct MHD_Response *
MHD_create_response_from_fd (size_t size,
int fd)
{
return MHD_create_response_from_fd_at_offset64 (size,
fd,
0);
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response;
* sizes larger than 2 GiB may be not supported by OS or
* MHD build; see ::MHD_FEATURE_LARGE_FILE
* @param fd file descriptor referring to a file on disk with the
* data; will be closed when response is destroyed;
* fd should be in 'blocking' mode
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
_MHD_EXTERN struct MHD_Response *
MHD_create_response_from_fd64 (uint64_t size,
int fd)
{
return MHD_create_response_from_fd_at_offset64 (size,
fd,
0);
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the @a data portion of the response
* @param data the data itself
* @param must_free libmicrohttpd should free data when done
* @param must_copy libmicrohttpd must make a copy of @a data
 *        right away, the data may be released anytime after
* this call returns
* @return NULL on error (i.e. invalid arguments, out of memory)
* @deprecated use #MHD_create_response_from_buffer instead
* @ingroup response
*/
struct MHD_Response *
MHD_create_response_from_data (size_t size,
void *data,
int must_free,
int must_copy)
{
struct MHD_Response *response;
void *tmp;
if ((NULL == data) && (size > 0))
return NULL;
if (NULL == (response = MHD_calloc_ (1, sizeof (struct MHD_Response))))
return NULL;
response->fd = -1;
if (! MHD_mutex_init_ (&response->mutex))
{
free (response);
return NULL;
}
if ((must_copy) && (size > 0))
{
if (NULL == (tmp = malloc (size)))
{
MHD_mutex_destroy_chk_ (&response->mutex);
free (response);
return NULL;
}
memcpy (tmp, data, size);
must_free = MHD_YES;
data = tmp;
}
if (must_free)
{
response->crfc = &free;
response->crc_cls = data;
}
response->reference_count = 1;
response->total_size = size;
response->data = data;
response->data_size = size;
return response;
}
/**
* Create a response object. The response object can be extended with
* header information and then be used any number of times.
*
* @param size size of the data portion of the response
* @param buffer size bytes containing the response's data portion
* @param mode flags for buffer management
* @return NULL on error (i.e. invalid arguments, out of memory)
* @ingroup response
*/
struct MHD_Response *
MHD_create_response_from_buffer (size_t size,
void *buffer,
enum MHD_ResponseMemoryMode mode)
{
return MHD_create_response_from_data (size,
buffer,
mode == MHD_RESPMEM_MUST_FREE,
mode == MHD_RESPMEM_MUST_COPY);
}
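/*
 * Illustrative usage sketch (not part of the original source): build a
 * response from a static buffer, attach a Content-Type header and drop the
 * local reference once it has been queued.  The page text and the use of
 * MHD_queue_response() inside an access handler are assumptions made for
 * this example only.
 */
#if 0 /* example only, never compiled */
static int
example_reply (struct MHD_Connection *connection)
{
  static const char page[] = "<html><body>Hello</body></html>";
  struct MHD_Response *response;
  int ret;

  response = MHD_create_response_from_buffer (strlen (page),
                                              (void *) page,
                                              MHD_RESPMEM_PERSISTENT);
  if (NULL == response)
    return MHD_NO;
  MHD_add_response_header (response,
                           MHD_HTTP_HEADER_CONTENT_TYPE,
                           "text/html");
  ret = MHD_queue_response (connection, MHD_HTTP_OK, response);
  /* Drop our reference; MHD keeps its own until the reply is sent. */
  MHD_destroy_response (response);
  return ret;
}
#endif /* example only */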
#ifdef UPGRADE_SUPPORT
/**
* This connection-specific callback is provided by MHD to
* applications (unusual) during the #MHD_UpgradeHandler.
* It allows applications to perform 'special' actions on
* the underlying socket from the upgrade.
*
* @param urh the handle identifying the connection to perform
* the upgrade @a action on.
* @param action which action should be performed
* @param ... arguments to the action (depends on the action)
* @return #MHD_NO on error, #MHD_YES on success
*/
_MHD_EXTERN int
MHD_upgrade_action (struct MHD_UpgradeResponseHandle *urh,
enum MHD_UpgradeAction action,
...)
{
struct MHD_Connection *connection;
struct MHD_Daemon *daemon;
if (NULL == urh)
return MHD_NO;
connection = urh->connection;
/* Precaution checks on external data. */
if (NULL == connection)
return MHD_NO;
daemon = connection->daemon;
if (NULL == daemon)
return MHD_NO;
switch (action)
{
case MHD_UPGRADE_ACTION_CLOSE:
if (urh->was_closed)
return MHD_NO; /* Already closed. */
/* transition to special 'closed' state for start of cleanup */
#ifdef HTTPS_SUPPORT
if (0 != (daemon->options & MHD_USE_TLS) )
{
/* signal that app is done by shutdown() of 'app' socket */
/* Application will not use anyway this socket after this command. */
shutdown (urh->app.socket,
SHUT_RDWR);
}
#endif /* HTTPS_SUPPORT */
mhd_assert (MHD_CONNECTION_UPGRADE == connection->state);
urh->was_closed = true;
/* As soon as connection will be marked with BOTH
* 'urh->was_closed' AND 'urh->clean_ready', it will
* be moved to cleanup list by MHD_resume_connection(). */
MHD_resume_connection (connection);
return MHD_YES;
default:
/* we don't understand this one */
return MHD_NO;
}
}
/**
* We are done sending the header of a given response to the client.
* Now it is time to perform the upgrade and hand over the connection
* to the application.
* @remark To be called only from thread that process connection's
* recv(), send() and response. Must be called right after sending
* response headers.
*
* @param response the response that was created for an upgrade
* @param connection the specific connection we are upgrading
* @return #MHD_YES on success, #MHD_NO on failure (will cause
* connection to be closed)
*/
int
MHD_response_execute_upgrade_ (struct MHD_Response *response,
struct MHD_Connection *connection)
{
struct MHD_Daemon *daemon = connection->daemon;
struct MHD_UpgradeResponseHandle *urh;
size_t rbo;
if (0 == (daemon->options & MHD_ALLOW_UPGRADE))
return MHD_NO;
if (NULL ==
MHD_get_response_header (response,
MHD_HTTP_HEADER_UPGRADE))
{
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Invalid response for upgrade: application failed to set the 'Upgrade' header!\n"));
#endif
return MHD_NO;
}
urh = MHD_calloc_ (1, sizeof (struct MHD_UpgradeResponseHandle));
if (NULL == urh)
return MHD_NO;
urh->connection = connection;
rbo = connection->read_buffer_offset;
connection->read_buffer_offset = 0;
#ifdef HTTPS_SUPPORT
if (0 != (daemon->options & MHD_USE_TLS) )
{
struct MemoryPool *pool;
size_t avail;
char *buf;
MHD_socket sv[2];
#if defined(MHD_socket_nosignal_) || !defined(MHD_socket_pair_nblk_)
int res1;
int res2;
#endif /* MHD_socket_nosignal_ || !MHD_socket_pair_nblk_ */
#ifdef MHD_socket_pair_nblk_
if (! MHD_socket_pair_nblk_ (sv))
{
free (urh);
return MHD_NO;
}
#else /* !MHD_socket_pair_nblk_ */
if (! MHD_socket_pair_ (sv))
{
free (urh);
return MHD_NO;
}
res1 = MHD_socket_nonblocking_(sv[0]);
res2 = MHD_socket_nonblocking_(sv[1]);
if ( (! res1) || (! res2) )
{
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Failed to make loopback sockets non-blocking.\n"));
#endif
if (! res2)
{
/* Socketpair cannot be used. */
MHD_socket_close_chk_ (sv[0]);
MHD_socket_close_chk_ (sv[1]);
free (urh);
return MHD_NO;
}
}
#endif /* !MHD_socket_pair_nblk_ */
#ifdef MHD_socket_nosignal_
res1 = MHD_socket_nosignal_(sv[0]);
res2 = MHD_socket_nosignal_(sv[1]);
if ( (! res1) || (! res2) )
{
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Failed to set SO_NOSIGPIPE on loopback sockets.\n"));
#endif
#ifndef MSG_NOSIGNAL
if (!res2)
{
/* Socketpair cannot be used. */
MHD_socket_close_chk_ (sv[0]);
MHD_socket_close_chk_ (sv[1]);
free (urh);
return MHD_NO;
}
#endif /* ! MSG_NOSIGNAL */
}
#endif /* MHD_socket_nosignal_ */
if ( (! MHD_SCKT_FD_FITS_FDSET_ (sv[1],
NULL)) &&
(0 == (daemon->options & (MHD_USE_POLL | MHD_USE_EPOLL))) )
{
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Socketpair descriptor larger than FD_SETSIZE: %d > %d\n"),
(int) sv[1],
(int) FD_SETSIZE);
#endif
MHD_socket_close_chk_ (sv[0]);
MHD_socket_close_chk_ (sv[1]);
free (urh);
return MHD_NO;
}
urh->app.socket = sv[0];
urh->app.urh = urh;
urh->app.celi = MHD_EPOLL_STATE_UNREADY;
urh->mhd.socket = sv[1];
urh->mhd.urh = urh;
urh->mhd.celi = MHD_EPOLL_STATE_UNREADY;
pool = connection->pool;
avail = MHD_pool_get_free (pool);
if (avail < RESERVE_EBUF_SIZE)
{
/* connection's pool is totally at the limit,
use our 'emergency' buffer of #RESERVE_EBUF_SIZE bytes. */
avail = RESERVE_EBUF_SIZE;
buf = urh->e_buf;
}
else
{
/* Normal case: grab all remaining memory from the
connection's pool for the IO buffers; the connection
certainly won't need it anymore as we've upgraded
to another protocol. */
buf = MHD_pool_allocate (pool,
avail,
MHD_NO);
}
/* use half the buffer for inbound, half for outbound */
urh->in_buffer_size = avail / 2;
urh->out_buffer_size = avail - urh->in_buffer_size;
urh->in_buffer = buf;
urh->out_buffer = &buf[urh->in_buffer_size];
#ifdef EPOLL_SUPPORT
/* Launch IO processing by the event loop */
if (0 != (daemon->options & MHD_USE_EPOLL))
{
/* We're running with epoll(), need to add the sockets
to the event set of the daemon's `epoll_upgrade_fd` */
struct epoll_event event;
mhd_assert (-1 != daemon->epoll_upgrade_fd);
/* First, add network socket */
event.events = EPOLLIN | EPOLLOUT | EPOLLPRI | EPOLLET;
event.data.ptr = &urh->app;
if (0 != epoll_ctl (daemon->epoll_upgrade_fd,
EPOLL_CTL_ADD,
connection->socket_fd,
&event))
{
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Call to epoll_ctl failed: %s\n"),
MHD_socket_last_strerr_ ());
#endif
MHD_socket_close_chk_ (sv[0]);
MHD_socket_close_chk_ (sv[1]);
free (urh);
return MHD_NO;
}
/* Second, add our end of the UNIX socketpair() */
event.events = EPOLLIN | EPOLLOUT | EPOLLPRI | EPOLLET;
event.data.ptr = &urh->mhd;
if (0 != epoll_ctl (daemon->epoll_upgrade_fd,
EPOLL_CTL_ADD,
urh->mhd.socket,
&event))
{
event.events = EPOLLIN | EPOLLOUT | EPOLLPRI;
event.data.ptr = &urh->app;
if (0 != epoll_ctl (daemon->epoll_upgrade_fd,
EPOLL_CTL_DEL,
connection->socket_fd,
&event))
MHD_PANIC (_("Error cleaning up while handling epoll error"));
#ifdef HAVE_MESSAGES
MHD_DLOG (daemon,
_("Call to epoll_ctl failed: %s\n"),
MHD_socket_last_strerr_ ());
#endif
MHD_socket_close_chk_ (sv[0]);
MHD_socket_close_chk_ (sv[1]);
free (urh);
return MHD_NO;
}
EDLL_insert (daemon->eready_urh_head,
daemon->eready_urh_tail,
urh);
urh->in_eready_list = true;
}
#endif /* EPOLL_SUPPORT */
if (0 == (daemon->options & MHD_USE_THREAD_PER_CONNECTION) )
{
/* This takes care of further processing for most event loops:
         simply add to DLL for bi-directional processing */
DLL_insert (daemon->urh_head,
daemon->urh_tail,
urh);
}
/* In thread-per-connection mode, thread will switch to forwarding once
* connection.urh is not NULL and connection.state == MHD_CONNECTION_UPGRADE.
*/
}
else
{
urh->app.socket = MHD_INVALID_SOCKET;
urh->mhd.socket = MHD_INVALID_SOCKET;
/* Non-TLS connection do not hold any additional resources. */
urh->clean_ready = true;
}
#else /* ! HTTPS_SUPPORT */
urh->clean_ready = true;
#endif /* ! HTTPS_SUPPORT */
connection->urh = urh;
/* As far as MHD's event loops are concerned, this connection is
suspended; it will be resumed once application is done by the
#MHD_upgrade_action() function */
internal_suspend_connection_ (connection);
/* hand over socket to application */
response->upgrade_handler (response->upgrade_handler_cls,
connection,
connection->client_context,
connection->read_buffer,
rbo,
#ifdef HTTPS_SUPPORT
(0 == (daemon->options & MHD_USE_TLS) ) ?
connection->socket_fd : urh->app.socket,
#else /* ! HTTPS_SUPPORT */
connection->socket_fd,
#endif /* ! HTTPS_SUPPORT */
urh);
return MHD_YES;
}
/**
* Create a response object that can be used for 101 UPGRADE
* responses, for example to implement WebSockets. After sending the
* response, control over the data stream is given to the callback (which
* can then, for example, start some bi-directional communication).
* If the response is queued for multiple connections, the callback
* will be called for each connection. The callback
* will ONLY be called after the response header was successfully passed
* to the OS; if there are communication errors before, the usual MHD
* connection error handling code will be performed.
*
* Setting the correct HTTP code (i.e. MHD_HTTP_SWITCHING_PROTOCOLS)
* and setting correct HTTP headers for the upgrade must be done
* manually (this way, it is possible to implement most existing
* WebSocket versions using this API; in fact, this API might be useful
* for any protocol switch, not just WebSockets). Note that
* draft-ietf-hybi-thewebsocketprotocol-00 cannot be implemented this
* way as the header "HTTP/1.1 101 WebSocket Protocol Handshake"
* cannot be generated; instead, MHD will always produce "HTTP/1.1 101
* Switching Protocols" (if the response code 101 is used).
*
* As usual, the response object can be extended with header
* information and then be used any number of times (as long as the
* header information is not connection-specific).
*
* @param upgrade_handler function to call with the 'upgraded' socket
* @param upgrade_handler_cls closure for @a upgrade_handler
* @return NULL on error (i.e. invalid arguments, out of memory)
*/
_MHD_EXTERN struct MHD_Response *
MHD_create_response_for_upgrade (MHD_UpgradeHandler upgrade_handler,
void *upgrade_handler_cls)
{
struct MHD_Response *response;
if (NULL == upgrade_handler)
return NULL; /* invalid request */
response = MHD_calloc_ (1, sizeof (struct MHD_Response));
if (NULL == response)
return NULL;
if (! MHD_mutex_init_ (&response->mutex))
{
free (response);
return NULL;
}
response->upgrade_handler = upgrade_handler;
response->upgrade_handler_cls = upgrade_handler_cls;
response->total_size = MHD_SIZE_UNKNOWN;
response->reference_count = 1;
if (MHD_NO ==
MHD_add_response_header (response,
MHD_HTTP_HEADER_CONNECTION,
"Upgrade"))
{
MHD_destroy_response (response);
return NULL;
}
return response;
}
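/*
 * Illustrative usage sketch (not part of the original source): queueing a
 * "101 Switching Protocols" response.  The handler below immediately closes
 * the upgraded connection again; the handler name and the "Upgrade: example"
 * token are hypothetical choices made for this example only.
 */
#if 0 /* example only, never compiled */
static void
example_upgrade_handler (void *cls,
                         struct MHD_Connection *connection,
                         void *con_cls,
                         const char *extra_in,
                         size_t extra_in_size,
                         MHD_socket sock,
                         struct MHD_UpgradeResponseHandle *urh)
{
  (void) cls; (void) connection; (void) con_cls;
  (void) extra_in; (void) extra_in_size; (void) sock;
  /* A real handler would hand 'sock' over to its own IO loop here. */
  MHD_upgrade_action (urh, MHD_UPGRADE_ACTION_CLOSE);
}

/* Inside an access handler:
 *   struct MHD_Response *r =
 *     MHD_create_response_for_upgrade (&example_upgrade_handler, NULL);
 *   MHD_add_response_header (r, MHD_HTTP_HEADER_UPGRADE, "example");
 *   MHD_queue_response (connection, MHD_HTTP_SWITCHING_PROTOCOLS, r);
 *   MHD_destroy_response (r);
 */
#endif /* example only */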
#endif /* UPGRADE_SUPPORT */
/**
* Destroy a response object and associated resources. Note that
* libmicrohttpd may keep some of the resources around if the response
* is still in the queue for some clients, so the memory may not
* necessarily be freed immediately.
*
* @param response response to destroy
* @ingroup response
*/
void
MHD_destroy_response (struct MHD_Response *response)
{
struct MHD_HTTP_Header *pos;
if (NULL == response)
return;
MHD_mutex_lock_chk_ (&response->mutex);
if (0 != --(response->reference_count))
{
MHD_mutex_unlock_chk_ (&response->mutex);
return;
}
MHD_mutex_unlock_chk_ (&response->mutex);
MHD_mutex_destroy_chk_ (&response->mutex);
if (NULL != response->crfc)
response->crfc (response->crc_cls);
while (NULL != response->first_header)
{
pos = response->first_header;
response->first_header = pos->next;
free (pos->header);
free (pos->value);
free (pos);
}
free (response);
}
/**
* Increments the reference counter for the @a response.
*
* @param response object to modify
*/
void
MHD_increment_response_rc (struct MHD_Response *response)
{
MHD_mutex_lock_chk_ (&response->mutex);
(response->reference_count)++;
MHD_mutex_unlock_chk_ (&response->mutex);
}
/* end of response.c */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc_port.c
|
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "efx.h"
#include "sfc.h"
#include "sfc_log.h"
/**
* Update MAC statistics in the buffer.
*
* @param sa Adapter
*
* @return Status code
* @retval 0 Success
* @retval EAGAIN Try again
* @retval ENOMEM Memory allocation failure
*/
int
sfc_port_update_mac_stats(struct sfc_adapter *sa)
{
struct sfc_port *port = &sa->port;
int rc;
SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));
if (sa->state != SFC_ADAPTER_STARTED)
return EINVAL;
rc = efx_mac_stats_update(sa->nic, &port->mac_stats_dma_mem,
port->mac_stats_buf, NULL);
if (rc != 0)
return rc;
return 0;
}
static int
sfc_port_init_dev_link(struct sfc_adapter *sa)
{
struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
int rc;
efx_link_mode_t link_mode;
struct rte_eth_link current_link;
rc = efx_port_poll(sa->nic, &link_mode);
if (rc != 0)
return rc;
	sfc_port_link_mode_to_info(link_mode, &current_link);
EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
rte_atomic64_set((rte_atomic64_t *)dev_link,
			 *(uint64_t *)&current_link);
return 0;
}
int
sfc_port_start(struct sfc_adapter *sa)
{
struct sfc_port *port = &sa->port;
int rc;
uint32_t phy_adv_cap;
const uint32_t phy_pause_caps =
((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
sfc_log_init(sa, "entry");
sfc_log_init(sa, "init filters");
rc = efx_filter_init(sa->nic);
if (rc != 0)
goto fail_filter_init;
sfc_log_init(sa, "init port");
rc = efx_port_init(sa->nic);
if (rc != 0)
goto fail_port_init;
sfc_log_init(sa, "set flow control to %#x autoneg=%u",
port->flow_ctrl, port->flow_ctrl_autoneg);
rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
port->flow_ctrl_autoneg);
if (rc != 0)
goto fail_mac_fcntl_set;
/* Preserve pause capabilities set by above efx_mac_fcntl_set() */
efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
if (rc != 0)
goto fail_phy_adv_cap_set;
sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
rc = efx_mac_pdu_set(sa->nic, port->pdu);
if (rc != 0)
goto fail_mac_pdu_set;
sfc_log_init(sa, "set MAC address");
rc = efx_mac_addr_set(sa->nic,
sa->eth_dev->data->mac_addrs[0].addr_bytes);
if (rc != 0)
goto fail_mac_addr_set;
sfc_log_init(sa, "set MAC filters");
port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
B_TRUE : B_FALSE;
port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
B_TRUE : B_FALSE;
rc = sfc_set_rx_mode(sa);
if (rc != 0)
goto fail_mac_filter_set;
efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
sizeof(port->mac_stats_mask));
/* Update MAC stats using periodic DMA.
* Common code always uses 1000ms update period, so period_ms
* parameter only needs to be non-zero to start updates.
*/
sfc_log_init(sa, "request MAC stats DMA'ing");
rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
1000, B_FALSE);
if (rc != 0)
goto fail_mac_stats_periodic;
sfc_log_init(sa, "disable MAC drain");
rc = efx_mac_drain(sa->nic, B_FALSE);
if (rc != 0)
goto fail_mac_drain;
/* Synchronize link status knowledge */
rc = sfc_port_init_dev_link(sa);
if (rc != 0)
goto fail_port_init_dev_link;
sfc_log_init(sa, "done");
return 0;
fail_port_init_dev_link:
(void)efx_mac_drain(sa->nic, B_TRUE);
fail_mac_drain:
(void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
0, B_FALSE);
fail_mac_stats_periodic:
fail_mac_filter_set:
fail_mac_addr_set:
fail_mac_pdu_set:
fail_phy_adv_cap_set:
fail_mac_fcntl_set:
efx_port_fini(sa->nic);
fail_port_init:
efx_filter_fini(sa->nic);
fail_filter_init:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_port_stop(struct sfc_adapter *sa)
{
sfc_log_init(sa, "entry");
efx_mac_drain(sa->nic, B_TRUE);
(void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
0, B_FALSE);
efx_port_fini(sa->nic);
efx_filter_fini(sa->nic);
sfc_log_init(sa, "done");
}
int
sfc_port_init(struct sfc_adapter *sa)
{
const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
struct sfc_port *port = &sa->port;
int rc;
sfc_log_init(sa, "entry");
/* Enable flow control by default */
port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
port->flow_ctrl_autoneg = B_TRUE;
if (dev_data->dev_conf.rxmode.jumbo_frame)
port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
else
port->pdu = EFX_MAC_PDU(dev_data->mtu);
rte_spinlock_init(&port->mac_stats_lock);
rc = ENOMEM;
port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
sizeof(uint64_t), 0,
sa->socket_id);
if (port->mac_stats_buf == NULL)
goto fail_mac_stats_buf_alloc;
rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
sa->socket_id, &port->mac_stats_dma_mem);
if (rc != 0)
goto fail_mac_stats_dma_alloc;
sfc_log_init(sa, "done");
return 0;
fail_mac_stats_dma_alloc:
rte_free(port->mac_stats_buf);
fail_mac_stats_buf_alloc:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_port_fini(struct sfc_adapter *sa)
{
struct sfc_port *port = &sa->port;
sfc_log_init(sa, "entry");
sfc_dma_free(sa, &port->mac_stats_dma_mem);
rte_free(port->mac_stats_buf);
sfc_log_init(sa, "done");
}
int
sfc_set_rx_mode(struct sfc_adapter *sa)
{
struct sfc_port *port = &sa->port;
int rc;
rc = efx_mac_filter_set(sa->nic, port->promisc, B_TRUE,
port->promisc || port->allmulti, B_TRUE);
return rc;
}
void
sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
struct rte_eth_link *link_info)
{
SFC_ASSERT(link_mode < EFX_LINK_NMODES);
memset(link_info, 0, sizeof(*link_info));
if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
link_info->link_status = ETH_LINK_DOWN;
else
link_info->link_status = ETH_LINK_UP;
switch (link_mode) {
case EFX_LINK_10HDX:
link_info->link_speed = ETH_SPEED_NUM_10M;
link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_10FDX:
link_info->link_speed = ETH_SPEED_NUM_10M;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_100HDX:
link_info->link_speed = ETH_SPEED_NUM_100M;
link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_100FDX:
link_info->link_speed = ETH_SPEED_NUM_100M;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_1000HDX:
link_info->link_speed = ETH_SPEED_NUM_1G;
link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
break;
case EFX_LINK_1000FDX:
link_info->link_speed = ETH_SPEED_NUM_1G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_10000FDX:
link_info->link_speed = ETH_SPEED_NUM_10G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case EFX_LINK_40000FDX:
link_info->link_speed = ETH_SPEED_NUM_40G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
default:
SFC_ASSERT(B_FALSE);
/* FALLTHROUGH */
case EFX_LINK_UNKNOWN:
case EFX_LINK_DOWN:
link_info->link_speed = ETH_SPEED_NUM_NONE;
link_info->link_duplex = 0;
break;
}
link_info->link_autoneg = ETH_LINK_AUTONEG;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/examples/distributor/main.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <signal.h>
#include <getopt.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_debug.h>
#include <rte_prefetch.h>
#include <rte_distributor.h>
#define RX_RING_SIZE 256
#define TX_RING_SIZE 512
#define NUM_MBUFS ((64*1024)-1)
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
#define RTE_RING_SZ 1024
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
/* mask of enabled ports */
static uint32_t enabled_port_mask;
volatile uint8_t quit_signal;
volatile uint8_t quit_signal_rx;
static volatile struct app_stats {
struct {
uint64_t rx_pkts;
uint64_t returned_pkts;
uint64_t enqueued_pkts;
} rx __rte_cache_aligned;
struct {
uint64_t dequeue_pkts;
uint64_t tx_pkts;
} tx __rte_cache_aligned;
} app_stats;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
.rss_conf = {
.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
ETH_RSS_TCP | ETH_RSS_SCTP,
}
},
};
struct output_buffer {
unsigned count;
struct rte_mbuf *mbufs[BURST_SIZE];
};
/*
* Initialises a given port using global settings and with the rx buffers
* coming from the mbuf_pool passed as parameter
*/
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
int retval;
uint16_t q;
if (port >= rte_eth_dev_count())
return -1;
retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
if (retval != 0)
return retval;
for (q = 0; q < rxRings; q++) {
retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
rte_eth_dev_socket_id(port),
NULL, mbuf_pool);
if (retval < 0)
return retval;
}
for (q = 0; q < txRings; q++) {
retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
rte_eth_dev_socket_id(port),
NULL);
if (retval < 0)
return retval;
}
retval = rte_eth_dev_start(port);
if (retval < 0)
return retval;
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
if (!link.link_status) {
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
if (!link.link_status) {
printf("Link down on port %"PRIu8"\n", port);
return 0;
}
struct ether_addr addr;
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
(unsigned)port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
rte_eth_promiscuous_enable(port);
return 0;
}
struct lcore_params {
unsigned worker_id;
struct rte_distributor *d;
struct rte_ring *r;
struct rte_mempool *mem_pool;
};
static int
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
const unsigned num_workers = rte_lcore_count() - 2;
unsigned i;
struct rte_mbuf *bufs[num_workers];
if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
for (i = 0; i < num_workers; i++)
bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
return 0;
}
static int
lcore_rx(struct lcore_params *p)
{
struct rte_distributor *d = p->d;
struct rte_mempool *mem_pool = p->mem_pool;
struct rte_ring *r = p->r;
const uint8_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
uint8_t port;
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) != socket_id)
printf("WARNING, port %u is on remote NUMA node to "
"RX thread.\n\tPerformance will not "
"be optimal.\n", port);
}
printf("\nCore %u doing packet RX.\n", rte_lcore_id());
port = 0;
while (!quit_signal_rx) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0) {
if (++port == nb_ports)
port = 0;
continue;
}
struct rte_mbuf *bufs[BURST_SIZE*2];
const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs,
BURST_SIZE);
if (unlikely(nb_rx == 0)) {
if (++port == nb_ports)
port = 0;
continue;
}
app_stats.rx.rx_pkts += nb_rx;
rte_distributor_process(d, bufs, nb_rx);
const uint16_t nb_ret = rte_distributor_returned_pkts(d,
bufs, BURST_SIZE*2);
app_stats.rx.returned_pkts += nb_ret;
if (unlikely(nb_ret == 0)) {
if (++port == nb_ports)
port = 0;
continue;
}
uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
"%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
}
if (++port == nb_ports)
port = 0;
}
rte_distributor_process(d, NULL, 0);
/* flush distributor to bring to known state */
rte_distributor_flush(d);
/* set worker & tx threads quit flag */
quit_signal = 1;
/*
	 * worker threads may hang in get packet as
	 * distributor process is not running, just make sure workers
	 * get packets until quit_signal has actually been
	 * received and they gracefully shut down
*/
if (quit_workers(d, mem_pool) != 0)
return -1;
/* rx thread should quit at last */
return 0;
}
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
outbuf->count);
app_stats.tx.tx_pkts += nb_tx;
if (unlikely(nb_tx < outbuf->count)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
"%s:Packet loss with tx_burst\n", __func__);
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
}
outbuf->count = 0;
}
static inline void
flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
{
uint8_t outp;
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << outp)) == 0)
continue;
if (tx_buffers[outp].count == 0)
continue;
flush_one_port(&tx_buffers[outp], outp);
}
}
static int
lcore_tx(struct rte_ring *in_r)
{
static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
const uint8_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
uint8_t port;
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) != socket_id)
printf("WARNING, port %u is on remote NUMA node to "
"TX thread.\n\tPerformance will not "
"be optimal.\n", port);
}
printf("\nCore %u doing packet TX.\n", rte_lcore_id());
while (!quit_signal) {
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
(void *)bufs, BURST_SIZE);
app_stats.tx.dequeue_pkts += nb_rx;
/* if we get no traffic, flush anything we have */
if (unlikely(nb_rx == 0)) {
flush_all_ports(tx_buffers, nb_ports);
continue;
}
/* for traffic we receive, queue it up for transmit */
uint16_t i;
rte_prefetch_non_temporal((void *)bufs[0]);
rte_prefetch_non_temporal((void *)bufs[1]);
rte_prefetch_non_temporal((void *)bufs[2]);
for (i = 0; i < nb_rx; i++) {
struct output_buffer *outbuf;
uint8_t outp;
rte_prefetch_non_temporal((void *)bufs[i + 3]);
/*
* workers should update in_port to hold the
* output port value
*/
outp = bufs[i]->port;
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << outp)) == 0)
continue;
outbuf = &tx_buffers[outp];
outbuf->mbufs[outbuf->count++] = bufs[i];
if (outbuf->count == BURST_SIZE)
flush_one_port(outbuf, outp);
}
}
}
return 0;
}
static void
int_handler(int sig_num)
{
printf("Exiting on signal %d\n", sig_num);
/* set quit flag for rx thread to exit */
quit_signal_rx = 1;
}
static void
print_stats(void)
{
struct rte_eth_stats eth_stats;
unsigned i;
printf("\nRX thread stats:\n");
printf(" - Received: %"PRIu64"\n", app_stats.rx.rx_pkts);
printf(" - Processed: %"PRIu64"\n", app_stats.rx.returned_pkts);
printf(" - Enqueued: %"PRIu64"\n", app_stats.rx.enqueued_pkts);
printf("\nTX thread stats:\n");
printf(" - Dequeued: %"PRIu64"\n", app_stats.tx.dequeue_pkts);
printf(" - Transmitted: %"PRIu64"\n", app_stats.tx.tx_pkts);
for (i = 0; i < rte_eth_dev_count(); i++) {
		rte_eth_stats_get(i, &eth_stats);
printf("\nPort %u stats:\n", i);
printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
}
}
static int
lcore_worker(struct lcore_params *p)
{
struct rte_distributor *d = p->d;
const unsigned id = p->worker_id;
/*
* for single port, xor_val will be zero so we won't modify the output
* port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
*/
const unsigned xor_val = (rte_eth_dev_count() > 1);
struct rte_mbuf *buf = NULL;
printf("\nCore %u acting as worker core.\n", rte_lcore_id());
while (!quit_signal) {
buf = rte_distributor_get_pkt(d, id, buf);
buf->port ^= xor_val;
}
return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
printf("%s [EAL options] -- -p PORTMASK\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n",
prgname);
}
static int
parse_portmask(const char *portmask)
{
char *end = NULL;
unsigned long pm;
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
return -1;
if (pm == 0)
return -1;
return pm;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
int opt;
char **argvopt;
int option_index;
char *prgname = argv[0];
static struct option lgopts[] = {
{NULL, 0, 0, 0}
};
argvopt = argv;
while ((opt = getopt_long(argc, argvopt, "p:",
lgopts, &option_index)) != EOF) {
switch (opt) {
/* portmask */
case 'p':
enabled_port_mask = parse_portmask(optarg);
if (enabled_port_mask == 0) {
printf("invalid portmask\n");
print_usage(prgname);
return -1;
}
break;
default:
print_usage(prgname);
return -1;
}
}
if (optind <= 1) {
print_usage(prgname);
return -1;
}
argv[optind-1] = prgname;
optind = 0; /* reset getopt lib */
return 0;
}
/* Main function, does initialization and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
struct rte_distributor *d;
struct rte_ring *output_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
uint8_t portid;
uint8_t nb_ports_available;
/* catch ctrl-c so we can print on exit */
signal(SIGINT, int_handler);
/* init EAL */
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
argc -= ret;
argv += ret;
/* parse application arguments (after the EAL ones) */
ret = parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid distributor parameters\n");
if (rte_lcore_count() < 3)
rte_exit(EXIT_FAILURE, "Error, This application needs at "
"least 3 logical cores to run:\n"
"1 lcore for packet RX and distribution\n"
"1 lcore for packet TX\n"
"and at least 1 lcore for worker threads\n");
nb_ports = rte_eth_dev_count();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
if (nb_ports != 1 && (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
"when using a single port\n");
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
nb_ports_available = nb_ports;
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
printf("\nSkipping disabled port %d\n", portid);
nb_ports_available--;
continue;
}
/* init port */
printf("Initializing port %u... done\n", (unsigned) portid);
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
portid);
}
if (!nb_ports_available) {
rte_exit(EXIT_FAILURE,
"All available ports are disabled. Please set portmask.\n");
}
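/*
 * one distributor worker per lcore, excluding the RX/distribution
 * (master) lcore and the TX lcore
 */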
d = rte_distributor_create("PKT_DIST", rte_socket_id(),
rte_lcore_count() - 2);
if (d == NULL)
rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
/*
* scheduler ring is read only by the transmitter core, but written to
* by multiple threads
*/
output_ring = rte_ring_create("Output_ring", RTE_RING_SZ,
rte_socket_id(), RING_F_SC_DEQ);
if (output_ring == NULL)
rte_exit(EXIT_FAILURE, "Cannot create output ring\n");
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
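/* the last slave lcore is reserved for TX; all others run workers */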
if (worker_id == rte_lcore_count() - 2)
rte_eal_remote_launch((lcore_function_t *)lcore_tx,
output_ring, lcore_id);
else {
struct lcore_params *p =
rte_malloc(NULL, sizeof(*p), 0);
if (!p)
rte_panic("malloc failure\n");
*p = (struct lcore_params){worker_id, d, output_ring, mbuf_pool};
rte_eal_remote_launch((lcore_function_t *)lcore_worker,
p, lcore_id);
}
worker_id++;
}
/* call lcore_main on master core only */
struct lcore_params p = { 0, d, output_ring, mbuf_pool};
if (lcore_rx(&p) != 0)
return -1;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
print_stats();
return 0;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/reg_addr.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
0
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
0xfff << 0)
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
12
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
0xfff << 12)
#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
24
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
0xffUL << 24) /* @DPDK */
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
#define NIG_REG_RX_BRB_OUT_EN \
0x500e18UL
#define NIG_REG_STORM_OUT_EN \
0x500e08UL
#define PSWRQ2_REG_L2P_VALIDATE_VFID \
0x240c50UL
#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
0x1d80000UL
#define BAR0_MAP_REG_PSDM_RAM \
0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
0x1c80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
0x1f0400UL
#define PRS_REG_SEARCH_UDP \
0x1f0404UL
#define PRS_REG_SEARCH_OPENFLOW \
0x1f0434UL
#define TM_REG_PF_ENABLE_CONN \
0x2c043cUL
#define TM_REG_PF_ENABLE_TASK \
0x2c0444UL
#define TM_REG_PF_SCAN_ACTIVE_CONN \
0x2c04fcUL
#define TM_REG_PF_SCAN_ACTIVE_TASK \
0x2c0500UL
#define IGU_REG_LEADING_EDGE_LATCH \
0x18082cUL
#define IGU_REG_TRAILING_EDGE_LATCH \
0x180830UL
#define QM_REG_USG_CNT_PF_TX \
0x2f2eacUL
#define QM_REG_USG_CNT_PF_OTHER \
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
0x2aa404UL
#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
0x2aa408UL
#define PGLUE_B_REG_PGL_ADDR_90_F0 \
0x2aa40cUL
#define PGLUE_B_REG_PGL_ADDR_94_F0 \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
0x2aa174UL
#define MISC_REG_GEN_PURP_CR0 \
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
#define CNIG_REG_NW_PORT_MODE_BB_B0 \
0x218200UL
#define MISCS_REG_CHIP_NUM \
0x00976cUL
#define MISCS_REG_CHIP_REV \
0x009770UL
#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
0x00971cUL
#define MISCS_REG_CHIP_TEST_REG \
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
0xdb0804UL
#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
#define GRC_REG_TIMEOUT_EN \
0x050404UL
#define IGU_REG_BLOCK_CONFIGURATION \
0x180040UL
#define MCM_REG_INIT \
0x1200000UL
#define MCP2_REG_DBG_DWORD_ENABLE \
0x052404UL
#define MISC_REG_PORT_MODE \
0x008c00UL
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
#define MISCS_REG_CLK_100G_MODE \
0x009070UL
#define MISCS_REG_RESET_PL_HV_2 \
0x009150UL
#define MSDM_REG_ENABLE_IN1 \
0xfc0004UL
#define MSEM_REG_ENABLE_IN \
0x1800004UL
#define NIG_REG_CM_HDR \
0x500840UL
#define NCSI_REG_CONFIG \
0x040200UL
#define PSWRQ2_REG_RBC_DONE \
0x240000UL
#define PSWRQ2_REG_CFG_DONE \
0x240004UL
#define PBF_REG_INIT \
0xd80000UL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
0x1100000UL
#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
0x2a9000UL
#define PRM_REG_DISABLE_PRM \
0x230000UL
#define PRS_REG_SOFT_RST \
0x1f0000UL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
0x1600004UL
#define PSWRQ_REG_DBG_SELECT \
0x280020UL
#define PSWRQ2_REG_CDUT_P_SIZE \
0x24000cUL
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
0x29e050UL
#define PSWRD_REG_DBG_SELECT \
0x29c040UL
#define PSWRD2_REG_CONF11 \
0x29d064UL
#define PSWWR_REG_USDM_FULL_TH \
0x29a040UL
#define PSWWR2_REG_CDU_FULL_TH2 \
0x29b040UL
#define QM_REG_MAXPQSIZE_0 \
0x2f0434UL
#define RSS_REG_RSS_INIT_EN \
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
0x2d8800UL
#define TCM_REG_INIT \
0x1180000UL
#define TM_REG_PXP_READ_DATA_FIFO_INIT \
0x2c0014UL
#define TSDM_REG_ENABLE_IN1 \
0xfb0004UL
#define TSEM_REG_ENABLE_IN \
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
0x051004UL
#define USDM_REG_ENABLE_IN1 \
0xfd0004UL
#define USEM_REG_ENABLE_IN \
0x1900004UL
#define XCM_REG_INIT \
0x1000000UL
#define XSDM_REG_ENABLE_IN1 \
0xf80004UL
#define XSEM_REG_ENABLE_IN \
0x1400004UL
#define YCM_REG_INIT \
0x1080000UL
#define YSDM_REG_ENABLE_IN1 \
0xf90004UL
#define YSEM_REG_ENABLE_IN \
0x1500004UL
#define XYLD_REG_SCBD_STRICT_PRIO \
0x4c0000UL
#define TMLD_REG_SCBD_STRICT_PRIO \
0x4d0000UL
#define MULD_REG_SCBD_STRICT_PRIO \
0x4e0000UL
#define YULD_REG_SCBD_STRICT_PRIO \
0x4c8000UL
#define MISC_REG_SHARED_MEM_ADDR \
0x008c20UL
#define DMAE_REG_GO_C0 \
0x00c048UL
#define DMAE_REG_GO_C1 \
0x00c04cUL
#define DMAE_REG_GO_C2 \
0x00c050UL
#define DMAE_REG_GO_C3 \
0x00c054UL
#define DMAE_REG_GO_C4 \
0x00c058UL
#define DMAE_REG_GO_C5 \
0x00c05cUL
#define DMAE_REG_GO_C6 \
0x00c060UL
#define DMAE_REG_GO_C7 \
0x00c064UL
#define DMAE_REG_GO_C8 \
0x00c068UL
#define DMAE_REG_GO_C9 \
0x00c06cUL
#define DMAE_REG_GO_C10 \
0x00c070UL
#define DMAE_REG_GO_C11 \
0x00c074UL
#define DMAE_REG_GO_C12 \
0x00c078UL
#define DMAE_REG_GO_C13 \
0x00c07cUL
#define DMAE_REG_GO_C14 \
0x00c080UL
#define DMAE_REG_GO_C15 \
0x00c084UL
#define DMAE_REG_GO_C16 \
0x00c088UL
#define DMAE_REG_GO_C17 \
0x00c08cUL
#define DMAE_REG_GO_C18 \
0x00c090UL
#define DMAE_REG_GO_C19 \
0x00c094UL
#define DMAE_REG_GO_C20 \
0x00c098UL
#define DMAE_REG_GO_C21 \
0x00c09cUL
#define DMAE_REG_GO_C22 \
0x00c0a0UL
#define DMAE_REG_GO_C23 \
0x00c0a4UL
#define DMAE_REG_GO_C24 \
0x00c0a8UL
#define DMAE_REG_GO_C25 \
0x00c0acUL
#define DMAE_REG_GO_C26 \
0x00c0b0UL
#define DMAE_REG_GO_C27 \
0x00c0b4UL
#define DMAE_REG_GO_C28 \
0x00c0b8UL
#define DMAE_REG_GO_C29 \
0x00c0bcUL
#define DMAE_REG_GO_C30 \
0x00c0c0UL
#define DMAE_REG_GO_C31 \
0x00c0c4UL
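/*
 * The 32 per-channel DMAE "GO" doorbells above are contiguous 32-bit
 * registers, so channel i (0..31) can be addressed as
 * DMAE_REG_GO_C0 + (i * 4).
 */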
#define DMAE_REG_CMD_MEM \
0x00c800UL
#define QM_REG_MAXPQSIZETXSEL_0 \
0x2f0440UL
#define QM_REG_SDMCMDREADY \
0x2f1e10UL
#define QM_REG_SDMCMDADDR \
0x2f1e04UL
#define QM_REG_SDMCMDDATALSB \
0x2f1e08UL
#define QM_REG_SDMCMDDATAMSB \
0x2f1e0cUL
#define QM_REG_SDMCMDGO \
0x2f1e14UL
#define QM_REG_RLPFCRD \
0x2f4d80UL
#define QM_REG_RLPFINCVAL \
0x2f4c80UL
#define QM_REG_RLGLBLCRD \
0x2f4400UL
#define QM_REG_RLGLBLINCVAL \
0x2f3400UL
#define IGU_REG_ATTENTION_ENABLE \
0x18083cUL
#define IGU_REG_ATTN_MSG_ADDR_L \
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
0x1c8000UL
#define CAU_REG_SB_VAR_MEMORY \
0x1c6000UL
#define CAU_REG_PI_MEMORY \
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_MASK_ATTN_IGU \
0x008494UL
#define IGU_REG_CLEANUP_STATUS_0 \
0x180980UL
#define IGU_REG_CLEANUP_STATUS_1 \
0x180a00UL
#define IGU_REG_CLEANUP_STATUS_2 \
0x180a80UL
#define IGU_REG_CLEANUP_STATUS_3 \
0x180b00UL
#define IGU_REG_CLEANUP_STATUS_4 \
0x180b80UL
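/*
 * The five IGU cleanup status registers above are spaced 0x80 bytes
 * apart, i.e. IGU_REG_CLEANUP_STATUS_0 + (i * 0x80) for i in 0..4.
 */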
#define IGU_REG_COMMAND_REG_32LSB_DATA \
0x180840UL
#define IGU_REG_COMMAND_REG_CTRL \
0x180848UL
#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
0x1 << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
0xe0642cUL
#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
#define NWM_REG_MAC0 0x800400UL
#define NWM_REG_MAC0_SIZE 256
#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
#define ETH_MAC_REG_XIF_MODE 0x000080UL
#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
#define XMAC_REG_MODE 0x210008UL
#define XMAC_REG_RX_MAX_SIZE 0x210040UL
#define XMAC_REG_TX_CTRL_LO 0x210020UL
#define XMAC_REG_CTRL 0x210000UL
#define XMAC_REG_RX_CTRL 0x210030UL
#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
#define MISC_REG_CLK_100G_MODE 0x008c10UL
#define MISC_REG_OPTE_MODE 0x008c0cUL
#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
#define PRS_REG_SEARCH_TAG1 0x1f0444UL
#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
#define MISCS_REG_ECO_RESERVED 0x0097b4UL
#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
#define BRB_REG_SHARED_HR_AREA 0x340880UL
#define BRB_REG_TC_GUARANTIED_0 0x340900UL
#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
#define NIG_REG_VXLAN_PORT 0x50105cUL
#define PBF_REG_VXLAN_PORT 0xd80518UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
#define PRS_REG_NGE_PORT 0x1f086cUL
#define NIG_REG_NGE_PORT 0x508b38UL
#define PBF_REG_NGE_PORT 0xd8051cUL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_COMP_VER 0x508b30UL
#define PBF_REG_NGE_COMP_VER 0xd80524UL
#define PRS_REG_NGE_COMP_VER 0x1f0878UL
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
#define MCP_REG_CPU_STATE 0xe05004UL
#define MCP_REG_CPU_MODE 0xe05000UL
#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
#define TM_REG_INT_STS_1 0x2c0190UL
#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
#define TM_REG_INT_MASK_1 0x2c0194UL
#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
#define YSEM_REG_FAST_MEMORY 0x1540000UL
#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
#define TSEM_REG_FAST_MEMORY 0x1740000UL
#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
#define TCM_REG_AGG_CON_CTX 0x11814c4UL
#define TCM_REG_SM_CON_CTX 0x11814ccUL
#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
#define TCM_REG_SM_TASK_CTX 0x11814d0UL
#define MSEM_REG_FAST_MEMORY 0x1840000UL
#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
#define MCM_REG_AGG_CON_CTX 0x1201804UL
#define MCM_REG_SM_CON_CTX 0x120180cUL
#define MCM_REG_AGG_TASK_CTX 0x1201808UL
#define MCM_REG_SM_TASK_CTX 0x1201810UL
#define USEM_REG_FAST_MEMORY 0x1940000UL
#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
#define UCM_REG_AGG_CON_CTX 0x1281704UL
#define UCM_REG_SM_CON_CTX 0x128170cUL
#define UCM_REG_AGG_TASK_CTX 0x1281708UL
#define UCM_REG_SM_TASK_CTX 0x1281710UL
#define XSEM_REG_FAST_MEMORY 0x1440000UL
#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
#define XCM_REG_AGG_CON_CTX 0x1001804UL
#define XCM_REG_SM_CON_CTX 0x1001808UL
#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
#define YCM_REG_AGG_CON_CTX 0x1081804UL
#define YCM_REG_SM_CON_CTX 0x108180cUL
#define YCM_REG_AGG_TASK_CTX 0x1081808UL
#define YCM_REG_SM_TASK_CTX 0x1081810UL
#define PSEM_REG_FAST_MEMORY 0x1640000UL
#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
#define PCM_REG_SM_CON_CTX 0x1101444UL
#define GRC_REG_DBG_SELECT 0x0500a4UL
#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
#define GRC_REG_DBG_SHIFT 0x0500acUL
#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
#define CNIG_REG_DBG_SELECT_K2 0x218254UL
#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
#define NCSI_REG_DBG_SELECT 0x040474UL
#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
#define NCSI_REG_DBG_SHIFT 0x04047cUL
#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
#define BMB_REG_DBG_SELECT 0x540a7cUL
#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
#define BMB_REG_DBG_SHIFT 0x540a84UL
#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
#define PCIE_REG_DBG_SELECT 0x0547e8UL
#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
#define PCIE_REG_DBG_SHIFT 0x0547f0UL
#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
#define MCP2_REG_DBG_SELECT 0x052400UL
#define MCP2_REG_DBG_SHIFT 0x052408UL
#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
#define PSWHST_REG_DBG_SELECT 0x2a0100UL
#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
#define PSWHST2_REG_DBG_SELECT 0x29e058UL
#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
#define PSWRD_REG_DBG_SHIFT 0x29c048UL
#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
#define PSWRD2_REG_DBG_SELECT 0x29d400UL
#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
#define PSWWR_REG_DBG_SELECT 0x29a084UL
#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
#define PSWRQ_REG_DBG_SHIFT 0x280028UL
#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
#define PSWRQ2_REG_DBG_SELECT 0x240100UL
#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
#define PGLCS_REG_DBG_SELECT 0x001d14UL
#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
#define PTU_REG_DBG_SELECT 0x560100UL
#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
#define PTU_REG_DBG_SHIFT 0x560108UL
#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
#define DMAE_REG_DBG_SELECT 0x00c510UL
#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
#define DMAE_REG_DBG_SHIFT 0x00c518UL
#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
#define TCM_REG_DBG_SELECT 0x1180040UL
#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
#define TCM_REG_DBG_SHIFT 0x1180048UL
#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
#define MCM_REG_DBG_SELECT 0x1200040UL
#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
#define MCM_REG_DBG_SHIFT 0x1200048UL
#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
#define UCM_REG_DBG_SELECT 0x1280050UL
#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
#define UCM_REG_DBG_SHIFT 0x1280058UL
#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
#define XCM_REG_DBG_SELECT 0x1000040UL
#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
#define XCM_REG_DBG_SHIFT 0x1000048UL
#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
#define YCM_REG_DBG_SELECT 0x1080040UL
#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
#define YCM_REG_DBG_SHIFT 0x1080048UL
#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
#define PCM_REG_DBG_SELECT 0x1100040UL
#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
#define PCM_REG_DBG_SHIFT 0x1100048UL
#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
#define QM_REG_DBG_SELECT 0x2f2e74UL
#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
#define QM_REG_DBG_SHIFT 0x2f2e7cUL
#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
#define TM_REG_DBG_SELECT 0x2c07a8UL
#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
#define TM_REG_DBG_SHIFT 0x2c07b0UL
#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
#define DORQ_REG_DBG_SELECT 0x100ad0UL
#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
#define DORQ_REG_DBG_SHIFT 0x100ad8UL
#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
#define BRB_REG_DBG_SELECT 0x340ed0UL
#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
#define BRB_REG_DBG_SHIFT 0x340ed8UL
#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
#define SRC_REG_DBG_SELECT 0x238700UL
#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
#define SRC_REG_DBG_SHIFT 0x238708UL
#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
#define PRS_REG_DBG_SELECT 0x1f0b6cUL
#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
#define PRS_REG_DBG_SHIFT 0x1f0b74UL
#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
#define TSDM_REG_DBG_SELECT 0xfb0e28UL
#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
#define MSDM_REG_DBG_SELECT 0xfc0e28UL
#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
#define USDM_REG_DBG_SELECT 0xfd0e28UL
#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
#define USDM_REG_DBG_SHIFT 0xfd0e30UL
#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
#define XSDM_REG_DBG_SELECT 0xf80e28UL
#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
#define XSDM_REG_DBG_SHIFT 0xf80e30UL
#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
#define YSDM_REG_DBG_SELECT 0xf90e28UL
#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
#define YSDM_REG_DBG_SHIFT 0xf90e30UL
#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
#define PSDM_REG_DBG_SELECT 0xfa0e28UL
#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
#define TSEM_REG_DBG_SELECT 0x1701528UL
#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
#define TSEM_REG_DBG_SHIFT 0x1701530UL
#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
#define MSEM_REG_DBG_SELECT 0x1801528UL
#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
#define MSEM_REG_DBG_SHIFT 0x1801530UL
#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
#define USEM_REG_DBG_SELECT 0x1901528UL
#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
#define USEM_REG_DBG_SHIFT 0x1901530UL
#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
#define XSEM_REG_DBG_SELECT 0x1401528UL
#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
#define XSEM_REG_DBG_SHIFT 0x1401530UL
#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
#define YSEM_REG_DBG_SELECT 0x1501528UL
#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
#define YSEM_REG_DBG_SHIFT 0x1501530UL
#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
#define PSEM_REG_DBG_SELECT 0x1601528UL
#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
#define PSEM_REG_DBG_SHIFT 0x1601530UL
#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
#define RSS_REG_DBG_SELECT 0x238c4cUL
#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
#define RSS_REG_DBG_SHIFT 0x238c54UL
#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
#define TMLD_REG_DBG_SELECT 0x4d1600UL
#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
#define TMLD_REG_DBG_SHIFT 0x4d1608UL
#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
#define MULD_REG_DBG_SELECT 0x4e1600UL
#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
#define MULD_REG_DBG_SHIFT 0x4e1608UL
#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
#define YULD_REG_DBG_SELECT 0x4c9600UL
#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
#define YULD_REG_DBG_SHIFT 0x4c9608UL
#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
#define XYLD_REG_DBG_SELECT 0x4c1600UL
#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
#define XYLD_REG_DBG_SHIFT 0x4c1608UL
#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
#define PRM_REG_DBG_SELECT 0x2306a8UL
#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
#define PRM_REG_DBG_SHIFT 0x2306b0UL
#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
#define RPB_REG_DBG_SELECT 0x23c728UL
#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
#define RPB_REG_DBG_SHIFT 0x23c730UL
#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
#define BTB_REG_DBG_SELECT 0xdb08c8UL
#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
#define BTB_REG_DBG_SHIFT 0xdb08d0UL
#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
#define PBF_REG_DBG_SELECT 0xd80060UL
#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
#define PBF_REG_DBG_SHIFT 0xd80068UL
#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
#define RDIF_REG_DBG_SELECT 0x300500UL
#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
#define RDIF_REG_DBG_SHIFT 0x300508UL
#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
#define TDIF_REG_DBG_SELECT 0x310500UL
#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
#define TDIF_REG_DBG_SHIFT 0x310508UL
#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
#define CDU_REG_DBG_SELECT 0x580704UL
#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
#define CDU_REG_DBG_SHIFT 0x58070cUL
#define CDU_REG_DBG_FORCE_VALID 0x580710UL
#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
#define CCFC_REG_DBG_SELECT 0x2e0500UL
#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
#define CCFC_REG_DBG_SHIFT 0x2e0508UL
#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
#define TCFC_REG_DBG_SELECT 0x2d0500UL
#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
#define TCFC_REG_DBG_SHIFT 0x2d0508UL
#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
#define IGU_REG_DBG_SELECT 0x181578UL
#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
#define IGU_REG_DBG_SHIFT 0x181580UL
#define IGU_REG_DBG_FORCE_VALID 0x181584UL
#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
#define CAU_REG_DBG_SELECT 0x1c0ea8UL
#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
#define UMAC_REG_DBG_SELECT 0x051094UL
#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
#define UMAC_REG_DBG_SHIFT 0x05109cUL
#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
#define NIG_REG_DBG_SELECT 0x502140UL
#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
#define NIG_REG_DBG_SHIFT 0x502148UL
#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
#define WOL_REG_DBG_SELECT 0x600140UL
#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
#define WOL_REG_DBG_SHIFT 0x600148UL
#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
#define BMBN_REG_DBG_SELECT 0x610140UL
#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
#define BMBN_REG_DBG_SHIFT 0x610148UL
#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
#define NWM_REG_DBG_SELECT 0x8000ecUL
#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
#define NWM_REG_DBG_SHIFT 0x8000f4UL
#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
#define BRB_REG_BIG_RAM_DATA 0x341500UL
#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
#define BMB_REG_BIG_RAM_DATA 0x540f00UL
#define MISCS_REG_RESET_PL_UA 0x009050UL
#define MISC_REG_RESET_PL_UA 0x008050UL
#define MISC_REG_RESET_PL_HV 0x008060UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
#define SEM_FAST_REG_INT_RAM 0x020000UL
#define DBG_REG_DBG_BLOCK_ON 0x010454UL
#define DBG_REG_FRAMING_MODE 0x010058UL
#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
#define SEM_FAST_REG_FILTER_CID 0x000754UL
#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
#define DBG_REG_FILTER_ENABLE 0x0109d0UL
#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
#define DBG_REG_INTR_BUFFER 0x014000UL
#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
#define SEM_FAST_REG_STALL_0 0x000488UL
#define SEM_FAST_REG_STALLED 0x000494UL
#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
#define RSS_REG_RSS_RAM_DATA 0x238c20UL
#define MISCS_REG_BLOCK_256B_EN 0x009074UL
#define MCP_REG_CPU_REG_FILE 0xe05200UL
#define MCP_REG_CPU_REG_FILE_SIZE 32
#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
#define DBG_REG_FULL_MODE 0x010060UL
#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
#define DBG_REG_DEBUG_TARGET 0x01005cUL
#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
#define NIG_REG_DEBUG_PORT 0x5020d0UL
#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
#define DBG_REG_STORM_ID_NUM 0x010b14UL
#define DBG_REG_CALENDAR_SLOT0 0x010014UL
#define DBG_REG_HW_ID_NUM 0x010b10UL
#define DBG_REG_FILTER_ENABLE 0x0109d0UL
#define DBG_REG_TIMESTAMP 0x010b4cUL
#define DBG_REG_CPU_TIMEOUT 0x010450UL
#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
#define GRC_REG_TRACE_FIFO 0x050068UL
#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
#define TSEM_REG_VF_ERROR 0x1700408UL
#define USEM_REG_VF_ERROR 0x1900408UL
#define MSEM_REG_VF_ERROR 0x1800408UL
#define XSEM_REG_VF_ERROR 0x1400408UL
#define YSEM_REG_VF_ERROR 0x1500408UL
#define PSEM_REG_VF_ERROR 0x1600408UL
#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
#define IGU_REG_VF_CONFIGURATION 0x180804UL
#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
/* added */
#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
#define PCIE_REG_PRTY_MASK 0x0547b4UL
#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
#define SEM_FAST_REG_INT_RAM_SIZE 20480
#define MCP_REG_SCRATCH_SIZE 57344
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
/* 8.10.9.0 FW */
#define NIG_REG_VXLAN_CTRL 0x50105cUL
#define PRS_REG_SEARCH_ROCE 0x1f040cUL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
#define CCFC_REG_WEAK_ENABLE_VF 0x2e0704UL
#define TCFC_REG_STRONG_ENABLE_VF 0x2d070cUL
#define TCFC_REG_WEAK_ENABLE_VF 0x2d0704UL
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
#define PRS_REG_GFT_CAM 0x1f1100UL
#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
#define PGLUE_B_REG_MSDM_VF_SHIFT_B 0x2aa1c4UL
#define PGLUE_B_REG_MSDM_OFFSET_MASK_B 0x2aa1c0UL
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST 0x1f0a0cUL
#define PRS_REG_SEARCH_FCOE 0x1f0408UL
#define PGLUE_B_REG_PGL_ADDR_E8_F0 0x2aaf98UL
#define NIG_REG_DSCP_TO_TC_MAP_ENABLE 0x5088f8UL
#define PGLUE_B_REG_PGL_ADDR_EC_F0 0x2aaf9cUL
#define PGLUE_B_REG_PGL_ADDR_F0_F0 0x2aafa0UL
#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL
#define PGLUE_B_REG_PGL_ADDR_F4_F0 0x2aafa4UL
#define IGU_REG_WRITE_DONE_PENDING 0x180900UL
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
#define PRS_REG_MSG_INFO 0x1f0a1cUL
#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/i40e/base/i40e_prototype.h
|
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
#include "i40e_type.h"
#include "i40e_alloc.h"
#include "i40e_virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
u16 i40e_clean_asq(struct i40e_hw *hw);
void i40e_free_adminq_asq(struct i40e_hw *hw);
void i40e_free_adminq_arq(struct i40e_hw *hw);
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
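/*
 * Minimal usage sketch for the admin send queue (a sketch, assuming an
 * initialized struct i40e_hw *hw; the descriptor helper and the opcode
 * enum come from the shared adminq code):
 *
 *	struct i40e_aq_desc desc;
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	enum i40e_status_code ret =
 *		i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	(a non-zero ret indicates the command was not accepted)
 */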
#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw);
#endif
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
#ifdef PF_DRIVER
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
/* admin send queue commands */
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_aq_set_phy_config *config,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
bool atomic_reset);
enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
u16 max_frame_size, bool crc_en, u16 pacing,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
bool enable_link, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
bool default_port, u16 *pveb_seid,
bool enable_stats,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
u16 *statistic_index, u16 *vebs_used,
u16 *vebs_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
struct i40e_asq_cmd_details *cmd_details,
u16 *rule_id, u16 *rules_used, u16 *rules_free);
enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
struct i40e_asq_cmd_details *cmd_details,
u16 *rules_used, u16 *rules_free);
enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_remove_vlan_element_data *v_list,
u8 count, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_remove_vlan_element_data *v_list,
u8 count, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags, u16 valid_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
u8 sdp_number, u64 *timeout,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
u8 sdp_number,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, bool last_command,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, u32 field_id, void *data,
u16 buf_size, u16 *element_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, void *data, u16 buf_size,
u16 element_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
void *buff, u16 buff_size, u16 *data_size,
enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
void *buff, u16 buff_size, u16 tlv_len,
u16 *mib_len,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
u8 bridge_type, void *buff, u16 buff_size,
u16 old_len, u16 new_len, u16 offset,
u16 *mib_len,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
u8 bridge_type, void *buff, u16 buff_size,
u16 tlv_len, u16 *mib_len,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
bool start_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
u8 *num_entries,
struct i40e_aqc_switch_resource_alloc_element_resp *buf,
u16 count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
u16 mac_seid, u16 vsi_seid,
u16 *ret_seid);
enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
u16 vsi_seid, u16 tag, u16 queue_num,
u16 *tags_used, u16 *tags_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
u16 tag, u16 *tags_used, u16 *tags_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
u16 etag, u8 num_tags_in_buf, void *buf,
u16 *tags_used, u16 *tags_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
u16 etag, u16 *tags_used, u16 *tags_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
u16 old_tag, u16 new_tag, u16 *tags_used,
u16 *tags_free,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
u16 vlan_id, u16 *stat_index,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
u16 vlan_id, u16 stat_index,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
u16 bad_frame_vsi, bool save_bad_pac,
bool pad_short_pac, bool double_vlan,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
u8 tcmap, bool request, u8 *tcmap_ret,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_port_ets_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
u32 addr, u32 dw_count, void *buffer);
enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
u32 addr, u32 dw_count, void *buffer);
enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
u8 bios_mode, bool *reset_needed);
enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
u8 oem_mode);
/* i40e_common */
enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_aqc_configure_partition_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
u32 offset, u16 words, void *data,
bool last_command);
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
void *data);
enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
u32 offset, u16 words, void *data);
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
#endif /* PF_DRIVER */
#if defined(I40E_QV) || defined(VF_DRIVER)
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
#endif
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
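/* Look up the decoded packet type for a hardware RX descriptor ptype value. */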
STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40e_ptype_lookup[ptype];
}
/* prototype for functions used for SW spinlocks */
void i40e_init_spinlock(struct i40e_spinlock *sp);
void i40e_acquire_spinlock(struct i40e_spinlock *sp);
void i40e_release_spinlock(struct i40e_spinlock *sp);
void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers */
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 table_id, u32 start_index, u16 buff_size,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
u8 filter_index,
struct i40e_aqc_set_wol_filter_data *filter,
bool set_filter, bool no_wol_tco,
bool filter_valid, bool no_wol_tco_valid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 value);
enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/qede_eth_if.c
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "qede_ethdev.h"
static int
qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
{
int rc, i;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
u8 tx_switching = 0;
struct ecore_sp_vport_start_params start = { 0 };
start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
ECORE_TPA_MODE_NONE;
start.remove_inner_vlan = p_params->remove_inner_vlan;
start.tx_switching = tx_switching;
start.only_untagged = false; /* accept tagged and untagged */
start.drop_ttl0 = p_params->drop_ttl0;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.handle_ptp_pkts = p_params->handle_ptp_pkts;
start.vport_id = p_params->vport_id;
start.max_buffers_per_cqe = 16; /* TODO-is this right */
start.mtu = p_params->mtu;
/* @DPDK - Disable FW placement */
start.zero_placement_offset = 1;
rc = ecore_sp_vport_start(p_hwfn, &start);
if (rc) {
DP_ERR(edev, "Failed to start VPORT\n");
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Started V-PORT %d with MTU %d\n",
p_params->vport_id, p_params->mtu);
}
ecore_reset_vport_stats(edev);
return 0;
}
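/* Stop the given vport on every hwfn of the device. */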
static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
{
int rc, i;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
rc = ecore_sp_vport_stop(p_hwfn,
p_hwfn->hw_info.opaque_fid, vport_id);
if (rc) {
DP_ERR(edev, "Failed to stop VPORT\n");
return rc;
}
}
return 0;
}
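/*
 * On CMT devices the RSS indirection table may reference queues that do not
 * exist on a single hwfn; remap the entries and report whether they were fixed.
 */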
bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl)
{
uint16_t max = 0, k;
bool rss_mode = 0; /* disable */
int divisor;
/* Find largest entry, since it's possible RSS needs to
* be disabled [in case only 1 queue per-hwfn]
*/
for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
max = (max > p_tbl[k]) ? max : p_tbl[k];
/* Either fix RSS values or disable RSS */
if (edev->num_hwfns < max + 1) {
divisor = (max + edev->num_hwfns - 1) / edev->num_hwfns;
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"CMT - fixing RSS values (modulo %02x)\n",
divisor);
for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
p_tbl[k] = p_tbl[k] % divisor;
rss_mode = 1;
}
return rss_mode;
}
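/* Translate the generic vport update parameters and apply them on every hwfn. */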
static int
qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
{
struct ecore_sp_vport_update_params sp_params;
struct ecore_rss_params sp_rss_params;
int rc, i;
memset(&sp_params, 0, sizeof(sp_params));
memset(&sp_rss_params, 0, sizeof(sp_rss_params));
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_inner_vlan_removal_flg =
params->update_inner_vlan_removal_flg;
sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
sp_params.mtu = params->mtu;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_vport_update(p_hwfn, &sp_params,
ECORE_SPQ_MODE_EBLOCK, NULL);
if (rc) {
DP_ERR(edev, "Failed to update VPORT\n");
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Updated V-PORT %d: active_flag %d [update %d]\n",
params->vport_id, params->vport_active_flg,
params->update_vport_active_flg);
}
return 0;
}
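/*
 * Start an RX queue on the hwfn selected by rss_num; the queue producer
 * address is returned through pp_prod.
 */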
static int
qed_start_rxq(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
rc = ecore_sp_eth_rx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
p_params,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
if (rc) {
DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
p_params->sb);
return 0;
}
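/* Stop the RX queue that was started on the hwfn selected by rss_id. */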
static int
qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
{
int rc, hwfn_index;
struct ecore_hwfn *p_hwfn;
hwfn_index = params->rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
params->rx_queue_id / edev->num_hwfns,
params->eq_completion_only, false);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
return rc;
}
return 0;
}
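/*
 * Start a TX queue on the hwfn selected by rss_num; the doorbell address is
 * returned through pp_doorbell.
 */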
static int
qed_start_txq(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->qzone_id = p_params->queue_id;
p_params->stats_id = p_params->vport_id;
rc = ecore_sp_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
p_params,
0 /* tc */,
pbl_addr, pbl_size, pp_doorbell);
if (rc) {
DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
p_params->sb);
return 0;
}
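/* Stop the TX queue that was started on the hwfn selected by rss_id. */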
static int
qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
hwfn_index = params->rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
params->tx_queue_id / edev->num_hwfns);
if (rc) {
DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
return rc;
}
return 0;
}
static int
qed_fp_cqe_completion(struct ecore_dev *edev,
uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
{
return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
cqe);
}
static int qed_fastpath_stop(struct ecore_dev *edev)
{
ecore_hw_stop_fastpath(edev);
return 0;
}
static void qed_fastpath_start(struct ecore_dev *edev)
{
struct ecore_hwfn *p_hwfn;
int i;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
ecore_hw_start_fastpath(p_hwfn);
}
}
static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
ecore_get_vport_stats(edev, stats);
}
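/*
 * Program the RX/TX accept filters according to the requested promiscuous
 * mode (unicast, multicast, both, or none).
 */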
int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_filter_accept_flags flags;
memset(&flags, 0, sizeof(flags));
flags.update_rx_mode_config = 1;
flags.update_tx_mode_config = 1;
flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
ECORE_ACCEPT_MCAST_MATCHED |
ECORE_ACCEPT_BCAST;
flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
ECORE_ACCEPT_MCAST_MATCHED |
ECORE_ACCEPT_BCAST;
if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
if (IS_VF(edev)) {
flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
}
} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
QED_FILTER_RX_MODE_TYPE_PROMISC)) {
flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
ECORE_ACCEPT_MCAST_UNMATCHED;
}
return ecore_filter_accept_cmd(edev, 0, flags, false, false,
ECORE_SPQ_MODE_CB, NULL);
}
static const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
};
const struct qed_eth_ops *qed_get_eth_ops(void)
{
return &qed_eth_ops_pass;
}
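/*
 * Usage sketch (illustrative only, not part of the driver): a caller obtains
 * the ops table once and drives the vport through its callbacks, e.g.
 *
 *   const struct qed_eth_ops *ops = qed_get_eth_ops();
 *   ops->vport_start(edev, &start_params);
 *   ...
 *   ops->vport_stop(edev, vport_id);
 *
 * where edev, start_params and vport_id are supplied by the caller.
 */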
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/e1000/em_rxtx.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "base/e1000_osdep.h"
#define E1000_TXD_VLAN_SHIFT 16
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
#define E1000_TX_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_VLAN_PKT)
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
struct em_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
/**
* Structure associated with each descriptor of the TX ring of a TX queue.
*/
struct em_tx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
uint16_t next_id; /**< Index of next descriptor in ring. */
uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
* Structure associated with each RX queue.
*/
struct em_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
};
/**
* Hardware context number
*/
enum {
EM_CTX_0 = 0, /**< CTX0 */
EM_CTX_NUM = 1, /**< CTX NUM */
};
/** Offload features */
union em_vlan_macip {
uint32_t data;
struct {
uint16_t l3_len:9; /**< L3 (IP) Header Length. */
uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
uint16_t vlan_tci;
/**< VLAN Tag Control Identifier (CPU order). */
} f;
};
/*
* Compare mask for vlan_macip_len.data,
* should be in sync with em_vlan_macip.f layout.
* */
#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
/**
* Structure to check if new context need be built
*/
struct em_ctx_info {
uint64_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask */
union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
/**
* Structure associated with each TX queue.
*/
struct em_tx_queue {
volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< Current value of TDT register. */
/**< Start freeing TX buffers if there are fewer free descriptors than
this value. */
uint16_t tx_free_thresh;
/**< Number of TX descriptors to use before RS bit is set. */
uint16_t tx_rs_thresh;
/** Number of TX descriptors used since RS bit was set. */
uint16_t nb_tx_used;
/** Index to last TX descriptor to have been cleaned. */
uint16_t last_desc_cleaned;
/** Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
uint16_t queue_id; /**< TX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
};
#if 1
#define RTE_PMD_USE_PREFETCH
#endif
#ifdef RTE_PMD_USE_PREFETCH
#define rte_em_prefetch(p) rte_prefetch0(p)
#else
#define rte_em_prefetch(p) do {} while(0)
#endif
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while(0)
#endif
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif /* DEFAULT_TX_FREE_THRESH */
#ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH 32
#endif /* DEFAULT_TX_RS_THRESH */
/*********************************************************************
*
* TX function
*
**********************************************************************/
/*
* Populates TX context descriptor.
*/
static inline void
em_set_xmit_ctx(struct em_tx_queue* txq,
volatile struct e1000_context_desc *ctx_txd,
uint64_t flags,
union em_vlan_macip hdrlen)
{
uint32_t cmp_mask, cmd_len;
uint16_t ipcse, l2len;
struct e1000_context_desc ctx;
cmp_mask = 0;
cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
l2len = hdrlen.f.l2_len;
ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
/* setup IPCS* fields */
ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
offsetof(struct ipv4_hdr, hdr_checksum));
/*
* When doing checksum or TCP segmentation with IPv6 headers,
* IPCSE field should be set to 0.
*/
if (flags & PKT_TX_IP_CKSUM) {
ctx.lower_setup.ip_fields.ipcse =
(uint16_t)rte_cpu_to_le_16(ipcse - 1);
cmd_len |= E1000_TXD_CMD_IP;
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
} else {
ctx.lower_setup.ip_fields.ipcse = 0;
}
/* setup TUCS* fields */
ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
ctx.upper_setup.tcp_fields.tucse = 0;
switch (flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct tcp_hdr, cksum));
cmd_len |= E1000_TXD_CMD_TCP;
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
default:
ctx.upper_setup.tcp_fields.tucso = 0;
}
ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
ctx.tcp_seg_setup.data = 0;
*ctx_txd = ctx;
txq->ctx_cache.flags = flags;
txq->ctx_cache.cmp_mask = cmp_mask;
txq->ctx_cache.hdrlen = hdrlen;
}
/*
* Check which hardware context can be used. Use the existing match
* or create a new context descriptor.
*/
static inline uint32_t
what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
union em_vlan_macip hdrlen)
{
/* If match with the current context */
if (likely (txq->ctx_cache.flags == flags &&
((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
txq->ctx_cache.cmp_mask) == 0))
return EM_CTX_0;
/* Mismatch */
return EM_CTX_NUM;
}
/* Reset transmit descriptors after they have been used */
static inline int
em_xmit_cleanup(struct em_tx_queue *txq)
{
struct em_tx_entry *sw_ring = txq->sw_ring;
volatile struct e1000_data_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
"(port=%d queue=%d)", desc_to_clean_to,
txq->port_id, txq->queue_id);
/* Failed to clean any descriptors, better luck next time */
return -(1);
}
/* Figure out how many descriptors will be cleaned */
if (last_desc_cleaned > desc_to_clean_to)
nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
desc_to_clean_to);
else
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
PMD_TX_FREE_LOG(DEBUG,
"Cleaning %4u TX descriptors: %4u to %4u "
"(port=%d queue=%d)", nb_tx_to_clean,
last_desc_cleaned, desc_to_clean_to, txq->port_id,
txq->queue_id);
/*
* The last descriptor to clean is done, so that means all the
* descriptors from the last descriptor that was cleaned
* up to the last descriptor with the RS bit set
* are done. Only reset the threshold descriptor.
*/
txr[desc_to_clean_to].upper.fields.status = 0;
/* Update the txq to reflect the last descriptor that was cleaned */
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
/* No Error */
return 0;
}
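/* Translate mbuf checksum offload flags into the POPTS bits of the TX descriptor. */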
static inline uint32_t
tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
{
static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
uint32_t tmp;
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
return tmp;
}
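/*
 * PMD transmit burst function: builds a context descriptor when checksum/VLAN
 * offload requires it, fills one data descriptor per packet segment and
 * finally advances the TDT register to hand the work to the hardware.
 */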
uint16_t
eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct em_tx_queue *txq;
struct em_tx_entry *sw_ring;
struct em_tx_entry *txe, *txn;
volatile struct e1000_data_desc *txr;
volatile struct e1000_data_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
uint32_t popts_spec;
uint32_t cmd_type_len;
uint16_t slen;
uint64_t ol_flags;
uint16_t tx_id;
uint16_t tx_last;
uint16_t nb_tx;
uint16_t nb_used;
uint64_t tx_ol_req;
uint32_t ctx;
uint32_t new_ctx;
union em_vlan_macip hdrlen;
txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
em_xmit_cleanup(txq);
/* TX loop */
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
new_ctx = 0;
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
/*
* Determine how many (if any) context descriptors
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
/* If hardware offload required */
tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
if (tx_ol_req) {
hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
hdrlen.f.l2_len = tx_pkt->l2_len;
hdrlen.f.l3_len = tx_pkt->l3_len;
/* Decide whether a new context must be built or the existing one reused. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == EM_CTX_NUM);
}
/*
* Keep track of how many descriptors are used this loop
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
* packet is the number of segments of that packet, plus 1
* Context Descriptor for the hardware offload, if any.
* Determine the last TX descriptor to allocate in the TX ring
* for the packet, starting from the current position (tx_id)
* in the ring.
*/
tx_last = (uint16_t) (tx_id + nb_used - 1);
/* Circular ring */
if (tx_last >= txq->nb_tx_desc)
tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
" tx_first=%u tx_last=%u",
(unsigned) txq->port_id,
(unsigned) txq->queue_id,
(unsigned) tx_pkt->pkt_len,
(unsigned) tx_id,
(unsigned) tx_last);
/*
* Make sure there are enough TX descriptors available to
* transmit the entire packet.
* nb_used better be less than or equal to txq->tx_rs_thresh
*/
while (unlikely (nb_used > txq->nb_tx_free)) {
PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
"nb_used=%4u nb_free=%4u "
"(port=%d queue=%d)",
nb_used, txq->nb_tx_free,
txq->port_id, txq->queue_id);
if (em_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
if (nb_tx == 0)
return 0;
goto end_of_tx;
}
}
/*
* By now there are enough free TX descriptors to transmit
* the packet.
*/
/*
* Set common flags of all TX Data Descriptors.
*
* The following bits must be set in all Data Descriptors:
* - E1000_TXD_DTYP_DATA
* - E1000_TXD_DTYP_DEXT
*
* The following bits must be set in the first Data Descriptor
* and are ignored in the other ones:
* - E1000_TXD_POPTS_IXSM
* - E1000_TXD_POPTS_TXSM
*
* The following bits must be set in the last Data Descriptor
* and are ignored in the other ones:
* - E1000_TXD_CMD_VLE
* - E1000_TXD_CMD_IFCS
*
* The following bits must only be set in the last Data
* Descriptor:
* - E1000_TXD_CMD_EOP
*
* The following bits can be set in any Data Descriptor, but
* are only set in the last Data Descriptor:
* - E1000_TXD_CMD_RS
*/
cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_IFCS;
popts_spec = 0;
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
if (tx_ol_req) {
/*
* Setup the TX Context Descriptor if required
*/
if (new_ctx) {
volatile struct e1000_context_desc *ctx_txd;
ctx_txd = (volatile struct e1000_context_desc *)
&txr[tx_id];
txn = &sw_ring[txe->next_id];
RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
if (txe->mbuf != NULL) {
rte_pktmbuf_free_seg(txe->mbuf);
txe->mbuf = NULL;
}
em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
hdrlen);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
}
/*
* Setup the TX Data Descriptor.
* This path is taken whether a new context descriptor
* was built above or an existing one is reused.
*/
popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
}
m_seg = tx_pkt;
do {
txd = &txr[tx_id];
txn = &sw_ring[txe->next_id];
if (txe->mbuf != NULL)
rte_pktmbuf_free_seg(txe->mbuf);
txe->mbuf = m_seg;
/*
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
txd->upper.data = rte_cpu_to_le_32(popts_spec);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
m_seg = m_seg->next;
} while (m_seg != NULL);
/*
* The last packet data descriptor needs End Of Packet (EOP)
*/
cmd_type_len |= E1000_TXD_CMD_EOP;
txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
/* Set RS bit only on threshold packets' last descriptor */
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
PMD_TX_FREE_LOG(DEBUG,
"Setting RS bit on TXD id=%4u "
"(port=%d queue=%d)",
tx_last, txq->port_id, txq->queue_id);
cmd_type_len |= E1000_TXD_CMD_RS;
/* Update txq RS bit counters */
txq->nb_tx_used = 0;
}
txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
}
end_of_tx:
rte_wmb();
/*
* Set the Transmit Descriptor Tail (TDT)
*/
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
}
/*********************************************************************
*
* TX prep functions
*
**********************************************************************/
uint16_t
eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
int i, ret;
struct rte_mbuf *m;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
rte_errno = -ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
rte_errno = ret;
return i;
}
}
return i;
}
/*********************************************************************
*
* RX functions
*
**********************************************************************/
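/* Translate RX descriptor status bits into mbuf ol_flags (VLAN indication). */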
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
uint64_t pkt_flags;
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
static inline uint64_t
rx_desc_error_to_pkt_flags(uint32_t rx_error)
{
uint64_t pkt_flags = 0;
if (rx_error & E1000_RXD_ERR_IPE)
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
if (rx_error & E1000_RXD_ERR_TCPE)
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
return pkt_flags;
}
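/*
 * PMD receive burst function (non-scattered): each completed descriptor yields
 * one mbuf, and a freshly allocated mbuf is used to rearm the descriptor
 * before it is handed back to the hardware through the RDT register.
 */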
uint16_t
eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
volatile struct e1000_rx_desc *rx_ring;
volatile struct e1000_rx_desc *rxdp;
struct em_rx_queue *rxq;
struct em_rx_entry *sw_ring;
struct em_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
struct e1000_rx_desc rxd;
uint64_t dma_addr;
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
uint16_t nb_hold;
uint8_t status;
rxq = rx_queue;
nb_rx = 0;
nb_hold = 0;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
while (nb_rx < nb_pkts) {
/*
* The order of operations here is important as the DD status
* bit must not be read after any other descriptor fields.
* rx_ring and rxdp are pointing to volatile data so the order
* of accesses cannot be reordered by the compiler. If they were
* not volatile, they could be reordered which could lead to
* using invalid descriptor fields when read from rxd.
*/
rxdp = &rx_ring[rx_id];
status = rxdp->status;
if (! (status & E1000_RXD_STAT_DD))
break;
rxd = *rxdp;
/*
* End of packet.
*
* If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
* likely to be invalid and to be dropped by the various
* validation checks performed by the network stack.
*
* Allocate a new mbuf to replenish the RX ring descriptor.
* If the allocation fails:
* - arrange for that RX descriptor to be the first one
* being parsed the next time the receive function is
* invoked [on the same queue].
*
* - Stop parsing the RX ring and return immediately.
*
* This policy does not drop the packet received in the RX
* descriptor for which the allocation of a new mbuf failed.
* Thus, it allows that packet to be later retrieved if
* mbufs have been freed in the meantime.
* As a side effect, holding RX descriptors instead of
* systematically giving them back to the NIC may lead to
* RX ring exhaustion situations.
* However, the NIC can gracefully prevent such situations
* to happen by sending specific "back-pressure" flow control
* frames to its peer(s).
*/
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
"status=0x%x pkt_len=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) status,
(unsigned) rte_le_to_cpu_16(rxd.length));
nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u",
(unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
break;
}
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
if (rx_id == rxq->nb_rx_desc)
rx_id = 0;
/* Prefetch next mbuf while processing current one. */
rte_em_prefetch(sw_ring[rx_id].mbuf);
/*
* When next RX descriptor is on a cache-line boundary,
* prefetch the next 4 RX descriptors and the next 8 pointers
* to mbufs.
*/
if ((rx_id & 0x3) == 0) {
rte_em_prefetch(&rx_ring[rx_id]);
rte_em_prefetch(&sw_ring[rx_id]);
}
/* Rearm RXD: attach new mbuf and reset status to zero. */
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
/*
* Initialize the returned mbuf.
* 1) setup generic mbuf fields:
* - number of segments,
* - next segment,
* - packet length,
* - RX port identifier.
* 2) integrate hardware offload data, if any:
* - RSS flag & hash,
* - IP checksum flag,
* - VLAN TCI, if any,
* - error flags.
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
rxq->crc_len);
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = pkt_len;
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
rxm->ol_flags = rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
* of returned packets.
*/
rx_pkts[nb_rx++] = rxm;
}
rxq->rx_tail = rx_id;
/*
* If the number of free RX descriptors is greater than the RX free
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
* RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) nb_hold,
(unsigned) nb_rx);
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
return nb_rx;
}
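/*
 * PMD receive burst function for scattered packets: segments of a
 * multi-descriptor packet are chained into one mbuf chain, and the CRC is
 * subtracted when the hardware does not strip it.
 */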
uint16_t
eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct em_rx_queue *rxq;
volatile struct e1000_rx_desc *rx_ring;
volatile struct e1000_rx_desc *rxdp;
struct em_rx_entry *sw_ring;
struct em_rx_entry *rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
struct e1000_rx_desc rxd;
uint64_t dma; /* Physical address of mbuf data buffer */
uint16_t rx_id;
uint16_t nb_rx;
uint16_t nb_hold;
uint16_t data_len;
uint8_t status;
rxq = rx_queue;
nb_rx = 0;
nb_hold = 0;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
/*
* Retrieve RX context of current packet, if any.
*/
first_seg = rxq->pkt_first_seg;
last_seg = rxq->pkt_last_seg;
while (nb_rx < nb_pkts) {
next_desc:
/*
* The order of operations here is important as the DD status
* bit must not be read after any other descriptor fields.
* rx_ring and rxdp are pointing to volatile data so the order
* of accesses cannot be reordered by the compiler. If they were
* not volatile, they could be reordered which could lead to
* using invalid descriptor fields when read from rxd.
*/
rxdp = &rx_ring[rx_id];
status = rxdp->status;
if (! (status & E1000_RXD_STAT_DD))
break;
rxd = *rxdp;
/*
* Descriptor done.
*
* Allocate a new mbuf to replenish the RX ring descriptor.
* If the allocation fails:
* - arrange for that RX descriptor to be the first one
* being parsed the next time the receive function is
* invoked [on the same queue].
*
* - Stop parsing the RX ring and return immediately.
*
* This policy does not drop the packet received in the RX
* descriptor for which the allocation of a new mbuf failed.
* Thus, it allows that packet to be later retrieved if
* mbufs have been freed in the meantime.
* As a side effect, holding RX descriptors instead of
* systematically giving them back to the NIC may lead to
* RX ring exhaustion situations.
* However, the NIC can gracefully prevent such situations
* to happen by sending specific "back-pressure" flow control
* frames to its peer(s).
*/
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
"status=0x%x data_len=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) status,
(unsigned) rte_le_to_cpu_16(rxd.length));
nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
break;
}
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
if (rx_id == rxq->nb_rx_desc)
rx_id = 0;
/* Prefetch next mbuf while processing current one. */
rte_em_prefetch(sw_ring[rx_id].mbuf);
/*
* When next RX descriptor is on a cache-line boundary,
* prefetch the next 4 RX descriptors and the next 8 pointers
* to mbufs.
*/
if ((rx_id & 0x3) == 0) {
rte_em_prefetch(&rx_ring[rx_id]);
rte_em_prefetch(&sw_ring[rx_id]);
}
/*
* Update RX descriptor with the physical address of the new
* data buffer of the new allocated mbuf.
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
/*
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.length);
rxm->data_len = data_len;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
* set the pointer to the first mbuf of the packet and
* initialize its context.
* Otherwise, update the total length and the number of segments
* of the current scattered packet, and update the pointer to
* the last mbuf of the current packet.
*/
if (first_seg == NULL) {
first_seg = rxm;
first_seg->pkt_len = data_len;
first_seg->nb_segs = 1;
} else {
first_seg->pkt_len += data_len;
first_seg->nb_segs++;
last_seg->next = rxm;
}
/*
* If this is not the last buffer of the received packet,
* update the pointer to the last mbuf of the current scattered
* packet and continue to parse the RX ring.
*/
if (! (status & E1000_RXD_STAT_EOP)) {
last_seg = rxm;
goto next_desc;
}
/*
* This is the last buffer of the received packet.
* If the CRC is not stripped by the hardware:
* - Subtract the CRC length from the total packet length.
* - If the last buffer only contains the whole CRC or a part
* of it, free the mbuf associated to the last buffer.
* If part of the CRC is also contained in the previous
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len = (uint16_t)
(last_seg->data_len -
(ETHER_CRC_LEN - data_len));
last_seg->next = NULL;
} else
rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
/*
* Initialize the first mbuf of the returned packet:
* - RX port identifier,
* - hardware offload data, if any:
* - IP checksum flag,
* - error flags.
*/
first_seg->port = rxq->port_id;
first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
first_seg->ol_flags = first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
/*
* Store the mbuf address into the next entry of the array
* of returned packets.
*/
rx_pkts[nb_rx++] = first_seg;
/*
* Setup receipt context for a new packet.
*/
first_seg = NULL;
}
/*
* Record index of the next RX descriptor to probe.
*/
rxq->rx_tail = rx_id;
/*
* Save receive context.
*/
rxq->pkt_first_seg = first_seg;
rxq->pkt_last_seg = last_seg;
/*
* If the number of free RX descriptors is greater than the RX free
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
* RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) nb_hold,
(unsigned) nb_rx);
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
return nb_rx;
}
#define EM_MAX_BUF_SIZE 16384
#define EM_RCTL_FLXBUF_STEP 1024
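/* Free any mbufs still attached to the TX software ring. */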
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
unsigned i;
if (txq->sw_ring != NULL) {
for (i = 0; i != txq->nb_tx_desc; i++) {
if (txq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
}
}
}
static void
em_tx_queue_release(struct em_tx_queue *txq)
{
if (txq != NULL) {
em_tx_queue_release_mbufs(txq);
rte_free(txq->sw_ring);
rte_free(txq);
}
}
void
eth_em_tx_queue_release(void *txq)
{
em_tx_queue_release(txq);
}
/* (Re)set dynamic em_tx_queue fields to defaults */
static void
em_reset_tx_queue(struct em_tx_queue *txq)
{
uint16_t i, nb_desc, prev;
static const struct e1000_data_desc txd_init = {
.upper.fields = {.status = E1000_TXD_STAT_DD},
};
nb_desc = txq->nb_tx_desc;
/* Initialize ring entries */
prev = (uint16_t) (nb_desc - 1);
for (i = 0; i < nb_desc; i++) {
txq->tx_ring[i] = txd_init;
txq->sw_ring[i].mbuf = NULL;
txq->sw_ring[i].last_id = i;
txq->sw_ring[prev].next_id = i;
prev = i;
}
/*
* Always allow 1 descriptor to be un-allocated to avoid
* a H/W race condition
*/
txq->nb_tx_free = (uint16_t)(nb_desc - 1);
txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
txq->nb_tx_used = 0;
txq->tx_tail = 0;
memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
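/*
 * Set up a TX queue: validate the descriptor count and thresholds, allocate
 * the hardware and software rings, and reset the queue to its default state.
 */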
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
struct em_tx_queue *txq;
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
* of E1000_ALIGN.
*/
if (nb_desc % EM_TXD_ALIGN != 0 ||
(nb_desc > E1000_MAX_RING_DESC) ||
(nb_desc < E1000_MIN_RING_DESC)) {
return -(EINVAL);
}
tx_free_thresh = tx_conf->tx_free_thresh;
if (tx_free_thresh == 0)
tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
DEFAULT_TX_FREE_THRESH);
tx_rs_thresh = tx_conf->tx_rs_thresh;
if (tx_rs_thresh == 0)
tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
DEFAULT_TX_RS_THRESH);
if (tx_free_thresh >= (nb_desc - 3)) {
PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
"(tx_free_thresh=%u port=%d queue=%d)",
(unsigned int)tx_free_thresh,
(int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_rs_thresh > tx_free_thresh) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
"tx_free_thresh. (tx_free_thresh=%u "
"tx_rs_thresh=%u port=%d queue=%d)",
(unsigned int)tx_free_thresh,
(unsigned int)tx_rs_thresh,
(int)dev->data->port_id,
(int)queue_idx);
return -(EINVAL);
}
/*
* If rs_bit_thresh is greater than 1, then TX WTHRESH should be
* set to 0. If WTHRESH is greater than zero, the RS bit is ignored
* by the NIC and all descriptors are written back after the NIC
* accumulates WTHRESH descriptors.
*/
if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
"port=%d queue=%d)", (unsigned int)tx_rs_thresh,
(int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
/* Free memory prior to re-allocation if needed... */
if (dev->data->tx_queues[queue_idx] != NULL) {
em_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
RTE_CACHE_LINE_SIZE, socket_id);
if (tz == NULL)
return -ENOMEM;
/* Allocate the tx queue data structure. */
if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
RTE_CACHE_LINE_SIZE)) == NULL)
return -ENOMEM;
/* Allocate software ring */
if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(txq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_tx_queue_release(txq);
return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh = tx_free_thresh;
txq->tx_rs_thresh = tx_rs_thresh;
txq->pthresh = tx_conf->tx_thresh.pthresh;
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
return 0;
}
static void
em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
{
unsigned i;
if (rxq->sw_ring != NULL) {
for (i = 0; i != rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
rxq->sw_ring[i].mbuf = NULL;
}
}
}
}
static void
em_rx_queue_release(struct em_rx_queue *rxq)
{
if (rxq != NULL) {
em_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
rte_free(rxq);
}
}
void
eth_em_rx_queue_release(void *rxq)
{
em_rx_queue_release(rxq);
}
/* Reset dynamic em_rx_queue fields back to defaults */
static void
em_reset_rx_queue(struct em_rx_queue *rxq)
{
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
}
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
* of E1000_ALIGN.
*/
if (nb_desc % EM_RXD_ALIGN != 0 ||
(nb_desc > E1000_MAX_RING_DESC) ||
(nb_desc < E1000_MIN_RING_DESC)) {
return -EINVAL;
}
/*
* EM devices don't support drop_en functionality
*/
if (rx_conf->rx_drop_en) {
PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
"device");
return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
if (dev->data->rx_queues[queue_idx] != NULL) {
em_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
	/* Allocate RX ring for max possible number of hardware descriptors. */
rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
RTE_CACHE_LINE_SIZE, socket_id);
if (rz == NULL)
return -ENOMEM;
/* Allocate the RX queue data structure. */
if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
RTE_CACHE_LINE_SIZE)) == NULL)
return -ENOMEM;
/* Allocate software ring. */
if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
sizeof (rxq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_rx_queue_release(rxq);
return -ENOMEM;
}
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
rxq->hthresh = rx_conf->rx_thresh.hthresh;
rxq->wthresh = rx_conf->rx_thresh.wthresh;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
return 0;
}
uint32_t
eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define EM_RXQ_SCAN_INTERVAL 4
volatile struct e1000_rx_desc *rxdp;
struct em_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
return 0;
}
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
(rxdp->status & E1000_RXD_STAT_DD)) {
desc += EM_RXQ_SCAN_INTERVAL;
rxdp += EM_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
rxdp = &(rxq->rx_ring[rxq->rx_tail +
desc - rxq->nb_rx_desc]);
}
return desc;
}
int
eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile struct e1000_rx_desc *rxdp;
struct em_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
return 0;
desc = rxq->rx_tail + offset;
if (desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
return !!(rxdp->status & E1000_RXD_STAT_DD);
}
void
em_dev_clear_queues(struct rte_eth_dev *dev)
{
uint16_t i;
struct em_tx_queue *txq;
struct em_rx_queue *rxq;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (txq != NULL) {
em_tx_queue_release_mbufs(txq);
em_reset_tx_queue(txq);
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
em_rx_queue_release_mbufs(rxq);
em_reset_rx_queue(rxq);
}
}
}
void
em_dev_free_queues(struct rte_eth_dev *dev)
{
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
eth_em_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
eth_em_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
}
/*
* Takes as input/output parameter RX buffer size.
* Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
*/
static uint32_t
em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
{
/*
* For BSIZE & BSEX all configurable sizes are:
* 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
* 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
* 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
* 2048: rctl |= E1000_RCTL_SZ_2048;
* 1024: rctl |= E1000_RCTL_SZ_1024;
* 512: rctl |= E1000_RCTL_SZ_512;
* 256: rctl |= E1000_RCTL_SZ_256;
*/
static const struct {
uint32_t bufsz;
uint32_t rctl;
} bufsz_to_rctl[] = {
{16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
{8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
{4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
{2048, E1000_RCTL_SZ_2048},
{1024, E1000_RCTL_SZ_1024},
{512, E1000_RCTL_SZ_512},
{256, E1000_RCTL_SZ_256},
};
int i;
uint32_t rctl_bsize;
rctl_bsize = *bufsz;
/*
* Starting from 82571 it is possible to specify RX buffer size
* by RCTL.FLXBUF. When this field is different from zero, the
* RX buffer size = RCTL.FLXBUF * 1K
	 * (e.g. it is possible to specify RX buffer sizes of 1,2,...,15KB).
	 * It works fine on real HW, but for some reason doesn't work
	 * on the VMware-emulated 82574L.
* So for now, always use BSIZE/BSEX to setup RX buffer size.
* If you don't plan to use it on VMware emulated 82574L and
* would like to specify RX buffer size in 1K granularity,
* uncomment the following lines:
* ***************************************************************
* if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
* rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
* rctl_bsize /= EM_RCTL_FLXBUF_STEP;
* *bufsz = rctl_bsize;
* return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
* E1000_RCTL_FLXBUF_MASK);
* }
* ***************************************************************
*/
for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
i++) {
if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
*bufsz = bufsz_to_rctl[i].bufsz;
return bufsz_to_rctl[i].rctl;
}
}
/* Should never happen. */
return -EINVAL;
}
static int
em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
{
struct em_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
static const struct e1000_rx_desc rxd_init = {
.buffer_addr = 0,
};
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile struct e1000_rx_desc *rxd;
struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
"queue_id=%hu", rxq->queue_id);
return -ENOMEM;
}
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
rxd = &rxq->rx_ring[i];
rxd->buffer_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
return 0;
}
/*********************************************************************
*
* Enable receive unit.
*
**********************************************************************/
int
eth_em_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_rx_queue *rxq;
uint32_t rctl;
uint32_t rfctl;
uint32_t rxcsum;
uint32_t rctl_bsize;
uint16_t i;
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
* Make sure receives are disabled while setting
* up the descriptor ring.
*/
rctl = E1000_READ_REG(hw, E1000_RCTL);
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
/* Disable extended descriptor type. */
rfctl &= ~E1000_RFCTL_EXTEN;
/* Disable accelerated acknowledge */
if (hw->mac.type == e1000_82574)
rfctl |= E1000_RFCTL_ACK_DIS;
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573,
	 * long latencies are observed (e.g. on the Lenovo X60). This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms, another solution is being sought.
*/
if (hw->mac.type == e1000_82573)
E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
/* Determine RX bufsize. */
rctl_bsize = EM_MAX_BUF_SIZE;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
uint32_t buf_size;
rxq = dev->data->rx_queues[i];
buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM;
rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
}
rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
/* Configure and enable each RX queue. */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
uint64_t bus_addr;
uint32_t rxdctl;
rxq = dev->data->rx_queues[i];
/* Allocate buffers for descriptor rings and setup queue */
ret = em_alloc_rx_queue_mbufs(rxq);
if (ret)
return ret;
/*
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
rxq->crc_len =
(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
0 : ETHER_CRC_LEN);
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
rxq->nb_rx_desc *
sizeof(*rxq->rx_ring));
E1000_WRITE_REG(hw, E1000_RDBAH(i),
(uint32_t)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
E1000_WRITE_REG(hw, E1000_RDH(i), 0);
E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
rxdctl &= 0xFE000000;
rxdctl |= rxq->pthresh & 0x3F;
rxdctl |= (rxq->hthresh & 0x3F) << 8;
rxdctl |= (rxq->wthresh & 0x3F) << 16;
rxdctl |= E1000_RXDCTL_GRAN;
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
/*
* Due to EM devices not having any sort of hardware
* limit for packet length, jumbo frame of any size
* can be accepted, thus we have to enable scattered
* rx if jumbo frames are enabled (or if buffer size
* is too small to accommodate non-jumbo packets)
* to avoid splitting packets that don't fit into
* one buffer.
*/
if (dev->data->dev_conf.rxmode.jumbo_frame ||
rctl_bsize < ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst =
(eth_rx_burst_t)eth_em_recv_scattered_pkts;
dev->data->scattered_rx = 1;
}
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
dev->data->scattered_rx = 1;
}
/*
* Setup the Checksum Register.
* Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* No MRQ or RSS support for now */
/* Set early receive threshold on appropriate hw */
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
dev->data->dev_conf.rxmode.jumbo_frame == 1) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Setup the Receive Control Register. */
if (dev->data->dev_conf.rxmode.hw_strip_crc)
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
else
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
E1000_RCTL_RDMTS_HALF |
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Make sure VLAN Filters are off. */
rctl &= ~E1000_RCTL_VFE;
/* Don't store bad packets. */
rctl &= ~E1000_RCTL_SBP;
/* Legacy descriptor type. */
rctl &= ~E1000_RCTL_DTYP_MASK;
/*
* Configure support of jumbo frames, if any.
*/
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
/* Enable Receives. */
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
return 0;
}
/*********************************************************************
*
* Enable transmit unit.
*
**********************************************************************/
void
eth_em_tx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_tx_queue *txq;
uint32_t tctl;
uint32_t txdctl;
uint16_t i;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Setup the Base and Length of the Tx Descriptor Rings. */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
uint64_t bus_addr;
txq = dev->data->tx_queues[i];
bus_addr = txq->tx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_TDLEN(i),
txq->nb_tx_desc *
sizeof(*txq->tx_ring));
E1000_WRITE_REG(hw, E1000_TDBAH(i),
(uint32_t)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
/* Setup the HW Tx Head and Tail descriptor pointers. */
E1000_WRITE_REG(hw, E1000_TDT(i), 0);
E1000_WRITE_REG(hw, E1000_TDH(i), 0);
/* Setup Transmit threshold registers. */
txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
/*
		 * bit 22 is reserved; on some models it should always be 0,
		 * on others always 1.
*/
txdctl &= E1000_TXDCTL_COUNT_DESC;
txdctl |= txq->pthresh & 0x3F;
txdctl |= (txq->hthresh & 0x3F) << 8;
txdctl |= (txq->wthresh & 0x3F) << 16;
txdctl |= E1000_TXDCTL_GRAN;
E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
}
/* Program the Transmit Control Register. */
tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
void
em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo)
{
struct em_rx_queue *rxq;
rxq = dev->data->rx_queues[queue_id];
qinfo->mp = rxq->mb_pool;
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
}
void
em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
struct em_tx_queue *txq;
txq = dev->data->tx_queues[queue_id];
qinfo->nb_desc = txq->nb_tx_desc;
qinfo->conf.tx_thresh.pthresh = txq->pthresh;
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/ecore_dcbx.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_DCBX_H__
#define __ECORE_DCBX_H__
#include "ecore.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
#define ECORE_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
struct dcbx_local_params local_admin;
struct ecore_dcbx_results results;
struct dcb_dscp_map dscp_map;
bool dscp_nig_update;
struct dcbx_mib operational;
struct dcbx_mib remote;
struct ecore_dcbx_set set;
struct ecore_dcbx_get get;
u8 dcbx_cap;
};
struct ecore_dcbx_mib_meta_data {
struct lldp_config_params_s *lldp_local;
struct lldp_status_params_s *lldp_remote;
struct dcbx_local_params *local_admin;
struct dcb_dscp_map *dscp_map;
struct dcbx_mib *mib;
osal_size_t size;
u32 addr;
};
/* ECORE local interface routines */
enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
enum ecore_mib_read_type);
enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
struct ecore_ptt *);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
#endif /* __ECORE_DCBX_H__ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_eal/common/eal_common_bus.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of NXP nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <rte_bus.h>
#include "eal_private.h"
struct rte_bus_list rte_bus_list =
TAILQ_HEAD_INITIALIZER(rte_bus_list);
void
rte_bus_register(struct rte_bus *bus)
{
RTE_VERIFY(bus);
RTE_VERIFY(bus->name && strlen(bus->name));
	/* A bus must implement both scan and probe */
RTE_VERIFY(bus->scan);
RTE_VERIFY(bus->probe);
TAILQ_INSERT_TAIL(&rte_bus_list, bus, next);
RTE_LOG(DEBUG, EAL, "Registered [%s] bus.\n", bus->name);
}
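/*
 * Illustrative sketch (not part of the upstream file): how a bus driver
 * might plug into this registration API. The bus name and the empty
 * callback bodies are hypothetical; only struct rte_bus and
 * rte_bus_register() above are taken from this library. A constructor is
 * used here on the assumption that registration must happen before the
 * bus list is walked via rte_bus_scan()/rte_bus_probe().
 *
 *	static int my_bus_scan(void)  { return 0; }	// enumerate devices
 *	static int my_bus_probe(void) { return 0; }	// match and probe drivers
 *
 *	static struct rte_bus my_bus = {
 *		.name  = "my_bus",
 *		.scan  = my_bus_scan,
 *		.probe = my_bus_probe,
 *	};
 *
 *	static void __attribute__((constructor)) my_bus_reg(void)
 *	{
 *		rte_bus_register(&my_bus);
 *	}
 */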
void
rte_bus_unregister(struct rte_bus *bus)
{
TAILQ_REMOVE(&rte_bus_list, bus, next);
RTE_LOG(DEBUG, EAL, "Unregistered [%s] bus.\n", bus->name);
}
/* Scan all the buses for registered devices */
int
rte_bus_scan(void)
{
int ret;
struct rte_bus *bus = NULL;
TAILQ_FOREACH(bus, &rte_bus_list, next) {
ret = bus->scan();
if (ret) {
RTE_LOG(ERR, EAL, "Scan for (%s) bus failed.\n",
bus->name);
return ret;
}
}
return 0;
}
/* Probe all devices of all buses */
int
rte_bus_probe(void)
{
int ret;
struct rte_bus *bus;
TAILQ_FOREACH(bus, &rte_bus_list, next) {
ret = bus->probe();
if (ret) {
RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
bus->name);
return ret;
}
}
return 0;
}
/* Dump information of a single bus */
static int
bus_dump_one(FILE *f, struct rte_bus *bus)
{
int ret;
/* For now, dump only the bus name */
ret = fprintf(f, " %s\n", bus->name);
	/* Report an error if writing to the stream fails */
if (ret < 0)
return ret;
return 0;
}
void
rte_bus_dump(FILE *f)
{
int ret;
struct rte_bus *bus;
TAILQ_FOREACH(bus, &rte_bus_list, next) {
ret = bus_dump_one(f, bus);
if (ret) {
RTE_LOG(ERR, EAL, "Unable to write to stream (%d)\n",
ret);
break;
}
}
}
|
vicharl/containerdns
|
kdns/deps/libmicrohttpd/src/microhttpd/connection_https.c
|
/*
This file is part of libmicrohttpd
Copyright (C) 2007, 2008, 2010 <NAME> and <NAME>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file connection_https.c
* @brief Methods for managing SSL/TLS connections. This file is only
* compiled if ENABLE_HTTPS is set.
* @author <NAME>
* @author <NAME>
*/
#include "internal.h"
#include "connection.h"
#include "connection_https.h"
#include "memorypool.h"
#include "response.h"
#include "mhd_mono_clock.h"
#include <gnutls/gnutls.h>
/**
* Callback for receiving data from the socket.
*
* @param connection the MHD_Connection structure
* @param other where to write received data to
* @param i maximum size of other (in bytes)
* @return positive value for number of bytes actually received or
* negative value for error number MHD_ERR_xxx_
*/
static ssize_t
recv_tls_adapter (struct MHD_Connection *connection,
void *other,
size_t i)
{
ssize_t res;
if (i > SSIZE_MAX)
i = SSIZE_MAX;
res = gnutls_record_recv (connection->tls_session,
other,
i);
if ( (GNUTLS_E_AGAIN == res) ||
(GNUTLS_E_INTERRUPTED == res) )
{
#ifdef EPOLL_SUPPORT
if (GNUTLS_E_AGAIN == res)
connection->epoll_state &= ~MHD_EPOLL_STATE_READ_READY;
#endif
      /* Any network error means that the buffer is empty. */
connection->tls_read_ready = false;
return MHD_ERR_AGAIN_;
}
if (res < 0)
{
/* Likely 'GNUTLS_E_INVALID_SESSION' (client communication
disrupted); interpret as a hard error */
connection->tls_read_ready = false;
return MHD_ERR_NOTCONN_;
}
#ifdef EPOLL_SUPPORT
/* Unlike non-TLS connections, do not reset "read-ready" if
* received amount smaller than provided amount, as TLS
* connections may receive data by fixed-size chunks. */
#endif /* EPOLL_SUPPORT */
/* Check whether TLS buffers still have some unread data. */
connection->tls_read_ready = ( ((size_t)res == i) &&
(0 != gnutls_record_check_pending (connection->tls_session)) );
return res;
}
/**
* Callback for writing data to the socket.
*
* @param connection the MHD connection structure
* @param other data to write
* @param i number of bytes to write
* @return positive value for number of bytes actually sent or
* negative value for error number MHD_ERR_xxx_
*/
static ssize_t
send_tls_adapter (struct MHD_Connection *connection,
const void *other,
size_t i)
{
ssize_t res;
if (i > SSIZE_MAX)
i = SSIZE_MAX;
res = gnutls_record_send (connection->tls_session,
other,
i);
if ( (GNUTLS_E_AGAIN == res) ||
(GNUTLS_E_INTERRUPTED == res) )
{
#ifdef EPOLL_SUPPORT
if (GNUTLS_E_AGAIN == res)
connection->epoll_state &= ~MHD_EPOLL_STATE_WRITE_READY;
#endif
return MHD_ERR_AGAIN_;
}
if (res < 0)
{
/* Likely 'GNUTLS_E_INVALID_SESSION' (client communication
disrupted); interpret as a hard error */
return MHD_ERR_NOTCONN_;
}
#ifdef EPOLL_SUPPORT
/* Unlike non-TLS connections, do not reset "write-ready" if
* sent amount smaller than provided amount, as TLS
* connections may break data into smaller parts for sending. */
#endif /* EPOLL_SUPPORT */
return res;
}
/**
* Give gnuTLS chance to work on the TLS handshake.
*
* @param connection connection to handshake on
* @return true if the handshake has completed successfully
* and we should start to read/write data,
 *         false if the handshake is still in progress or
 *         an error occurred
*/
bool
MHD_run_tls_handshake_ (struct MHD_Connection *connection)
{
int ret;
if ((MHD_TLS_CONN_INIT == connection->tls_state) ||
(MHD_TLS_CONN_HANDSHAKING == connection->tls_state))
{
ret = gnutls_handshake (connection->tls_session);
if (ret == GNUTLS_E_SUCCESS)
{
/* set connection TLS state to enable HTTP processing */
connection->tls_state = MHD_TLS_CONN_CONNECTED;
MHD_update_last_activity_ (connection);
return true;
}
if ( (GNUTLS_E_AGAIN == ret) ||
(GNUTLS_E_INTERRUPTED == ret) )
{
connection->tls_state = MHD_TLS_CONN_HANDSHAKING;
/* handshake not done */
return false;
}
/* handshake failed */
connection->tls_state = MHD_TLS_CONN_TLS_FAILED;
#ifdef HAVE_MESSAGES
MHD_DLOG (connection->daemon,
_("Error: received handshake message out of context\n"));
#endif
MHD_connection_close_ (connection,
MHD_REQUEST_TERMINATED_WITH_ERROR);
return false;
}
return true;
}
/**
 * Set the connection callback functions to be used throughout
 * the processing of this secure connection.
*
* @param connection which callbacks should be modified
*/
void
MHD_set_https_callbacks (struct MHD_Connection *connection)
{
connection->recv_cls = &recv_tls_adapter;
connection->send_cls = &send_tls_adapter;
}
/**
* Initiate shutdown of TLS layer of connection.
*
* @param connection to use
 * @return true on success, false otherwise.
*/
bool
MHD_tls_connection_shutdown (struct MHD_Connection *connection)
{
if (MHD_TLS_CONN_WR_CLOSED > connection->tls_state)
{
const int res =
gnutls_bye(connection->tls_session, GNUTLS_SHUT_WR);
if (GNUTLS_E_SUCCESS == res)
{
connection->tls_state = MHD_TLS_CONN_WR_CLOSED;
return true;
}
if ((GNUTLS_E_AGAIN == res) ||
(GNUTLS_E_INTERRUPTED == res))
{
connection->tls_state = MHD_TLS_CONN_WR_CLOSING;
return true;
}
else
connection->tls_state = MHD_TLS_CONN_TLS_FAILED;
}
return false;
}
/* end of connection_https.c */
|
vicharl/containerdns
|
kdns/core/view.h
|
/*
*
* Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef __DNS_VIEW_H__
#define __DNS_VIEW_H__
#include <stdint.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include "kdns.h"
#define VIEW_NULL_VALUE NULL
#define VIEW_NO_NODE NULL
/* type of view update action */
enum view_action {
ACTION_ADD,
ACTION_DEL
};
typedef struct view_value {
	char cidrs[MAX_VIEW_NAME_LEN];
	char view_name[MAX_VIEW_NAME_LEN];
} view_value_t;
typedef struct _view_node {
struct _view_node *left;
struct _view_node *right;
struct _view_node *parent;
view_value_t * view_data;
} view_node_t;
typedef struct view_tree {
view_node_t *root;
view_node_t *free;
int size;
} view_tree_t;
int view_operate(view_tree_t *tree, char *pcidr, char *view_name, enum view_action action);
view_tree_t *view_tree_create(void);
view_value_t* view_find(view_tree_t *tree, uint8_t *key, size_t nbits);
void view_tree_dump(view_node_t *node, void* arg1,void (*callback)(void*,view_value_t *));
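/*
 * Illustrative usage sketch (an assumption, not part of the original header):
 * create a view tree, bind a CIDR to a view name, then look up a client
 * address. The CIDR string, the view name, and the assumption that the lookup
 * key is the IPv4 address in network byte order with nbits as the prefix
 * length are hypothetical; only the declarations above come from this API.
 *
 *	view_tree_t *tree = view_tree_create();
 *	view_operate(tree, "10.0.0.0/8", "internal", ACTION_ADD);
 *
 *	struct in_addr addr;
 *	inet_pton(AF_INET, "10.1.2.3", &addr);
 *	view_value_t *val = view_find(tree, (uint8_t *)&addr.s_addr, 32);
 *	if (val != VIEW_NULL_VALUE)
 *		;	// val->view_name would be "internal" under this assumption
 */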
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc.h
|
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_H
#define _SFC_H
#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
#include "efx.h"
#ifdef __cplusplus
extern "C" {
#endif
#define SFC_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
#if EFSYS_OPT_RX_SCALE
/** RSS key length (bytes) */
#define SFC_RSS_KEY_SIZE 40
/** RSS hash offloads mask */
#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
#endif
/*
* +---------------+
* | UNINITIALIZED |<-----------+
* +---------------+ |
* |.eth_dev_init |.eth_dev_uninit
* V |
* +---------------+------------+
* | INITIALIZED |
* +---------------+<-----------+
* |.dev_configure |
* V |
* +---------------+ |
* | CONFIGURING |------------^
* +---------------+ failed |
* |success |
* | +---------------+
* | | CLOSING |
* | +---------------+
* | ^
* V |.dev_close
* +---------------+------------+
* | CONFIGURED |
* +---------------+<-----------+
* |.dev_start |
* V |
* +---------------+ |
* | STARTING |------------^
* +---------------+ failed |
* |success |
* | +---------------+
* | | STOPPING |
* | +---------------+
* | ^
* V |.dev_stop
* +---------------+------------+
* | STARTED |
* +---------------+
*/
enum sfc_adapter_state {
SFC_ADAPTER_UNINITIALIZED = 0,
SFC_ADAPTER_INITIALIZED,
SFC_ADAPTER_CONFIGURING,
SFC_ADAPTER_CONFIGURED,
SFC_ADAPTER_CLOSING,
SFC_ADAPTER_STARTING,
SFC_ADAPTER_STARTED,
SFC_ADAPTER_STOPPING,
SFC_ADAPTER_NSTATES
};
enum sfc_dev_filter_mode {
SFC_DEV_FILTER_MODE_PROMISC = 0,
SFC_DEV_FILTER_MODE_ALLMULTI,
SFC_DEV_FILTER_NMODES
};
enum sfc_mcdi_state {
SFC_MCDI_UNINITIALIZED = 0,
SFC_MCDI_INITIALIZED,
SFC_MCDI_BUSY,
SFC_MCDI_COMPLETED,
SFC_MCDI_NSTATES
};
struct sfc_mcdi {
rte_spinlock_t lock;
efsys_mem_t mem;
enum sfc_mcdi_state state;
efx_mcdi_transport_t transport;
bool logging;
};
struct sfc_intr {
efx_intr_type_t type;
rte_intr_callback_fn handler;
boolean_t lsc_intr;
};
struct sfc_evq_info;
struct sfc_rxq_info;
struct sfc_txq_info;
struct sfc_port {
unsigned int lsc_seq;
uint32_t phy_adv_cap_mask;
uint32_t phy_adv_cap;
unsigned int flow_ctrl;
boolean_t flow_ctrl_autoneg;
size_t pdu;
boolean_t promisc;
boolean_t allmulti;
rte_spinlock_t mac_stats_lock;
uint64_t *mac_stats_buf;
efsys_mem_t mac_stats_dma_mem;
uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
};
/* Adapter private data */
struct sfc_adapter {
/*
* PMD setup and configuration is not thread safe. Since it is not
* performance sensitive, it is better to guarantee thread-safety
* and add device level lock. Adapter control operations which
* change its state should acquire the lock.
*/
rte_spinlock_t lock;
enum sfc_adapter_state state;
struct rte_eth_dev *eth_dev;
struct rte_kvargs *kvargs;
bool debug_init;
int socket_id;
efsys_bar_t mem_bar;
efx_family_t family;
efx_nic_t *nic;
rte_spinlock_t nic_lock;
struct sfc_mcdi mcdi;
struct sfc_intr intr;
struct sfc_port port;
unsigned int rxq_max;
unsigned int txq_max;
unsigned int txq_max_entries;
uint32_t evq_flags;
unsigned int evq_count;
struct sfc_evq_info *evq_info;
unsigned int mgmt_evq_index;
rte_spinlock_t mgmt_evq_lock;
unsigned int rxq_count;
struct sfc_rxq_info *rxq_info;
unsigned int txq_count;
struct sfc_txq_info *txq_info;
boolean_t tso;
unsigned int rss_channels;
#if EFSYS_OPT_RX_SCALE
efx_rx_scale_support_t rss_support;
efx_rx_hash_support_t hash_support;
efx_rx_hash_type_t rss_hash_types;
unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
uint8_t rss_key[SFC_RSS_KEY_SIZE];
#endif
};
/*
 * Wrapper functions to acquire/release the lock, so that the locking
 * scheme can be removed or changed in one place.
*/
static inline void
sfc_adapter_lock_init(struct sfc_adapter *sa)
{
rte_spinlock_init(&sa->lock);
}
static inline int
sfc_adapter_is_locked(struct sfc_adapter *sa)
{
return rte_spinlock_is_locked(&sa->lock);
}
static inline void
sfc_adapter_lock(struct sfc_adapter *sa)
{
rte_spinlock_lock(&sa->lock);
}
static inline int
sfc_adapter_trylock(struct sfc_adapter *sa)
{
return rte_spinlock_trylock(&sa->lock);
}
static inline void
sfc_adapter_unlock(struct sfc_adapter *sa)
{
rte_spinlock_unlock(&sa->lock);
}
static inline void
sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
{
/* Just for symmetry of the API */
}
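/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * pattern a control-path operation is expected to follow with the wrappers
 * above. The function name, the specific state check and the use of EINVAL
 * (assuming <errno.h>) are hypothetical; only the lock helpers and
 * enum sfc_adapter_state come from this header.
 *
 *	static int sfc_example_ctrl_op(struct sfc_adapter *sa)
 *	{
 *		int rc = 0;
 *
 *		sfc_adapter_lock(sa);
 *		if (sa->state != SFC_ADAPTER_STARTED)
 *			rc = EINVAL;	// reject when the adapter is not started
 *		else {
 *			// ... perform the state-changing work here ...
 *		}
 *		sfc_adapter_unlock(sa);
 *		return rc;
 *	}
 */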
int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp);
void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
int sfc_attach(struct sfc_adapter *sa);
void sfc_detach(struct sfc_adapter *sa);
int sfc_start(struct sfc_adapter *sa);
void sfc_stop(struct sfc_adapter *sa);
int sfc_mcdi_init(struct sfc_adapter *sa);
void sfc_mcdi_fini(struct sfc_adapter *sa);
int sfc_configure(struct sfc_adapter *sa);
void sfc_close(struct sfc_adapter *sa);
int sfc_intr_attach(struct sfc_adapter *sa);
void sfc_intr_detach(struct sfc_adapter *sa);
int sfc_intr_init(struct sfc_adapter *sa);
void sfc_intr_fini(struct sfc_adapter *sa);
int sfc_intr_start(struct sfc_adapter *sa);
void sfc_intr_stop(struct sfc_adapter *sa);
int sfc_port_init(struct sfc_adapter *sa);
void sfc_port_fini(struct sfc_adapter *sa);
int sfc_port_start(struct sfc_adapter *sa);
void sfc_port_stop(struct sfc_adapter *sa);
void sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
struct rte_eth_link *link_info);
int sfc_port_update_mac_stats(struct sfc_adapter *sa);
int sfc_set_rx_mode(struct sfc_adapter *sa);
#ifdef __cplusplus
}
#endif
#endif /* _SFC_H */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/base/ef10_impl.h
|
/*
* Copyright (c) 2015-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of the FreeBSD Project.
*/
#ifndef _SYS_EF10_IMPL_H
#define _SYS_EF10_IMPL_H
#ifdef __cplusplus
extern "C" {
#endif
#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
#elif EFSYS_OPT_HUNTINGTON
#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
#elif EFSYS_OPT_MEDFORD
#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
#endif
/*
* FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
* possibly be increased, or the write size reported by newer firmware used
* instead.
*/
#define EF10_NVRAM_CHUNK 0x80
/* Alignment requirement for value written to RX WPTR:
* the WPTR must be aligned to an 8 descriptor boundary
*/
#define EF10_RX_WPTR_ALIGN 8
/*
* Max byte offset into the packet the TCP header must start for the hardware
* to be able to parse the packet correctly.
*/
#define EF10_TCP_HEADER_OFFSET_LIMIT 208
/* Invalid RSS context handle */
#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
/* EV */
__checkReturn efx_rc_t
ef10_ev_init(
__in efx_nic_t *enp);
void
ef10_ev_fini(
__in efx_nic_t *enp);
__checkReturn efx_rc_t
ef10_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
__in size_t n,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
__in efx_evq_t *eep);
void
ef10_ev_qdestroy(
__in efx_evq_t *eep);
__checkReturn efx_rc_t
ef10_ev_qprime(
__in efx_evq_t *eep,
__in unsigned int count);
void
ef10_ev_qpost(
__in efx_evq_t *eep,
__in uint16_t data);
__checkReturn efx_rc_t
ef10_ev_qmoderate(
__in efx_evq_t *eep,
__in unsigned int us);
#if EFSYS_OPT_QSTATS
void
ef10_ev_qstats_update(
__in efx_evq_t *eep,
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
#endif /* EFSYS_OPT_QSTATS */
void
ef10_ev_rxlabel_init(
__in efx_evq_t *eep,
__in efx_rxq_t *erp,
__in unsigned int label,
__in boolean_t packed_stream);
void
ef10_ev_rxlabel_fini(
__in efx_evq_t *eep,
__in unsigned int label);
/* INTR */
__checkReturn efx_rc_t
ef10_intr_init(
__in efx_nic_t *enp,
__in efx_intr_type_t type,
__in efsys_mem_t *esmp);
void
ef10_intr_enable(
__in efx_nic_t *enp);
void
ef10_intr_disable(
__in efx_nic_t *enp);
void
ef10_intr_disable_unlocked(
__in efx_nic_t *enp);
__checkReturn efx_rc_t
ef10_intr_trigger(
__in efx_nic_t *enp,
__in unsigned int level);
void
ef10_intr_status_line(
__in efx_nic_t *enp,
__out boolean_t *fatalp,
__out uint32_t *qmaskp);
void
ef10_intr_status_message(
__in efx_nic_t *enp,
__in unsigned int message,
__out boolean_t *fatalp);
void
ef10_intr_fatal(
__in efx_nic_t *enp);
void
ef10_intr_fini(
__in efx_nic_t *enp);
/* NIC */
extern __checkReturn efx_rc_t
ef10_nic_probe(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_nic_set_drv_limits(
__inout efx_nic_t *enp,
__in efx_drv_limits_t *edlp);
extern __checkReturn efx_rc_t
ef10_nic_get_vi_pool(
__in efx_nic_t *enp,
__out uint32_t *vi_countp);
extern __checkReturn efx_rc_t
ef10_nic_get_bar_region(
__in efx_nic_t *enp,
__in efx_nic_region_t region,
__out uint32_t *offsetp,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_nic_reset(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_nic_init(
__in efx_nic_t *enp);
#if EFSYS_OPT_DIAG
extern __checkReturn efx_rc_t
ef10_nic_register_test(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_DIAG */
extern void
ef10_nic_fini(
__in efx_nic_t *enp);
extern void
ef10_nic_unprobe(
__in efx_nic_t *enp);
/* MAC */
extern __checkReturn efx_rc_t
ef10_mac_poll(
__in efx_nic_t *enp,
__out efx_link_mode_t *link_modep);
extern __checkReturn efx_rc_t
ef10_mac_up(
__in efx_nic_t *enp,
__out boolean_t *mac_upp);
extern __checkReturn efx_rc_t
ef10_mac_addr_set(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_mac_pdu_set(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_mac_pdu_get(
__in efx_nic_t *enp,
__out size_t *pdu);
extern __checkReturn efx_rc_t
ef10_mac_reconfigure(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_mac_multicast_list_set(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_mac_filter_default_rxq_set(
__in efx_nic_t *enp,
__in efx_rxq_t *erp,
__in boolean_t using_rss);
extern void
ef10_mac_filter_default_rxq_clear(
__in efx_nic_t *enp);
#if EFSYS_OPT_LOOPBACK
extern __checkReturn efx_rc_t
ef10_mac_loopback_set(
__in efx_nic_t *enp,
__in efx_link_mode_t link_mode,
__in efx_loopback_type_t loopback_type);
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
extern __checkReturn efx_rc_t
ef10_mac_stats_get_mask(
__in efx_nic_t *enp,
__inout_bcount(mask_size) uint32_t *maskp,
__in size_t mask_size);
extern __checkReturn efx_rc_t
ef10_mac_stats_update(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
__inout_opt uint32_t *generationp);
#endif /* EFSYS_OPT_MAC_STATS */
/* MCDI */
#if EFSYS_OPT_MCDI
extern __checkReturn efx_rc_t
ef10_mcdi_init(
__in efx_nic_t *enp,
__in const efx_mcdi_transport_t *mtp);
extern void
ef10_mcdi_fini(
__in efx_nic_t *enp);
extern void
ef10_mcdi_send_request(
__in efx_nic_t *enp,
__in_bcount(hdr_len) void *hdrp,
__in size_t hdr_len,
__in_bcount(sdu_len) void *sdup,
__in size_t sdu_len);
extern __checkReturn boolean_t
ef10_mcdi_poll_response(
__in efx_nic_t *enp);
extern void
ef10_mcdi_read_response(
__in efx_nic_t *enp,
__out_bcount(length) void *bufferp,
__in size_t offset,
__in size_t length);
extern efx_rc_t
ef10_mcdi_poll_reboot(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_mcdi_feature_supported(
__in efx_nic_t *enp,
__in efx_mcdi_feature_id_t id,
__out boolean_t *supportedp);
extern void
ef10_mcdi_get_timeout(
__in efx_nic_t *enp,
__in efx_mcdi_req_t *emrp,
__out uint32_t *timeoutp);
#endif /* EFSYS_OPT_MCDI */
/* NVRAM */
#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
extern __checkReturn efx_rc_t
ef10_nvram_buf_read_tlv(
__in efx_nic_t *enp,
__in_bcount(max_seg_size) caddr_t seg_data,
__in size_t max_seg_size,
__in uint32_t tag,
__deref_out_bcount_opt(*sizep) caddr_t *datap,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_nvram_buf_write_tlv(
__inout_bcount(partn_size) caddr_t partn_data,
__in size_t partn_size,
__in uint32_t tag,
__in_bcount(tag_size) caddr_t tag_data,
__in size_t tag_size,
__out size_t *total_lengthp);
extern __checkReturn efx_rc_t
ef10_nvram_partn_read_tlv(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t tag,
__deref_out_bcount_opt(*sizep) caddr_t *datap,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_nvram_partn_write_tlv(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t tag,
__in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_nvram_partn_write_segment_tlv(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t tag,
__in_bcount(size) caddr_t data,
__in size_t size,
__in boolean_t all_segments);
extern __checkReturn efx_rc_t
ef10_nvram_partn_lock(
__in efx_nic_t *enp,
__in uint32_t partn);
extern __checkReturn efx_rc_t
ef10_nvram_partn_unlock(
__in efx_nic_t *enp,
__in uint32_t partn,
__out_opt uint32_t *resultp);
#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
#if EFSYS_OPT_NVRAM
#if EFSYS_OPT_DIAG
extern __checkReturn efx_rc_t
ef10_nvram_test(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_DIAG */
extern __checkReturn efx_rc_t
ef10_nvram_type_to_partn(
__in efx_nic_t *enp,
__in efx_nvram_type_t type,
__out uint32_t *partnp);
extern __checkReturn efx_rc_t
ef10_nvram_partn_size(
__in efx_nic_t *enp,
__in uint32_t partn,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_nvram_partn_rw_start(
__in efx_nic_t *enp,
__in uint32_t partn,
__out size_t *chunk_sizep);
extern __checkReturn efx_rc_t
ef10_nvram_partn_read_mode(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
__out_bcount(size) caddr_t data,
__in size_t size,
__in uint32_t mode);
extern __checkReturn efx_rc_t
ef10_nvram_partn_read(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
__out_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_nvram_partn_erase(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_nvram_partn_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
__out_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_nvram_partn_rw_finish(
__in efx_nic_t *enp,
__in uint32_t partn);
extern __checkReturn efx_rc_t
ef10_nvram_partn_get_version(
__in efx_nic_t *enp,
__in uint32_t partn,
__out uint32_t *subtypep,
__out_ecount(4) uint16_t version[4]);
extern __checkReturn efx_rc_t
ef10_nvram_partn_set_version(
__in efx_nic_t *enp,
__in uint32_t partn,
__in_ecount(4) uint16_t version[4]);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_validate(
__in efx_nic_t *enp,
__in uint32_t partn,
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_create(
__in efx_nic_t *enp,
__in uint16_t partn_type,
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_find_item_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__out uint32_t *startp
);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_find_end(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *endp
);
extern __checkReturn __success(return != B_FALSE) boolean_t
ef10_nvram_buffer_find_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
__out uint32_t *lengthp
);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_get_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__in uint32_t length,
__out_bcount_part(item_max_size, *lengthp)
caddr_t itemp,
__in size_t item_max_size,
__out uint32_t *lengthp
);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_insert_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
__out uint32_t *lengthp
);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_delete_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end
);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_finish(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size
);
#endif /* EFSYS_OPT_NVRAM */
/* PHY */
typedef struct ef10_link_state_s {
uint32_t els_adv_cap_mask;
uint32_t els_lp_cap_mask;
unsigned int els_fcntl;
efx_link_mode_t els_link_mode;
#if EFSYS_OPT_LOOPBACK
efx_loopback_type_t els_loopback;
#endif
boolean_t els_mac_up;
} ef10_link_state_t;
extern void
ef10_phy_link_ev(
__in efx_nic_t *enp,
__in efx_qword_t *eqp,
__out efx_link_mode_t *link_modep);
extern __checkReturn efx_rc_t
ef10_phy_get_link(
__in efx_nic_t *enp,
__out ef10_link_state_t *elsp);
extern __checkReturn efx_rc_t
ef10_phy_power(
__in efx_nic_t *enp,
__in boolean_t on);
extern __checkReturn efx_rc_t
ef10_phy_reconfigure(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_phy_verify(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_phy_oui_get(
__in efx_nic_t *enp,
__out uint32_t *ouip);
#if EFSYS_OPT_PHY_STATS
extern __checkReturn efx_rc_t
ef10_phy_stats_update(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
__inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_BIST
extern __checkReturn efx_rc_t
ef10_bist_enable_offline(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_bist_start(
__in efx_nic_t *enp,
__in efx_bist_type_t type);
extern __checkReturn efx_rc_t
ef10_bist_poll(
__in efx_nic_t *enp,
__in efx_bist_type_t type,
__out efx_bist_result_t *resultp,
__out_opt __drv_when(count > 0, __notnull)
uint32_t *value_maskp,
__out_ecount_opt(count) __drv_when(count > 0, __notnull)
unsigned long *valuesp,
__in size_t count);
extern void
ef10_bist_stop(
__in efx_nic_t *enp,
__in efx_bist_type_t type);
#endif /* EFSYS_OPT_BIST */
/* TX */
extern __checkReturn efx_rc_t
ef10_tx_init(
__in efx_nic_t *enp);
extern void
ef10_tx_fini(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
__in size_t n,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
__in efx_txq_t *etp,
__out unsigned int *addedp);
extern void
ef10_tx_qdestroy(
__in efx_txq_t *etp);
extern __checkReturn efx_rc_t
ef10_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
__in unsigned int completed,
__inout unsigned int *addedp);
extern void
ef10_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed);
#if EFSYS_OPT_RX_PACKED_STREAM
extern void
ef10_rx_qps_update_credits(
__in efx_rxq_t *erp);
extern __checkReturn uint8_t *
ef10_rx_qps_packet_info(
__in efx_rxq_t *erp,
__in uint8_t *buffer,
__in uint32_t buffer_length,
__in uint32_t current_offset,
__out uint16_t *lengthp,
__out uint32_t *next_offsetp,
__out uint32_t *timestamp);
#endif
extern __checkReturn efx_rc_t
ef10_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns);
extern __checkReturn efx_rc_t
ef10_tx_qflush(
__in efx_txq_t *etp);
extern void
ef10_tx_qenable(
__in efx_txq_t *etp);
extern __checkReturn efx_rc_t
ef10_tx_qpio_enable(
__in efx_txq_t *etp);
extern void
ef10_tx_qpio_disable(
__in efx_txq_t *etp);
extern __checkReturn efx_rc_t
ef10_tx_qpio_write(
__in efx_txq_t *etp,
__in_ecount(buf_length) uint8_t *buffer,
__in size_t buf_length,
__in size_t pio_buf_offset);
extern __checkReturn efx_rc_t
ef10_tx_qpio_post(
__in efx_txq_t *etp,
__in size_t pkt_length,
__in unsigned int completed,
__inout unsigned int *addedp);
extern __checkReturn efx_rc_t
ef10_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
__in unsigned int completed,
__inout unsigned int *addedp);
extern void
ef10_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
__in boolean_t eop,
__out efx_desc_t *edp);
extern void
ef10_tx_qdesc_tso_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
__in uint8_t tcp_flags,
__out efx_desc_t *edp);
extern void
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
__in int count);
extern void
ef10_tx_qdesc_vlantci_create(
__in efx_txq_t *etp,
__in uint16_t vlan_tci,
__out efx_desc_t *edp);
#if EFSYS_OPT_QSTATS
extern void
ef10_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
#endif /* EFSYS_OPT_QSTATS */
typedef uint32_t efx_piobuf_handle_t;
#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
extern __checkReturn efx_rc_t
ef10_nic_pio_alloc(
__inout efx_nic_t *enp,
__out uint32_t *bufnump,
__out efx_piobuf_handle_t *handlep,
__out uint32_t *blknump,
__out uint32_t *offsetp,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_nic_pio_free(
__inout efx_nic_t *enp,
__in uint32_t bufnum,
__in uint32_t blknum);
extern __checkReturn efx_rc_t
ef10_nic_pio_link(
__inout efx_nic_t *enp,
__in uint32_t vi_index,
__in efx_piobuf_handle_t handle);
extern __checkReturn efx_rc_t
ef10_nic_pio_unlink(
__inout efx_nic_t *enp,
__in uint32_t vi_index);
/* VPD */
#if EFSYS_OPT_VPD
extern __checkReturn efx_rc_t
ef10_vpd_init(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_vpd_size(
__in efx_nic_t *enp,
__out size_t *sizep);
extern __checkReturn efx_rc_t
ef10_vpd_read(
__in efx_nic_t *enp,
__out_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_vpd_verify(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_vpd_reinit(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
ef10_vpd_get(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__inout efx_vpd_value_t *evvp);
extern __checkReturn efx_rc_t
ef10_vpd_set(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__in efx_vpd_value_t *evvp);
extern __checkReturn efx_rc_t
ef10_vpd_next(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__out efx_vpd_value_t *evvp,
__inout unsigned int *contp);
extern __checkReturn efx_rc_t
ef10_vpd_write(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern void
ef10_vpd_fini(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_VPD */
/* RX */
extern __checkReturn efx_rc_t
ef10_rx_init(
__in efx_nic_t *enp);
#if EFSYS_OPT_RX_SCATTER
extern __checkReturn efx_rc_t
ef10_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size);
#endif /* EFSYS_OPT_RX_SCATTER */
#if EFSYS_OPT_RX_SCALE
extern __checkReturn efx_rc_t
ef10_rx_scale_mode_set(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
extern __checkReturn efx_rc_t
ef10_rx_scale_key_set(
__in efx_nic_t *enp,
__in_ecount(n) uint8_t *key,
__in size_t n);
extern __checkReturn efx_rc_t
ef10_rx_scale_tbl_set(
__in efx_nic_t *enp,
__in_ecount(n) unsigned int *table,
__in size_t n);
extern __checkReturn uint32_t
ef10_rx_prefix_hash(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer);
#endif /* EFSYS_OPT_RX_SCALE */
extern __checkReturn efx_rc_t
ef10_rx_prefix_pktlen(
__in efx_nic_t *enp,
__in uint8_t *buffer,
__out uint16_t *lengthp);
extern void
ef10_rx_qpost(
__in efx_rxq_t *erp,
__in_ecount(n) efsys_dma_addr_t *addrp,
__in size_t size,
__in unsigned int n,
__in unsigned int completed,
__in unsigned int added);
extern void
ef10_rx_qpush(
__in efx_rxq_t *erp,
__in unsigned int added,
__inout unsigned int *pushedp);
extern __checkReturn efx_rc_t
ef10_rx_qflush(
__in efx_rxq_t *erp);
extern void
ef10_rx_qenable(
__in efx_rxq_t *erp);
extern __checkReturn efx_rc_t
ef10_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
__in efsys_mem_t *esmp,
__in size_t n,
__in uint32_t id,
__in efx_evq_t *eep,
__in efx_rxq_t *erp);
extern void
ef10_rx_qdestroy(
__in efx_rxq_t *erp);
extern void
ef10_rx_fini(
__in efx_nic_t *enp);
#if EFSYS_OPT_FILTER
typedef struct ef10_filter_handle_s {
uint32_t efh_lo;
uint32_t efh_hi;
} ef10_filter_handle_t;
typedef struct ef10_filter_entry_s {
uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
ef10_filter_handle_t efe_handle;
} ef10_filter_entry_t;
/*
* BUSY flag indicates that an update is in progress.
* AUTO_OLD flag is used to mark and sweep MAC packet filters.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1U
#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
#define EFX_EF10_FILTER_FLAGS 3U
/*
* Size of the hash table used by the driver. Doesn't need to be the
* same size as the hardware's table.
*/
#define EFX_EF10_FILTER_TBL_ROWS 8192
/* Only need to allow for one directed and one unknown unicast filter */
#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
/* Allow for the broadcast address to be added to the multicast list */
#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
typedef struct ef10_filter_table_s {
ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
efx_rxq_t *eft_default_rxq;
boolean_t eft_using_rss;
uint32_t eft_unicst_filter_indexes[
EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
uint32_t eft_unicst_filter_count;
uint32_t eft_mulcst_filter_indexes[
EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
uint32_t eft_mulcst_filter_count;
boolean_t eft_using_all_mulcst;
} ef10_filter_table_t;
__checkReturn efx_rc_t
ef10_filter_init(
__in efx_nic_t *enp);
void
ef10_filter_fini(
__in efx_nic_t *enp);
__checkReturn efx_rc_t
ef10_filter_restore(
__in efx_nic_t *enp);
__checkReturn efx_rc_t
ef10_filter_add(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec,
__in boolean_t may_replace);
__checkReturn efx_rc_t
ef10_filter_delete(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec);
extern __checkReturn efx_rc_t
ef10_filter_supported_filters(
__in efx_nic_t *enp,
__out uint32_t *list,
__out size_t *length);
extern __checkReturn efx_rc_t
ef10_filter_reconfigure(
__in efx_nic_t *enp,
__in_ecount(6) uint8_t const *mac_addr,
__in boolean_t all_unicst,
__in boolean_t mulcst,
__in boolean_t all_mulcst,
__in boolean_t brdcst,
__in_ecount(6*count) uint8_t const *addrs,
__in uint32_t count);
extern void
ef10_filter_get_default_rxq(
__in efx_nic_t *enp,
__out efx_rxq_t **erpp,
__out boolean_t *using_rss);
extern void
ef10_filter_default_rxq_set(
__in efx_nic_t *enp,
__in efx_rxq_t *erp,
__in boolean_t using_rss);
extern void
ef10_filter_default_rxq_clear(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_FILTER */
extern __checkReturn efx_rc_t
efx_mcdi_get_function_info(
__in efx_nic_t *enp,
__out uint32_t *pfp,
__out_opt uint32_t *vfp);
extern __checkReturn efx_rc_t
efx_mcdi_privilege_mask(
__in efx_nic_t *enp,
__in uint32_t pf,
__in uint32_t vf,
__out uint32_t *maskp);
extern __checkReturn efx_rc_t
efx_mcdi_get_port_assignment(
__in efx_nic_t *enp,
__out uint32_t *portp);
extern __checkReturn efx_rc_t
efx_mcdi_get_port_modes(
__in efx_nic_t *enp,
__out uint32_t *modesp,
__out_opt uint32_t *current_modep);
extern __checkReturn efx_rc_t
ef10_nic_get_port_mode_bandwidth(
__in uint32_t port_mode,
__out uint32_t *bandwidth_mbpsp);
extern __checkReturn efx_rc_t
efx_mcdi_get_mac_address_pf(
__in efx_nic_t *enp,
__out_ecount_opt(6) uint8_t mac_addrp[6]);
extern __checkReturn efx_rc_t
efx_mcdi_get_mac_address_vf(
__in efx_nic_t *enp,
__out_ecount_opt(6) uint8_t mac_addrp[6]);
extern __checkReturn efx_rc_t
efx_mcdi_get_clock(
__in efx_nic_t *enp,
__out uint32_t *sys_freqp,
__out uint32_t *dpcpu_freqp);
extern __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
__in efx_nic_t *enp,
__out_opt uint32_t *vec_basep,
__out_opt uint32_t *pf_nvecp,
__out_opt uint32_t *vf_nvecp);
extern __checkReturn efx_rc_t
ef10_get_datapath_caps(
__in efx_nic_t *enp);
extern __checkReturn efx_rc_t
ef10_get_privilege_mask(
__in efx_nic_t *enp,
__out uint32_t *maskp);
extern __checkReturn efx_rc_t
ef10_external_port_mapping(
__in efx_nic_t *enp,
__in uint32_t port,
__out uint8_t *external_portp);
#if EFSYS_OPT_RX_PACKED_STREAM
/* Data space per credit in packed stream mode */
#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16)
/*
* Received packets are always aligned at this boundary. Also there always
* exists a gap of this size between packets.
* (see SF-112241-TC, 4.5)
*/
#define EFX_RX_PACKED_STREAM_ALIGNMENT 64
/*
* Size of a pseudo-header prepended to received packets
* in packed stream mode
*/
#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8
/* Minimum space for packet in packed stream mode */
#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
EFX_MAC_PDU_MIN + \
EFX_RX_PACKED_STREAM_ALIGNMENT, \
EFX_RX_PACKED_STREAM_ALIGNMENT)
/* Maximum number of credits */
#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_EF10_IMPL_H */
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf_ops.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_cryptodev.h>
#include "cperf_ops.h"
#include "cperf_test_vectors.h"
static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
sym_op->cipher.data.length = options->buffer_sz;
sym_op->cipher.data.offset = 0;
}
return 0;
}
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* auth parameters */
sym_op->auth.data.length = options->buffer_sz;
sym_op->auth.data.offset = 0;
}
return 0;
}
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
sym_op->cipher.iv.data = test_vector->iv.data;
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
sym_op->cipher.data.length = options->buffer_sz;
sym_op->cipher.data.offset = 0;
}
return 0;
}
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->buffer_sz;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
buf = bufs_in[i];
tbuf = buf;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
sym_op->auth.aad.data = test_vector->aad.data;
sym_op->auth.aad.length = options->auth_aad_sz;
}
sym_op->auth.data.length = options->buffer_sz;
sym_op->auth.data.offset = 0;
}
return 0;
}
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
sym_op->cipher.iv.data = test_vector->iv.data;
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
sym_op->cipher.data.length = options->buffer_sz;
sym_op->cipher.data.offset = 0;
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = options->buffer_sz;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
buf = bufs_in[i];
tbuf = buf;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
sym_op->auth.aad.data = test_vector->aad.data;
sym_op->auth.aad.length = options->auth_aad_sz;
}
sym_op->auth.data.length = options->buffer_sz;
sym_op->auth.data.offset = 0;
}
return 0;
}
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
uint16_t i;
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
rte_crypto_op_attach_sym_session(ops[i], sess);
sym_op->m_src = bufs_in[i];
sym_op->m_dst = bufs_out[i];
/* cipher parameters */
sym_op->cipher.iv.data = test_vector->iv.data;
sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
sym_op->cipher.iv.length = test_vector->iv.length;
sym_op->cipher.data.length = options->buffer_sz;
sym_op->cipher.data.offset =
RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
sym_op->auth.aad.length = options->auth_aad_sz;
/* authentication parameters */
if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sym_op->auth.digest.data = test_vector->digest.data;
sym_op->auth.digest.phys_addr =
test_vector->digest.phys_addr;
sym_op->auth.digest.length = options->auth_digest_sz;
} else {
uint32_t offset = sym_op->cipher.data.length +
sym_op->cipher.data.offset;
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
buf = bufs_out[i];
} else {
buf = bufs_in[i];
tbuf = buf;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(buf, offset);
sym_op->auth.digest.length = options->auth_digest_sz;
}
sym_op->auth.data.length = options->buffer_sz;
sym_op->auth.data.offset = options->auth_aad_sz;
}
return 0;
}
static struct rte_cryptodev_sym_session *
cperf_create_session(uint8_t dev_id,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
struct rte_cryptodev_sym_session *sess = NULL;
/*
* cipher only
*/
if (options->op_type == CPERF_CIPHER_ONLY) {
cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
cipher_xform.cipher.key.data =
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
}
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
/*
* auth only
*/
} else if (options->op_type == CPERF_AUTH_ONLY) {
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
auth_xform.next = NULL;
auth_xform.auth.algo = options->auth_algo;
auth_xform.auth.op = options->auth_op;
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
auth_xform.auth.digest_length =
options->auth_digest_sz;
auth_xform.auth.add_auth_data_length =
options->auth_aad_sz;
auth_xform.auth.key.length =
test_vector->auth_key.length;
auth_xform.auth.key.data = test_vector->auth_key.data;
}
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
/*
* cipher and auth
*/
} else if (options->op_type == CPERF_CIPHER_THEN_AUTH
|| options->op_type == CPERF_AUTH_THEN_CIPHER
|| options->op_type == CPERF_AEAD) {
/*
* cipher
*/
cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
cipher_xform.next = NULL;
cipher_xform.cipher.algo = options->cipher_algo;
cipher_xform.cipher.op = options->cipher_op;
/* cipher different than null */
if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
cipher_xform.cipher.key.data =
test_vector->cipher_key.data;
cipher_xform.cipher.key.length =
test_vector->cipher_key.length;
}
/*
* auth
*/
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
auth_xform.next = NULL;
auth_xform.auth.algo = options->auth_algo;
auth_xform.auth.op = options->auth_op;
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
auth_xform.auth.digest_length = options->auth_digest_sz;
auth_xform.auth.add_auth_data_length =
options->auth_aad_sz;
/* auth options for aes gcm */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
auth_xform.auth.key.length = 0;
auth_xform.auth.key.data = NULL;
} else { /* auth options for others */
auth_xform.auth.key.length =
test_vector->auth_key.length;
auth_xform.auth.key.data =
test_vector->auth_key.data;
}
}
/* create crypto session for aes gcm */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
if (options->cipher_op ==
RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
cipher_xform.next = &auth_xform;
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id,
&cipher_xform);
} else { /* decrypt */
auth_xform.next = &cipher_xform;
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id,
&auth_xform);
}
} else { /* create crypto session for other */
/* cipher then auth */
if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
cipher_xform.next = &auth_xform;
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id,
&cipher_xform);
} else { /* auth then cipher */
auth_xform.next = &cipher_xform;
/* create crypto session */
sess = rte_cryptodev_sym_session_create(dev_id,
&auth_xform);
}
}
}
return sess;
}
int
cperf_get_op_functions(const struct cperf_options *options,
struct cperf_op_fns *op_fns)
{
memset(op_fns, 0, sizeof(struct cperf_op_fns));
op_fns->sess_create = cperf_create_session;
if (options->op_type == CPERF_AEAD
|| options->op_type == CPERF_AUTH_THEN_CIPHER
|| options->op_type == CPERF_CIPHER_THEN_AUTH) {
if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
op_fns->populate_ops = cperf_set_ops_aead;
else
op_fns->populate_ops = cperf_set_ops_cipher_auth;
return 0;
}
if (options->op_type == CPERF_AUTH_ONLY) {
if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
op_fns->populate_ops = cperf_set_ops_null_auth;
else
op_fns->populate_ops = cperf_set_ops_auth;
return 0;
}
if (options->op_type == CPERF_CIPHER_ONLY) {
if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
op_fns->populate_ops = cperf_set_ops_null_cipher;
else
op_fns->populate_ops = cperf_set_ops_cipher;
return 0;
}
return -1;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc_kvargs.h
|
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_KVARGS_H
#define _SFC_KVARGS_H
#include <rte_kvargs.h>
#ifdef __cplusplus
extern "C" {
#endif
#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
#define SFC_KVARG_DEBUG_INIT "debug_init"
#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
#define SFC_KVARG_PERF_PROFILE "perf_profile"
#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput"
#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency"
#define SFC_KVARG_VALUES_PERF_PROFILE \
"[" SFC_KVARG_PERF_PROFILE_AUTO "|" \
SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \
SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]"
struct sfc_adapter;
int sfc_kvargs_parse(struct sfc_adapter *sa);
void sfc_kvargs_cleanup(struct sfc_adapter *sa);
int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
arg_handler_t handler, void *opaque_arg);
int sfc_kvarg_bool_handler(const char *key, const char *value_str,
void *opaque);
#ifdef __cplusplus
}
#endif
#endif /* _SFC_KVARGS_H */
|
vicharl/containerdns
|
kdns/src/process.c
|
#define _GNU_SOURCE
#include <pthread.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_udp.h>
#include <arpa/inet.h>
#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_kni.h>
#include <rte_arp.h>
#include <rte_icmp.h>
#include "rte_cycles.h"
#include "dns-conf.h"
#include "process.h"
#include "kdns-adap.h"
#include "query.h"
#include "buffer.h"
#include "netdev.h"
#include "forward.h"
#include "domain_update.h"
#include "view_update.h"
#include "dns-conf.h"
#include "rate_limit.h"
#include "ctrl_msg.h"
#define PREFETCH_OFFSET (3)
#define UDP_PORT_53 (0x3500) // port 53 in network byte order
extern int dns_reload;
extern char *dns_cfgfile;
extern char *dns_procname;
static int tx_msg_slave_process(ctrl_msg *msg, unsigned slave_lcore) {
ctrl_mbufs_msg *mmsg = (ctrl_mbufs_msg *)msg;
struct netif_queue_conf *conf = netif_queue_conf_get(slave_lcore);
uint16_t cnts = rte_eth_tx_burst(conf->port_id, conf->tx_queue_id, mmsg->mbufs, mmsg->mbufs_cnts);
if (unlikely(cnts < mmsg->mbufs_cnts)) {
log_msg(LOG_ERR, "Failed to send %u pkt to tx_queue %u on slave_lcore %u\n", mmsg->mbufs_cnts - cnts, conf->tx_queue_id, slave_lcore);
do {
rte_pktmbuf_free(mmsg->mbufs[cnts]);
} while (++cnts < mmsg->mbufs_cnts);
}
free(mmsg);
return 0;
}
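/*
 * Dispatch mbufs collected on the master (KNI egress and forwarder responses)
 * to a slave lcore for transmission; the target slave is picked round-robin.
 */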
static void tx_msg_slave_ingress(struct rte_mbuf **mbufs, uint16_t rx_len) {
uint16_t i;
static unsigned kni_slave_lcore = 0;
ctrl_mbufs_msg *msg = xalloc_zero(sizeof(ctrl_mbufs_msg));
msg->cmsg.type = CTRL_MSG_TYPE_MBUF_TO_TX;
msg->cmsg.len = sizeof(ctrl_mbufs_msg);
msg->mbufs_cnts = rx_len;
for (i = 0; i < rx_len; ++i) {
msg->mbufs[i] = mbufs[i];
}
kni_slave_lcore = rte_get_next_lcore(kni_slave_lcore, 1, 1);
int s_cnt = ctrl_msg_slave_ingress((void **)&msg, 1, kni_slave_lcore);
if (s_cnt != 1) {
log_msg(LOG_ERR, "Failed to send %u pkt to tx msg to slave_lcore %u\n", rx_len, kni_slave_lcore);
for (i = 0; i < rx_len; i++) {
rte_pktmbuf_free(mbufs[i]);
}
free(msg);
}
}
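/* Master-side handler: push mbufs handed over by the slave lcores to the KNI interface. */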
static int kni_msg_master_process(ctrl_msg *msg) {
ctrl_mbufs_msg *mmsg = (ctrl_mbufs_msg *)msg;
kni_egress(mmsg->mbufs, mmsg->mbufs_cnts);
free(mmsg);
return 0;
}
static void kni_msg_master_ingress(struct rte_mbuf **mbufs, uint16_t rx_len, struct netif_queue_conf *conf) {
uint16_t i;
ctrl_mbufs_msg *msg = xalloc_zero(sizeof(ctrl_mbufs_msg));
msg->cmsg.type = CTRL_MSG_TYPE_MBUF_TO_KNI;
msg->cmsg.len = sizeof(ctrl_mbufs_msg);
msg->mbufs_cnts = rx_len;
for (i = 0; i < rx_len; ++i) {
msg->mbufs[i] = mbufs[i];
}
int s_cnt = ctrl_msg_master_ingress((void **)&msg, 1);
if (s_cnt != 1) {
log_msg(LOG_ERR, "Failed to send %u pkt to kni msg\n", rx_len);
conf->stats.pkt_dropped += (uint64_t)rx_len;
for (i = 0; i < rx_len; i++) {
rte_pktmbuf_free(mbufs[i]);
}
free(msg);
} else {
conf->stats.pkts_2kni += (uint64_t)rx_len;
}
}
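/*
 * Process one received packet: non-IPv4 and non-UDP/53 traffic is queued for
 * KNI, rate-limited or malformed packets are dropped, locally answered DNS
 * queries are queued for TX, and queries the local zones cannot answer are
 * enqueued to the forwarder.
 */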
static int packet_process(struct rte_mbuf *pkt, struct netif_queue_conf *conf, unsigned lcore_id) {
uint16_t ether_hdr_offset = sizeof(struct ether_hdr);
uint16_t ip_hdr_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
uint16_t udp_hdr_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr);
struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
struct ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *, ether_hdr_offset);
struct udp_hdr *udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct udp_hdr *, ip_hdr_offset);
#ifdef ENABLE_KDNS_METRICS
uint64_t start_time = time_now_usec();
#endif
conf->stats.pkts_rcv++;
if (unlikely(eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))) {
conf->kni_mbufs[conf->kni_len++] = pkt;
return 0;
}
if (unlikely(rate_limit(ipv4_hdr->src_addr, RATE_LIMIT_TYPE_ALL, lcore_id) != 0)) {
conf->stats.pkt_dropped++;
rte_pktmbuf_free(pkt);
return 0;
}
uint16_t ip_hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
uint16_t ip_total_length = rte_be_to_cpu_16(ipv4_hdr->total_length);
if (unlikely(ip_hdr_len != sizeof(struct ipv4_hdr) || ip_total_length < ip_hdr_len || pkt->pkt_len < (sizeof(struct ether_hdr) + ip_total_length))) {
log_msg(LOG_ERR, "illegal pkt: pkt_len(%d), ip_hdr_len(%d), ip_total_length(%d)\n", pkt->pkt_len, ip_hdr_len, ip_total_length);
conf->stats.pkt_len_err++;
conf->stats.pkt_dropped++;
rte_pktmbuf_free(pkt);
return 0;
}
if (unlikely(ipv4_hdr->next_proto_id != IPPROTO_UDP || udp_hdr->dst_port != UDP_PORT_53)) {
conf->kni_mbufs[conf->kni_len++] = pkt;
return 0;
}
conf->stats.dns_pkts_rcv++;
conf->stats.dns_lens_rcv += pkt->pkt_len;
uint16_t udp_dgram_len = rte_be_to_cpu_16(udp_hdr->dgram_len);
int query_len = udp_dgram_len - sizeof(struct udp_hdr);
if (unlikely((ip_total_length != (sizeof(struct ipv4_hdr) + udp_dgram_len) || query_len < DNS_HEAD_SIZE))) {
log_msg(LOG_ERR, "illegal pkt: ip_total_length(%d), udp_dgram_len(%d), query_len(%d)\n", ip_total_length, udp_dgram_len, query_len);
conf->stats.pkt_len_err++;
conf->stats.pkt_dropped++;
rte_pktmbuf_free(pkt);
return 0;
}
uint8_t *query_data = rte_pktmbuf_mtod_offset(pkt, uint8_t *, udp_hdr_offset);
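/* save the DNS flags word (second 16-bit field of the header) so it can be restored if the query is forwarded */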
uint16_t old_flag = *(((uint16_t *)query_data) + 1);
kdns_query_st *query = dns_packet_proess(ipv4_hdr->src_addr, query_data, query_len, lcore_id);
if (unlikely(GET_RCODE(query->packet) == RCODE_REFUSE)) {
if (unlikely(rate_limit(ipv4_hdr->src_addr, RATE_LIMIT_TYPE_FWD, lcore_id) != 0)) {
conf->stats.pkt_dropped++;
rte_pktmbuf_free(pkt);
return 0;
}
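/* restore the original flags before handing the query to the forwarder */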
*(((uint16_t *)query_data) + 1) = old_flag;
fwd_query_enqueue(pkt, ipv4_hdr->src_addr, GET_ID(query->packet), query->qtype, (char *)domain_name_to_string(query->qname, NULL));
return 0;
}
int ret_len = buffer_remaining(query->packet);
if (likely(ret_len > 0)) {
init_dns_packet_header(eth_hdr, ipv4_hdr, udp_hdr, ret_len);
pkt->pkt_len = ret_len + udp_hdr_offset;
pkt->data_len = pkt->pkt_len;
pkt->l2_len = sizeof(struct ether_hdr);
pkt->vlan_tci = ETHER_TYPE_IPv4;
pkt->l3_len = sizeof(struct ipv4_hdr);
conf->tx_mbufs[conf->tx_len++] = pkt;
conf->stats.dns_lens_snd += pkt->pkt_len;
} else {
log_msg(LOG_ERR, "failed deal dns packet, ret %d\n", ret_len);
conf->stats.pkt_dropped++;
rte_pktmbuf_free(pkt);
return 0;
}
#ifdef ENABLE_KDNS_METRICS
metrics_data_update(&conf->stats.metrics, time_now_usec() - start_time);
#endif
return 0;
}
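/*
 * Slave lcore main loop: poll ctrl messages from the master, receive a burst
 * from its RX queue, process each packet, transmit DNS responses and hand
 * non-DNS traffic over to the master.
 */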
int process_slave(__attribute__((unused)) void *arg) {
int i;
uint16_t rx_count, ctrl_msg_count = 0;
uint64_t now_tsc, prev_tsc, intvl_tsc;
struct rte_mbuf *mbufs[NETIF_MAX_PKT_BURST];
unsigned lcore_id = rte_lcore_id();
char *zones = g_dns_cfg->comm.zones;
uint32_t all_per_second = g_dns_cfg->comm.all_per_second;
uint32_t fwd_per_second = g_dns_cfg->comm.fwd_per_second;
uint32_t client_num = g_dns_cfg->comm.client_num;
now_tsc = rte_rdtsc();
prev_tsc = now_tsc;
intvl_tsc = rte_get_timer_hz() / 1000; //1ms
kdns_init(zones, lcore_id);
rate_limit_init(all_per_second, fwd_per_second, client_num, lcore_id);
struct netif_queue_conf *conf = netif_queue_conf_get(lcore_id);
log_msg(LOG_INFO, "Starting slave on core %u: rx %u, tx %u\n", lcore_id, conf->rx_queue_id, conf->tx_queue_id);
while (1) {
now_tsc = rte_rdtsc();
if (ctrl_msg_count || now_tsc - prev_tsc > intvl_tsc) {
prev_tsc = now_tsc;
ctrl_msg_count = ctrl_msg_slave_process(lcore_id);
}
rx_count = rte_eth_rx_burst(conf->port_id, conf->rx_queue_id, mbufs, NETIF_MAX_PKT_BURST);
if (unlikely(rx_count == 0)) {
continue;
}
conf->tx_len = 0;
conf->kni_len = 0;
/* Prefetch PREFETCH_OFFSET packets */
for (i = 0; i < PREFETCH_OFFSET && i < rx_count; i++) {
rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));
}
/* Prefetch the next packets and process the already prefetched ones */
for (i = 0; i < (rx_count - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(mbufs[i + PREFETCH_OFFSET], void *));
packet_process(mbufs[i], conf, lcore_id);
}
/* Process the remaining prefetched packets */
for (; i < rx_count; i++) {
packet_process(mbufs[i], conf, lcore_id);
}
// send the pkts
if (likely(conf->tx_len > 0)) {
int ntx = rte_eth_tx_burst(conf->port_id, conf->tx_queue_id, conf->tx_mbufs, conf->tx_len);
conf->stats.dns_pkts_snd += ntx;
if (unlikely(ntx != conf->tx_len)) {
log_msg(LOG_ERR, "rx=%d, tx=%d, failed tx=%d, on slave=%u\n", rx_count, conf->tx_len, conf->tx_len - ntx, lcore_id);
int i = 0;
for (i = ntx; i < conf->tx_len; i++) {
rte_pktmbuf_free(conf->tx_mbufs[i]);
}
conf->stats.pkt_dropped += conf->tx_len - ntx;
}
}
// send non-DNS packets to the master for KNI processing
if (unlikely(conf->kni_len > 0)) {
kni_msg_master_ingress(conf->kni_mbufs, conf->kni_len, conf);
}
}
return 0;
}
//set master's affinity to master core
static int reset_master_affinity(void) {
int s;
pthread_t tid;
cpu_set_t cpuset;
tid = pthread_self();
CPU_ZERO(&cpuset);
CPU_SET(rte_get_master_lcore(), &cpuset);
s = pthread_setaffinity_np(tid, sizeof(cpu_set_t), &cpuset);
if (s != 0) {
log_msg(LOG_ERR, "fail to set thread affinty, errno=%d, errinfo=%s\n", errno, strerror(errno));
return -1;
}
CPU_ZERO(&cpuset);
s = pthread_getaffinity_np(tid, sizeof(cpu_set_t), &cpuset);
if (s != 0) {
log_msg(LOG_ERR, "fail to get thread affinity, errno=%d, errinfo=%s\n", errno, strerror(errno));
return -2;
}
log_msg(LOG_INFO, "master thread affinity is set to %u\n", CPU_COUNT(&cpuset));
return 0;
}
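/*
 * Master lcore main loop: handle config reloads, process ctrl messages, and
 * shuttle packets between the KNI interface / forwarder and the slave TX queues.
 */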
int process_master(__attribute__((unused)) void *arg) {
uint16_t nb_ctrl = 0, nb_kni = 0, nb_fwd = 0;
struct rte_mbuf *mbufs[NETIF_MAX_PKT_BURST];
unsigned lcore_id = rte_lcore_id();
uint16_t web_port = g_dns_cfg->comm.web_port;
int ssl_enable = g_dns_cfg->comm.ssl_enable;
char *key_pem_file = g_dns_cfg->comm.key_pem_file;
char *cert_pem_file = g_dns_cfg->comm.cert_pem_file;
domain_info_master_init();
view_master_init();
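/* register ctrl msg handlers: KNI-bound mbufs are handled on the master, TX-bound mbufs on the slaves */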
ctrl_msg_reg(CTRL_MSG_TYPE_MBUF_TO_KNI, 0, kni_msg_master_process, NULL);
ctrl_msg_reg(CTRL_MSG_TYPE_MBUF_TO_TX, 0, NULL, tx_msg_slave_process);
domian_info_exchange_run(web_port, ssl_enable, key_pem_file, cert_pem_file);
reset_master_affinity();
log_msg(LOG_INFO, "Starting master on core %u\n", lcore_id);
while (1) {
if (dns_reload) {
dns_reload = 0;
log_msg(LOG_INFO, "Program hanged up @@@.");
dns_config_reload(dns_cfgfile, dns_procname);
}
nb_ctrl = ctrl_msg_master_process();
nb_kni = kni_ingress(mbufs, NETIF_MAX_PKT_BURST);
if (nb_kni > 0) {
tx_msg_slave_ingress(mbufs, nb_kni);
}
nb_fwd = fwd_response_dequeue(mbufs, NETIF_MAX_PKT_BURST);
if (nb_fwd > 0) {
tx_msg_slave_ingress(mbufs, nb_fwd);
}
if (nb_ctrl == 0 && nb_kni == 0 && nb_fwd == 0) {
rte_delay_ms(1);
}
}
return 0;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_ether/rte_flow.c
|
<reponame>vicharl/containerdns
/*-
* BSD LICENSE
*
* Copyright 2016 6WIND S.A.
* Copyright 2016 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of 6WIND S.A. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops;
int code;
if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
code = ENODEV;
else if (unlikely(!dev->dev_ops->filter_ctrl ||
dev->dev_ops->filter_ctrl(dev,
RTE_ETH_FILTER_GENERIC,
RTE_ETH_FILTER_GET,
&ops) ||
!ops))
code = ENOSYS;
else
return ops;
rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(code));
return NULL;
}
/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint8_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->validate))
return ops->validate(dev, attr, pattern, actions, error);
return -rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint8_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
if (unlikely(!ops))
return NULL;
if (likely(!!ops->create))
return ops->create(dev, attr, pattern, actions, error);
rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
return NULL;
}
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint8_t port_id,
struct rte_flow *flow,
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->destroy))
return ops->destroy(dev, flow, error);
return -rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint8_t port_id,
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
if (unlikely(!ops))
return -rte_errno;
if (likely(!!ops->flush))
return ops->flush(dev, error);
return -rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
/* Query an existing flow rule. */
int
rte_flow_query(uint8_t port_id,
struct rte_flow *flow,
enum rte_flow_action_type action,
void *data,
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
if (!ops)
return -rte_errno;
if (likely(!!ops->query))
return ops->query(dev, flow, action, data, error);
return -rte_flow_error_set(error, ENOSYS,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(ENOSYS));
}
|
vicharl/containerdns
|
kdns/src/kdns-adap.h
|
<reponame>vicharl/containerdns
#ifndef _NSD_APAPTER_H_
#define _NSD_APAPTER_H_
#include "query.h"
#include "kdns.h"
#include "util.h"
int kdns_init(char *zones, unsigned lcore_id);
int kdns_prepare_init(struct kdns *kdns, struct query **query, char *zones);
kdns_query_st *dns_packet_proess(uint32_t sip, uint8_t *query_data, int query_len, unsigned lcore_id);
int check_pid(const char *pid_file);
void write_pid(const char *pid_file);
int kdns_zones_realod(struct kdns* kdns, char *del_zones, char *add_zones);
int kdns_slave_zones_realod(char *del_zones, char *add_zones, unsigned lcore_id);
#endif
|
vicharl/containerdns
|
kdns/src/view_update.h
|
#ifndef __VIEW_UPDATE_H__
#define __VIEW_UPDATE_H__
#include "kdns.h"
#include "view.h"
#include "webserver.h"
#include "query.h"
#include "ctrl_msg.h"
typedef struct view_info_update {
ctrl_msg cmsg;
enum view_action action;
char cidrs[MAX_VIEW_NAME_LEN];
char view_name[MAX_VIEW_NAME_LEN];
struct view_info_update *next;
} view_info_update_st;
void *view_post(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response);
void *view_del(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response);
void *views_post_all(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response);
void *views_delete_all(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response);
void *view_get(__attribute__((unused)) struct connection_info_struct *con_info, char *url, int *len_response);
void view_query_slave_process(struct query *query, unsigned slave_lcore);
void view_query_master_process(struct query *query);
void view_master_init(void);
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/scheduler/scheduler_pmd_ops.c
|
<filename>kdns/dpdk-17.02/drivers/crypto/scheduler/scheduler_pmd_ops.c
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_reorder.h>
#include "scheduler_pmd_private.h"
/** Configure device */
static int
scheduler_pmd_config(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
int ret = 0;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
ret = (*slave_dev->dev_ops->dev_configure)(slave_dev);
if (ret < 0)
break;
}
return ret;
}
static int
update_reorder_buff(struct rte_cryptodev *dev, uint16_t qp_id)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
if (sched_ctx->reordering_enabled) {
char reorder_buff_name[RTE_CRYPTODEV_NAME_MAX_LEN];
uint32_t buff_size = sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE;
if (qp_ctx->reorder_buf) {
rte_reorder_free(qp_ctx->reorder_buf);
qp_ctx->reorder_buf = NULL;
}
if (!buff_size)
return 0;
if (snprintf(reorder_buff_name, RTE_CRYPTODEV_NAME_MAX_LEN,
"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
dev->data->dev_id, qp_id) < 0) {
CS_LOG_ERR("failed to create unique reorder buffer "
"name");
return -ENOMEM;
}
qp_ctx->reorder_buf = rte_reorder_create(reorder_buff_name,
rte_socket_id(), buff_size);
if (!qp_ctx->reorder_buf) {
CS_LOG_ERR("failed to create reorder buffer");
return -ENOMEM;
}
} else {
if (qp_ctx->reorder_buf) {
rte_reorder_free(qp_ctx->reorder_buf);
qp_ctx->reorder_buf = NULL;
}
}
return 0;
}
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
int ret;
if (dev->data->dev_started)
return 0;
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
ret = update_reorder_buff(dev, i);
if (ret < 0) {
CS_LOG_ERR("Failed to update reorder buffer");
return ret;
}
}
if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
CS_LOG_ERR("Scheduler mode is not set");
return -1;
}
if (!sched_ctx->nb_slaves) {
CS_LOG_ERR("No slave in the scheduler");
return -1;
}
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
CS_LOG_ERR("Failed to attach slave");
return -ENOTSUP;
}
}
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
CS_LOG_ERR("Scheduler start failed");
return -1;
}
/* start all slaves */
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
if (ret < 0) {
CS_LOG_ERR("Failed to start slave dev %u",
slave_dev_id);
return ret;
}
}
return 0;
}
/** Stop device */
static void
scheduler_pmd_stop(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
if (!dev->data->dev_started)
return;
/* stop all slaves first */
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
(*slave_dev->dev_ops->dev_stop)(slave_dev);
}
if (*sched_ctx->ops.scheduler_stop)
(*sched_ctx->ops.scheduler_stop)(dev);
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
if (*sched_ctx->ops.slave_detach)
(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
}
}
/** Close device */
static int
scheduler_pmd_close(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
int ret;
/* the dev should be stopped before being closed */
if (dev->data->dev_started)
return -EBUSY;
/* close all slaves first */
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
if (ret < 0)
return ret;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
if (qp_ctx->reorder_buf) {
rte_reorder_free(qp_ctx->reorder_buf);
qp_ctx->reorder_buf = NULL;
}
if (qp_ctx->private_qp_ctx) {
rte_free(qp_ctx->private_qp_ctx);
qp_ctx->private_qp_ctx = NULL;
}
}
if (sched_ctx->private_ctx)
rte_free(sched_ctx->private_ctx);
if (sched_ctx->capabilities)
rte_free(sched_ctx->capabilities);
return 0;
}
/** Get device statistics */
static void
scheduler_pmd_stats_get(struct rte_cryptodev *dev,
struct rte_cryptodev_stats *stats)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
struct rte_cryptodev_stats slave_stats = {0};
(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
stats->enqueued_count += slave_stats.enqueued_count;
stats->dequeued_count += slave_stats.dequeued_count;
stats->enqueue_err_count += slave_stats.enqueue_err_count;
stats->dequeue_err_count += slave_stats.dequeue_err_count;
}
}
/** Reset device statistics */
static void
scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *slave_dev =
rte_cryptodev_pmd_get_dev(slave_dev_id);
(*slave_dev->dev_ops->stats_reset)(slave_dev);
}
}
/** Get device info */
static void
scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
uint32_t i;
if (!dev_info)
return;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev_info slave_info;
rte_cryptodev_info_get(slave_dev_id, &slave_info);
max_nb_sessions = slave_info.sym.max_nb_sessions <
max_nb_sessions ?
slave_info.sym.max_nb_sessions :
max_nb_sessions;
}
dev_info->dev_type = dev->dev_type;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = max_nb_sessions;
}
/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
if (!qp_ctx)
return 0;
if (qp_ctx->reorder_buf)
rte_reorder_free(qp_ctx->reorder_buf);
if (qp_ctx->private_qp_ctx)
rte_free(qp_ctx->private_qp_ctx);
rte_free(qp_ctx);
dev->data->queue_pairs[qp_id] = NULL;
return 0;
}
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct scheduler_qp_ctx *qp_ctx;
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
"CRYTO_SCHE PMD %u QP %u",
dev->data->dev_id, qp_id) < 0) {
CS_LOG_ERR("Failed to create unique queue pair name");
return -EFAULT;
}
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
scheduler_pmd_qp_release(dev, qp_id);
/* Allocate the queue pair data structure. */
qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
socket_id);
if (qp_ctx == NULL)
return -ENOMEM;
dev->data->queue_pairs[qp_id] = qp_ctx;
if (*sched_ctx->ops.config_queue_pair) {
if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
CS_LOG_ERR("Unable to configure queue pair");
return -1;
}
}
return 0;
}
/** Start queue pair */
static int
scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint16_t queue_pair_id)
{
return -ENOTSUP;
}
/** Stop queue pair */
static int
scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint16_t queue_pair_id)
{
return -ENOTSUP;
}
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
{
return dev->data->nb_queue_pairs;
}
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct scheduler_session);
}
static int
config_slave_sess(struct scheduler_ctx *sched_ctx,
struct rte_crypto_sym_xform *xform,
struct scheduler_session *sess,
uint32_t create)
{
uint32_t i;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct scheduler_slave *slave = &sched_ctx->slaves[i];
struct rte_cryptodev *dev =
rte_cryptodev_pmd_get_dev(slave->dev_id);
if (sess->sessions[i]) {
if (create)
continue;
/* !create */
(*dev->dev_ops->session_clear)(dev,
(void *)sess->sessions[i]);
sess->sessions[i] = NULL;
} else {
if (!create)
continue;
/* create */
sess->sessions[i] =
rte_cryptodev_sym_session_create(
slave->dev_id, xform);
if (!sess->sessions[i]) {
config_slave_sess(sched_ctx, NULL, sess, 0);
return -1;
}
}
}
return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
void *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
config_slave_sess(sched_ctx, NULL, sess, 0);
memset(sess, 0, sizeof(struct scheduler_session));
}
static void *
scheduler_pmd_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
CS_LOG_ERR("unabled to config sym session");
return NULL;
}
return sess;
}
struct rte_cryptodev_ops scheduler_pmd_ops = {
.dev_configure = scheduler_pmd_config,
.dev_start = scheduler_pmd_start,
.dev_stop = scheduler_pmd_stop,
.dev_close = scheduler_pmd_close,
.stats_get = scheduler_pmd_stats_get,
.stats_reset = scheduler_pmd_stats_reset,
.dev_infos_get = scheduler_pmd_info_get,
.queue_pair_setup = scheduler_pmd_qp_setup,
.queue_pair_release = scheduler_pmd_qp_release,
.queue_pair_start = scheduler_pmd_qp_start,
.queue_pair_stop = scheduler_pmd_qp_stop,
.queue_pair_count = scheduler_pmd_qp_count,
.session_get_size = scheduler_pmd_session_get_size,
.session_configure = scheduler_pmd_session_configure,
.session_clear = scheduler_pmd_session_clear,
};
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
|
vicharl/containerdns
|
kdns/src/db_update.h
|
#ifndef __DB_UPDATE_H__
#define __DB_UPDATE_H__
#include "domain_store.h"
#include "zone.h"
#include "kdns.h"
#include "ctrl_msg.h"
#define DB_MAX_NAME_LEN 255
enum db_action {
DOMAN_ACTION_ADD,
DOMAN_ACTION_DEL
};
typedef struct domin_info_update {
ctrl_msg cmsg;
enum db_action action;
uint32_t ttl;
uint16_t type;
uint16_t prio;
uint16_t weight;
uint16_t port;
uint32_t maxAnswer;
unsigned int hashValue; // hash check
uint16_t lb_mode;
uint16_t lb_weight;
char view_name[DB_MAX_NAME_LEN];
char type_str[DB_MAX_NAME_LEN];
char zone_name[DB_MAX_NAME_LEN];
char domain_name[DB_MAX_NAME_LEN];
char host[DB_MAX_NAME_LEN];
struct domin_info_update *next;
} domin_info_update_st;
int domaindata_update(struct domain_store *db, struct domin_info_update *update);
int domaindata_soa_insert(struct domain_store *db, char *zone_name);
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
|
<gh_stars>100-1000
/*-
* BSD LICENSE
*
* Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>
#include "aesni_gcm_pmd_private.h"
/** GCM encode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_enc[] = {
[AESNI_GCM_KEY_128] = {
aesni_gcm128_init,
aesni_gcm128_enc_update,
aesni_gcm128_enc_finalize
},
[AESNI_GCM_KEY_256] = {
aesni_gcm256_init,
aesni_gcm256_enc_update,
aesni_gcm256_enc_finalize
}
};
/** GCM decode functions pointer table */
static const struct aesni_gcm_ops aesni_gcm_dec[] = {
[AESNI_GCM_KEY_128] = {
aesni_gcm128_init,
aesni_gcm128_dec_update,
aesni_gcm128_dec_finalize
},
[AESNI_GCM_KEY_256] = {
aesni_gcm256_init,
aesni_gcm256_dec_update,
aesni_gcm256_dec_finalize
}
};
/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_sym_xform *auth_xform;
const struct rte_crypto_sym_xform *cipher_xform;
if (xform->next == NULL || xform->next->next != NULL) {
GCM_LOG_ERR("Two and only two chained xform required");
return -EINVAL;
}
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform->next;
cipher_xform = xform;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
auth_xform = xform;
cipher_xform = xform->next;
} else {
GCM_LOG_ERR("Cipher and auth xform required");
return -EINVAL;
}
if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
GCM_LOG_ERR("We only support AES GCM and AES GMAC");
return -EINVAL;
}
/* Select Crypto operation */
if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
else {
GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
" Decrypt/Verify are valid only");
return -EINVAL;
}
/* Check key length and calculate GCM pre-compute. */
switch (cipher_xform->cipher.key.length) {
case 16:
aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_128;
break;
case 32:
aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_256;
break;
default:
GCM_LOG_ERR("Unsupported cipher key length");
return -EINVAL;
}
return 0;
}
/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
struct aesni_gcm_session *sess = NULL;
if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(op->session->dev_type
!= RTE_CRYPTODEV_AESNI_GCM_PMD))
return sess;
sess = (struct aesni_gcm_session *)op->session->_private;
} else {
void *_sess;
if (rte_mempool_get(qp->sess_mp, &_sess))
return sess;
sess = (struct aesni_gcm_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
if (unlikely(aesni_gcm_set_session_parameters(sess,
op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
}
return sess;
}
/**
* Process a crypto operation and complete a JOB_AES_HMAC job structure for
* submission to the multi buffer library for processing.
*
* @param qp queue pair
* @param op symmetric crypto operation
* @param session GCM session
*
* @return
*
*/
static int
process_gcm_crypto_op(struct rte_crypto_sym_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
struct rte_mbuf *m_src = op->m_src;
uint32_t offset = op->cipher.data.offset;
uint32_t part_len, total_len, data_len;
RTE_ASSERT(m_src != NULL);
while (offset >= m_src->data_len) {
offset -= m_src->data_len;
m_src = m_src->next;
RTE_ASSERT(m_src != NULL);
}
data_len = m_src->data_len - offset;
part_len = (data_len < op->cipher.data.length) ? data_len :
op->cipher.data.length;
/* Destination buffer is required when segmented source buffer */
RTE_ASSERT((part_len == op->cipher.data.length) ||
((part_len != op->cipher.data.length) &&
(op->m_dst != NULL)));
/* Segmented destination buffer is not supported */
RTE_ASSERT((op->m_dst == NULL) ||
((op->m_dst != NULL) &&
rte_pktmbuf_is_contiguous(op->m_dst)));
dst = op->m_dst ?
rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
op->cipher.data.offset) :
rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
op->cipher.data.offset);
src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
/* sanity checks */
if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
op->cipher.iv.length != 0) {
GCM_LOG_ERR("iv");
return -1;
}
/*
* GCM working in 12B IV mode => 16B pre-counter block we need
* to set BE LSB to 1, driver expects that 16B is allocated
*/
if (op->cipher.iv.length == 12) {
uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
*iv_padd = rte_bswap32(1);
}
if (op->auth.digest.length != 16 &&
op->auth.digest.length != 12 &&
op->auth.digest.length != 8) {
GCM_LOG_ERR("digest");
return -1;
}
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
aesni_gcm_enc[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
(uint64_t)op->auth.aad.length);
aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
(uint64_t)part_len);
total_len = op->cipher.data.length - part_len;
while (total_len) {
dst += part_len;
m_src = m_src->next;
RTE_ASSERT(m_src != NULL);
src = rte_pktmbuf_mtod(m_src, uint8_t *);
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
aesni_gcm_enc[session->key].update(&session->gdata,
dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
aesni_gcm_enc[session->key].finalize(&session->gdata,
op->auth.digest.data,
(uint64_t)op->auth.digest.length);
} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
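		/*
		 * Scratch space for the computed tag is appended to the mbuf
		 * here; post_process_gcm_crypto_op() compares it against the
		 * supplied digest and trims it off again.
		 */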
uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
op->m_dst : op->m_src,
op->auth.digest.length);
if (!auth_tag) {
GCM_LOG_ERR("auth_tag");
return -1;
}
aesni_gcm_dec[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
(uint64_t)op->auth.aad.length);
aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
(uint64_t)part_len);
total_len = op->cipher.data.length - part_len;
while (total_len) {
dst += part_len;
m_src = m_src->next;
RTE_ASSERT(m_src != NULL);
src = rte_pktmbuf_mtod(m_src, uint8_t *);
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
aesni_gcm_dec[session->key].update(&session->gdata,
dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
(uint64_t)op->auth.digest.length);
}
return 0;
}
/**
 * Post-process a completed GCM crypto operation
 *
 * @param op crypto operation to post-process
 *
 * For decryption operations the computed tag appended to the mbuf is
 * compared against the supplied digest, the operation status is set
 * accordingly and the scratch digest area is trimmed from the mbuf.
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
struct aesni_gcm_session *session =
(struct aesni_gcm_session *)op->sym->session->_private;
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
m->data_len - op->sym->auth.digest.length);
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
op->sym->auth.digest.data, op->sym->auth.digest.length);
rte_hexdump(stdout, "auth tag (calc):",
tag, op->sym->auth.digest.length);
#endif
if (memcmp(tag, op->sym->auth.digest.data,
op->sym->auth.digest.length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
rte_pktmbuf_trim(m, op->sym->auth.digest.length);
}
}
/**
 * Complete a processed GCM request
 *
 * @param qp Queue Pair to process
 * @param op crypto operation
 *
 * Post-processes the operation, frees the session of a session-less
 * operation and enqueues the operation on the processed ring.
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
struct rte_crypto_op *op)
{
post_process_gcm_crypto_op(op);
/* Free session if a session-less crypto op */
if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
rte_ring_enqueue(qp->processed_pkts, (void *)op);
}
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_session *sess;
struct aesni_gcm_qp *qp = queue_pair;
int i, retval = 0;
for (i = 0; i < nb_ops; i++) {
sess = aesni_gcm_get_session(qp, ops[i]->sym);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.enqueue_err_count++;
break;
}
retval = process_gcm_crypto_op(ops[i]->sym, sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.enqueue_err_count++;
break;
}
handle_completed_gcm_crypto_op(qp, ops[i]);
qp->qp_stats.enqueued_count++;
}
return i;
}
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_qp *qp = queue_pair;
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
(void **)ops, nb_ops);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
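/*
 * Illustrative usage sketch, not part of the PMD: an application drives this
 * PMD through the generic cryptodev burst API. The dev_id, qp_id, op_pool,
 * sess and mbuf names below are hypothetical, and the iv, aad and digest
 * fields must also be filled in before enqueueing.
 *
 *	struct rte_crypto_op *op =
 *		rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	rte_crypto_op_attach_sym_session(op, sess);
 *	op->sym->m_src = mbuf;
 *	op->sym->cipher.data.offset = 0;
 *	op->sym->cipher.data.length = rte_pktmbuf_data_len(mbuf);
 *	rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1);
 *	while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
 *		;
 */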
static int aesni_gcm_remove(const char *name);
static int
aesni_gcm_create(struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
if (init_params->name[0] == '\0') {
int ret = rte_cryptodev_pmd_create_dev_name(
init_params->name,
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
if (ret < 0) {
GCM_LOG_ERR("failed to create unique name");
return ret;
}
}
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
GCM_LOG_ERR("AES instructions not supported by CPU");
return -EFAULT;
}
dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_gcm_private), init_params->socket_id);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
dev->dev_ops = rte_aesni_gcm_pmd_ops;
/* register rx/tx burst functions for data path */
dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
GCM_LOG_ERR("driver %s: create failed", init_params->name);
aesni_gcm_remove(init_params->name);
return -EFAULT;
}
static int
aesni_gcm_probe(const char *name, const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
rte_socket_id(),
{0}
};
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
if (init_params.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
return aesni_gcm_create(&init_params);
}
static int
aesni_gcm_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
name, rte_socket_id());
return 0;
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
.probe = aesni_gcm_probe,
.remove = aesni_gcm_remove
};
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_eal/common/include/arch/arm/rte_vect.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2015 Cavium Networks. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium Networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_VECT_ARM_H_
#define _RTE_VECT_ARM_H_
#include <stdint.h>
#include "generic/rte_vect.h"
#include "arm_neon.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef int32x4_t xmm_t;
#define XMM_SIZE (sizeof(xmm_t))
#define XMM_MASK (XMM_SIZE - 1)
typedef union rte_xmm {
xmm_t x;
uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
double pd[XMM_SIZE / sizeof(double)];
} __attribute__((aligned(16))) rte_xmm_t;
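/*
 * Illustrative lane access through the union (values are examples only):
 *
 *	rte_xmm_t v;
 *	v.x = vdupq_n_s32(7);
 *	uint32_t lane0 = v.u32[0];	-- 7, as are the other three lanes --
 */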
#ifdef RTE_ARCH_ARM
/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A (AArch32) */
static __inline uint8x16_t
vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
uint8_t i, pos;
rte_xmm_t rte_a, rte_b, rte_ret;
vst1q_u8(rte_a.u8, a);
vst1q_u8(rte_b.u8, b);
for (i = 0; i < 16; i++) {
pos = rte_b.u8[i];
if (pos < 16)
rte_ret.u8[i] = rte_a.u8[pos];
else
rte_ret.u8[i] = 0;
}
return vld1q_u8(rte_ret.u8);
}
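/*
 * Example with a hypothetical input vector "data": reverse its bytes with the
 * emulated table lookup above; any index >= 16 yields a zero lane.
 *
 *	static const uint8_t idx[16] = {
 *		15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
 *	};
 *	uint8x16_t rev = vqtbl1q_u8(data, vld1q_u8(idx));
 */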
#endif
#ifdef __cplusplus
}
#endif
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/base/ef10_tx.c
|
/*
* Copyright (c) 2012-2016 Solarflare Communications Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of the FreeBSD Project.
*/
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
do { \
(_etp)->et_stat[_stat]++; \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#else
#define EFX_TX_QSTAT_INCR(_etp, _stat)
#endif
static __checkReturn efx_rc_t
efx_mcdi_init_txq(
__in efx_nic_t *enp,
__in uint32_t size,
__in uint32_t target_evq,
__in uint32_t label,
__in uint32_t instance,
__in uint16_t flags,
__in efsys_mem_t *esmp)
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
MC_CMD_INIT_TXQ_OUT_LEN)];
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
int i;
efx_rc_t rc;
EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
npages = EFX_TXQ_NBUFS(size);
if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
rc = EINVAL;
goto fail1;
}
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
(flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
(flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
INIT_TXQ_IN_CRC_MODE, 0,
INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
addr = EFSYS_MEM_ADDR(esmp);
for (i = 0; i < npages; i++) {
EFX_POPULATE_QWORD_2(*dma_addr,
EFX_DWORD_1, (uint32_t)(addr >> 32),
EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
dma_addr++;
addr += EFX_BUF_SIZE;
}
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail2;
}
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
static __checkReturn efx_rc_t
efx_mcdi_fini_txq(
__in efx_nic_t *enp,
__in uint32_t instance)
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
MC_CMD_FINI_TXQ_OUT_LEN)];
efx_rc_t rc;
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
efx_mcdi_execute_quiet(enp, &req);
if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
rc = req.emr_rc;
goto fail1;
}
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
ef10_tx_init(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
return (0);
}
void
ef10_tx_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
}
__checkReturn efx_rc_t
ef10_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
__in size_t n,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
__in efx_txq_t *etp,
__out unsigned int *addedp)
{
efx_qword_t desc;
efx_rc_t rc;
_NOTE(ARGUNUSED(id))
if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
esmp)) != 0)
goto fail1;
/*
* A previous user of this TX queue may have written a descriptor to the
* TX push collector, but not pushed the doorbell (e.g. after a crash).
* The next doorbell write would then push the stale descriptor.
*
* Ensure the (per network port) TX push collector is cleared by writing
* a no-op TX option descriptor. See bug29981 for details.
*/
*addedp = 1;
EFX_POPULATE_QWORD_4(desc,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
(flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
ESF_DZ_TX_OPTION_IP_CSUM,
(flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
ef10_tx_qpush(etp, *addedp, 0);
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
void
ef10_tx_qdestroy(
__in efx_txq_t *etp)
{
/* FIXME */
_NOTE(ARGUNUSED(etp))
/* FIXME */
}
__checkReturn efx_rc_t
ef10_tx_qpio_enable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
efx_piobuf_handle_t handle;
efx_rc_t rc;
if (etp->et_pio_size != 0) {
rc = EALREADY;
goto fail1;
}
/* Sub-allocate a PIO block from a piobuf */
if ((rc = ef10_nic_pio_alloc(enp,
&etp->et_pio_bufnum,
&handle,
&etp->et_pio_blknum,
&etp->et_pio_offset,
&etp->et_pio_size)) != 0) {
goto fail2;
}
EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
/* Link the piobuf to this TXQ */
if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
goto fail3;
}
/*
* et_pio_offset is the offset of the sub-allocated block within the
* hardware PIO buffer. It is used as the buffer address in the PIO
* option descriptor.
*
* et_pio_write_offset is the offset of the sub-allocated block from the
* start of the write-combined memory mapping, and is used for writing
* data into the PIO buffer.
*/
etp->et_pio_write_offset =
(etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
return (0);
fail3:
EFSYS_PROBE(fail3);
ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
etp->et_pio_size = 0;
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
void
ef10_tx_qpio_disable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
if (etp->et_pio_size != 0) {
/* Unlink the piobuf from this TXQ */
ef10_nic_pio_unlink(enp, etp->et_index);
/* Free the sub-allocated PIO block */
ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
etp->et_pio_size = 0;
etp->et_pio_write_offset = 0;
}
}
__checkReturn efx_rc_t
ef10_tx_qpio_write(
__in efx_txq_t *etp,
__in_ecount(length) uint8_t *buffer,
__in size_t length,
__in size_t offset)
{
efx_nic_t *enp = etp->et_enp;
efsys_bar_t *esbp = enp->en_esbp;
uint32_t write_offset;
uint32_t write_offset_limit;
efx_qword_t *eqp;
efx_rc_t rc;
EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
if (etp->et_pio_size == 0) {
rc = ENOENT;
goto fail1;
}
if (offset + length > etp->et_pio_size) {
rc = ENOSPC;
goto fail2;
}
/*
* Writes to PIO buffers must be 64 bit aligned, and multiples of
* 64 bits.
*/
write_offset = etp->et_pio_write_offset + offset;
write_offset_limit = write_offset + length;
eqp = (efx_qword_t *)buffer;
while (write_offset < write_offset_limit) {
EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
eqp++;
write_offset += sizeof (efx_qword_t);
}
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
ef10_tx_qpio_post(
__in efx_txq_t *etp,
__in size_t pkt_length,
__in unsigned int completed,
__inout unsigned int *addedp)
{
efx_qword_t pio_desc;
unsigned int id;
size_t offset;
unsigned int added = *addedp;
efx_rc_t rc;
if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
if (etp->et_pio_size == 0) {
rc = ENOENT;
goto fail2;
}
id = added++ & etp->et_mask;
offset = id * sizeof (efx_qword_t);
EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
unsigned int, id, uint32_t, etp->et_pio_offset,
size_t, pkt_length);
EFX_POPULATE_QWORD_5(pio_desc,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, 1,
ESF_DZ_TX_PIO_CONT, 0,
ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
*addedp = added;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
ef10_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
__in unsigned int completed,
__inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
efx_rc_t rc;
if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
for (i = 0; i < n; i++) {
efx_buffer_t *ebp = &eb[i];
efsys_dma_addr_t addr = ebp->eb_addr;
size_t size = ebp->eb_size;
boolean_t eop = ebp->eb_eop;
unsigned int id;
size_t offset;
efx_qword_t qword;
/* Fragments must not span 4k boundaries. */
EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));
id = added++ & etp->et_mask;
offset = id * sizeof (efx_qword_t);
EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
unsigned int, id, efsys_dma_addr_t, addr,
size_t, size, boolean_t, eop);
EFX_POPULATE_QWORD_5(qword,
ESF_DZ_TX_KER_TYPE, 0,
ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
}
EFX_TX_QSTAT_INCR(etp, TX_POST);
*addedp = added;
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
/*
 * This improves performance by pushing a TX descriptor at the same time as the
 * doorbell. The descriptor must also be added to the TXQ, so that it can be
 * used if the hardware decides not to use the pushed descriptor.
 */
void
ef10_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed)
{
efx_nic_t *enp = etp->et_enp;
unsigned int wptr;
unsigned int id;
size_t offset;
efx_qword_t desc;
efx_oword_t oword;
wptr = added & etp->et_mask;
id = pushed & etp->et_mask;
offset = id * sizeof (efx_qword_t);
EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
EFX_POPULATE_OWORD_3(oword,
ERF_DZ_TX_DESC_WPTR, wptr,
ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
EFSYS_PIO_WRITE_BARRIER();
EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
&oword);
}
__checkReturn efx_rc_t
ef10_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
__in unsigned int completed,
__inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
efx_rc_t rc;
if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
for (i = 0; i < n; i++) {
efx_desc_t *edp = &ed[i];
unsigned int id;
size_t offset;
id = added++ & etp->et_mask;
offset = id * sizeof (efx_desc_t);
EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
}
EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
unsigned int, added, unsigned int, n);
EFX_TX_QSTAT_INCR(etp, TX_POST);
*addedp = added;
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
void
ef10_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
__in boolean_t eop,
__out efx_desc_t *edp)
{
/* Fragments must not span 4k boundaries. */
EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);
EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
efsys_dma_addr_t, addr,
size_t, size, boolean_t, eop);
EFX_POPULATE_QWORD_5(edp->ed_eq,
ESF_DZ_TX_KER_TYPE, 0,
ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}
void
ef10_tx_qdesc_tso_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
__in uint8_t tcp_flags,
__out efx_desc_t *edp)
{
EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
uint16_t, ipv4_id, uint32_t, tcp_seq,
uint8_t, tcp_flags);
EFX_POPULATE_QWORD_5(edp->ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}
void
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
__in int count)
{
EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
uint16_t, ipv4_id, uint32_t, tcp_seq,
uint16_t, tcp_mss);
EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
EFX_POPULATE_QWORD_5(edp[0].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
EFX_POPULATE_QWORD_4(edp[1].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}
void
ef10_tx_qdesc_vlantci_create(
__in efx_txq_t *etp,
__in uint16_t tci,
__out efx_desc_t *edp)
{
EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
uint16_t, tci);
EFX_POPULATE_QWORD_4(edp->ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_VLAN,
ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
ESF_DZ_TX_VLAN_TAG1, tci);
}
__checkReturn efx_rc_t
ef10_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns)
{
efx_rc_t rc;
/* FIXME */
_NOTE(ARGUNUSED(etp, ns))
_NOTE(CONSTANTCONDITION)
if (B_FALSE) {
rc = ENOTSUP;
goto fail1;
}
/* FIXME */
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
ef10_tx_qflush(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
efx_rc_t rc;
if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
goto fail1;
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
void
ef10_tx_qenable(
__in efx_txq_t *etp)
{
/* FIXME */
_NOTE(ARGUNUSED(etp))
/* FIXME */
}
#if EFSYS_OPT_QSTATS
void
ef10_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
unsigned int id;
for (id = 0; id < TX_NQSTATS; id++) {
efsys_stat_t *essp = &stat[id];
EFSYS_STAT_INCR(essp, etp->et_stat[id]);
etp->et_stat[id] = 0;
}
}
#endif /* EFSYS_OPT_QSTATS */
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_eal/common/include/generic/rte_io.h
|
/*
* BSD LICENSE
*
* Copyright(c) 2016 Cavium networks. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_IO_H_
#define _RTE_IO_H_
/**
* @file
* I/O device memory operations
*
* This file defines the generic API for I/O device memory read/write operations
*/
#include <stdint.h>
#include <rte_common.h>
#include <rte_atomic.h>
#ifdef __DOXYGEN__
/**
 * Read an 8-bit value from I/O device memory address *addr*.
 *
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint8_t
rte_read8_relaxed(const volatile void *addr);
/**
* Read a 16-bit value from I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint16_t
rte_read16_relaxed(const volatile void *addr);
/**
* Read a 32-bit value from I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint32_t
rte_read32_relaxed(const volatile void *addr);
/**
* Read a 64-bit value from I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint64_t
rte_read64_relaxed(const volatile void *addr);
/**
 * Write an 8-bit value to I/O device memory address *addr*.
 *
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write8_relaxed(uint8_t value, volatile void *addr);
/**
* Write a 16-bit value to I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write16_relaxed(uint16_t value, volatile void *addr);
/**
* Write a 32-bit value to I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write32_relaxed(uint32_t value, volatile void *addr);
/**
* Write a 64-bit value to I/O device memory address *addr*.
*
 * The relaxed version does not have an additional I/O memory barrier, which is
 * useful when accessing the device registers of integrated controllers that
 * are implicitly strongly ordered with respect to memory access.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write64_relaxed(uint64_t value, volatile void *addr);
/**
 * Read an 8-bit value from I/O device memory address *addr*.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint8_t
rte_read8(const volatile void *addr);
/**
* Read a 16-bit value from I/O device memory address *addr*.
 *
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint16_t
rte_read16(const volatile void *addr);
/**
* Read a 32-bit value from I/O device memory address *addr*.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint32_t
rte_read32(const volatile void *addr);
/**
* Read a 64-bit value from I/O device memory address *addr*.
*
* @param addr
* I/O memory address to read the value from
* @return
* read value
*/
static inline uint64_t
rte_read64(const volatile void *addr);
/**
 * Write an 8-bit value to I/O device memory address *addr*.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write8(uint8_t value, volatile void *addr);
/**
* Write a 16-bit value to I/O device memory address *addr*.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write16(uint16_t value, volatile void *addr);
/**
* Write a 32-bit value to I/O device memory address *addr*.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write32(uint32_t value, volatile void *addr);
/**
* Write a 64-bit value to I/O device memory address *addr*.
*
* @param value
* Value to write
* @param addr
* I/O memory address to write the value to
*/
static inline void
rte_write64(uint64_t value, volatile void *addr);
#endif /* __DOXYGEN__ */
#ifndef RTE_OVERRIDE_IO_H
static inline uint8_t __attribute__((always_inline))
rte_read8_relaxed(const volatile void *addr)
{
return *(const volatile uint8_t *)addr;
}
static inline uint16_t __attribute__((always_inline))
rte_read16_relaxed(const volatile void *addr)
{
return *(const volatile uint16_t *)addr;
}
static inline uint32_t __attribute__((always_inline))
rte_read32_relaxed(const volatile void *addr)
{
return *(const volatile uint32_t *)addr;
}
static inline uint64_t __attribute__((always_inline))
rte_read64_relaxed(const volatile void *addr)
{
return *(const volatile uint64_t *)addr;
}
static inline void __attribute__((always_inline))
rte_write8_relaxed(uint8_t value, volatile void *addr)
{
*(volatile uint8_t *)addr = value;
}
static inline void __attribute__((always_inline))
rte_write16_relaxed(uint16_t value, volatile void *addr)
{
*(volatile uint16_t *)addr = value;
}
static inline void __attribute__((always_inline))
rte_write32_relaxed(uint32_t value, volatile void *addr)
{
*(volatile uint32_t *)addr = value;
}
static inline void __attribute__((always_inline))
rte_write64_relaxed(uint64_t value, volatile void *addr)
{
*(volatile uint64_t *)addr = value;
}
static inline uint8_t __attribute__((always_inline))
rte_read8(const volatile void *addr)
{
uint8_t val;
val = rte_read8_relaxed(addr);
rte_io_rmb();
return val;
}
static inline uint16_t __attribute__((always_inline))
rte_read16(const volatile void *addr)
{
uint16_t val;
val = rte_read16_relaxed(addr);
rte_io_rmb();
return val;
}
static inline uint32_t __attribute__((always_inline))
rte_read32(const volatile void *addr)
{
uint32_t val;
val = rte_read32_relaxed(addr);
rte_io_rmb();
return val;
}
static inline uint64_t __attribute__((always_inline))
rte_read64(const volatile void *addr)
{
uint64_t val;
val = rte_read64_relaxed(addr);
rte_io_rmb();
return val;
}
static inline void __attribute__((always_inline))
rte_write8(uint8_t value, volatile void *addr)
{
rte_io_wmb();
rte_write8_relaxed(value, addr);
}
static inline void __attribute__((always_inline))
rte_write16(uint16_t value, volatile void *addr)
{
rte_io_wmb();
rte_write16_relaxed(value, addr);
}
static inline void __attribute__((always_inline))
rte_write32(uint32_t value, volatile void *addr)
{
rte_io_wmb();
rte_write32_relaxed(value, addr);
}
static inline void __attribute__((always_inline))
rte_write64(uint64_t value, volatile void *addr)
{
rte_io_wmb();
rte_write64_relaxed(value, addr);
}
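/*
 * Usage note (illustrative; ring_base, desc, tail and doorbell_addr are
 * hypothetical): the relaxed accessors are meant for batching. A common
 * pattern is to fill device descriptors with relaxed writes and then ring
 * the doorbell with rte_write32(), whose implicit rte_io_wmb() orders the
 * descriptor writes before the doorbell write.
 *
 *	for (i = 0; i < n; i++)
 *		rte_write64_relaxed(desc[i],
 *			(volatile char *)ring_base + i * 8);
 *	rte_write32(tail, doorbell_addr);
 */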
#endif /* RTE_OVERRIDE_IO_H */
#endif /* _RTE_IO_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
#include "aesni_gcm_ops.h"
#define GCM_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
__func__, __LINE__, ## args)
#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
#define GCM_LOG_INFO(fmt, args...) \
RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
__func__, __LINE__, ## args)
#define GCM_LOG_DBG(fmt, args...) \
RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
__func__, __LINE__, ## args)
#else
#define GCM_LOG_INFO(fmt, args...)
#define GCM_LOG_DBG(fmt, args...)
#endif
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
/**< Max number of sessions supported by device */
};
struct aesni_gcm_qp {
uint16_t id;
/**< Queue Pair Identifier */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_pkts;
	/**< Ring for placing processed packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
enum aesni_gcm_operation {
AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
};
enum aesni_gcm_key {
AESNI_GCM_KEY_128,
AESNI_GCM_KEY_256
};
/** AESNI GCM private session structure */
struct aesni_gcm_session {
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;
/**< GCM key type */
struct gcm_data gdata __rte_cache_aligned;
/**< GCM parameters */
};
/**
* Setup GCM session parameters
* @param sess aesni gcm session structure
* @param xform crypto transform chain
*
* @return
* - On success returns 0
* - On failure returns error code < 0
*/
extern int
aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
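/*
 * Illustrative xform chain accepted by this function (a DPDK 17.02 style
 * cipher + auth chain; "key" and the lengths are examples only, and the
 * reverse ordering is also possible):
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_AES_GCM,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *		},
 *	};
 *	aesni_gcm_set_session_parameters(sess, &cipher);
 */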
/** Device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/szedata2/rte_eth_szedata2.h
|
/*-
* BSD LICENSE
*
* Copyright (c) 2015 - 2016 CESNET
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of CESNET nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RTE_PMD_SZEDATA2_H_
#define RTE_PMD_SZEDATA2_H_
#include <stdbool.h>
#include <rte_byteorder.h>
/* PCI Vendor ID */
#define PCI_VENDOR_ID_NETCOPE 0x1b26
/* PCI Device IDs */
#define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80
#define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1
#define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1
/* number of PCI resource used by COMBO card */
#define PCI_RESOURCE_NUMBER 0
/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */
#define RTE_SZE2_PACKET_HEADER_SIZE 4
#define RTE_SZE2_MMIO_MAX 10
/*!
* Round 'what' to the nearest larger (or equal) multiple of '8'
* (szedata2 packet is aligned to 8 bytes)
*/
#define RTE_SZE2_ALIGN8(what) (((what) + ((8) - 1)) & (~((8) - 1)))
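/*
 * For example: RTE_SZE2_ALIGN8(0) == 0, RTE_SZE2_ALIGN8(5) == 8,
 * RTE_SZE2_ALIGN8(8) == 8 and RTE_SZE2_ALIGN8(9) == 16.
 */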
/*! main handle structure */
struct szedata {
int fd;
struct sze2_instance_info *info;
uint32_t *write_size;
void *space[RTE_SZE2_MMIO_MAX];
struct szedata_lock lock[2][2];
__u32 *rx_asize, *tx_asize;
/* szedata_read_next variables - to keep context (ct) */
/*
* rx
*/
/** initial sze lock ptr */
const struct szedata_lock *ct_rx_lck_orig;
/** current sze lock ptr (initial or next) */
const struct szedata_lock *ct_rx_lck;
/** remaining bytes (not read) within current lock */
unsigned int ct_rx_rem_bytes;
/** current pointer to locked memory */
unsigned char *ct_rx_cur_ptr;
/**
* allocated buffer to store RX packet if it was split
* into 2 buffers
*/
unsigned char *ct_rx_buffer;
/** registered function to provide filtering based on hwdata */
int (*ct_rx_filter)(u_int16_t hwdata_len, u_char *hwdata);
/*
* tx
*/
/**
* buffer for tx - packet is prepared here
* (in future for burst write)
*/
unsigned char *ct_tx_buffer;
/** initial sze TX lock ptrs - number according to TX interfaces */
const struct szedata_lock **ct_tx_lck_orig;
/** current sze TX lock ptrs - number according to TX interfaces */
const struct szedata_lock **ct_tx_lck;
/** already written bytes in both locks */
unsigned int *ct_tx_written_bytes;
/** remaining bytes (not written) within current lock */
unsigned int *ct_tx_rem_bytes;
/** current pointers to locked memory */
unsigned char **ct_tx_cur_ptr;
/** NUMA node closest to PCIe device, or -1 */
int numa_node;
};
/*
* @return Byte from PCI resource at offset "offset".
*/
static inline uint8_t
pci_resource_read8(struct rte_mem_resource *rsc, uint32_t offset)
{
return *((uint8_t *)((uint8_t *)rsc->addr + offset));
}
/*
* @return Two bytes from PCI resource starting at offset "offset".
*/
static inline uint16_t
pci_resource_read16(struct rte_mem_resource *rsc, uint32_t offset)
{
return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)rsc->addr +
offset)));
}
/*
* @return Four bytes from PCI resource starting at offset "offset".
*/
static inline uint32_t
pci_resource_read32(struct rte_mem_resource *rsc, uint32_t offset)
{
return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)rsc->addr +
offset)));
}
/*
* @return Eight bytes from PCI resource starting at offset "offset".
*/
static inline uint64_t
pci_resource_read64(struct rte_mem_resource *rsc, uint32_t offset)
{
return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)rsc->addr +
offset)));
}
/*
* Write one byte to PCI resource address space at offset "offset".
*/
static inline void
pci_resource_write8(struct rte_mem_resource *rsc, uint32_t offset, uint8_t val)
{
*((uint8_t *)((uint8_t *)rsc->addr + offset)) = val;
}
/*
* Write two bytes to PCI resource address space at offset "offset".
*/
static inline void
pci_resource_write16(struct rte_mem_resource *rsc, uint32_t offset,
uint16_t val)
{
*((uint16_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_16(val);
}
/*
* Write four bytes to PCI resource address space at offset "offset".
*/
static inline void
pci_resource_write32(struct rte_mem_resource *rsc, uint32_t offset,
uint32_t val)
{
*((uint32_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_32(val);
}
/*
* Write eight bytes to PCI resource address space at offset "offset".
*/
static inline void
pci_resource_write64(struct rte_mem_resource *rsc, uint32_t offset,
uint64_t val)
{
*((uint64_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_64(val);
}
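/*
 * Illustrative read-modify-write through the helpers above (CTRL_OFF is a
 * hypothetical register offset); the helpers hide the little-endian
 * conversion of the underlying resource.
 *
 *	uint32_t v = pci_resource_read32(rsc, CTRL_OFF);
 *	pci_resource_write32(rsc, CTRL_OFF, v | 0x1);
 */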
#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
((type)(((uint8_t *)(rsc)->addr) + (offset)))
enum szedata2_link_speed {
SZEDATA2_LINK_SPEED_DEFAULT = 0,
SZEDATA2_LINK_SPEED_10G,
SZEDATA2_LINK_SPEED_40G,
SZEDATA2_LINK_SPEED_100G,
};
enum szedata2_mac_check_mode {
SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
};
/*
* Structure describes CGMII IBUF address space
*/
struct szedata2_cgmii_ibuf {
/** Total Received Frames Counter low part */
uint32_t trfcl;
/** Correct Frames Counter low part */
uint32_t cfcl;
/** Discarded Frames Counter low part */
uint32_t dfcl;
/** Counter of frames discarded due to buffer overflow low part */
uint32_t bodfcl;
/** Total Received Frames Counter high part */
uint32_t trfch;
/** Correct Frames Counter high part */
uint32_t cfch;
/** Discarded Frames Counter high part */
uint32_t dfch;
/** Counter of frames discarded due to buffer overflow high part */
uint32_t bodfch;
/** IBUF enable register */
uint32_t ibuf_en;
/** Error mask register */
uint32_t err_mask;
/** IBUF status register */
uint32_t ibuf_st;
/** IBUF command register */
uint32_t ibuf_cmd;
/** Minimum frame length allowed */
uint32_t mfla;
/** Frame MTU */
uint32_t mtu;
/** MAC address check mode */
uint32_t mac_chmode;
/** Octets Received OK Counter low part */
uint32_t orocl;
/** Octets Received OK Counter high part */
uint32_t oroch;
} __rte_packed;
/* Offset of CGMII IBUF memory for MAC addresses */
#define SZEDATA2_CGMII_IBUF_MAC_MEM_OFF 0x80
/*
* @return
* true if IBUF is enabled
* false if IBUF is disabled
*/
static inline bool
cgmii_ibuf_is_enabled(volatile struct szedata2_cgmii_ibuf *ibuf)
{
return ((rte_le_to_cpu_32(ibuf->ibuf_en) & 0x1) != 0) ? true : false;
}
/*
* Enables IBUF.
*/
static inline void
cgmii_ibuf_enable(volatile struct szedata2_cgmii_ibuf *ibuf)
{
ibuf->ibuf_en =
rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) | 0x1);
}
/*
* Disables IBUF.
*/
static inline void
cgmii_ibuf_disable(volatile struct szedata2_cgmii_ibuf *ibuf)
{
ibuf->ibuf_en =
rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) & ~0x1);
}
/*
* @return
* true if ibuf link is up
* false if ibuf link is down
*/
static inline bool
cgmii_ibuf_is_link_up(volatile struct szedata2_cgmii_ibuf *ibuf)
{
return ((rte_le_to_cpu_32(ibuf->ibuf_st) & 0x80) != 0) ? true : false;
}
/*
* @return
* MAC address check mode
*/
static inline enum szedata2_mac_check_mode
cgmii_ibuf_mac_mode_read(volatile struct szedata2_cgmii_ibuf *ibuf)
{
switch (rte_le_to_cpu_32(ibuf->mac_chmode) & 0x3) {
case 0x0:
return SZEDATA2_MAC_CHMODE_PROMISC;
case 0x1:
return SZEDATA2_MAC_CHMODE_ONLY_VALID;
case 0x2:
return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
case 0x3:
return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
default:
return SZEDATA2_MAC_CHMODE_PROMISC;
}
}
/*
* Writes "mode" in MAC address check mode register.
*/
static inline void
cgmii_ibuf_mac_mode_write(volatile struct szedata2_cgmii_ibuf *ibuf,
enum szedata2_mac_check_mode mode)
{
ibuf->mac_chmode = rte_cpu_to_le_32(
(rte_le_to_cpu_32(ibuf->mac_chmode) & ~0x3) | mode);
}
/*
* Structure describes CGMII OBUF address space
*/
struct szedata2_cgmii_obuf {
/** Total Sent Frames Counter low part */
uint32_t tsfcl;
/** Octets Sent Counter low part */
uint32_t oscl;
/** Total Discarded Frames Counter low part */
uint32_t tdfcl;
/** reserved */
uint32_t reserved1;
/** Total Sent Frames Counter high part */
uint32_t tsfch;
/** Octets Sent Counter high part */
uint32_t osch;
/** Total Discarded Frames Counter high part */
uint32_t tdfch;
/** reserved */
uint32_t reserved2;
/** OBUF enable register */
uint32_t obuf_en;
/** reserved */
uint64_t reserved3;
/** OBUF control register */
uint32_t ctrl;
/** OBUF status register */
uint32_t obuf_st;
} __rte_packed;
/*
* @return
* true if OBUF is enabled
* false if OBUF is disabled
*/
static inline bool
cgmii_obuf_is_enabled(volatile struct szedata2_cgmii_obuf *obuf)
{
return ((rte_le_to_cpu_32(obuf->obuf_en) & 0x1) != 0) ? true : false;
}
/*
* Enables OBUF.
*/
static inline void
cgmii_obuf_enable(volatile struct szedata2_cgmii_obuf *obuf)
{
obuf->obuf_en =
rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) | 0x1);
}
/*
* Disables OBUF.
*/
static inline void
cgmii_obuf_disable(volatile struct szedata2_cgmii_obuf *obuf)
{
obuf->obuf_en =
rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) & ~0x1);
}
/*
 * The function takes the value from the IBUF status register. The values in
 * IBUF and OBUF should be the same.
*
* @return Link speed constant.
*/
static inline enum szedata2_link_speed
cgmii_link_speed(volatile struct szedata2_cgmii_ibuf *ibuf)
{
uint32_t speed = (rte_le_to_cpu_32(ibuf->ibuf_st) & 0x70) >> 4;
switch (speed) {
case 0x03:
return SZEDATA2_LINK_SPEED_10G;
case 0x04:
return SZEDATA2_LINK_SPEED_40G;
case 0x05:
return SZEDATA2_LINK_SPEED_100G;
default:
return SZEDATA2_LINK_SPEED_DEFAULT;
}
}
/*
* IBUFs and OBUFs can generally be located at different offsets in different
* firmwares.
* This part defines base offsets of IBUFs and OBUFs through various firmwares.
* Currently one firmware type is supported.
* Type of firmware is set through configuration option
* CONFIG_RTE_LIBRTE_PMD_SZEDATA_AS.
* Possible values are:
* 0 - for firmwares:
* NIC_100G1_LR4
* HANIC_100G1_LR4
* HANIC_100G1_SR10
*/
#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
/*
* CGMII IBUF offset from the beginning of PCI resource address space.
*/
#define SZEDATA2_CGMII_IBUF_BASE_OFF 0x8000
/*
* Size of CGMII IBUF.
*/
#define SZEDATA2_CGMII_IBUF_SIZE 0x200
/*
 * CGMII OBUF offset from the beginning of PCI resource address space.
*/
#define SZEDATA2_CGMII_OBUF_BASE_OFF 0x9000
/*
* Size of CGMII OBUF.
*/
#define SZEDATA2_CGMII_OBUF_SIZE 0x100
#else
#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
#endif
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/ixgbe/base/ixgbe_x550.h
|
/*******************************************************************************
Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_X550_H_
#define _IXGBE_X550_H_
#include "ixgbe_type.h"
s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size);
s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val);
s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
u16 data);
s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data);
void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
unsigned int pool);
void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
bool enable, int vf);
s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data);
s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data);
void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf);
enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw);
s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
#endif /* _IXGBE_X550_H_ */
|
vicharl/containerdns
|
kdns/core/kdns.h
|
/*
* kdns.h -- kdns(8) definitions and prototypes
*
* Copyright (c) 2001-2006, NLnet Labs.
*
* Modified Work Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef _NSD_H_
#define _NSD_H_
#include "dns.h"
#define MAX_CORES 64
#define DEFAULT_VIEW_NAME "no_info"
#define MAX_VIEW_NAME_LEN 32
#define VIEW_MATCH_DEF 0
#define VIEW_MATCH_NAME 1
#define VIEW_MATCH_NONE 2
/* max number of matched RRs */
#define VIEW_MATCH_MAX_NUM 1024
/* configuration and run-time variables */
typedef struct kdns kdns_type;
struct kdns
{
struct domain_store *db;
/*
uint16_t *compressed_domain_name_offsets ;
uint32_t compression_tablecapacity ;
uint32_t compression_table_size ;
*/
};
/* extra domain numbers for temporary domains */
#define EXTRA_DOMAIN_NUMBERS 2048000
#define EDNS_MAX_MESSAGE_LEN 4096
#define UDP_PORT "53"
#define UDP_MAX_MESSAGE_LEN 512
#define TCP_MAX_MESSAGE_LEN 65535
#endif /* _NSD_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/virtio/virtio_user/virtio_user_dev.c
|
<filename>kdns/dpdk-17.02/drivers/net/virtio/virtio_user/virtio_user_dev.c<gh_stars>100-1000
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL comes
 * first, because vhost depends on this message to allocate the virtqueue
 * pair.
*/
int callfd;
struct vhost_vring_file file;
/* An invalid flag could be used here, but some backends use kickfd and
 * callfd as criteria to judge whether the device is alive, so a real
 * eventfd is used.
*/
callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (callfd < 0) {
PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
return -1;
}
file.index = queue_sel;
file.fd = callfd;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
dev->callfds[queue_sel] = callfd;
return 0;
}
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
int kickfd;
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
struct vhost_vring_addr addr = {
.index = queue_sel,
.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
.used_user_addr = (uint64_t)(uintptr_t)vring->used,
.log_guest_addr = 0,
.flags = 0, /* disable log */
};
state.index = queue_sel;
state.num = vring->num;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
state.index = queue_sel;
state.num = 0; /* no reservation */
dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
* lastly because vhost depends on this msg to judge if
* virtio is ready.
*/
kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (kickfd < 0) {
PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
return -1;
}
file.index = queue_sel;
file.fd = kickfd;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
dev->kickfds[queue_sel] = kickfd;
return 0;
}
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
int (*fn)(struct virtio_user_dev *, uint32_t))
{
uint32_t i, queue_sel;
for (i = 0; i < dev->max_queue_pairs; ++i) {
queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
if (fn(dev, queue_sel) < 0) {
PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
return -1;
}
}
for (i = 0; i < dev->max_queue_pairs; ++i) {
queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
if (fn(dev, queue_sel) < 0) {
PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
return -1;
}
}
return 0;
}
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
uint64_t features;
int ret;
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
/* Step 1: set features */
features = dev->features;
/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
features &= ~(1ull << VIRTIO_NET_F_MAC);
/* Strip VIRTIO_NET_F_CTRL_VQ, as the vhost backend does not really need to know about it */
features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
if (ret < 0)
goto error;
PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
/* Step 2: share memory regions */
ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
if (ret < 0)
goto error;
/* Step 3: kick queues */
if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
goto error;
/* Step 4: enable queues
* we enable the 1st queue pair by default.
*/
dev->ops->enable_qp(dev, 0, 1);
return 0;
error:
/* TODO: free resource here or caller to check */
return -1;
}
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
close(dev->callfds[i]);
close(dev->kickfds[i]);
}
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
free(dev->ifname);
dev->ifname = NULL;
return 0;
}
static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
int i, r;
uint32_t tmp[ETHER_ADDR_LEN];
if (!mac)
return;
r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
if (r == ETHER_ADDR_LEN) {
for (i = 0; i < ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = (uint8_t)tmp[i];
dev->mac_specified = 1;
} else {
/* ignore the malformed mac, use a random mac */
PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
}
}
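/* Illustrative note (added for clarity, not in the original source):
 * parse_mac() accepts strings of the form "52:54:00:12:34:56"; anything
 * that does not scan as six hex octets is ignored and a random MAC is
 * used instead.
 */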
static int
is_vhost_user_by_type(const char *path)
{
struct stat sb;
if (stat(path, &sb) == -1)
return 0;
return S_ISSOCK(sb.st_mode);
}
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
uint32_t i, q;
dev->vhostfd = -1;
for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
dev->kickfds[i] = -1;
dev->callfds[i] = -1;
}
dev->vhostfds = NULL;
dev->tapfds = NULL;
if (is_vhost_user_by_type(dev->path)) {
dev->ops = &ops_user;
} else {
dev->ops = &ops_kernel;
dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
if (!dev->vhostfds || !dev->tapfds) {
PMD_INIT_LOG(ERR, "Failed to malloc");
return -1;
}
for (q = 0; q < dev->max_queue_pairs; ++q) {
dev->vhostfds[q] = -1;
dev->tapfds[q] = -1;
}
}
return dev->ops->setup(dev);
}
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac)
{
snprintf(dev->path, PATH_MAX, "%s", path);
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
parse_mac(dev, mac);
if (virtio_user_dev_setup(dev) < 0) {
PMD_INIT_LOG(ERR, "backend set up fails");
return -1;
}
if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
return -1;
}
if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
&dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
return -1;
}
if (dev->mac_specified)
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
if (cq) {
/* device does not really need to know anything about CQ,
* so if necessary, we just claim to support CQ
*/
dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
} else {
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
return 0;
}
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
uint32_t i;
virtio_user_stop_device(dev);
close(dev->vhostfd);
if (dev->vhostfds) {
for (i = 0; i < dev->max_queue_pairs; ++i)
close(dev->vhostfds[i]);
free(dev->vhostfds);
free(dev->tapfds);
}
}
static uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
uint16_t i;
uint8_t ret = 0;
if (q_pairs > dev->max_queue_pairs) {
PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
q_pairs, dev->max_queue_pairs);
return -1;
}
for (i = 0; i < q_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 0);
dev->queue_pairs = q_pairs;
return ret;
}
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
uint16_t idx_hdr)
{
struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
uint16_t i, idx_data, idx_status;
uint32_t n_descs = 0;
/* locate desc for header, data, and status */
idx_data = vring->desc[idx_hdr].next;
n_descs++;
i = idx_data;
while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
i = vring->desc[i].next;
n_descs++;
}
/* locate desc for status */
idx_status = i;
n_descs++;
hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
if (hdr->class == VIRTIO_NET_CTRL_MQ &&
hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
status = virtio_user_handle_mq(dev, queues);
}
/* Update status */
*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
return n_descs;
}
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
uint16_t avail_idx, desc_idx;
struct vring_used_elem *uep;
uint32_t n_descs;
struct vring *vring = &dev->vrings[queue_idx];
/* Consume avail ring, using used ring idx as first one */
while (vring->used->idx != vring->avail->idx) {
avail_idx = (vring->used->idx) & (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
/* Update used ring */
uep = &vring->used->ring[avail_idx];
uep->id = avail_idx;
uep->len = n_descs;
vring->used->idx++;
}
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_cryptodev/rte_cryptodev.h
|
<filename>kdns/dpdk-17.02/lib/librte_cryptodev/rte_cryptodev.h
/*-
*
* Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_CRYPTODEV_H_
#define _RTE_CRYPTODEV_H_
/**
* @file rte_cryptodev.h
*
* RTE Cryptographic Device APIs
*
* Defines RTE Crypto Device APIs for the provisioning of cipher and
* authentication operations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "rte_kvargs.h"
#include "rte_crypto.h"
#include "rte_dev.h"
#include <rte_common.h>
#define CRYPTODEV_NAME_NULL_PMD crypto_null
/**< Null crypto PMD device name */
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< AES-NI GCM PMD device name */
#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
/**< Open SSL Crypto PMD device name */
#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
/**< Intel QAT Symmetric Crypto PMD device name */
#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< SNOW 3G PMD device name */
#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< KASUMI PMD device name */
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< ZUC PMD device name */
#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
/**< ARMv8 Crypto PMD device name */
#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
/**< Scheduler Crypto PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_GCM_PMD, /**< AES-NI GCM PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
RTE_CRYPTODEV_SNOW3G_PMD, /**< SNOW 3G PMD */
RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */
RTE_CRYPTODEV_ZUC_PMD, /**< ZUC PMD */
RTE_CRYPTODEV_OPENSSL_PMD, /**< OpenSSL PMD */
RTE_CRYPTODEV_ARMV8_PMD, /**< ARMv8 crypto PMD */
RTE_CRYPTODEV_SCHEDULER_PMD, /**< Crypto Scheduler PMD */
};
extern const char **rte_cyptodev_names;
/* Logging Macros */
#define CDEV_LOG_ERR(...) \
RTE_LOG(ERR, CRYPTODEV, \
RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
#define CDEV_PMD_LOG_ERR(dev, ...) \
RTE_LOG(ERR, CRYPTODEV, \
RTE_FMT("[%s] %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
dev, __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define CDEV_LOG_DEBUG(...) \
RTE_LOG(DEBUG, CRYPTODEV, \
RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
#define CDEV_PMD_TRACE(...) \
RTE_LOG(DEBUG, CRYPTODEV, \
RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
#else
#define CDEV_LOG_DEBUG(...) (void)0
#define CDEV_PMD_TRACE(...) (void)0
#endif
/**
* Crypto parameters range description
*/
struct rte_crypto_param_range {
uint16_t min; /**< minimum size */
uint16_t max; /**< maximum size */
uint16_t increment;
/**< if a range of sizes is supported,
* this parameter is used to indicate
* increments in byte size that are supported
* between the minimum and maximum
*/
};
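/*
 * Illustrative note (not part of the original header): a range of
 * {.min = 16, .max = 64, .increment = 8} describes the supported sizes
 * 16, 24, 32, ..., 64 bytes; an increment of 0 conventionally means that
 * only a single size (min == max) is supported.
 */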
/**
* Symmetric Crypto Capability
*/
struct rte_cryptodev_symmetric_capability {
enum rte_crypto_sym_xform_type xform_type;
/**< Transform type : Authentication / Cipher */
RTE_STD_C11
union {
struct {
enum rte_crypto_auth_algorithm algo;
/**< authentication algorithm */
uint16_t block_size;
/**< algorithm block size */
struct rte_crypto_param_range key_size;
/**< auth key size range */
struct rte_crypto_param_range digest_size;
/**< digest size range */
struct rte_crypto_param_range aad_size;
/**< Additional authentication data size range */
} auth;
/**< Symmetric Authentication transform capabilities */
struct {
enum rte_crypto_cipher_algorithm algo;
/**< cipher algorithm */
uint16_t block_size;
/**< algorithm block size */
struct rte_crypto_param_range key_size;
/**< cipher key size range */
struct rte_crypto_param_range iv_size;
/**< Initialisation vector data size range */
} cipher;
/**< Symmetric Cipher transform capabilities */
};
};
/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
enum rte_crypto_op_type op;
/**< Operation type */
RTE_STD_C11
union {
struct rte_cryptodev_symmetric_capability sym;
/**< Symmetric operation capability parameters */
};
};
/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
enum rte_crypto_sym_xform_type type;
union {
enum rte_crypto_cipher_algorithm cipher;
enum rte_crypto_auth_algorithm auth;
} algo;
};
/**
 * Provide the symmetric crypto capability available for a given device and algorithm
*
* @param dev_id The identifier of the device.
* @param idx Description of crypto algorithms.
*
* @return
 * - Return description of the symmetric crypto capability if it exists.
 * - Return NULL if the capability does not exist.
*/
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_sym_capability_idx *idx);
/**
* Check if key size and initial vector are supported
* in crypto cipher capability
*
* @param capability Description of the symmetric crypto capability.
* @param key_size Cipher key size.
* @param iv_size Cipher initial vector size.
*
* @return
* - Return 0 if the parameters are in range of the capability.
* - Return -1 if the parameters are out of range of the capability.
*/
int
rte_cryptodev_sym_capability_check_cipher(
const struct rte_cryptodev_symmetric_capability *capability,
uint16_t key_size, uint16_t iv_size);
/**
 * Check if key size, digest size and AAD size are supported
 * in crypto auth capability
*
* @param capability Description of the symmetric crypto capability.
* @param key_size Auth key size.
* @param digest_size Auth digest size.
* @param aad_size Auth aad size.
*
* @return
* - Return 0 if the parameters are in range of the capability.
* - Return -1 if the parameters are out of range of the capability.
*/
int
rte_cryptodev_sym_capability_check_auth(
const struct rte_cryptodev_symmetric_capability *capability,
uint16_t key_size, uint16_t digest_size, uint16_t aad_size);
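/*
 * Illustrative sketch (not part of the original header, kept under "#if 0"):
 * querying a device for an AES-CBC cipher capability and validating a
 * 128-bit key with a 16-byte IV. RTE_CRYPTO_SYM_XFORM_CIPHER and
 * RTE_CRYPTO_CIPHER_AES_CBC are assumed to be provided by rte_crypto.h.
 */
#if 0
static int
example_check_aes_cbc(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -1; /* algorithm not supported by this device */

	/* 16-byte key and 16-byte IV (AES-128-CBC) */
	return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
}
#endif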
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
/**
* Crypto device supported feature flags
*
* Note:
* New features flags should be added to the end of the list
*
* Keep these flags synchronised with rte_cryptodev_get_feature_name()
*/
#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
/**< Symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
/**< Asymmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
/**< Chaining symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
/**< Utilises CPU AES-NI instructions */
#define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
/**< Operations are off-loaded to an external hardware accelerator */
#define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
/**< Scatter-gather mbufs are supported */
#define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 10)
/**< Utilises CPU NEON instructions */
#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 11)
/**< Utilises ARM CPU Cryptographic Extensions */
/**
* Get the name of a crypto device feature flag
*
* @param flag The mask describing the flag.
*
* @return
* The name of this flag, or NULL if it's not a valid feature flag.
*/
extern const char *
rte_cryptodev_get_feature_name(uint64_t flag);
/** Crypto device information */
struct rte_cryptodev_info {
const char *driver_name; /**< Driver name. */
enum rte_cryptodev_type dev_type; /**< Device type */
struct rte_pci_device *pci_dev; /**< PCI information. */
uint64_t feature_flags; /**< Feature flags */
const struct rte_cryptodev_capabilities *capabilities;
/**< Array of devices supported capabilities */
unsigned max_nb_queue_pairs;
/**< Maximum number of queue pairs supported by device. */
struct {
unsigned max_nb_sessions;
/**< Maximum number of sessions supported by device. */
} sym;
};
#define RTE_CRYPTODEV_DETACHED (0)
#define RTE_CRYPTODEV_ATTACHED (1)
/** Definitions of Crypto device event types */
enum rte_cryptodev_event_type {
RTE_CRYPTODEV_EVENT_UNKNOWN, /**< unknown event type */
RTE_CRYPTODEV_EVENT_ERROR, /**< error interrupt event */
RTE_CRYPTODEV_EVENT_MAX /**< max value of this enum */
};
/** Crypto device queue pair configuration structure. */
struct rte_cryptodev_qp_conf {
uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
};
/**
* Typedef for application callback function to be registered by application
* software for notification of device events
*
* @param dev_id Crypto device identifier
* @param event Crypto device event to register for notification of.
 * @param cb_arg User specified parameter to be passed to the
 * user's callback function.
*/
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
enum rte_cryptodev_event_type event, void *cb_arg);
/** Crypto Device statistics */
struct rte_cryptodev_stats {
uint64_t enqueued_count;
/**< Count of all operations enqueued */
uint64_t dequeued_count;
/**< Count of all operations dequeued */
uint64_t enqueue_err_count;
/**< Total error count on operations enqueued */
uint64_t dequeue_err_count;
/**< Total error count on operations dequeued */
};
#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
/**< Max length of name of crypto PMD */
#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS 8
#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS 2048
/**
* @internal
* Initialisation parameters for virtual crypto devices
*/
struct rte_crypto_vdev_init_params {
unsigned max_nb_queue_pairs;
unsigned max_nb_sessions;
uint8_t socket_id;
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
};
/**
* Parse virtual device initialisation parameters input arguments
* @internal
*
 * @param params Initialisation parameters with defaults set.
 * @param input_args Command line arguments
*
* @return
* 0 on successful parse
* <0 on failure to parse
*/
int
rte_cryptodev_parse_vdev_init_params(
struct rte_crypto_vdev_init_params *params,
const char *input_args);
/**
* Create a virtual crypto device
*
* @param name Cryptodev PMD name of device to be created.
* @param args Options arguments for device.
*
* @return
* - On successful creation of the cryptodev the device index is returned,
* which will be between 0 and rte_cryptodev_count().
* - In the case of a failure, returns -1.
*/
extern int
rte_cryptodev_create_vdev(const char *name, const char *args);
/**
* Get the device identifier for the named crypto device.
*
* @param name device name to select the device structure.
*
* @return
* - Returns crypto device identifier on success.
* - Return -1 on failure to find named crypto device.
*/
extern int
rte_cryptodev_get_dev_id(const char *name);
/**
* Get the total number of crypto devices that have been successfully
* initialised.
*
* @return
* - The total number of usable crypto devices.
*/
extern uint8_t
rte_cryptodev_count(void);
/**
 * Get the number of crypto devices of a given type.
*
* @param type type of device.
*
* @return
 * Returns the number of crypto devices of that type.
*/
extern uint8_t
rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
/**
 * Get the number and identifiers of attached crypto devices.
*
* @param dev_name device name.
* @param devices output devices identifiers.
* @param nb_devices maximal number of devices.
*
* @return
 * Returns the number of attached crypto devices.
*/
uint8_t
rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
uint8_t nb_devices);
/**
* Return the NUMA socket to which a device is connected
*
* @param dev_id
* The identifier of the device
* @return
* The NUMA socket id to which the device is connected or
* a default of zero if the socket could not be determined.
 * -1 is returned if the dev_id value is out of range.
*/
extern int
rte_cryptodev_socket_id(uint8_t dev_id);
/** Crypto device configuration structure */
struct rte_cryptodev_config {
int socket_id; /**< Socket to allocate resources on */
uint16_t nb_queue_pairs;
/**< Number of queue pairs to configure on device */
struct {
uint32_t nb_objs; /**< Number of objects in mempool */
uint32_t cache_size; /**< l-core object cache size */
} session_mp; /**< Session mempool configuration */
};
/**
* Configure a device.
*
* This function must be invoked first before any other function in the
* API. This function can also be re-invoked when a device is in the
* stopped state.
*
* @param dev_id The identifier of the device to configure.
* @param config The crypto device configuration structure.
*
* @return
* - 0: Success, device configured.
* - <0: Error code returned by the driver configuration function.
*/
extern int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
/**
 * Start a device.
*
* The device start step is the last one and consists of setting the configured
* offload features and in starting the transmit and the receive units of the
* device.
* On success, all basic functions exported by the API (link status,
* receive/transmit, and so on) can be invoked.
*
* @param dev_id
* The identifier of the device.
* @return
* - 0: Success, device started.
* - <0: Error code of the driver device start function.
*/
extern int
rte_cryptodev_start(uint8_t dev_id);
/**
 * Stop a device. The device can be restarted with a call to
* rte_cryptodev_start()
*
* @param dev_id The identifier of the device.
*/
extern void
rte_cryptodev_stop(uint8_t dev_id);
/**
 * Close a device. The device cannot be restarted!
*
* @param dev_id The identifier of the device.
*
* @return
* - 0 on successfully closing device
* - <0 on failure to close device
*/
extern int
rte_cryptodev_close(uint8_t dev_id);
/**
* Allocate and set up a receive queue pair for a device.
*
*
* @param dev_id The identifier of the device.
* @param queue_pair_id The index of the queue pairs to set up. The
* value must be in the range [0, nb_queue_pair
* - 1] previously supplied to
* rte_cryptodev_configure().
* @param qp_conf The pointer to the configuration data to be
* used for the queue pair. NULL value is
* allowed, in which case default configuration
* will be used.
* @param socket_id The *socket_id* argument is the socket
* identifier in case of NUMA. The value can be
* *SOCKET_ID_ANY* if there is no NUMA constraint
* for the DMA memory allocated for the receive
* queue pair.
*
* @return
* - 0: Success, queue pair correctly set up.
* - <0: Queue pair configuration failed
*/
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
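/*
 * Illustrative sketch (not part of the original header, kept under "#if 0"):
 * a minimal bring-up sequence using only functions declared in this file.
 * The descriptor count and session mempool sizing are arbitrary example
 * values, not recommendations.
 */
#if 0
static int
example_cryptodev_bringup(uint8_t dev_id)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_cryptodev_socket_id(dev_id),
		.nb_queue_pairs = 1,
		.session_mp = { .nb_objs = 2048, .cache_size = 64 },
	};
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					   conf.socket_id) < 0)
		return -1;
	return rte_cryptodev_start(dev_id);
}
#endif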
/**
* Start a specified queue pair of a device. It is used
* when deferred_start flag of the specified queue is true.
*
* @param dev_id The identifier of the device
* @param queue_pair_id The index of the queue pair to start. The value
* must be in the range [0, nb_queue_pair - 1]
* previously supplied to
 * rte_cryptodev_configure().
 * @return
 * - 0: Success, the queue pair is correctly started.
 * - -EINVAL: The dev_id or the queue_id is out of range.
 * - -ENOTSUP: The function is not supported by the PMD driver.
*/
extern int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id);
/**
* Stop specified queue pair of a device
*
* @param dev_id The identifier of the device
* @param queue_pair_id The index of the queue pair to stop. The value
* must be in the range [0, nb_queue_pair - 1]
* previously supplied to
* rte_cryptodev_configure().
* @return
 * - 0: Success, the queue pair is correctly stopped.
 * - -EINVAL: The dev_id or the queue_id is out of range.
 * - -ENOTSUP: The function is not supported by the PMD driver.
*/
extern int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id);
/**
* Get the number of queue pairs on a specific crypto device
*
* @param dev_id Crypto device identifier.
* @return
* - The number of configured queue pairs.
*/
extern uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id);
/**
* Retrieve the general I/O statistics of a device.
*
* @param dev_id The identifier of the device.
* @param stats A pointer to a structure of type
* *rte_cryptodev_stats* to be filled with the
* values of device counters.
* @return
* - Zero if successful.
* - Non-zero otherwise.
*/
extern int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
/**
* Reset the general I/O statistics of a device.
*
* @param dev_id The identifier of the device.
*/
extern void
rte_cryptodev_stats_reset(uint8_t dev_id);
/**
* Retrieve the contextual information of a device.
*
* @param dev_id The identifier of the device.
* @param dev_info A pointer to a structure of type
* *rte_cryptodev_info* to be filled with the
* contextual information of the device.
*/
extern void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
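/*
 * Illustrative sketch (not part of the original header, kept under "#if 0"):
 * testing whether a device is hardware accelerated via its feature flags.
 */
#if 0
static int
example_is_hw_accelerated(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED) != 0;
}
#endif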
/**
* Register a callback function for specific device id.
*
* @param dev_id Device id.
* @param event Event interested.
* @param cb_fn User supplied callback function to be called.
* @param cb_arg Pointer to the parameters for the registered
* callback.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
extern int
rte_cryptodev_callback_register(uint8_t dev_id,
enum rte_cryptodev_event_type event,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
/**
* Unregister a callback function for specific device id.
*
* @param dev_id The device identifier.
* @param event Event interested.
* @param cb_fn User supplied callback function to be called.
* @param cb_arg Pointer to the parameters for the registered
* callback.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
extern int
rte_cryptodev_callback_unregister(uint8_t dev_id,
enum rte_cryptodev_event_type event,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
struct rte_cryptodev_callback;
/** Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
/** The data structure associated with each crypto device. */
struct rte_cryptodev {
dequeue_pkt_burst_t dequeue_burst;
/**< Pointer to PMD receive function. */
enqueue_pkt_burst_t enqueue_burst;
/**< Pointer to PMD transmit function. */
const struct rte_cryptodev_driver *driver;
/**< Driver for this device */
struct rte_cryptodev_data *data;
/**< Pointer to device data */
struct rte_cryptodev_ops *dev_ops;
/**< Functions exported by PMD */
uint64_t feature_flags;
/**< Supported features */
struct rte_device *device;
/**< Backing device */
enum rte_cryptodev_type dev_type;
/**< Crypto device type */
struct rte_cryptodev_cb_list link_intr_cbs;
/**< User application callback for interrupts if present */
__extension__
uint8_t attached : 1;
/**< Flag indicating the device is attached */
} __rte_cache_aligned;
/**
*
* The data part, with no function pointers, associated with each device.
*
* This structure is safe to place in shared memory to be common among
* different processes in a multi-process configuration.
*/
struct rte_cryptodev_data {
uint8_t dev_id;
/**< Device ID for this instance */
uint8_t socket_id;
/**< Socket ID where memory is allocated */
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique identifier name */
__extension__
uint8_t dev_started : 1;
/**< Device state: STARTED(1)/STOPPED(0) */
struct rte_mempool *session_pool;
/**< Session memory pool */
void **queue_pairs;
/**< Array of pointers to queue pairs. */
uint16_t nb_queue_pairs;
/**< Number of device queue pairs. */
void *dev_private;
/**< PMD-specific private data */
} __rte_cache_aligned;
extern struct rte_cryptodev *rte_cryptodevs;
/**
*
* Dequeue a burst of processed crypto operations from a queue on the crypto
* device. The dequeued operation are stored in *rte_crypto_op* structures
* whose pointers are supplied in the *ops* array.
*
* The rte_cryptodev_dequeue_burst() function returns the number of ops
* actually dequeued, which is the number of *rte_crypto_op* data structures
* effectively supplied into the *ops* array.
*
* A return value equal to *nb_ops* indicates that the queue contained
* at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
* implementing a "retrieve as many processed operations as possible" policy
* can check this specific case and keep invoking the
* rte_cryptodev_dequeue_burst() function until a value less than
* *nb_ops* is returned.
*
* The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
* @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
* @param ops The address of an array of pointers to
* *rte_crypto_op* structures that must be
* large enough to store *nb_ops* pointers in it.
* @param nb_ops The maximum number of operations to dequeue.
*
* @return
* - The number of operations actually dequeued, which is the number
* of pointers to *rte_crypto_op* structures effectively supplied to the
* *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
nb_ops = (*dev->dequeue_burst)
(dev->data->queue_pairs[qp_id], ops, nb_ops);
return nb_ops;
}
/**
* Enqueue a burst of operations for processing on a crypto device.
*
* The rte_cryptodev_enqueue_burst() function is invoked to place
* crypto operations on the queue *qp_id* of the device designated by
* its *dev_id*.
*
* The *nb_ops* parameter is the number of operations to process which are
* supplied in the *ops* array of *rte_crypto_op* structures.
*
* The rte_cryptodev_enqueue_burst() function returns the number of
* operations it actually enqueued for processing. A return value equal to
* *nb_ops* means that all packets have been enqueued.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
* to be enqueued for processing. The value
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
* @param ops The address of an array of *nb_ops* pointers
* to *rte_crypto_op* structures which contain
* the crypto operations to be processed.
* @param nb_ops The number of operations to process.
*
* @return
* The number of operations actually enqueued on the crypto device. The return
* value can be less than the value of the *nb_ops* parameter when the
* crypto devices queue is full or if invalid parameters are specified in
* a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
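/*
 * Illustrative sketch (not part of the original header, kept under "#if 0"):
 * a typical enqueue/dequeue loop on queue pair 0. Preparation of the
 * rte_crypto_op array (allocation, attaching mbufs and sessions) is assumed
 * to have happened elsewhere.
 */
#if 0
static void
example_process_ops(uint8_t dev_id, struct rte_crypto_op **ops, uint16_t n)
{
	uint16_t enq = 0, deq = 0;

	/* The device queue may be full; retry until everything is enqueued */
	while (enq < n)
		enq += rte_cryptodev_enqueue_burst(dev_id, 0,
						   ops + enq, n - enq);

	/* Retrieve the processed operations */
	while (deq < n)
		deq += rte_cryptodev_dequeue_burst(dev_id, 0,
						   ops + deq, n - deq);
}
#endif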
/** Cryptodev symmetric crypto session */
struct rte_cryptodev_sym_session {
RTE_STD_C11
struct {
uint8_t dev_id;
/**< Device Id */
enum rte_cryptodev_type dev_type;
/**< Crypto device type session created on */
struct rte_mempool *mp;
/**< Mempool session allocated from */
} __rte_aligned(8);
/**< Public symmetric session details */
__extension__ char _private[0];
/**< Private session material */
};
/**
* Initialise a session for symmetric cryptographic operations.
*
* This function is used by the client to initialize immutable
* parameters of symmetric cryptographic operation.
* To perform the operation the rte_cryptodev_enqueue_burst function is
* used. Each mbuf should contain a reference to the session
 * pointer returned from this function within its crypto_op if a
 * session-based operation is being provisioned. Memory to contain the session
 * information is allocated from within a mempool managed by the cryptodev.
*
 * rte_cryptodev_sym_session_free() must be called to free the allocated
* memory when the session is no longer required.
*
* @param dev_id The device identifier.
* @param xform Crypto transform chain.
*
* @return
* Pointer to the created session or NULL
*/
extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
struct rte_crypto_sym_xform *xform);
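/*
 * Illustrative sketch (not part of the original header, kept under "#if 0"):
 * creating a cipher-only session. The rte_crypto_sym_xform field names
 * (type, cipher.op, cipher.algo, cipher.key) and the enum values used below
 * are assumed to match rte_crypto_sym.h of this release; the key buffer is
 * a caller-supplied placeholder.
 */
#if 0
static struct rte_cryptodev_sym_session *
example_create_cipher_session(uint8_t dev_id, uint8_t *key, size_t key_len)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = key_len },
		},
	};

	return rte_cryptodev_sym_session_create(dev_id, &xform);
}
#endif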
/**
* Free the memory associated with a previously allocated session.
*
* @param dev_id The device identifier.
* @param session Session pointer previously allocated by
* *rte_cryptodev_sym_session_create*.
*
* @return
* NULL on successful freeing of session.
* Session pointer on failure to free session.
*/
extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_free(uint8_t dev_id,
struct rte_cryptodev_sym_session *session);
#ifdef __cplusplus
}
#endif
#endif /* _RTE_CRYPTODEV_H_ */
|
vicharl/containerdns
|
kdns/src/dns-conf.h
|
<gh_stars>100-1000
#ifndef __DNSCONF_H__
#define __DNSCONF_H__
#include <stdint.h>
#include "ctrl_msg.h"
#include "zone.h"
#define DPDK_MAX_ARG_NUM (32)
#define DPDK_MAX_ARG_LEN (128)
#define MAX_CONFIG_STR_LEN (2048)
struct config_update {
ctrl_msg cmsg;
uint32_t flags;
char del_zones[MAX_CONFIG_STR_LEN];
char add_zones[MAX_CONFIG_STR_LEN];
int fwd_mode;
int fwd_timeout;
char fwd_def_addrs[MAX_CONFIG_STR_LEN];
char fwd_zones_addrs[MAX_CONFIG_STR_LEN];
uint32_t all_per_second;
uint32_t fwd_per_second;
uint32_t client_num;
};
struct comm_config {
char log_file[MAX_CONFIG_STR_LEN];
char metrics_host[32];
char zones[MAX_CONFIG_STR_LEN];
int fwd_mode;
uint16_t fwd_threads;
uint16_t fwd_timeout;
uint32_t fwd_mbuf_num;
char fwd_def_addrs[MAX_CONFIG_STR_LEN];
char fwd_zones_addrs[MAX_CONFIG_STR_LEN];
uint16_t web_port;
int ssl_enable;
char key_pem_file[MAX_CONFIG_STR_LEN];
char cert_pem_file[MAX_CONFIG_STR_LEN];
uint32_t all_per_second;
uint32_t fwd_per_second;
uint32_t client_num;
};
struct netdev_config {
int mode; //rss: 0, other: 1
uint32_t mbuf_num;
uint16_t rxq_desc_num;
uint16_t txq_desc_num;
uint16_t rxq_num;
uint16_t txq_num;
char kni_name_prefix[32];
uint32_t kni_mbuf_num;
uint32_t kni_ip;
char kni_vip[32];
uint32_t kni_gateway;
};
struct eal_config {
int argc;
char argv[DPDK_MAX_ARG_NUM][DPDK_MAX_ARG_LEN];
};
struct dns_config {
struct eal_config eal;
struct netdev_config netdev;
struct comm_config comm;
};
extern struct dns_config *g_dns_cfg;
int dns_config_load(char *cfgfile_path, char *proc_name);
int dns_config_reload(char *cfgfile_path, char *proc_name);
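/*
 * Illustrative usage sketch (the configuration file path is only an
 * assumption, and a non-zero return is assumed to indicate failure):
 *
 *   if (dns_config_load("/etc/kdns/kdns.cfg", argv[0]) != 0)
 *       exit(EXIT_FAILURE);
 *   printf("zones: %s\n", g_dns_cfg->comm.zones);
 */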
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc_ev.c
|
<gh_stars>100-1000
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include "efx.h"
#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US (1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
static boolean_t
sfc_ev_initialized(void *arg)
{
struct sfc_evq *evq = arg;
/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
evq->init_state == SFC_EVQ_STARTED);
evq->init_state = SFC_EVQ_STARTED;
return B_FALSE;
}
static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
uint32_t size, uint16_t flags)
{
struct sfc_evq *evq = arg;
struct sfc_rxq *rxq;
unsigned int stop;
unsigned int pending_id;
unsigned int delta;
unsigned int i;
struct sfc_rx_sw_desc *rxd;
if (unlikely(evq->exception))
goto done;
rxq = evq->rxq;
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->evq == evq);
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
stop = (id + 1) & rxq->ptr_mask;
pending_id = rxq->pending & rxq->ptr_mask;
delta = (stop >= pending_id) ? (stop - pending_id) :
(rxq->ptr_mask + 1 - pending_id + stop);
if (delta == 0) {
/*
* Rx event with no new descriptors done and zero length
* is used to abort scattered packet when there is no room
* for the tail.
*/
if (unlikely(size != 0)) {
evq->exception = B_TRUE;
sfc_err(evq->sa,
"EVQ %u RxQ %u invalid RX abort "
"(id=%#x size=%u flags=%#x); needs restart",
evq->evq_index, sfc_rxq_sw_index(rxq),
id, size, flags);
goto done;
}
/* Add discard flag to the first fragment */
rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
/* Remove continue flag from the last fragment */
rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
} else if (unlikely(delta > rxq->batch_max)) {
evq->exception = B_TRUE;
sfc_err(evq->sa,
"EVQ %u RxQ %u completion out of order "
"(id=%#x delta=%u flags=%#x); needs restart",
evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
flags);
goto done;
}
for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
rxd = &rxq->sw_desc[i];
rxd->flags = flags;
SFC_ASSERT(size < (1 << 16));
rxd->size = (uint16_t)size;
}
rxq->pending += delta;
done:
return B_FALSE;
}
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
struct sfc_evq *evq = arg;
struct sfc_txq *txq;
unsigned int stop;
unsigned int delta;
txq = evq->txq;
SFC_ASSERT(txq != NULL);
SFC_ASSERT(txq->evq == evq);
if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
goto done;
stop = (id + 1) & txq->ptr_mask;
id = txq->pending & txq->ptr_mask;
delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
txq->pending += delta;
done:
return B_FALSE;
}
static boolean_t
sfc_ev_exception(void *arg, __rte_unused uint32_t code,
__rte_unused uint32_t data)
{
struct sfc_evq *evq = arg;
if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
return B_FALSE;
evq->exception = B_TRUE;
sfc_warn(evq->sa,
"hardware exception %s (code=%u, data=%#x) on EVQ %u;"
" needs recovery",
(code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
(code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
(code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
(code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
(code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
(code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
(code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
(code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
"UNKNOWN",
code, data, evq->evq_index);
return B_TRUE;
}
static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
struct sfc_rxq *rxq;
rxq = evq->rxq;
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->hw_index == rxq_hw_index);
SFC_ASSERT(rxq->evq == evq);
sfc_rx_qflush_done(rxq);
return B_FALSE;
}
static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
struct sfc_evq *evq = arg;
struct sfc_rxq *rxq;
rxq = evq->rxq;
SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->hw_index == rxq_hw_index);
SFC_ASSERT(rxq->evq == evq);
sfc_rx_qflush_failed(rxq);
return B_FALSE;
}
static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
struct sfc_evq *evq = arg;
struct sfc_txq *txq;
txq = evq->txq;
SFC_ASSERT(txq != NULL);
SFC_ASSERT(txq->hw_index == txq_hw_index);
SFC_ASSERT(txq->evq == evq);
sfc_tx_qflush_done(txq);
return B_FALSE;
}
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
struct sfc_evq *evq = arg;
sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
evq->evq_index, magic);
return B_TRUE;
}
static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
struct sfc_evq *evq = arg;
sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
evq->evq_index, code);
return B_TRUE;
}
static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
struct sfc_evq *evq = arg;
sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
evq->evq_index, index);
return B_TRUE;
}
static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
struct sfc_evq *evq = arg;
sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
evq->evq_index, index);
return B_TRUE;
}
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
struct sfc_evq *evq = arg;
struct sfc_adapter *sa = evq->sa;
struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
struct rte_eth_link new_link;
uint64_t new_link_u64;
uint64_t old_link_u64;
EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
sfc_port_link_mode_to_info(link_mode, &new_link);
new_link_u64 = *(uint64_t *)&new_link;
do {
old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
if (old_link_u64 == new_link_u64)
break;
if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
old_link_u64, new_link_u64)) {
evq->sa->port.lsc_seq++;
break;
}
} while (B_TRUE);
return B_FALSE;
}
static const efx_ev_callbacks_t sfc_ev_callbacks = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_rx,
.eec_tx = sfc_ev_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
.eec_txq_flush_done = sfc_ev_txq_flush_done,
.eec_software = sfc_ev_software,
.eec_sram = sfc_ev_sram,
.eec_wake_up = sfc_ev_wake_up,
.eec_timer = sfc_ev_timer,
.eec_link_change = sfc_ev_link_change,
};
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
evq->init_state == SFC_EVQ_STARTING);
/* Synchronizing the DMA memory for reading is not required */
efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);
if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
struct sfc_adapter *sa = evq->sa;
int rc;
if ((evq->rxq != NULL) && (evq->rxq->state & SFC_RXQ_RUNNING)) {
unsigned int rxq_sw_index = sfc_rxq_sw_index(evq->rxq);
sfc_warn(sa,
"restart RxQ %u because of exception on its EvQ %u",
rxq_sw_index, evq->evq_index);
sfc_rx_qstop(sa, rxq_sw_index);
rc = sfc_rx_qstart(sa, rxq_sw_index);
if (rc != 0)
sfc_err(sa, "cannot restart RxQ %u",
rxq_sw_index);
}
if (evq->txq != NULL) {
unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);
sfc_warn(sa,
"restart TxQ %u because of exception on its EvQ %u",
txq_sw_index, evq->evq_index);
sfc_tx_qstop(sa, txq_sw_index);
rc = sfc_tx_qstart(sa, txq_sw_index);
if (rc != 0)
sfc_err(sa, "cannot restart TxQ %u",
txq_sw_index);
}
if (evq->exception)
sfc_panic(sa, "unrecoverable exception on EvQ %u",
evq->evq_index);
sfc_adapter_unlock(sa);
}
/* Poll-mode driver does not re-prime the event queue for interrupts */
}
void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
if (mgmt_evq->init_state == SFC_EVQ_STARTED)
sfc_ev_qpoll(mgmt_evq);
rte_spinlock_unlock(&sa->mgmt_evq_lock);
}
}
int
sfc_ev_qprime(struct sfc_evq *evq)
{
SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
return efx_ev_qprime(evq->common, evq->read_ptr);
}
int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
const struct sfc_evq_info *evq_info;
struct sfc_evq *evq;
efsys_mem_t *esmp;
unsigned int total_delay_us;
unsigned int delay_us;
int rc;
sfc_log_init(sa, "sw_index=%u", sw_index);
evq_info = &sa->evq_info[sw_index];
evq = evq_info->evq;
esmp = &evq->mem;
/* Clear all events */
(void)memset((void *)esmp->esm_base, 0xff,
EFX_EVQ_SIZE(evq_info->entries));
/* Create the common code event queue */
rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
0 /* unused on EF10 */, 0, evq_info->flags,
&evq->common);
if (rc != 0)
goto fail_ev_qcreate;
evq->init_state = SFC_EVQ_STARTING;
/* Wait for the initialization event */
total_delay_us = 0;
delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
do {
(void)sfc_ev_qpoll(evq);
/* Check to see if the initialization complete indication has been
 * posted by the hardware.
*/
if (evq->init_state == SFC_EVQ_STARTED)
goto done;
/* Give event queue some time to init */
rte_delay_us(delay_us);
total_delay_us += delay_us;
/* Exponential backoff */
delay_us *= 2;
if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
rc = ETIMEDOUT;
goto fail_timedout;
done:
return 0;
fail_timedout:
evq->init_state = SFC_EVQ_INITIALIZED;
efx_ev_qdestroy(evq->common);
fail_ev_qcreate:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
const struct sfc_evq_info *evq_info;
struct sfc_evq *evq;
sfc_log_init(sa, "sw_index=%u", sw_index);
SFC_ASSERT(sw_index < sa->evq_count);
evq_info = &sa->evq_info[sw_index];
evq = evq_info->evq;
if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
return;
evq->init_state = SFC_EVQ_INITIALIZED;
evq->read_ptr = 0;
evq->exception = B_FALSE;
efx_ev_qdestroy(evq->common);
}
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
struct sfc_adapter *sa = arg;
int rc;
sfc_ev_mgmt_qpoll(sa);
rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
sfc_ev_mgmt_periodic_qpoll, sa);
if (rc == -ENOTSUP) {
sfc_warn(sa, "alarms are not supported");
sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
} else if (rc != 0) {
sfc_err(sa,
"cannot rearm management EVQ polling alarm (rc=%d)",
rc);
}
}
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
sfc_ev_mgmt_periodic_qpoll(sa);
}
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
int
sfc_ev_start(struct sfc_adapter *sa)
{
int rc;
sfc_log_init(sa, "entry");
rc = efx_ev_init(sa->nic);
if (rc != 0)
goto fail_ev_init;
/* Start management EVQ used for global events */
rte_spinlock_lock(&sa->mgmt_evq_lock);
rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
if (rc != 0)
goto fail_mgmt_evq_start;
if (sa->intr.lsc_intr) {
rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
if (rc != 0)
goto fail_evq0_prime;
}
rte_spinlock_unlock(&sa->mgmt_evq_lock);
/*
* Start management EVQ polling. If interrupts are disabled
* (not used), it is required to process link status change
 * and other device level events to avoid an unrecoverable
 * error due to event queue overflow.
*/
sfc_ev_mgmt_periodic_qpoll_start(sa);
/*
* Rx/Tx event queues are started/stopped when corresponding
* Rx/Tx queue is started/stopped.
*/
return 0;
fail_evq0_prime:
sfc_ev_qstop(sa, 0);
fail_mgmt_evq_start:
rte_spinlock_unlock(&sa->mgmt_evq_lock);
efx_ev_fini(sa->nic);
fail_ev_init:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_ev_stop(struct sfc_adapter *sa)
{
unsigned int sw_index;
sfc_log_init(sa, "entry");
sfc_ev_mgmt_periodic_qpoll_stop(sa);
/* Make sure that all event queues are stopped */
sw_index = sa->evq_count;
while (sw_index-- > 0) {
if (sw_index == sa->mgmt_evq_index) {
/* Locks are required for the management EVQ */
rte_spinlock_lock(&sa->mgmt_evq_lock);
sfc_ev_qstop(sa, sa->mgmt_evq_index);
rte_spinlock_unlock(&sa->mgmt_evq_lock);
} else {
sfc_ev_qstop(sa, sw_index);
}
}
efx_ev_fini(sa->nic);
}
int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
unsigned int entries, int socket_id)
{
struct sfc_evq_info *evq_info;
struct sfc_evq *evq;
int rc;
sfc_log_init(sa, "sw_index=%u", sw_index);
evq_info = &sa->evq_info[sw_index];
SFC_ASSERT(rte_is_power_of_2(entries));
SFC_ASSERT(entries <= evq_info->max_entries);
evq_info->entries = entries;
rc = ENOMEM;
evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
socket_id);
if (evq == NULL)
goto fail_evq_alloc;
evq->sa = sa;
evq->evq_index = sw_index;
/* Allocate DMA space */
rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
socket_id, &evq->mem);
if (rc != 0)
goto fail_dma_alloc;
evq->init_state = SFC_EVQ_INITIALIZED;
evq_info->evq = evq;
return 0;
fail_dma_alloc:
rte_free(evq);
fail_evq_alloc:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
struct sfc_evq *evq;
sfc_log_init(sa, "sw_index=%u", sw_index);
evq = sa->evq_info[sw_index].evq;
SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
sa->evq_info[sw_index].evq = NULL;
sfc_dma_free(sa, &evq->mem);
rte_free(evq);
}
static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
unsigned int max_entries;
sfc_log_init(sa, "sw_index=%u", sw_index);
max_entries = sfc_evq_max_entries(sa, sw_index);
SFC_ASSERT(rte_is_power_of_2(max_entries));
evq_info->max_entries = max_entries;
evq_info->flags = sa->evq_flags |
((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
EFX_EVQ_FLAGS_NOTIFY_DISABLED);
return 0;
}
static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
const char *value_str, void *opaque)
{
uint64_t *value = opaque;
if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
*value = EFX_EVQ_FLAGS_TYPE_AUTO;
else
return -EINVAL;
return 0;
}
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
sfc_log_init(sa, "sw_index=%u", sw_index);
/* Nothing to cleanup */
}
int
sfc_ev_init(struct sfc_adapter *sa)
{
int rc;
unsigned int sw_index;
sfc_log_init(sa, "entry");
sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
sfc_kvarg_perf_profile_handler,
&sa->evq_flags);
if (rc != 0) {
sfc_err(sa, "invalid %s parameter value",
SFC_KVARG_PERF_PROFILE);
goto fail_kvarg_perf_profile;
}
sa->evq_count = sfc_ev_qcount(sa);
sa->mgmt_evq_index = 0;
rte_spinlock_init(&sa->mgmt_evq_lock);
/* Allocate EVQ info array */
rc = ENOMEM;
sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
sizeof(struct sfc_evq_info), 0,
sa->socket_id);
if (sa->evq_info == NULL)
goto fail_evqs_alloc;
for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
rc = sfc_ev_qinit_info(sa, sw_index);
if (rc != 0)
goto fail_ev_qinit_info;
}
rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
sa->socket_id);
if (rc != 0)
goto fail_mgmt_evq_init;
/*
* Rx/Tx event queues are created/destroyed when corresponding
* Rx/Tx queue is created/destroyed.
*/
return 0;
fail_mgmt_evq_init:
fail_ev_qinit_info:
while (sw_index-- > 0)
sfc_ev_qfini_info(sa, sw_index);
rte_free(sa->evq_info);
sa->evq_info = NULL;
fail_evqs_alloc:
sa->evq_count = 0;
fail_kvarg_perf_profile:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_ev_fini(struct sfc_adapter *sa)
{
int sw_index;
sfc_log_init(sa, "entry");
/* Cleanup all event queues */
sw_index = sa->evq_count;
while (--sw_index >= 0) {
if (sa->evq_info[sw_index].evq != NULL)
sfc_ev_qfini(sa, sw_index);
sfc_ev_qfini_info(sa, sw_index);
}
rte_free(sa->evq_info);
sa->evq_info = NULL;
sa->evq_count = 0;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc.c
|
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* sysconf() */
#include <unistd.h>
#include <rte_errno.h>
#include "efx.h"
#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp)
{
const struct rte_memzone *mz;
sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
name, id, len, socket_id);
mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
sysconf(_SC_PAGESIZE), socket_id);
if (mz == NULL) {
sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
name, (unsigned int)id, (unsigned int)len, socket_id,
rte_strerror(rte_errno));
return ENOMEM;
}
esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
(void)rte_memzone_free(mz);
return EFAULT;
}
esmp->esm_mz = mz;
esmp->esm_base = mz->addr;
return 0;
}
void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
int rc;
sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
rc = rte_memzone_free(esmp->esm_mz);
if (rc != 0)
sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
memset(esmp, 0, sizeof(*esmp));
}
static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
uint32_t phy_caps = 0;
if (~speeds & ETH_LINK_SPEED_FIXED) {
phy_caps |= (1 << EFX_PHY_CAP_AN);
/*
* If no speeds are specified in the mask, any supported
* speed may be negotiated
*/
if (speeds == ETH_LINK_SPEED_AUTONEG)
phy_caps |=
(1 << EFX_PHY_CAP_1000FDX) |
(1 << EFX_PHY_CAP_10000FDX) |
(1 << EFX_PHY_CAP_40000FDX);
}
if (speeds & ETH_LINK_SPEED_1G)
phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
if (speeds & ETH_LINK_SPEED_10G)
phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
if (speeds & ETH_LINK_SPEED_40G)
phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
return phy_caps;
}
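/*
* Editorial sketch (not part of the original driver): exercise the
* speed-mask translation above with an assumed mask chosen purely for
* illustration.
*/
static __rte_unused uint32_t
sfc_phy_cap_example(void)
{
/* auto-negotiated 10G + 40G: the AN bit plus both speed bits */
return sfc_phy_cap_from_link_speeds(ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_40G);
}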
/*
* Check requested device level configuration.
* Receive and transmit configuration is checked in corresponding
* modules.
*/
static int
sfc_check_conf(struct sfc_adapter *sa)
{
const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
int rc = 0;
sa->port.phy_adv_cap =
sfc_phy_cap_from_link_speeds(conf->link_speeds) &
sa->port.phy_adv_cap_mask;
if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
sfc_err(sa, "No link speeds from mask %#x are supported",
conf->link_speeds);
rc = EINVAL;
}
if (conf->lpbk_mode != 0) {
sfc_err(sa, "Loopback not supported");
rc = EINVAL;
}
if (conf->dcb_capability_en != 0) {
sfc_err(sa, "Priority-based flow control not supported");
rc = EINVAL;
}
if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
sfc_err(sa, "Flow Director not supported");
rc = EINVAL;
}
if ((conf->intr_conf.lsc != 0) &&
(sa->intr.type != EFX_INTR_LINE) &&
(sa->intr.type != EFX_INTR_MESSAGE)) {
sfc_err(sa, "Link status change interrupt not supported");
rc = EINVAL;
}
if (conf->intr_conf.rxq != 0) {
sfc_err(sa, "Receive queue interrupt not supported");
rc = EINVAL;
}
return rc;
}
/*
* Find out maximum number of receive and transmit queues which could be
* advertised.
*
* NIC is kept initialized on success to allow other modules acquire
* defaults and capabilities.
*/
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
efx_drv_limits_t limits;
int rc;
uint32_t evq_allocated;
uint32_t rxq_allocated;
uint32_t txq_allocated;
memset(&limits, 0, sizeof(limits));
/* Request at least one Rx and Tx queue */
limits.edl_min_rxq_count = 1;
limits.edl_min_txq_count = 1;
/* Management event queue plus event queue for each Tx and Rx queue */
limits.edl_min_evq_count =
1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
/* Divide by number of functions to guarantee that all functions
* will get promised resources
*/
/* FIXME Divide by number of functions (not 2) below */
limits.edl_max_evq_count = encp->enc_evq_limit / 2;
SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
/* Split equally between receive and transmit */
limits.edl_max_rxq_count =
MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
limits.edl_max_txq_count =
MIN(encp->enc_txq_limit,
limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
if (sa->tso)
limits.edl_max_txq_count =
MIN(limits.edl_max_txq_count,
encp->enc_fw_assisted_tso_v2_n_contexts /
encp->enc_hw_pf_count);
SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
/* Configure the minimum resources required for the driver to
* operate, and the maximum desired resources that the driver is
* capable of using.
*/
efx_nic_set_drv_limits(sa->nic, &limits);
sfc_log_init(sa, "init nic");
rc = efx_nic_init(sa->nic);
if (rc != 0)
goto fail_nic_init;
/* Find resource dimensions assigned by firmware to this function */
rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
&txq_allocated);
if (rc != 0)
goto fail_get_vi_pool;
/* It may still allocate more than the maximum, so enforce the limits */
evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
/* Subtract management EVQ not used for traffic */
SFC_ASSERT(evq_allocated > 0);
evq_allocated--;
/* Right now we use separate EVQ for Rx and Tx */
sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
/* Keep NIC initialized */
return 0;
fail_get_vi_pool:
fail_nic_init:
efx_nic_fini(sa->nic);
return rc;
}
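/*
* Worked example (not part of the original driver; all limits below
* are assumed values): with enc_evq_limit = 64 and generous Rx/Tx
* queue limits, the driver requests edl_max_evq_count = 64 / 2 = 32,
* edl_max_rxq_count = (32 - 1) / 2 = 15 and edl_max_txq_count =
* 32 - 1 - 15 = 16. If firmware then grants all 32 EVQs, one is kept
* for management, leaving rxq_max = 15 and txq_max = 16.
*/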
static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
const struct rte_eth_dev_data *data = sa->eth_dev->data;
efx_drv_limits_t lim;
memset(&lim, 0, sizeof(lim));
/* Limits are strict since take into account initial estimation */
lim.edl_min_evq_count = lim.edl_max_evq_count =
1 + data->nb_rx_queues + data->nb_tx_queues;
lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
return efx_nic_set_drv_limits(sa->nic, &lim);
}
int
sfc_start(struct sfc_adapter *sa)
{
int rc;
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
switch (sa->state) {
case SFC_ADAPTER_CONFIGURED:
break;
case SFC_ADAPTER_STARTED:
sfc_info(sa, "already started");
return 0;
default:
rc = EINVAL;
goto fail_bad_state;
}
sa->state = SFC_ADAPTER_STARTING;
sfc_log_init(sa, "set resource limits");
rc = sfc_set_drv_limits(sa);
if (rc != 0)
goto fail_set_drv_limits;
sfc_log_init(sa, "init nic");
rc = efx_nic_init(sa->nic);
if (rc != 0)
goto fail_nic_init;
rc = sfc_intr_start(sa);
if (rc != 0)
goto fail_intr_start;
rc = sfc_ev_start(sa);
if (rc != 0)
goto fail_ev_start;
rc = sfc_port_start(sa);
if (rc != 0)
goto fail_port_start;
rc = sfc_rx_start(sa);
if (rc != 0)
goto fail_rx_start;
rc = sfc_tx_start(sa);
if (rc != 0)
goto fail_tx_start;
sa->state = SFC_ADAPTER_STARTED;
sfc_log_init(sa, "done");
return 0;
fail_tx_start:
sfc_rx_stop(sa);
fail_rx_start:
sfc_port_stop(sa);
fail_port_start:
sfc_ev_stop(sa);
fail_ev_start:
sfc_intr_stop(sa);
fail_intr_start:
efx_nic_fini(sa->nic);
fail_nic_init:
fail_set_drv_limits:
sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_stop(struct sfc_adapter *sa)
{
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
switch (sa->state) {
case SFC_ADAPTER_STARTED:
break;
case SFC_ADAPTER_CONFIGURED:
sfc_info(sa, "already stopped");
return;
default:
sfc_err(sa, "stop in unexpected state %u", sa->state);
SFC_ASSERT(B_FALSE);
return;
}
sa->state = SFC_ADAPTER_STOPPING;
sfc_tx_stop(sa);
sfc_rx_stop(sa);
sfc_port_stop(sa);
sfc_ev_stop(sa);
sfc_intr_stop(sa);
efx_nic_fini(sa->nic);
sa->state = SFC_ADAPTER_CONFIGURED;
sfc_log_init(sa, "done");
}
int
sfc_configure(struct sfc_adapter *sa)
{
int rc;
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
sa->state = SFC_ADAPTER_CONFIGURING;
rc = sfc_check_conf(sa);
if (rc != 0)
goto fail_check_conf;
rc = sfc_intr_init(sa);
if (rc != 0)
goto fail_intr_init;
rc = sfc_ev_init(sa);
if (rc != 0)
goto fail_ev_init;
rc = sfc_port_init(sa);
if (rc != 0)
goto fail_port_init;
rc = sfc_rx_init(sa);
if (rc != 0)
goto fail_rx_init;
rc = sfc_tx_init(sa);
if (rc != 0)
goto fail_tx_init;
sa->state = SFC_ADAPTER_CONFIGURED;
sfc_log_init(sa, "done");
return 0;
fail_tx_init:
sfc_rx_fini(sa);
fail_rx_init:
sfc_port_fini(sa);
fail_port_init:
sfc_ev_fini(sa);
fail_ev_init:
sfc_intr_fini(sa);
fail_intr_init:
fail_check_conf:
sa->state = SFC_ADAPTER_INITIALIZED;
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_close(struct sfc_adapter *sa)
{
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
sa->state = SFC_ADAPTER_CLOSING;
sfc_tx_fini(sa);
sfc_rx_fini(sa);
sfc_port_fini(sa);
sfc_ev_fini(sa);
sfc_intr_fini(sa);
sa->state = SFC_ADAPTER_INITIALIZED;
sfc_log_init(sa, "done");
}
static int
sfc_mem_bar_init(struct sfc_adapter *sa)
{
struct rte_eth_dev *eth_dev = sa->eth_dev;
struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
unsigned int i;
struct rte_mem_resource *res;
for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
res = &pci_dev->mem_resource[i];
if ((res->len != 0) && (res->phys_addr != 0)) {
/* Found first memory BAR */
SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
ebp->esb_rid = i;
ebp->esb_dev = pci_dev;
ebp->esb_base = res->addr;
return 0;
}
}
return EFAULT;
}
static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
efsys_bar_t *ebp = &sa->mem_bar;
SFC_BAR_LOCK_DESTROY(ebp);
memset(ebp, 0, sizeof(*ebp));
}
#if EFSYS_OPT_RX_SCALE
/*
* A fixed RSS key which has a property of being symmetric
* (symmetrical flows are distributed to the same CPU)
* and also known to give a uniform distribution
* (a good distribution of traffic between different CPUs)
*/
static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
#endif
static int
sfc_set_rss_defaults(struct sfc_adapter *sa)
{
#if EFSYS_OPT_RX_SCALE
int rc;
rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
if (rc != 0)
goto fail_intr_init;
rc = efx_ev_init(sa->nic);
if (rc != 0)
goto fail_ev_init;
rc = efx_rx_init(sa->nic);
if (rc != 0)
goto fail_rx_init;
rc = efx_rx_scale_support_get(sa->nic, &sa->rss_support);
if (rc != 0)
goto fail_scale_support_get;
rc = efx_rx_hash_support_get(sa->nic, &sa->hash_support);
if (rc != 0)
goto fail_hash_support_get;
efx_rx_fini(sa->nic);
efx_ev_fini(sa->nic);
efx_intr_fini(sa->nic);
sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
return 0;
fail_hash_support_get:
fail_scale_support_get:
fail_rx_init:
efx_ev_fini(sa->nic);
fail_ev_init:
efx_intr_fini(sa->nic);
fail_intr_init:
return rc;
#else
return 0;
#endif
}
int
sfc_attach(struct sfc_adapter *sa)
{
struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
const efx_nic_cfg_t *encp;
efx_nic_t *enp;
int rc;
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
sa->socket_id = rte_socket_id();
sfc_log_init(sa, "init mem bar");
rc = sfc_mem_bar_init(sa);
if (rc != 0)
goto fail_mem_bar_init;
sfc_log_init(sa, "get family");
rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
&sa->family);
if (rc != 0)
goto fail_family;
sfc_log_init(sa, "family is %u", sa->family);
sfc_log_init(sa, "create nic");
rte_spinlock_init(&sa->nic_lock);
rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
&sa->mem_bar, &sa->nic_lock, &enp);
if (rc != 0)
goto fail_nic_create;
sa->nic = enp;
rc = sfc_mcdi_init(sa);
if (rc != 0)
goto fail_mcdi_init;
sfc_log_init(sa, "probe nic");
rc = efx_nic_probe(enp);
if (rc != 0)
goto fail_nic_probe;
efx_mcdi_new_epoch(enp);
sfc_log_init(sa, "reset nic");
rc = efx_nic_reset(enp);
if (rc != 0)
goto fail_nic_reset;
encp = efx_nic_cfg_get(sa->nic);
sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
if (!sa->tso)
sfc_warn(sa, "TSO support isn't available on this adapter");
sfc_log_init(sa, "estimate resource limits");
rc = sfc_estimate_resource_limits(sa);
if (rc != 0)
goto fail_estimate_rsrc_limits;
sa->txq_max_entries = encp->enc_txq_max_ndescs;
SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
rc = sfc_intr_attach(sa);
if (rc != 0)
goto fail_intr_attach;
efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM,
&sa->port.phy_adv_cap_mask);
rc = sfc_set_rss_defaults(sa);
if (rc != 0)
goto fail_set_rss_defaults;
sfc_log_init(sa, "fini nic");
efx_nic_fini(enp);
sa->state = SFC_ADAPTER_INITIALIZED;
sfc_log_init(sa, "done");
return 0;
fail_set_rss_defaults:
sfc_intr_detach(sa);
fail_intr_attach:
efx_nic_fini(sa->nic);
fail_estimate_rsrc_limits:
fail_nic_reset:
sfc_log_init(sa, "unprobe nic");
efx_nic_unprobe(enp);
fail_nic_probe:
sfc_mcdi_fini(sa);
fail_mcdi_init:
sfc_log_init(sa, "destroy nic");
sa->nic = NULL;
efx_nic_destroy(enp);
fail_nic_create:
fail_family:
sfc_mem_bar_fini(sa);
fail_mem_bar_init:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
void
sfc_detach(struct sfc_adapter *sa)
{
efx_nic_t *enp = sa->nic;
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
sfc_intr_detach(sa);
sfc_log_init(sa, "unprobe nic");
efx_nic_unprobe(enp);
sfc_mcdi_fini(sa);
sfc_log_init(sa, "destroy nic");
sa->nic = NULL;
efx_nic_destroy(enp);
sfc_mem_bar_fini(sa);
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/ecore_init_fw_funcs.c
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
enum CmInterfaceEnum {
MCM_SEC,
MCM_PRI,
UCM_SEC,
UCM_PRI,
TCM_SEC,
TCM_PRI,
YCM_SEC,
YCM_PRI,
XCM_SEC,
XCM_PRI,
NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_MEM_4KB(pq_size) \
(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
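/*
* Worked example (not part of the original code; assumes
* QM_PQ_ELEMENT_SIZE is 4 bytes): a PQ sized for 1000 CIDs needs
* QM_PQ_MEM_4KB(1000) = DIV_ROUND_UP(1001 * 4, 0x1000) = 1 4KB page,
* and its size register value is
* QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 0x100) - 1 = 3.
*/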
#define QM_INVALID_PQ_ID 0xffff
/* feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL 43750000
/* RL constants */
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - the factor of 1.01 was added after seeing only
* 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
* In this scenario the PF RL was reducing the line rate to 99% although
* the credit increment value was the correct one and FW calculated
* correct packet sizes. The reason for the inaccuracy of the RL is
* unknown at this point.
*/
/* rate in mbps */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
QM_RL_PERIOD * 101) / (8 * 100)), 1)
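/*
* Worked example (not part of the original code): for a 25000 Mbps
* rate limit,
* QM_RL_INC_VAL(25000) = MAX((25000 * 5 * 101) / 800, 1) = 15781,
* i.e. the 1.01 factor adds roughly 1% to the nominal credit
* increment of 25000 * 5 / 8 = 15625.
*/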
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
/* headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 0x2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
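/*
* Worked example (not part of the original code): with
* max_phys_tcs_per_port = 4 and TC 2 being a physical TC (tc < LB_TC),
* VOQ(1, 2, 4) = PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the pure
* loopback TC of the same port maps to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
*/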
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
/* enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
/* write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
/* set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
/* write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
/* set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
* the specified VOQ
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
/* find #lines to divide between active physical TCs */
phys_lines =
port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
/* find #lines per active physical TC */
num_tcs_in_port = 0;
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1)
num_tcs_in_port++;
}
phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* init registers per active TC */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1) {
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
ecore_cmdq_lines_voq_rt_init(p_hwfn,
voq, phys_lines_per_tc);
}
}
/* init registers for pure LB TC */
ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
}
}
}
/*
* Prepare runtime init values to allocate guaranteed BTB blocks for the
* specified port. The guaranteed BTB space is divided between the TCs as
* follows (shared space is currently not used):
* 1. Parameters:
*    B - BTB blocks for this port
*    C - number of physical TCs for this port
* 2. Calculation:
*    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
*       headroom.
*    b. B = B - 38 (remainder after global headroom allocation).
*    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
*    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
*    e. B / C blocks are allocated for each physical TC.
* Assumptions:
* - MTU is up to 9700 bytes (38 blocks)
* - All TCs are considered symmetrical (same rate and packet size)
* - No optimization for lossy TC (all are considered lossless). Shared space
*   is not enabled and allocated for each TC.
*/
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
u32 usable_blocks, pure_lb_blocks, phys_blocks;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
/* subtract headroom blocks */
usable_blocks =
port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
/* find blocks per physical TC. use factor to avoid floating arithmetic */
num_tcs_in_port = 0;
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1)
num_tcs_in_port++;
pure_lb_blocks =
(usable_blocks * BTB_PURE_LB_FACTOR) /
(num_tcs_in_port *
BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
pure_lb_blocks =
OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
pure_lb_blocks / BTB_PURE_LB_FACTOR);
phys_blocks =
(usable_blocks -
pure_lb_blocks) /
num_tcs_in_port;
/* init physical TCs */
for (tc = 0;
tc < NUM_OF_PHYS_TCS;
tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1) {
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
}
/* init pure LB TC */
STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(
LB_VOQ(port_id)), pure_lb_blocks);
}
}
}
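/*
* Worked example (not part of the original code; block counts are
* assumed): a port with 1000 guaranteed BTB blocks and 2 active
* physical TCs keeps 1000 - 38 = 962 usable blocks after headroom.
* The pure LB VOQ gets
* MAX(38, ((962 * 10) / (2 * 10 + 7)) / 10) = MAX(38, 35) = 38 blocks,
* and each physical TC then gets (962 - 38) / 2 = 462 blocks.
*/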
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
u16 i, pq_id, pq_group;
u16 num_pqs = num_pf_pqs + num_vf_pqs;
u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
/* set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
/* set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(num_vf_cids));
/* go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
struct qm_rf_pq_map tx_pq_map;
u8 voq =
VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
bool is_vf_pq = (i >= num_pf_pqs);
/* added to avoid compilation warning */
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
bool rl_valid = pq_params[i].rl_valid &&
pq_params[i].vport_id < max_qm_global_rls;
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
u16 first_tx_pq_id =
vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
/* create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
/* check RL ID */
if (pq_params[i].rl_valid && pq_params[i].vport_id >=
max_qm_global_rls)
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config");
/* fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
rl_valid ? pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
pq_params[i].wrr_group);
/* write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
/* set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
/* check if VF PQ */
if (is_vf_pq) {
/* if PQ is associated with a VF, add indication to PQ
* VF mask
*/
tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
(1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
}
}
/* store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i])
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
i, tx_pq_vf_mask[i]);
}
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
u16 i, pq_id;
/* a single other PQ grp is used in each PF, where PQ group i is used in PF i */
u16 pq_group = pf_id;
u32 pq_size = num_pf_cids + num_tids;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
u32 mem_addr_4kb = base_mem_addr_4kb;
/* map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
/* set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
/* set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
mem_addr_4kb += pq_mem_4kb;
}
}
/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u16 pf_wfq,
u8 max_phys_tcs_per_port,
u16 num_tx_pqs,
struct init_qm_pq_params *pq_params)
{
u16 i;
u32 inc_val;
u32 crd_reg_offset =
(pf_id <
MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
return -1;
}
for (i = 0; i < num_tx_pqs; i++) {
u8 voq =
VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
* error.
*/
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
u8 tc, i;
u32 inc_val;
/* go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
if (vport_params[i].vport_wfq) {
inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT WFQ weight config");
return -1;
}
/* each VPORT can have several VPORT PQ IDs for
* different TCs
*/
for (tc = 0; tc < NUM_OF_TCS; tc++) {
u16 vport_pq_id =
vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET
+ vport_pq_id, inc_val);
}
}
}
}
return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
* Return -1 on error.
*/
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter configuration");
return -1;
}
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 reg_val, i;
for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
i++) {
OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
/* check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
"Timeout waiting for QM SDM cmd ready signal\n");
return false;
}
return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
bool pf_rl_en,
bool pf_wfq_en,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
/* init AFullOprtnstcCrdMask */
u32 mask =
(QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
(pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
(vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
(pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
(vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
(QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
(QM_OPPOR_PQ_EMPTY_DEF <<
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* enable/disable PF RL */
ecore_enable_pf_rl(p_hwfn, pf_rl_en);
/* enable/disable PF WFQ */
ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
/* enable/disable VPORT RL */
ecore_enable_vport_rl(p_hwfn, vport_rl_en);
/* enable/disable VPORT WFQ */
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
/* init PBF CMDQ line credit */
ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
/* init BTB blocks in PBF */
ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
u8 tc, i;
u32 other_mem_size_4kb =
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
/* clear first Tx PQ ID array for each VPORT */
for (i = 0; i < num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
num_tids, 0);
#endif
/* map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
max_phys_tcs_per_port, is_first_pf, num_pf_cids,
num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
start_vport, other_mem_size_4kb, pq_params,
vport_params);
/* init PF WFQ */
if (pf_wfq)
if (ecore_pf_wfq_rt_init
(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
num_pf_pqs + num_vf_pqs, pq_params) != 0)
return -1;
/* init PF RL */
if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
return -1;
/* set VPORT WFQ */
if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
return -1;
/* set VPORT RL */
if (ecore_vport_rl_rt_init
(p_hwfn, start_vport, num_vports, vport_params) != 0)
return -1;
return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
return -1;
}
ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
return -1;
}
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
u8 tc;
u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT WFQ weight configuration");
return -1;
}
for (tc = 0; tc < NUM_OF_TCS; tc++) {
u16 vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
ecore_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
}
return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter configuration");
return -1;
}
inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration");
return -1;
}
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_release_cmd,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
/* set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
/* go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
/* if last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
pq_mask);
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
pq_id / QM_STOP_PQ_MASK_WIDTH);
if (!ecore_send_qm_cmd
(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
cmd_arr[1]))
return false;
pq_mask = 0;
}
}
return true;
}
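/*
* Worked example (not part of the original code): stopping PQs 30..33
* crosses a 32-bit mask boundary, so two commands are sent - one for
* group 0 with bits 30 and 31 set (the mask is flushed at PQ 31, the
* last bit of the group) and one for group 1 with bits 0 and 1 set
* (flushed at PQ 33, the last requested PQ).
*/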
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
/* NIG: RL constants */
#define NIG_RL_BASE_TYPE 1 /* byte base type */
#define NIG_RL_PERIOD 1 /* in us */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
#define NIG_RL_MAX_VAL(inc_val, mtu) \
(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
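/*
* Worked example (not part of the original code; the MTU is assumed):
* a 10000 Mbps limit gives NIG_RL_INC_VAL(10000) = 10000 / 8 = 1250
* bytes per 1 us period, and with a 9600-byte MTU the bucket bound is
* NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200 bytes.
*/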
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req, bool is_lb)
{
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
u8 tc_client_offset =
is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
u32 min_weight = 0xffffffff;
u32 tc_weight_base_addr =
is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
u32 tc_weight_addr_diff =
is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
u32 tc_bound_base_addr =
is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
u32 tc_bound_addr_diff =
is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
for (tc = 0; tc < num_tc; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
/* update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
if (tc_req->use_wfq) {
/* update WFQ map */
wfq_tc_map |= (1 << tc);
/* find minimal weight */
if (tc_req->weight < min_weight)
min_weight = tc_req->weight;
}
}
/* write SP map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
NIG_REG_TX_ARB_CLIENT_IS_STRICT,
(sp_tc_map << tc_client_offset));
/* write WFQ map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
(wfq_tc_map << tc_client_offset));
/* write WFQ weights */
for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
if (tc_req->use_wfq) {
/* translate weight to bytes */
u32 byte_weight =
(NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
min_weight;
/* write WFQ weight */
ecore_wr(p_hwfn, p_ptt,
tc_weight_base_addr +
tc_weight_addr_diff * tc_client_offset,
byte_weight);
/* write WFQ upper bound */
ecore_wr(p_hwfn, p_ptt,
tc_bound_base_addr +
tc_bound_addr_diff * tc_client_offset,
NIG_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
}
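/*
* Worked example (not part of the original code; weights and MTU are
* assumed): WFQ TCs with weights 1, 2 and 4 have min_weight = 1, so
* their byte weights become 1600, 3200 and 6400. With a 1500-byte MTU
* the upper bounds NIG_ETS_UP_BOUND(w, 1500) = 2 * MAX(w, 1500) are
* then 3200, 6400 and 12800 respectively.
*/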
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req)
{
u8 tc;
u32 ctrl, inc_val, reg_offset;
/* disable global MAC+LB RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
/* configure and enable global MAC+LB RL */
if (req->lb_mac_rate) {
/* configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
/* enable */
ctrl |=
1 <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
}
/* disable global LB-only RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
/* configure and enable global LB-only RL */
if (req->lb_rate) {
/* configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
/* enable */
ctrl |=
1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
}
/* per-TC RLs */
for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
tc++, reg_offset += 4) {
/* disable TC RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
/* configure and enable TC RL */
if (req->tc_rate[tc]) {
/* configure */
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
reg_offset, NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
reg_offset, inc_val);
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
/* enable */
ctrl |=
1 <<
NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
ctrl);
}
}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req)
{
u8 pri, tc;
u32 pri_tc_mask = 0;
u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
if (req->pri[pri].valid) {
pri_tc_mask |=
(req->pri[pri].
tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
}
}
/* write priority -> TC mask */
ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
/* write TC -> priority mask */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
tc_pri_mask[tc]);
ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
tc_pri_mask[tc]);
}
}
/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
(2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
u32 min_weight = 0xffffffff;
u32 tc_weight_addr_diff =
PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
u32 tc_bound_addr_diff =
PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
/* update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
if (tc_req->use_wfq) {
/* update WFQ map */
wfq_tc_map |= (1 << tc);
/* find minimal weight */
if (tc_req->weight < min_weight)
min_weight = tc_req->weight;
}
}
/* write SP map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
/* write WFQ map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
wfq_tc_map);
/* write WFQ weights */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
if (tc_req->use_wfq) {
/* translate weight to bytes */
u32 byte_weight =
(PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
min_weight;
/* write WFQ weight */
ecore_wr(p_hwfn, p_ptt,
PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
tc * tc_weight_addr_diff, byte_weight);
/* write WFQ upper bound */
ecore_wr(p_hwfn, p_ptt,
PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
tc * tc_bound_addr_diff,
PRS_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
}
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128 /* in bytes */
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/*
* temporary big RAM allocation - should be updated
*/
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
u8 port, active_ports = 0;
u32 active_port_blocks, reg_offset = 0;
u32 tc_headroom_blocks =
(u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
u32 min_pkt_size_blocks =
(u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
u32 total_blocks =
ECORE_IS_K2(p_hwfn->
p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
BRB_TOTAL_RAM_BLOCKS_BB;
/* find number of active ports */
for (port = 0; port < MAX_NUM_PORTS; port++)
if (req->num_active_tcs[port])
active_ports++;
active_port_blocks = (u32)(total_blocks / active_ports);
for (port = 0; port < req->max_ports_per_engine; port++) {
/* calculate per-port sizes */
u32 tc_guaranteed_blocks =
(u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
u32 port_blocks =
req->num_active_tcs[port] ? active_port_blocks : 0;
u32 port_guaranteed_blocks =
req->num_active_tcs[port] * tc_guaranteed_blocks;
u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
u32 full_xoff_th =
req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
u32 pause_xoff_th = tc_headroom_blocks;
u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
u8 tc;
/* init total size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
port_blocks);
/* init shared size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
port_shared_blocks);
for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
/* clear init values for non-active TCs */
if (tc == req->num_active_tcs[port]) {
tc_guaranteed_blocks = 0;
full_xoff_th = 0;
full_xon_th = 0;
pause_xoff_th = 0;
pause_xon_th = 0;
}
/* init guaranteed size per TC */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_TC_GUARANTIED_0 + reg_offset,
tc_guaranteed_blocks);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
BRB_HYST_BLOCKS);
/* init pause/full thresholds per physical TC - for loopback traffic */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
reg_offset, full_xon_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
reg_offset, pause_xoff_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
/* init pause/full thresholds per physical TC - for main traffic */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
reg_offset, full_xon_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
reg_offset, pause_xoff_th);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
}
}
}
/* In MF, should be called once per engine to set the EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
/* update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* update NIG register */
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* update PBF register */
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
/* In MF, should be called once per port to set the EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
/* update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
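/*
* Editorial note (not part of the original code): the macro above first
* clears bit <offset> in <var> and then sets it again only if <enable>
* is true; e.g. for var = 0x01, SET_TUNNEL_TYPE_ENABLE_BIT(var, 3, 1)
* yields 0x09, while SET_TUNNEL_TYPE_ENABLE_BIT(var, 0, 0) on the same
* starting value yields 0x00.
*/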
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
/* update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
/* update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
/* update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool vxlan_enable)
{
u32 reg_val;
/* update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
/* update DORQ register */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable)
{
u32 reg_val;
/* update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
eth_gre_enable);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
eth_gre_enable);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
/* update DORQ registers */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
eth_gre_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
/* update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
/* update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
/* update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
/* update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
eth_geneve_enable);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
ip_geneve_enable ? 1 : 0);
/* EDPM with geneve tunnel not supported in BB_B0 */
if (ECORE_IS_BB_B0(p_hwfn->p_dev))
return;
/* update DORQ registers */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
eth_geneve_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR (0x0)
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
/* set RFS event ID to be awakened in Tstorm by PRS */
u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
bool ipv6)
{
u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
union gft_cam_line_union camLine;
struct gft_ram_line ramLine;
u32 *ramLinePointer = (u32 *)&ramLine;
int i;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - ipv4 or ipv6");
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - udp or tcp");
/* set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
/* Configure Registers for RFS mode */
/* enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
/* do not load context, only cid, in PRS on match */
ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
camLine.cam_line_mapped.camline = 0;
/* cam line is now valid!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
/* filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
if (tcp)
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camLine.cam_line_mapped.camline);
camLine.cam_line_mapped.camline =
ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* write line to RAM - compare to filter 4 tuple */
ramLine.low32bits = 0;
ramLine.high32bits = 0;
SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
/* each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * pf_id +
i * REG_SIZE, *(ramLinePointer + i));
/* set default profile so that no filter match will happen */
ramLine.low32bits = 0xffff;
ramLine.high32bits = 0xffff;
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
i * REG_SIZE, *(ramLinePointer + i));
}
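/*
 * Illustrative call (a sketch, not part of the original source): to
 * restrict GFT/RFS matching to TCP-over-IPv4 flows for a given PF,
 * pass tcp=true, udp=false, ipv4=true, ipv6=false:
 *
 *   ecore_set_rfs_mode_enable(p_hwfn, p_ptt, pf_id,
 *                             true, false, true, false);
 */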
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mode,
bool runtime_init)
{
u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
u32 msdm_vf_offset_mask;
if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
msdm_vf_size_log += 1;
else if (mode == VF_ZONE_SIZE_MODE_QUAD)
msdm_vf_size_log += 2;
msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
if (runtime_init) {
STORE_RT_REG(p_hwfn,
PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
msdm_vf_size_log);
STORE_RT_REG(p_hwfn,
PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
msdm_vf_offset_mask);
} else {
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
}
}
/* get mstorm queue statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
(stat_cnt_id > MAX_NUM_PFS)) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
(stat_cnt_id - MAX_NUM_PFS);
else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
(stat_cnt_id - MAX_NUM_PFS);
}
return offset;
}
/* get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
u8 vf_id,
u8 vf_queue_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
}
return offset;
}
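/*
 * Worked example (illustrative): with a default VF zone of
 * 2^MSTORM_VF_ZONE_DEFAULT_SIZE_LOG bytes, VF_ZONE_SIZE_MODE_DOUBLE
 * makes each VF zone twice that size, so the offsets above grow by one
 * extra default-zone per VF index (the 1x term), while
 * VF_ZONE_SIZE_MODE_QUAD grows them by three extra default-zones per
 * VF index (the 3x term).
 */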
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_cmdline/cmdline_parse.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2009, <NAME> <<EMAIL>>
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University of California, Berkeley nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CMDLINE_PARSE_H_
#define _CMDLINE_PARSE_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef offsetof
#define offsetof(type, field) ((size_t) &( ((type *)0)->field) )
#endif
/* return status for parsing */
#define CMDLINE_PARSE_SUCCESS 0
#define CMDLINE_PARSE_AMBIGUOUS -1
#define CMDLINE_PARSE_NOMATCH -2
#define CMDLINE_PARSE_BAD_ARGS -3
/* return status for completion */
#define CMDLINE_PARSE_COMPLETE_FINISHED 0
#define CMDLINE_PARSE_COMPLETE_AGAIN 1
#define CMDLINE_PARSE_COMPLETED_BUFFER 2
/* maximum buffer size for parsed result */
#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
/* maximum number of dynamic tokens */
#define CMDLINE_PARSE_DYNAMIC_TOKENS 128
/**
* Stores a pointer to the ops struct, and the offset: the place to
* write the parsed result in the destination structure.
*/
struct cmdline_token_hdr {
struct cmdline_token_ops *ops;
unsigned int offset;
};
typedef struct cmdline_token_hdr cmdline_parse_token_hdr_t;
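/*
 * Illustrative token declaration (a sketch, not part of this header;
 * TOKEN_STRING_INITIALIZER and cmdline_parse_token_string_t come from
 * cmdline_parse_string.h, and the result struct is assumed for the
 * example):
 *
 *   struct cmd_show_result {
 *       cmdline_fixed_string_t show;
 *   };
 *   static cmdline_parse_token_string_t cmd_show_tok =
 *       TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");
 */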
/**
* A token is defined by this structure.
*
* parse() takes the token as first argument, then the source buffer
* starting at the token we want to parse. The 3rd arg is a pointer
* where we store the parsed data (as binary). It returns the number of
* parsed chars on success and a negative value on error.
*
* complete_get_nb() returns the number of possible values for this
* token if completion is possible. If it is NULL or if it returns 0,
* no completion is possible.
*
* complete_get_elt() copies into dstbuf (whose size is given as a
* parameter) the i-th possible completion for this token. It returns 0
* on success or a negative value on error.
*
* get_help() fills the dstbuf with the help for the token. It returns
* -1 on error and 0 on success.
*/
struct cmdline_token_ops {
/** parse(token ptr, buf, res ptr, buf len) */
int (*parse)(cmdline_parse_token_hdr_t *, const char *, void *,
unsigned int);
/** return the num of possible choices for this token */
int (*complete_get_nb)(cmdline_parse_token_hdr_t *);
/** return the elt x for this token (token, idx, dstbuf, size) */
int (*complete_get_elt)(cmdline_parse_token_hdr_t *, int, char *,
unsigned int);
/** get help for this token (token, dstbuf, size) */
int (*get_help)(cmdline_parse_token_hdr_t *, char *, unsigned int);
};
struct cmdline;
/**
* Stores an instruction: a pointer to a callback function (and its
* parameter) that is called when the instruction is parsed, a help
* string, and the list of tokens composing this instruction.
*
* When no tokens are defined (tokens[0] == NULL), they are retrieved
* dynamically by calling f() as follows:
*
* f((struct cmdline_token_hdr **)&token_hdr,
* NULL,
* (struct cmdline_token_hdr *[])tokens);
*
* The address of the resulting token is expected at the location pointed
* to by the first argument. It can be set to NULL to end the list.
*
* The cmdline argument (struct cmdline *) is always NULL.
*
* The last argument points to the NULL-terminated list of dynamic tokens
* defined so far. Since token_hdr points to an index of that list, the
* current index can be derived as follows:
*
* int index = token_hdr - &(*tokens)[0];
*/
struct cmdline_inst {
/* f(parsed_struct, data) */
void (*f)(void *, struct cmdline *, void *);
void *data;
const char *help_str;
cmdline_parse_token_hdr_t *tokens[];
};
typedef struct cmdline_inst cmdline_parse_inst_t;
/**
* A context is identified by its name and contains a list of
* instructions.
*/
typedef cmdline_parse_inst_t *cmdline_parse_ctx_t;
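/*
 * Sketch of an instruction and context built from the types above (the
 * callback body and the cmd_show_tok token from the earlier example are
 * assumptions):
 *
 *   static void cmd_show_parsed(void *parsed, struct cmdline *cl,
 *                               void *data);
 *   static cmdline_parse_inst_t cmd_show = {
 *       .f = cmd_show_parsed,
 *       .data = NULL,
 *       .help_str = "show statistics",
 *       .tokens = { (void *)&cmd_show_tok, NULL, },
 *   };
 *   static cmdline_parse_ctx_t main_ctx[] = { &cmd_show, NULL };
 */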
/**
* Try to parse a buffer according to the specified context. The
* argument buf must end with "\n\0". The function returns
* CMDLINE_PARSE_AMBIGUOUS, CMDLINE_PARSE_NOMATCH or
* CMDLINE_PARSE_BAD_ARGS on error. Otherwise it calls the associated
* function (defined in the context) and returns 0
* (CMDLINE_PARSE_SUCCESS).
*/
int cmdline_parse(struct cmdline *cl, const char *buf);
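/*
 * Hedged usage sketch (cmdline_stdin_new() is declared in
 * cmdline_socket.h; main_ctx is the context from the sketch above):
 *
 *   struct cmdline *cl = cmdline_stdin_new(main_ctx, "prompt> ");
 *   int ret = cmdline_parse(cl, "show\n");
 *   if (ret < 0)
 *       printf("parse failed: %d\n", ret);
 */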
/**
* complete() must be called with *state==0 (try to complete) or
* with *state==-1 (just display choices), then called without
* modifying *state until it returns CMDLINE_PARSE_COMPLETED_BUFFER or
* CMDLINE_PARSE_COMPLETE_FINISHED.
*
* It returns < 0 on error.
*
* Otherwise it returns:
* - CMDLINE_PARSE_COMPLETED_BUFFER on completion (one possible
* choice). In this case, the chars are appended to the dst buffer.
* - CMDLINE_PARSE_COMPLETE_AGAIN if there are several possible
* choices. In this case, you must call the function again,
* keeping the value of state intact.
* - CMDLINE_PARSE_COMPLETE_FINISHED when the iteration is
* finished. The dst is not valid for this last call.
*
* The returned dst buf ends with \0.
*/
int cmdline_complete(struct cmdline *cl, const char *buf, int *state,
char *dst, unsigned int size);
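/*
 * Illustrative completion loop following the calling convention above
 * (a sketch, not part of the original header; cl is the cmdline from
 * the previous example):
 *
 *   int state = 0;
 *   char dst[128];
 *   int ret;
 *   do {
 *       ret = cmdline_complete(cl, "sh", &state, dst, sizeof(dst));
 *       if (ret == CMDLINE_PARSE_COMPLETE_AGAIN)
 *           printf("candidate: %s\n", dst);
 *   } while (ret == CMDLINE_PARSE_COMPLETE_AGAIN);
 */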
/* return true if(!c || iscomment(c) || isblank(c) ||
* isendofline(c)) */
int cmdline_isendoftoken(char c);
/* return true if(!c || iscomment(c) || isendofline(c)) */
int cmdline_isendofcommand(char c);
#ifdef __cplusplus
}
#endif
#endif /* _CMDLINE_PARSE_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/mlx5/mlx5_trigger.c
|
/*-
* BSD LICENSE
*
* Copyright 2015 6WIND S.A.
* Copyright 2015 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of 6WIND S.A. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
/**
* DPDK callback to start the device.
*
* Simulate device start by attaching all configured flows.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
int err;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
if (priv->started) {
priv_unlock(priv);
return 0;
}
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
err = priv_create_hash_rxqs(priv);
if (!err)
err = priv_rehash_flows(priv);
if (!err)
priv->started = 1;
else {
ERROR("%p: an error occurred while configuring hash RX queues:"
" %s",
(void *)priv, strerror(err));
goto error;
}
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
priv_fdir_enable(priv);
err = priv_flow_start(priv);
if (err) {
priv->started = 0;
ERROR("%p: an error occurred while configuring flows:"
" %s",
(void *)priv, strerror(err));
goto error;
}
priv_dev_interrupt_handler_install(priv, dev);
priv_xstats_init(priv);
priv_unlock(priv);
return 0;
error:
/* Rollback. */
priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_flow_stop(priv);
priv_unlock(priv);
return -err;
}
/**
* DPDK callback to stop the device.
*
* Simulate device stop by detaching all configured flows.
*
* @param dev
* Pointer to Ethernet device structure.
*/
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
if (mlx5_is_secondary())
return;
priv_lock(priv);
if (!priv->started) {
priv_unlock(priv);
return;
}
DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
priv_flow_stop(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
priv->started = 0;
priv_unlock(priv);
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/sfc/sfc_ev.h
|
/*-
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_EV_H_
#define _SFC_EV_H_
#include "efx.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Number of entries in the management event queue */
#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
struct sfc_adapter;
struct sfc_rxq;
struct sfc_txq;
enum sfc_evq_state {
SFC_EVQ_UNINITIALIZED = 0,
SFC_EVQ_INITIALIZED,
SFC_EVQ_STARTING,
SFC_EVQ_STARTED,
SFC_EVQ_NSTATES
};
struct sfc_evq {
/* Used on datapath */
efx_evq_t *common;
unsigned int read_ptr;
boolean_t exception;
efsys_mem_t mem;
struct sfc_rxq *rxq;
struct sfc_txq *txq;
/* Not used on datapath */
struct sfc_adapter *sa;
unsigned int evq_index;
enum sfc_evq_state init_state;
};
struct sfc_evq_info {
/* Maximum number of EVQ entries taken into account when buffer
* table space is allocated.
*/
unsigned int max_entries;
/* Real number of EVQ entries, less than or equal to max_entries */
unsigned int entries;
/* Event queue creation flags */
uint32_t flags;
/* NUMA-aware EVQ data structure used on datapath */
struct sfc_evq *evq;
};
/*
* The functions below define the mapping from event queues to
* transmit/receive queues and vice versa.
*/
static inline unsigned int
sfc_ev_qcount(struct sfc_adapter *sa)
{
const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
/*
* One management EVQ for global events.
* Own EVQ for each Tx and Rx queue.
*/
return 1 + dev_data->nb_rx_queues + dev_data->nb_tx_queues;
}
static inline unsigned int
sfc_evq_max_entries(struct sfc_adapter *sa, unsigned int sw_index)
{
unsigned int max_entries;
if (sw_index == sa->mgmt_evq_index)
max_entries = SFC_MGMT_EVQ_ENTRIES;
else if (sw_index <= sa->eth_dev->data->nb_rx_queues)
max_entries = EFX_RXQ_MAXNDESCS;
else
max_entries = efx_nic_cfg_get(sa->nic)->enc_txq_max_ndescs;
return max_entries;
}
static inline unsigned int
sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
unsigned int rxq_sw_index)
{
return 1 + rxq_sw_index;
}
static inline unsigned int
sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
{
return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
}
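/*
 * Worked example of the mapping above (illustrative): with 4 Rx and
 * 2 Tx queues, sfc_ev_qcount() returns 7; EVQ 0 is the management
 * queue, EVQs 1..4 serve Rx queues 0..3, and EVQs 5..6 serve Tx
 * queues 0..1.
 */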
int sfc_ev_init(struct sfc_adapter *sa);
void sfc_ev_fini(struct sfc_adapter *sa);
int sfc_ev_start(struct sfc_adapter *sa);
void sfc_ev_stop(struct sfc_adapter *sa);
int sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
unsigned int entries, int socket_id);
void sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index);
int sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index);
void sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index);
int sfc_ev_qprime(struct sfc_evq *evq);
void sfc_ev_qpoll(struct sfc_evq *evq);
void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
#ifdef __cplusplus
}
#endif
#endif /* _SFC_EV_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/bnxt/bnxt_hwrm.c
|
/*-
* BSD LICENSE
*
* Copyright(c) Broadcom Limited.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Broadcom Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include <rte_io.h>
#define HWRM_CMD_TIMEOUT 2000
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
* fails (i.e. a timeout), and a positive non-zero HWRM error code if the
* HWRM command was rejected by the ChiMP.
*/
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
uint32_t msg_len)
{
unsigned int i;
struct input *req = msg;
struct output *resp = bp->hwrm_cmd_resp_addr;
uint32_t *data = msg;
uint8_t *bar;
uint8_t *valid;
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
rte_write32(*data, bar);
data++;
}
/* Zero the rest of the request space */
for (; i < bp->max_req_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
rte_write32(0, bar);
}
/* Ring channel doorbell */
bar = (uint8_t *)bp->bar0 + 0x100;
rte_write32(1, bar);
/* Poll for the valid bit */
for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
/* Sanity check on the resp->resp_len */
rte_rmb();
if (resp->resp_len && resp->resp_len <=
bp->max_resp_len) {
/* Last byte of resp contains the valid key */
valid = (uint8_t *)resp + resp->resp_len - 1;
if (*valid == HWRM_RESP_VALID_KEY)
break;
}
rte_delay_us(600);
}
if (i >= HWRM_CMD_TIMEOUT) {
RTE_LOG(ERR, PMD, "Error sending msg %x\n",
req->req_type);
goto err_ret;
}
return 0;
err_ret:
return -1;
}
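/*
 * Worst-case wait implied by the polling loop above (illustrative
 * arithmetic): HWRM_CMD_TIMEOUT (2000) iterations * 600 us delay is
 * roughly 1.2 s before the request is reported as failed.
 */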
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
int rc;
rte_spinlock_lock(&bp->hwrm_lock);
rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
rte_spinlock_unlock(&bp->hwrm_lock);
return rc;
}
#define HWRM_PREP(req, type, cr, resp) \
memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
req.req_type = rte_cpu_to_le_16(HWRM_##type); \
req.cmpl_ring = rte_cpu_to_le_16(cr); \
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
req.target_id = rte_cpu_to_le_16(0xffff); \
req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
#define HWRM_CHECK_RESULT \
{ \
if (rc) { \
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
return rc; \
} \
if (resp->error_code) { \
rc = rte_le_to_cpu_16(resp->error_code); \
RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
return rc; \
} \
}
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mask = 0;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t mask = 0;
HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
/* FIXME add multicast flag, when multicast adding options is supported
* by ethtool.
*/
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
mask);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct bnxt_filter_info *filter)
{
int rc = 0;
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
filter->fw_l2_filter_id = -1;
return 0;
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
struct bnxt_vnic_info *vnic,
struct bnxt_filter_info *filter)
{
int rc = 0;
struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
req.flags = rte_cpu_to_le_32(filter->flags);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
memcpy(req.l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
memcpy(req.l2_addr_mask, filter->l2_addr_mask,
ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
req.enables = rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
int rc;
struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
memcpy(req.encap_request, fwd_cmd,
sizeof(req.encap_request));
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, FUNC_QCAPS, -1, resp);
req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
pf->fw_fid = rte_le_to_cpu_32(resp->fid);
pf->port_id = resp->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
} else {
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = rte_le_to_cpu_32(resp->fid);
memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
}
return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_reset_input req = {.req_type = 0 };
struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, FUNC_RESET, -1, resp);
req.enables = rte_cpu_to_le_32(0);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
uint32_t *vf_req_fwd)
{
int rc;
struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
req.flags = flags;
req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
req.ver_maj = RTE_VER_YEAR;
req.ver_min = RTE_VER_MONTH;
req.ver_upd = RTE_VER_MINOR;
memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
bp->flags |= BNXT_FLAG_REGISTERED;
return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
int rc = 0;
struct hwrm_ver_get_input req = {.req_type = 0 };
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t my_version;
uint32_t fw_version;
uint16_t max_resp_len;
char type[RTE_MEMZONE_NAMESIZE];
HWRM_PREP(req, VER_GET, -1, resp);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
/*
* Hold the lock since we may be adjusting the response pointers.
*/
rte_spinlock_lock(&bp->hwrm_lock);
rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
my_version = HWRM_VERSION_MAJOR << 16;
my_version |= HWRM_VERSION_MINOR << 8;
my_version |= HWRM_VERSION_UPDATE;
fw_version = resp->hwrm_intf_maj << 16;
fw_version |= resp->hwrm_intf_min << 8;
fw_version |= resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}
if (my_version != fw_version) {
RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
RTE_LOG(INFO, PMD,
"Firmware API version is newer than driver.\n");
RTE_LOG(INFO, PMD,
"The driver may be missing features.\n");
} else {
RTE_LOG(INFO, PMD,
"Firmware API version is older than driver.\n");
RTE_LOG(INFO, PMD,
"Not all driver features may be functional.\n");
}
}
if (bp->max_req_len > resp->max_req_win_len) {
RTE_LOG(ERR, PMD, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = resp->max_req_win_len;
max_resp_len = resp->max_resp_len;
if (bp->max_resp_len != max_resp_len) {
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
rte_free(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
if (bp->hwrm_cmd_resp_addr == NULL) {
rc = -ENOMEM;
goto error;
}
bp->hwrm_cmd_resp_dma_addr =
rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
bp->max_resp_len = max_resp_len;
}
error:
rte_spinlock_unlock(&bp->hwrm_lock);
return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
int rc;
struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (!(bp->flags & BNXT_FLAG_REGISTERED))
return 0;
HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
req.flags = flags;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
bp->flags &= ~BNXT_FLAG_REGISTERED;
return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
int rc = 0;
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
if (conf->link_up) {
req.flags = rte_cpu_to_le_32(conf->phy_flags);
req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
/*
* Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
* any auto mode, even "none".
*/
if (!conf->link_speed) {
req.auto_mode |= conf->auto_mode;
enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
req.auto_link_speed_mask = conf->auto_link_speed_mask;
enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
req.auto_link_speed = bp->link_info.auto_link_speed;
enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
}
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
req.auto_pause = conf->auto_pause;
req.force_pause = conf->force_pause;
/* Set force_pause if there is no auto or if there is a force */
if (req.auto_pause && !req.force_pause)
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
else
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
req.enables = rte_cpu_to_le_32(enables);
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
RTE_LOG(INFO, PMD, "Force Link Down\n");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
struct bnxt_link_info *link_info)
{
int rc = 0;
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
link_info->link_up = 1;
link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
} else {
link_info->link_up = 0;
link_info->link_speed = 0;
}
link_info->duplex = resp->duplex;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
link_info->force_pause = resp->force_pause;
link_info->auto_mode = resp->auto_mode;
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
int rc = 0;
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
#define GET_QUEUE_INFO(x) \
bp->cos_queue[x].id = resp->queue_id##x; \
bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
GET_QUEUE_INFO(0);
GET_QUEUE_INFO(1);
GET_QUEUE_INFO(2);
GET_QUEUE_INFO(3);
GET_QUEUE_INFO(4);
GET_QUEUE_INFO(5);
GET_QUEUE_INFO(6);
GET_QUEUE_INFO(7);
return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
uint32_t stats_ctx_id)
{
int rc = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_ALLOC, -1, resp);
req.enables = rte_cpu_to_le_32(0);
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
/* Association of ring index with doorbell index */
req.logical_id = rte_cpu_to_le_16(map_index);
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
req.queue_id = bp->cos_queue[0].id;
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
req.cmpl_ring_id =
rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
req.length = rte_cpu_to_le_32(ring->ring_size);
req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
req.ring_type = ring_type;
/*
* TODO: Some HWRM versions crash with
* HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
*/
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
req.length = rte_cpu_to_le_32(ring->ring_size);
break;
default:
RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
ring_type);
return -1;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
return rc;
default:
RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
return rc;
}
}
ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
struct bnxt_ring *ring, uint32_t ring_type)
{
int rc;
struct hwrm_ring_free_input req = {.req_type = 0 };
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_FREE, -1, resp);
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
int rc = 0;
struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
bp->grp_info[idx].fw_grp_id =
rte_le_to_cpu_16(resp->ring_group_id);
return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
int rc;
struct hwrm_ring_grp_free_input req = {.req_type = 0 };
struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_GRP_FREE, -1, resp);
req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc = 0;
struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
req.update_period_ms = rte_cpu_to_le_32(1000);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
int rc;
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0, i, j;
struct hwrm_vnic_alloc_input req = { 0 };
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
RTE_LOG(ERR, PMD,
"Not enough ring groups avail:%x req:%x\n", j,
(vnic->end_grp_id - vnic->start_grp_id) + 1);
break;
}
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
}
vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
HWRM_PREP(req, VNIC_ALLOC, -1, resp);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, VNIC_CFG, -1, resp);
/* Only RSS is supported for now; COS & LB are TBD */
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.dflt_ring_grp =
rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
req.cos_rule = rte_cpu_to_le_16(0xffff);
req.lb_rule = rte_cpu_to_le_16(0xffff);
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE);
if (vnic->func_default)
req.flags = 1;
if (vnic->vlan_strip)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_free_input req = {.req_type = 0 };
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return rc;
HWRM_PREP(req, VNIC_FREE, -1, resp);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
/*
* HWRM utility functions
*/
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
unsigned int i;
int rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
if (i >= bp->rx_cp_nr_rings) {
txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
cpr = txq->cp_ring;
} else {
rxq = bp->rx_queues[i];
cpr = rxq->cp_ring;
}
rc = bnxt_hwrm_stat_clear(bp, cpr);
if (rc)
return rc;
}
return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
int rc;
unsigned int i;
struct bnxt_cp_ring_info *cpr;
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings)
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
else
cpr = bp->rx_queues[i]->cp_ring;
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
if (rc)
return rc;
}
}
return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
unsigned int i;
int rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings) {
txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
cpr = txq->cp_ring;
} else {
rxq = bp->rx_queues[i];
cpr = rxq->cp_ring;
}
rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
if (rc)
return rc;
}
return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
uint16_t i;
uint32_t rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
unsigned int idx = i + 1;
if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
RTE_LOG(ERR, PMD,
"Attempt to free invalid ring group %d\n",
idx);
continue;
}
rc = bnxt_hwrm_ring_grp_free(bp, idx);
if (rc)
return rc;
}
return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
unsigned int i;
int rc = 0;
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
unsigned int idx = bp->rx_cp_nr_rings + i + 1;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
HWRM_RING_FREE_INPUT_RING_TYPE_TX);
ring->fw_ring_id = INVALID_HW_RING_ID;
memset(txr->tx_desc_ring, 0,
txr->tx_ring_struct->ring_size *
sizeof(*txr->tx_desc_ring));
memset(txr->tx_buf_ring, 0,
txr->tx_ring_struct->ring_size *
sizeof(*txr->tx_buf_ring));
txr->tx_prod = 0;
txr->tx_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr, idx);
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
unsigned int idx = i + 1;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->rx_desc_ring, 0,
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_desc_ring));
memset(rxr->rx_buf_ring, 0,
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_buf_ring));
rxr->rx_prod = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr, idx);
}
/* Default completion ring */
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr, 0);
}
return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
uint16_t i;
uint32_t rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
unsigned int idx = i + 1;
if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
continue;
rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
if (rc)
return rc;
}
return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
/* Release the HWRM response buffer */
rte_free(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_addr = NULL;
bp->hwrm_cmd_resp_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
struct rte_pci_device *pdev = bp->pdev;
char type[RTE_MEMZONE_NAMESIZE];
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
bp->max_req_len = HWRM_MAX_REQ_LEN;
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
rte_spinlock_init(&bp->hwrm_lock);
return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_filter_info *filter;
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
rc = bnxt_hwrm_clear_filter(bp, filter);
if (rc)
break;
}
return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_filter_info *filter;
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
rc = bnxt_hwrm_set_filter(bp, vnic, filter);
if (rc)
break;
}
return rc;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
unsigned int i;
if (bp->vnic_info == NULL)
return;
vnic = &bp->vnic_info[0];
bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
/* VNIC resources */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
bnxt_hwrm_vnic_free(bp, vnic);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
bnxt_free_all_hwrm_ring_grps(bp);
bnxt_free_all_hwrm_stat_ctxs(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
switch (conf_link_speed) {
case ETH_LINK_SPEED_10M_HD:
case ETH_LINK_SPEED_100M_HD:
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
return hw_link_duplex;
}
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
uint16_t eth_link_speed = 0;
if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
return ETH_LINK_SPEED_AUTONEG;
switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_100M:
case ETH_LINK_SPEED_100M_HD:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
case ETH_LINK_SPEED_1G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
case ETH_LINK_SPEED_2_5G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
case ETH_LINK_SPEED_10G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
case ETH_LINK_SPEED_20G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
case ETH_LINK_SPEED_25G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
case ETH_LINK_SPEED_40G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
case ETH_LINK_SPEED_50G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
RTE_LOG(ERR, PMD,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
}
return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
uint32_t one_speed;
if (link_speed == ETH_LINK_SPEED_AUTONEG)
return 0;
if (link_speed & ETH_LINK_SPEED_FIXED) {
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
RTE_LOG(ERR, PMD,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
RTE_LOG(ERR, PMD,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
RTE_LOG(ERR, PMD,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
}
return 0;
}
static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
uint16_t ret = 0;
if (link_speed == ETH_LINK_SPEED_AUTONEG)
link_speed = BNXT_SUPPORTED_SPEEDS;
if (link_speed & ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
if (link_speed & ETH_LINK_SPEED_100M_HD)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
if (link_speed & ETH_LINK_SPEED_1G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
if (link_speed & ETH_LINK_SPEED_2_5G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
if (link_speed & ETH_LINK_SPEED_10G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
if (link_speed & ETH_LINK_SPEED_20G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
if (link_speed & ETH_LINK_SPEED_25G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
if (link_speed & ETH_LINK_SPEED_40G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
if (link_speed & ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
return ret;
}
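/*
 * Illustrative expansion (a sketch): ETH_LINK_SPEED_AUTONEG advertises
 * every speed in BNXT_SUPPORTED_SPEEDS, while e.g.
 * (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G) yields only the
 * ..._MASK_10GB and ..._MASK_25GB bits.
 */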
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
switch (hw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
eth_link_speed = ETH_SPEED_NUM_100M;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
eth_link_speed = ETH_SPEED_NUM_1G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
eth_link_speed = ETH_SPEED_NUM_2_5G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
eth_link_speed = ETH_SPEED_NUM_10G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
eth_link_speed = ETH_SPEED_NUM_20G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
eth_link_speed = ETH_SPEED_NUM_25G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
eth_link_speed = ETH_SPEED_NUM_40G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
eth_link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
RTE_LOG(ERR, PMD,
"Get link config failed with rc %d\n", rc);
goto exit;
}
if (link_info->link_up)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
link->link_speed = ETH_LINK_SPEED_10M;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
int rc = 0;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_link_info link_req;
uint16_t speed;
if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
return 0;
rc = bnxt_valid_link_speed(dev_conf->link_speeds,
bp->eth_dev->data->port_id);
if (rc)
goto error;
memset(&link_req, 0, sizeof(link_req));
link_req.link_up = link_up;
if (!link_up)
goto port_phy_cfg;
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
if (speed == 0) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
} else {
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
link_req.force_pause = bp->link_info.force_pause;
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
RTE_LOG(ERR, PMD,
"Set link config failed with rc %d\n", rc);
}
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
HWRM_PREP(req, FUNC_QCFG, -1, resp);
req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
/* Hard Coded.. 0xfff VLAN ID mask */
vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
}
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
break;
default:
bp->port_partition_type = 0;
break;
}
return rc;
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/examples/server_node_efd/node/node.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_ip.h>
#include "common.h"
/* Number of packets to attempt to read from queue */
#define PKT_READ_SIZE ((uint16_t)32)
/*
* Our node id number - tells us which rx queue to read, and NIC TX
* queue to write to.
*/
static uint8_t node_id;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
static uint8_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packet that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
/* shared data from server. We update statistics here */
static struct tx_stats *tx_stats;
static struct filter_stats *filter_stats;
/*
* print a usage message
*/
static void
usage(const char *progname)
{
printf("Usage: %s [EAL args] -- -n <node_id>\n\n", progname);
}
/*
* Convert the node id number from a string to an int.
*/
static int
parse_node_num(const char *node)
{
char *end = NULL;
unsigned long temp;
if (node == NULL || *node == '\0')
return -1;
temp = strtoul(node, &end, 10);
if (end == NULL || *end != '\0')
return -1;
node_id = (uint8_t)temp;
return 0;
}
/*
* Parse the application arguments to the node app.
*/
static int
parse_app_args(int argc, char *argv[])
{
int option_index, opt;
char **argvopt = argv;
const char *progname = NULL;
static struct option lgopts[] = { /* no long options */
{NULL, 0, 0, 0 }
};
progname = argv[0];
while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
&option_index)) != EOF) {
switch (opt) {
case 'n':
if (parse_node_num(optarg) != 0) {
usage(progname);
return -1;
}
break;
default:
usage(progname);
return -1;
}
}
return 0;
}
/*
* Tx buffer error callback
*/
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
uint8_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
/* free the mbufs which failed from transmit */
for (i = 0; i < count; i++)
rte_pktmbuf_free(unsent[i]);
}
static void
configure_tx_buffer(uint8_t port_id, uint16_t size)
{
int ret;
/* Initialize TX buffers */
tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
RTE_ETH_TX_BUFFER_SIZE(size), 0,
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx "
"on port %u\n", (unsigned int) port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot set error callback for "
"tx buffer on port %u\n", (unsigned int) port_id);
}
/*
* set up output ports so that all traffic on port gets sent out
* its paired port. Index using actual port numbers since that is
* what comes in the mbuf structure.
*/
static void
configure_output_ports(const struct shared_info *info)
{
int i;
if (info->num_ports > RTE_MAX_ETHPORTS)
rte_exit(EXIT_FAILURE, "Too many ethernet ports. "
"RTE_MAX_ETHPORTS = %u\n",
(unsigned int)RTE_MAX_ETHPORTS);
for (i = 0; i < info->num_ports - 1; i += 2) {
uint8_t p1 = info->id[i];
uint8_t p2 = info->id[i+1];
output_ports[p1] = p2;
output_ports[p2] = p1;
configure_tx_buffer(p1, MBQ_CAPACITY);
configure_tx_buffer(p2, MBQ_CAPACITY);
}
}
/*
* Create the hash table that will contain the flows that
* the node will handle, which will be used to decide if packet
* is transmitted or dropped.
*/
static struct rte_hash *
create_hash_table(const struct shared_info *info)
{
uint32_t num_flows_node = info->num_flows / info->num_nodes;
char name[RTE_HASH_NAMESIZE];
struct rte_hash *h;
/* create table */
struct rte_hash_parameters hash_params = {
.entries = num_flows_node * 2, /* table load = 50% */
.key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
.socket_id = rte_socket_id(),
.hash_func_init_val = 0,
};
snprintf(name, sizeof(name), "hash_table_%d", node_id);
hash_params.name = name;
h = rte_hash_create(&hash_params);
if (h == NULL)
rte_exit(EXIT_FAILURE,
"Problem creating the hash table for node %d\n",
node_id);
return h;
}
static void
populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
{
unsigned int i;
int32_t ret;
uint32_t ip_dst;
uint32_t num_flows_node = 0;
uint64_t target_node;
/* Add flows in table */
for (i = 0; i < info->num_flows; i++) {
target_node = i % info->num_nodes;
if (target_node != node_id)
continue;
ip_dst = rte_cpu_to_be_32(i);
ret = rte_hash_add_key(h, (void *) &ip_dst);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Unable to add entry %u "
"in hash table\n", i);
else
num_flows_node++;
}
printf("Hash table: Adding 0x%x keys\n", num_flows_node);
}
/*
* This function performs routing of packets
* Just sends each input packet out an output port based solely on the input
* port it arrived on.
*/
static inline void
transmit_packet(struct rte_mbuf *buf)
{
int sent;
const uint8_t in_port = buf->port;
const uint8_t out_port = output_ports[in_port];
struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
if (sent)
tx_stats->tx[out_port] += sent;
}
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
struct ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[PKT_READ_SIZE];
const void *key_ptrs[PKT_READ_SIZE];
unsigned int i;
int32_t positions[PKT_READ_SIZE] = {0};
for (i = 0; i < num_packets; i++) {
/* Handle IPv4 header.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
sizeof(struct ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = &ipv4_dst_ip[i];
}
/* Check if packets belongs to any flows handled by this node */
rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);
for (i = 0; i < num_packets; i++) {
if (likely(positions[i] >= 0)) {
filter_stats->passed++;
transmit_packet(bufs[i]);
} else {
filter_stats->drop++;
/* Drop packet, as flow is not handled by this node */
rte_pktmbuf_free(bufs[i]);
}
}
}
/*
* Application main function - loops through
* receiving and processing packets. Never returns
*/
int
main(int argc, char *argv[])
{
const struct rte_memzone *mz;
struct rte_ring *rx_ring;
struct rte_hash *h;
struct rte_mempool *mp;
struct shared_info *info;
int need_flush = 0; /* indicates whether we have unsent packets */
int retval;
void *pkts[PKT_READ_SIZE];
uint16_t sent;
retval = rte_eal_init(argc, argv);
if (retval < 0)
return -1;
argc -= retval;
argv += retval;
if (parse_app_args(argc, argv) < 0)
rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
if (rte_eth_dev_count() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
if (rx_ring == NULL)
rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
"is server process running?\n");
mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
if (mp == NULL)
rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
mz = rte_memzone_lookup(MZ_SHARED_INFO);
if (mz == NULL)
rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
info = mz->addr;
tx_stats = &(info->tx_stats[node_id]);
filter_stats = &(info->filter_stats[node_id]);
configure_output_ports(info);
h = create_hash_table(info);
populate_hash_table(h, info);
RTE_LOG(INFO, APP, "Finished Process Init.\n");
printf("\nNode process %d handling packets\n", node_id);
printf("[Press Ctrl-C to quit ...]\n");
for (;;) {
uint16_t rx_pkts = PKT_READ_SIZE;
uint8_t port;
/*
* Try dequeuing max possible packets first, if that fails,
* get the most we can. Loop body should only execute once,
* maximum
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
rx_pkts) != 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);
if (unlikely(rx_pkts == 0)) {
if (need_flush)
for (port = 0; port < info->num_ports; port++) {
sent = rte_eth_tx_buffer_flush(
info->id[port],
node_id,
tx_buffer[port]);
if (unlikely(sent))
tx_stats->tx[port] += sent;
}
need_flush = 0;
continue;
}
handle_packets(h, (struct rte_mbuf **)pkts, rx_pkts);
need_flush = 1;
}
}
|
vicharl/containerdns
|
kdns/src/parser.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INCLUDE_PARSER_H__
#define __INCLUDE_PARSER_H__
#include <stdint.h>
#include <rte_ip.h>
#include <rte_ether.h>
#define PARSE_DELIMITER " \f\n\r\t\v"
#define skip_white_spaces(pos) \
({ \
__typeof__(pos) _p = (pos); \
for ( ; isspace(*_p); _p++) \
; \
_p; \
})
static inline size_t
skip_digits(const char *src)
{
size_t i;
for (i = 0; isdigit(src[i]); i++)
;
return i;
}
int parser_read_arg_bool(const char *p);
int parser_read_uint64(uint64_t *value, const char *p);
int parser_read_uint32(uint32_t *value, const char *p);
int parser_read_uint16(uint16_t *value, const char *p);
int parser_read_uint8(uint8_t *value, const char *p);
int parser_read_uint64_hex(uint64_t *value, const char *p);
int parser_read_uint32_hex(uint32_t *value, const char *p);
int parser_read_uint16_hex(uint16_t *value, const char *p);
int parser_read_uint8_hex(uint8_t *value, const char *p);
int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
int parse_ipv4_addr(const char *token, struct in_addr *ipv4);
int parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
int parse_mac_addr(const char *token, struct ether_addr *addr);
int parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels);
int parse_l4_proto(const char *token, uint8_t *proto);
int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
int parse_pipeline_core(uint32_t *socket, uint32_t *core, uint32_t *ht, const char *entry);
int str_split(char *str, const char *delim, char *tokens[], int limit);
int parse_ipv4_port(const char *token, uint32_t *ip, uint16_t *port);
static inline void
mac_addr_tostring(struct ether_addr *addr, char *buf, size_t len)
{
snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
addr->addr_bytes[0],
addr->addr_bytes[1],
addr->addr_bytes[2],
addr->addr_bytes[3],
addr->addr_bytes[4],
addr->addr_bytes[5]);
}
static inline void
ipv4_addr_tostring(uint32_t ipv4, char *buf, size_t len)
{
ipv4 = rte_be_to_cpu_32(ipv4);
snprintf(buf, len, "%u.%u.%u.%u",
(unsigned char)(ipv4 >> 24 & 0xff),
(unsigned char)(ipv4 >> 16 & 0xff),
(unsigned char)(ipv4 >> 8 & 0xff),
(unsigned char)(ipv4 & 0xff));
}
#endif
|
vicharl/containerdns
|
kdns/src/metrics.c
|
#define _GNU_SOURCE
#include <sys/time.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <rte_rwlock.h>
#include <string.h>
#include <jansson.h>
#include "hashMap.h"
#include "util.h"
#include "metrics.h"
#include "dns-conf.h"
#define METRICS_HASH_SIZE 0x3FFFF
#define METRICS_LOCK_SIZE 0xF
// 10 minutes
#define METRICS_TIME_EXPIRED ( 10*60*1000*1000)
#define METRICS_MAX_NAME_LEN 255
// domain query metrics
typedef struct metrics_domain{
uint64_t requestCount ;
uint64_t lastQueryTime ; // unix time us
uint64_t firstQueryTime ;// unix time us
metrics_metrics_st metrics;
}metrics_domain_st;
// domain + clientIp query metrics
typedef struct metrics_domain_clientIp{
char domain_name[METRICS_MAX_NAME_LEN];
uint32_t src_addr;
uint64_t requestCount ;
uint64_t lastQueryTime ; // unix time us
uint64_t firstQueryTime ;// unix time us
}metrics_domain_clientIp_st;
static hashMap *g_metrics_fwd_domains = NULL;
static hashMap *g_metrics_fwd_domains_client = NULL;
static json_t * json_metrics_fwd_domains = NULL;
static json_t * json_metrics_fwd_clieintIp = NULL;
static rte_rwlock_t metrics_lock;
static char *g_dns_host_name = NULL;
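// wall-clock time in microseconds, used to timestamp queries and measure forward latency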
uint64_t time_now_usec(void){
struct timeval tv;
gettimeofday(&tv, NULL);
return (0LL + 1000 * 1000) * tv.tv_sec + tv.tv_usec ;
}
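// hash node comparator: a node matches only on an exact key string match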
static int metrics_check_equal(char *key, hashNode *node, __attribute__((unused))void *check){
if (strcmp(key,node->key)==0){
return 1;
}
return 0;
}
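// fold one query latency (in us) into a metrics block: track min/max/sum and
// bucket the sample into <=10us, 10us-100us, 100us-1ms and >1ms counters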
void metrics_data_update( metrics_metrics_st* metrics,uint64_t diff_us){
if (diff_us > metrics->maxTime){
metrics->maxTime = diff_us;
}
if (diff_us < metrics->minTime){
metrics->minTime = diff_us;
}
// 10 us
if (diff_us <= 10){
metrics->metrics[0]++;
}else if((10 < diff_us) && (diff_us <= 100)){
metrics->metrics[1]++;
}else if((100 < diff_us) && (diff_us <= 1000)){
metrics->metrics[2]++;
}else{
metrics->metrics[3]++;
}
metrics->timeSum += diff_us;
return;
}
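// hash-hit callback for the per-domain table: bump the query counter, refresh the
// last-query timestamp and record the latency measured from *input (the query start time)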
static int metrics_domain_query(hashNode *node, void* input){
metrics_domain_st *mNode = (metrics_domain_st*) node->data;
uint64_t *p_time_start = (uint64_t*)input;
uint64_t time_now = time_now_usec();
uint64_t diff = time_now - *p_time_start;
mNode->requestCount++;
mNode->lastQueryTime = time_now;
metrics_data_update(&mNode->metrics, diff);
return 1;
}
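// hash-hit callback for the domain+clientIp table: bump the counter and record the query time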
static int metrics_domain_clientIp_query(hashNode *node, void* input){
uint64_t *p_time_start = (uint64_t*)input;
metrics_domain_clientIp_st *mNode = (metrics_domain_clientIp_st*) node->data;
mNode->requestCount++;
mNode->lastQueryTime = *p_time_start;
return 1;
}
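// dump callback: serialize one per-domain node into the JSON array passed via arg,
// then reset its latency window (counters cleared, minTime re-seeded)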
static int metrics_domain_query_all_and_reset(hashNode *node, void* arg){
metrics_domain_st *mNode = (metrics_domain_st*) node->data;
json_t * array = (json_t *)arg;
json_t *value = json_pack("{s:s, s:f, s:f, s:f, s:f,s:f, s:f, s:f, s:f, s:f, s:f}",
"Domain", node->key, "QueryNum", (double)mNode->requestCount, "FirstQueryTime", (double)mNode->firstQueryTime,
"LastQueryTime",(double)mNode->lastQueryTime,"MinTime", (double)mNode->metrics.minTime,"MaxTime", (double)mNode->metrics.maxTime,
"SumTime",(double)mNode->metrics.timeSum,"metrics1",(double)mNode->metrics.metrics[0],"metrics2",(double)mNode->metrics.metrics[1],
"metrics3",(double)mNode->metrics.metrics[2],"metrics4",(double)mNode->metrics.metrics[3]);
memset(&mNode->metrics, 0, sizeof(metrics_metrics_st));
mNode->metrics.minTime = 0xffff;
json_array_append_new(array, value);
return 1;
}
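// dump callback: serialize one domain+clientIp node (with host label and source IP) into the JSON array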
static int metrics_domain_clientip_query_all(hashNode *node, void* arg){
metrics_domain_clientIp_st *mNode = (metrics_domain_clientIp_st*) node->data;
json_t * array = (json_t *)arg;
json_t *value = json_pack("{s:s, s:s, s:f, s:f, s:f,s:i}",
"Domain", mNode->domain_name,"Host", g_dns_host_name, "QueryNum", (double)mNode->requestCount, "FirstQueryTime", (double)mNode->firstQueryTime,
"LastQueryTime",(double)mNode->lastQueryTime,"SourceIP", mNode->src_addr);
json_array_append_new(array, value);
return 1;
}
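// expiry callback for the per-domain table: drop a node once it has been idle for METRICS_TIME_EXPIRED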
static int metrics_domian_expired_check(hashNode *node,void *now){
uint64_t *time_now = (uint64_t *)now;
metrics_domain_st *mNode = (metrics_domain_st*) node->data;
//printf("metrics_domian_expired_check : %s\n",node->key);
if (mNode->lastQueryTime + METRICS_TIME_EXPIRED < *time_now){
return 1;
}
return 0;
}
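// same expiry rule, applied to the domain+clientIp table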
static int metrics_clientIp_expired_check(hashNode *node,void *now){
uint64_t *time_now = (uint64_t *)now;
//printf("metrics_clientIp_expired_check : %s\n",node->key);
metrics_domain_clientIp_st *mNode = (metrics_domain_clientIp_st*) node->data;
//
if (mNode->lastQueryTime + METRICS_TIME_EXPIRED < *time_now){
return 1;
}
return 0;
}
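// per-query entry point: update the existing per-domain node via the hash-hit callback,
// or insert a fresh node on the first query for this domain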
void metrics_domain_update(char *domain, int64_t timeStart){
if (HASH_NODE_FIND == hmap_lookup(g_metrics_fwd_domains, domain, NULL, (void*)&timeStart)){
return;
}
metrics_domain_st * newNode = xalloc_zero(sizeof(metrics_domain_st));
newNode->firstQueryTime = newNode->lastQueryTime = timeStart;
newNode->requestCount = 1;
newNode->metrics.minTime = 0xffff;
hmap_update(g_metrics_fwd_domains, domain, NULL, (void*)newNode);
}
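// per-query entry point for the domain+clientIp table, keyed by "domain-srcaddr"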
void metrics_domain_clientIp_update(char *domain, int64_t timeStart, uint32_t src_addr){
char key[METRICS_MAX_NAME_LEN]= {0};
snprintf(key, sizeof(key), "%s-%u", domain, src_addr);
if (HASH_NODE_FIND == hmap_lookup(g_metrics_fwd_domains_client, key, NULL, (void*)&timeStart)){
return;
}
metrics_domain_clientIp_st * newNode = xalloc_zero(sizeof(metrics_domain_clientIp_st));
newNode->firstQueryTime = newNode->lastQueryTime = timeStart;
snprintf(newNode->domain_name, sizeof(newNode->domain_name), "%s", domain);
newNode->src_addr = src_addr;
newNode->requestCount = 1;
hmap_update(g_metrics_fwd_domains_client, key, NULL, (void*)newNode);
}
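// background thread: every 10 minutes purge nodes that have been idle past the expiry window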
static void *thread_metrics_expired_cleanup(void *arg){
(void)arg;
int del_nums = 0;
while (1) {
sleep(600);
uint64_t time_now = time_now_usec();
del_nums = hmap_check_expired(g_metrics_fwd_domains, (void *)&time_now);
if (del_nums) {
log_msg(LOG_INFO, "metrics fwd domains expired: %d record dels\n", del_nums);
}
del_nums = hmap_check_expired(g_metrics_fwd_domains_client, (void *)&time_now);
if (del_nums) {
log_msg(LOG_INFO, "metrics fwd domains client expired: %d record dels\n", del_nums);
}
}
return NULL;
}
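// background thread: every 10 minutes dump the per-domain table into a fresh JSON array
// and swap it in under the write lock; readers keep serving the previous snapshot until then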
static void *thread_metrics_domain_getAll(void *arg){
(void)arg;
// sleep 2s to let the cleanup thread run first
sleep(2);
json_t * array_tmp = NULL;
while (1){
sleep(600);
array_tmp = json_array();
if(!array_tmp){
log_msg(LOG_ERR,"unable to create array\n");
continue;
}
hmap_get_all(g_metrics_fwd_domains, (void*)array_tmp);
rte_rwlock_write_lock(&metrics_lock);
json_decref(json_metrics_fwd_domains);
json_metrics_fwd_domains = array_tmp;
rte_rwlock_write_unlock(&metrics_lock);
}
return NULL;
}
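// same periodic snapshot, built from the domain+clientIp table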
static void *thread_metrics_domain_clientIp_getAll(void *arg){
(void)arg;
sleep(2);
json_t * array_tmp = NULL;
while (1){
sleep(600);
array_tmp = json_array();
if(!array_tmp){
log_msg(LOG_ERR,"unable to create array\n");
continue;
}
hmap_get_all(g_metrics_fwd_domains_client, (void*)array_tmp);
rte_rwlock_write_lock(&metrics_lock);
json_decref(json_metrics_fwd_clieintIp);
json_metrics_fwd_clieintIp = array_tmp;
rte_rwlock_write_unlock(&metrics_lock);
}
return NULL;
}
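// metrics query handler: return the latest per-domain JSON snapshot, or "nodata" if no snapshot exists yet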
void* metrics_domains_get( __attribute__((unused)) struct connection_info_struct *con_info,__attribute__((unused))char *url, int * len_response)
{
char *str_ret = NULL;
rte_rwlock_read_lock(&metrics_lock);
if (json_metrics_fwd_domains != NULL){
str_ret = json_dumps(json_metrics_fwd_domains, JSON_COMPACT);
}else{
str_ret = strdup("nodata");
}
rte_rwlock_read_unlock(&metrics_lock);
*len_response = strlen(str_ret);
return (void* )str_ret;
}
void* metrics_domains_clientIp_get( __attribute__((unused)) struct connection_info_struct *con_info,__attribute__((unused))char *url, int * len_response)
{
char *str_ret = NULL;
rte_rwlock_read_lock(&metrics_lock);
if (json_metrics_fwd_clieintIp != NULL){
str_ret = json_dumps(json_metrics_fwd_clieintIp, JSON_COMPACT);
}else{
str_ret = strdup("nodata");
}
rte_rwlock_read_unlock(&metrics_lock);
*len_response = strlen(str_ret);
return (void* )str_ret;
}
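// swap in a new host name label used in the client-IP metrics; frees the previous value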
int metrics_host_reload(char *host_name) {
char *tmp = g_dns_host_name;
g_dns_host_name = strdup(host_name);
if (tmp) {
free(tmp);
}
return 0;
}
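// set up both metrics hash maps with their callbacks, create the initial (empty) JSON
// snapshots, and start the cleanup and snapshot threads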
void fwd_metrics_init(void) {
g_metrics_fwd_domains = hmap_create(METRICS_HASH_SIZE, METRICS_LOCK_SIZE, elfHashDomain,
metrics_check_equal, metrics_domain_query, metrics_domian_expired_check, metrics_domain_query_all_and_reset);
g_metrics_fwd_domains_client = hmap_create(METRICS_HASH_SIZE, METRICS_LOCK_SIZE, elfHashDomain,
metrics_check_equal, metrics_domain_clientIp_query, metrics_clientIp_expired_check, metrics_domain_clientip_query_all);
json_metrics_fwd_domains = json_array();
if(!json_metrics_fwd_domains){
log_msg(LOG_ERR,"unable to create array\n");
exit(-1);
}
json_metrics_fwd_clieintIp = json_array();
if(!json_metrics_fwd_clieintIp){
log_msg(LOG_ERR,"unable to create array\n");
exit(-1);
}
g_dns_host_name = strdup(g_dns_cfg->comm.metrics_host);
rte_rwlock_init(&metrics_lock);
// cache data expired cleanup thread
pthread_t *thread_cache_expired = (pthread_t *) xalloc(sizeof(pthread_t));
pthread_create(thread_cache_expired, NULL, thread_metrics_expired_cleanup, (void*)NULL);
pthread_setname_np(*thread_cache_expired, "kdns_mcache_clr");
// sleep(3);
// metrics_domains thread
pthread_t *thread_domain_metrics = (pthread_t *) xalloc(sizeof(pthread_t));
pthread_create(thread_domain_metrics, NULL, thread_metrics_domain_getAll, (void*)NULL);
pthread_setname_np(*thread_domain_metrics, "kdns_domain_get");
// metrics_domains_clientIp thread
pthread_t *thread_domain_clientIp_metrics = (pthread_t *) xalloc(sizeof(pthread_t));
pthread_create(thread_domain_clientIp_metrics, NULL, thread_metrics_domain_clientIp_getAll, (void*)NULL);
pthread_setname_np(*thread_domain_clientIp_metrics, "kdns_cip_get");
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/openssl/rte_openssl_pmd.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <openssl/evp.h>
#include "rte_openssl_pmd_private.h"
static int cryptodev_openssl_remove(const char *name);
/*----------------------------------------------------------------------------*/
/**
* Increment counter by 1
* Counter is 64 bit array, big-endian
*/
static void
ctr_inc(uint8_t *ctr)
{
uint64_t *ctr64 = (uint64_t *)ctr;
*ctr64 = __builtin_bswap64(*ctr64);
(*ctr64)++;
*ctr64 = __builtin_bswap64(*ctr64);
}
/*
*------------------------------------------------------------------------------
* Session Prepare
*------------------------------------------------------------------------------
*/
/** Get xform chain order */
static enum openssl_chain_order
openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
if (xform != NULL) {
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
if (xform->next == NULL)
res = OPENSSL_CHAIN_ONLY_AUTH;
else if (xform->next->type ==
RTE_CRYPTO_SYM_XFORM_CIPHER)
res = OPENSSL_CHAIN_AUTH_CIPHER;
}
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
if (xform->next == NULL)
res = OPENSSL_CHAIN_ONLY_CIPHER;
else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
res = OPENSSL_CHAIN_CIPHER_AUTH;
}
}
return res;
}
/** Get session cipher key from input cipher key */
static void
get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
{
memcpy(session_key, input_key, keylen);
}
/** Get key ede 24 bytes standard from input key */
static int
get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
{
int res = 0;
/* Initialize keys - 24 bytes: [key1-key2-key3] */
switch (keylen) {
case 24:
memcpy(key_ede, key, 24);
break;
case 16:
/* K3 = K1 */
memcpy(key_ede, key, 16);
memcpy(key_ede + 16, key, 8);
break;
case 8:
/* K1 = K2 = K3 (DES compatibility) */
memcpy(key_ede, key, 8);
memcpy(key_ede + 8, key, 8);
memcpy(key_ede + 16, key, 8);
break;
default:
OPENSSL_LOG_ERR("Unsupported key size");
res = -EINVAL;
}
return res;
}
/** Get adequate openssl function for input cipher algorithm */
static uint8_t
get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
const EVP_CIPHER **algo)
{
int res = 0;
if (algo != NULL) {
switch (sess_algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
switch (keylen) {
case 16:
*algo = EVP_des_ede_cbc();
break;
case 24:
*algo = EVP_des_ede3_cbc();
break;
default:
res = -EINVAL;
}
break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
break;
case RTE_CRYPTO_CIPHER_AES_CBC:
switch (keylen) {
case 16:
*algo = EVP_aes_128_cbc();
break;
case 24:
*algo = EVP_aes_192_cbc();
break;
case 32:
*algo = EVP_aes_256_cbc();
break;
default:
res = -EINVAL;
}
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
switch (keylen) {
case 16:
*algo = EVP_aes_128_ctr();
break;
case 24:
*algo = EVP_aes_192_ctr();
break;
case 32:
*algo = EVP_aes_256_ctr();
break;
default:
res = -EINVAL;
}
break;
case RTE_CRYPTO_CIPHER_AES_GCM:
switch (keylen) {
case 16:
*algo = EVP_aes_128_gcm();
break;
case 24:
*algo = EVP_aes_192_gcm();
break;
case 32:
*algo = EVP_aes_256_gcm();
break;
default:
res = -EINVAL;
}
break;
default:
res = -EINVAL;
break;
}
} else {
res = -EINVAL;
}
return res;
}
/** Get adequate openssl function for input auth algorithm */
static uint8_t
get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
const EVP_MD **algo)
{
int res = 0;
if (algo != NULL) {
switch (sessalgo) {
case RTE_CRYPTO_AUTH_MD5:
case RTE_CRYPTO_AUTH_MD5_HMAC:
*algo = EVP_md5();
break;
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
*algo = EVP_sha1();
break;
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA224_HMAC:
*algo = EVP_sha224();
break;
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
*algo = EVP_sha256();
break;
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_SHA384_HMAC:
*algo = EVP_sha384();
break;
case RTE_CRYPTO_AUTH_SHA512:
case RTE_CRYPTO_AUTH_SHA512_HMAC:
*algo = EVP_sha512();
break;
default:
res = -EINVAL;
break;
}
} else {
res = -EINVAL;
}
return res;
}
/** Set session cipher parameters */
static int
openssl_set_session_cipher_parameters(struct openssl_session *sess,
const struct rte_crypto_sym_xform *xform)
{
/* Select cipher direction */
sess->cipher.direction = xform->cipher.op;
/* Select cipher key */
sess->cipher.key.length = xform->cipher.key.length;
/* Select cipher algo */
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
case RTE_CRYPTO_CIPHER_AES_CTR:
case RTE_CRYPTO_CIPHER_AES_GCM:
sess->cipher.mode = OPENSSL_CIPHER_LIB;
sess->cipher.algo = xform->cipher.algo;
sess->cipher.ctx = EVP_CIPHER_CTX_new();
if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
&sess->cipher.evp_algo) != 0)
return -EINVAL;
get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
sess->cipher.key.data);
break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
sess->cipher.ctx = EVP_CIPHER_CTX_new();
if (get_cipher_key_ede(xform->cipher.key.data,
sess->cipher.key.length,
sess->cipher.key.data) != 0)
return -EINVAL;
break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
return -EINVAL;
}
return 0;
}
/* Set session auth parameters */
static int
openssl_set_session_auth_parameters(struct openssl_session *sess,
const struct rte_crypto_sym_xform *xform)
{
/* Select auth generate/verify */
sess->auth.operation = xform->auth.op;
sess->auth.algo = xform->auth.algo;
/* Select auth algo */
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_AES_GCM:
/* Check additional condition for AES_GMAC/GCM */
if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
return -EINVAL;
sess->chain_order = OPENSSL_CHAIN_COMBINED;
break;
case RTE_CRYPTO_AUTH_MD5:
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_SHA512:
sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
if (get_auth_algo(xform->auth.algo,
&sess->auth.auth.evp_algo) != 0)
return -EINVAL;
sess->auth.auth.ctx = EVP_MD_CTX_create();
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
case RTE_CRYPTO_AUTH_SHA384_HMAC:
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
sess->auth.hmac.ctx = EVP_MD_CTX_create();
if (get_auth_algo(xform->auth.algo,
&sess->auth.hmac.evp_algo) != 0)
return -EINVAL;
sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
xform->auth.key.data, xform->auth.key.length);
break;
default:
return -EINVAL;
}
return 0;
}
/** Parse crypto xform chain and set private session parameters */
int
openssl_set_session_parameters(struct openssl_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_sym_xform *cipher_xform = NULL;
const struct rte_crypto_sym_xform *auth_xform = NULL;
sess->chain_order = openssl_get_chain_order(xform);
switch (sess->chain_order) {
case OPENSSL_CHAIN_ONLY_CIPHER:
cipher_xform = xform;
break;
case OPENSSL_CHAIN_ONLY_AUTH:
auth_xform = xform;
break;
case OPENSSL_CHAIN_CIPHER_AUTH:
cipher_xform = xform;
auth_xform = xform->next;
break;
case OPENSSL_CHAIN_AUTH_CIPHER:
auth_xform = xform;
cipher_xform = xform->next;
break;
default:
return -EINVAL;
}
/* cipher_xform must be checked before auth_xform */
if (cipher_xform) {
if (openssl_set_session_cipher_parameters(
sess, cipher_xform)) {
OPENSSL_LOG_ERR(
"Invalid/unsupported cipher parameters");
return -EINVAL;
}
}
if (auth_xform) {
if (openssl_set_session_auth_parameters(sess, auth_xform)) {
OPENSSL_LOG_ERR(
"Invalid/unsupported auth parameters");
return -EINVAL;
}
}
return 0;
}
/** Reset private session parameters */
void
openssl_reset_session(struct openssl_session *sess)
{
EVP_CIPHER_CTX_free(sess->cipher.ctx);
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
EVP_MD_CTX_destroy(sess->auth.auth.ctx);
break;
case OPENSSL_AUTH_AS_HMAC:
EVP_PKEY_free(sess->auth.hmac.pkey);
EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
break;
default:
break;
}
}
/** Provide session for operation */
static struct openssl_session *
get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
{
struct openssl_session *sess = NULL;
if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
/* get existing session */
if (likely(op->sym->session != NULL &&
op->sym->session->dev_type ==
RTE_CRYPTODEV_OPENSSL_PMD))
sess = (struct openssl_session *)
op->sym->session->_private;
} else {
/* provide internal session */
void *_sess = NULL;
if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
sess = (struct openssl_session *)
((struct rte_cryptodev_sym_session *)_sess)
->_private;
if (unlikely(openssl_set_session_parameters(
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
} else
op->sym->session = _sess;
}
}
if (sess == NULL)
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return sess;
}
/*
*------------------------------------------------------------------------------
* Process Operations
*------------------------------------------------------------------------------
*/
static inline int
process_openssl_encryption_update(struct rte_mbuf *mbuf_src, int offset,
uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
{
struct rte_mbuf *m;
int dstlen;
int l, n = srclen;
uint8_t *src;
for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
m = m->next)
offset -= rte_pktmbuf_data_len(m);
if (m == 0)
return -1;
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
return -1;
*dst += l;
return 0;
}
if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
return -1;
*dst += dstlen;
n -= l;
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
return -1;
*dst += dstlen;
n -= l;
}
return 0;
}
static inline int
process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
{
struct rte_mbuf *m;
int dstlen;
int l, n = srclen;
uint8_t *src;
for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
m = m->next)
offset -= rte_pktmbuf_data_len(m);
if (m == 0)
return -1;
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
return -1;
*dst += l;
return 0;
}
if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
return -1;
*dst += dstlen;
n -= l;
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
return -1;
*dst += dstlen;
n -= l;
}
return 0;
}
/** Process standard openssl cipher encryption */
static int
process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
int offset, uint8_t *iv, uint8_t *key, int srclen,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
int totlen;
if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
goto process_cipher_encrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
if (process_openssl_encryption_update(mbuf_src, offset, &dst,
srclen, ctx))
goto process_cipher_encrypt_err;
if (EVP_EncryptFinal_ex(ctx, dst, &totlen) <= 0)
goto process_cipher_encrypt_err;
return 0;
process_cipher_encrypt_err:
OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
return -EINVAL;
}
/** Process standard openssl cipher decryption */
static int
process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
int offset, uint8_t *iv, uint8_t *key, int srclen,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
int totlen;
if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
goto process_cipher_decrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
if (process_openssl_decryption_update(mbuf_src, offset, &dst,
srclen, ctx))
goto process_cipher_decrypt_err;
if (EVP_DecryptFinal_ex(ctx, dst, &totlen) <= 0)
goto process_cipher_decrypt_err;
return 0;
process_cipher_decrypt_err:
OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
return -EINVAL;
}
/** Process cipher des 3 ctr encryption, decryption algorithm */
static int
process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
int offset, uint8_t *iv, uint8_t *key, int srclen,
EVP_CIPHER_CTX *ctx)
{
uint8_t ebuf[8], ctr[8];
int unused, n;
struct rte_mbuf *m;
uint8_t *src;
int l;
for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
m = m->next)
offset -= rte_pktmbuf_data_len(m);
if (m == 0)
goto process_cipher_des3ctr_err;
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
/* We use 3DES encryption also for decryption.
* IV is not important for 3DES ecb
*/
if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
goto process_cipher_des3ctr_err;
memcpy(ctr, iv, 8);
for (n = 0; n < srclen; n++) {
if (n % 8 == 0) {
if (EVP_EncryptUpdate(ctx,
(unsigned char *)&ebuf, &unused,
(const unsigned char *)&ctr, 8) <= 0)
goto process_cipher_des3ctr_err;
ctr_inc(ctr);
}
dst[n] = *(src++) ^ ebuf[n % 8];
l--;
if (!l) {
m = m->next;
if (m) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m);
}
}
}
return 0;
process_cipher_des3ctr_err:
OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
return -EINVAL;
}
/** Process auth/encryption aes-gcm algorithm */
static int
process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
uint8_t *key, uint8_t *dst, uint8_t *tag,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
goto process_auth_encryption_gcm_err;
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
goto process_auth_encryption_gcm_err;
if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
goto process_auth_encryption_gcm_err;
if (aadlen > 0)
if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
goto process_auth_encryption_gcm_err;
if (srclen > 0)
if (process_openssl_encryption_update(mbuf_src, offset, &dst,
srclen, ctx))
goto process_auth_encryption_gcm_err;
/* Workaround openssl bug in versions less than 1.0.1f */
if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
goto process_auth_encryption_gcm_err;
if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
goto process_auth_encryption_gcm_err;
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
goto process_auth_encryption_gcm_err;
return 0;
process_auth_encryption_gcm_err:
OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
return -EINVAL;
}
static int
process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
uint8_t *key, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx,
const EVP_CIPHER *algo)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
goto process_auth_decryption_gcm_err;
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
goto process_auth_decryption_gcm_err;
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
goto process_auth_decryption_gcm_err;
if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
goto process_auth_decryption_gcm_err;
if (aadlen > 0)
if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
goto process_auth_decryption_gcm_err;
if (srclen > 0)
if (process_openssl_decryption_update(mbuf_src, offset, &dst,
srclen, ctx))
goto process_auth_decryption_gcm_err;
/* Workaround openssl bug in versions less than 1.0.1f */
if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
goto process_auth_decryption_gcm_err;
if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
goto process_auth_decryption_gcm_final_err;
return 0;
process_auth_decryption_gcm_err:
OPENSSL_LOG_ERR("Process openssl auth description gcm failed");
return -EINVAL;
process_auth_decryption_gcm_final_err:
return -EFAULT;
}
/** Process standard openssl auth algorithms */
static int
process_openssl_auth(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
__rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
{
size_t dstlen;
struct rte_mbuf *m;
int l, n = srclen;
uint8_t *src;
for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
m = m->next)
offset -= rte_pktmbuf_data_len(m);
if (m == 0)
goto process_auth_err;
if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
goto process_auth_err;
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
goto process_auth_err;
goto process_auth_final;
}
if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
n -= l;
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
n -= l;
}
process_auth_final:
if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
goto process_auth_err;
return 0;
process_auth_err:
OPENSSL_LOG_ERR("Process openssl auth failed");
return -EINVAL;
}
/** Process standard openssl auth algorithms with hmac */
static int
process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
__rte_unused uint8_t *iv, EVP_PKEY *pkey,
int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
{
size_t dstlen;
struct rte_mbuf *m;
int l, n = srclen;
uint8_t *src;
for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
m = m->next)
offset -= rte_pktmbuf_data_len(m);
if (m == 0)
goto process_auth_err;
if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
goto process_auth_err;
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
goto process_auth_err;
goto process_auth_final;
}
if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
n -= l;
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
n -= l;
}
process_auth_final:
if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
goto process_auth_err;
return 0;
process_auth_err:
OPENSSL_LOG_ERR("Process openssl auth failed");
return -EINVAL;
}
/*----------------------------------------------------------------------------*/
/** Process auth/cipher combined operation */
static void
process_openssl_combined_op
(struct rte_crypto_op *op, struct openssl_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
/* cipher */
uint8_t *dst = NULL, *iv, *tag, *aad;
int srclen, ivlen, aadlen, status = -1;
/*
* Segmented destination buffer is not supported for
* encryption/decryption
*/
if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return;
}
iv = op->sym->cipher.iv.data;
ivlen = op->sym->cipher.iv.length;
aad = op->sym->auth.aad.data;
aadlen = op->sym->auth.aad.length;
tag = op->sym->auth.digest.data;
if (tag == NULL)
tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset +
op->sym->cipher.data.length);
if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
srclen = 0;
else {
srclen = op->sym->cipher.data.length;
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
}
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_auth_encryption_gcm(
mbuf_src, op->sym->cipher.data.offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
else
status = process_openssl_auth_decryption_gcm(
mbuf_src, op->sym->cipher.data.offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
if (status != 0) {
if (status == (-EFAULT) &&
sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
else
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
}
/** Process cipher operation */
static void
process_openssl_cipher_op
(struct rte_crypto_op *op, struct openssl_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
uint8_t *dst, *iv;
int srclen, status;
/*
* Segmented destination buffer is not supported for
* encryption/decryption
*/
if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return;
}
srclen = op->sym->cipher.data.length;
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
iv = op->sym->cipher.iv.data;
if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_cipher_encrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx,
sess->cipher.evp_algo);
else
status = process_openssl_cipher_decrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx,
sess->cipher.evp_algo);
else
status = process_openssl_cipher_des3ctr(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx);
if (status != 0)
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
/** Process auth operation */
static void
process_openssl_auth_op
(struct rte_crypto_op *op, struct openssl_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
uint8_t *dst;
int srclen, status;
srclen = op->sym->auth.data.length;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
op->sym->auth.digest.length);
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->auth.data.offset +
op->sym->auth.data.length);
}
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
status = process_openssl_auth(mbuf_src, dst,
op->sym->auth.data.offset, NULL, NULL, srclen,
sess->auth.auth.ctx, sess->auth.auth.evp_algo);
break;
case OPENSSL_AUTH_AS_HMAC:
status = process_openssl_auth_hmac(mbuf_src, dst,
op->sym->auth.data.offset, NULL,
sess->auth.hmac.pkey, srclen,
sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
break;
default:
status = -1;
break;
}
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(dst, op->sym->auth.digest.data,
op->sym->auth.digest.length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
}
if (status != 0)
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
/** Process crypto operation for mbuf */
static int
process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
struct openssl_session *sess)
{
struct rte_mbuf *msrc, *mdst;
int retval;
msrc = op->sym->m_src;
mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
switch (sess->chain_order) {
case OPENSSL_CHAIN_ONLY_CIPHER:
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_ONLY_AUTH:
process_openssl_auth_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_CIPHER_AUTH:
process_openssl_cipher_op(op, sess, msrc, mdst);
process_openssl_auth_op(op, sess, mdst, mdst);
break;
case OPENSSL_CHAIN_AUTH_CIPHER:
process_openssl_auth_op(op, sess, msrc, mdst);
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_COMBINED:
process_openssl_combined_op(op, sess, msrc, mdst);
break;
default:
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
break;
}
/* Free session if a session-less crypto op */
if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
else
retval = -1;
return retval;
}
/*
*------------------------------------------------------------------------------
* PMD Framework
*------------------------------------------------------------------------------
*/
/** Enqueue burst */
static uint16_t
openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct openssl_session *sess;
struct openssl_qp *qp = queue_pair;
int i, retval;
for (i = 0; i < nb_ops; i++) {
sess = get_session(qp, ops[i]);
if (unlikely(sess == NULL))
goto enqueue_err;
retval = process_op(qp, ops[i], sess);
if (unlikely(retval < 0))
goto enqueue_err;
}
qp->stats.enqueued_count += i;
return i;
enqueue_err:
qp->stats.enqueue_err_count++;
return i;
}
/** Dequeue burst */
static uint16_t
openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct openssl_qp *qp = queue_pair;
unsigned int nb_dequeued = 0;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
(void **)ops, nb_ops);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
/** Create OPENSSL crypto device */
static int
cryptodev_openssl_create(struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
struct openssl_private *internals;
if (init_params->name[0] == '\0') {
int ret = rte_cryptodev_pmd_create_dev_name(
init_params->name,
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
if (ret < 0) {
OPENSSL_LOG_ERR("failed to create unique name");
return ret;
}
}
dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct openssl_private),
init_params->socket_id);
if (dev == NULL) {
OPENSSL_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
dev->dev_ops = rte_openssl_pmd_ops;
/* register rx/tx burst functions for data path */
dev->dequeue_burst = openssl_pmd_dequeue_burst;
dev->enqueue_burst = openssl_pmd_enqueue_burst;
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
init_params->name);
cryptodev_openssl_remove(init_params->name);
return -EFAULT;
}
/** Initialise OPENSSL crypto device */
static int
cryptodev_openssl_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
rte_socket_id(),
{0}
};
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
if (init_params.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
return cryptodev_openssl_create(&init_params);
}
/** Uninitialise OPENSSL crypto device */
static int
cryptodev_openssl_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
RTE_LOG(INFO, PMD,
"Closing OPENSSL crypto device %s on numa socket %u\n",
name, rte_socket_id());
return 0;
}
static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
.probe = cryptodev_openssl_probe,
.remove = cryptodev_openssl_remove
};
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
cryptodev_openssl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_eal/common/include/rte_interrupts.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_INTERRUPTS_H_
#define _RTE_INTERRUPTS_H_
#include <rte_common.h>
/**
* @file
*
* The RTE interrupt interface provides functions to register/unregister
* callbacks for a specific interrupt.
*/
#ifdef __cplusplus
extern "C" {
#endif
/** Interrupt handle */
struct rte_intr_handle;
/** Function to be registered for the specific interrupt */
typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
void *cb_arg);
#include <exec-env/rte_interrupts.h>
/**
* It registers the callback for the specific interrupt. Multiple
 * callbacks can be registered at the same time.
* @param intr_handle
* Pointer to the interrupt handle.
* @param cb
* callback address.
* @param cb_arg
* address of parameter for callback.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
int rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg);
/**
* It unregisters the callback according to the specified interrupt handle.
*
* @param intr_handle
* pointer to the interrupt handle.
* @param cb
* callback address.
* @param cb_arg
 *   address of parameter for callback, (void *)-1 means to remove all
 *   registered callbacks which have the same callback address.
*
* @return
* - On success, return the number of callback entities removed.
* - On failure, a negative value.
*/
int rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg);
/**
* It enables the interrupt for the specified handle.
*
* @param intr_handle
* pointer to the interrupt handle.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
int rte_intr_enable(const struct rte_intr_handle *intr_handle);
/**
* It disables the interrupt for the specified handle.
*
* @param intr_handle
* pointer to the interrupt handle.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
int rte_intr_disable(const struct rte_intr_handle *intr_handle);
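/*
 * Usage sketch (illustrative only; "dev" and "my_isr" are placeholder names,
 * not part of this header): a driver registers a callback for its handle,
 * enables the interrupt, and tears both down in reverse order.
 *
 *     static void my_isr(struct rte_intr_handle *intr_handle, void *cb_arg)
 *     {
 *         struct my_dev *dev = cb_arg;
 *         ... handle the event ...
 *     }
 *
 *     rte_intr_callback_register(&dev->intr_handle, my_isr, dev);
 *     rte_intr_enable(&dev->intr_handle);
 *     ...
 *     rte_intr_disable(&dev->intr_handle);
 *     rte_intr_callback_unregister(&dev->intr_handle, my_isr, dev);
 */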
#ifdef __cplusplus
}
#endif
#endif
|
vicharl/containerdns
|
kdns/deps/libmicrohttpd/src/testcurl/test_concurrent_stop.c
|
/*
This file is part of libmicrohttpd
Copyright (C) 2007, 2009, 2011, 2015, 2016 <NAME>
libmicrohttpd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
libmicrohttpd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with libmicrohttpd; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
/**
* @file test_concurrent_stop.c
* @brief test stopping server while concurrent GETs are ongoing
* @author <NAME>
*/
#include "MHD_config.h"
#include "platform.h"
#include <curl/curl.h>
#include <microhttpd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <pthread.h>
#include "gauger.h"
#ifdef CPU_COUNT
#undef CPU_COUNT
#endif
#define CPU_COUNT 40
/**
* How many rounds of operations do we do for each
* test (total number of requests will be ROUNDS * PAR).
*/
#define ROUNDS 50000
/**
* How many requests do we do in parallel?
*/
#define PAR CPU_COUNT
/**
* Do we use HTTP 1.1?
*/
static int oneone;
/**
* Response to return (re-used).
*/
static struct MHD_Response *response;
static size_t
copyBuffer (void *ptr,
size_t size, size_t nmemb,
void *ctx)
{
(void)ptr;(void)ctx; /* Unused. Silent compiler warning. */
return size * nmemb;
}
static int
ahc_echo (void *cls,
struct MHD_Connection *connection,
const char *url,
const char *method,
const char *version,
const char *upload_data,
size_t *upload_data_size,
void **unused)
{
static int ptr;
const char *me = cls;
int ret;
(void)url;(void)version; /* Unused. Silent compiler warning. */
(void)upload_data;(void)upload_data_size; /* Unused. Silent compiler warning. */
if (0 != strcmp (me, method))
return MHD_NO; /* unexpected method */
if (&ptr != *unused)
{
*unused = &ptr;
return MHD_YES;
}
*unused = NULL;
ret = MHD_queue_response (connection,
MHD_HTTP_OK,
response);
if (ret == MHD_NO)
abort ();
return ret;
}
static void
clean_curl(void * param)
{
if (param)
{
CURL * const c = *((CURL **)param);
if (c)
curl_easy_cleanup (c);
}
}
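/*
 * thread_gets() below pairs pthread_cleanup_push() with clean_curl() so that,
 * if a worker is cancelled while blocked inside curl_easy_perform(), the CURL
 * handle is still released; pthread_cleanup_pop(1) runs the same handler on
 * the normal path, so each iteration frees its handle exactly once.
 */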
static void *
thread_gets (void *param)
{
CURL *c;
CURLcode errornum;
unsigned int i;
char * const url = (char*) param;
int pth_olst;
if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &pth_olst) ||
pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &pth_olst) )
{
fprintf(stderr,
"pthread_setcancelstate()/pthread_setcanceltype() failed.\n");
_exit(99);
}
for (i=0;i<ROUNDS;i++)
{
pthread_testcancel();
c = NULL;
pthread_cleanup_push(clean_curl, (void*)&c);
c = curl_easy_init ();
pthread_testcancel();
curl_easy_setopt (c, CURLOPT_URL, url);
curl_easy_setopt (c, CURLOPT_WRITEFUNCTION, &copyBuffer);
curl_easy_setopt (c, CURLOPT_WRITEDATA, NULL);
curl_easy_setopt (c, CURLOPT_FAILONERROR, 1);
curl_easy_setopt (c, CURLOPT_TIMEOUT, 5L);
if (oneone)
curl_easy_setopt (c, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
else
curl_easy_setopt (c, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0);
curl_easy_setopt (c, CURLOPT_CONNECTTIMEOUT, 5L);
/* NOTE: use of CONNECTTIMEOUT without also
setting NOSIGNAL results in really weird
crashes on my system! */
curl_easy_setopt (c, CURLOPT_NOSIGNAL, 1);
pthread_testcancel();
errornum = curl_easy_perform (c);
pthread_cleanup_pop (1);
if (CURLE_OK != errornum)
return NULL;
}
return NULL;
}
static void *
do_gets (void * param)
{
int j;
pthread_t par[PAR];
char url[64];
int port = (int)(intptr_t)param;
sprintf(url, "http://127.0.0.1:%d/hello_world", port);
for (j=0;j<PAR;j++)
{
if (0 != pthread_create(&par[j], NULL, &thread_gets, (void*)url))
{
fprintf(stderr, "pthread_create failed.\n");
for (j--; j >= 0; j--)
{
pthread_cancel(par[j]);
pthread_join(par[j], NULL);
}
_exit(99);
}
}
(void)sleep (1);
for (j=0;j<PAR;j++)
{
pthread_cancel(par[j]);
pthread_join(par[j], NULL);
}
return NULL;
}
pthread_t start_gets(int port)
{
pthread_t tid;
if (0 != pthread_create(&tid, NULL, &do_gets, (void*)(intptr_t)port))
{
fprintf(stderr, "pthread_create failed.\n");
_exit(99);
}
return tid;
}
static int
testMultithreadedGet (int port,
int poll_flag)
{
struct MHD_Daemon *d;
pthread_t p;
d = MHD_start_daemon (MHD_USE_THREAD_PER_CONNECTION | MHD_USE_INTERNAL_POLLING_THREAD | MHD_USE_ERROR_LOG | poll_flag,
port,
NULL, NULL,
&ahc_echo, "GET",
MHD_OPTION_END);
if (d == NULL)
return 16;
if (0 == port)
{
const union MHD_DaemonInfo *dinfo;
dinfo = MHD_get_daemon_info (d, MHD_DAEMON_INFO_BIND_PORT);
if (NULL == dinfo || 0 == dinfo->port)
{ MHD_stop_daemon (d); return 32; }
port = (int)dinfo->port;
}
p = start_gets (port);
(void)sleep (1);
MHD_stop_daemon (d);
pthread_join (p, NULL);
return 0;
}
static int
testMultithreadedPoolGet (int port,
int poll_flag)
{
struct MHD_Daemon *d;
pthread_t p;
d = MHD_start_daemon (MHD_USE_INTERNAL_POLLING_THREAD | MHD_USE_ERROR_LOG | poll_flag,
port,
NULL, NULL,
&ahc_echo, "GET",
MHD_OPTION_THREAD_POOL_SIZE, CPU_COUNT,
MHD_OPTION_END);
if (d == NULL)
return 16;
if (0 == port)
{
const union MHD_DaemonInfo *dinfo;
dinfo = MHD_get_daemon_info (d, MHD_DAEMON_INFO_BIND_PORT);
if (NULL == dinfo || 0 == dinfo->port)
{ MHD_stop_daemon (d); return 32; }
port = (int)dinfo->port;
}
p = start_gets (port);
(void)sleep (1);
MHD_stop_daemon (d);
pthread_join (p, NULL);
return 0;
}
int
main (int argc, char *const *argv)
{
unsigned int errorCount = 0;
int port;
(void)argc; /* Unused. Silent compiler warning. */
if (MHD_NO != MHD_is_feature_supported (MHD_FEATURE_AUTODETECT_BIND_PORT))
port = 0;
else
port = 1142;
oneone = (NULL != strrchr (argv[0], (int) '/')) ?
(NULL != strstr (strrchr (argv[0], (int) '/'), "11")) : 0;
if (0 != port && oneone)
port += 5;
if (0 != curl_global_init (CURL_GLOBAL_WIN32))
return 2;
response = MHD_create_response_from_buffer (strlen ("/hello_world"),
"/hello_world",
MHD_RESPMEM_MUST_COPY);
errorCount += testMultithreadedGet (port, 0);
if (0 != port) port++;
errorCount += testMultithreadedPoolGet (port, 0);
MHD_destroy_response (response);
if (errorCount != 0)
fprintf (stderr, "Error (code: %u)\n", errorCount);
curl_global_cleanup ();
return errorCount != 0; /* 0 == pass */
}
|
vicharl/containerdns
|
kdns/core/domain_store.h
|
/*
* domain_store.h -- internal namespace database definitions
*
* Copyright (c) 2001-2006, NLnet Labs.
*
* Modified Work Copyright (c) 2018 The TIGLabs Authors.
*
*/
#ifndef _DOMAIN_STORE_H_
#define _DOMAIN_STORE_H_
#include <stdio.h>
#include "dns.h"
#include "kdns.h"
#include "radtree.h"
struct kdns;
typedef struct domain
{
struct radnode* rnode;
domain_name_st* dname;
struct domain* parent;
struct domain* wildcard_child_closest_match;
struct rrset * rrsets;
size_t usage;
uint16_t compressed_offset;
uint32_t maxAnswer;
unsigned is_existing : 1;
unsigned is_apex : 1;
}domain_type;
typedef struct zone
{
struct radnode *node;
struct domain * apex;
struct rrset * soa_rrset;
struct rrset* soa_nx_rrset;
struct rrset* ns_rrset;
unsigned zonestatid; /* array index for zone stats */
unsigned is_ok : 1; /* zone has not expired. */
unsigned is_changed : 1; /* zone was changed by AXFR */
}zone_type;
/* a RR in DNS */
typedef struct rr {
struct domain * owner;
union rdata_atom* rdatas;
char view_name[MAX_VIEW_NAME_LEN];
uint32_t ttl;
uint16_t type;
uint16_t klass;
uint16_t rdata_count;
uint16_t lb_mode;
uint16_t lb_weight;
uint16_t lb_weight_cur;
}rr_type;
/*
* An RRset consists of at least one RR. All RRs are from the same
* zone.
*/
typedef struct rrset
{
struct rrset* next;
struct zone* zone;
struct rr* rrs;
uint16_t rr_count;
}rrset_type;
typedef union rdata_atom
{
domain_type* domain;
/* Default. */
uint16_t* data;
}rdata_atom_type;
typedef struct domain_table
{
struct radtree *nametree;
struct domain* root;
size_t number_total;
}domain_table_type;
typedef struct domain_store
{
struct domain_table* domains;
struct radtree* zonetree;
struct view_tree* viewtree;
}domain_store_type;
/*
* Create a new domain_table containing only the root domain.
*/
domain_table_type *domain_table_create(void);
/*
* Search the domain table for a match and the closest encloser.
*/
int domain_table_search(domain_table_type* table,
const domain_name_st* dname,
domain_type **closest_match,
domain_type **closest_encloser);
/*
* The number of domains stored in the table (minimum is one for the
* root domain).
*/
static inline uint32_t
domain_table_count(domain_table_type* table)
{
return table->nametree->count;
}
void rrset_lower_usage(domain_store_type* db, rrset_type* rrset);
void rrset_delete(domain_store_type* db, domain_type* domain, rrset_type* rrset);
void rr_lower_usage(domain_store_type* db, rr_type* rr);
void add_rdata_to_recyclebin( rr_type* rr);
domain_type* rrset_zero_nonexist_check(domain_type* domain, domain_type* ce);
/*
* Find the specified dname in the domain_table. NULL is returned if
* there is no exact match.
*/
domain_type* domain_table_find(domain_table_type* table,
const domain_name_st* dname);
/*
* Insert a domain name in the domain table. If the domain name is
* not yet present in the table it is copied and a new domain_name_info node
* is created (as well as for the missing parent domain names, if
* any). Otherwise the domain_type that is already in the
* domain_table is returned.
*/
domain_type *domain_table_insert(domain_table_type *table,
const domain_name_st *dname,uint32_t maxAnswer);
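/*
 * Minimal usage sketch (illustrative only; "dname" and "max_answer" are
 * placeholders, and the int result of domain_table_search() is assumed to
 * flag an exact match): the table is created once, owner names are inserted
 * as records are loaded, and lookups go through find or search.
 *
 *     domain_table_type *table = domain_table_create();
 *     domain_type *owner = domain_table_insert(table, dname, max_answer);
 *     domain_type *exact = domain_table_find(table, dname);
 *     domain_type *closest_match, *closest_encloser;
 *     int found = domain_table_search(table, dname,
 *                                     &closest_match, &closest_encloser);
 */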
/*
* Add an RRset to the specified domain. Updates the is_existing flag
* as required.
*/
void domain_add_rrset(domain_type* domain, rrset_type* rrset);
rrset_type* domain_find_rrset(domain_type* domain, zone_type* zone, uint16_t type);
rrset_type* domain_find_any_rrset(domain_type* domain, zone_type* zone);
zone_type* domain_find_zone(domain_store_type* db, domain_type* domain);
/* find DNAME rrset in domain->parent or higher and return that domain */
domain_type * find_domain_name_above(domain_type* domain, zone_type* zone);
domain_type* domain_wildcard_child(domain_type* domain);
domain_type *domain_previous_existing_child(domain_type* domain);
static inline domain_name_st *
domain_dname(domain_type* domain)
{
return (domain_name_st *) domain->dname;
}
static inline domain_type *
domain_previous(domain_type* domain)
{
struct radnode* prev = radix_prev(domain->rnode);
return prev == NULL ? NULL : (domain_type*)prev->elem;
}
static inline domain_type *
domain_next(domain_type* domain)
{
struct radnode* next = radix_next(domain->rnode);
return next == NULL ? NULL : (domain_type*)next->elem;
}
/* easy comparison for subdomain, true if d1 is subdomain of d2. */
static inline int domain_is_subdomain(domain_type* d1, domain_type* d2)
{ return domain_name_is_subdomain(domain_dname(d1), domain_dname(d2)); }
/* easy printout, to static buffer of domain_name_to_string, fqdn. */
static inline const char* domain_to_string(domain_type* domain)
{ return domain_name_to_string(domain_dname(domain), NULL); }
static inline int rdata_atom_is_domain(uint16_t type, size_t index);
static inline int rdata_atom_is_literal_domain(uint16_t type, size_t index);
static inline domain_type *
rdata_atom_domain(rdata_atom_type atom)
{
return atom.domain;
}
static inline uint16_t
rdata_atom_size(rdata_atom_type atom)
{
return *atom.data;
}
static inline uint8_t *
rdata_atomdata(rdata_atom_type atom)
{
return (uint8_t *) (atom.data + 1);
}
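/*
 * A non-domain rdata atom is stored as a length-prefixed buffer: data[0]
 * holds the rdata length in octets and the payload starts right after it,
 * which is what rdata_atom_size() and rdata_atomdata() above return.
 */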
/* Find the zone for the specified dname in DB. */
zone_type *domain_store_find_zone(domain_store_type *db, const domain_name_st *dname);
/*
* Delete a domain name from the domain table. Removes domain_name_info node.
* Only deletes if usage is 0, has no rrsets and no children. Checks parents
* for deletion as well. Adjusts numberlist(domain.number), and
* wcard_child closest match.
*/
void domain_table_deldomain(domain_store_type* db, domain_type* domain);
/** marshal rdata into buffer, must be MAX_RDLENGTH in size */
size_t rr_marshal_rdata(rr_type* rr, uint8_t* rdata, size_t sz);
/* dbaccess.c */
int domain_store_lookup (struct domain_store* db,
const domain_name_st* dname,
domain_type **closest_match,
domain_type **closest_encloser);
/* pass number of children (to alloc in dirty array) */
struct domain_store *domain_store_open(void);
void domain_store_close(struct domain_store* db);
/** read one zonefile into memory and revert on parse error, write to udb */
void domain_store_read_zonefile(struct kdns* kdns, struct zone* zone);
void apex_rrset_checks(rrset_type* rrset,domain_type* domain);
zone_type* domain_store_zone_create(domain_store_type* db, const domain_name_st* dname);
void delete_zone_rrs(domain_store_type* db, zone_type* zone);
void domain_store_zone_delete(domain_store_type* db, zone_type* zone);
static inline int
rdata_atom_is_domain(uint16_t type, size_t index)
{
const rrtype_descriptor_st *descriptor
= rrtype_descriptor_by_type(type);
return (index < descriptor->maximum
&& (descriptor->wireformat[index] == RDATA_WF_COMPRESSED_DNAME
|| descriptor->wireformat[index] == RDATA_WF_UNCOMPRESSED_DNAME));
}
static inline int
rdata_atom_is_literal_domain(uint16_t type, size_t index)
{
const rrtype_descriptor_st *descriptor
= rrtype_descriptor_by_type(type);
return (index < descriptor->maximum
&& (descriptor->wireformat[index] == RDATA_WF_LITERAL_DNAME));
}
static inline rdata_wireformat_type
rdata_atom_wireformat_type(uint16_t type, size_t index)
{
const rrtype_descriptor_st *descriptor
= rrtype_descriptor_by_type(type);
assert(index < descriptor->maximum);
return (rdata_wireformat_type) descriptor->wireformat[index];
}
static inline uint16_t
rrset_rrtype(rrset_type* rrset)
{
assert(rrset);
assert(rrset->rr_count > 0);
return rrset->rrs[0].type;
}
static inline uint16_t
rrset_rrclass(rrset_type* rrset)
{
assert(rrset);
assert(rrset->rr_count > 0);
return rrset->rrs[0].klass;
}
#endif
|
vicharl/containerdns
|
kdns/dpdk-17.02/lib/librte_eal/common/include/rte_bus.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016 NXP
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of NXP nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_BUS_H_
#define _RTE_BUS_H_
/**
* @file
*
* DPDK device bus interface
*
* This file exposes API and interfaces for bus abstraction
* over the devices and drivers in EAL.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <sys/queue.h>
#include <rte_log.h>
#include <rte_dev.h>
/** Double linked list of buses */
TAILQ_HEAD(rte_bus_list, rte_bus);
/**
* Bus specific scan for devices attached on the bus.
 * For each bus object, the scan would be responsible for finding devices and
* adding them to its private device list.
*
* A bus should mandatorily implement this method.
*
* @return
* 0 for successful scan
* <0 for unsuccessful scan with error value
*/
typedef int (*rte_bus_scan_t)(void);
/**
* Implementation specific probe function which is responsible for linking
* devices on that bus with applicable drivers.
*
* This is called while iterating over each registered bus.
*
* @return
* 0 for successful probe
* !0 for any error while probing
*/
typedef int (*rte_bus_probe_t)(void);
/**
* A structure describing a generic bus.
*/
struct rte_bus {
TAILQ_ENTRY(rte_bus) next; /**< Next bus object in linked list */
const char *name; /**< Name of the bus */
rte_bus_scan_t scan; /**< Scan for devices attached to bus */
rte_bus_probe_t probe; /**< Probe devices on bus */
};
/**
* Register a Bus handler.
*
* @param bus
* A pointer to a rte_bus structure describing the bus
* to be registered.
*/
void rte_bus_register(struct rte_bus *bus);
/**
* Unregister a Bus handler.
*
* @param bus
* A pointer to a rte_bus structure describing the bus
* to be unregistered.
*/
void rte_bus_unregister(struct rte_bus *bus);
/**
* Scan all the buses.
*
* @return
* 0 in case of success in scanning all buses
* !0 in case of failure to scan
*/
int rte_bus_scan(void);
/**
* For each device on the buses, perform a driver 'match' and call the
* driver-specific probe for device initialization.
*
* @return
* 0 for successful match/probe
* !0 otherwise
*/
int rte_bus_probe(void);
/**
* Dump information of all the buses registered with EAL.
*
* @param f
* A valid and open output stream handle
*
* @return
* 0 in case of success
* !0 in case there is error in opening the output stream
*/
void rte_bus_dump(FILE *f);
/**
* Helper for Bus registration.
* The constructor has higher priority than PMD constructors.
*/
#define RTE_REGISTER_BUS(nm, bus) \
static void __attribute__((constructor(101), used)) businitfn_ ##nm(void) \
{\
(bus).name = RTE_STR(nm);\
rte_bus_register(&bus); \
}
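/*
 * Illustrative sketch (not part of the original header; "mybus" and its
 * callbacks are placeholders): a bus driver supplies scan/probe handlers and
 * registers itself at constructor time through the macro above, which also
 * fills in the name from the first macro argument.
 *
 *     static int mybus_scan(void)  { return 0; }
 *     static int mybus_probe(void) { return 0; }
 *
 *     static struct rte_bus mybus = {
 *         .scan  = mybus_scan,
 *         .probe = mybus_probe,
 *     };
 *     RTE_REGISTER_BUS(mybus, mybus);
 */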
#ifdef __cplusplus
}
#endif
#endif /* _RTE_BUS_H_ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/scheduler/scheduler_roundrobin.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
struct rr_scheduler_qp_ctx {
struct scheduler_slave slaves[MAX_SLAVES_NUM];
uint32_t nb_slaves;
uint32_t last_enq_slave_idx;
uint32_t last_deq_slave_idx;
};
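/*
 * Each scheduler queue pair keeps its own copy of the slave list plus
 * independent enqueue and dequeue cursors: schedule_enqueue() hands a whole
 * burst to the slave at last_enq_slave_idx and then advances the index modulo
 * nb_slaves, while the dequeue side walks from last_deq_slave_idx until it
 * finds a slave with in-flight ops.  Session pointers are swapped to the
 * slave's own sessions before enqueue and restored for any ops the slave did
 * not accept.
 */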
static uint16_t
schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
struct rte_cryptodev_sym_session *sessions[nb_ops];
struct scheduler_session *sess0, *sess1, *sess2, *sess3;
if (unlikely(nb_ops == 0))
return 0;
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
sess1 = (struct scheduler_session *)
ops[i+1]->sym->session->_private;
sess2 = (struct scheduler_session *)
ops[i+2]->sym->session->_private;
sess3 = (struct scheduler_session *)
ops[i+3]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
sessions[i + 1] = ops[i + 1]->sym->session;
sessions[i + 2] = ops[i + 2]->sym->session;
sessions[i + 3] = ops[i + 3]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
ops[i + 3]->sym->session = sess3->sessions[slave_idx];
rte_prefetch0(ops[i + 4]->sym->session);
rte_prefetch0(ops[i + 5]->sym->session);
rte_prefetch0(ops[i + 6]->sym->session);
rte_prefetch0(ops[i + 7]->sym->session);
}
for (; i < nb_ops; i++) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
slave->nb_inflight_cops += processed_ops;
rr_qp_ctx->last_enq_slave_idx += 1;
rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
/* recover session if enqueue is failed */
if (unlikely(processed_ops < nb_ops)) {
for (i = processed_ops; i < nb_ops; i++)
ops[i]->sym->session = sessions[i];
}
return processed_ops;
}
static uint16_t
schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
struct rr_scheduler_qp_ctx *rr_qp_ctx =
gen_qp_ctx->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
struct rte_cryptodev_sym_session *sessions[nb_ops];
struct scheduler_session *sess0, *sess1, *sess2, *sess3;
if (unlikely(nb_ops == 0))
return 0;
for (i = 0; i < nb_ops && i < 4; i++) {
rte_prefetch0(ops[i]->sym->session);
rte_prefetch0(ops[i]->sym->m_src);
}
for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
sess1 = (struct scheduler_session *)
ops[i+1]->sym->session->_private;
sess2 = (struct scheduler_session *)
ops[i+2]->sym->session->_private;
sess3 = (struct scheduler_session *)
ops[i+3]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
sessions[i + 1] = ops[i + 1]->sym->session;
sessions[i + 2] = ops[i + 2]->sym->session;
sessions[i + 3] = ops[i + 3]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
ops[i + 3]->sym->session = sess3->sessions[slave_idx];
ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
rte_prefetch0(ops[i + 4]->sym->session);
rte_prefetch0(ops[i + 4]->sym->m_src);
rte_prefetch0(ops[i + 5]->sym->session);
rte_prefetch0(ops[i + 5]->sym->m_src);
rte_prefetch0(ops[i + 6]->sym->session);
rte_prefetch0(ops[i + 6]->sym->m_src);
rte_prefetch0(ops[i + 7]->sym->session);
rte_prefetch0(ops[i + 7]->sym->m_src);
}
for (; i < nb_ops; i++) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
slave->nb_inflight_cops += processed_ops;
rr_qp_ctx->last_enq_slave_idx += 1;
rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
/* recover session if enqueue is failed */
if (unlikely(processed_ops < nb_ops)) {
for (i = processed_ops; i < nb_ops; i++)
ops[i]->sym->session = sessions[i];
}
return processed_ops;
}
static uint16_t
schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
struct scheduler_slave *slave;
uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
uint16_t nb_deq_ops;
if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
do {
last_slave_idx += 1;
if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
last_slave_idx = 0;
/* looped back, means no inflight cops in the queue */
if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
return 0;
} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
== 0);
}
slave = &rr_qp_ctx->slaves[last_slave_idx];
nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
last_slave_idx += 1;
last_slave_idx %= rr_qp_ctx->nb_slaves;
rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
slave->nb_inflight_cops -= nb_deq_ops;
return nb_deq_ops;
}
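/*
 * The ordering variants preserve the caller's submission order across slaves:
 * schedule_enqueue_ordering() stamps each op's source mbuf with a sequence
 * number taken from the queue pair, and schedule_dequeue_ordering() below
 * pushes completed mbufs into the rte_reorder buffer and only returns the ops
 * drained from it in sequence order.
 */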
static uint16_t
schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
struct scheduler_slave *slave;
struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
uint16_t nb_deq_ops, nb_drained_mbufs;
const uint16_t nb_op_ops = nb_ops;
struct rte_crypto_op *op_ops[nb_op_ops];
struct rte_mbuf *reorder_mbufs[nb_op_ops];
uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
uint16_t i;
if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
do {
last_slave_idx += 1;
if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
last_slave_idx = 0;
/* looped back, means no inflight cops in the queue */
if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
return 0;
} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
== 0);
}
slave = &rr_qp_ctx->slaves[last_slave_idx];
nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
slave->qp_id, op_ops, nb_ops);
rr_qp_ctx->last_deq_slave_idx += 1;
rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves;
slave->nb_inflight_cops -= nb_deq_ops;
for (i = 0; i < nb_deq_ops && i < 4; i++)
rte_prefetch0(op_ops[i]->sym->m_src);
for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
mbuf0 = op_ops[i]->sym->m_src;
mbuf1 = op_ops[i + 1]->sym->m_src;
mbuf2 = op_ops[i + 2]->sym->m_src;
mbuf3 = op_ops[i + 3]->sym->m_src;
mbuf0->userdata = op_ops[i];
mbuf1->userdata = op_ops[i + 1];
mbuf2->userdata = op_ops[i + 2];
mbuf3->userdata = op_ops[i + 3];
rte_reorder_insert(reorder_buff, mbuf0);
rte_reorder_insert(reorder_buff, mbuf1);
rte_reorder_insert(reorder_buff, mbuf2);
rte_reorder_insert(reorder_buff, mbuf3);
rte_prefetch0(op_ops[i + 4]->sym->m_src);
rte_prefetch0(op_ops[i + 5]->sym->m_src);
rte_prefetch0(op_ops[i + 6]->sym->m_src);
rte_prefetch0(op_ops[i + 7]->sym->m_src);
}
for (; i < nb_deq_ops; i++) {
mbuf0 = op_ops[i]->sym->m_src;
mbuf0->userdata = op_ops[i];
rte_reorder_insert(reorder_buff, mbuf0);
}
nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
nb_ops);
for (i = 0; i < nb_drained_mbufs && i < 4; i++)
rte_prefetch0(reorder_mbufs[i]);
for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
i += 4) {
ops[i] = *(struct rte_crypto_op **)reorder_mbufs[i]->userdata;
ops[i + 1] = *(struct rte_crypto_op **)
reorder_mbufs[i + 1]->userdata;
ops[i + 2] = *(struct rte_crypto_op **)
reorder_mbufs[i + 2]->userdata;
ops[i + 3] = *(struct rte_crypto_op **)
reorder_mbufs[i + 3]->userdata;
reorder_mbufs[i]->userdata = NULL;
reorder_mbufs[i + 1]->userdata = NULL;
reorder_mbufs[i + 2]->userdata = NULL;
reorder_mbufs[i + 3]->userdata = NULL;
rte_prefetch0(reorder_mbufs[i + 4]);
rte_prefetch0(reorder_mbufs[i + 5]);
rte_prefetch0(reorder_mbufs[i + 6]);
rte_prefetch0(reorder_mbufs[i + 7]);
}
for (; i < nb_drained_mbufs; i++) {
ops[i] = *(struct rte_crypto_op **)
reorder_mbufs[i]->userdata;
reorder_mbufs[i]->userdata = NULL;
}
return nb_drained_mbufs;
}
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint8_t slave_id)
{
return 0;
}
static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint8_t slave_id)
{
return 0;
}
static int
scheduler_start(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint16_t i;
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
struct rr_scheduler_qp_ctx *rr_qp_ctx =
qp_ctx->private_qp_ctx;
uint32_t j;
memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
sizeof(struct scheduler_slave));
for (j = 0; j < sched_ctx->nb_slaves; j++) {
rr_qp_ctx->slaves[j].dev_id =
sched_ctx->slaves[j].dev_id;
rr_qp_ctx->slaves[j].qp_id = i;
}
rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
rr_qp_ctx->last_enq_slave_idx = 0;
rr_qp_ctx->last_deq_slave_idx = 0;
if (sched_ctx->reordering_enabled) {
qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
} else {
qp_ctx->schedule_enqueue = &schedule_enqueue;
qp_ctx->schedule_dequeue = &schedule_dequeue;
}
}
return 0;
}
static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
return 0;
}
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
struct rr_scheduler_qp_ctx *rr_qp_ctx;
rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
rte_socket_id());
if (!rr_qp_ctx) {
CS_LOG_ERR("failed allocate memory for private queue pair");
return -ENOMEM;
}
qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
return 0;
}
static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
return 0;
}
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
slave_attach,
slave_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
scheduler_create_private_ctx
};
struct rte_cryptodev_scheduler scheduler = {
.name = "roundrobin-scheduler",
.description = "scheduler which will round robin burst across "
"slave crypto devices",
.mode = CDEV_SCHED_MODE_ROUNDROBIN,
.ops = &scheduler_rr_ops
};
struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
|
vicharl/containerdns
|
kdns/core/view.c
|
/*
*
* Copyright (c) 2018 The TIGLabs Authors.
*
*/
#include <jansson.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include "view.h"
#include "kdns.h"
#define CREATE 0x1
#define FIND_FIRST 0x2
#define FIND_BEST 0x4
static view_node_t *view_tree_alloc_node(view_tree_t *tree)
{
view_node_t *node;
if (tree->free) {
node = tree->free;
tree->free = node->right;
} else {
node = xalloc_zero(sizeof *node);
}
/* init node */
node->parent = NULL;
node->left = NULL;
node->right = NULL;
node->view_data = VIEW_NULL_VALUE;
return node;
}
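/*
 * The view tree is a binary trie keyed on the bits of an IPv4 address, most
 * significant bit first.  do_view_tree_get() walks at most nbits levels: with
 * CREATE it materialises missing nodes along the path, with FIND_FIRST it
 * returns the first data-carrying node it meets, and with FIND_BEST it
 * remembers the deepest data-carrying node seen and falls back to it when the
 * walk runs off the tree, which is how view_find() approximates
 * longest-prefix matching.
 */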
static view_node_t* do_view_tree_get(view_tree_t *tree, uint8_t *key, size_t nbits, int flags)
{
uint8_t bit = 0x80;
size_t byte = 0;
view_node_t *cur = tree->root;
view_node_t *last = NULL;
/* walk down the tree */
while (cur && nbits-- > 0) {
if (cur->view_data != VIEW_NULL_VALUE) {
if (flags & FIND_FIRST) {
return cur;
}
if (flags & FIND_BEST) {
last = cur;
}
}
if (bit & key[byte]) {
if (cur->right == NULL && (flags & CREATE)) {
cur->right = view_tree_alloc_node(tree);
cur->right->parent = cur;
}
cur = cur->right;
} else {
if (cur->left == NULL && (flags & CREATE)) {
cur->left = view_tree_alloc_node(tree);
cur->left->parent = cur;
}
cur = cur->left;
}
bit >>= 1;
if (bit == 0) {
bit = 0x80;
byte++;
}
}
if (!cur && (flags & FIND_BEST)) {
return last;
}
return cur;
}
static int do_view_tree_insert(view_tree_t *tree, uint8_t *key, size_t nbits, char *pcidr, char *view_name)
{
view_value_t *view_data = (view_value_t *)xalloc_zero(sizeof(view_value_t));
if (view_data == NULL) {
log_msg(LOG_ERR, "no mem for caloc :%s--%s\n", pcidr, view_name);
return -1;
}
view_node_t *node = do_view_tree_get(tree, key, nbits, CREATE);
if (node->view_data != VIEW_NULL_VALUE) {
log_msg(LOG_INFO, "warning: insert duplicate view tree node!\n");
free(view_data);
return 0;
}
memcpy(view_data->cidrs, pcidr, strlen(pcidr));
memcpy(view_data->view_name, view_name, strlen(view_name));
/* set view_name */
node->view_data = view_data;
tree->size++;
return 0;
}
view_value_t* view_find(view_tree_t *tree, uint8_t *key, size_t nbits)
{
view_node_t *node = do_view_tree_get(tree, key, nbits, FIND_BEST);
return node ? node->view_data : VIEW_NO_NODE;
}
static int do_view_tree_delete(view_tree_t *tree, uint8_t *key, size_t nbits, char *pcidr, char *view_name)
{
view_node_t *node = do_view_tree_get(tree, key, nbits, 0);
if (node == NULL || node->view_data == VIEW_NULL_VALUE) {
log_msg(LOG_ERR, "warning: delete non-exist key in view tree!\n");
return -1;
}
if (strcmp(node->view_data->cidrs, pcidr) || strcmp(node->view_data->view_name, view_name)) {
log_msg(LOG_ERR, "warning: cidrs %s or view_name %s different with cidrs %s or view_name %s in view tree!\n",
pcidr, view_name, node->view_data->cidrs, node->view_data->view_name);
return -1;
}
free(node->view_data);
node->view_data = VIEW_NULL_VALUE;
tree->size--;
if (node->left || node->right || (node->parent == NULL)) {
return 0;
}
while (!node->left && !node->right) {
if (node->parent->left == node) {
node->parent->left = NULL;
} else {
node->parent->right = NULL;
}
node->right = tree->free;
tree->free = node;
node = node->parent;
tree->free->parent = NULL;
if (node->view_data != VIEW_NULL_VALUE || node->parent == NULL) {
break;
}
}
return 0;
}
view_tree_t *view_tree_create(void)
{
view_tree_t *tree = xalloc_zero(sizeof *tree);
tree->free = NULL;
tree->size = 0;
tree->root = view_tree_alloc_node(tree);
return tree;
}
int view_operate(view_tree_t *tree, char *pcidr, char *view_name, enum view_action action)
{
int ret = -1;
size_t nbits = 32, maxbits = 32;
if (action != ACTION_ADD && action != ACTION_DEL) {
log_msg(LOG_ERR, "action %d is not valid!\n", action);
return -1;
}
char *cidr = strdup(pcidr);
char *mask = strchr(cidr, '/');
if (mask != NULL) {
*mask = '\0';
mask++;
nbits = atoi(mask);
if (nbits <= 0 || nbits >= maxbits) {
log_msg(LOG_ERR, "mask bits '%s' is not valid!\n", mask);
goto _out;
}
}
//check the addr
struct in_addr ip;
if (inet_pton(AF_INET, cidr, &ip) != 1) {
log_msg(LOG_ERR, "ipv4 addr '%s' is not valid!\n", cidr);
goto _out;
}
if (action == ACTION_ADD) {
ret = do_view_tree_insert(tree, (uint8_t *)&ip.s_addr, nbits, pcidr, view_name);
if (ret != 0) {
log_msg(LOG_ERR, "failed to insert view_name %s, cidr %s in view tree!\n", view_name, cidr);
}
} else {
ret = do_view_tree_delete(tree, (uint8_t *)&ip.s_addr, nbits, pcidr, view_name);
if (ret != 0) {
log_msg(LOG_ERR, "failed to delete view_name %s, cidr %s from view tree!\n", view_name, cidr);
}
}
_out:
free(cidr);
return ret;
}
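/*
 * Minimal usage sketch (illustrative only; the addresses and view name are
 * placeholders): CIDRs are registered per view, and lookups pass the query
 * source address as a network-byte-order byte string.
 *
 *     view_tree_t *tree = view_tree_create();
 *     view_operate(tree, "10.0.0.0/8", "internal", ACTION_ADD);
 *
 *     struct in_addr ip;
 *     inet_pton(AF_INET, "10.1.2.3", &ip);
 *     view_value_t *v = view_find(tree, (uint8_t *)&ip.s_addr, 32);
 */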
void view_tree_dump(view_node_t *node, void* arg1, void (*callback)(void*, view_value_t *))
{
if (node->view_data != VIEW_NULL_VALUE) {
callback(arg1, node->view_data);
}
if (node->left) {
view_tree_dump(node->left, arg1, callback);
}
if (node->right) {
view_tree_dump(node->right, arg1, callback);
}
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/qede_eth_if.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _QEDE_ETH_IF_H
#define _QEDE_ETH_IF_H
#include "qede_if.h"
/*forward decl */
struct eth_slow_path_rx_cqe;
#define INIT_STRUCT_FIELD(field, value) .field = value
#define QED_ETH_INTERFACE_VERSION 609
#define QEDE_MAX_MCAST_FILTERS 64
enum qed_filter_rx_mode_type {
QED_FILTER_RX_MODE_TYPE_REGULAR,
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
QED_FILTER_RX_MODE_TYPE_PROMISC,
};
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
QED_FILTER_TYPE_RX_MODE,
QED_MAX_FILTER_TYPES,
};
struct qed_dev_eth_info {
struct qed_dev_info common;
uint8_t num_queues;
uint8_t num_tc;
struct ether_addr port_mac;
uint16_t num_vlan_filters;
uint32_t num_mac_addrs;
/* Legacy VF - this affects the datapath */
bool is_legacy;
};
struct qed_stop_rxq_params {
uint8_t rss_id;
uint8_t rx_queue_id;
uint8_t vport_id;
bool eq_completion_only;
};
struct qed_update_vport_params {
uint8_t vport_id;
uint8_t update_vport_active_flg;
uint8_t vport_active_flg;
uint8_t update_inner_vlan_removal_flg;
uint8_t inner_vlan_removal_flg;
uint8_t update_tx_switching_flg;
uint8_t tx_switching_flg;
uint8_t update_accept_any_vlan_flg;
uint8_t accept_any_vlan;
uint8_t update_rss_flg;
uint16_t mtu;
};
struct qed_start_vport_params {
bool remove_inner_vlan;
bool handle_ptp_pkts;
bool gro_enable;
bool drop_ttl0;
uint8_t vport_id;
uint16_t mtu;
bool clear_stats;
};
struct qed_stop_txq_params {
uint8_t rss_id;
uint8_t tx_queue_id;
};
struct qed_eth_ops {
const struct qed_common_ops *common;
int (*fill_dev_info)(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int (*vport_start)(struct ecore_dev *edev,
struct qed_start_vport_params *params);
int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
int (*vport_update)(struct ecore_dev *edev,
struct qed_update_vport_params *params);
int (*q_rx_start)(struct ecore_dev *cdev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
int (*q_rx_stop)(struct ecore_dev *edev,
struct qed_stop_rxq_params *params);
int (*q_tx_start)(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
int (*q_tx_stop)(struct ecore_dev *edev,
struct qed_stop_txq_params *params);
int (*eth_cqe_completion)(struct ecore_dev *edev,
uint8_t rss_id,
struct eth_slow_path_rx_cqe *cqe);
int (*fastpath_stop)(struct ecore_dev *edev);
void (*fastpath_start)(struct ecore_dev *edev);
void (*get_vport_stats)(struct ecore_dev *edev,
struct ecore_eth_stats *stats);
};
/* externs */
extern const struct qed_common_ops qed_common_ops_pass;
const struct qed_eth_ops *qed_get_eth_ops(void);
int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type);
bool qed_update_rss_parm_cmt(struct ecore_dev *edev, uint16_t *p_tbl);
#endif /* _QEDE_ETH_IF_H */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/qede/base/ecore_hsi_common.h
|
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_COMMON__
#define __ECORE_HSI_COMMON__
/********************************/
/* Add include to common target */
/********************************/
#include "common_hsi.h"
/*
* opcodes for the event ring
*/
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
COMMON_EVENT_VF_START,
COMMON_EVENT_VF_STOP,
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
COMMON_EVENT_MALICIOUS_VF,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
/*
* Common Ramrod Command IDs
*/
enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
COMMON_RAMROD_VF_START /* VF Function Start */,
COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
COMMON_RAMROD_EMPTY /* Empty Ramrod */,
MAX_COMMON_RAMROD_CMD_ID
};
/*
* The core storm context for the Ystorm
*/
struct ystorm_core_conn_st_ctx {
__le32 reserved[4];
};
/*
* The core storm context for the Pstorm
*/
struct pstorm_core_conn_st_ctx {
__le32 reserved[4];
};
/*
* Core Slowpath Connection storm context of Xstorm
*/
struct xstorm_core_conn_st_ctx {
__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
/* Consolidation Ring Base Address */
struct regpair consolid_base_addr;
__le16 spq_cons /* SPQ Ring Consumer */;
__le16 consolid_cons /* Consolidation Ring Consumer */;
__le32 reserved0[55] /* Pad to 15 cycles */;
};
struct xstorm_core_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 flags0;
/* exist_in_qm0 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
/* cf_array_cf */
#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
/* cf18 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
/* cf_array_cf_en */
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
/* cf18en */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
/* rule0en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
/* bit17 */
#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
/* bit18 */
#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
/* bit19 */
#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
/* bit20 */
#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
/* bit21 */
#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
/* cf23 */
#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 consolid_prod /* physical_q1 */;
__le16 reserved16 /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_or_spq_prod /* word4 */;
__le16 word5 /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
u8 byte6 /* byte6 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* cf_array0 */;
__le32 reg6 /* cf_array1 */;
__le16 word7 /* word7 */;
__le16 word8 /* word8 */;
__le16 word9 /* word9 */;
__le16 word10 /* word10 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
__le32 reg9 /* reg9 */;
u8 byte7 /* byte7 */;
u8 byte8 /* byte8 */;
u8 byte9 /* byte9 */;
u8 byte10 /* byte10 */;
u8 byte11 /* byte11 */;
u8 byte12 /* byte12 */;
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
u8 byte16 /* byte16 */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
__le32 reg12 /* reg12 */;
__le32 reg13 /* reg13 */;
__le32 reg14 /* reg14 */;
__le32 reg15 /* reg15 */;
__le32 reg16 /* reg16 */;
__le32 reg17 /* reg17 */;
__le32 reg18 /* reg18 */;
__le32 reg19 /* reg19 */;
__le16 word12 /* word12 */;
__le16 word13 /* word13 */;
__le16 word14 /* word14 */;
__le16 word15 /* word15 */;
};
struct tstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
__le32 reg6 /* reg6 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
__le16 word1 /* word1 */;
__le16 word2 /* conn_dpi */;
__le16 word3 /* word3 */;
__le32 reg9 /* reg9 */;
__le32 reg10 /* reg10 */;
};
struct ustorm_core_conn_ag_ctx {
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
__le16 word1 /* word1 */;
__le32 rx_producers /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
};
/*
* The core storm context for the Mstorm
*/
struct mstorm_core_conn_st_ctx {
__le32 reserved[24];
};
/*
* The core storm context for the Ustorm
*/
struct ustorm_core_conn_st_ctx {
__le32 reserved[4];
};
/*
* core connection context
*/
struct core_conn_context {
/* ystorm storm context */
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2] /* padding */;
/* pstorm storm context */
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2] /* padding */;
/* xstorm storm context */
struct xstorm_core_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
struct xstorm_core_conn_ag_ctx xstorm_ag_context;
/* tstorm aggregative context */
struct tstorm_core_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
struct ustorm_core_conn_ag_ctx ustorm_ag_context;
/* mstorm storm context */
struct mstorm_core_conn_st_ctx mstorm_st_context;
/* ustorm storm context */
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2] /* padding */;
};
/*
* How ll2 should deal with packet upon errors
*/
enum core_error_handle {
LL2_DROP_PACKET /* If error occurs drop packet */,
LL2_DO_NOTHING /* If error occurs do nothing */,
LL2_ASSERT /* If error occurs assert */,
MAX_CORE_ERROR_HANDLE
};
/*
* opcodes for the event ring
*/
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
/*
* The L4 pseudo checksum mode for Core
*/
enum core_l4_pseudo_checksum_mode {
/* Pseudo Checksum on packet is calculated with the correct packet length. */
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
/* Pseudo Checksum on packet is calculated with zero length. */
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
/*
 * Light-L2 per-port GSI error statistics
*/
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
struct regpair gsi_unsupported_pkt_typ;
struct regpair gsi_crcchksm_error;
};
/*
* Ethernet TX Per Queue Stats
*/
struct core_ll2_pstorm_per_queue_stat {
/* number of total bytes sent without errors */
struct regpair sent_ucast_bytes;
/* number of total bytes sent without errors */
struct regpair sent_mcast_bytes;
/* number of total bytes sent without errors */
struct regpair sent_bcast_bytes;
/* number of total packets sent without errors */
struct regpair sent_ucast_pkts;
/* number of total packets sent without errors */
struct regpair sent_mcast_pkts;
/* number of total packets sent without errors */
struct regpair sent_bcast_pkts;
};
/*
* Light-L2 RX Producers in Tstorm RAM
*/
struct core_ll2_rx_prod {
__le16 bd_prod /* BD Producer */;
__le16 cqe_prod /* CQE Producer */;
__le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
/* Number of packets discarded because they are bigger than MTU */
struct regpair packet_too_big_discard;
/* Number of packets discarded due to lack of host buffers */
struct regpair no_buff_discard;
};
struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
/*
* Core Ramrod Command IDs (light L2)
*/
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
/*
 * RoCE flavor type for Core (Light L2)
*/
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
/*
* Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
*/
struct core_rx_action_on_error {
u8 error_type;
/* ll2 how to handle error packet_too_big (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
/* ll2 how to handle error with no_buff (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
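/*
 * Illustrative sketch, not part of the original HSI definitions: packs the
 * two error-handling policies (enum core_error_handle values) into the
 * single error_type byte using the MASK/SHIFT pairs above. The helper name
 * is an assumption made for this example only.
 */
static inline void
example_set_rx_error_actions(struct core_rx_action_on_error *act,
			     u8 too_big_action, u8 no_buff_action)
{
	/* each policy occupies two bits, e.g. LL2_DROP_PACKET or LL2_DO_NOTHING */
	act->error_type =
		((too_big_action & CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK)
		 << CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT) |
		((no_buff_action & CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK)
		 << CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT);
}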
/*
* Core RX BD for Light L2
*/
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
/*
* Core RX CM offload BD for Light L2
*/
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
/*
 * Core RX BD union for Light L2 (static or dynamic buffer length)
*/
union core_rx_bd_union {
struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
/* Core Rx Bd with dynamic buffer length */
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
/*
* Opaque Data for Light L2 RX CQE .
*/
struct core_rx_cqe_opaque_data {
__le32 data[2] /* Opaque CQE Data */;
};
/*
* Core RX CQE Type for Light L2
*/
enum core_rx_cqe_type {
CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
MAX_CORE_RX_CQE_TYPE
};
/*
* Core RX CQE for Light L2 .
*/
struct core_rx_fast_path_cqe {
u8 type /* CQE type */;
/* Offset (in bytes) of the packet from start of the buffer */
u8 placement_offset;
/* Parsing and error flags from the parser */
struct parsing_and_err_flags parse_flags;
__le16 packet_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
__le32 reserved[4];
};
/*
* Core Rx CM offload CQE .
*/
struct core_rx_gsi_offload_cqe {
u8 type /* CQE type */;
u8 data_length_error /* set if gsi data is bigger than buff */;
/* Parsing and error flags from the parser */
struct parsing_and_err_flags parse_flags;
__le16 data_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
u8 reserved1[2];
__le32 gid_dst[4] /* Gid destination address */;
};
/*
* Core RX CQE for Light L2 .
*/
struct core_rx_slow_path_cqe {
u8 type /* CQE type */;
u8 ramrod_cmd_id;
__le16 echo;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
__le32 reserved1[5];
};
/*
 * Core RX CQE union for Light L2
*/
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
/*
* Ramrod data for rx queue start ramrod
*/
struct core_rx_start_ramrod_data {
struct regpair bd_base /* bd address of the first bd page */;
struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
__le16 mtu /* Maximum transmission unit */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* index of the protocol index */;
u8 complete_cqe_flg /* post completion to the CQE ring if set */;
u8 complete_event_flg /* post completion to the event ring if set */;
u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
/* if set, 802.1q tags will be removed and copied to CQE */
u8 inner_vlan_removal_en;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
* main_func_queue is set.
*/
u8 mf_si_bcast_accept_all;
/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
* main_func_queue is set.
*/
u8 mf_si_mcast_accept_all;
/* Specifies how ll2 should deal with packets errors: packet_too_big and
* no_buff
*/
struct core_rx_action_on_error action_on_error;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
u8 reserved[7];
};
/*
* Ramrod data for rx queue stop ramrod
*/
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg /* post completion to the CQE ring if set */;
u8 complete_event_flg /* post completion to the event ring if set */;
u8 queue_id /* Light L2 RX Queue ID */;
u8 reserved1;
__le16 reserved2[2];
};
/*
* Flags for Core TX BD
*/
struct core_tx_bd_data {
__le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_MASK 0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates the L4 protocol:
* 0-TCP, 1-UDP
*/
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
* when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
/* Number of BDs that make up one packet - width wide enough to present
* CORE_LL2_TX_MAX_BDS_PER_PACKET
*/
#define CORE_TX_BD_DATA_NBDS_MASK 0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
* connType is ROCE (use enum core_roce_flavor_type)
*/
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
/* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
/*
* Core TX BD for Light L2
*/
struct core_tx_bd {
struct regpair addr /* Buffer Address */;
__le16 nbytes /* Number of Bytes in Buffer */;
/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
* packets: echo data to pass to Rx
*/
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data /* BD Flags */;
__le16 bitfield1;
/* L4 Header Offset from start of packet (in Words). This is needed if both
* l4_csum and ipv6_ext are set
*/
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, LB (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
#define CORE_TX_BD_RESERVED_MASK 0x1
#define CORE_TX_BD_RESERVED_SHIFT 15
};
/*
* Light L2 TX Destination
*/
enum core_tx_dest {
CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
MAX_CORE_TX_DEST
};
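/*
 * Illustrative sketch, not part of the original HSI definitions: fills the
 * flag words of a single-BD TX packet with the MASK/SHIFT pairs above. The
 * helper name is an assumption for this example; conversion of the 16-bit
 * fields to little-endian is omitted for brevity.
 */
static inline void
example_fill_single_tx_bd_flags(struct core_tx_bd *bd, u8 nbds)
{
	/* first (and here only) BD of the packet, carrying the BD count */
	bd->bd_data.as_bitfield =
		(1 << CORE_TX_BD_DATA_START_BD_SHIFT) |
		((nbds & CORE_TX_BD_DATA_NBDS_MASK) << CORE_TX_BD_DATA_NBDS_SHIFT);
	/* send to the network rather than to the loopback path */
	bd->bitfield1 = CORE_TX_DEST_NW << CORE_TX_BD_TX_DST_SHIFT;
}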
/*
* Ramrod data for tx queue start ramrod
*/
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr /* Address of the pbl page */;
__le16 mtu /* Maximum transmission unit */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* Status block protocol index */;
u8 stats_en /* Statistics Enable */;
u8 stats_id /* Statistics Counter ID */;
u8 conn_type /* connection type that loaded ll2 */;
__le16 pbl_size /* Number of BD pages pointed by PBL */;
__le16 qm_pq_id /* QM PQ ID */;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
u8 resrved[3];
};
/*
* Ramrod data for tx queue stop ramrod
*/
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
/*
* Enum flag for what type of dcb data to update
*/
enum dcb_dhcp_update_flag {
/* use when no change should be done to dcb data */
DONT_UPDATE_DCB_DHCP,
UPDATE_DCB /* use to update only l2 (vlan) priority */,
	UPDATE_DSCP /* use to update only l3 dscp */,
	UPDATE_DCB_DSCP /* update vlan pri and dscp */,
MAX_DCB_DHCP_UPDATE_FLAG
};
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
};
struct eth_mstorm_per_queue_stat {
/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
struct regpair ttl0_discard;
/* Number of packets discarded because they are bigger than MTU */
struct regpair packet_too_big_discard;
/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
struct regpair no_buff_discard;
/* Number of packets discarded because of no active Rx connection */
struct regpair not_active_discard;
/* number of coalesced packets in all TPA aggregations */
struct regpair tpa_coalesced_pkts;
/* total number of TPA aggregations */
struct regpair tpa_coalesced_events;
/* number of aggregations, which abnormally ended */
struct regpair tpa_aborts_num;
/* total TCP payload length in all TPA aggregations */
struct regpair tpa_coalesced_bytes;
};
/*
* Ethernet TX Per PF
*/
struct eth_pstorm_per_pf_stat {
/* number of total ucast bytes sent on loopback port without errors */
struct regpair sent_lb_ucast_bytes;
/* number of total mcast bytes sent on loopback port without errors */
struct regpair sent_lb_mcast_bytes;
/* number of total bcast bytes sent on loopback port without errors */
struct regpair sent_lb_bcast_bytes;
/* number of total ucast packets sent on loopback port without errors */
struct regpair sent_lb_ucast_pkts;
/* number of total mcast packets sent on loopback port without errors */
struct regpair sent_lb_mcast_pkts;
/* number of total bcast packets sent on loopback port without errors */
struct regpair sent_lb_bcast_pkts;
struct regpair sent_gre_bytes /* Sent GRE bytes */;
struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
struct regpair sent_gre_pkts /* Sent GRE packets */;
struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
};
/*
* Ethernet TX Per Queue Stats
*/
struct eth_pstorm_per_queue_stat {
/* number of total bytes sent without errors */
struct regpair sent_ucast_bytes;
/* number of total bytes sent without errors */
struct regpair sent_mcast_bytes;
/* number of total bytes sent without errors */
struct regpair sent_bcast_bytes;
/* number of total packets sent without errors */
struct regpair sent_ucast_pkts;
/* number of total packets sent without errors */
struct regpair sent_mcast_pkts;
/* number of total packets sent without errors */
struct regpair sent_bcast_pkts;
/* number of total packets dropped due to errors */
struct regpair error_drop_pkts;
};
/*
 * ETH RX rate limit parameters
*/
struct eth_rx_rate_limit {
/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
__le16 mult;
/* Constant term to add (or subtract from number of cycles) */
__le16 cnst;
u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
u8 reserved0;
__le16 reserved1;
};
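/*
 * Illustrative sketch, not part of the original HSI definitions: fills the
 * rate limiter using the multiplier formula documented above,
 * Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s). For example, a 1000 MHz
 * storm clock limited to 2000 MB/s gives mult = 4. The helper name is an
 * assumption; little-endian conversion of the 16-bit fields is omitted.
 */
static inline void
example_fill_rx_rate_limit(struct eth_rx_rate_limit *rl,
			   unsigned int storm_clk_mhz,
			   unsigned int bandwidth_mb_s)
{
	rl->mult = storm_clk_mhz * 8 / bandwidth_mb_s;
	rl->cnst = 0;		/* no constant correction term */
	rl->add_sub_cnst = 1;	/* add (rather than subtract) the constant */
	rl->reserved0 = 0;
	rl->reserved1 = 0;
}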
struct eth_ustorm_per_pf_stat {
/* number of total ucast bytes received on loopback port without errors */
struct regpair rcv_lb_ucast_bytes;
/* number of total mcast bytes received on loopback port without errors */
struct regpair rcv_lb_mcast_bytes;
/* number of total bcast bytes received on loopback port without errors */
struct regpair rcv_lb_bcast_bytes;
/* number of total ucast packets received on loopback port without errors */
struct regpair rcv_lb_ucast_pkts;
/* number of total mcast packets received on loopback port without errors */
struct regpair rcv_lb_mcast_pkts;
/* number of total bcast packets received on loopback port without errors */
struct regpair rcv_lb_bcast_pkts;
struct regpair rcv_gre_bytes /* Received GRE bytes */;
struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
struct regpair rcv_gre_pkts /* Received GRE packets */;
struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
};
struct eth_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
/*
* Event Ring Next Page Address
*/
struct event_ring_next_addr {
struct regpair addr /* Next Page Address */;
__le32 reserved[2] /* Reserved */;
};
/*
* Event Ring Element
*/
union event_ring_element {
struct event_ring_entry entry /* Event Ring Entry */;
/* Event Ring Next Page Address */
struct event_ring_next_addr next_addr;
};
/*
 * Flow control mode
*/
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
MAX_FW_FLOW_CTRL_MODE
};
/*
* Major and Minor hsi Versions
*/
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
u8 major_ver_arr[2] /* Major Version of driver loading pf */;
};
/*
* Integration Phase
*/
enum integ_phase {
INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
MAX_INTEG_PHASE
};
/*
 * iWARP LL2 TX queues
*/
enum iwarp_ll2_tx_queues {
/* LL2 queue for OOO packets sent in-order by the driver */
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
/* LL2 queue for unaligned packets sent aligned by the driver */
IWARP_LL2_ALIGNED_TX_QUEUE,
IWARP_LL2_ERROR /* Error indication */,
MAX_IWARP_LL2_TX_QUEUES
};
/*
* Malicious VF error ID
*/
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
/* Writing to VF/PF channel when it is not ready */
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
/* TX packet is shorter than reported on the BDs or than the minimal size */
ETH_PACKET_TOO_SMALL,
/* TX packet marked for VLAN insertion when that is illegal */
ETH_ILLEGAL_VLAN_MODE,
	ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
/* TX packet has illegal inband tags marked */
ETH_ILLEGAL_INBAND_TAGS,
/* VLAN cannot be added to an inband tag */
ETH_VLAN_INSERT_AND_INBAND_VLAN,
/* indicated number of BDs for the packet is illegal */
ETH_ILLEGAL_NBDS,
ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
/* There are not enough BDs for transmission of even one packet */
ETH_INSUFFICIENT_BDS,
ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
/* empty BD (which contains no control flags) is illegal */
ETH_ZERO_SIZE_BD,
ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
/* In LSO it is expected that the local BD ring will hold at least MSS
* bytes of data
*/
ETH_INSUFFICIENT_PAYLOAD,
ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
/* Tunneled packet with IPv6+Ext without a proper number of BDs */
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
MAX_MALICIOUS_VF_ERROR_ID
};
/*
* Mstorm non-triggering VF zone
*/
struct mstorm_non_trigger_vf_zone {
/* VF statistic bucket */
struct eth_mstorm_per_queue_stat eth_queue_stat;
/* VF RX queues producers */
struct eth_rx_prod_data
eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/*
* Mstorm VF zone
*/
struct mstorm_vf_zone {
/* non-interrupt-triggering zone */
struct mstorm_non_trigger_vf_zone non_trigger;
};
/*
* personality per PF
*/
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_ISCSI /* iSCSI and LL2 */,
PERSONALITY_FCOE /* Fcoe and LL2 */,
PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
PERSONALITY_RDMA /* Roce and LL2 */,
PERSONALITY_CORE /* CORE(LL2) */,
PERSONALITY_ETH /* Ethernet */,
PERSONALITY_TOE /* Toe and LL2 */,
MAX_PERSONALITY_TYPE
};
/*
* tunnel configuration
*/
struct pf_start_tunnel_config {
/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
* FW will use a default port
*/
u8 set_vxlan_udp_port_flg;
/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
/* If set, enable l2 GENEVE tunnel in TX path. */
u8 tx_enable_l2geneve;
/* If set, enable IP GENEVE tunnel in TX path. */
u8 tx_enable_ipgeneve;
u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
/* Classification scheme for ip GENEVE tunnel. */
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
__le16 geneve_udp_port;
};
/*
* Ramrod data for PF start ramrod
*/
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
/* PBL address of consolidation queue */
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
u8 num_vfs /* Amount of vfs owned by PF */;
u8 event_ring_num_pages /* Number of PBL pages in event ring */;
u8 event_ring_sb_index /* Status block index */;
u8 path_id /* HW path ID (engine ID) */;
u8 warning_as_error /* In FW asserts, treat warning as error */;
/* If not set - throw a warning for each ramrod (for debug) */
u8 dont_log_ramrods;
	u8 personality /* defines the personality type of the new PF */;
/* Log type mask. Each bit set enables a corresponding event type logging.
* Event types are defined as ASSERT_LOG_TYPE_xxx
*/
__le16 log_type_mask;
u8 mf_mode /* Multi function mode */;
u8 integ_phase /* Integration phase */;
/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
u8 allow_npar_tx_switching;
/* Map from inner to outer priority. Set pri_map_valid when init map */
u8 inner_to_outer_pri_map[8];
/* If inner_to_outer_pri_map is initialized, then set pri_map_valid */
u8 pri_map_valid;
/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
* (lower 16 bits) and ethType to use (higher 16 bits)
*/
__le32 outer_tag;
/* FP HSI version to be used by FW */
struct hsi_fp_ver_struct hsi_fp_ver;
};
/*
 * Per-protocol DCB data (used in the PF/port update ramrod)
*/
struct protocol_dcb_data {
u8 dcb_enable_flag /* dcbEnable flag value */;
u8 dscp_enable_flag /* If set use dscp value */;
u8 dcb_priority /* dcbPri flag value */;
u8 dcb_tc /* dcb TC value */;
u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
u8 reserved0;
};
/*
* Update tunnel configuration
*/
struct pf_update_tunnel_config {
/* Update RX per PF tunnel classification scheme. */
u8 update_rx_pf_clss;
/* Update per PORT default tunnel RX classification scheme for traffic with
* unknown unicast outer MAC in NPAR mode.
*/
u8 update_rx_def_ucast_clss;
/* Update per PORT default tunnel RX classification scheme for traffic with non
* unicast outer MAC in NPAR mode.
*/
u8 update_rx_def_non_ucast_clss;
/* Update TX per PF tunnel classification scheme. used by pf update. */
u8 update_tx_pf_clss;
/* Update VXLAN tunnel UDP destination port. */
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
/* If set, enable l2 GENEVE tunnel in TX path. */
u8 tx_enable_l2geneve;
/* If set, enable IP GENEVE tunnel in TX path. */
u8 tx_enable_ipgeneve;
u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
/* Classification scheme for ip GENEVE tunnel. */
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
__le16 reserved[2];
};
/*
* Data for port update ramrod
*/
struct pf_update_ramrod_data {
u8 pf_id;
u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
/* core iscsi related fields */
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
/* core roce related fields */
struct protocol_dcb_data rroce_dcb_data;
/* core iwarp related fields */
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
__le16 reserved;
/* tunnel configuration. */
struct pf_update_tunnel_config tunnel_config;
};
/*
* Ports mode
*/
enum ports_mode {
ENGX2_PORTX1 /* 2 engines x 1 port */,
ENGX2_PORTX2 /* 2 engines x 2 ports */,
ENGX1_PORTX1 /* 1 engine x 1 port */,
ENGX1_PORTX2 /* 1 engine x 2 ports */,
ENGX1_PORTX4 /* 1 engine x 4 ports */,
MAX_PORTS_MODE
};
/*
* use to index in hsi_fp_[major|minor]_ver_arr per protocol
*/
enum protocol_version_array_key {
ETH_VER_KEY = 0,
ROCE_VER_KEY,
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
/*
* RDMA TX Stats
*/
struct rdma_sent_stats {
struct regpair sent_bytes /* number of total RDMA bytes sent */;
struct regpair sent_pkts /* number of total RDMA packets sent */;
};
/*
* Pstorm non-triggering VF zone
*/
struct pstorm_non_trigger_vf_zone {
/* VF statistic bucket */
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
};
/*
* Pstorm VF zone
*/
struct pstorm_vf_zone {
/* non-interrupt-triggering zone */
struct pstorm_non_trigger_vf_zone non_trigger;
	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
};
/*
* Ramrod Header of SPQE
*/
struct ramrod_header {
__le32 cid /* Slowpath Connection CID */;
u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
u8 protocol_id /* Ramrod Protocol ID */;
__le16 echo /* Ramrod echo */;
};
/*
* RDMA RX Stats
*/
struct rdma_rcv_stats {
struct regpair rcv_bytes /* number of total RDMA bytes received */;
struct regpair rcv_pkts /* number of total RDMA packets received */;
};
/*
* Data for update QCN/DCQCN RL ramrod
*/
struct rl_update_ramrod_data {
u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
/* Update DCQCN global params: timeout, g, k. */
u8 dcqcn_update_param_flg;
u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
u8 rl_stop_flg /* Stop RL. */;
u8 rl_id_first /* ID of first or single RL, that will be updated. */;
/* ID of the last RL that will be updated. If clear, a single RL will be updated. */
u8 rl_id_last;
	u8 rl_dc_qcn_flg /* If set, RL will be used for DCQCN. */;
__le32 rl_bc_rate /* Byte Counter Limit. */;
__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
__le16 rl_r_ai /* Active increase rate. */;
__le16 rl_r_hai /* Hyper active increase rate. */;
__le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */;
__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
__le32 qcn_timeuot_us /* QCN timeout. */;
__le32 reserved[2];
};
/*
* Slowpath Element (SPQE)
*/
struct slow_path_element {
struct ramrod_header hdr /* Ramrod Header */;
struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
};
/*
* Tstorm non-triggering VF zone
*/
struct tstorm_non_trigger_vf_zone {
struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
};
struct tstorm_per_port_stat {
/* packet is dropped because it was truncated in NIG */
struct regpair trunc_error_discard;
/* packet is dropped because of Ethernet FCS error */
struct regpair mac_error_discard;
/* packet is dropped because classification was unsuccessful */
struct regpair mftag_filter_discard;
/* packet was passed to Ethernet and dropped because of no mac filter match */
struct regpair eth_mac_filter_discard;
/* packet passed to Light L2 and dropped because Light L2 is not configured for
* this PF
*/
struct regpair ll2_mac_filter_discard;
/* packet passed to Light L2 and dropped because Light L2 is not configured for
* this PF
*/
struct regpair ll2_conn_disabled_discard;
/* packet is an ISCSI irregular packet */
struct regpair iscsi_irregular_pkt;
/* packet is an FCOE irregular packet */
struct regpair fcoe_irregular_pkt;
/* packet is an ROCE irregular packet */
struct regpair roce_irregular_pkt;
/* packet is an ETH irregular packet */
struct regpair eth_irregular_pkt;
/* packet is an TOE irregular packet */
struct regpair toe_irregular_pkt;
/* packet is an PREROCE irregular packet */
struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
/* VXLAN dropped packets */
struct regpair eth_vxlan_tunn_filter_discard;
/* GENEVE dropped packets */
struct regpair eth_geneve_tunn_filter_discard;
};
/*
* Tstorm VF zone
*/
struct tstorm_vf_zone {
/* non-interrupt-triggering zone */
struct tstorm_non_trigger_vf_zone non_trigger;
};
/*
* Tunnel classification scheme
*/
enum tunnel_clss {
/* Use MAC and VLAN from first L2 header for vport classification. */
TUNNEL_CLSS_MAC_VLAN = 0,
/* Use MAC from first L2 header and VNI from tunnel header for vport
* classification
*/
TUNNEL_CLSS_MAC_VNI,
/* Use MAC and VLAN from last L2 header for vport classification */
TUNNEL_CLSS_INNER_MAC_VLAN,
/* Use MAC from last L2 header and VNI from tunnel header for vport
* classification
*/
TUNNEL_CLSS_INNER_MAC_VNI,
/* Use MAC and VLAN from last L2 header for vport classification. If no exact
* match, use MAC and VLAN from first L2 header for classification.
*/
TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_TUNNEL_CLSS
};
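/*
 * Illustrative sketch, not part of the original HSI definitions: shows how
 * struct pf_update_tunnel_config (defined earlier) and enum tunnel_clss fit
 * together when enabling VXLAN with a non-default UDP port. The helper name
 * and the choice of TUNNEL_CLSS_MAC_VNI are assumptions for this example;
 * little-endian conversion of the UDP port is omitted for brevity.
 */
static inline void
example_enable_vxlan_tunnel(struct pf_update_tunnel_config *cfg,
			    unsigned int udp_port)
{
	cfg->update_rx_pf_clss = 1;		/* update RX classification */
	cfg->update_tx_pf_clss = 1;		/* update TX classification */
	cfg->tx_enable_vxlan = 1;		/* allow VXLAN in the TX path */
	cfg->tunnel_clss_vxlan = TUNNEL_CLSS_MAC_VNI;
	cfg->set_vxlan_udp_port_flg = 1;	/* use a non-default UDP port */
	cfg->vxlan_udp_port = udp_port;
}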
/*
* Ustorm non-triggering VF zone
*/
struct ustorm_non_trigger_vf_zone {
/* VF statistic bucket */
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr /* VF-PF message address */;
};
/*
* Ustorm triggering VF zone
*/
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid /* VF-PF message valid flag */;
u8 reserved[7];
};
/*
* Ustorm VF zone
*/
struct ustorm_vf_zone {
/* non-interrupt-triggering zone */
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
};
/*
* VF-PF channel data
*/
struct vf_pf_channel_data {
/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
* is ready for a new transaction.
*/
__le32 ready;
/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
* valid.
*/
u8 valid;
u8 reserved0;
__le16 reserved1;
};
/*
* Ramrod data for VF start ramrod
*/
struct vf_start_ramrod_data {
u8 vf_id /* VF ID */;
/* If set, initial cleanup ack will be sent to parent PF SP event queue */
u8 enable_flr_ack;
__le16 opaque_fid /* VF opaque FID */;
	u8 personality /* defines the personality type of the new VF */;
u8 reserved[7];
/* FP HSI version to be used by FW */
struct hsi_fp_ver_struct hsi_fp_ver;
};
/*
 * Ramrod data for VF stop ramrod
*/
struct vf_stop_ramrod_data {
u8 vf_id /* VF ID */;
u8 reserved0;
__le16 reserved1;
__le32 reserved2;
};
/*
* VF zone size mode.
*/
enum vf_zone_size_mode {
/* Default VF zone size. Up to 192 VF supported. */
VF_ZONE_SIZE_MODE_DEFAULT,
/* Doubled VF zone size. Up to 96 VF supported. */
VF_ZONE_SIZE_MODE_DOUBLE,
/* Quad VF zone size. Up to 48 VF supported. */
VF_ZONE_SIZE_MODE_QUAD,
MAX_VF_ZONE_SIZE_MODE
};
/*
* Attentions status block
*/
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
__le16 reserved0;
__le16 sb_index /* status block running index */;
__le32 reserved1;
};
/*
 * IGU cleanup bit values, used to distinguish between a cleanup command and a
 * producer/consumer update.
*/
enum command_type_bit {
IGU_COMMAND_TYPE_NOP = 0,
IGU_COMMAND_TYPE_SET = 1,
MAX_COMMAND_TYPE_BIT
};
/*
* DMAE command
*/
struct dmae_cmd {
__le32 opcode;
/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
#define DMAE_CMD_SRC_MASK 0x1
#define DMAE_CMD_SRC_SHIFT 0
/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
* (use enum dmae_cmd_dst_enum)
*/
#define DMAE_CMD_DST_MASK 0x3
#define DMAE_CMD_DST_SHIFT 1
/* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
#define DMAE_CMD_C_DST_MASK 0x1
#define DMAE_CMD_C_DST_SHIFT 3
/* Reset the CRC result (do not use the previous result as the seed) */
#define DMAE_CMD_CRC_RESET_MASK 0x1
#define DMAE_CMD_CRC_RESET_SHIFT 4
/* Reset the source address in the next go to the same source address of the
* previous go
*/
#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
/* Reset the destination address in the next go to the same destination address
* of the previous go
*/
#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
/* 0 completion function is the same as src function, 1 - 0 completion
* function is the same as dst function (use enum dmae_cmd_comp_func_enum)
*/
#define DMAE_CMD_COMP_FUNC_MASK 0x1
#define DMAE_CMD_COMP_FUNC_SHIFT 7
/* 0 - Do not write a completion word, 1 - Write a completion word
* (use enum dmae_cmd_comp_word_en_enum)
*/
#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
/* 0 - Do not write a CRC word, 1 - Write a CRC word
* (use enum dmae_cmd_comp_crc_en_enum)
*/
#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
/* The CRC word should be taken from the DMAE address space from address 9+X,
* where X is the value in these bits.
*/
#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
#define DMAE_CMD_RESERVED1_MASK 0x1
#define DMAE_CMD_RESERVED1_SHIFT 13
#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
/* The field specifies how the completion word is affected by a PCIe read error.
 * 0 - Send a regular completion, 1 - Send a completion with an error indication,
 * 2 - Do not send a completion (use enum dmae_cmd_error_handling_enum)
*/
#define DMAE_CMD_ERR_HANDLING_MASK 0x3
#define DMAE_CMD_ERR_HANDLING_SHIFT 16
/* The port ID to be placed on the RF FID field of the GRC bus. This field is
* used both when GRC is the destination and when it is the source of the DMAE
* transaction.
*/
#define DMAE_CMD_PORT_ID_MASK 0x3
#define DMAE_CMD_PORT_ID_SHIFT 18
/* Source PCI function number [3:0] */
#define DMAE_CMD_SRC_PF_ID_MASK 0xF
#define DMAE_CMD_SRC_PF_ID_SHIFT 20
/* Destination PCI function number [3:0] */
#define DMAE_CMD_DST_PF_ID_MASK 0xF
#define DMAE_CMD_DST_PF_ID_SHIFT 24
#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
#define DMAE_CMD_RESERVED2_MASK 0x3
#define DMAE_CMD_RESERVED2_SHIFT 30
/* PCIe source address low in bytes or GRC source address in DW */
__le32 src_addr_lo;
/* PCIe source address high in bytes or reserved (if source is GRC) */
__le32 src_addr_hi;
/* PCIe destination address low in bytes or GRC destination address in DW */
__le32 dst_addr_lo;
/* PCIe destination address high in bytes or reserved (if destination is GRC) */
__le32 dst_addr_hi;
__le16 length_dw /* Length in DW */;
__le16 opcode_b;
#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT 8
__le32 comp_addr_lo /* PCIe completion address low or grc address */;
/* PCIe completion address high or reserved (if completion address is in GRC) */
__le32 comp_addr_hi;
__le32 comp_val /* Value to write to completion address */;
	__le32 crc32 /* crc32 result */;
__le32 crc_32_c /* crc32_c result */;
__le16 crc16 /* crc16 result */;
__le16 crc16_c /* crc16_c result */;
__le16 crc10 /* crc_t10 result */;
__le16 reserved;
__le16 xsum16 /* checksum16 result */;
__le16 xsum8 /* checksum8 result */;
};
enum dmae_cmd_comp_crc_en_enum {
dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
dmae_cmd_comp_crc_enabled /* Write a CRC word */,
MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
enum dmae_cmd_comp_func_enum {
/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
dmae_cmd_comp_func_to_src,
/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
dmae_cmd_comp_func_to_dst,
MAX_DMAE_CMD_COMP_FUNC_ENUM
};
enum dmae_cmd_comp_word_en_enum {
dmae_cmd_comp_word_disabled /* Do not write a completion word */,
dmae_cmd_comp_word_enabled /* Write the completion word */,
MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
enum dmae_cmd_c_dst_enum {
dmae_cmd_c_dst_pcie,
dmae_cmd_c_dst_grc,
MAX_DMAE_CMD_C_DST_ENUM
};
enum dmae_cmd_dst_enum {
dmae_cmd_dst_none_0,
dmae_cmd_dst_pcie,
dmae_cmd_dst_grc,
dmae_cmd_dst_none_3,
MAX_DMAE_CMD_DST_ENUM
};
enum dmae_cmd_error_handling_enum {
/* Send a regular completion (with no error indication) */
dmae_cmd_error_handling_send_regular_comp,
/* Send a completion with an error indication (i.e. set bit 31 of the completion
* word)
*/
dmae_cmd_error_handling_send_comp_with_err,
dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};
enum dmae_cmd_src_enum {
dmae_cmd_src_pcie /* The source is the PCIe */,
dmae_cmd_src_grc /* The source is the GRC */,
MAX_DMAE_CMD_SRC_ENUM
};
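/*
 * Illustrative sketch, not part of the original HSI definitions: composes a
 * DMAE opcode for a host (PCIe) to GRC copy that writes a completion word
 * back to the host, using the MASK/SHIFT pairs and enums above. The helper
 * name is an assumption; little-endian conversion is omitted for brevity.
 */
static inline void
example_build_dmae_opcode(struct dmae_cmd *cmd, u8 port_id, u8 pf_id)
{
	cmd->opcode =
		(dmae_cmd_src_pcie << DMAE_CMD_SRC_SHIFT) |
		(dmae_cmd_dst_grc << DMAE_CMD_DST_SHIFT) |
		(dmae_cmd_c_dst_pcie << DMAE_CMD_C_DST_SHIFT) |
		(dmae_cmd_comp_word_enabled << DMAE_CMD_COMP_WORD_EN_SHIFT) |
		((port_id & DMAE_CMD_PORT_ID_MASK) << DMAE_CMD_PORT_ID_SHIFT) |
		((pf_id & DMAE_CMD_SRC_PF_ID_MASK) << DMAE_CMD_SRC_PF_ID_SHIFT) |
		((pf_id & DMAE_CMD_DST_PF_ID_MASK) << DMAE_CMD_DST_PF_ID_SHIFT);
}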
/*
* IGU cleanup command
*/
struct igu_cleanup {
__le32 sb_id_and_flags;
#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
#define IGU_CLEANUP_RESERVED0_SHIFT 0
/* cleanup clear - 0, set - 1 */
#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
/* must always be set (use enum command_type_bit) */
#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
__le32 reserved1;
};
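/*
 * Illustrative sketch, not part of the original HSI definitions: builds an
 * IGU cleanup command word from the MASK/SHIFT pairs above. The helper name
 * is an assumption; little-endian conversion of the 32-bit word is omitted.
 */
static inline void
example_build_igu_cleanup(struct igu_cleanup *cmd, u8 cleanup_type)
{
	cmd->sb_id_and_flags =
		(1 << IGU_CLEANUP_CLEANUP_SET_SHIFT) |	/* set, not clear */
		((cleanup_type & IGU_CLEANUP_CLEANUP_TYPE_MASK)
		 << IGU_CLEANUP_CLEANUP_TYPE_SHIFT) |
		/* the command type bit must always be set */
		((unsigned int)IGU_COMMAND_TYPE_SET
		 << IGU_CLEANUP_COMMAND_TYPE_SHIFT);
	cmd->reserved1 = 0;
}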
/*
* IGU firmware driver command
*/
union igu_command {
struct igu_prod_cons_update prod_cons_update;
struct igu_cleanup cleanup;
};
/*
* IGU firmware driver command
*/
struct igu_command_reg_ctrl {
__le16 opaque_fid;
__le16 igu_command_reg_ctrl_fields;
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
/* command type: 0 - read, 1 - write */
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
/*
* IGU mapping line structure
*/
struct igu_mapping_line {
__le32 igu_mapping_line_fields;
#define IGU_MAPPING_LINE_VALID_MASK 0x1
#define IGU_MAPPING_LINE_VALID_SHIFT 0
#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
};
/*
* IGU MSIX line structure
*/
struct igu_msix_vector {
struct regpair address;
__le32 data;
__le32 msix_vector_fields;
#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
struct mstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
};
/*
* per encapsulation type enabling flags
*/
struct prs_reg_encapsulation_type_en {
u8 flags;
/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
/* Enable bit for VXLAN encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
/* Enable bit for T-Tag encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
};
enum pxp_tph_st_hint {
TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
/* Device Write and Host Read, or Host Write and Device Read */
TPH_ST_HINT_TARGET,
/* Device Write and Host Read, or Host Write and Device Read - with temporal
* reuse
*/
TPH_ST_HINT_TARGET_PRIO,
MAX_PXP_TPH_ST_HINT
};
/*
* QM hardware structure of enable bypass credit mask
*/
struct qm_rf_bypass_mask {
u8 flags;
#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
/*
* QM hardware structure of opportunistic credit mask
*/
struct qm_rf_opportunistic_mask {
__le16 flags;
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
};
/*
* QM hardware structure of QM map memory
*/
struct qm_rf_pq_map {
__le32 reg;
#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
/* the first PQ associated with the VPORT and VOQ of this PQ */
#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
#define QM_RF_PQ_MAP_VOQ_SHIFT 18
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
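/*
 * Illustrative sketch, not part of the original HSI definitions: packs a QM
 * PQ map entry with the MASK/SHIFT pairs above, without rate limiting. The
 * helper name is an assumption; little-endian conversion is omitted.
 */
static inline void
example_build_qm_pq_map(struct qm_rf_pq_map *map, unsigned int vp_pq_id,
			unsigned int voq, unsigned int wrr_group)
{
	map->reg =
		(1 << QM_RF_PQ_MAP_PQ_VALID_SHIFT) |	/* PQ active */
		((vp_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK)
		 << QM_RF_PQ_MAP_VP_PQ_ID_SHIFT) |
		((voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT) |
		((wrr_group & QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK)
		 << QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT);
}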
/*
* Completion params for aggregated interrupt completion
*/
struct sdm_agg_int_comp_params {
__le16 params;
/* the number of aggregated interrupt, 0-31 */
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
/* 1 - set a bit in the aggregated vector, 0 - do not set */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
/* Number of bit in the aggregated vector, 0-279 (TBD) */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
};
/*
* SDM operation gen command (generate aggregative interrupt)
*/
struct sdm_op_gen {
__le32 command;
/* completion parameters 0-15 */
#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
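/*
 * Illustrative sketch, not part of the original HSI definitions: packs the
 * aggregated-interrupt completion parameters and places them in the low 16
 * bits of an SDM operation-gen command. The helper name is an assumption;
 * little-endian conversion of the command word is omitted for brevity.
 */
static inline void
example_build_sdm_op_gen(struct sdm_op_gen *op, unsigned int agg_int_index,
			 unsigned int vector_bit, unsigned int comp_type)
{
	unsigned int params =
		((agg_int_index & SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK)
		 << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT) |
		(1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT) |
		((vector_bit & SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK)
		 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT);

	op->command =
		((params & SDM_OP_GEN_COMP_PARAM_MASK)
		 << SDM_OP_GEN_COMP_PARAM_SHIFT) |
		((comp_type & SDM_OP_GEN_COMP_TYPE_MASK)
		 << SDM_OP_GEN_COMP_TYPE_SHIFT);
}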
struct ystorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 word1 /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
#endif /* __ECORE_HSI_COMMON__ */
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_CRYPTO_SCHEDULER_H
#define _RTE_CRYPTO_SCHEDULER_H
#include "rte_cryptodev_scheduler_operations.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Crypto scheduler PMD operation modes
*/
enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_NOT_SET = 0,
CDEV_SCHED_MODE_USERDEFINED,
CDEV_SCHED_MODE_ROUNDROBIN,
CDEV_SCHED_MODE_COUNT /* number of modes */
};
#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN (64)
#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN (256)
struct rte_cryptodev_scheduler;
/**
* Load a user defined scheduler
*
* @param scheduler_id The target scheduler device ID
 * @param scheduler	Pointer to the user defined scheduler
*
* @return
* 0 if loading successful, negative integer if otherwise.
*/
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
struct rte_cryptodev_scheduler *scheduler);
/**
* Attach a pre-configured crypto device to the scheduler
*
* @param scheduler_id The target scheduler device ID
 * @param slave_id	Crypto device ID to be attached
*
* @return
* 0 if attaching successful, negative int if otherwise.
*/
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
/**
 * Detach an attached crypto device from the scheduler
*
* @param scheduler_id The target scheduler device ID
 * @param slave_id	Crypto device ID to be detached
*
* @return
* 0 if detaching successful, negative int if otherwise.
*/
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
/**
* Set the scheduling mode
*
* @param scheduler_id The target scheduler device ID
 * @param mode	The scheduling mode
*
* @return
 *	0 if setting successful, negative integer if otherwise.
*/
int
rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
enum rte_cryptodev_scheduler_mode mode);
/**
* Get the current scheduling mode
*
* @param scheduler_id The target scheduler device ID
 *
 * @return
 *	The current scheduling mode
 */
enum rte_cryptodev_scheduler_mode
rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
/**
* Set the crypto ops reordering feature on/off
*
 * @param scheduler_id	The target scheduler device ID
 * @param enable_reorder	Set the crypto op reordering feature:
 *			0: disable reordering
 *			1: enable reordering
*
* @return
* 0 if setting successful, negative integer if otherwise.
*/
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
uint32_t enable_reorder);
/**
* Get the current crypto ops reordering feature
*
 * @param scheduler_id	The target scheduler device ID
*
* @return
* 0 if reordering is disabled
* 1 if reordering is enabled
* negative integer if otherwise.
*/
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
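/*
 * Illustrative usage sketch (not part of the original header): attach two
 * pre-configured crypto devices to a scheduler, select round-robin mode and
 * enable crypto op reordering. The device IDs are placeholders and error
 * handling is minimal; note that the mode set function keeps the historical
 * "crpytodev" spelling used by this header.
 */
static inline int
example_scheduler_setup(uint8_t scheduler_id, uint8_t slave0, uint8_t slave1)
{
	if (rte_cryptodev_scheduler_slave_attach(scheduler_id, slave0) < 0 ||
	    rte_cryptodev_scheduler_slave_attach(scheduler_id, slave1) < 0)
		return -1;
	if (rte_crpytodev_scheduler_mode_set(scheduler_id,
			CDEV_SCHED_MODE_ROUNDROBIN) < 0)
		return -1;
	return rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
}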
typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
struct rte_crypto_op **ops, uint16_t nb_ops);
typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
struct rte_crypto_op **ops, uint16_t nb_ops);
struct rte_cryptodev_scheduler {
const char *name;
const char *description;
enum rte_cryptodev_scheduler_mode mode;
struct rte_cryptodev_scheduler_ops *ops;
};
extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
#ifdef __cplusplus
}
#endif
#endif /* _RTE_CRYPTO_SCHEDULER_H */
|
vicharl/containerdns
|
kdns/src/view_update.c
|
/*
* view_update.c
*/
#include <rte_ring.h>
#include <rte_rwlock.h>
#include <jansson.h>
#include "domain_store.h"
#include "view_update.h"
#include "kdns.h"
#include "ctrl_msg.h"
extern struct kdns dpdk_dns[MAX_CORES];
static view_tree_t *view_master_tree;
static rte_rwlock_t view_master_lock;
static int send_view_msg_to_master(struct view_info_update *msg) {
msg->cmsg.type = CTRL_MSG_TYPE_UPDATE_VIEW;
msg->cmsg.len = sizeof(struct view_info_update);
return ctrl_msg_master_ingress((void **)&msg, 1) == 1 ? 0 : -1;
}
static struct view_info_update *do_view_parse(enum view_action action, json_t *json_data) {
struct view_info_update *update = xalloc_zero(sizeof(struct view_info_update));
update->action = action;
const char *view_name;
/* get view name */
json_t *json_key = json_object_get(json_data, "viewName");
if (!json_key || !json_is_string(json_key)) {
log_msg(LOG_ERR, "viewName does not exist or is not string!");
goto _parse_err;
}
view_name = json_string_value(json_key);
    snprintf(update->view_name, sizeof(update->view_name), "%s", view_name);
/* get cidrs */
json_key = json_object_get(json_data, "cidrs");
if (!json_key || !json_is_string(json_key)) {
log_msg(LOG_ERR, "view cidrs does not exist or is not string!");
goto _parse_err;
}
view_name = json_string_value(json_key);
    snprintf(update->cidrs, sizeof(update->cidrs), "%s", view_name);
return update;
_parse_err:
free(update);
return NULL;
}
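/*
 * Illustrative request body for do_view_parse() (not part of the original
 * file); both fields must be JSON strings, e.g.:
 *     {"viewName": "example-view", "cidrs": "10.0.0.0/8"}
 * view_parse_all() below expects a JSON array of such objects.
 */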
static void *view_parse(enum view_action action, struct connection_info_struct *con_info, int *len_response) {
char *post_ok, *parse_err;
if (action == ACTION_ADD) {
log_msg(LOG_INFO, "add data = %s\n", (char *)con_info->uploaddata);
} else {
log_msg(LOG_INFO, "del data = %s\n", (char *)con_info->uploaddata);
}
/* parse json object */
json_error_t jerror;
json_t *json_response = json_loads(con_info->uploaddata, 0, &jerror);
if (!json_response) {
log_msg(LOG_ERR, "load json string failed: %s %s (line %d, col %d)\n",
jerror.text, jerror.source, jerror.line, jerror.column);
goto _parse_err;
}
if (!json_is_object(json_response)) {
log_msg(LOG_ERR, "load json string failed: not an object!\n");
goto _parse_err;
}
struct view_info_update *update = do_view_parse(action, json_response);
if (update == NULL) {
goto _parse_err;
}
send_view_msg_to_master(update);
json_decref(json_response);
post_ok = strdup("OK\n");
*len_response = strlen(post_ok);
return post_ok;
_parse_err:
if (json_response) {
json_decref(json_response);
}
parse_err = strdup("parse data err\n");
*len_response = strlen(parse_err);
return parse_err;
}
static void *view_parse_all(enum view_action action, struct connection_info_struct *con_info, int *len_response) {
char *post_ok, *parse_err;
if (action == ACTION_ADD) {
log_msg(LOG_INFO, "add all data = %s\n", (char *)con_info->uploaddata);
} else {
log_msg(LOG_INFO, "del all data = %s\n", (char *)con_info->uploaddata);
}
json_error_t jerror;
json_t *json_response = json_loads(con_info->uploaddata, 0, &jerror);
if (!json_response) {
log_msg(LOG_ERR, "load json string failed: %s %s (line %d, col %d)\n",
jerror.text, jerror.source, jerror.line, jerror.column);
goto _parse_err;
}
if (!json_is_array(json_response)) {
log_msg(LOG_ERR, "load json string failed: not an array!");
goto _parse_err;
}
size_t domains_count = json_array_size(json_response);
size_t i_num;
for (i_num = 0; i_num < domains_count; i_num++) {
int ret = 0;
int retry_num = 5;
struct view_info_update *update;
json_t *array_elem = json_array_get(json_response, i_num);
if (!json_is_object(array_elem)) {
log_msg(LOG_ERR, "load json string failed: not an object!\n");
json_decref(array_elem);
goto _parse_err;
}
_retry:
update = do_view_parse(action, array_elem);
if (update == NULL) {
json_decref(array_elem);
goto _parse_err;
}
ret = send_view_msg_to_master(update);
        if ((ret != 0) && (retry_num > 0)) {
retry_num--;
usleep(200000); //200ms
goto _retry;
}
json_decref(array_elem);
}
json_decref(json_response);
post_ok = strdup("OK\n");
*len_response = strlen(post_ok);
return post_ok;
_parse_err:
if (json_response) {
json_decref(json_response);
}
parse_err = strdup("parse data err\n");
*len_response = strlen(parse_err);
return parse_err;
}
void *view_post(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response) {
return view_parse(ACTION_ADD, con_info, len_response);
}
void *view_del(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response) {
return view_parse(ACTION_DEL, con_info, len_response);
}
void *views_post_all(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response) {
return view_parse_all(ACTION_ADD, con_info, len_response);
}
void *views_delete_all(struct connection_info_struct *con_info, __attribute__((unused))char *url, int *len_response) {
return view_parse_all(ACTION_DEL, con_info, len_response);
}
static int do_view_msg_update(struct view_tree *tree, struct view_info_update *update) {
return view_operate(tree, update->cidrs, update->view_name, update->action);
}
static void do_view_info_get(void *arg1, view_value_t *data) {
json_t *array = (json_t *)arg1;
json_t *value = json_pack("{s:s, s:s}", "viewName", data->view_name, "cidrs", data->cidrs);
json_array_append_new(array, value);
}
void *view_get(__attribute__((unused)) struct connection_info_struct *con_info, char *url, int *len_response) {
(void)url;
log_msg(LOG_INFO, "view_get() in \n");
char *outErr = NULL;
json_t *array = json_array();
if (!array) {
log_msg(LOG_ERR, "unable to create array\n");
outErr = strdup("unable to create array");
goto err_out;
}
rte_rwlock_read_lock(&view_master_lock);
view_tree_dump(view_master_tree->root, (void *)array, do_view_info_get);
rte_rwlock_read_unlock(&view_master_lock);
char *str_ret = json_dumps(array, JSON_COMPACT);
json_decref(array);
*len_response = strlen(str_ret);
log_msg(LOG_INFO, "view_get() out \n");
return (void *)str_ret;
err_out:
*len_response = strlen(outErr);
log_msg(LOG_INFO, "domain_get() err out \n");
return (void *)outErr;
}
void view_query_slave_process(struct query *query, unsigned slave_lcore) {
view_value_t *data = view_find(dpdk_dns[slave_lcore].db->viewtree, (uint8_t *)&query->sip, 32);
if (data != VIEW_NO_NODE) {
snprintf(query->view_name, MAX_VIEW_NAME_LEN, "%s", data->view_name);
}
}
void view_query_master_process(struct query *query) {
rte_rwlock_read_lock(&view_master_lock);
view_value_t *data = view_find(view_master_tree, (uint8_t *)&query->sip, 32);
if (data != VIEW_NO_NODE) {
snprintf(query->view_name, MAX_VIEW_NAME_LEN, "%s", data->view_name);
}
rte_rwlock_read_unlock(&view_master_lock);
}
static int view_msg_slave_process(ctrl_msg *msg, unsigned slave_lcore) {
int ret = do_view_msg_update(dpdk_dns[slave_lcore].db->viewtree, (struct view_info_update *)msg);
free(msg);
return ret;
}
static int view_msg_master_process(ctrl_msg *msg) {
rte_rwlock_write_lock(&view_master_lock);
int ret = do_view_msg_update(view_master_tree, (struct view_info_update *)msg);
rte_rwlock_write_unlock(&view_master_lock);
free(msg);
return ret;
}
void view_master_init(void) {
ctrl_msg_reg(CTRL_MSG_TYPE_UPDATE_VIEW, CTRL_MSG_FLAG_MASTER_SYNC_SLAVE, view_msg_master_process, view_msg_slave_process);
rte_rwlock_init(&view_master_lock);
view_master_tree = view_tree_create();
}
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf.h
|
/*-
* BSD LICENSE
*
* Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CPERF_
#define _CPERF_
#include <rte_crypto.h>
#include "cperf_ops.h"
struct cperf_options;
struct cperf_test_vector;
struct cperf_op_fns;
typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
const struct cperf_options *options,
const struct cperf_test_vector *t_vec,
const struct cperf_op_fns *op_fns);
typedef int (*cperf_runner_t)(void *test_ctx);
typedef void (*cperf_destructor_t)(void *test_ctx);
struct cperf_test {
cperf_constructor_t constructor;
cperf_runner_t runner;
cperf_destructor_t destructor;
};
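/*
 * Illustrative sketch (not part of the original header): a benchmark type
 * is described by filling a cperf_test with its three callbacks. The names
 * below are placeholders for illustration, not real cperf symbols, and the
 * block is compiled out by default.
 */
#ifdef CPERF_EXAMPLE_SKETCH
static void *
example_ctor(uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *t_vec,
		const struct cperf_op_fns *op_fns)
{
	(void)dev_id; (void)qp_id; (void)options; (void)t_vec; (void)op_fns;
	return NULL; /* would allocate and return the per-queue test context */
}
static int example_run(void *ctx) { (void)ctx; return 0; }
static void example_free(void *ctx) { (void)ctx; }
static const struct cperf_test example_test = {
	.constructor = example_ctor,
	.runner = example_run,
	.destructor = example_free,
};
#endif /* CPERF_EXAMPLE_SKETCH */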
#endif /* _CPERF_ */
|
vicharl/containerdns
|
kdns/src/db_update.c
|
/*
* data_update.c
*/
#include <stdlib.h>
#include "db_update.h"
#include "util.h"
static rrset_type *do_domaindata_insert(struct domain_store *db, zone_type *zo, const domain_name_st *dname, rr_type *rr, uint32_t maxAnswer)
{
rrset_type *rrset;
// insert domain
//domain_name_st *dname = domain_name_make(db->domain, 1);
domain_type *owner = domain_table_insert(db->domains, dname, maxAnswer);
rr->owner = owner;
/* Do we have this type of rrset already? */
rrset = domain_find_rrset(rr->owner, zo, rr->type);
if (!rrset) {
rrset = (rrset_type *)xalloc_zero(sizeof(rrset_type));
rrset->zone = zo;
rrset->rr_count = 1;
rrset->rrs = (rr_type *)xalloc_zero(sizeof(rr_type));
rrset->rrs[0] = *rr;
/* Add it */
domain_add_rrset(rr->owner, rrset);
} else {
int i;
rr_type *o;
/* Search for possible duplicates... */
for (i = 0; i < rrset->rr_count; i++) {
if (!strcmp(rrset->rrs[i].view_name, rr->view_name)
&& (rrset->rrs[i].ttl != rr->ttl || rrset->rrs[i].lb_mode != rr->lb_mode)) {
log_msg(LOG_ERR, "ttl or lb_mode not match in same view\n");
return NULL;
}
/* Discard the duplicates... */
if (!zrdatacmp(rr->type, rr, &rrset->rrs[i]) && !strcmp(rrset->rrs[i].view_name, rr->view_name)) {
return NULL;
}
if (rr->type == TYPE_CNAME && !strcmp(rrset->rrs[i].view_name, rr->view_name)) {
log_msg(LOG_ERR, "multiple CNAMEs at the same name in same view\n");
return NULL;
}
}
if (rrset->rr_count == 65535) {
log_msg(LOG_ERR, "too many RRs for domain RRset\n");
return NULL;
}
/* Add it... */
o = rrset->rrs;
rrset->rrs = (rr_type *)xalloc_array_zero(rrset->rr_count + 1, sizeof(rr_type));
memcpy(rrset->rrs, o, (rrset->rr_count) * sizeof(rr_type));
free(o);
rrset->rrs[rrset->rr_count] = *rr;
++rrset->rr_count;
}
return rrset;
}
static int do_domaindata_delete(struct domain_store *db, zone_type *zo, const domain_name_st *dname, rr_type *rr)
{
rrset_type *rrset;
domain_type *domain = domain_table_find(db->domains, dname);
if (domain == NULL) {
log_msg(LOG_ERR, "domain not find: %s\n", domain_name_get(dname));
return -1;
}
/* Do we have this type of rrset already? */
rrset = domain_find_rrset(domain, zo, rr->type);
if (!rrset) {
log_msg(LOG_ERR, "rrset not find: %s\n", domain_name_get(dname));
return -1;
} else {
int rrnum;
/* Search for the val ... */
for (rrnum = 0; rrnum < rrset->rr_count; rrnum++) {
if (!zrdatacmp(rr->type, rr, &rrset->rrs[rrnum]) && !strcmp(rrset->rrs[rrnum].view_name, rr->view_name)) {
break;
}
}
        // found
if (rrnum < rrset->rr_count) {
rr_lower_usage(db, &rrset->rrs[rrnum]);
if (rrset->rr_count == 1) {
rrset_delete(db, domain, rrset);
rrset_zero_nonexist_check(domain, NULL);
domain_table_deldomain(db, domain);
} else {
rr_type *rrs_orig = rrset->rrs;
add_rdata_to_recyclebin(&rrset->rrs[rrnum]);
if (rrnum < rrset->rr_count - 1) {
rrset->rrs[rrnum] = rrset->rrs[rrset->rr_count - 1];
}
memset(&rrset->rrs[rrset->rr_count - 1], 0, sizeof(rr_type));
/* realloc the rrs array one smaller */
rrset->rrs = xalloc_array_zero(rrset->rr_count - 1, sizeof(rr_type));
if (!rrset->rrs) {
log_msg(LOG_ERR, "out of memory, %s:%d\n", __FILE__, __LINE__);
exit(1);
}
memcpy(rrset->rrs, rrs_orig, (rrset->rr_count - 1) * sizeof(rr_type));
free(rrs_orig);
rrset->rr_count--;
}
}
}
return 0;
}
static void db_zadd_rdata_domain(rr_type *rr, domain_type *domain)
{
if (rr->rdata_count >= MAXRDATALEN) {
log_msg(LOG_ERR, "too many rdata elements\n");
} else {
rr->rdatas[rr->rdata_count++].domain = domain;
domain->usage++; /* new reference to domain */
}
}
static void db_zadd_rdata_wireformat(rr_type *rr, uint16_t *data)
{
if (rr->rdata_count >= MAXRDATALEN) {
log_msg(LOG_ERR, "too many rdata elements\n");
} else {
rr->rdatas[rr->rdata_count++].data = data;
}
}
int domaindata_soa_insert(struct domain_store *db, char *zone_name)
{
const domain_name_st *zname = domain_name_parse((const char *)zone_name);
if (zname == NULL) {
log_msg(LOG_ERR, "illegal zone name: %s\n", zone_name);
return -1;
}
zone_type *zo = domain_store_find_zone(db, zname);
if (zo == NULL) {
log_msg(LOG_ERR, "not find the zone, zone name: %s\n", zone_name);
free((void *)zname);
return -1;
}
char string[64] = {0};
snprintf(string, sizeof(string), "ns1.%s", zone_name);
const domain_name_st *ns1_name = domain_name_parse((const char *)string);
domain_type *ns1_own = domain_table_insert(db->domains, ns1_name, 0);
free((void *)ns1_name);
snprintf(string, sizeof(string), "mail.%s", zone_name);
const domain_name_st *mail_name = domain_name_parse((const char *)string);
domain_type *mail_own = domain_table_insert(db->domains, mail_name, 0);
free((void *)mail_name);
rr_type rr;
memset(&rr, 0, sizeof(rr));
rr.rdata_count = 0;
rr.klass = CLASS_IN;
rr.type = TYPE_SOA;
rr.rdatas = xalloc_array_zero(MAXRDATALEN, sizeof(rdata_atom_type));
db_zadd_rdata_domain(&rr, ns1_own); //ns
db_zadd_rdata_domain(&rr, mail_own); //mail
db_zadd_rdata_wireformat(&rr, zparser_conv_serial("2017070809")); //serial number
db_zadd_rdata_wireformat(&rr, zparser_conv_serial("3600")); //refresh
db_zadd_rdata_wireformat(&rr, zparser_conv_serial("900")); //retry
db_zadd_rdata_wireformat(&rr, zparser_conv_serial("1209600")); //expire
db_zadd_rdata_wireformat(&rr, zparser_conv_serial("1800")); // ttl
rrset_type *rrset = do_domaindata_insert(db, zo, zname, &rr, 0);
if (rrset == NULL) {
rr_lower_usage(db, &rr);
add_rdata_to_recyclebin(&rr);
free((void *)zname);
return -1;
}
domain_type *owner = domain_table_find(db->domains, zname);
if (owner) {
apex_rrset_checks(rrset, owner);
}
free((void *)zname);
return 0;
}
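/*
 * For reference (not part of the original file): with a zone such as
 * "example.com" the record assembled above corresponds to the zone-file
 * entry
 *     example.com. IN SOA ns1.example.com. mail.example.com. (
 *         2017070809 ; serial
 *         3600       ; refresh
 *         900        ; retry
 *         1209600    ; expire
 *         1800 )     ; minimum TTL
 * "example.com" is only an illustration; the zone name comes from the caller.
 */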
int domaindata_update(struct domain_store *db, struct domin_info_update *update)
{
if (update->type != TYPE_A && update->type != TYPE_AAAA && update->type != TYPE_PTR
&& update->type != TYPE_CNAME && update->type != TYPE_SRV) {
log_msg(LOG_ERR, "err type: %u\n", update->type);
return -1;
}
if (update->action != DOMAN_ACTION_ADD && update->action != DOMAN_ACTION_DEL) {
log_msg(LOG_ERR, "err action: %u\n", update->action);
return -1;
}
const domain_name_st *zname = domain_name_parse((const char *)update->zone_name);
if (zname == NULL) {
log_msg(LOG_ERR, "illegal zone name: %s\n", update->zone_name);
return -1;
}
zone_type *zo = domain_store_find_zone(db, zname);
if (zo == NULL) {
log_msg(LOG_ERR, "not find the zone, zone name: %s\n", update->zone_name);
free((void *)zname);
return -1;
}
free((void *)zname);
domain_type *hostOwner = NULL;
if (update->type == TYPE_PTR || update->type == TYPE_CNAME || update->type == TYPE_SRV) {
const domain_name_st *hostDomain = domain_name_parse((const char *)update->host);
if (hostDomain == NULL) {
log_msg(LOG_ERR, "illegal host domain: %s\n", update->host);
return -1;
}
hostOwner = domain_table_find(db->domains, hostDomain);
if (hostOwner == NULL && update->action == DOMAN_ACTION_ADD) {
hostOwner = domain_table_insert(db->domains, hostDomain, update->maxAnswer);
}
free((void *)hostDomain);
if (hostOwner == NULL) {
log_msg(LOG_ERR, "err: action %s but can not find domain: %s\n",
update->action == DOMAN_ACTION_ADD ? "add" : "del", update->host);
return -1;
}
}
const domain_name_st *dname = domain_name_parse((const char *)update->domain_name);
if (dname == NULL) {
log_msg(LOG_ERR, "illegal domain name: %s\n", update->domain_name);
return -1;
}
rr_type rr;
memset(&rr, 0, sizeof(rr));
rr.rdata_count = 0;
rr.klass = CLASS_IN;
rr.type = update->type;
rr.ttl = update->ttl;
rr.lb_mode = update->lb_mode;
rr.lb_weight = update->lb_weight;
rr.lb_weight_cur = update->lb_weight;
snprintf(rr.view_name, MAX_VIEW_NAME_LEN, "%s", update->view_name);
rr.rdatas = xalloc_array_zero(MAXRDATALEN, sizeof(rdata_atom_type));
if (update->type == TYPE_A) {
db_zadd_rdata_wireformat(&rr, zparser_conv_a(update->host));
} else if (update->type == TYPE_AAAA) {
db_zadd_rdata_wireformat(&rr, zparser_conv_aaaa(update->host));
} else if (update->type == TYPE_PTR) {
db_zadd_rdata_domain(&rr, hostOwner);
} else if (update->type == TYPE_CNAME) {
db_zadd_rdata_domain(&rr, hostOwner);
} else if (update->type == TYPE_SRV) {
char string[32];
sprintf(string, "%d", update->prio);
db_zadd_rdata_wireformat(&rr, zparser_conv_short(string)); // prio
sprintf(string, "%d", update->weight);
db_zadd_rdata_wireformat(&rr, zparser_conv_short(string)); // weight
sprintf(string, "%d", update->port);
db_zadd_rdata_wireformat(&rr, zparser_conv_short(string)); // port
db_zadd_rdata_domain(&rr, hostOwner);
}
if (update->action == DOMAN_ACTION_ADD) {
rrset_type *rrset = do_domaindata_insert(db, zo, dname, &rr, update->maxAnswer);
if (rrset == NULL) {
rr_lower_usage(db, &rr);
add_rdata_to_recyclebin(&rr);
free((void *)dname);
return -1;
}
free((void *)dname);
return 0;
} else {
int ret = do_domaindata_delete(db, zo, dname, &rr);
rr_lower_usage(db, &rr);
add_rdata_to_recyclebin(&rr);
free((void *)dname);
return ret;
}
}
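/*
 * Illustrative call sketch (not part of the original file): adding an A
 * record boils down to filling a struct domin_info_update with zone_name,
 * domain_name, host (the IPv4 address string), type = TYPE_A,
 * action = DOMAN_ACTION_ADD, plus ttl/view_name/lb settings as needed, and
 * passing it to domaindata_update() together with the target domain_store.
 */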
|
vicharl/containerdns
|
kdns/dpdk-17.02/drivers/net/fm10k/fm10k_ethdev.c
|
/*-
* BSD LICENSE
*
* Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>
#include "fm10k.h"
#include "base/fm10k_api.h"
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
#define MAIN_VSI_POOL_NUMBER 0
/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US 100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 10000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM 128
#define GLORT_FD_Q_BASE 0x40
#define GLORT_PF_MASK 0xFFC0
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
};
struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
nodesc_drop)},
};
#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
sizeof(fm10k_hw_stats_strings[0]))
struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};
#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
sizeof(fm10k_hw_stats_rx_q_strings[0]))
struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};
#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
sizeof(fm10k_hw_stats_tx_q_strings[0]))
#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}
static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
return -1;
}
uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
return -1;
}
void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
__rte_unused struct fm10k_rx_queue *rxq)
{
return;
}
void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
return;
}
int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
return -1;
}
uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
/*
* reset queue to initial state, allocate software buffers used when starting
* device.
* return 0 on success
* return -ENOMEM if buffers cannot be allocated
* return -EINVAL if buffers do not satisfy alignment condition
*/
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
static const union fm10k_rx_desc zero = {{0} };
uint64_t dma_addr;
int i, diag;
PMD_INIT_FUNC_TRACE();
diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
if (diag != 0)
return -ENOMEM;
for (i = 0; i < q->nb_desc; ++i) {
fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
q->nb_desc);
return -EINVAL;
}
dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
q->hw_ring[i].q.pkt_addr = dma_addr;
q->hw_ring[i].q.hdr_addr = dma_addr;
}
/* initialize extra software ring entries. Space for these extra
* entries is always allocated.
*/
memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
for (i = 0; i < q->nb_fake_desc; ++i) {
q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
q->hw_ring[q->nb_desc + i] = zero;
}
q->next_dd = 0;
q->next_alloc = 0;
q->next_trigger = q->alloc_thresh - 1;
FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
q->rxrearm_start = 0;
q->rxrearm_nb = 0;
return 0;
}
/*
* clean queue, descriptor rings, free software buffers used when stopping
* device.
*/
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
uint32_t i;
PMD_INIT_FUNC_TRACE();
/* zero descriptor rings */
for (i = 0; i < q->nb_desc; ++i)
q->hw_ring[i] = zero;
/* zero faked descriptors */
for (i = 0; i < q->nb_fake_desc; ++i)
q->hw_ring[q->nb_desc + i] = zero;
/* vPMD driver has a different way of releasing mbufs. */
if (q->rx_using_sse) {
fm10k_rx_queue_release_mbufs_vec(q);
return;
}
/* free software buffers */
for (i = 0; i < q->nb_desc; ++i) {
if (q->sw_ring[i]) {
rte_pktmbuf_free_seg(q->sw_ring[i]);
q->sw_ring[i] = NULL;
}
}
}
/*
* free all queue memory used when releasing the queue (i.e. configure)
*/
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
PMD_INIT_FUNC_TRACE();
if (q) {
PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
rx_queue_clean(q);
if (q->sw_ring) {
rte_free(q->sw_ring);
q->sw_ring = NULL;
}
rte_free(q);
q = NULL;
}
}
/*
 * disable RX queue, wait until HW finishes the necessary flush operation
*/
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
uint32_t reg, i;
reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
reg & ~FM10K_RXQCTL_ENABLE);
/* Wait 100us at most */
for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
rte_delay_us(1);
reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
if (!(reg & FM10K_RXQCTL_ENABLE))
break;
}
if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
return -1;
return 0;
}
/*
* reset queue to initial state, allocate software buffers used when starting
* device
*/
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
PMD_INIT_FUNC_TRACE();
q->last_free = 0;
q->next_free = 0;
q->nb_used = 0;
q->nb_free = q->nb_desc - 1;
fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}
/*
* clean queue, descriptor rings, free software buffers used when stopping
* device
*/
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
uint32_t i;
PMD_INIT_FUNC_TRACE();
/* zero descriptor rings */
for (i = 0; i < q->nb_desc; ++i)
q->hw_ring[i] = zero;
/* free software buffers */
for (i = 0; i < q->nb_desc; ++i) {
if (q->sw_ring[i]) {
rte_pktmbuf_free_seg(q->sw_ring[i]);
q->sw_ring[i] = NULL;
}
}
}
/*
* free all queue memory used when releasing the queue (i.e. configure)
*/
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
PMD_INIT_FUNC_TRACE();
if (q) {
PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
tx_queue_clean(q);
if (q->rs_tracker.list) {
rte_free(q->rs_tracker.list);
q->rs_tracker.list = NULL;
}
if (q->sw_ring) {
rte_free(q->sw_ring);
q->sw_ring = NULL;
}
rte_free(q);
q = NULL;
}
}
/*
 * disable TX queue, wait until HW finishes the necessary flush operation
*/
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
uint32_t reg, i;
reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
reg & ~FM10K_TXDCTL_ENABLE);
/* Wait 100us at most */
for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
rte_delay_us(1);
reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
if (!(reg & FM10K_TXDCTL_ENABLE))
break;
}
if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
return -1;
return 0;
}
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_vmdq_rx_conf *vmdq_conf;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
return 0;
if (hw->mac.type == fm10k_mac_vf) {
PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
return -EINVAL;
}
/* Check VMDQ queue pool number */
if (vmdq_conf->nb_queue_pools >
sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
vmdq_conf->nb_queue_pools > nb_rx_q) {
PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
vmdq_conf->nb_queue_pools);
return -EINVAL;
}
return 0;
}
static const struct fm10k_txq_ops def_txq_ops = {
.reset = tx_queue_reset,
};
static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
int ret;
PMD_INIT_FUNC_TRACE();
if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
	/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
ret);
return ret;
}
return 0;
}
/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
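/*
 * Worked examples (not part of the original file): fls() yields the 1-based
 * index of the highest set bit, so fls(0) == 0, fls(1) == 1, fls(4) == 3 and
 * fls(15) == 4; fm10k_dev_dglort_map_configure() below uses it to size the
 * queue/pool index fields of the DGLORT decoder.
 */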
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_vmdq_rx_conf *vmdq_conf;
uint32_t i;
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
if (!vmdq_conf->pool_map[i].pools)
continue;
fm10k_mbx_lock(hw);
fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
fm10k_mbx_unlock(hw);
}
}
static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Add default mac address */
fm10k_MAC_filter_set(dev, hw->mac.addr, true,
MAIN_VSI_POOL_NUMBER);
}
static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
uint32_t mrqc, *key, i, reta, j;
uint64_t hf;
#define RSS_KEY_SIZE 40
static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
if (dev->data->nb_rx_queues == 1 ||
dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
return;
}
/* random key is rss_intel_key (default) or user provided (rss_key) */
if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
key = (uint32_t *)rss_intel_key;
else
key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
/* Now fill our hash function seeds, 4 bytes at a time */
for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
/*
* Fill in redirection table
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
reta = 0;
for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta << CHAR_BIT) | j;
if ((i & 3) == 3)
FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
rte_bswap32(reta));
}
/*
* Generate RSS hash based on packet types, TCP/UDP
* port numbers and/or IPv4/v6 src and dst addresses
*/
hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
mrqc = 0;
mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
if (mrqc == 0) {
PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
"supported", hf);
return;
}
FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}
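/*
 * Illustrative walk-through of the redirection-table fill above (not part of
 * the original file), assuming nb_rx_queues == 3: j cycles 0, 1, 2, 0, ...,
 * so after the first four iterations reta holds 0x00010200 and is written as
 * rte_bswap32(0x00010200), i.e. the first register maps RSS indices 0-3 to
 * queues 0, 1, 2 and 0.
 */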
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t i;
for (i = 0; i < nb_lport_new; i++) {
/* Set unicast mode by default. App can change
* to other mode in other API func.
*/
fm10k_mbx_lock(hw);
hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
FM10K_XCAST_MODE_NONE);
fm10k_mbx_unlock(hw);
}
}
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_vmdq_rx_conf *vmdq_conf;
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
struct fm10k_macvlan_filter_info *macvlan;
uint16_t nb_queue_pools = 0; /* pool number in configuration */
uint16_t nb_lport_new;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
fm10k_dev_rss_configure(dev);
/* only PF supports VMDQ */
if (hw->mac.type != fm10k_mac_pf)
return;
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
nb_queue_pools = vmdq_conf->nb_queue_pools;
/* no pool number change, no need to update logic port and VLAN/MAC */
if (macvlan->nb_queue_pools == nb_queue_pools)
return;
nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
fm10k_dev_logic_port_update(dev, nb_lport_new);
/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
memset(dev->data->mac_addrs, 0,
ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
ether_addr_copy((const struct ether_addr *)hw->mac.addr,
&dev->data->mac_addrs[0]);
memset(macvlan, 0, sizeof(*macvlan));
macvlan->nb_queue_pools = nb_queue_pools;
if (nb_queue_pools)
fm10k_dev_vmdq_rx_configure(dev);
else
fm10k_dev_pf_main_vsi_reset(dev);
}
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int i, ret;
struct fm10k_tx_queue *txq;
uint64_t base_addr;
uint32_t size;
/* Disable TXINT to avoid possible interrupt */
for (i = 0; i < hw->mac.max_queues; i++)
FM10K_WRITE_REG(hw, FM10K_TXINT(i),
3 << FM10K_TXINT_TIMER_SHIFT);
/* Setup TX queue */
for (i = 0; i < dev->data->nb_tx_queues; ++i) {
txq = dev->data->tx_queues[i];
base_addr = txq->hw_ring_phys_addr;
size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
/* disable queue to avoid issues while updating state */
ret = tx_queue_disable(hw, i);
if (ret) {
PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
return -1;
}
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
* register is read-only for VF.
*/
if (fm10k_check_ftag(dev->device->devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
} else {
PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
return -ENOTSUP;
}
}
/* set location and size for descriptor ring */
FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
base_addr & UINT64_LOWER_32BITS_MASK);
FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
base_addr >> (CHAR_BIT * sizeof(uint32_t)));
FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
/* assign default SGLORT for each TX queue by PF */
if (hw->mac.type == fm10k_mac_pf)
FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
}
/* set up vector or scalar TX function as appropriate */
fm10k_set_tx_function(dev);
return 0;
}
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
uint64_t base_addr;
uint32_t size;
uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
uint32_t logic_port = hw->mac.dglort_map;
uint16_t buf_size;
uint16_t queue_stride = 0;
/* enable RXINT for interrupt mode */
i = 0;
if (rte_intr_dp_is_en(intr_handle)) {
for (; i < dev->data->nb_rx_queues; i++) {
FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
if (hw->mac.type == fm10k_mac_pf)
FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
else
FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
}
}
/* Disable other RXINT to avoid possible interrupt */
for (; i < hw->mac.max_queues; i++)
FM10K_WRITE_REG(hw, FM10K_RXINT(i),
3 << FM10K_RXINT_TIMER_SHIFT);
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
rxq = dev->data->rx_queues[i];
base_addr = rxq->hw_ring_phys_addr;
size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
/* disable queue to avoid issues while updating state */
ret = rx_queue_disable(hw, i);
if (ret) {
PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
return -1;
}
/* Setup the Base and Length of the Rx Descriptor Ring */
FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
base_addr & UINT64_LOWER_32BITS_MASK);
FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
base_addr >> (CHAR_BIT * sizeof(uint32_t)));
FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
/* Configure the Rx buffer size for one buff without split */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
/* As RX buffer is aligned to 512B within mbuf, some bytes are
* reserved for this purpose, and the worst case could be 511B.
* But SRR reg assumes all buffers have the same size. In order
* to fill the gap, we'll have to consider the worst case and
* assume 512B is reserved. If we don't do so, it's possible
* for HW to overwrite data to next mbuf.
*/
buf_size -= FM10K_RX_DATABUF_ALIGN;
FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
FM10K_SRRCTL_LOOPBACK_SUPPRESS);
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
dev->data->dev_conf.rxmode.enable_scatter) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
}
/* Enable drop on empty, it's RO for VF */
if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
FM10K_WRITE_FLUSH(hw);
}
/* Configure VMDQ/RSS if applicable */
fm10k_dev_mq_rx_configure(dev);
/* Decide the best RX function */
fm10k_set_rx_function(dev);
/* update RX_SGLORT for loopback suppress*/
if (hw->mac.type != fm10k_mac_pf)
return 0;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
if (macvlan->nb_queue_pools)
queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
if (i && queue_stride && !(i % queue_stride))
logic_port++;
FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
}
return 0;
}
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int err = -1;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
if (rx_queue_id < dev->data->nb_rx_queues) {
rxq = dev->data->rx_queues[rx_queue_id];
err = rx_queue_reset(rxq);
if (err == -ENOMEM) {
PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
return err;
} else if (err == -EINVAL) {
PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
" %d", err);
return err;
}
/* Setup the HW Rx Head and Tail Descriptor Pointers
* Note: this must be done AFTER the queue is enabled on real
* hardware, but BEFORE the queue is enabled when using the
* emulation platform. Do it in both places for now and remove
* this comment and the following two register writes when the
* emulation platform is no longer being used.
*/
FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
/* Set PF ownership flag for PF devices */
reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
if (hw->mac.type == fm10k_mac_pf)
reg |= FM10K_RXQCTL_PF;
reg |= FM10K_RXQCTL_ENABLE;
/* enable RX queue */
FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
FM10K_WRITE_FLUSH(hw);
/* Setup the HW Rx Head and Tail Descriptor Pointers
* Note: this must be done AFTER the queue is enabled
*/
FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
}
static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
if (rx_queue_id < dev->data->nb_rx_queues) {
/* Disable RX queue */
rx_queue_disable(hw, rx_queue_id);
/* Free mbuf and clean HW ring */
rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
}
static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
int err = 0;
PMD_INIT_FUNC_TRACE();
if (tx_queue_id < dev->data->nb_tx_queues) {
struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
q->ops->reset(q);
/* reset head and tail pointers */
FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
/* enable TX queue */
FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
FM10K_TXDCTL_ENABLE | txdctl);
FM10K_WRITE_FLUSH(hw);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
err = -1;
return err;
}
static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
if (tx_queue_id < dev->data->nb_tx_queues) {
tx_queue_disable(hw, tx_queue_id);
tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
}
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
!= FM10K_DGLORTMAP_NONE);
}
static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int status;
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
fm10k_mbx_lock(hw);
status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
FM10K_XCAST_MODE_PROMISC);
fm10k_mbx_unlock(hw);
if (status != FM10K_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}
static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint8_t mode;
int status;
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
if (dev->data->all_multicast == 1)
mode = FM10K_XCAST_MODE_ALLMULTI;
else
mode = FM10K_XCAST_MODE_NONE;
fm10k_mbx_lock(hw);
status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
mode);
fm10k_mbx_unlock(hw);
if (status != FM10K_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}
static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int status;
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
/* If promiscuous mode is enabled, it doesn't make sense to enable
* allmulticast and disable promiscuous since fm10k only can select
* one of the modes.
*/
if (dev->data->promiscuous) {
PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
"needn't enable allmulticast");
return;
}
fm10k_mbx_lock(hw);
status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
FM10K_XCAST_MODE_ALLMULTI);
fm10k_mbx_unlock(hw);
if (status != FM10K_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}
static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int status;
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
if (dev->data->promiscuous) {
PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
"since promisc mode is enabled");
return;
}
fm10k_mbx_lock(hw);
/* Change mode to unicast mode */
status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
FM10K_XCAST_MODE_NONE);
fm10k_mbx_unlock(hw);
if (status != FM10K_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
uint16_t nb_queue_pools;
struct fm10k_macvlan_filter_info *macvlan;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
nb_queue_pools = macvlan->nb_queue_pools;
pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
hw->mac.dglort_map;
FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
/* Configure VMDQ/RSS DGlort Decoder */
FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
/* Flow Director configurations, only queue number is valid. */
dglortdec = fls(dev->data->nb_rx_queues - 1);
dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
(hw->mac.dglort_map + GLORT_FD_Q_BASE);
FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
/* Invalidate all other GLORT entries */
for (i = 2; i < FM10K_DGLORT_COUNT; i++)
FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
FM10K_DGLORTMAP_NONE);
}
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int i, diag;
PMD_INIT_FUNC_TRACE();
/* stop, init, then start the hw */
diag = fm10k_stop_hw(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
return -EIO;
}
diag = fm10k_init_hw(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
return -EIO;
}
diag = fm10k_start_hw(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
return -EIO;
}
diag = fm10k_dev_tx_init(dev);
if (diag) {
PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
return diag;
}
if (fm10k_dev_rxq_interrupt_setup(dev))
return -EIO;
diag = fm10k_dev_rx_init(dev);
if (diag) {
PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
return diag;
}
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_dglort_map_configure(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct fm10k_rx_queue *rxq;
rxq = dev->data->rx_queues[i];
if (rxq->rx_deferred_start)
continue;
diag = fm10k_dev_rx_queue_start(dev, i);
if (diag != 0) {
int j;
for (j = 0; j < i; ++j)
rx_queue_clean(dev->data->rx_queues[j]);
return diag;
}
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct fm10k_tx_queue *txq;
txq = dev->data->tx_queues[i];
if (txq->tx_deferred_start)
continue;
diag = fm10k_dev_tx_queue_start(dev, i);
if (diag != 0) {
int j;
for (j = 0; j < i; ++j)
tx_queue_clean(dev->data->tx_queues[j]);
for (j = 0; j < dev->data->nb_rx_queues; ++j)
rx_queue_clean(dev->data->rx_queues[j]);
return diag;
}
}
/* Update default vlan when not in VMDQ mode */
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
return 0;
}
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
PMD_INIT_FUNC_TRACE();
if (dev->data->tx_queues)
for (i = 0; i < dev->data->nb_tx_queues; i++)
fm10k_dev_tx_queue_stop(dev, i);
if (dev->data->rx_queues)
for (i = 0; i < dev->data->nb_rx_queues; i++)
fm10k_dev_rx_queue_stop(dev, i);
/* Disable datapath event */
if (rte_intr_dp_is_en(intr_handle)) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
FM10K_WRITE_REG(hw, FM10K_RXINT(i),
3 << FM10K_RXINT_TIMER_SHIFT);
if (hw->mac.type == fm10k_mac_pf)
FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
else
FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
}
}
/* Clean datapath event and queue/vec mapping */
rte_intr_efd_disable(intr_handle);
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
int i;
PMD_INIT_FUNC_TRACE();
if (dev->data->tx_queues) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
tx_queue_free(txq);
}
}
if (dev->data->rx_queues) {
for (i = 0; i < dev->data->nb_rx_queues; i++)
fm10k_rx_queue_release(dev->data->rx_queues[i]);
}
}
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
fm10k_mbx_lock(hw);
hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
MAX_LPORT_NUM, false);
fm10k_mbx_unlock(hw);
/* allow 10ms for device to quiesce */
rte_delay_us(FM10K_SWITCH_QUIESCE_US);
/* Stop mailbox service first */
fm10k_close_mbx_service(hw);
fm10k_dev_stop(dev);
fm10k_dev_queue_release(dev);
fm10k_stop_hw(hw);
}
static int
fm10k_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
PMD_INIT_FUNC_TRACE();
/* The host-interface link is always up. The speed is ~50Gbps per Gen3
* x8 PCIe interface. For now, we leave the speed undefined since there
* is no 50Gbps Ethernet. */
dev->data->dev_link.link_speed = 0;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status = ETH_LINK_UP;
return 0;
}
static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
unsigned i, q;
unsigned count = 0;
if (xstats_names != NULL) {
		/* Note: limit checked in rte_eth_xstats_get_names() */
/* Global stats */
for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
"%s", fm10k_hw_stats_strings[count].name);
count++;
}
/* PF queue stats */
for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
"rx_q%u_%s", q,
fm10k_hw_stats_rx_q_strings[i].name);
count++;
}
for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
"tx_q%u_%s", q,
fm10k_hw_stats_tx_q_strings[i].name);
count++;
}
}
}
return FM10K_NB_XSTATS;
}
static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct fm10k_hw_stats *hw_stats =
FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
unsigned i, q, count = 0;
if (n < FM10K_NB_XSTATS)
return FM10K_NB_XSTATS;
/* Global stats */
for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
fm10k_hw_stats_strings[count].offset);
xstats[count].id = count;
count++;
}
/* PF queue stats */
for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_rx_q_strings[i].offset);
xstats[count].id = count;
count++;
}
for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_tx_q_strings[i].offset);
xstats[count].id = count;
count++;
}
}
return FM10K_NB_XSTATS;
}
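/*
 * Illustrative usage (standard ethdev API, not fm10k-specific): an
 * application reaches this handler through the generic xstats API,
 * typically sizing the array with a first call before fetching, e.g.
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   rte_eth_xstats_get(port_id, xs, n);
 */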
static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint64_t ipackets, opackets, ibytes, obytes;
struct fm10k_hw *hw =
FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_hw_stats *hw_stats =
FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
int i;
PMD_INIT_FUNC_TRACE();
fm10k_update_hw_stats(hw, hw_stats);
ipackets = opackets = ibytes = obytes = 0;
for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
(i < hw->mac.max_queues); ++i) {
stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
ipackets += stats->q_ipackets[i];
opackets += stats->q_opackets[i];
ibytes += stats->q_ibytes[i];
obytes += stats->q_obytes[i];
}
stats->ipackets = ipackets;
stats->opackets = opackets;
stats->ibytes = ibytes;
stats->obytes = obytes;
}
static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_hw_stats *hw_stats =
FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
memset(hw_stats, 0, sizeof(*hw_stats));
fm10k_rebind_hw_stats(hw, hw_stats);
}
static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
dev_info->max_tx_queues = hw->mac.max_queues;
dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = pdev->max_vfs;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = FM10K_DEFAULT_RX_PTHRESH,
.hthresh = FM10K_DEFAULT_RX_HTHRESH,
.wthresh = FM10K_DEFAULT_RX_WTHRESH,
},
.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
.rx_drop_en = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_thresh = {
.pthresh = FM10K_DEFAULT_TX_PTHRESH,
.hthresh = FM10K_DEFAULT_TX_HTHRESH,
.wthresh = FM10K_DEFAULT_TX_WTHRESH,
},
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
.txq_flags = FM10K_SIMPLE_TX_FLAG,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = FM10K_MAX_RX_DESC,
.nb_min = FM10K_MIN_RX_DESC,
.nb_align = FM10K_MULT_RX_DESC,
};
dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = FM10K_MAX_TX_DESC,
.nb_min = FM10K_MIN_TX_DESC,
.nb_align = FM10K_MULT_TX_DESC,
.nb_seg_max = FM10K_TX_MAX_SEG,
.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
}
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
static const uint32_t *
fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
if (dev->rx_pkt_burst == fm10k_recv_pkts ||
dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
static uint32_t ptypes[] = {
/* refers to rx_desc_to_ol_flags() */
RTE_PTYPE_L2_ETHER,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV6,
RTE_PTYPE_L3_IPV6_EXT,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_UNKNOWN
};
return ptypes;
} else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
static uint32_t ptypes_vec[] = {
/* refers to fm10k_desc_to_pktype_v() */
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV6,
RTE_PTYPE_L3_IPV6_EXT,
RTE_PTYPE_L4_TCP,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_GENEVE,
RTE_PTYPE_TUNNEL_NVGRE,
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_TUNNEL_GRE,
RTE_PTYPE_UNKNOWN
};
return ptypes_vec;
}
return NULL;
}
#else
static const uint32_t *
fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
return NULL;
}
#endif
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
s32 result;
uint16_t mac_num = 0;
uint32_t vid_idx, vid_bit, mac_index;
struct fm10k_hw *hw;
struct fm10k_macvlan_filter_info *macvlan;
struct rte_eth_dev_data *data = dev->data;
hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
return -EINVAL;
}
if (vlan_id > ETH_VLAN_ID_MAX) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
return -EINVAL;
}
vid_idx = FM10K_VFTA_IDX(vlan_id);
vid_bit = FM10K_VFTA_BIT(vlan_id);
/* this VLAN ID is already in the VLAN filter table, return SUCCESS */
if (on && (macvlan->vfta[vid_idx] & vid_bit))
return 0;
/* this VLAN ID is NOT in the VLAN filter table, cannot remove */
if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
"in the VLAN filter table");
return -EINVAL;
}
fm10k_mbx_lock(hw);
result = fm10k_update_vlan(hw, vlan_id, 0, on);
fm10k_mbx_unlock(hw);
if (result != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
return -EIO;
}
for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
(result == FM10K_SUCCESS); mac_index++) {
if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
continue;
if (mac_num > macvlan->mac_num - 1) {
PMD_INIT_LOG(ERR, "MAC address number "
"not match");
break;
}
fm10k_mbx_lock(hw);
result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
data->mac_addrs[mac_index].addr_bytes,
vlan_id, on, 0);
fm10k_mbx_unlock(hw);
mac_num++;
}
if (result != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
return -EIO;
}
if (on) {
macvlan->vlan_num++;
macvlan->vfta[vid_idx] |= vid_bit;
} else {
macvlan->vlan_num--;
macvlan->vfta[vid_idx] &= ~vid_bit;
}
return 0;
}
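/*
 * Illustrative usage (standard ethdev API, not fm10k-specific):
 * applications reach this handler via
 *
 *   rte_eth_dev_vlan_filter(port_id, vlan_id, 1);   // add
 *   rte_eth_dev_vlan_filter(port_id, vlan_id, 0);   // remove
 */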
static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
PMD_INIT_LOG(ERR, "VLAN stripping is "
"always on in fm10k");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
PMD_INIT_LOG(ERR, "VLAN QinQ is not "
"supported in fm10k");
}
if (mask & ETH_VLAN_FILTER_MASK) {
if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
}
/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
const u8 *mac, bool add, uint32_t pool)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
uint32_t i, j, k;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
if (pool != MAIN_VSI_POOL_NUMBER) {
PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
"mac to pool %u", pool);
return;
}
for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
if (!macvlan->vfta[j])
continue;
for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
if (!(macvlan->vfta[j] & (1 << k)))
continue;
if (i + 1 > macvlan->vlan_num) {
PMD_INIT_LOG(ERR, "vlan number not match");
return;
}
fm10k_mbx_lock(hw);
fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
j * FM10K_UINT32_BIT_SIZE + k, add, 0);
fm10k_mbx_unlock(hw);
i++;
}
}
}
/* Add/Remove a MAC address, and update filters to VMDQ */
static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
const u8 *mac, bool add, uint32_t pool)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
struct rte_eth_vmdq_rx_conf *vmdq_conf;
uint32_t i;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
if (pool > macvlan->nb_queue_pools) {
PMD_DRV_LOG(ERR, "Pool number %u invalid."
" Max pool is %u",
pool, macvlan->nb_queue_pools);
return;
}
for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
continue;
fm10k_mbx_lock(hw);
fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
vmdq_conf->pool_map[i].vlan_id, add, 0);
fm10k_mbx_unlock(hw);
}
}
/* Add/Remove a MAC address, and update filters */
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
const u8 *mac, bool add, uint32_t pool)
{
struct fm10k_macvlan_filter_info *macvlan;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
else
fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
if (add)
macvlan->mac_num++;
else
macvlan->mac_num--;
}
/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index,
uint32_t pool)
{
struct fm10k_macvlan_filter_info *macvlan;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
macvlan->mac_vmdq_id[index] = pool;
}
/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct rte_eth_dev_data *data = dev->data;
struct fm10k_macvlan_filter_info *macvlan;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
FALSE, macvlan->mac_vmdq_id[index]);
macvlan->mac_vmdq_id[index] = 0;
}
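/* Validate a requested descriptor count: it must lie within [min, max]
 * and be a multiple of 'mult'. Returns 0 when valid, -1 otherwise.
 */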
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
if ((request < min) || (request > max) || ((request % mult) != 0))
return -1;
else
return 0;
}
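/* Validate a threshold: it must lie within [min, max] and evenly divide
 * 'div' (typically the ring size), so the ring is processed in equal
 * chunks. Returns 0 when valid, -1 otherwise.
 */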
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
if ((request < min) || (request > max) || ((div % request) != 0))
return -1;
else
return 0;
}
static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
uint16_t rx_free_thresh;
if (conf->rx_free_thresh == 0)
rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
else
rx_free_thresh = conf->rx_free_thresh;
/* make sure the requested threshold satisfies the constraints */
if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
FM10K_RX_FREE_THRESH_MAX(q),
FM10K_RX_FREE_THRESH_DIV(q),
rx_free_thresh)) {
PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
"less than or equal to %u, "
"greater than or equal to %u, "
"and a divisor of %u",
rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
FM10K_RX_FREE_THRESH_MIN(q),
FM10K_RX_FREE_THRESH_DIV(q));
return -EINVAL;
}
q->alloc_thresh = rx_free_thresh;
q->drop_en = conf->rx_drop_en;
q->rx_deferred_start = conf->rx_deferred_start;
return 0;
}
/*
* Hardware requires specific alignment for Rx packet buffers. At
* least one of the following two conditions must be satisfied.
* 1. Address is 512B aligned
* 2. Address is 8B aligned and buffer does not cross 4K boundary.
*
* As such, the driver may need to adjust the DMA address within the
* buffer by up to 512B.
*
* return 1 if the element size is valid, otherwise return 0.
*/
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
uint32_t min_size;
/* elt_size includes mbuf header and headroom */
min_size = mp->elt_size - sizeof(struct rte_mbuf) -
RTE_PKTMBUF_HEADROOM;
/* account for up to 512B of alignment */
min_size -= FM10K_RX_DATABUF_ALIGN;
/* sanity check for overflow */
if (min_size > mp->elt_size)
return 0;
/* size is valid */
return 1;
}
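/*
 * Example (illustrative; pool parameters are assumptions): a pool created
 * with the common default data room, e.g.
 *
 *   rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                           RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 * leaves far more than 512B of data space after the mbuf header and
 * headroom, so it passes this check.
 */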
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_dev_info *dev_info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
PMD_INIT_FUNC_TRACE();
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
return -EINVAL;
}
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
FM10K_MULT_RX_DESC, nb_desc)) {
PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
"less than or equal to %"PRIu32", "
"greater than or equal to %u, "
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
FM10K_MULT_RX_DESC);
return -EINVAL;
}
/*
* if this queue existed already, free the associated memory. The
* queue cannot be reused in case we need to allocate memory on
* different socket than was previously used.
*/
if (dev->data->rx_queues[queue_id] != NULL) {
rx_queue_free(dev->data->rx_queues[queue_id]);
dev->data->rx_queues[queue_id] = NULL;
}
/* allocate memory for the queue structure */
q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
return -ENOMEM;
}
/* setup queue */
q->mp = mp;
q->nb_desc = nb_desc;
q->nb_fake_desc = FM10K_MULT_RX_DESC;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
if (handle_rxconf(q, conf))
return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
RTE_CACHE_LINE_SIZE, socket_id);
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
return -ENOMEM;
}
/*
* allocate memory for the hardware descriptor ring. A memzone large
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
return -ENOMEM;
}
q->hw_ring = mz->addr;
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
"preconditions - canceling the feature for "
"the whole port[%d]",
q->queue_id, q->port_id);
dev_info->rx_vec_allowed = false;
} else
fm10k_rxq_vec_setup(q);
dev->data->rx_queues[queue_id] = q;
return 0;
}
static void
fm10k_rx_queue_release(void *queue)
{
PMD_INIT_FUNC_TRACE();
rx_queue_free(queue);
}
static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
uint16_t tx_free_thresh;
uint16_t tx_rs_thresh;
	/* the constraint macros require that tx_free_thresh is configured
	 * before tx_rs_thresh */
if (conf->tx_free_thresh == 0)
tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
else
tx_free_thresh = conf->tx_free_thresh;
/* make sure the requested threshold satisfies the constraints */
if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
FM10K_TX_FREE_THRESH_MAX(q),
FM10K_TX_FREE_THRESH_DIV(q),
tx_free_thresh)) {
PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
"less than or equal to %u, "
"greater than or equal to %u, "
"and a divisor of %u",
tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
FM10K_TX_FREE_THRESH_MIN(q),
FM10K_TX_FREE_THRESH_DIV(q));
return -EINVAL;
}
q->free_thresh = tx_free_thresh;
if (conf->tx_rs_thresh == 0)
tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
else
tx_rs_thresh = conf->tx_rs_thresh;
q->tx_deferred_start = conf->tx_deferred_start;
/* make sure the requested threshold satisfies the constraints */
if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
FM10K_TX_RS_THRESH_MAX(q),
FM10K_TX_RS_THRESH_DIV(q),
tx_rs_thresh)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
"less than or equal to %u, "
"greater than or equal to %u, "
"and a divisor of %u",
tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
FM10K_TX_RS_THRESH_MIN(q),
FM10K_TX_RS_THRESH_DIV(q));
return -EINVAL;
}
q->rs_thresh = tx_rs_thresh;
return 0;
}
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *conf)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
PMD_INIT_FUNC_TRACE();
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
"less than or equal to %"PRIu32", "
"greater than or equal to %u, "
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
FM10K_MULT_TX_DESC);
return -EINVAL;
}
/*
* if this queue existed already, free the associated memory. The
* queue cannot be reused in case we need to allocate memory on
* different socket than was previously used.
*/
if (dev->data->tx_queues[queue_id] != NULL) {
struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
tx_queue_free(txq);
dev->data->tx_queues[queue_id] = NULL;
}
/* allocate memory for the queue structure */
q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
return -ENOMEM;
}
/* setup queue */
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->txq_flags = conf->txq_flags;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
if (handle_txconf(q, conf))
return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
nb_desc * sizeof(struct rte_mbuf *),
RTE_CACHE_LINE_SIZE, socket_id);
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
return -ENOMEM;
}
/*
* allocate memory for the hardware descriptor ring. A memzone large
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
return -ENOMEM;
}
q->hw_ring = mz->addr;
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
* descriptor index for each RS bit needing to be set are required.
*/
q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
((nb_desc + 1) / q->rs_thresh) *
sizeof(uint16_t),
RTE_CACHE_LINE_SIZE, socket_id);
if (q->rs_tracker.list == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
rte_free(q->sw_ring);
rte_free(q);
return -ENOMEM;
}
dev->data->tx_queues[queue_id] = q;
return 0;
}
static void
fm10k_tx_queue_release(void *queue)
{
struct fm10k_tx_queue *q = queue;
PMD_INIT_FUNC_TRACE();
tx_queue_free(q);
}
static int
fm10k_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t i, j, idx, shift;
uint8_t mask;
uint32_t reta;
PMD_INIT_FUNC_TRACE();
if (reta_size > FM10K_MAX_RSS_INDICES) {
PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
return -EINVAL;
}
/*
* Update Redirection Table RETA[n], n=0..31. The redirection table has
 * 128 entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
continue;
reta = 0;
if (mask != BIT_MASK_PER_UINT32)
reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
for (j = 0; j < CHARS_PER_UINT32; j++) {
if (mask & (0x1 << j)) {
if (mask != 0xF)
reta &= ~(UINT8_MAX << CHAR_BIT * j);
reta |= reta_conf[idx].reta[shift + j] <<
(CHAR_BIT * j);
}
}
FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
}
return 0;
}
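/*
 * Illustrative usage (standard ethdev API): applications normally reach
 * this through rte_eth_dev_rss_reta_update(), filling reta_conf[] in
 * RTE_RETA_GROUP_SIZE-entry groups and setting mask bits only for the
 * entries they intend to change.
 */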
static int
fm10k_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t i, j, idx, shift;
uint8_t mask;
uint32_t reta;
PMD_INIT_FUNC_TRACE();
if (reta_size < FM10K_MAX_RSS_INDICES) {
PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
"(%d)", reta_size, FM10K_MAX_RSS_INDICES);
return -EINVAL;
}
/*
* Read Redirection Table RETA[n], n=0..31. The redirection table has
 * 128 entries in 32 registers
*/
for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
mask = (uint8_t)((reta_conf[idx].mask >> shift) &
BIT_MASK_PER_UINT32);
if (mask == 0)
continue;
reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
for (j = 0; j < CHARS_PER_UINT32; j++) {
if (mask & (0x1 << j))
reta_conf[idx].reta[shift + j] = ((reta >>
CHAR_BIT * j) & UINT8_MAX);
}
}
return 0;
}
static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint32_t mrqc;
uint64_t hf = rss_conf->rss_hf;
int i;
PMD_INIT_FUNC_TRACE();
if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
FM10K_RSSRK_ENTRIES_PER_REG))
return -EINVAL;
if (hf == 0)
return -EINVAL;
mrqc = 0;
mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
/* If the mapping doesn't fit any supported, return */
if (mrqc == 0)
return -EINVAL;
if (key != NULL)
for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
return 0;
}
static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint32_t mrqc;
uint64_t hf;
int i;
PMD_INIT_FUNC_TRACE();
if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
FM10K_RSSRK_ENTRIES_PER_REG))
return -EINVAL;
if (key != NULL)
for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
hf = 0;
hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0;
hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0;
hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0;
hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0;
hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0;
rss_conf->rss_hf = hf;
return 0;
}
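/*
 * Illustrative usage (standard ethdev API; values are assumptions): RSS is
 * normally enabled at configure time and adjusted later, e.g.
 *
 *   struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS } };
 *   conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * rte_eth_dev_rss_hash_update() and rte_eth_dev_rss_hash_conf_get() then
 * land in the two handlers above.
 */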
static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
/* Bind all local non-queue interrupt to vector 0 */
int_map |= FM10K_MISC_VEC_ID;
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
/* Enable misc causes */
FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
FM10K_EIMR_ENABLE(THI_FAULT) |
FM10K_EIMR_ENABLE(FUM_FAULT) |
FM10K_EIMR_ENABLE(MAILBOX) |
FM10K_EIMR_ENABLE(SWITCHREADY) |
FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
FM10K_EIMR_ENABLE(SRAMERROR) |
FM10K_EIMR_ENABLE(VFLR));
/* Enable ITR 0 */
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t int_map = FM10K_INT_MAP_DISABLE;
int_map |= FM10K_MISC_VEC_ID;
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
/* Disable misc causes */
FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
FM10K_EIMR_DISABLE(THI_FAULT) |
FM10K_EIMR_DISABLE(FUM_FAULT) |
FM10K_EIMR_DISABLE(MAILBOX) |
FM10K_EIMR_DISABLE(SWITCHREADY) |
FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
FM10K_EIMR_DISABLE(SRAMERROR) |
FM10K_EIMR_DISABLE(VFLR));
/* Disable ITR 0 */
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
/* Bind all local non-queue interrupt to vector 0 */
int_map |= FM10K_MISC_VEC_ID;
/* Only INT 0 available, other 15 are reserved. */
FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
/* Enable ITR 0 */
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
FM10K_WRITE_FLUSH(hw);
}
static void
fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t int_map = FM10K_INT_MAP_DISABLE;
int_map |= FM10K_MISC_VEC_ID;
/* Only INT 0 available, other 15 are reserved. */
FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
/* Disable ITR 0 */
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
FM10K_WRITE_FLUSH(hw);
}
static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
else
FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
rte_intr_enable(&pdev->intr_handle);
return 0;
}
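/*
 * Illustrative usage (standard ethdev API): an application waiting on
 * epoll would call rte_eth_dev_rx_intr_enable(port_id, queue_id) before
 * sleeping and rte_eth_dev_rx_intr_disable() after waking, which route
 * to this handler and the disable handler below.
 */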
static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
else
FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
return 0;
}
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
int result = 0;
	/* fm10k needs one separate interrupt for the mailbox, so only
	 * drivers that support multiple interrupt vectors, e.g. vfio-pci,
	 * can service fm10k's Rx interrupt mode.
	 */
if (!rte_intr_cap_multiple(intr_handle) ||
dev->data->dev_conf.intr_conf.rxq == 0)
return result;
intr_vector = dev->data->nb_rx_queues;
/* disable interrupt first */
rte_intr_disable(intr_handle);
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_disable_intr_pf(dev);
else
fm10k_dev_disable_intr_vf(dev);
if (rte_intr_efd_enable(intr_handle, intr_vector)) {
PMD_INIT_LOG(ERR, "Failed to init event fd");
result = -EIO;
}
if (rte_intr_dp_is_en(intr_handle) && !result) {
intr_handle->intr_vec = rte_zmalloc("intr_vec",
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec) {
for (queue_id = 0, vec = FM10K_RX_VEC_START;
queue_id < dev->data->nb_rx_queues;
queue_id++) {
intr_handle->intr_vec[queue_id] = vec;
if (vec < intr_handle->nb_efd - 1
+ FM10K_RX_VEC_START)
vec++;
}
} else {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", dev->data->nb_rx_queues);
rte_intr_efd_disable(intr_handle);
result = -ENOMEM;
}
}
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_enable_intr_pf(dev);
else
fm10k_dev_enable_intr_vf(dev);
rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
return result;
}
static int
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
struct fm10k_fault fault;
int err;
const char *estr = "Unknown error";
/* Process PCA fault */
if (eicr & FM10K_EICR_PCA_FAULT) {
err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
if (err)
goto error;
switch (fault.type) {
case PCA_NO_FAULT:
estr = "PCA_NO_FAULT"; break;
case PCA_UNMAPPED_ADDR:
estr = "PCA_UNMAPPED_ADDR"; break;
case PCA_BAD_QACCESS_PF:
estr = "PCA_BAD_QACCESS_PF"; break;
case PCA_BAD_QACCESS_VF:
estr = "PCA_BAD_QACCESS_VF"; break;
case PCA_MALICIOUS_REQ:
estr = "PCA_MALICIOUS_REQ"; break;
case PCA_POISONED_TLP:
estr = "PCA_POISONED_TLP"; break;
case PCA_TLP_ABORT:
estr = "PCA_TLP_ABORT"; break;
default:
goto error;
}
PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
estr, fault.func ? "VF" : "PF", fault.func,
fault.address, fault.specinfo);
}
/* Process THI fault */
if (eicr & FM10K_EICR_THI_FAULT) {
err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
if (err)
goto error;
switch (fault.type) {
case THI_NO_FAULT:
estr = "THI_NO_FAULT"; break;
case THI_MAL_DIS_Q_FAULT:
estr = "THI_MAL_DIS_Q_FAULT"; break;
default:
goto error;
}
PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
estr, fault.func ? "VF" : "PF", fault.func,
fault.address, fault.specinfo);
}
/* Process FUM fault */
if (eicr & FM10K_EICR_FUM_FAULT) {
err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
if (err)
goto error;
switch (fault.type) {
case FUM_NO_FAULT:
estr = "FUM_NO_FAULT"; break;
case FUM_UNMAPPED_ADDR:
estr = "FUM_UNMAPPED_ADDR"; break;
case FUM_POISONED_TLP:
estr = "FUM_POISONED_TLP"; break;
case FUM_BAD_VF_QACCESS:
estr = "FUM_BAD_VF_QACCESS"; break;
case FUM_ADD_DECODE_ERR:
estr = "FUM_ADD_DECODE_ERR"; break;
case FUM_RO_ERROR:
estr = "FUM_RO_ERROR"; break;
case FUM_QPRC_CRC_ERROR:
estr = "FUM_QPRC_CRC_ERROR"; break;
case FUM_CSR_TIMEOUT:
estr = "FUM_CSR_TIMEOUT"; break;
case FUM_INVALID_TYPE:
estr = "FUM_INVALID_TYPE"; break;
case FUM_INVALID_LENGTH:
estr = "FUM_INVALID_LENGTH"; break;
case FUM_INVALID_BE:
estr = "FUM_INVALID_BE"; break;
case FUM_INVALID_ALIGN:
estr = "FUM_INVALID_ALIGN"; break;
default:
goto error;
}
PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
estr, fault.func ? "VF" : "PF", fault.func,
fault.address, fault.specinfo);
}
return 0;
error:
PMD_INIT_LOG(ERR, "Failed to handle fault event.");
return err;
}
/**
* PF interrupt handler triggered by NIC for handling specific interrupt.
*
* @param handle
* Pointer to interrupt handle.
* @param param
 * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
*/
static void
fm10k_dev_interrupt_handler_pf(
struct rte_intr_handle *handle,
void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t cause, status;
if (hw->mac.type != fm10k_mac_pf)
return;
cause = FM10K_READ_REG(hw, FM10K_EICR);
/* Handle PCI fault cases */
if (cause & FM10K_EICR_FAULT_MASK) {
PMD_INIT_LOG(ERR, "INT: find fault!");
fm10k_dev_handle_fault(hw, cause);
}
/* Handle switch up/down */
if (cause & FM10K_EICR_SWITCHNOTREADY)
PMD_INIT_LOG(ERR, "INT: Switch is not ready");
if (cause & FM10K_EICR_SWITCHREADY)
PMD_INIT_LOG(INFO, "INT: Switch is ready");
/* Handle mailbox message */
fm10k_mbx_lock(hw);
hw->mbx.ops.process(hw, &hw->mbx);
fm10k_mbx_unlock(hw);
/* Handle SRAM error */
if (cause & FM10K_EICR_SRAMERROR) {
PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
/* Write to clear pending bits */
FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
/* Todo: print out error message after shared code updates */
}
/* Clear these 3 events if having any */
cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
FM10K_EICR_SWITCHREADY;
if (cause)
FM10K_WRITE_REG(hw, FM10K_EICR, cause);
/* Re-enable interrupt from device side */
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
rte_intr_enable(handle);
}
/**
* VF interrupt handler triggered by NIC for handling specific interrupt.
*
* @param handle
* Pointer to interrupt handle.
* @param param
 * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
*/
static void
fm10k_dev_interrupt_handler_vf(
struct rte_intr_handle *handle,
void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type != fm10k_mac_vf)
return;
/* Handle mailbox message if lock is acquired */
fm10k_mbx_lock(hw);
hw->mbx.ops.process(hw, &hw->mbx);
fm10k_mbx_unlock(hw);
/* Re-enable interrupt from device side */
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
rte_intr_enable(handle);
}
/* Mailbox message handler in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
int err = 0;
/* Initialize mailbox lock */
fm10k_mbx_initlock(hw);
/* Replace default message handler with new ones */
if (hw->mac.type == fm10k_mac_vf)
err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
if (err) {
PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
err);
return err;
}
/* Connect to SM for PF device or PF for VF device */
return hw->mbx.ops.connect(hw, &hw->mbx);
}
static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
/* Disconnect from SM for PF device or PF for VF device */
hw->mbx.ops.disconnect(hw, &hw->mbx);
}
static const struct eth_dev_ops fm10k_eth_dev_ops = {
.dev_configure = fm10k_dev_configure,
.dev_start = fm10k_dev_start,
.dev_stop = fm10k_dev_stop,
.dev_close = fm10k_dev_close,
.promiscuous_enable = fm10k_dev_promiscuous_enable,
.promiscuous_disable = fm10k_dev_promiscuous_disable,
.allmulticast_enable = fm10k_dev_allmulticast_enable,
.allmulticast_disable = fm10k_dev_allmulticast_disable,
.stats_get = fm10k_stats_get,
.xstats_get = fm10k_xstats_get,
.xstats_get_names = fm10k_xstats_get_names,
.stats_reset = fm10k_stats_reset,
.xstats_reset = fm10k_stats_reset,
.link_update = fm10k_link_update,
.dev_infos_get = fm10k_dev_infos_get,
.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
.vlan_filter_set = fm10k_vlan_filter_set,
.vlan_offload_set = fm10k_vlan_offload_set,
.mac_addr_add = fm10k_macaddr_add,
.mac_addr_remove = fm10k_macaddr_remove,
.rx_queue_start = fm10k_dev_rx_queue_start,
.rx_queue_stop = fm10k_dev_rx_queue_stop,
.tx_queue_start = fm10k_dev_tx_queue_start,
.tx_queue_stop = fm10k_dev_tx_queue_stop,
.rx_queue_setup = fm10k_rx_queue_setup,
.rx_queue_release = fm10k_rx_queue_release,
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
.rx_descriptor_done = fm10k_dev_rx_descriptor_done,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
.reta_query = fm10k_reta_query,
.rss_hash_update = fm10k_rss_hash_update,
.rss_hash_conf_get = fm10k_rss_hash_conf_get,
};
static int ftag_check_handler(__rte_unused const char *key,
const char *value, __rte_unused void *opaque)
{
if (strcmp(value, "1"))
return -1;
return 0;
}
static int
fm10k_check_ftag(struct rte_devargs *devargs)
{
struct rte_kvargs *kvlist;
const char *ftag_key = "enable_ftag";
if (devargs == NULL)
return 0;
kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL)
return 0;
if (!rte_kvargs_count(kvlist, ftag_key)) {
rte_kvargs_free(kvlist);
return 0;
}
	/* FTAG is enabled when the key-value pair enable_ftag=1 is present */
if (rte_kvargs_process(kvlist, ftag_key,
ftag_check_handler, NULL) < 0) {
rte_kvargs_free(kvlist);
return 0;
}
rte_kvargs_free(kvlist);
return 1;
}
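/*
 * Example (illustrative; the PCI address is hypothetical): FTAG is
 * requested through device arguments on the EAL command line, e.g.
 *
 *   -w 0000:84:00.0,enable_ftag=1
 */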
static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
struct fm10k_tx_queue *txq;
int i;
int use_sse = 1;
uint16_t tx_ftag_en = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/* primary process has set the ftag flag and txq_flags */
txq = dev->data->tx_queues[0];
if (fm10k_tx_vec_condition_check(txq)) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
dev->tx_pkt_prepare = fm10k_prep_pkts;
PMD_INIT_LOG(DEBUG, "Use regular Tx func");
} else {
PMD_INIT_LOG(DEBUG, "Use vector Tx func");
dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
dev->tx_pkt_prepare = NULL;
}
return;
}
if (fm10k_check_ftag(dev->device->devargs))
tx_ftag_en = 1;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
txq->tx_ftag_en = tx_ftag_en;
/* Check if Vector Tx is satisfied */
if (fm10k_tx_vec_condition_check(txq))
use_sse = 0;
}
if (use_sse) {
PMD_INIT_LOG(DEBUG, "Use vector Tx func");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
fm10k_txq_vec_setup(txq);
}
dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
dev->tx_pkt_prepare = NULL;
} else {
dev->tx_pkt_burst = fm10k_xmit_pkts;
dev->tx_pkt_prepare = fm10k_prep_pkts;
PMD_INIT_LOG(DEBUG, "Use regular Tx func");
}
}
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
struct fm10k_dev_info *dev_info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
if (fm10k_check_ftag(dev->device->devargs))
rx_ftag_en = 1;
/* In order to allow Vector Rx there are a few configuration
* conditions to be met.
*/
if (!fm10k_rx_vec_condition_check(dev) &&
dev_info->rx_vec_allowed && !rx_ftag_en) {
if (dev->data->scattered_rx)
dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
else
dev->rx_pkt_burst = fm10k_recv_pkts_vec;
} else if (dev->data->scattered_rx)
dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
else
dev->rx_pkt_burst = fm10k_recv_pkts;
rx_using_sse =
(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
dev->rx_pkt_burst == fm10k_recv_pkts_vec);
if (rx_using_sse)
PMD_INIT_LOG(DEBUG, "Use vector Rx func");
else
PMD_INIT_LOG(DEBUG, "Use regular Rx func");
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
rxq->rx_ftag_en = rx_ftag_en;
}
}
static void
fm10k_params_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_dev_info *info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	/* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
* works, assume we have maximum bandwidth.
* @todo - fix bus info
*/
hw->bus_caps.speed = fm10k_bus_speed_8000;
hw->bus_caps.width = fm10k_bus_width_pcie_x8;
hw->bus_caps.payload = fm10k_bus_payload_512;
hw->bus.speed = fm10k_bus_speed_8000;
hw->bus.width = fm10k_bus_width_pcie_x8;
hw->bus.payload = fm10k_bus_payload_256;
info->rx_vec_allowed = true;
}
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &fm10k_eth_dev_ops;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
dev->tx_pkt_prepare = &fm10k_prep_pkts;
/*
* Primary process does the whole initialization, for secondary
* processes, we just select the same Rx and Tx function as primary.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
fm10k_set_rx_function(dev);
fm10k_set_tx_function(dev);
return 0;
}
rte_eth_copy_pci_info(dev, pdev);
dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
/* Vendor and Device ID need to be set before init of shared code */
memset(hw, 0, sizeof(*hw));
hw->device_id = pdev->id.device_id;
hw->vendor_id = pdev->id.vendor_id;
hw->subsystem_device_id = pdev->id.subsystem_device_id;
hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
hw->revision_id = 0;
hw->hw_addr = (void *)pdev->mem_resource[0].addr;
if (hw->hw_addr == NULL) {
PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to blacklist unused devices.");
return -EIO;
}
/* Store fm10k_adapter pointer */
hw->back = dev->data->dev_private;
/* Initialize the shared code */
diag = fm10k_init_shared_code(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
return -EIO;
}
/* Initialize parameters */
fm10k_params_init(dev);
/* Initialize the hw */
diag = fm10k_init_hw(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
return -EIO;
}
/* Initialize MAC address(es) */
dev->data->mac_addrs = rte_zmalloc("fm10k",
ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
if (dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
return -ENOMEM;
}
diag = fm10k_read_mac_addr(hw);
ether_addr_copy((const struct ether_addr *)hw->mac.addr,
&dev->data->mac_addrs[0]);
if (diag != FM10K_SUCCESS ||
!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
/* Generate a random addr */
eth_random_addr(hw->mac.addr);
memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
ether_addr_copy((const struct ether_addr *)hw->mac.addr,
&dev->data->mac_addrs[0]);
}
/* Reset the hw statistics */
fm10k_stats_reset(dev);
/* Reset the hw */
diag = fm10k_reset_hw(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
return -EIO;
}
/* Setup mailbox service */
diag = fm10k_setup_mbx_service(hw);
if (diag != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
return -EIO;
}
	/* PF and VF have different interrupt handling mechanisms */
if (hw->mac.type == fm10k_mac_pf) {
/* register callback func to eal lib */
rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
/* enable MISC interrupt */
fm10k_dev_enable_intr_pf(dev);
} else { /* VF */
rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
fm10k_dev_enable_intr_vf(dev);
}
/* Enable intr after callback registered */
rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
/* Make sure Switch Manager is ready before going forward. */
if (hw->mac.type == fm10k_mac_pf) {
int switch_ready = 0;
for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
fm10k_mbx_lock(hw);
hw->mac.ops.get_host_state(hw, &switch_ready);
fm10k_mbx_unlock(hw);
if (switch_ready)
break;
/* Delay some time to acquire async LPORT_MAP info. */
rte_delay_us(WAIT_SWITCH_MSG_US);
}
if (switch_ready == 0) {
PMD_INIT_LOG(ERR, "switch is not ready");
return -1;
}
}
/*
* Below function will trigger operations on mailbox, acquire lock to
* avoid race condition from interrupt handler. Operations on mailbox
* FIFO will trigger interrupt to PF/SM, in which interrupt handler
* will handle and generate an interrupt to our side. Then, FIFO in
* mailbox will be touched.
*/
fm10k_mbx_lock(hw);
/* Enable port first */
hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
MAX_LPORT_NUM, 1);
	/* Set unicast mode by default. The application can change to another
	 * mode later through other API functions.
	 */
hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
FM10K_XCAST_MODE_NONE);
fm10k_mbx_unlock(hw);
/* Make sure default VID is ready before going forward. */
if (hw->mac.type == fm10k_mac_pf) {
for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
if (hw->mac.default_vid)
break;
/* Delay some time to acquire async port VLAN info. */
rte_delay_us(WAIT_SWITCH_MSG_US);
}
if (!hw->mac.default_vid) {
PMD_INIT_LOG(ERR, "default VID is not ready");
return -1;
}
}
/* Add default mac address */
fm10k_MAC_filter_set(dev, hw->mac.addr, true,
MAIN_VSI_POOL_NUMBER);
return 0;
}
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
/* safe to close dev here */
fm10k_dev_close(dev);
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
/* disable uio/vfio intr */
rte_intr_disable(intr_handle);
	/* PF and VF have different interrupt handling mechanisms */
if (hw->mac.type == fm10k_mac_pf) {
/* disable interrupt */
fm10k_dev_disable_intr_pf(dev);
/* unregister callback func to eal lib */
rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
} else {
/* disable interrupt */
fm10k_dev_disable_intr_vf(dev);
rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
}
/* free mac memory */
if (dev->data->mac_addrs) {
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
}
memset(hw, 0, sizeof(*hw));
return 0;
}
/*
* The set of PCI devices this driver supports. This driver will enable both PF
* and SRIOV-VF devices.
*/
static const struct rte_pci_id pci_id_fm10k_map[] = {
{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_driver rte_pmd_fm10k = {
.pci_drv = {
.id_table = pci_id_fm10k_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = rte_eth_dev_pci_probe,
.remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_fm10k_dev_init,
.eth_dev_uninit = eth_fm10k_dev_uninit,
.dev_private_size = sizeof(struct fm10k_adapter),
};
RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio");
|
vicharl/containerdns
|
kdns/dpdk-17.02/app/test-crypto-perf/cperf_options.h
|
#ifndef _CPERF_OPTIONS_
#define _CPERF_OPTIONS_
#include <rte_crypto.h>
#define CPERF_PTEST_TYPE ("ptest")
#define CPERF_SILENT ("silent")
#define CPERF_POOL_SIZE ("pool-sz")
#define CPERF_TOTAL_OPS ("total-ops")
#define CPERF_BURST_SIZE ("burst-sz")
#define CPERF_BUFFER_SIZE ("buffer-sz")
#define CPERF_SEGMENTS_NB ("segments-nb")
#define CPERF_DEVTYPE ("devtype")
#define CPERF_OPTYPE ("optype")
#define CPERF_SESSIONLESS ("sessionless")
#define CPERF_OUT_OF_PLACE ("out-of-place")
#define CPERF_VERIFY ("verify")
#define CPERF_TEST_FILE ("test-file")
#define CPERF_TEST_NAME ("test-name")
#define CPERF_CIPHER_ALGO ("cipher-algo")
#define CPERF_CIPHER_OP ("cipher-op")
#define CPERF_CIPHER_KEY_SZ ("cipher-key-sz")
#define CPERF_CIPHER_IV_SZ ("cipher-iv-sz")
#define CPERF_AUTH_ALGO ("auth-algo")
#define CPERF_AUTH_OP ("auth-op")
#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
#define CPERF_CSV ("csv-friendly")
enum cperf_perf_test_type {
CPERF_TEST_TYPE_THROUGHPUT,
CPERF_TEST_TYPE_CYCLECOUNT,
CPERF_TEST_TYPE_LATENCY
};
extern const char *cperf_test_type_strs[];
enum cperf_op_type {
CPERF_CIPHER_ONLY = 1,
CPERF_AUTH_ONLY,
CPERF_CIPHER_THEN_AUTH,
CPERF_AUTH_THEN_CIPHER,
CPERF_AEAD
};
extern const char *cperf_op_type_strs[];
struct cperf_options {
enum cperf_perf_test_type test;
uint32_t pool_sz;
uint32_t total_ops;
uint32_t burst_sz;
uint32_t buffer_sz;
uint32_t segments_nb;
char device_type[RTE_CRYPTODEV_NAME_LEN];
enum cperf_op_type op_type;
uint32_t sessionless:1;
uint32_t out_of_place:1;
uint32_t verify:1;
uint32_t silent:1;
uint32_t csv:1;
char *test_file;
char *test_name;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_cipher_operation cipher_op;
uint16_t cipher_key_sz;
uint16_t cipher_iv_sz;
enum rte_crypto_auth_algorithm auth_algo;
enum rte_crypto_auth_operation auth_op;
uint16_t auth_key_sz;
uint16_t auth_digest_sz;
uint16_t auth_aad_sz;
};
void
cperf_options_default(struct cperf_options *options);
int
cperf_options_parse(struct cperf_options *options,
int argc, char **argv);
int
cperf_options_check(struct cperf_options *options);
void
cperf_options_dump(struct cperf_options *options);
#endif
|
YanjenChen/fibdrv
|
bign.c
|
#include "bign.h"
void add_bign128(struct bign128 *output, struct bign128 *x, struct bign128 *y)
{
output->upper = x->upper + y->upper;
if (y->lower > ~(x->lower))
output->upper++;
output->lower = x->lower + y->lower;
}
|
YanjenChen/fibdrv
|
client.c
|
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#define FIB_DEV "/dev/fibonacci"
/* #include "bign.h" */
int main()
{
long long sz;
unsigned long long buf[1];
char write_buf[] = "testing writing";
FILE *time_log;
struct timespec t_start, t_end;
    int offset = 100; /* TODO: try testing something bigger than the limit */
time_log = fopen("fibfastadd.time.log", "w");
if (!time_log) {
perror("Failed to open log file");
exit(1);
}
int fd = open(FIB_DEV, O_RDWR);
if (fd < 0) {
perror("Failed to open character device");
exit(1);
}
for (int i = 0; i <= offset; i++) {
sz = write(fd, write_buf, strlen(write_buf));
printf("Writing to " FIB_DEV ", returned the sequence %lld\n", sz);
}
/* Set which algorithm to use */
sz = write(fd, write_buf, 0);
for (int i = 0; i <= offset; i++) {
lseek(fd, i, SEEK_SET);
clock_gettime(CLOCK_REALTIME, &t_start);
sz = read(fd, buf, 1);
clock_gettime(CLOCK_REALTIME, &t_end);
printf("Reading from " FIB_DEV " at offset %d, returned the sequence ",
i);
printf("%llx.\n", buf[0]);
        /* Log execution time; include tv_sec so a delta that crosses a
         * second boundary does not come out negative. */
        long long elapsed = (t_end.tv_sec - t_start.tv_sec) * 1000000000LL +
                            (t_end.tv_nsec - t_start.tv_nsec);
        fprintf(time_log, "%d %lld %lld %lld\n", i, sz, elapsed,
                elapsed - sz);
}
for (int i = offset; i >= 0; i--) {
lseek(fd, i, SEEK_SET);
sz = read(fd, buf, 1);
printf("Reading from " FIB_DEV " at offset %d, returned the sequence ",
i);
printf("%llx.\n", buf[0]);
}
fclose(time_log);
close(fd);
return 0;
}
|
YanjenChen/fibdrv
|
bign.h
|
#ifndef HW2_BIGN_H
#define HW2_BIGN_H
/* 128 bits integer */
struct bign128 {
unsigned long long lower, upper;
};
void add_bign128(struct bign128 *output, struct bign128 *x, struct bign128 *y);
#endif /* HW2_BIGN_H */
|
paladin-t/flock
|
SimpleFlock.h
|
#ifndef __SIMPLE_FLOCK_H__
#define __SIMPLE_FLOCK_H__
#include "CommonTypes.h"
class FlockGroup {
public:
typedef Vec2f (* Adjuster)(const Vec2f &p, const Vec2f &t, int n, int l);
struct Object {
Object();
Object(const Vec2f &o);
Object(const Vec2f &o, const Object &t);
void clear(void);
Vec2f pos;
float dist;
};
public:
FlockGroup();
virtual ~FlockGroup();
virtual void begin(void);
virtual void end(void);
Vec2f getTargetPosition(const Vec2f &o, Adjuster adj = NULL);
void setTargetObject(const Vec2f &t);
public:
Adjuster adjuster;
protected:
Vec2f radius;
Object target;
Object nearest;
int now;
int last;
};
#endif // __SIMPLE_FLOCK_H__
|
paladin-t/flock
|
NNMonsterFlockGroup.h
|
#ifndef __NN_MONSTER_FLOCK_GROUP_H__
#define __NN_MONSTER_FLOCK_GROUP_H__
#include <map>
#include "util/SimpleFlock.h"
void clampInside(Vec2f &p);
class IMonsterAction;
class PlayerShape;
struct FlockInfo {
FlockInfo() {
}
FlockInfo(const Vec2f &p) : pos(p) {
}
Vec2f pos;
Vec2f target;
};
typedef std::map<IMonsterAction*, FlockInfo> FlockDict;
class MonsterFlock : public FlockGroup {
public:
MonsterFlock();
virtual ~MonsterFlock();
static bool open(void);
static bool close(void);
static MonsterFlock* instance(void);
virtual void begin(void);
virtual void end(void);
Vec2f get(IMonsterAction* m);
void targetTo(PlayerShape* p);
private:
static MonsterFlock* mSelf;
FlockDict mNow;
FlockDict mLast;
};
#endif // __NN_MONSTER_FLOCK_GROUP_H__
|
mhv/Beholder
|
Beholder/Beholder.h
|
//
// Beholder.h
// Beholder
//
// Created by <NAME> on 9/20/15.
// Copyright © 2015 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
//! Project version number for Beholder.
FOUNDATION_EXPORT double BeholderVersionNumber;
//! Project version string for Beholder.
FOUNDATION_EXPORT const unsigned char BeholderVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <Beholder/PublicHeader.h>
#import <Utils/Utils.h>
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabColorKeys.h
|
/**
* @file CCFScrollableTabColorKeys.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 02:04:20
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
#ifndef CCFScrollingTabBar_CCFScrollableTabColorKeys_h
#define CCFScrollingTabBar_CCFScrollableTabColorKeys_h
#define kCCFScrollableTabTextColor @"text"
#define kCCFScrollableTabDarkColor @"dark"
#define kCCFScrollableTabLightColor @"light"
#endif
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTab.h
|
/**
* @file CCFScrollableTab.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-08 14:25:21
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
#import "CCFScrollableTabColorKeys.h"
@protocol CCFScrollableTabDelegate;
/** CCFScrollableTab
The selectable entitled item in the view hierarchy.
*/
@interface CCFScrollableTab : UIView
/** Delegate
The object that responds to messages from the CCFScrollableTabDelegate protocol on our behalf.
*/
@property (nonatomic, weak) id <CCFScrollableTabDelegate> delegate;
/** Selection
Returns the selection state for this item
*/
@property (nonatomic, assign, getter = isSelected) BOOL selected;
/** Location
Returns the horizontal location of the view in its superview
*/
@property (nonatomic, assign) CGFloat location;
///---------------------------------------------------------------------------------------
/// @name Class methods
///---------------------------------------------------------------------------------------
/** Width for text
Returns the width of the text provided when drawn with the default font
@param sampleText The text to measure
@return The width of the text provided
*/
+ (CGFloat)widthForText:(NSString *)sampleText;
/** Designated initializer
The designated initializer for the class.
@param frame The frame in which to draw the view.
@param title An NSString containing a localized title for this item.
@param colorInfo An NSDictionary containing information about the colors to use for the view. The keys that must be included are: kCCFScrollableTabLightColor, kCCFScrollableTabDarkColor, and kCCFScrollableTabTextColor.
@return A new instance of the class.
*/
- (id)initWithFrame:(CGRect)frame title:(NSString *)aTitle colorInfo:(NSDictionary *)colorInfo index:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
example/CCFViewController.h
|
//
// CCFViewController.h
// CCFScrollingTabBar
//
// Created by <NAME> on 3/8/12.
// Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
#import "CCFScrollableTabDelegate.h"
#import "CCFScrollableTabViewDelegate.h"
#import "CCFScrollableTabViewDataSource.h"
@class CCFScrollableTabView;
@interface CCFViewController : UIViewController <CCFScrollableTabViewDataSource,CCFScrollableTabViewDelegate>
@property (nonatomic, weak) IBOutlet UILabel *textLabel;
@property (nonatomic, weak) IBOutlet CCFScrollableTabView *tabStrip;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabViewDataSource.h
|
/**
* @file CCFScrollableTabViewDataSource.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 04:17:45
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@class CCFScrollableTabView;
/* CCFScrollableTabViewDataSource
This protocol declares the methods that data sources of CCFScrollableTabView must implement in order to provide
data in support of its construction.
*/
@protocol CCFScrollableTabViewDataSource <NSObject>
@required
/** Titles in the scrollable tab view
A required method of the CCFScrollableTabViewDataSource protocol. Returns an array of titles to use for the tab view.
The data source is responsible for any localization that needs to occur before returning the strings.
@param tabView The CCFScrollableTabView for which the titles are requested
@return An NSArray instance containing all of the titles that are to be displayed
*/
- (NSArray *)titlesInScrollableTabView:(CCFScrollableTabView *)tabView;
/** Light color for scrollable tab view
The UIColor instance for the top portion of the background gradient of the scrollable tab view.
@param tabView The CCFScrollableTabView for which the color is requested
@return A UIColor instance to use for the top color of the background gradient
*/
- (UIColor *)lightColorInScrollableTabView:(CCFScrollableTabView *)tabView;
/** Dark color for scrollable tab view
The UIColor instance for the bottom portion of the background gradient of the scrollable tab view.
@param tabView The CCFScrollableTabView for which the color is requested
@return A UIColor instance to use for the bottom color of the background gradient
*/
- (UIColor *)darkColorInScrollableTabView:(CCFScrollableTabView *)tabView;
/** Text color for titles in scrollable tab view
The UIColor instance for the title text in the tab view
@param tabView The CCFScrollableTabView for which the color is requested
@return A UIColor instance to use for the text in the view
*/
- (UIColor *)textColorInScrollableTabView:(CCFScrollableTabView *)tabView;
@end
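// A minimal data source sketch (illustrative only; the class name, string literals,
// and colors below are placeholders, not part of the framework):
//
//   @interface CCFDemoDataSource : NSObject <CCFScrollableTabViewDataSource>
//   @end
//
//   @implementation CCFDemoDataSource
//   - (NSArray *)titlesInScrollableTabView:(CCFScrollableTabView *)tabView {
//       return @[NSLocalizedString(@"First", nil), NSLocalizedString(@"Second", nil)];
//   }
//   - (UIColor *)lightColorInScrollableTabView:(CCFScrollableTabView *)tabView {
//       return [UIColor lightGrayColor];
//   }
//   - (UIColor *)darkColorInScrollableTabView:(CCFScrollableTabView *)tabView {
//       return [UIColor darkGrayColor];
//   }
//   - (UIColor *)textColorInScrollableTabView:(CCFScrollableTabView *)tabView {
//       return [UIColor whiteColor];
//   }
//   @end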
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabViewDelegate.h
|
/**
* @file CCFScrollableTabViewDelegate.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 04:21:30
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@class CCFScrollableTabView;
/** CCFScrollableTabViewDelegate
This protocol describes the required methods that delegates of CCFScrollableTabView must implement
*/
@protocol CCFScrollableTabViewDelegate <NSObject>
/** Item selection
This method notifies that the delegate of CCFScrollableTabView that an item was selected
@param tabView The CCFScrollableTabView instance that is sending the message
@param index The index of the item that was selected.
*/
- (void)scrollableTabView:(CCFScrollableTabView *)tabView didSelectItemAtIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabContentView.h
|
/**
* @file CCFScrollableTabContentView.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-08 16:17:29
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
#import "CCFScrollableTabDelegate.h"
@protocol CCFScrollableTabContentViewDelegate;
@class CCFScrollableTab;
/** CCFScrollableTabContentView
The content view for the CCFScrollableTabScrollView, containing one or more CCFScrollableTab instances.
*/
@interface CCFScrollableTabContentView : UIView <CCFScrollableTabDelegate>
/** Delegate
The object that responds to messages of the CCFScrollableTabContentViewDelegate on our behalf.
*/
@property (nonatomic, weak) id <CCFScrollableTabContentViewDelegate> delegate;
///---------------------------------------------------------------------------------------
/// @name Initialization
///---------------------------------------------------------------------------------------
/** Designated initializer
The designated initializer for the class.
@param frame The frame in which to draw the view.
@param titles An NSArray containing localized strings for the text at each tab in order.
@param colorInfo An NSDictionary containing information about the colors to use for the view. The keys that must be included are: kCCFScrollableTabLightColor, kCCFScrollableTabDarkColor, and kCCFScrollableTabTextColor.
@return A new instance of the class.
*/
- (id)initWithFrame:(CGRect)frame titles:(NSArray *)titles colorInfo:(NSDictionary *)colorInfo;
///---------------------------------------------------------------------------------------
/// @name Selection
///---------------------------------------------------------------------------------------
/** Item selection
Selects tab item at given index
@param index The index of the tab to select
*/
- (void)selectItemAtIndex:(NSInteger)index;
/** Item at index
Returns the tab item at the provided index
@param index The index whose tab is requested
@return The CCFScrollableTab object at the given index.
*/
- (CCFScrollableTab *)tabAtIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabContentViewDelegate.h
|
<filename>source/CCFScrollableTabContentViewDelegate.h
/**
* @file CCFScrollableTabContentViewDelegate.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 04:56:41
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@class CCFScrollableTabContentView;
/** CCFScrollableTabContentViewDelegate protocol
This protocol declares the required methods that delegates of CCFScrollableTabContentView must implement.
*/
@protocol CCFScrollableTabContentViewDelegate <NSObject>
/** Item selected
This method is called on the delegate of the content view when an item is selected.
@param contentView The CCFScrollableTabContentView instance that is sending the message.
@param index The index of the selected tab item.
*/
- (void)scrollableTabContentView:(CCFScrollableTabContentView *)contentView didSelectItemAtIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabStopDelegate.h
|
<reponame>BlackBears/CCFScrollingTabBar
/**
* @file CCFScrollableTabStopDelegate.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 09:24:54
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@class CCFScrollableTabStop;
@protocol CCFScrollableTabStopDelegate <NSObject>
- (void)tabStopDidReceiveTouch:(CCFScrollableTabStop *)tabStop;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabView.h
|
/**
* @file CCFScrollableTabView.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 04:13:19
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
#import "CCFScrollableTabScrollViewDelegate.h"
#import "CCFScrollableTabViewDataSource.h"
#import "CCFScrollableTabViewDelegate.h"
#import "CCFScrollableTabStopDelegate.h"
/** Scrollable tab view
This is the top-level class in the view hierarchy.
*/
@interface CCFScrollableTabView : UIView <CCFScrollableTabScrollViewDelegate, CCFScrollableTabStopDelegate, UIScrollViewDelegate>
/** Delegate
An object that receives messages of the CCFScrollableTabViewDelegate protocol
*/
@property (nonatomic, weak) id <CCFScrollableTabViewDelegate> delegate;
/** Datasource
An object that receives messages of the CCFScrollableTabViewDataSource protocol
*/
@property (nonatomic, weak) id <CCFScrollableTabViewDataSource> dataSource;
///---------------------------------------------------------------------------------------
/// @name Managing selection
///---------------------------------------------------------------------------------------
/** Set selected segment index
Sets the index of the selected segment. As a side effect the appropriate delegate method from the CCFScrollableTabViewDelegate protocol is called.
@param index The index of the tab to select.
*/
- (void)setSelectedItemIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
example/CCFAppDelegate.h
|
//
// CCFAppDelegate.h
// CCFScrollingTabBar
//
// Created by <NAME> on 3/8/12.
// Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
#import <UIKit/UIKit.h>
@class CCFViewController;
@interface CCFAppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
@property (strong, nonatomic) CCFViewController *viewController;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabDelegate.h
|
<gh_stars>1-10
/**
* @file CCFScrollableTabDelegate.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-08 15:47:01
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@protocol CCFScrollableTabDelegate <NSObject>
- (void)scrollableTabDidSelectItemAtIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabScrollViewDelegate.h
|
/**
* @file CCFScrollableTabScrollViewDelegate.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 05:02:55
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
@class CCFScrollableTabScrollView;
/** CCFScrollableTabScrollViewDelegate
This protocol declares the required methods that delegates of CCFScrollableTabScrollView must implement
*/
@protocol CCFScrollableTabScrollViewDelegate <NSObject>
/** Item selection
This method is called on the delegate when the user selects an item
@param scrollView the CCFScrollableTabScrollView that is sending the message
@param index The index of the selected item
*/
- (void)scrollableTabScrollView:(CCFScrollableTabScrollView *)scrollView didSelectItemAtIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabScrollView.h
|
<filename>source/CCFScrollableTabScrollView.h<gh_stars>1-10
/**
* @file CCFScrollableTabScrollView.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 05:01:32
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
#import "CCFScrollableTabContentViewDelegate.h"
@protocol CCFScrollableTabScrollViewDelegate;
/* CCFScrollableTabScrollView
The scrollable portion of the view.
*/
@interface CCFScrollableTabScrollView : UIScrollView <CCFScrollableTabContentViewDelegate>
/** Delegate
The object that receives messages of the CCFScrollableTabScrollViewDelegate protocol on our behalf.
*/
@property (nonatomic, weak) id <CCFScrollableTabScrollViewDelegate> tabDelegate;
///---------------------------------------------------------------------------------------
/// @name Initialization
///---------------------------------------------------------------------------------------
/** Designated initializer
Returns a newly initialized instance of the class given a frame, set of titles to use, and information about the colors for the view.
@param frame The frame in which to draw the view.
@param titles An NSArray containing localized strings for the text at each tab in order.
@param colorInfo An NSDictionary containing information about the colors to use for the view. The keys that must be included are: kCCFScrollableTabLightColor, kCCFScrollableTabDarkColor, and kCCFScrollableTabTextColor.
@return A new instance of the class.
*/
- (id)initWithFrame:(CGRect)frame titles:(NSArray *)titles colorInfo:(NSDictionary *)colorInfo;
///---------------------------------------------------------------------------------------
/// @name Selection
///---------------------------------------------------------------------------------------
/** Set selected item
Sets the index of the item that should be selected
@param index The index to select
*/
- (void)setSelectedIndex:(NSInteger)index;
@end
|
BlackBears/CCFScrollingTabBar
|
source/CCFScrollableTabStop.h
|
/**
* @file CCFScrollableTabStop.h
* @author <NAME> (www.cocoafactory.com)
*
* @date 2012-03-09 01:55:59
* @version 1.0
*
* @note Copyright 2011 Cocoa Factory, LLC. All rights reserved
*/
enum {
CCFScrollableTabStopRight,
CCFScrollableTabStopLeft
};
typedef NSInteger CCFScrollableTabStopSide;
@protocol CCFScrollableTabStopDelegate;
/** Scrollable tab stop
A UIView subclass that draws an arrow on either side of the view for facilitated scrolling.
*/
@interface CCFScrollableTabStop : UIView
/** Highlighted
Defines the highlighting state of the tab stop. Only one tab stop (the right or the left) can be highlighted at a given time.
*/
@property (nonatomic, assign, getter = isHighlighted) BOOL highlighted;
/** Side
The sidedness of the tab stop. The righthand stop has a value of CCFScrollableTabStopRight, whereas its contralateral stop is CCFScrollableTabStopLeft.
*/
@property (nonatomic, assign) CCFScrollableTabStopSide side;
/** Delegate
An object that receives messages of the CCFScrollableTabStopDelegate protocol
*/
@property (nonatomic, weak) id <CCFScrollableTabStopDelegate> delegate;
///---------------------------------------------------------------------------------------
/// @name Initialization
///---------------------------------------------------------------------------------------
/** Designated initializer
Returns an instance of the class given a frame, color information dictionary and the side of the stop
@param frame A CGRect that defines the frame of the stop.
@param colorInfo An NSDictionary that contains the colors to use for the stop. It should have the following keys: kCCFScrollableTabLightColor and kCCFScrollableTabDarkColor.
@param aSide The sidedness of the stop as a CCFScrollableTabStopSide parameter
@return Returns a newly initialized instance of the class.
*/
- (id)initWithFrame:(CGRect)frame colorInfo:(NSDictionary *)colorInfo side:(CCFScrollableTabStopSide)aSide;
@end
|
xinyandai/tensor
|
src/tensor/tensor.h
|
//
// Created by xinyan on 24/4/2019.
//
#pragma once
#ifndef TENSOR_TENSOR_H
#define TENSOR_TENSOR_H
#define CHECK_SHAPE (true)
#define FLAG_CONTIGUOUS (1<<0)
#define FLAG_TRANSPOSED (1<<1)
#include <array>
#include <algorithm>
#include <memory>
#include <random>
#include <type_traits>
#include "helper.h"
namespace tensor {
template<typename T=float, size_type D=2, bool HOST=true>
class Tensor {
private:
template <size_type N, size_type R=D>
void _continuous_stride() {
static_assert(N <= R);
static_assert(N >= 0);
if constexpr (N > 0 ) {
if constexpr (N == R ) {
stride_[N-1] = 1;
} else {
stride_[N-1] = stride_[N] * shape_[N];
}
return _continuous_stride< N - 1, R>();
}
}
template <size_type R>
void _check_shape (const Tensor<T, R, HOST> &t) {
static_assert(R <= D);
if constexpr CHECK_SHAPE {
for (int i = 0; i < R; ++i) {
if(shape_[i]!=t.shape()[i]) {
char message[1024];
sprintf(message, "Not doable for different "
"shape at dim %d from %d to %d",
i, shape_[i], t.shape()[i]);
throw std::runtime_error(message);
}
}
}
}
public:
Tensor<T, D, HOST> operator[] (
std::array<Slice, D> slices) const {
Tensor<T, D, HOST> result;
result.shape_ = {0};
result.data_= data_;
#pragma unroll
for (int i = 0; i < D; ++i) {
slices[i].set_shape(shape_[i]);
}
#pragma unroll
for (int i = 0; i < D; ++i) {
result.shape_[i] = slices[i].slice_size();
}
#pragma unroll
for (int i = 0; i < D; ++i) {
result.data_ += slices[i].slice_offset(stride_[i]);
}
#pragma unroll
for (int i = 0; i < D; ++i) {
result.stride_[i] = slices[i].stride_size(stride_[i]);
}
result.size_ = MULTIPLIER<size_type, D>(result.shape_);
result.ptr_= ptr_;
return result;
}
const T& operator[] (std::array<size_type , D> slices) const {
T *offset = data_;
#pragma unroll
for (int i = 0; i < D; ++i) {
offset += (stride_[i] * slices[i]);
}
return *offset;
}
T& operator[] (std::array<size_type , D> slices) {
T *offset = data_;
#pragma unroll
for (int i = 0; i < D; ++i) {
offset += (stride_[i] * slices[i]);
}
return *offset;
}
explicit Tensor() : size_(0), shape_({0}),
data_(nullptr), ptr_(data_), flag_(0) {}
explicit Tensor(const std::array<size_type, D> & shapes) :
size_(MULTIPLIER<size_type, D>(shapes)), shape_(shapes),
data_(new T[size_]), ptr_(data_), flag_(FLAG_CONTIGUOUS) {
this->_continuous_stride<D>();
}
// reshape
template <size_type R>
Tensor(Tensor<T, R, HOST> &t,
const std::array<size_type, D> & shapes):
size_(MULTIPLIER<size_type, D>(shapes)), shape_(shapes),
data_(t.data()), ptr_(t.ptr()),
flag_((D == R) ? FLAG_CONTIGUOUS : 0) {
static_assert(D >= R);
if (size_ == t.size()) {
this->_continuous_stride<D>();
} else if (size_ > t.size() && (size_ % t.size() == 0)) {
this->_continuous_stride<R, R>();
#pragma unroll
for (int i = R; i < D; ++i) {
stride_[i] = 0;
}
} else {
throw std::runtime_error(
"construction failed for un-compatible shape.");
}
}
// copying constructor
Tensor(const Tensor<T, D, HOST> &t) : size_(t.size_),
stride_(t.stride_), shape_(t.shape_), data_(t.data_),
ptr_(t.ptr_) , flag_(t.flag_) {}
// Move constructor.
Tensor(Tensor<T, D, HOST>&& t) noexcept
: size_(t.size_),
stride_(t.stride_),
shape_(t.shape_),
data_(t.data_),
ptr_(std::move(t.ptr_)) ,
flag_(t.flag_) {}
// Move assignment operator.
Tensor& operator = (Tensor<T, D, HOST>&& t) noexcept {
size_ = t.size_;
stride_ = t.stride_;
shape_ = t.shape_;
data_ = t.data_;
ptr_ = std::move(t.ptr_);
flag_ = t.flag_;
return *this;
}
// assignment operator
Tensor& operator = (T t) {
fill(t);
return *this;
}
Tensor& operator = (const Tensor<T, D, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, D >(
data_, t.data_, stride_.data(), t.stride_.data(),
shape_.data(), assignment<T >());
return *this;
}
template <size_type R>
Tensor& operator = (const Tensor<T, R, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, R >(
data_, t.data(), stride_.data(), t.stride().data(),
shape_.data(), assignment<T >());
return *this;
}
template <size_type R>
Tensor& operator += (const Tensor<T, R, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, R >(
data_, t.data(), stride_.data(), t.stride().data(),
shape_.data(), adder<T >());
return *this;
}
template <size_type R>
Tensor& operator -= (const Tensor<T, R, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, R >(
data_, t.data(), stride_.data(), t.stride().data(),
shape_.data(), subtract<T >());
return *this;
}
template <size_type R>
Tensor& operator *= (const Tensor<T, R, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, R >(
data_, t.data(), stride_.data(), t.stride().data(),
shape_.data(), multiplier<T >());
return *this;
}
template <size_type R>
Tensor& operator /= (const Tensor<T, R, HOST> &t) {
_check_shape(t);
operation_by_stride<T, D, R >(
data_, t.data(), stride_.data(), t.stride().data(),
shape_.data(), divider<T >());
return *this;
}
template <size_type R>
Tensor<T, D, HOST> operator+ (const Tensor<T, R, HOST> &t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out += t;
return out;
}
template <size_type R>
Tensor<T, D, HOST> operator- (const Tensor<T, R, HOST> &t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out -= t;
return out;
}
template <size_type R>
Tensor<T, D, HOST> operator* (const Tensor<T, R, HOST> &t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out *= t;
return out;
}
template <size_type R>
Tensor<T, D, HOST> operator/ (const Tensor<T, R, HOST> &t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out /= t;
return out;
}
Tensor& operator += (T t) {
operation_by_stride<T, D, 0 >(
data_, &t, stride_.data(), nullptr,
shape_.data(), adder<T >());
return *this;
}
Tensor& operator -= (T t) {
operation_by_stride<T, D, 0 >(
data_, &t, stride_.data(), nullptr,
shape_.data(), subtract<T >());
return *this;
}
Tensor& operator *= (T t) {
operation_by_stride<T, D, 0 >(
data_, &t, stride_.data(), nullptr,
shape_.data(), multiplier<T >());
return *this;
}
Tensor& operator /= (T t) {
operation_by_stride<T, D, 0 >(
data_, &t, stride_.data(), nullptr,
shape_.data(), divider<T >());
return *this;
}
Tensor<T, D, HOST> operator+ (T t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out += t;
return out;
}
Tensor<T, D, HOST> operator- (T t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out -= t;
return out;
}
Tensor<T, D, HOST> operator* (T t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out *= t;
return out;
}
Tensor<T, D, HOST> operator/ (T t) {
Tensor<T, D, HOST> out(shape_);
out = *this;
out /= t;
return out;
}
~ Tensor() = default;
template <size_type R>
inline Tensor<T, R, HOST>
broadcast_to (const std::array<size_type, R> & shapes) {
return (Tensor<T, R, HOST>(*this, shapes));
}
template <size_type R>
inline Tensor<T, R, HOST>
reshape (const std::array<size_type, R> & shapes) {
return (Tensor<T, R, HOST>(*this, shapes));
}
// View of `t` with axis_1 and axis_2 exchanged; it shares storage with `t`,
// so the contiguous flag is dropped.
Tensor(const Tensor<T, D, HOST> &t,
       size_type axis_1, size_type axis_2) :
    size_(t.size_),
    stride_(t.stride_),
    shape_(t.shape_),
    data_(t.data_),
    ptr_(t.ptr_),
    flag_(t.flag_ & ~FLAG_CONTIGUOUS) {
    shape_[axis_1] = t.shape_[axis_2];
    shape_[axis_2] = t.shape_[axis_1];
    stride_[axis_1] = t.stride_[axis_2];
    stride_[axis_2] = t.stride_[axis_1];
}
Tensor<T, D, HOST>
transpose () const {
return move_axis(0, -1);
}
Tensor<T, D, HOST>
move_axis (size_type a, size_type b) const {
if (a==b)
return *this;
if (a < 0)
a += D;
if (b < 0)
b += D;
if (a >= D || b >=D)
throw std::runtime_error("the axis to be moved out of bound.");
if (a > b)
return move_axis(b, a);
Tensor<T, D, HOST> copied(*this);
#pragma unroll
for (int i = a; i < b ; ++i) {
copied.shape_[i] = shape_[i + 1];
copied.stride_[i] = stride_[i + 1];
}
copied.shape_[b] = shape_[a];
copied.stride_[b] = stride_[a];
if constexpr (D == 2) {
if (get_flag(FLAG_CONTIGUOUS)) {
copied.unset_flag(FLAG_CONTIGUOUS);
copied.set_flag(FLAG_TRANSPOSED);
} else if (get_flag(FLAG_TRANSPOSED)) {
copied.unset_flag(FLAG_TRANSPOSED);
copied.set_flag(FLAG_CONTIGUOUS);
}
}
return (copied);
}
void fill(T value) {
if (get_flag(FLAG_CONTIGUOUS)) {
for (int i = 0; i < size_; ++i)
data_[i] = value;
}
else {
operation_by_stride<T, D, 0>(
data_, &value, stride_.data(), NULL,
shape_.data(), assignment<T >());
}
}
void dump_shape() const {
std::cout << "shapes (";
#pragma unroll
for (int i = 0; i < D; ++i) {
std::cout << shape_[i] << ", " ;
}
std::cout << ")"<< std::endl;
}
void dump() const {
tensor::dump<T, D>(data_, stride_.data(), shape_.data());
dump_shape();
}
Tensor<T, D, HOST> as_contiguous() const {
if (get_flag(FLAG_CONTIGUOUS)) {
return *this;
} else {
Tensor<T, D, HOST> copied(this->shape());
T *data = copied.data();
operation_by_stride<T, D, D>(
copied.data(), data_, copied.stride_.data(),
stride_.data(), shape_.data(), assignment<T >());
return copied;
}
}
const std::shared_ptr<T[]> &ptr() const {return ptr_;}
const T* data() const {return data_;}
T* data() {return data_;}
bool get_flag(int flag) const {return (flag_ & flag) > 0; }
void set_flag(int flag) { flag_ |= flag; }
void unset_flag(int flag) { flag_ &= ~flag; }
static constexpr size_type dim() {return D;}
size_type size() const {return size_;}
const std::array<size_type, D > &
shape() const {return shape_;}
const std::array<size_type, D > &
stride() const {return stride_;}
protected:
size_type size_;
// Tuple to step in each dimension when traversing an array.
std::array<size_type, D> stride_;
std::array<size_type, D> shape_;
T *data_;
std::shared_ptr<T[]> ptr_;
int flag_;
};
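// A minimal usage sketch (illustrative only, not part of the library API surface):
//
//   using namespace tensor;
//   Tensor<float, 2> a({3, 4});                 // owning 3x4 row-major tensor
//   a.fill(1.0f);
//   a[{1, 2}] = 5.0f;                           // element access
//   Tensor<float, 2> row = a[{S(1), S()}];      // slicing returns a view that shares storage
//   row += 2.0f;                                // in-place scalar op on the view updates `a`
//   Tensor<float, 2> at = a.transpose();        // strided view, not contiguous
//   Tensor<float, 2> ac = at.as_contiguous();   // materializes the transpose into new storage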
// tensor calculation
template<typename T, size_type D, bool HOST, class B>
Tensor<T, D, HOST> add (
const Tensor<T, D, HOST> &a,
const B &b,
Tensor<T, D, HOST> *out) {
(*out) = a;
(*out) += b;
return *out;
}
template<typename T, size_type D, bool HOST, class B>
Tensor<T, D, HOST> multiply (
const Tensor<T, D, HOST> &a,
const B &b,
Tensor<T, D, HOST> *out) {
(*out) = a;
(*out) *= b;
return *out;
}
template<typename T, size_type D, bool HOST, class B>
Tensor<T, D, HOST> divide (
const Tensor<T, D, HOST> &a,
const B &b,
Tensor<T, D, HOST> *out) {
(*out) = a;
(*out) /= b;
return *out;
}
inline Tensor<size_type, 1, true> range(size_type size) {
Tensor<size_type , 1, true> re({size});
for (int i = 0; i < size; ++i) {
re.data()[i] = i;
}
return re;
}
namespace random {
template<typename T, size_type D>
Tensor<T, D, true> normal(T mean, T std_v, std::array<size_type, D > shape) {
Tensor<T, D, true> re(shape);
std::default_random_engine generator;
std::normal_distribution<T > distribution(mean, std_v);
for (size_type i=0; i < re.size(); i++) {
*(re.data() + i) = distribution(generator);
}
return re;
}
} // namespace random
} // namespace tensor
#endif //TENSOR_TENSOR_H
|
xinyandai/tensor
|
src/tensor/io.h
|
//
// Created by xinyan on 12/5/2019.
//
#pragma once
#ifndef TENSOR_IO_H
#define TENSOR_IO_H
#include <iostream>
#include <fstream>
#include "tensor.h"
namespace tensor {
template <typename T>
Tensor<T, 2, true>
vecs(const char* fvecs) {
std::ifstream fin(fvecs,
std::ios::binary | std::ios::ate);
if (!fin) {
char message[1024];
sprintf(message, "cannot open file %s", fvecs);
throw std::runtime_error(message);
}
size_t fileSize = fin.tellg();
fin.seekg(0, fin.beg);
if (fileSize == 0) {
char message[1024];
sprintf(message, "File size is 0 %s", fvecs);
throw std::runtime_error(message);
}
int dim;
fin.read(reinterpret_cast<char*>(&dim), sizeof(int));
size_t bytesPerRecord = 1LL * dim * sizeof(T) + 4;
if (fileSize % bytesPerRecord != 0) {
char message[1024];
sprintf(message, "File not aligned [%s]", fvecs);
throw std::runtime_error(message);
}
size_t cardinality = fileSize / bytesPerRecord;
if (cardinality > MAX_ELEMENT) {
char message[1024];
sprintf(message, "File size is %d (> max_elements %d) [%s]",
cardinality, MAX_ELEMENT, fvecs);
throw std::runtime_error(message);
}
Tensor<T, 2, true> t({int(cardinality), dim});
T* data = t.data();
fin.read((char*)data, sizeof(T) * dim);
for (int i = 1; i < cardinality; ++i) {
fin.read((char*)&dim, 4);
fin.read((char*)(data + i * dim),
sizeof(T) * dim);
}
fin.close();
return t;
}
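// Usage sketch (illustrative; "base.fvecs" is a placeholder path to an *.fvecs file,
// i.e. records of a 4-byte dimension followed by `dim` values of type T):
//
//   tensor::Tensor<float, 2> base = tensor::vecs<float>("base.fvecs");
//   base.dump_shape();   // prints (cardinality, dim)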
}
#endif //TENSOR_IO_H
|
xinyandai/tensor
|
src/tensor/linalg.h
|
//
// Created by xinyan on 30/4/2019.
//
#pragma once
#ifndef TENSOR_CALCULATOR_H
#define TENSOR_CALCULATOR_H
#include "tensor.h"
#include "simd.h"
#ifndef FINTEGER
#define FINTEGER long
#endif
extern "C" {
/* declare BLAS functions, see http://www.netlib.org/clapack/cblas/ */
int
sgemm_ (
const char *transa, const char *transb,
FINTEGER *m, FINTEGER * n, FINTEGER *k,
const float *alpha, const float *a,
FINTEGER *lda, const float *b, FINTEGER *ldb,
float *beta, float *c, FINTEGER *ldc);
int dgemm_(
char *transa, char *transb,
FINTEGER *m, FINTEGER *n, FINTEGER *k,
const double *alpha, const double *a,
FINTEGER *lda, const double *b, FINTEGER *ldb,
const double *beta, const double *c, FINTEGER *ldc);
}
namespace tensor {
template<typename T>
Tensor<T, 2> _mm (
const Tensor<T, 2> &a,
const Tensor<T, 2> &b,
Tensor<T, 2> *out,
T alpha = 1.0f,
T beta = 0.0f) {
// computes alpha*op( A )*op( B ) + beta*C
// The code may look odd because the tensors are row-major while BLAS is
// column-major. NT_A / NT_B are true if tensor a / b is already
// transposed. We also treat a tensor as contiguous when it is merely a
// transposed view.
const bool NT_A = a.get_flag(FLAG_TRANSPOSED);
const bool NT_B = b.get_flag(FLAG_TRANSPOSED);
if (!a.get_flag(FLAG_CONTIGUOUS | FLAG_TRANSPOSED)) {
return mm(a.as_contiguous(), b, out);
}
if (!b.get_flag(FLAG_CONTIGUOUS | FLAG_TRANSPOSED)) {
return mm(a, b.as_contiguous(), out);
}
// M specifies the number of rows of the matrix op( A )
// and of the matrix C. M must be at least zero.
FINTEGER M = a.shape()[0] ;
// On entry, N specifies the number of columns of the matrix
// op( B ) and the number of columns of the matrix C.
FINTEGER N = b.shape()[1];
// On entry, K specifies the number of columns of the matrix
// op( A ) and the number of rows of the matrix op( B ).
FINTEGER K = a.shape()[1];
if (K != b.shape()[0] || M *N != out->size()) {
throw std::runtime_error(
"shape not matched in matrix multiplication.");
}
// On entry, LDA specifies the first dimension of A as declared
// in the calling (sub) program. When TRANSA = 'N' or 'n' then
// LDA must be at least max( 1, m ), otherwise LDA must be at
// least max( 1, k ).
FINTEGER LDA = NT_A ? M : K;
// On entry, LDB specifies the first dimension of B as declared
// in the calling (sub) program. When TRANSB = 'N' or 'n' then
// LDB must be at least max( 1, k ), otherwise LDB must be at
// least max( 1, n ).
FINTEGER LDB = NT_B ? K : N;
// On entry, LDC specifies the first dimension of C as declared
// in the calling (sub) program. LDC must be at least
// max( 1, m ).
FINTEGER LDC = M;
if constexpr (std::is_same<T, float >::value) {
sgemm_(NT_A ? "N" : "T", NT_B ? "N" : "T", &M, &N, &K,
&alpha, a.data(), &LDA, b.data(), &LDB,
&beta, out->data(), &LDC);
} else if constexpr (std::is_same<T, double >::value) {
dgemm_(NT_A ? "N" : "T", NT_B ? "N" : "T", &M, &N, &K,
&alpha, a.data(), &LDA, b.data(), &LDB,
&beta, out->data(), &LDC);
}
return *out;
}
template<typename T>
Tensor<T, 2> mm (
const Tensor<T, 2> &a,
const Tensor<T, 2> &b,
Tensor<T, 2> *out,
T alpha = 1.0f,
T beta = 0.0f) {
return _mm(b.transpose(), a.transpose(), out, alpha, beta);
}
template<typename T>
Tensor<T, 2> mm (
const Tensor<T, 2> &a,
const Tensor<T, 2> &b,
T alpha = 1.0f,
T beta = 0.0f) {
Tensor<T, 2> out({a.shape()[0], b.shape()[1]});
mm(a, b, &out, alpha, beta);
return out;
}
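// Usage sketch (illustrative; assumes T is float or double and a BLAS library
// providing sgemm_/dgemm_ is linked):
//
//   auto A = tensor::random::normal<float, 2>(0.f, 1.f, {128, 64});
//   auto B = tensor::random::normal<float, 2>(0.f, 1.f, {64, 32});
//   tensor::Tensor<float, 2> C = tensor::mm(A, B);   // 128x32, C = A * B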
template<typename T>
Tensor<size_type, 1> vq (
const Tensor<T, 2> &a,
const Tensor<T, 2> &b) {
Tensor<T, 2> l2dist = l2_sqr(a, b);
return arg_min(l2dist, 1);
}
template<typename T>
Tensor<T, 2> kmeans (const Tensor<T, 2> &x,
const size_type K, const size_type n_iter) {
size_type N = x.shape()[0];
size_type D = x.shape()[1];
Tensor<T, 2> c = x[{S(0, K), S()}].as_contiguous();
for (size_type iter = 0; iter < n_iter; iter++) {
Tensor<size_type, 1> codes = vq(x, c);
Tensor<T, 1> counter({K});
counter.fill(0.f);
c.fill(0.f);
for (size_type i = 0; i < N; i++) {
size_type code_idx = codes[{i}];
c[{S(code_idx), S()}] += x[{S(i), S()}];
counter[{code_idx}] += 1;
}
for (size_type i = 0; i < K; i++) {
if (counter[{i}] > 0.0) {
c[{S(i), S()}] /= counter[{i}];
} else {
std::cout << "[warning]: empty bucket at iteration "
<< iter + 1 << std::endl;
}
}
}
return c;
}
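// Usage sketch (illustrative): cluster 10000 16-d points into 256 centroids,
// then assign each point to its nearest centroid.
//
//   auto x = tensor::random::normal<float, 2>(0.f, 1.f, {10000, 16});
//   tensor::Tensor<float, 2> centroids = tensor::kmeans(x, 256, 20);
//   tensor::Tensor<tensor::size_type, 1> codes = tensor::vq(x, centroids);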
template<typename T>
Tensor<T, 1> mv (
const Tensor<T, 2> &a, const T *b, Tensor<T, 1> * out) {
size_type M = a.shape()[0], N = a.shape()[1];
#pragma omp parallel for
for (int i = 0; i < M; ++i) {
out->data()[i] = fvec_inner_product(a.data() + i * N, b, N);
}
return *out;
}
template<typename T>
Tensor<T, 1> mv (const Tensor<T, 2> &a, const T *b) {
Tensor<T, 1> out({a.shape()[0]});
mv(a, b, &out);
return out;
}
template<typename T, size_type D, class F >
Tensor<T, D-1> _reduce(
const Tensor<T, D> &a, size_type axis, F f) {
const Tensor<T, D> moved_a = a.move_axis(axis, D-1);
std::array<size_type, D-1> shapes;
#pragma unroll
for (size_type i = 0; i < D-1; ++i) {
shapes[i] = moved_a.shape()[i];
}
Tensor<T, D-1> sqr(shapes);
reduce_by_stride<T, D> (
sqr.data(), moved_a.data(),
sqr.stride().data(), moved_a.stride().data(),
moved_a.shape().data(), f);
return sqr;
}
template<typename T, size_type D, class F>
Tensor<size_type, D-1> _arg_reduce(
const Tensor<T, D> &a, size_type axis, F f) {
const Tensor<T, D> moved_a = a.move_axis(axis, D-1);
std::array<size_type, D-1> shapes;
#pragma unroll
for (size_type i = 0; i < D-1; ++i) {
shapes[i] = moved_a.shape()[i];
}
Tensor<size_type, D-1> indices(shapes);
arg_reduce_by_stride<T, D> (
indices.data(), moved_a.data(),
indices.stride().data(), moved_a.stride().data(),
moved_a.shape().data(), f);
return indices;
}
template<typename T, size_type D>
Tensor<T, D-1> norm_sqr(
const Tensor<T, D> &a, size_type axis=D-1) {
return _reduce(a, axis, norm_sqr_adder<T >());
}
template<typename T, size_type D>
Tensor<T, D-1> max(
const Tensor<T, D> &a, size_type axis=D-1) {
return _reduce(a, axis, max_assigner<T >());
}
template<typename T, size_type D>
Tensor<T, D-1> min(
const Tensor<T, D> &a, size_type axis=D-1) {
return _reduce(a, axis, min_assigner<T >());
}
template<typename T, size_type D>
Tensor<size_type, D-1> arg_max(
const Tensor<T, D> &a, size_type axis=D-1) {
return _arg_reduce(a, axis, max_compare<T >());
}
template<typename T, size_type D>
Tensor<size_type, D-1> arg_min(
const Tensor<T, D> &a, size_type axis=D-1) {
return _arg_reduce(a, axis, min_compare<T >());
}
template<typename T, size_type D>
Tensor<size_type, D> top_select(
const Tensor<T, D> &a, size_type K,
size_type axis=D-1, bool desc = false) {
Tensor<T, D> moved_a = a.move_axis(axis, D-1);
std::array<size_type, D> shapes;
#pragma unroll
for (size_type i = 0; i < D-1; ++i) {
shapes[i] = moved_a.shape()[i];
}
shapes[D-1] = K;
Tensor<size_type , D> indices(shapes);
if (K == moved_a.shape()[D-1]) {
reorder_by_stride<T, D> (
indices.data(), moved_a.data(),
indices.stride().data(), moved_a.stride().data(),
moved_a.shape().data(), K, desc, arg_sorter<T >());
} else {
reorder_by_stride<T, D> (
indices.data(), moved_a.data(),
indices.stride().data(), moved_a.stride().data(),
moved_a.shape().data(), K, desc, top_selector<T >());
}
return indices;
}
template<typename T, size_type D>
Tensor<size_type, D> arg_sort(
const Tensor<T, D> &a, size_type axis=D-1, bool desc = false) {
return top_select(a, a.shape()[axis], axis, desc);
}
template <typename T>
Tensor<T, 2> l2_sqr(
const Tensor<T, 2>& a, const Tensor<T, 2>& b) {
if (a.shape()[1] != b.shape()[1])
throw std::runtime_error(
"dimension do not match when calculating l2 sqr dist");
const Tensor<T, 1> a_sqr = norm_sqr(a) ;
const Tensor<T, 1> b_sqr = norm_sqr(b) ;
Tensor<T, 2> m({a.shape()[0], b.shape()[0]});
#pragma omp parallel for
for (int r = 0; r < m.shape()[0]; ++r) {
for (int c = 0; c < m.shape()[1]; ++c) {
m[{r, c}] = a_sqr[{r}] + b_sqr[{c}];
}
}
tensor::mm(a, b.transpose(), &m, -2.0f, 1.0f);
return m;
}
} // namespace tensor
#endif //TENSOR_CALCULATOR_H
|
xinyandai/tensor
|
src/tensor/simd.h
|
//
// Created by xinyan on 1/5/2019.
//
#pragma once
#ifndef TENSOR_SIMD_H
#define TENSOR_SIMD_H
/**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD+Patents license found in the
* LICENSE file in the root directory of this source tree.
*/
// -*- c++ -*-
#include <cstdio>
#include <cassert>
#include <cstring>
#include <cmath>
#ifdef __SSE__
#include <immintrin.h>
#endif
#ifdef __aarch64__
#include <arm_neon.h>
#endif
#include <omp.h>
/**************************************************
* Get some stats about the system
**************************************************/
namespace tensor {
float fvec_L2sqr (const float * x,
const float * y,
size_t d);
#ifdef __AVX__
#define USE_AVX
#endif
/*********************************************************
* Optimized distance computations
*********************************************************/
/* Functions to compute:
- L2 distance between 2 vectors
- inner product between 2 vectors
- L2 norm of a vector
The functions should probably not be invoked when a large number of
vectors are processed in batch (in which case matrix multiply
is faster), but may be useful for comparing vectors isolated in
memory.
Works with any vectors of any dimension, even unaligned (in which
case they are slower).
*/
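/* Usage sketch (illustrative):
     float q[128], v[128];
     // ... fill q and v ...
     float d2 = tensor::fvec_L2sqr(q, v, 128);          // squared L2 distance
     float ip = tensor::fvec_inner_product(q, v, 128);  // inner product
*/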
/*********************************************************
* Reference implementations
*/
/* same without SSE */
float fvec_L2sqr_ref (const float * x,
const float * y,
size_t d)
{
size_t i;
float res = 0;
for (i = 0; i < d; i++) {
const float tmp = x[i] - y[i];
res += tmp * tmp;
}
return res;
}
float fvec_inner_product_ref (const float * x,
const float * y,
size_t d)
{
size_t i;
float res = 0;
for (i = 0; i < d; i++)
res += x[i] * y[i];
return res;
}
float fvec_norm_L2sqr_ref (const float *x, size_t d)
{
size_t i;
double res = 0;
for (i = 0; i < d; i++)
res += x[i] * x[i];
return res;
}
void fvec_L2sqr_ny_ref (float * dis,
const float * x,
const float * y,
size_t d, size_t ny)
{
for (size_t i = 0; i < ny; i++) {
dis[i] = fvec_L2sqr (x, y, d);
y += d;
}
}
/*********************************************************
* SSE and AVX implementations
*/
#ifdef __SSE__
// reads 0 <= d < 4 floats as __m128
static inline __m128 masked_read (int d, const float *x)
{
assert (0 <= d && d < 4);
__attribute__((__aligned__(16))) float buf[4] = {0, 0, 0, 0};
switch (d) {
case 3:
buf[2] = x[2];
case 2:
buf[1] = x[1];
case 1:
buf[0] = x[0];
}
return _mm_load_ps (buf);
// cannot use AVX2 _mm_mask_set1_epi32
}
float fvec_norm_L2sqr (const float * x,
size_t d)
{
__m128 mx;
__m128 msum1 = _mm_setzero_ps();
while (d >= 4) {
mx = _mm_loadu_ps (x); x += 4;
msum1 = _mm_add_ps (msum1, _mm_mul_ps (mx, mx));
d -= 4;
}
mx = masked_read (d, x);
msum1 = _mm_add_ps (msum1, _mm_mul_ps (mx, mx));
msum1 = _mm_hadd_ps (msum1, msum1);
msum1 = _mm_hadd_ps (msum1, msum1);
return _mm_cvtss_f32 (msum1);
}
namespace {
float sqr (float x) {
return x * x;
}
void fvec_L2sqr_ny_D1 (float * dis, const float * x,
const float * y, size_t ny)
{
float x0s = x[0];
__m128 x0 = _mm_set_ps (x0s, x0s, x0s, x0s);
size_t i;
for (i = 0; i + 3 < ny; i += 4) {
__m128 tmp, accu;
tmp = x0 - _mm_loadu_ps (y); y += 4;
accu = tmp * tmp;
dis[i] = _mm_cvtss_f32 (accu);
tmp = _mm_shuffle_ps (accu, accu, 1);
dis[i + 1] = _mm_cvtss_f32 (tmp);
tmp = _mm_shuffle_ps (accu, accu, 2);
dis[i + 2] = _mm_cvtss_f32 (tmp);
tmp = _mm_shuffle_ps (accu, accu, 3);
dis[i + 3] = _mm_cvtss_f32 (tmp);
}
while (i < ny) { // handle non-multiple-of-4 case
dis[i++] = sqr(x0s - *y++);
}
}
void fvec_L2sqr_ny_D2 (float * dis, const float * x,
const float * y, size_t ny)
{
__m128 x0 = _mm_set_ps (x[1], x[0], x[1], x[0]);
size_t i;
for (i = 0; i + 1 < ny; i += 2) {
__m128 tmp, accu;
tmp = x0 - _mm_loadu_ps (y); y += 4;
accu = tmp * tmp;
accu = _mm_hadd_ps (accu, accu);
dis[i] = _mm_cvtss_f32 (accu);
accu = _mm_shuffle_ps (accu, accu, 3);
dis[i + 1] = _mm_cvtss_f32 (accu);
}
if (i < ny) { // handle odd case
dis[i] = sqr(x[0] - y[0]) + sqr(x[1] - y[1]);
}
}
void fvec_L2sqr_ny_D4 (float * dis, const float * x,
const float * y, size_t ny)
{
__m128 x0 = _mm_loadu_ps(x);
for (size_t i = 0; i < ny; i++) {
__m128 tmp, accu;
tmp = x0 - _mm_loadu_ps (y); y += 4;
accu = tmp * tmp;
accu = _mm_hadd_ps (accu, accu);
accu = _mm_hadd_ps (accu, accu);
dis[i] = _mm_cvtss_f32 (accu);
}
}
void fvec_L2sqr_ny_D8 (float * dis, const float * x,
const float * y, size_t ny)
{
__m128 x0 = _mm_loadu_ps(x);
__m128 x1 = _mm_loadu_ps(x + 4);
for (size_t i = 0; i < ny; i++) {
__m128 tmp, accu;
tmp = x0 - _mm_loadu_ps (y); y += 4;
accu = tmp * tmp;
tmp = x1 - _mm_loadu_ps (y); y += 4;
accu += tmp * tmp;
accu = _mm_hadd_ps (accu, accu);
accu = _mm_hadd_ps (accu, accu);
dis[i] = _mm_cvtss_f32 (accu);
}
}
void fvec_L2sqr_ny_D12 (float * dis, const float * x,
const float * y, size_t ny)
{
__m128 x0 = _mm_loadu_ps(x);
__m128 x1 = _mm_loadu_ps(x + 4);
__m128 x2 = _mm_loadu_ps(x + 8);
for (size_t i = 0; i < ny; i++) {
__m128 tmp, accu;
tmp = x0 - _mm_loadu_ps (y); y += 4;
accu = tmp * tmp;
tmp = x1 - _mm_loadu_ps (y); y += 4;
accu += tmp * tmp;
tmp = x2 - _mm_loadu_ps (y); y += 4;
accu += tmp * tmp;
accu = _mm_hadd_ps (accu, accu);
accu = _mm_hadd_ps (accu, accu);
dis[i] = _mm_cvtss_f32 (accu);
}
}
} // anonymous namespace
void fvec_L2sqr_ny (float * dis, const float * x,
const float * y, size_t d, size_t ny) {
// optimized for a few special cases
switch(d) {
case 1:
fvec_L2sqr_ny_D1 (dis, x, y, ny);
return;
case 2:
fvec_L2sqr_ny_D2 (dis, x, y, ny);
return;
case 4:
fvec_L2sqr_ny_D4 (dis, x, y, ny);
return;
case 8:
fvec_L2sqr_ny_D8 (dis, x, y, ny);
return;
case 12:
fvec_L2sqr_ny_D12 (dis, x, y, ny);
return;
default:
fvec_L2sqr_ny_ref (dis, x, y, d, ny);
return;
}
}
#endif
#ifdef USE_AVX
// reads 0 <= d < 8 floats as __m256
static inline __m256 masked_read_8 (int d, const float *x)
{
assert (0 <= d && d < 8);
if (d < 4) {
__m256 res = _mm256_setzero_ps ();
res = _mm256_insertf128_ps (res, masked_read (d, x), 0);
return res;
} else {
__m256 res = _mm256_setzero_ps ();
res = _mm256_insertf128_ps (res, _mm_loadu_ps (x), 0);
res = _mm256_insertf128_ps (res, masked_read (d - 4, x + 4), 1);
return res;
}
}
float fvec_inner_product (const float * x,
const float * y,
size_t d)
{
__m256 msum1 = _mm256_setzero_ps();
while (d >= 8) {
__m256 mx = _mm256_loadu_ps (x); x += 8;
__m256 my = _mm256_loadu_ps (y); y += 8;
msum1 = _mm256_add_ps (msum1, _mm256_mul_ps (mx, my));
d -= 8;
}
__m128 msum2 = _mm256_extractf128_ps(msum1, 1);
msum2 += _mm256_extractf128_ps(msum1, 0);
if (d >= 4) {
__m128 mx = _mm_loadu_ps (x); x += 4;
__m128 my = _mm_loadu_ps (y); y += 4;
msum2 = _mm_add_ps (msum2, _mm_mul_ps (mx, my));
d -= 4;
}
if (d > 0) {
__m128 mx = masked_read (d, x);
__m128 my = masked_read (d, y);
msum2 = _mm_add_ps (msum2, _mm_mul_ps (mx, my));
}
msum2 = _mm_hadd_ps (msum2, msum2);
msum2 = _mm_hadd_ps (msum2, msum2);
return _mm_cvtss_f32 (msum2);
}
float fvec_L2sqr (const float * x,
const float * y,
size_t d)
{
__m256 msum1 = _mm256_setzero_ps();
while (d >= 8) {
__m256 mx = _mm256_loadu_ps (x); x += 8;
__m256 my = _mm256_loadu_ps (y); y += 8;
const __m256 a_m_b1 = mx - my;
msum1 += a_m_b1 * a_m_b1;
d -= 8;
}
__m128 msum2 = _mm256_extractf128_ps(msum1, 1);
msum2 += _mm256_extractf128_ps(msum1, 0);
if (d >= 4) {
__m128 mx = _mm_loadu_ps (x); x += 4;
__m128 my = _mm_loadu_ps (y); y += 4;
const __m128 a_m_b1 = mx - my;
msum2 += a_m_b1 * a_m_b1;
d -= 4;
}
if (d > 0) {
__m128 mx = masked_read (d, x);
__m128 my = masked_read (d, y);
__m128 a_m_b1 = mx - my;
msum2 += a_m_b1 * a_m_b1;
}
msum2 = _mm_hadd_ps (msum2, msum2);
msum2 = _mm_hadd_ps (msum2, msum2);
return _mm_cvtss_f32 (msum2);
}
#elif defined(__SSE__)
/* SSE-implementation of L2 distance */
float fvec_L2sqr (const float * x,
const float * y,
size_t d)
{
__m128 msum1 = _mm_setzero_ps();
while (d >= 4) {
__m128 mx = _mm_loadu_ps (x); x += 4;
__m128 my = _mm_loadu_ps (y); y += 4;
const __m128 a_m_b1 = mx - my;
msum1 += a_m_b1 * a_m_b1;
d -= 4;
}
if (d > 0) {
// add the last 1, 2 or 3 values
__m128 mx = masked_read (d, x);
__m128 my = masked_read (d, y);
__m128 a_m_b1 = mx - my;
msum1 += a_m_b1 * a_m_b1;
}
msum1 = _mm_hadd_ps (msum1, msum1);
msum1 = _mm_hadd_ps (msum1, msum1);
return _mm_cvtss_f32 (msum1);
}
float fvec_inner_product (const float * x,
const float * y,
size_t d)
{
__m128 mx, my;
__m128 msum1 = _mm_setzero_ps();
while (d >= 4) {
mx = _mm_loadu_ps (x); x += 4;
my = _mm_loadu_ps (y); y += 4;
msum1 = _mm_add_ps (msum1, _mm_mul_ps (mx, my));
d -= 4;
}
// add the last 1, 2, or 3 values
mx = masked_read (d, x);
my = masked_read (d, y);
__m128 prod = _mm_mul_ps (mx, my);
msum1 = _mm_add_ps (msum1, prod);
msum1 = _mm_hadd_ps (msum1, msum1);
msum1 = _mm_hadd_ps (msum1, msum1);
return _mm_cvtss_f32 (msum1);
}
#elif defined(__aarch64__)
float fvec_L2sqr (const float * x,
const float * y,
size_t d)
{
if (d & 3) return fvec_L2sqr_ref (x, y, d);
float32x4_t accu = vdupq_n_f32 (0);
for (size_t i = 0; i < d; i += 4) {
float32x4_t xi = vld1q_f32 (x + i);
float32x4_t yi = vld1q_f32 (y + i);
float32x4_t sq = vsubq_f32 (xi, yi);
accu = vfmaq_f32 (accu, sq, sq);
}
float32x4_t a2 = vpaddq_f32 (accu, accu);
return vdups_laneq_f32 (a2, 0) + vdups_laneq_f32 (a2, 1);
}
float fvec_inner_product (const float * x,
const float * y,
size_t d)
{
if (d & 3) return fvec_inner_product_ref (x, y, d);
float32x4_t accu = vdupq_n_f32 (0);
for (size_t i = 0; i < d; i += 4) {
float32x4_t xi = vld1q_f32 (x + i);
float32x4_t yi = vld1q_f32 (y + i);
accu = vfmaq_f32 (accu, xi, yi);
}
float32x4_t a2 = vpaddq_f32 (accu, accu);
return vdups_laneq_f32 (a2, 0) + vdups_laneq_f32 (a2, 1);
}
float fvec_norm_L2sqr (const float *x, size_t d)
{
if (d & 3) return fvec_norm_L2sqr_ref (x, d);
float32x4_t accu = vdupq_n_f32 (0);
for (size_t i = 0; i < d; i += 4) {
float32x4_t xi = vld1q_f32 (x + i);
accu = vfmaq_f32 (accu, xi, xi);
}
float32x4_t a2 = vpaddq_f32 (accu, accu);
return vdups_laneq_f32 (a2, 0) + vdups_laneq_f32 (a2, 1);
}
// not optimized for ARM
void fvec_L2sqr_ny (float * dis, const float * x,
const float * y, size_t d, size_t ny) {
fvec_L2sqr_ny_ref (dis, x, y, d, ny);
}
#else
// scalar implementation
float fvec_L2sqr (const float * x,
const float * y,
size_t d)
{
return fvec_L2sqr_ref (x, y, d);
}
float fvec_inner_product (const float * x,
const float * y,
size_t d)
{
return fvec_inner_product_ref (x, y, d);
}
float fvec_norm_L2sqr (const float *x, size_t d)
{
return fvec_norm_L2sqr_ref (x, d);
}
void fvec_L2sqr_ny (float * dis, const float * x,
const float * y, size_t d, size_t ny) {
fvec_L2sqr_ny_ref (dis, x, y, d, ny);
}
#endif
/***************************************************************************
* heavily optimized table computations
***************************************************************************/
static inline void fvec_madd_ref (size_t n, const float *a,
float bf, const float *b, float *c) {
for (size_t i = 0; i < n; i++)
c[i] = a[i] + bf * b[i];
}
#ifdef __SSE__
static inline void fvec_madd_sse (size_t n, const float *a,
float bf, const float *b, float *c) {
n >>= 2;
__m128 bf4 = _mm_set_ps1 (bf);
__m128 * a4 = (__m128*)a;
__m128 * b4 = (__m128*)b;
__m128 * c4 = (__m128*)c;
while (n--) {
*c4 = _mm_add_ps (*a4, _mm_mul_ps (bf4, *b4));
b4++;
a4++;
c4++;
}
}
void fvec_madd (size_t n, const float *a,
float bf, const float *b, float *c)
{
if ((n & 3) == 0 &&
((((long)a) | ((long)b) | ((long)c)) & 15) == 0)
fvec_madd_sse (n, a, bf, b, c);
else
fvec_madd_ref (n, a, bf, b, c);
}
#else
void fvec_madd (size_t n, const float *a,
float bf, const float *b, float *c)
{
fvec_madd_ref (n, a, bf, b, c);
}
#endif
static inline int fvec_madd_and_argmin_ref (size_t n, const float *a,
float bf, const float *b, float *c) {
float vmin = 1e20;
int imin = -1;
for (size_t i = 0; i < n; i++) {
c[i] = a[i] + bf * b[i];
if (c[i] < vmin) {
vmin = c[i];
imin = i;
}
}
return imin;
}
#ifdef __SSE__
static inline int fvec_madd_and_argmin_sse (
size_t n, const float *a,
float bf, const float *b, float *c) {
n >>= 2;
__m128 bf4 = _mm_set_ps1 (bf);
__m128 vmin4 = _mm_set_ps1 (1e20);
__m128i imin4 = _mm_set1_epi32 (-1);
__m128i idx4 = _mm_set_epi32 (3, 2, 1, 0);
__m128i inc4 = _mm_set1_epi32 (4);
__m128 * a4 = (__m128*)a;
__m128 * b4 = (__m128*)b;
__m128 * c4 = (__m128*)c;
while (n--) {
__m128 vc4 = _mm_add_ps (*a4, _mm_mul_ps (bf4, *b4));
*c4 = vc4;
__m128i mask = (__m128i)_mm_cmpgt_ps (vmin4, vc4);
// imin4 = _mm_blendv_epi8 (imin4, idx4, mask); // slower!
imin4 = _mm_or_si128 (_mm_and_si128 (mask, idx4),
_mm_andnot_si128 (mask, imin4));
vmin4 = _mm_min_ps (vmin4, vc4);
b4++;
a4++;
c4++;
idx4 = _mm_add_epi32 (idx4, inc4);
}
// 4 values -> 2
{
idx4 = _mm_shuffle_epi32 (imin4, 3 << 2 | 2);
__m128 vc4 = _mm_shuffle_ps (vmin4, vmin4, 3 << 2 | 2);
__m128i mask = (__m128i)_mm_cmpgt_ps (vmin4, vc4);
imin4 = _mm_or_si128 (_mm_and_si128 (mask, idx4),
_mm_andnot_si128 (mask, imin4));
vmin4 = _mm_min_ps (vmin4, vc4);
}
// 2 values -> 1
{
idx4 = _mm_shuffle_epi32 (imin4, 1);
__m128 vc4 = _mm_shuffle_ps (vmin4, vmin4, 1);
__m128i mask = (__m128i)_mm_cmpgt_ps (vmin4, vc4);
imin4 = _mm_or_si128 (_mm_and_si128 (mask, idx4),
_mm_andnot_si128 (mask, imin4));
// vmin4 = _mm_min_ps (vmin4, vc4);
}
return _mm_cvtsi128_si32 (imin4);
}
int fvec_madd_and_argmin (size_t n, const float *a,
float bf, const float *b, float *c)
{
if ((n & 3) == 0 &&
((((long)a) | ((long)b) | ((long)c)) & 15) == 0)
return fvec_madd_and_argmin_sse (n, a, bf, b, c);
else
return fvec_madd_and_argmin_ref (n, a, bf, b, c);
}
#else
int fvec_madd_and_argmin (size_t n, const float *a,
float bf, const float *b, float *c)
{
return fvec_madd_and_argmin_ref (n, a, bf, b, c);
}
#endif
} // namespace tensor
#endif //TENSOR_SIMD_H
|
xinyandai/tensor
|
src/tensor/stride_iter.h
|
//
// Created by xinyan on 5/6/2019.
//
#pragma once
#ifndef TENSOR_STRDE_ITER_H
#define TENSOR_STRDE_ITER_H
#include <iostream>
#include <vector>
#include <algorithm>
#include <iterator>
#include <cassert>
template <typename T>
class StrideIterator
{
typedef int size_type;
public:
typedef T value_type;
typedef T& reference;
typedef size_type difference_type;
typedef T* pointer;
typedef std::random_access_iterator_tag iterator_category;
StrideIterator(const StrideIterator& a)
:ptr_(a.ptr_), step_(a.step_) { }
StrideIterator(T* ptr, size_type step)
:ptr_(ptr), step_(step) { }
StrideIterator& operator++() { ptr_ += step_; return *this; }
StrideIterator& operator--() { ptr_ -= step_; return *this; }
StrideIterator operator++(
    int) { StrideIterator i = *this; ptr_ += step_; return i; }
StrideIterator operator--(
    int) { StrideIterator i = *this; ptr_ -= step_; return i; }
StrideIterator& operator+=(
    size_type n) { ptr_ += n * step_; return *this; }
StrideIterator& operator-=(
    size_type n) { ptr_ -= n * step_; return *this; }
T& operator*() { return *ptr_; }
T& operator[](size_type n) { return ptr_[n*step_]; }
T* operator->() { return ptr_; }
bool operator==(
const StrideIterator& rhs) { return ptr_ == rhs.ptr_; }
bool operator!=(
const StrideIterator& rhs) { return ptr_ != rhs.ptr_; }
// friend operators
friend bool operator<(
const StrideIterator& x, const StrideIterator& y) {
return x.ptr_ < y.ptr_;
}
friend size_type operator-(
const StrideIterator& x, const StrideIterator& y) {
return (x.ptr_ - y.ptr_) / x.step_;
}
friend StrideIterator operator+(
const StrideIterator& x, size_type y) {
StrideIterator c(x);
c.ptr_ += y * c.step_;
return c;
}
friend StrideIterator operator-(
const StrideIterator& x, size_type y) {
StrideIterator c(x);
c.ptr_ -= y * c.step_;
return c;
}
friend bool operator==(
const StrideIterator& x, const StrideIterator& y) {
return x.ptr_ == y.ptr_;
}
friend bool operator!=(
const StrideIterator& x, const StrideIterator& y) {
return x.ptr_ != y.ptr_;
}
private:
T* ptr_;
size_type step_;
};
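// Usage sketch (illustrative): view column 0 of a row-major 3x4 buffer as a range.
//
//   float m[12] = { /* ... */ };
//   StrideIterator<float> first(m, 4);       // advance 4 floats per step
//   StrideIterator<float> last = first + 3;  // three rows
//   float col_min = *std::min_element(first, last);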
#endif //TENSOR_STRDE_ITER_H
|
xinyandai/tensor
|
src/tensor/helper.h
|
//
// Created by xinyan on 11/5/2019.
//
#pragma once
#ifndef TENSOR_UTIL_H
#define TENSOR_UTIL_H
#include <array>
#include <vector>
#include <algorithm>
#include <cstdio>
#include <iostream>
#include <stdexcept>
#include "stride_iter.h"
#define MAX_ELEMENT (1<<30) // 1073741824
#define SLICE_END (MAX_ELEMENT) // 1073741824
namespace tensor {
typedef int size_type;
using namespace std;
template<typename size_type, size_type D>
size_type MULTIPLIER(const std::array <size_type, D> &a) {
size_type result = 1;
#pragma unroll
for (int i = 0; i < D; ++i) {
result *= a[i];
}
return result;
}
template<typename T, size_type N>
static void
dump(const T *data, const size_type *stride,
const size_type *shape) {
if constexpr (N == 1) {
for (int i = 0; i < shape[0]; ++i) {
std::cout << *data << "\t ";
data += *stride;
}
std::cout << std::endl;
} else if constexpr (N > 1) {
for (int i = 0; i < shape[0]; ++i) {
dump<T, N - 1>(data, stride + 1, shape + 1);
data += *stride;
}
}
}
template<typename T>
struct adder {
void operator()(T &a, const T &b) { a += b; }
};
template<typename T>
struct subtract {
void operator()(T &a, const T &b) { a -= b; }
};
template<typename T>
struct divider {
void operator()(T &a, const T &b) {
if (b==0) {
std::cout << "[warning] dived by 0.0" << std::endl;
}
a /= b;
}
};
template<typename T>
struct assignment {
void operator()(T &a, const T &b) { a = b; }
};
template<typename T>
struct multiplier {
void operator()(T &a, const T &b) { a *= b; }
};
template<typename T>
struct norm_sqr_adder {
void operator()(T &a, const T &b) { a += b * b; }
};
template<typename T>
struct max_assigner {
void operator()(T &a, const T &b) { if (a < b) a = b; }
};
template<typename T>
struct min_assigner {
void operator()(T &a, const T &b) { if (a > b) a = b; }
};
template<typename T>
struct max_compare {
bool operator()(const T &a, const T &b) { return a < b; }
};
template<typename T>
struct min_compare {
bool operator()(const T &a, const T &b) { return a > b; }
};
template<typename T>
struct top_selector {
void operator()(
size_type N, size_type K, size_type *index, const T *array,
size_type stride_index, size_type stride_array, bool desc = false) {
auto compare = [array, stride_array, desc](
    const size_type a, const size_type b) {
    const T &va = array[a * stride_array];
    const T &vb = array[b * stride_array];
    return desc ? (vb < va) : (va < vb);
};
if (N == K) {
for (int i = 0; i < N; ++i) {
index[i*stride_index] = i;
}
StrideIterator<size_type> idx_iter(index, stride_index);
std::sort(idx_iter, idx_iter + N, compare);
} else {
vector<size_type > sort_idx(N);
for (int i = 0; i < N; ++i) {
sort_idx[i] = i;
}
std::nth_element(sort_idx.begin(), sort_idx.begin() + K, sort_idx.end(), compare);
std::sort(sort_idx.begin(), sort_idx.begin() + K, compare);
for (int i = 0; i < K; ++i) {
index[i * stride_index] = sort_idx[i];
}
}
}
};
template<typename T>
struct arg_sorter {
void operator()(
size_type N, size_type K, size_type *index, const T *array,
size_type stride_index, size_type stride_array, bool desc = false) {
top_selector<T >()(N, K, index, array, stride_index, stride_array, desc);
}
};
template<typename T, size_type data_N, size_type source_N = 0, typename F>
static void
operation_by_stride(T *data, const T *source,
const size_type *stride_data,
const size_type *stride_source,
const size_type *shape,
F f) {
static_assert(data_N >= source_N);
if constexpr (data_N == 1) {
for (int i = 0; i < *shape; ++i) {
f(*data, *source);
data += *stride_data;
if constexpr (source_N > 0) {
source += *stride_source;
}
}
} else if constexpr (data_N > 1) {
for (int i = 0; i < *shape; ++i) {
if constexpr (source_N > 0) {
operation_by_stride<T, data_N - 1, source_N - 1>(
data, source, stride_data + 1,
stride_source + 1, shape + 1, f);
data += *stride_data;
source += *stride_source;
} else {
operation_by_stride<T, data_N - 1, 0>(
data, source, stride_data + 1,
stride_source, shape + 1, f);
data += *stride_data;
}
}
}
}
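// Example (illustrative): add col[r] to every element of row r in a 2x3 buffer;
// with source_N < data_N the source pointer only advances with the leading
// dimensions, which is how broadcasting is realized.
//
//   float data[6] = {0, 1, 2, 3, 4, 5};
//   float col[2] = {10, 20};
//   size_type sd[2] = {3, 1}, ss[1] = {1}, shp[2] = {2, 3};
//   operation_by_stride<float, 2, 1>(data, col, sd, ss, shp, adder<float>());
//   // data is now {10, 11, 12, 23, 24, 25}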
/***
* @tparam T
* @tparam N
* @tparam F
* @param data
* @param source
* @param stride_data
* @param stride_source
* @param shape
* @param N_axis N minus axis
* @param f
*/
template<typename T, size_type N, typename F>
static void
reduce_by_stride(T *data, const T *source,
const size_type *stride_data,
const size_type *stride_source,
const size_type *shape,
F f) {
if constexpr (N == 1) {
*data = 0;
for (int i = 0; i < *(shape); ++i) {
f(*data, *source);
source += *(stride_source);
}
} else if constexpr (N > 1) {
for (int i = 0; i < *shape; ++i) {
reduce_by_stride<T, N - 1, F>(
data, source, stride_data + 1,
stride_source + 1, shape + 1, f);
data += *stride_data;
source += *stride_source;
}
}
}
template<typename T, size_type N, typename F>
static void
arg_reduce_by_stride(size_type *data, const T *source,
const size_type *stride_data,
const size_type *stride_source,
const size_type *shape,
F f) {
if constexpr (N == 1) {
*data = 0;
T m = *source;
for (int i = 1; i < *(shape); ++i) {
source += *(stride_source);
if (f(m, *source)) {
*data = i;
m = *source;
}
}
} else if constexpr (N > 1) {
for (int i = 0; i < *shape; ++i) {
arg_reduce_by_stride<T, N - 1, F>(
data, source, stride_data + 1,
stride_source + 1, shape + 1, f);
data += *stride_data;
source += *stride_source;
}
}
}
template<typename T, size_type N, typename F>
static void
reorder_by_stride(size_type *data, const T *source,
const size_type *stride_data,
const size_type *stride_source,
const size_type *shape,
const size_type K,
bool desc,
F f) {
if constexpr (N == 1) {
f(*shape, K, data, source, *stride_data, *stride_source, desc);
} else if constexpr (N > 1) {
for (int i = 0; i < *shape; ++i) {
reorder_by_stride<T, N - 1, F>(
data, source,
stride_data + 1, stride_source + 1,
shape + 1, K, desc, f);
data += *stride_data;
source += *stride_source;
}
}
}
struct Slice {
size_type begin_;
size_type end_;
size_type step_;
void set_shape(size_type shape) {
if (step_ == 0) {
throw std::runtime_error("slice step size is 0");
}
if (end_ < 0)
end_ += shape;
else if (end_ == SLICE_END)
end_ = shape;
if (begin_ < 0)
begin_ += shape;
}
size_type slice_size() {
if (end_ - begin_ > 0 && step_ > 0) {
return (end_ - begin_) / step_;
}
if (end_ - begin_ < 0 && step_ < 0) {
return (begin_ - end_) / (- step_);
}
char message[1024];
sprintf(message, "slice step size %d is not compatible "
"with slice interval [%d, %d]",
step_, begin_, end_);
throw std::runtime_error(message);
}
size_type slice_offset(size_type stride) {
return begin_ * stride;
}
size_type stride_size(size_type stride) {
return stride * step_;
}
explicit Slice(): begin_(0), end_(SLICE_END), step_(1) {};
explicit Slice(size_type begin)
:begin_(begin), end_(begin+1), step_(1) {};
explicit Slice(size_type begin, size_type end)
:begin_(begin), end_(end), step_(1) {};
explicit Slice(size_type begin, size_type end, size_type step)
:begin_(begin), end_(end), step_(step) {};
};
typedef Slice S;
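// Examples (illustrative), assuming an axis of length 12:
//   S()              // the whole axis: indices 0..11
//   S(5)             // the single index 5
//   S(2, 10, 2)      // indices 2, 4, 6, 8
//   S(-3, SLICE_END) // the last three indices: 9, 10, 11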
} // namespace tensor
#endif //TENSOR_UTIL_H
|
valkyrienyanko/game-engine
|
Game Engine/src/graphics/simple2drenderer.h
|
<filename>Game Engine/src/graphics/simple2drenderer.h
#pragma once
#include <deque>
#include "renderer2d.h"
namespace valk {
namespace graphics {
class Simple2DRenderer : public Renderer2D
{
private:
std::deque<const Renderable2D*> m_RenderQueue;
public:
void submit(const Renderable2D* renderable) override;
void flush() override;
};
}
}
|
valkyrienyanko/game-engine
|
Game Engine/src/graphics/renderer2d.h
|
<reponame>valkyrienyanko/game-engine
#pragma once
#include <GL/glew.h>
#include "../maths/maths.h"
#include "renderer.h"
#include "renderable2d.h"
namespace valk {
namespace graphics {
class Renderer2D
{
protected:
virtual void submit(const Renderable2D* renderable) = 0;
virtual void flush() = 0;
};
}
}
|
valkyrienyanko/game-engine
|
Game Engine/src/maths/vec2.h
|
#pragma once
#include <iostream>
namespace valk {
namespace maths {
struct vec2
{
float x, y;
vec2();
vec2(const float& x, const float& y);
vec2& add(const vec2& other);
vec2& subtract(const vec2& other);
vec2& multiply(const vec2& other);
vec2& divide(const vec2& other);
friend vec2 operator+(vec2 left, const vec2& right);
friend vec2 operator-(vec2 left, const vec2& right);
friend vec2 operator*(vec2 left, const vec2& right);
friend vec2 operator/(vec2 left, const vec2& right);
bool operator==(const vec2& other);
bool operator!=(const vec2& other);
vec2& operator+=(const vec2& other);
vec2& operator-=(const vec2& other);
vec2& operator*=(const vec2& other);
vec2& operator/=(const vec2& other);
friend std::ostream& operator<<(std::ostream& stream, const vec2& vector);
};
}
}
|