hexsha stringlengths 40 40 | size int64 22 2.4M | ext stringclasses 5
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 260 | max_stars_repo_name stringlengths 5 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 260 | max_issues_repo_name stringlengths 5 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 260 | max_forks_repo_name stringlengths 5 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 22 2.4M | avg_line_length float64 5 169k | max_line_length int64 5 786k | alphanum_fraction float64 0.06 0.95 | matches listlengths 1 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3683b96e360c7a7238ed5544e066fd9cd229d9ec | 6,044 | h | C | bridge/bindings/qjs/dom/event.h | liangyuetian/kraken | 8cc023ae936e9277c2350e629a018e34fd5b65ec | [
"Apache-2.0"
] | null | null | null | bridge/bindings/qjs/dom/event.h | liangyuetian/kraken | 8cc023ae936e9277c2350e629a018e34fd5b65ec | [
"Apache-2.0"
] | null | null | null | bridge/bindings/qjs/dom/event.h | liangyuetian/kraken | 8cc023ae936e9277c2350e629a018e34fd5b65ec | [
"Apache-2.0"
] | null | null | null | /*
* Copyright (C) 2021 Alibaba Inc. All rights reserved.
* Author: Kraken Team.
*/
#ifndef KRAKENBRIDGE_EVENT_H
#define KRAKENBRIDGE_EVENT_H
#include "bindings/qjs/host_class.h"
namespace kraken::binding::qjs {
#define EVENT_CLICK "click"
#define EVENT_INPUT "input"
#define EVENT_APPEAR "appear"
#define EVENT_DISAPPEAR "disappear"
#define EVENT_COLOR_SCHEME_CHANGE "colorschemechange"
#define EVENT_ERROR "error"
#define EVENT_MEDIA_ERROR "mediaerror"
#define EVENT_TOUCH_START "touchstart"
#define EVENT_TOUCH_MOVE "touchmove"
#define EVENT_TOUCH_END "touchend"
#define EVENT_TOUCH_CANCEL "touchcancel"
#define EVENT_MESSAGE "message"
#define EVENT_CLOSE "close"
#define EVENT_OPEN "open"
#define EVENT_INTERSECTION_CHANGE "intersectionchange"
#define EVENT_CANCEL "cancel"
#define EVENT_POPSTATE "popstate"
#define EVENT_FINISH "finish"
#define EVENT_TRANSITION_RUN "transitionrun"
#define EVENT_TRANSITION_CANCEL "transitioncancel"
#define EVENT_TRANSITION_START "transitionstart"
#define EVENT_TRANSITION_END "transitionend"
#define EVENT_FOCUS "focus"
#define EVENT_LOAD "load"
#define EVENT_UNLOAD "unload"
#define EVENT_CHANGE "change"
#define EVENT_CAN_PLAY "canplay"
#define EVENT_CAN_PLAY_THROUGH "canplaythrough"
#define EVENT_ENDED "ended"
#define EVENT_PAUSE "pause"
#define EVENT_PLAY "play"
#define EVENT_SEEKED "seeked"
#define EVENT_SEEKING "seeking"
#define EVENT_VOLUME_CHANGE "volumechange"
#define EVENT_SCROLL "scroll"
#define EVENT_SWIPE "swipe"
#define EVENT_PAN "pan"
#define EVENT_LONG_PRESS "longpress"
#define EVENT_SCALE "scale"
void bindEvent(ExecutionContext* context);
class EventInstance;
class EventTargetInstance;
using EventCreator = EventInstance* (*)(ExecutionContext* context, void* nativeEvent);
// Host class exposing the DOM `Event` constructor/prototype to QuickJS.
// Also acts as a factory registry: concrete event types register an
// EventCreator via defineEvent() and buildEventInstance() dispatches on the
// event type string.
class Event : public HostClass {
public:
static JSClassID kEventClassID;
// Invoked when JS executes `new Event(...)`.
JSValue instanceConstructor(JSContext* ctx, JSValue func_obj, JSValue this_val, int argc, JSValue* argv) override;
Event() = delete;
explicit Event(ExecutionContext* context);
// Creates an EventInstance for `eventType`, using a registered EventCreator
// when one exists (see m_eventCreatorMap); `nativeEvent` is the Dart-side
// payload (see NativeEvent below).
static EventInstance* buildEventInstance(std::string& eventType, ExecutionContext* context, void* nativeEvent, bool isCustomEvent);
// Registers a factory for a concrete event type (e.g. touch events).
static void defineEvent(const std::string& eventType, EventCreator creator);
OBJECT_INSTANCE(Event);
// Prototype methods mirroring the standard DOM Event interface.
static JSValue stopPropagation(JSContext* ctx, JSValueConst this_val, int argc, JSValueConst* argv);
static JSValue stopImmediatePropagation(JSContext* ctx, JSValueConst this_val, int argc, JSValueConst* argv);
static JSValue preventDefault(JSContext* ctx, JSValueConst this_val, int argc, JSValueConst* argv);
static JSValue initEvent(JSContext* ctx, JSValueConst this_val, int argc, JSValueConst* argv);
private:
// eventType string -> factory used by buildEventInstance().
static std::unordered_map<std::string, EventCreator> m_eventCreatorMap;
// Read-only prototype properties of the DOM Event interface.
DEFINE_PROTOTYPE_READONLY_PROPERTY(type);
DEFINE_PROTOTYPE_READONLY_PROPERTY(bubbles);
DEFINE_PROTOTYPE_READONLY_PROPERTY(cancelable);
DEFINE_PROTOTYPE_READONLY_PROPERTY(timestamp);
DEFINE_PROTOTYPE_READONLY_PROPERTY(defaultPrevented);
DEFINE_PROTOTYPE_READONLY_PROPERTY(target);
DEFINE_PROTOTYPE_READONLY_PROPERTY(srcElement);
DEFINE_PROTOTYPE_READONLY_PROPERTY(currentTarget);
DEFINE_PROTOTYPE_READONLY_PROPERTY(returnValue);
DEFINE_PROTOTYPE_READONLY_PROPERTY(cancelBubble);
// Second macro argument is the JS function's declared arity (`length`).
DEFINE_PROTOTYPE_FUNCTION(stopPropagation, 0);
DEFINE_PROTOTYPE_FUNCTION(stopImmediatePropagation, 0);
DEFINE_PROTOTYPE_FUNCTION(preventDefault, 1);
DEFINE_PROTOTYPE_FUNCTION(initEvent, 3);
friend EventInstance;
};
// Dart generated nativeEvent member are force align to 64-bit system. So all members in NativeEvent should have 64 bit width.
#if ANDROID_32_BIT
struct NativeEvent {
// On 32-bit Android every field is widened to int64_t so the struct layout
// matches what 64-bit Dart emits; pointer-valued fields are stored as
// integers and reinterpret_cast back (see EventInstance::type()).
int64_t type{0};
int64_t bubbles{0};
int64_t cancelable{0};
int64_t timeStamp{0};
int64_t defaultPrevented{0};
// The pointer address of target EventTargetInstance object.
int64_t target{0};
// The pointer address of current target EventTargetInstance object.
int64_t currentTarget{0};
};
#else
// Use pointer instead of int64_t on 64 bit system can help compiler to choose best register for better running performance.
struct NativeEvent {
// Owned elsewhere; freed together with the event (see ~EventInstance).
NativeString* type{nullptr};
int64_t bubbles{0};
int64_t cancelable{0};
int64_t timeStamp{0};
int64_t defaultPrevented{0};
// The pointer address of target EventTargetInstance object.
void* target{nullptr};
// The pointer address of current target EventTargetInstance object.
void* currentTarget{nullptr};
};
#endif
// Serialized event blob as received from the Dart side.
// NOTE(review): `bytes` appears to be a borrowed pointer into a Dart-owned
// buffer of `length` 64-bit words — confirm ownership with the caller.
struct RawEvent {
uint64_t* bytes;
int64_t length;
};
// JS-side instance of an Event; wraps a NativeEvent produced either by JS
// construction (JSAtom + init dict) or by the Dart bridge (fromNativeEvent).
class EventInstance : public Instance {
public:
EventInstance() = delete;
// Owns nativeEvent and releases it on destruction.
~EventInstance() override { delete nativeEvent; }
static EventInstance* fromNativeEvent(Event* event, NativeEvent* nativeEvent);
NativeEvent* nativeEvent{nullptr};
// Dispatch-state accessors used during event propagation.
FORCE_INLINE const bool propagationStopped() { return m_propagationStopped; }
FORCE_INLINE const bool cancelled() { return m_cancelled; }
FORCE_INLINE void cancelled(bool v) { m_cancelled = v; }
FORCE_INLINE const bool propagationImmediatelyStopped() { return m_propagationImmediatelyStopped; }
// Returns the event type string; on 32-bit builds the pointer is stored in
// an int64_t field and must be cast back (see NativeEvent above).
FORCE_INLINE NativeString* type() {
#if ANDROID_32_BIT
return reinterpret_cast<NativeString*>(nativeEvent->type);
#else
return nativeEvent->type;
#endif
};
void setType(NativeString* type) const;
// target/currentTarget are stored as raw addresses in NativeEvent.
FORCE_INLINE EventTargetInstance* target() { return reinterpret_cast<EventTargetInstance*>(nativeEvent->target); }
void setTarget(EventTargetInstance* target) const;
FORCE_INLINE EventTargetInstance* currentTarget() { return reinterpret_cast<EventTargetInstance*>(nativeEvent->currentTarget); }
void setCurrentTarget(EventTargetInstance* target) const;
protected:
explicit EventInstance(Event* jsEvent, JSAtom eventType, JSValue eventInit);
explicit EventInstance(Event* jsEvent, NativeEvent* nativeEvent);
bool m_cancelled{false};
bool m_propagationStopped{false};
bool m_propagationImmediatelyStopped{false};
private:
static void finalizer(JSRuntime* rt, JSValue val);
friend Event;
};
} // namespace kraken::binding::qjs
#endif // KRAKENBRIDGE_EVENT_H
| 34.936416 | 133 | 0.797981 | [
"object"
] |
3684c813ee97b3bd0973304c0fb650424bf15045 | 5,919 | c | C | ports/esp32/boards/LILYGO_T5_EINK_47/drivers/epd/py.c | ondiiik/micropython-espboards | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | 1 | 2022-01-07T15:33:16.000Z | 2022-01-07T15:33:16.000Z | ports/esp32/boards/LILYGO_T5_EINK_47/drivers/epd/py.c | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | 1 | 2021-05-22T15:33:56.000Z | 2021-05-23T13:33:05.000Z | ports/esp32/boards/LILYGO_T5_EINK_47/drivers/epd/py.c | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | null | null | null | /*
* This file is part of the MicroPython ESP32 project, https://github.com/lewisxhe/MicroPython_ESP32_psRAM_LoBo
*
* The MIT License (MIT)
*
* Copyright (c) 2021 OSi (Ondrej Sienczak)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/binary.h"
#include "py/objarray.h"
#include "py/runtime.h"
#include "epd_driver.h"
#include <stdint.h>
typedef struct my_epd_obj
{
mp_obj_base_t base;
mp_obj_array_t* fb;
}
my_epd_obj;
/* Allocate a MicroPython bytearray object with aSize bytes of payload
 * placed directly behind the object header (one single allocation). */
static mp_obj_array_t* new_ba(size_t aSize)
{
    mp_obj_array_t* const arr = m_malloc(sizeof(mp_obj_array_t) + aSize);
    arr->base.type = &mp_type_bytearray;
    arr->typecode = BYTEARRAY_TYPECODE;
    arr->free = 0;
    arr->len = aSize;
    /* Payload starts immediately after the header struct. */
    arr->items = arr + 1;
    return arr;
}
static bool singleton;
static const mp_obj_type_t py_epd_type;
/* Constructor for the Epd type (singleton): initializes the panel driver
 * and allocates the framebuffer. Takes no positional/keyword arguments.
 * Raises RuntimeError on a second instantiation attempt. */
mp_obj_t my_epd_make_new(const mp_obj_type_t* aType,
size_t aArgsCnt,
size_t aKwCnt,
const mp_obj_t* aArgs)
{
if (singleton)
{
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("Epd object can be created only once (singleton)"));
}
singleton = true;
mp_arg_check_num(aArgsCnt, aKwCnt, 0, 0, false);
my_epd_obj* self = m_new(my_epd_obj, 1);
self->base.type = &py_epd_type;
epd_init();
/* One byte holds two pixels, hence WIDTH / 2 — presumably 4-bit
 * grayscale; TODO confirm against epd_driver.h. */
self->fb = new_ba(EPD_WIDTH / 2 * EPD_HEIGHT);
return MP_OBJ_FROM_PTR(self);
}
/* Epd.fb(): return the framebuffer bytearray allocated in make_new
 * (EPD_WIDTH / 2 * EPD_HEIGHT bytes, i.e. two pixels per byte). */
STATIC mp_obj_t py_epd_fb(mp_obj_t aSelf)
{
    my_epd_obj* const epd = MP_OBJ_TO_PTR(aSelf);
    return epd->fb;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_fb_obj, py_epd_fb);
/* Epd.on(): power the panel on. The GIL is released around the
 * hardware call so other Python threads can run meanwhile. */
STATIC mp_obj_t py_epd_on(mp_obj_t aSelf)
{
MP_THREAD_GIL_EXIT();
epd_poweron();
MP_THREAD_GIL_ENTER();
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_on_obj, py_epd_on);
/* Epd.off(): power the panel off (GIL released around the driver call). */
STATIC mp_obj_t py_epd_off(mp_obj_t aSelf)
{
MP_THREAD_GIL_EXIT();
epd_poweroff();
MP_THREAD_GIL_ENTER();
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_off_obj, py_epd_off);
/* Epd.power_off(): fully power down the panel and its supply rails
 * (epd_poweroff_all), as opposed to Epd.off() which calls epd_poweroff. */
STATIC mp_obj_t py_epd_power_off(mp_obj_t aSelf)
{
MP_THREAD_GIL_EXIT();
epd_poweroff_all();
MP_THREAD_GIL_ENTER();
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_power_off_obj, py_epd_power_off);
/* Epd.clear(): clear the whole physical display. Does not touch the
 * Python-side framebuffer returned by Epd.fb(). */
STATIC mp_obj_t py_epd_clear(mp_obj_t aSelf)
{
MP_THREAD_GIL_EXIT();
epd_clear();
MP_THREAD_GIL_ENTER();
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_clear_obj, py_epd_clear);
/* Epd.flush(): push the entire framebuffer to the panel.
 * NOTE(review): draw mode is hard-coded to BLACK_ON_WHITE and the area
 * always covers the full screen — partial updates are not exposed here. */
STATIC mp_obj_t py_epd_flush(mp_obj_t aSelf)
{
my_epd_obj* self = MP_OBJ_TO_PTR(aSelf);
Rect_t area = { 0, 0, EPD_WIDTH, EPD_HEIGHT };
MP_THREAD_GIL_EXIT();
epd_draw_image(area, self->fb->items, BLACK_ON_WHITE);
MP_THREAD_GIL_ENTER();
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_epd_flush_obj, py_epd_flush);
/* Epd.clear_area(x, y, width, height): clear a rectangular region of the
 * display. Exactly four integer arguments are required (enforced by the
 * VAR_BETWEEN(5, 5) registration below; aArgs[0] is self).
 *
 * Fix: the original decoded arguments with MP_OBJ_SMALL_INT_VALUE, which
 * silently produces garbage when passed a non-integer (e.g. a float or
 * string). mp_obj_get_int validates the object and raises TypeError for
 * non-integers, and also accepts big ints and bools. */
STATIC mp_obj_t py_epd_clear_area(size_t aArgsCnt,
                                  const mp_obj_t* aArgs)
{
    Rect_t area =
    {
        mp_obj_get_int(aArgs[1]),
        mp_obj_get_int(aArgs[2]),
        mp_obj_get_int(aArgs[3]),
        mp_obj_get_int(aArgs[4])
    };
    /* Release the GIL around the (slow) hardware call. */
    MP_THREAD_GIL_EXIT();
    epd_clear_area(area);
    MP_THREAD_GIL_ENTER();
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(py_epd_clear_area_obj, 5, 5, py_epd_clear_area);
/* Methods and constants exposed on Epd instances. */
STATIC const mp_rom_map_elem_t py_epd_locals_dict_table[] =
{
{ MP_ROM_QSTR(MP_QSTR_fb), MP_ROM_PTR(&py_epd_fb_obj) },
{ MP_ROM_QSTR(MP_QSTR_on), MP_ROM_PTR(&py_epd_on_obj) },
{ MP_ROM_QSTR(MP_QSTR_off), MP_ROM_PTR(&py_epd_off_obj) },
{ MP_ROM_QSTR(MP_QSTR_power_off), MP_ROM_PTR(&py_epd_power_off_obj) },
{ MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&py_epd_clear_obj) },
{ MP_ROM_QSTR(MP_QSTR_clear_area), MP_ROM_PTR(&py_epd_clear_area_obj) },
{ MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&py_epd_flush_obj) },
{ MP_ROM_QSTR(MP_QSTR_WIDTH), MP_ROM_INT(EPD_WIDTH) },
{ MP_ROM_QSTR(MP_QSTR_HEIGHT), MP_ROM_INT(EPD_HEIGHT) },
};
STATIC MP_DEFINE_CONST_DICT(py_epd_locals_dict, py_epd_locals_dict_table);
/* The `epd.Epd` type object (singleton-enforcing constructor above). */
static const mp_obj_type_t py_epd_type =
{
{ &mp_type_type },
.name = MP_QSTR_Epd,
.make_new = my_epd_make_new,
.locals_dict = MP_ROM_PTR(&py_epd_locals_dict)
};
/* Module-level globals: just the module name and the Epd class. */
STATIC const mp_map_elem_t globals_dict_table[] =
{
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_epd) },
{ MP_ROM_QSTR(MP_QSTR_Epd), MP_ROM_PTR(&py_epd_type) },
};
STATIC MP_DEFINE_CONST_DICT(globals_dict, globals_dict_table);
/* Module object registered below as `epd`. */
const mp_obj_module_t mp_module_epd =
{
.base = {&mp_type_module},
.globals = MP_ROM_PTR(&globals_dict),
};
MP_REGISTER_MODULE(MP_QSTR_epd, mp_module_epd, 1);
| 28.320574 | 111 | 0.688461 | [
"object"
] |
368686b3a71f1aa5abf7436a5b70e4a5376fc578 | 9,412 | c | C | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/net-snmp/5.8-r0/net-snmp-5.8/agent/helpers/table_row.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/net-snmp/5.8-r0/net-snmp-5.8/agent/helpers/table_row.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/net-snmp/5.8-r0/net-snmp-5.8/agent/helpers/table_row.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | /*
* table_row.c
*
* Helper for registering single row slices of a shared table
*
* Portions of this file are subject to the following copyright(s). See
* the Net-SNMP's COPYING file for more details and other copyrights
* that may apply:
*
* Portions of this file are copyrighted by:
* Copyright (c) 2016 VMware, Inc. All rights reserved.
* Use is subject to license terms specified in the COPYING file
* distributed with the Net-SNMP package.
*/
#define TABLE_ROW_DATA "table_row"
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-features.h>
#if HAVE_STRING_H
#include <string.h>
#else
#include <strings.h>
#endif
#include <net-snmp/agent/net-snmp-agent-includes.h>
#include <net-snmp/agent/table.h>
#include <net-snmp/agent/table_container.h>
#include <net-snmp/library/container.h>
#include <net-snmp/library/snmp_assert.h>
#include <net-snmp/net-snmp-includes.h>
netsnmp_feature_child_of(table_row_all, mib_helpers)
netsnmp_feature_child_of(table_row_extract, table_row_all)
/*
* snmp.h:#define SNMP_MSG_INTERNAL_SET_BEGIN -1
* snmp.h:#define SNMP_MSG_INTERNAL_SET_RESERVE1 0
* snmp.h:#define SNMP_MSG_INTERNAL_SET_RESERVE2 1
* snmp.h:#define SNMP_MSG_INTERNAL_SET_ACTION 2
* snmp.h:#define SNMP_MSG_INTERNAL_SET_COMMIT 3
* snmp.h:#define SNMP_MSG_INTERNAL_SET_FREE 4
* snmp.h:#define SNMP_MSG_INTERNAL_SET_UNDO 5
*/
/** @defgroup table_row table_row
* Helps you implement a table shared across two or more subagents,
* or otherwise split into individual row slices.
* @ingroup table
*
* @{
*/
static Netsnmp_Node_Handler _table_row_handler;
static Netsnmp_Node_Handler _table_row_default_handler;
/**********************************************************************
**********************************************************************
* *
* *
* PUBLIC Registration functions *
* *
* *
**********************************************************************
**********************************************************************/
/* ==================================
*
* Table Row API: Table maintenance
*
* This helper doesn't operate with the complete
* table, so these routines are not relevant.
*
* ================================== */
/* ==================================
*
* Table Row API: MIB maintenance
*
* ================================== */
/** returns a netsnmp_mib_handler object for the table_container helper */
/* Create a "table_row" MIB handler bound to the given row data.
 * Returns NULL (after logging) if the handler cannot be allocated. */
netsnmp_mib_handler* netsnmp_table_row_handler_get(void* row)
{
    netsnmp_mib_handler* const h =
        netsnmp_create_handler("table_row", _table_row_handler);
    if (NULL != h)
    {
        h->myvoid = (void*)row;
        h->flags |= MIB_HANDLER_INSTANCE;
        /* handler->flags |= MIB_HANDLER_AUTO_NEXT; ??? */
        return h;
    }
    snmp_log(LOG_ERR, "malloc failure in netsnmp_table_row_register\n");
    return NULL;
}
/* Register a single row slice of a shared table.
 *
 * Registers (once, best effort) a whole-table default handler for rows that
 * do not exist, then narrows reginfo's OID to cover exactly this row's
 * column range / index and registers it with a minimal row handler.
 * Takes ownership of reginfo (freed on error paths). Returns
 * SNMPERR_SUCCESS / SNMPERR_GENERR / SNMP_ERR_GENERR. */
int netsnmp_table_row_register(netsnmp_handler_registration* reginfo,
netsnmp_table_registration_info* tabreg,
void* row, netsnmp_variable_list* index)
{
netsnmp_handler_registration* reg2;
netsnmp_mib_handler* handler;
oid row_oid[MAX_OID_LEN];
size_t row_oid_len, len;
char tmp[SNMP_MAXBUF_MEDIUM];
if ((NULL == reginfo) || (NULL == reginfo->handler) || (NULL == tabreg))
{
snmp_log(LOG_ERR, "bad param in netsnmp_table_row_register\n");
netsnmp_handler_registration_free(reginfo);
return SNMPERR_GENERR;
}
/*
 * The first table_row invoked for a particular table should
 * register the full table as well, with a default handler to
 * process requests for non-existent (or incomplete) rows.
 *
 * Subsequent table_row registrations attempting to set up
 * this default handler would fail - preferably silently!
 */
snprintf(tmp, sizeof(tmp), "%s_table", reginfo->handlerName);
reg2 = netsnmp_create_handler_registration(
tmp, _table_row_default_handler, reginfo->rootoid, reginfo->rootoid_len,
reginfo->modes);
netsnmp_register_table(reg2, tabreg); /* Ignore return value */
/*
 * Adjust the OID being registered, to take account
 * of the indexes and column range provided....
 */
/* Narrow rootoid to <table>.1.<min_column>.<index...> and declare the
 * column sub-identifier as a range [min_column, max_column]. */
row_oid_len = reginfo->rootoid_len;
memcpy(row_oid, (u_char*)reginfo->rootoid, row_oid_len * sizeof(oid));
row_oid[row_oid_len++] = 1; /* tableEntry */
row_oid[row_oid_len++] = tabreg->min_column;
reginfo->range_ubound = tabreg->max_column;
reginfo->range_subid = row_oid_len - 1;
/* Append the encoded row index to the OID. */
build_oid_noalloc(&row_oid[row_oid_len], MAX_OID_LEN - row_oid_len, &len,
NULL, 0, index);
row_oid_len += len;
free(reginfo->rootoid);
reginfo->rootoid = snmp_duplicate_objid(row_oid, row_oid_len);
reginfo->rootoid_len = row_oid_len;
/*
 * ... insert a minimal handler ...
 */
handler = netsnmp_table_row_handler_get(row);
if (!handler ||
(netsnmp_inject_handler(reginfo, handler) != SNMPERR_SUCCESS))
{
snmp_log(LOG_ERR, "could not create table row handler\n");
netsnmp_handler_free(handler);
netsnmp_handler_registration_free(reginfo);
return SNMP_ERR_GENERR;
}
/*
 * ... and register the row
 */
return netsnmp_register_handler(reginfo);
}
/** return the row data structure supplied to the table_row helper */
#ifndef NETSNMP_FEATURE_REMOVE_TABLE_ROW_EXTRACT
/* Retrieve the row pointer that _table_row_handler attached to the
 * request (i.e. the `row` supplied at registration time). */
void* netsnmp_table_row_extract(netsnmp_request_info* request)
{
return netsnmp_request_get_list_data(request, TABLE_ROW_DATA);
}
#endif /* NETSNMP_FEATURE_REMOVE_TABLE_ROW_EXTRACT */
/** @cond */
/**********************************************************************
**********************************************************************
* *
* *
* netsnmp_table_row_helper_handler() *
* *
* *
**********************************************************************
**********************************************************************/
/* Minimal row handler: attaches the registered row pointer to every
 * request (retrievable via netsnmp_table_row_extract) and delegates the
 * actual processing to the next handler in the chain. */
static int _table_row_handler(netsnmp_mib_handler* handler,
netsnmp_handler_registration* reginfo,
netsnmp_agent_request_info* reqinfo,
netsnmp_request_info* requests)
{
int rc = SNMP_ERR_NOERROR;
netsnmp_request_info* req;
void* row;
/** sanity checks */
netsnmp_assert((NULL != handler) && (NULL != handler->myvoid));
netsnmp_assert((NULL != reginfo) && (NULL != reqinfo));
DEBUGMSGTL(("table_row", "Mode %s, Got request:\n",
se_find_label_in_slist("agent_mode", reqinfo->mode)));
/*
 * First off, get our pointer from the handler.
 * This contains the row that was actually registered.
 * Make this available for each of the requests passed in.
 */
row = handler->myvoid;
for (req = requests; req; req = req->next)
netsnmp_request_add_list_data(
req, netsnmp_create_data_list(TABLE_ROW_DATA, row, NULL));
/*
 * Then call the next handler, to actually process the request
 */
rc = netsnmp_call_next_handler(handler, reginfo, reqinfo, requests);
if (rc != SNMP_ERR_NOERROR)
{
DEBUGMSGTL(("table_row", "next handler returned %d\n", rc));
}
return rc;
}
/* Whole-table fallback handler: answers requests for rows that no
 * table_row registration covers. Columns inside the registered column
 * range name valid objects whose instance (row) is absent, so they get
 * SNMP_NOSUCHINSTANCE; columns outside the range are unknown objects and
 * get SNMP_NOSUCHOBJECT.
 *
 * Fix: the original tested `(colnum >= min) || (colnum <= max)`, which is
 * a tautology (every column satisfies at least one side), so the
 * SNMP_NOSUCHOBJECT branch was unreachable; the range test needs `&&`.
 * Also guards against NULL results from the lookup helpers instead of
 * dereferencing them blindly. */
static int _table_row_default_handler(netsnmp_mib_handler* handler,
                                      netsnmp_handler_registration* reginfo,
                                      netsnmp_agent_request_info* reqinfo,
                                      netsnmp_request_info* requests)
{
    netsnmp_request_info* req;
    netsnmp_table_request_info* table_info;
    netsnmp_table_registration_info* tabreg;
    tabreg = netsnmp_find_table_registration_info(reginfo);
    if (NULL == tabreg)
    {
        /* No table registration info: cannot classify the columns. */
        return SNMP_ERR_GENERR;
    }
    for (req = requests; req; req = req->next)
    {
        table_info = netsnmp_extract_table_info(req);
        if (NULL == table_info)
        {
            continue;
        }
        if ((table_info->colnum >= tabreg->min_column) &&
            (table_info->colnum <= tabreg->max_column))
        {
            netsnmp_set_request_error(reqinfo, req, SNMP_NOSUCHINSTANCE);
        }
        else
        {
            netsnmp_set_request_error(reqinfo, req, SNMP_NOSUCHOBJECT);
        }
    }
    return SNMP_ERR_NOERROR;
}
/** @endcond */
/* ==================================
*
* Table Row API: Row operations
*
* This helper doesn't operate with the complete
* table, so these routines are not relevant.
*
* ================================== */
/* ==================================
*
* Table Row API: Index operations
*
* This helper doesn't operate with the complete
* table, so these routines are not relevant.
*
* ================================== */
/** @} */
| 34.602941 | 80 | 0.560455 | [
"object"
] |
368b9f210b0742dd934bdbc5210cd0486c535284 | 4,529 | h | C | src/util/random.h | ashishd/colmap | 30521f19de45c1cb2df8809728e780bf95fc8836 | [
"BSD-3-Clause"
] | 2 | 2022-02-18T04:58:18.000Z | 2022-02-18T04:59:13.000Z | src/util/random.h | hyowonha/colmap | d908cc37cbf97701b589a047274e3a7fbaf17c54 | [
"BSD-3-Clause"
] | null | null | null | src/util/random.h | hyowonha/colmap | d908cc37cbf97701b589a047274e3a7fbaf17c54 | [
"BSD-3-Clause"
] | null | null | null | // Copyright (c) 2022, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
#ifndef COLMAP_SRC_UTIL_RANDOM_H_
#define COLMAP_SRC_UTIL_RANDOM_H_
#include <chrono>
#include <memory>
#include <random>
#include <thread>
#include "util/logging.h"
namespace colmap {
extern thread_local std::unique_ptr<std::mt19937> PRNG;
static int kDefaultPRNGSeed = 0;
// Initialize the PRNG with the given seed.
//
// @param seed The seed for the PRNG. If the seed is -1, the current time
// is used as the seed.
void SetPRNGSeed(unsigned seed = kDefaultPRNGSeed);
// Generate uniformly distributed random integer number.
//
// This implementation is unbiased and thread-safe in contrast to `rand()`.
template <typename T>
T RandomInteger(const T min, const T max);
// Generate uniformly distributed random real number.
//
// This implementation is unbiased and thread-safe in contrast to `rand()`.
template <typename T>
T RandomReal(const T min, const T max);
// Generate Gaussian distributed random real number.
//
// This implementation is unbiased and thread-safe in contrast to `rand()`.
template <typename T>
T RandomGaussian(const T mean, const T stddev);
// Fisher-Yates shuffling.
//
// Note that the vector may not contain more values than UINT32_MAX. This
// restriction comes from the fact that the 32-bit version of the
// Mersenne Twister PRNG is significantly faster.
//
// @param elems Vector of elements to shuffle.
// @param num_to_shuffle Optional parameter, specifying the number of first
// N elements in the vector to shuffle.
template <typename T>
void Shuffle(const uint32_t num_to_shuffle, std::vector<T>* elems);
////////////////////////////////////////////////////////////////////////////////
// Implementation
////////////////////////////////////////////////////////////////////////////////
// Draw a uniformly distributed integer in [min, max], lazily seeding the
// thread-local PRNG on first use.
template <typename T>
T RandomInteger(const T min, const T max) {
  if (!PRNG) {
    SetPRNGSeed();
  }
  std::uniform_int_distribution<T> dist(min, max);
  return dist(*PRNG);
}
// Draw a uniformly distributed real number in [min, max), lazily seeding
// the thread-local PRNG on first use.
template <typename T>
T RandomReal(const T min, const T max) {
  if (!PRNG) {
    SetPRNGSeed();
  }
  return std::uniform_real_distribution<T>(min, max)(*PRNG);
}
// Draw a normally distributed real number with the given mean and standard
// deviation, lazily seeding the thread-local PRNG on first use.
template <typename T>
T RandomGaussian(const T mean, const T stddev) {
  if (!PRNG) {
    SetPRNGSeed();
  }
  return std::normal_distribution<T>(mean, stddev)(*PRNG);
}
// Fisher-Yates shuffle of the first num_to_shuffle elements (each drawn
// uniformly from the remaining tail, so the prefix is an unbiased sample).
template <typename T>
void Shuffle(const uint32_t num_to_shuffle, std::vector<T>* elems) {
  CHECK_LE(num_to_shuffle, elems->size());
  const auto last_idx = static_cast<uint32_t>(elems->size() - 1);
  for (uint32_t i = 0; i < num_to_shuffle; ++i) {
    const uint32_t pick = RandomInteger<uint32_t>(i, last_idx);
    std::iter_swap(elems->begin() + i, elems->begin() + pick);
  }
}
} // namespace colmap
#endif // COLMAP_SRC_UTIL_RANDOM_H_
| 34.052632 | 80 | 0.7017 | [
"vector"
] |
368c59db133e0f5687e9de416bc7f474d8db1cee | 666 | h | C | simdjson/bindings.h | ChrisSmith/simdjson_nodejs | 911c4b4036390a29a7f6008e9744ba831b790001 | [
"Apache-2.0"
] | 411 | 2019-02-27T13:14:27.000Z | 2022-03-31T21:59:44.000Z | simdjson/bindings.h | ChrisSmith/simdjson_nodejs | 911c4b4036390a29a7f6008e9744ba831b790001 | [
"Apache-2.0"
] | 39 | 2019-03-01T07:27:38.000Z | 2022-03-05T02:31:08.000Z | simdjson/bindings.h | ChrisSmith/simdjson_nodejs | 911c4b4036390a29a7f6008e9744ba831b790001 | [
"Apache-2.0"
] | 21 | 2019-02-27T20:09:46.000Z | 2021-12-06T17:37:52.000Z | #include <napi.h>
#include "src/simdjson.h"
namespace simdjsonnode {
using namespace simdjson;
// True iff string p parses as valid JSON (simdjson validation).
bool isValid(std::string p);
// N-API wrapper around isValid().
Napi::Boolean IsValidWrapped(const Napi::CallbackInfo& info);
// Eagerly parse p into a JS object graph.
Napi::Object parse(Napi::Env env, std::string p);
// Recursively convert a simdjson dom::element into a JS value.
Napi::Value makeJSONObject(Napi::Env env, dom::element element);
Napi::Value ParseWrapped(const Napi::CallbackInfo& info);
// Lazy variant: defers conversion until keys are accessed — see
// ValueForKeyPathWrapped/findKeyPath below.
Napi::Object LazyParseWrapped(const Napi::CallbackInfo& info);
Napi::Value ValueForKeyPathWrapped(const Napi::CallbackInfo& info);
// Walk `subpaths` (key path segments) down from element pjh.
Napi::Value findKeyPath(Napi::Env env, std::vector<std::string> subpaths, dom::element pjh);
// Module init: registers the exported bindings.
Napi::Object Init(Napi::Env env, Napi::Object exports);
}
| 33.3 | 94 | 0.74024 | [
"object",
"vector"
] |
368d181f7c6e86a03742a42cda1e11175f5c10ad | 1,120 | h | C | src/adtf/include/laserdata.h | chingoduc/parallel-bayesian-toolbox | 20c06a823c714a51a51e5b59c3232cd1260b0fa4 | [
"BSD-4-Clause-UC"
] | 1 | 2015-12-01T13:15:14.000Z | 2015-12-01T13:15:14.000Z | src/adtf/include/laserdata.h | chingoduc/parallel-bayesian-toolbox | 20c06a823c714a51a51e5b59c3232cd1260b0fa4 | [
"BSD-4-Clause-UC"
] | null | null | null | src/adtf/include/laserdata.h | chingoduc/parallel-bayesian-toolbox | 20c06a823c714a51a51e5b59c3232cd1260b0fa4 | [
"BSD-4-Clause-UC"
] | null | null | null | #ifndef LASERDATA_H
#define LASERDATA_H
#define MEDIA_TYPE_LASERDATA 0x00080001 //TODO: a versioned version would be better
#define MEDIA_SUBTYPE_XYFLOAT 0x00081001
#define MEDIA_SUBTYPE_XYDOUBLE 0x00081002
#define MEDIA_SUBTYPE_XYINT 0x00081003
#define MEDIA_SUBTYPE_DISTANCERAW 0x00081004
#define MEDIA_TYPE_OBJDESC 0x00080010 //TODO: a versioned version would be better
#define MEDIA_SUBTYPE_DETECT 0x00081011
#define MEDIA_SUBTYPE_TRACKED 0x00081012
// Header of a raw laser-scan packet; `count` distance words follow this
// struct in the same buffer (see trailing comment below).
struct LaserDataRaw
{
//laser measurement starting angle in radians
float startAngle;
//laser measurement step angle in radians
float stepAngle;
//scale to convert raw mesaures to meters (e.g. 1.0f/1000.0f - for distances sent in milimiters)
float scaleToMeters;
//number of measurements sent in the packet
unsigned int count;
//here folows the raw distance measures (d0, d1, d2, ...) in unsigned int words
};
// Single 2D laser point; T selects the coordinate type and must match the
// media subtype (float / double / int, see MEDIA_SUBTYPE_XY* above).
template <class T>
struct LaserPoint
{
T x;
T y;
};
// Detected/tracked object description (MEDIA_TYPE_OBJDESC).
// NOTE(review): p0..p3 appear to be the object's outline points listed
// clockwise, but the encoding of four corners in four scalars is not
// evident from this header — confirm against the producing filter.
struct ObjDescription
{
// object position clockwise
float p0;
float p1;
float p2;
float p3;
// object velocity
float vx;
float vy;
};
#endif // LASERDATA_H
| 22.857143 | 97 | 0.76875 | [
"object"
] |
368de1694367aead35304181a546b0e480c64eb4 | 4,475 | h | C | plugins/geometry_calls/include/geometry_calls/AbstractParticleDataCall.h | xge/megamol | 1e298dd3d8b153d7468ed446f6b2ed3ac49f0d5b | [
"BSD-3-Clause"
] | null | null | null | plugins/geometry_calls/include/geometry_calls/AbstractParticleDataCall.h | xge/megamol | 1e298dd3d8b153d7468ed446f6b2ed3ac49f0d5b | [
"BSD-3-Clause"
] | null | null | null | plugins/geometry_calls/include/geometry_calls/AbstractParticleDataCall.h | xge/megamol | 1e298dd3d8b153d7468ed446f6b2ed3ac49f0d5b | [
"BSD-3-Clause"
] | null | null | null | /*
* AbstractParticleDataCall.h
*
* Copyright (C) VISUS 2011 (Universitaet Stuttgart)
* Alle Rechte vorbehalten.
*/
#pragma once
#include "mmcore/AbstractGetData3DCall.h"
#include "vislib/Array.h"
namespace megamol::geocalls {
/**
* Call for multi-stream particle data.
*
* template parameter T is the particle class
*/
// T is the per-list particle class; the call simply owns an array of them.
template<class T>
class AbstractParticleDataCall : public core::AbstractGetData3DCall {
public:
/**
* Gets a human readable description of the module.
*
* @return A human readable description of the module.
*/
static const char* Description(void) {
return "Call to get multi-stream particle sphere data";
}
/**
* Answer the number of functions used for this call.
*
* @return The number of functions used for this call.
*/
static unsigned int FunctionCount(void) {
return AbstractGetData3DCall::FunctionCount();
}
/**
* Answer the name of the function used for this call.
*
* @param idx The index of the function to return it's name.
*
* @return The name of the requested function.
*/
static const char* FunctionName(unsigned int idx) {
return AbstractGetData3DCall::FunctionName(idx);
}
/**
* Accesses the particles of list item 'idx'
*
* @param idx The zero-based index of the particle list to return
*
* @return The requested particle list
*/
T& AccessParticles(unsigned int idx) {
return this->lists[idx];
}
/**
* Accesses the particles of list item 'idx'
*
* @param idx The zero-based index of the particle list to return
*
* @return The requested particle list
*/
const T& AccessParticles(unsigned int idx) const {
return this->lists[idx];
}
/**
* Answer the number of particle lists
*
* @return The number of particle lists
*/
inline unsigned int GetParticleListCount(void) const {
return static_cast<unsigned int>(this->lists.Count());
}
/**
* Sets the number of particle lists. All list items are in undefined
* states afterward.
*
* @param cnt The new number of particle lists
*/
void SetParticleListCount(unsigned int cnt) {
this->lists.SetCount(cnt);
}
/**
* Gets the data defined time stamp
*
* @return The data defined time stamp
*/
inline float GetTimeStamp(void) const {
return timeStamp;
}
/**
* Sets the data defined time stamp
*
* @param timeStamp The new time stamp value
*/
void SetTimeStamp(float timeStamp) {
this->timeStamp = timeStamp;
}
/**
* Assignment operator.
* Makes a deep copy of all members. While for data these are only
* pointers, the pointer to the unlocker object is also copied.
*
* @param rhs The right hand side operand
*
* @return A reference to this
*/
AbstractParticleDataCall<T>& operator=(const AbstractParticleDataCall<T>& rhs);
protected:
/** Ctor. */
AbstractParticleDataCall(void);
/** Dtor. */
virtual ~AbstractParticleDataCall(void);
private:
#ifdef _WIN32
// Silence MSVC C4251 (template member of exported class) for `lists`.
#pragma warning(disable : 4251)
#endif /* _WIN32 */
/** Array of lists of particles */
vislib::Array<T> lists;
#ifdef _WIN32
#pragma warning(default : 4251)
#endif /* _WIN32 */
/** The data defined time stamp */
float timeStamp;
};
/*
* AbstractParticleDataCall<T>::AbstractParticleDataCall
*/
template<class T>
AbstractParticleDataCall<T>::AbstractParticleDataCall(void)
        : AbstractGetData3DCall()
        , lists()
        , timeStamp(0.0f) {
    // Nothing further to initialize; all members are set up above.
}
/*
* AbstractParticleDataCall<T>::~AbstractParticleDataCall
*/
template<class T>
AbstractParticleDataCall<T>::~AbstractParticleDataCall(void) {
    // Release any data lock held by this call before dropping the particle
    // lists, so the data source is not left locked by a destroyed call.
    this->Unlock();
    this->lists.Clear();
}
/*
* AbstractParticleDataCall<T>::operator=
*/
/*
 * AbstractParticleDataCall<T>::operator=
 *
 * Deep-copies the particle lists and the time stamp. The base class
 * assignment also copies the unlocker pointer (see the declaration's
 * documentation). A self-assignment guard is added so that assigning a
 * call to itself does not redundantly re-copy every list element or pass
 * an aliased unlocker pointer through the base class assignment.
 */
template<class T>
AbstractParticleDataCall<T>& AbstractParticleDataCall<T>::operator=(const AbstractParticleDataCall<T>& rhs) {
    if (this != &rhs) {
        AbstractGetData3DCall::operator=(rhs);

        // Element-wise deep copy of all particle lists.
        this->lists.SetCount(rhs.lists.Count());
        for (SIZE_T i = 0; i < this->lists.Count(); i++) {
            this->lists[i] = rhs.lists[i];
        }

        this->timeStamp = rhs.timeStamp;
    }
    return *this;
}
} /* end namespace megamol::geocalls */
| 24.453552 | 109 | 0.626816 | [
"object"
] |
368fd09d064f65d9e380f207abee41b0b24a018b | 1,559 | h | C | include/openmc/tallies/filter_surface.h | stu314159/openmc | 2efe223404680099f9a77214e743ab78e37cd08c | [
"MIT"
] | 1 | 2021-10-09T17:55:14.000Z | 2021-10-09T17:55:14.000Z | include/openmc/tallies/filter_surface.h | huak95/openmc | 922f688978693761f82c4a3764ab05dd96cc8cff | [
"MIT"
] | null | null | null | include/openmc/tallies/filter_surface.h | huak95/openmc | 922f688978693761f82c4a3764ab05dd96cc8cff | [
"MIT"
] | null | null | null | #ifndef OPENMC_TALLIES_FILTER_SURFACE_H
#define OPENMC_TALLIES_FILTER_SURFACE_H
#include <cstdint>
#include <unordered_map>
#include <gsl/gsl>
#include "openmc/tallies/filter.h"
#include "openmc/vector.h"
namespace openmc {
//==============================================================================
//! Specifies which surface particles are crossing
//==============================================================================
class SurfaceFilter : public Filter {
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors

  ~SurfaceFilter() = default;

  //----------------------------------------------------------------------------
  // Methods (overrides of the Filter interface; see the Filter base class
  // for the full contracts)

  std::string type() const override { return "surface"; }

  void from_xml(pugi::xml_node node) override;

  void get_all_bins(const Particle& p, TallyEstimator estimator,
    FilterMatch& match) const override;

  void to_statepoint(hid_t filter_group) const override;

  std::string text_label(int bin) const override;

  //----------------------------------------------------------------------------
  // Accessors

  // Replaces the filter's surface list; rebuilds the surface->bin mapping.
  // NOTE(review): mapping rebuild presumed from the paired members below --
  // confirm against the implementation file.
  void set_surfaces(gsl::span<int32_t> surfaces);

private:
  //----------------------------------------------------------------------------
  // Data members

  //! The indices of the surfaces binned by this filter.
  vector<int32_t> surfaces_;

  //! A map from surface indices to filter bin indices.
  std::unordered_map<int32_t, int> map_;
};
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_SURFACE_H
| 27.350877 | 80 | 0.513791 | [
"vector"
] |
369112368b3553adba0bb1e44c7c85870e936427 | 93,734 | c | C | arch/arm/src/lpc17xx/lpc17_ethernet.c | shirshaksengupta/nuttx-research | b65d26cf34360604dfb588c659babb621b166e32 | [
"BSD-4-Clause"
] | null | null | null | arch/arm/src/lpc17xx/lpc17_ethernet.c | shirshaksengupta/nuttx-research | b65d26cf34360604dfb588c659babb621b166e32 | [
"BSD-4-Clause"
] | null | null | null | arch/arm/src/lpc17xx/lpc17_ethernet.c | shirshaksengupta/nuttx-research | b65d26cf34360604dfb588c659babb621b166e32 | [
"BSD-4-Clause"
] | null | null | null | /****************************************************************************
* arch/arm/src/lpc17xx/lpc17_ethernet.c
*
* Copyright (C) 2010-2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#if defined(CONFIG_NET) && defined(CONFIG_LPC17_ETHERNET)
#include <stdint.h>
#include <stdbool.h>
#include <time.h>
#include <string.h>
#include <debug.h>
#include <errno.h>
#include <arpa/inet.h>
#include <nuttx/wdog.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/net/mii.h>
#include <nuttx/net/netconfig.h>
#include <nuttx/net/arp.h>
#include <nuttx/net/netdev.h>
#ifdef CONFIG_NET_NOINTS
# include <nuttx/wqueue.h>
#endif
#ifdef CONFIG_NET_PKT
# include <nuttx/net/pkt.h>
#endif
#include "up_arch.h"
#include "chip.h"
#include "chip/lpc17_syscon.h"
#include "lpc17_gpio.h"
#include "lpc17_ethernet.h"
#include "lpc17_emacram.h"
#include "lpc17_clrpend.h"
#include <arch/board/board.h>
/* Does this chip have and Ethernet controller? */
#if LPC17_NETHCONTROLLERS > 0
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ************************************************************/
/* If processing is not done at the interrupt level, then high priority
* work queue support is required.
*/
#if defined(CONFIG_NET_NOINTS) && !defined(CONFIG_SCHED_HPWORK)
# error High priority work queue support is required
#endif
/* CONFIG_LPC17_NINTERFACES determines the number of physical interfaces
* that will be supported -- unless it is more than actually supported by the
* hardware!
*/
#if !defined(CONFIG_LPC17_NINTERFACES) || CONFIG_LPC17_NINTERFACES > LPC17_NETHCONTROLLERS
# undef CONFIG_LPC17_NINTERFACES
# define CONFIG_LPC17_NINTERFACES LPC17_NETHCONTROLLERS
#endif
/* The logic here has a few hooks for support for multiple interfaces, but
* that capability is not yet in place (and I won't worry about it until I get
* the first multi-interface LPC17xx).
*/
#if CONFIG_LPC17_NINTERFACES > 1
# warning "Only a single ethernet controller is supported"
# undef CONFIG_LPC17_NINTERFACES
# define CONFIG_LPC17_NINTERFACES 1
#endif
/* If IGMP is enabled, then accept multi-cast frames. */
#if defined(CONFIG_NET_IGMP) && !defined(CONFIG_LPC17_MULTICAST)
# define CONFIG_LPC17_MULTICAST 1
#endif
/* If the user did not specify a priority for Ethernet interrupts, set the
* interrupt priority to the default.
*/
#ifndef CONFIG_NET_PRIORITY
# define CONFIG_NET_PRIORITY NVIC_SYSH_PRIORITY_DEFAULT
#endif
/* Debug Configuration *****************************************************/
/* Register debug -- can only happen of CONFIG_DEBUG_FEATURES is selected */
#ifndef CONFIG_DEBUG_FEATURES
# undef CONFIG_NET_REGDEBUG
#endif
/* CONFIG_NET_DUMPPACKET will dump the contents of each packet to the
* console.
*/
#ifndef CONFIG_DEBUG_FEATURES
# undef CONFIG_NET_DUMPPACKET
#endif
#ifdef CONFIG_NET_DUMPPACKET
# define lpc17_dumppacket(m,a,n) lib_dumpbuffer(m,a,n)
#else
# define lpc17_dumppacket(m,a,n)
#endif
/* Timing *******************************************************************/
/* TX poll deley = 1 seconds. CLK_TCK is the number of clock ticks per second */
#define LPC17_WDDELAY (1*CLK_TCK)
/* TX timeout = 1 minute */
#define LPC17_TXTIMEOUT (60*CLK_TCK)
/* Interrupts ***************************************************************/
#define ETH_RXINTS (ETH_INT_RXOVR | ETH_INT_RXERR | \
ETH_INT_RXFIN | ETH_INT_RXDONE)
#define ETH_TXINTS (ETH_INT_TXUNR | ETH_INT_TXERR | \
ETH_INT_TXFIN | ETH_INT_TXDONE)
/* Misc. Helpers ***********************************************************/
/* This is a helper pointer for accessing the contents of the Ethernet header */
#define BUF ((struct eth_hdr_s *)priv->lp_dev.d_buf)
/* This is the number of ethernet GPIO pins that must be configured */
#define GPIO_NENET_PINS 10
/* PHYs *********************************************************************/
/* Select PHY-specific values. Add more PHYs as needed. */
#if defined(CONFIG_ETH0_PHY_KS8721)
# define LPC17_PHYNAME "KS8721"
# define LPC17_PHYID1 MII_PHYID1_KS8721
# define LPC17_PHYID2 MII_PHYID2_KS8721
# define LPC17_HAVE_PHY 1
#elif defined(CONFIG_ETH0_PHY_KSZ8041)
# define LPC17_PHYNAME "KSZ8041"
# define LPC17_PHYID1 MII_PHYID1_KSZ8041
# define LPC17_PHYID2 MII_PHYID2_KSZ8041
# define LPC17_HAVE_PHY 1
#elif defined(CONFIG_ETH0_PHY_DP83848C)
# define LPC17_PHYNAME "DP83848C"
# define LPC17_PHYID1 MII_PHYID1_DP83848C
# define LPC17_PHYID2 MII_PHYID2_DP83848C
# define LPC17_HAVE_PHY 1
#elif defined(CONFIG_ETH0_PHY_LAN8720)
# define LPC17_PHYNAME "LAN8720"
# define LPC17_PHYID1 MII_PHYID1_LAN8720
# define LPC17_PHYID2 MII_PHYID2_LAN8720
# define LPC17_HAVE_PHY 1
#else
# warning "No PHY specified!"
# undef LPC17_HAVE_PHY
#endif
#define MII_BIG_TIMEOUT 666666
/* These definitions are used to remember the speed/duplex settings */
#define LPC17_SPEED_MASK 0x01
#define LPC17_SPEED_100 0x01
#define LPC17_SPEED_10 0x00
#define LPC17_DUPLEX_MASK 0x02
#define LPC17_DUPLEX_FULL 0x02
#define LPC17_DUPLEX_HALF 0x00
#define LPC17_10BASET_HD (LPC17_SPEED_10 | LPC17_DUPLEX_HALF)
#define LPC17_10BASET_FD (LPC17_SPEED_10 | LPC17_DUPLEX_FULL)
#define LPC17_100BASET_HD (LPC17_SPEED_100 | LPC17_DUPLEX_HALF)
#define LPC17_100BASET_FD (LPC17_SPEED_100 | LPC17_DUPLEX_FULL)
#ifdef CONFIG_PHY_SPEED100
# ifdef CONFIG_PHY_FDUPLEX
# define LPC17_MODE_DEFLT LPC17_100BASET_FD
# else
# define LPC17_MODE_DEFLT LPC17_100BASET_HD
# endif
#else
# ifdef CONFIG_PHY_FDUPLEX
# define LPC17_MODE_DEFLT LPC17_10BASET_FD
# else
# define LPC17_MODE_DEFLT LPC17_10BASET_HD
# endif
#endif
/****************************************************************************
* Private Types
****************************************************************************/
/* The lpc17_driver_s encapsulates all state information for a single hardware
* interface
*/
struct lpc17_driver_s
{
  /* The following fields would only be necessary on chips that support
   * multiple Ethernet controllers.
   */

#if CONFIG_LPC17_NINTERFACES > 1
  uint32_t lp_base;            /* Ethernet controller base address */
  int      lp_irq;             /* Ethernet controller IRQ */
#endif

  bool     lp_ifup;            /* true:ifup false:ifdown */

  /* Speed/duplex encoding using the LPC17_SPEED_* / LPC17_DUPLEX_* bits
   * (LPC17_10BASET_HD .. LPC17_100BASET_FD, values 0x00-0x03).
   *
   * FIX: this was previously declared 'bool'.  C boolean conversion
   * collapses every nonzero mode to 1, which loses the distinction
   * between the four speed/duplex combinations.  A uint8_t preserves
   * both bits and is backward compatible with all masked accesses.
   */

  uint8_t  lp_mode;            /* Speed/duplex bit encoding */

  bool     lp_txpending;       /* There is a pending Tx in lp_dev */
#ifdef LPC17_HAVE_PHY
  uint8_t  lp_phyaddr;         /* PHY device address */
#endif
  uint32_t lp_inten;           /* Shadow copy of INTEN register */
  WDOG_ID  lp_txpoll;          /* TX poll timer */
  WDOG_ID  lp_txtimeout;       /* TX timeout timer */

#ifdef CONFIG_NET_NOINTS
  struct work_s lp_txwork;     /* TX work continuation */
  struct work_s lp_rxwork;     /* RX work continuation */
  struct work_s lp_pollwork;   /* Poll work continuation */
  uint32_t status;             /* Saved interrupt status for deferred work */
#endif /* CONFIG_NET_NOINTS */

  /* This holds the information visible to the NuttX networking layer */

  struct net_driver_s lp_dev;  /* Interface understood by the network layer */
};
/****************************************************************************
* Private Data
****************************************************************************/
/* Array of ethernet driver status structures */
static struct lpc17_driver_s g_ethdrvr[CONFIG_LPC17_NINTERFACES];
/* ENET pins are on P1[0,1,4,6,8,9,10,14,15] + MDC on P1[16] or P2[8] and
* MDIO on P1[17] or P2[9]. The board.h file will define GPIO_ENET_MDC and
* PGIO_ENET_MDIO to selec which pin setting to use.
*
* On older Rev '-' devices, P1[6] ENET-TX_CLK would also have be to configured.
*/
static const uint16_t g_enetpins[GPIO_NENET_PINS] =
{
GPIO_ENET_TXD0, GPIO_ENET_TXD1, GPIO_ENET_TXEN, GPIO_ENET_CRS, GPIO_ENET_RXD0,
GPIO_ENET_RXD1, GPIO_ENET_RXER, GPIO_ENET_REFCLK, GPIO_ENET_MDC, GPIO_ENET_MDIO
};
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/* Register operations */
#ifdef CONFIG_NET_REGDEBUG
static void lpc17_printreg(uint32_t addr, uint32_t val, bool iswrite);
static void lpc17_checkreg(uint32_t addr, uint32_t val, bool iswrite);
static uint32_t lpc17_getreg(uint32_t addr);
static void lpc17_putreg(uint32_t val, uint32_t addr);
#else
# define lpc17_getreg(addr) getreg32(addr)
# define lpc17_putreg(val,addr) putreg32(val,addr)
#endif
/* Common TX logic */
static int lpc17_txdesc(struct lpc17_driver_s *priv);
static int lpc17_transmit(struct lpc17_driver_s *priv);
static int lpc17_txpoll(struct net_driver_s *dev);
/* Interrupt handling */
static void lpc17_response(struct lpc17_driver_s *priv);
static void lpc17_rxdone_process(struct lpc17_driver_s *priv);
static void lpc17_txdone_process(struct lpc17_driver_s *priv);
#ifdef CONFIG_NET_NOINTS
static void lpc17_txdone_work(FAR void *arg);
static void lpc17_rxdone_work(FAR void *arg);
#endif /* CONFIG_NET_NOINTS */
static int lpc17_interrupt(int irq, void *context);
/* Watchdog timer expirations */
static void lpc17_txtimeout_process(FAR struct lpc17_driver_s *priv);
#ifdef CONFIG_NET_NOINTS
static void lpc17_txtimeout_work(FAR void *arg);
#endif /* CONFIG_NET_NOINTS */
static void lpc17_txtimeout_expiry(int argc, uint32_t arg, ...);
static void lpc17_poll_process(FAR struct lpc17_driver_s *priv);
#ifdef CONFIG_NET_NOINTS
static void lpc17_poll_work(FAR void *arg);
#endif /* CONFIG_NET_NOINTS */
static void lpc17_poll_expiry(int argc, uint32_t arg, ...);
/* NuttX callback functions */
#ifdef CONFIG_NET_ICMPv6
static void lpc17_ipv6multicast(FAR struct lpc17_driver_s *priv);
#endif
static int lpc17_ifup(struct net_driver_s *dev);
static int lpc17_ifdown(struct net_driver_s *dev);
static void lpc17_txavail_process(FAR struct lpc17_driver_s *priv);
#ifdef CONFIG_NET_NOINTS
static void lpc17_txavail_work(FAR void *arg);
#endif
static int lpc17_txavail(struct net_driver_s *dev);
#if defined(CONFIG_NET_IGMP) || defined(CONFIG_NET_ICMPv6)
static uint32_t lpc17_calcethcrc(const uint8_t *data, size_t length);
static int lpc17_addmac(struct net_driver_s *dev, const uint8_t *mac);
#endif
#ifdef CONFIG_NET_IGMP
static int lpc17_rmmac(struct net_driver_s *dev, const uint8_t *mac);
#endif
/* Initialization functions */
#if defined(CONFIG_NET_REGDEBUG) && defined(CONFIG_DEBUG_GPIO)
static void lpc17_showpins(void);
#else
# define lpc17_showpins()
#endif
/* PHY initialization functions */
#ifdef LPC17_HAVE_PHY
# ifdef CONFIG_NET_REGDEBUG
static void lpc17_showmii(uint8_t phyaddr, const char *msg);
# else
# define lpc17_showmii(phyaddr,msg)
# endif
static void lpc17_phywrite(uint8_t phyaddr, uint8_t regaddr,
uint16_t phydata);
static uint16_t lpc17_phyread(uint8_t phyaddr, uint8_t regaddr);
static inline int lpc17_phyreset(uint8_t phyaddr);
# ifdef CONFIG_PHY_AUTONEG
static inline int lpc17_phyautoneg(uint8_t phyaddr);
# endif
static int lpc17_phymode(uint8_t phyaddr, uint8_t mode);
static inline int lpc17_phyinit(struct lpc17_driver_s *priv);
#else
# define lpc17_phyinit(priv)
#endif
/* EMAC Initialization functions */
static inline void lpc17_txdescinit(struct lpc17_driver_s *priv);
static inline void lpc17_rxdescinit(struct lpc17_driver_s *priv);
static void lpc17_macmode(uint8_t mode);
static void lpc17_ethreset(struct lpc17_driver_s *priv);
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: lpc17_printreg
*
* Description:
* Print the contents of an LPC17xx register operation
*
****************************************************************************/
#ifdef CONFIG_NET_REGDEBUG
static void lpc17_printreg(uint32_t addr, uint32_t val, bool iswrite)
{
  /* Render the access as "<addr><-<val>" for a write and "<addr>-><val>"
   * for a read.
   */

  const char *dir;

  if (iswrite)
    {
      dir = "<-";
    }
  else
    {
      dir = "->";
    }

  err("%08x%s%08x\n", addr, dir, val);
}
#endif
/****************************************************************************
* Name: lpc17_checkreg
*
* Description:
* Get the contents of an LPC17xx register
*
****************************************************************************/
#ifdef CONFIG_NET_REGDEBUG
static void lpc17_checkreg(uint32_t addr, uint32_t val, bool iswrite)
{
  /* State of the last-seen access, used to suppress noisy output when the
   * same register is polled with the same value repeatedly.
   */

  static uint32_t prevaddr = 0;
  static uint32_t preval = 0;
  static uint32_t count = 0;
  static bool prevwrite = false;

  /* Is this the same value that we read from/wrote to the same register last time?
   * Are we polling the register?  If so, suppress the output.
   */

  if (addr == prevaddr && val == preval && prevwrite == iswrite)
    {
      /* Yes.. Just increment the count */

      count++;
    }
  else
    {
      /* No this is a new address or value or operation.  Were there any
       * duplicate accesses before this one?
       */

      if (count > 0)
        {
          /* Yes.. Just one? */

          if (count == 1)
            {
              /* Yes.. Just one.  Print it out normally instead of the
               * "[repeats ...]" summary.
               */

              lpc17_printreg(prevaddr, preval, prevwrite);
            }
          else
            {
              /* No.. More than one. */

              err("[repeats %d more times]\n", count);
            }
        }

      /* Save the new address, value, count, and operation for next time */

      prevaddr  = addr;
      preval    = val;
      count     = 0;
      prevwrite = iswrite;

      /* Show the new register access */

      lpc17_printreg(addr, val, iswrite);
    }
}
#endif
/****************************************************************************
* Name: lpc17_getreg
*
* Description:
* Get the contents of an LPC17xx register
*
****************************************************************************/
#ifdef CONFIG_NET_REGDEBUG
static uint32_t lpc17_getreg(uint32_t addr)
{
  uint32_t regval;

  /* Fetch the register contents, then run the access through the
   * duplicate-suppressing trace logic before returning it.
   */

  regval = getreg32(addr);
  lpc17_checkreg(addr, regval, false);
  return regval;
}
#endif
/****************************************************************************
* Name: lpc17_putreg
*
* Description:
* Set the contents of an LPC17xx register to a value
*
****************************************************************************/
#ifdef CONFIG_NET_REGDEBUG
static void lpc17_putreg(uint32_t val, uint32_t addr)
{
  /* Trace the access first (mirroring lpc17_getreg)... */

  lpc17_checkreg(addr, val, true);

  /* ... then perform the actual register write */

  putreg32(val, addr);
}
#endif
/****************************************************************************
* Function: lpc17_txdesc
*
* Description:
* Check if a free TX descriptor is available.
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* OK on success; a negated errno on failure
*
* Assumptions:
* May or may not be called from an interrupt handler. In either case,
* global interrupts are disabled, either explicitly or indirectly through
* interrupt handling logic.
*
****************************************************************************/
static int lpc17_txdesc(struct lpc17_driver_s *priv)
{
  unsigned int nextprod;

  /* Compute the producer index that would follow the current one,
   * wrapping modulo the number of Tx descriptors.
   */

  nextprod = (lpc17_getreg(LPC17_ETH_TXPRODIDX) & ETH_TXPRODIDX_MASK) + 1;
  if (nextprod >= CONFIG_NET_NTXDESC)
    {
      nextprod = 0;
    }

  /* The ring is full exactly when advancing the producer index would make
   * it collide with the consumer index.
   */

  if (nextprod == (lpc17_getreg(LPC17_ETH_TXCONSIDX) & ETH_TXCONSIDX_MASK))
    {
      return -EAGAIN;
    }

  return OK;
}
/****************************************************************************
* Function: lpc17_transmit
*
* Description:
* Start hardware transmission. Called either from the txdone interrupt
* handling or from watchdog based polling.
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* OK on success; a negated errno on failure
*
* Assumptions:
* May or may not be called from an interrupt handler. In either case,
* global interrupts are disabled, either explicitly or indirectly through
* interrupt handling logic.
*
****************************************************************************/
static int lpc17_transmit(struct lpc17_driver_s *priv)
{
  uint32_t *txdesc;
  void *txbuffer;
  unsigned int prodidx;

  /* Verify that the hardware is ready to send another packet.  If we get
   * here, then we are committed to sending a packet; Higher level logic
   * must have assured that there is no transmission in progress.
   */

  DEBUGASSERT(lpc17_txdesc(priv) == OK);

  /* Increment statistics and dump the packet (if so configured) */

  NETDEV_TXPACKETS(&priv->lp_dev);
  lpc17_dumppacket("Transmit packet",
                   priv->lp_dev.d_buf, priv->lp_dev.d_len);

  /* Get the current producer index */

  prodidx = lpc17_getreg(LPC17_ETH_TXPRODIDX) & ETH_TXPRODIDX_MASK;

  /* Get the packet address from the descriptor and set the descriptor control
   * fields.  Each descriptor is two 32-bit words (hence the << 3 scaling):
   * word 0 is the buffer address and word 1 the control word.  The length
   * field is encoded as (size - 1) per the control word format.
   */

  txdesc   = (uint32_t *)(LPC17_TXDESC_BASE + (prodidx << 3));
  txbuffer = (void *)*txdesc++;
  *txdesc  = TXDESC_CONTROL_INT | TXDESC_CONTROL_LAST | TXDESC_CONTROL_CRC |
             (priv->lp_dev.d_len - 1);

  /* Copy the packet data into the Tx buffer assigned to this descriptor.  It
   * should fit because each packet buffer is the MTU size and breaking up
   * larger TCP messages is handled by higher level logic.  The hardware
   * does, however, support breaking up larger messages into many fragments,
   * however, that capability is not exploited here.
   *
   * This would be a great performance improvement:  Remove the buffer from
   * the lp_dev structure and replace it a pointer directly into the EMAC
   * DMA memory.  This could eliminate the following, costly memcpy.
   */

  DEBUGASSERT(priv->lp_dev.d_len <= LPC17_MAXPACKET_SIZE);
  memcpy(txbuffer, priv->lp_dev.d_buf, priv->lp_dev.d_len);

  /* Bump the producer index, making the packet available for transmission. */

  if (++prodidx >= CONFIG_NET_NTXDESC)
    {
      /* Wrap back to index zero */

      prodidx = 0;
    }

  lpc17_putreg(prodidx, LPC17_ETH_TXPRODIDX);

  /* Enable Tx interrupts */

  priv->lp_inten |= ETH_TXINTS;
  lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);

  /* Setup the TX timeout watchdog (perhaps restarting the timer) */

  (void)wd_start(priv->lp_txtimeout, LPC17_TXTIMEOUT, lpc17_txtimeout_expiry,
                 1, (uint32_t)priv);

  return OK;
}
/****************************************************************************
* Function: lpc17_txpoll
*
* Description:
* The transmitter is available, check if the network layer has any
* outgoing packets ready to send. This is a callback from devif_poll().
* devif_poll() may be called:
*
* 1. When the preceding TX packet send is complete,
* 2. When the preceding TX packet send timesout and the interface is reset
* 3. During normal TX polling
*
* Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* OK on success; a negated errno on failure
*
* Assumptions:
* May or may not be called from an interrupt handler. In either case,
* global interrupts are disabled, either explicitly or indirectly through
* interrupt handling logic.
*
****************************************************************************/
static int lpc17_txpoll(struct net_driver_s *dev)
{
  struct lpc17_driver_s *priv = (struct lpc17_driver_s *)dev->d_private;
  int ret = OK;

  /* If the polling resulted in data that should be sent out on the network,
   * the field d_len is set to a value > 0.
   */

  if (priv->lp_dev.d_len > 0)
    {
      /* Look up the destination MAC address and add it to the Ethernet
       * header.
       */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
      if (IFF_IS_IPv4(priv->lp_dev.d_flags))
#endif
        {
          arp_out(&priv->lp_dev);
        }
#endif /* CONFIG_NET_IPv4 */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
      else
#endif
        {
          neighbor_out(&priv->lp_dev);
        }
#endif /* CONFIG_NET_IPv6 */

      /* Send this packet.  In this context, we know that there is space for
       * at least one more packet in the descriptor list.
       */

      lpc17_transmit(priv);

      /* Check if there is room in the device to hold another packet.  If not,
       * return any non-zero value to terminate the poll.
       */

      ret = lpc17_txdesc(priv);
    }

  /* If zero is returned, the polling will continue until all connections have
   * been examined.
   */

  return ret;
}
/****************************************************************************
* Function: lpc17_response
*
* Description:
* While processing an RxDone event, higher logic decides to send a packet,
* possibly a response to the incoming packet (but probably not, in reality).
* However, since the Rx and Tx operations are decoupled, there is no
* guarantee that there will be a Tx descriptor available at that time.
* This function will perform that check and, if no Tx descriptor is
* available, this function will (1) stop incoming Rx processing (bad), and
* (2) hold the outgoing packet in a pending state until the next Tx
* interrupt occurs.
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by interrupt handling logic.
*
****************************************************************************/
static void lpc17_response(struct lpc17_driver_s *priv)
{
  /* Is a Tx descriptor available right now? */

  if (lpc17_txdesc(priv) == OK)
    {
      /* Yes.. hand the outgoing packet to the hardware immediately. */

      lpc17_transmit(priv);
    }
  else
    {
      /* No.. remember that a Tx is pending and mask the Rx interrupts so
       * that no further Rx processing (which could queue yet more Tx
       * traffic) occurs until the pending packet can be sent from the Tx
       * done interrupt handling.
       */

      DEBUGASSERT((priv->lp_inten & ETH_INT_TXDONE) != 0);
      priv->lp_txpending = true;

      priv->lp_inten &= ~ETH_RXINTS;
      lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
    }
}
/****************************************************************************
* Function: lpc17_rxdone_process
*
* Description:
* An interrupt was received indicating the availability of a new RX packet
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by interrupt handling logic.
*
****************************************************************************/
static void lpc17_rxdone_process(struct lpc17_driver_s *priv)
{
  uint32_t *rxstat;
  bool fragment;
  unsigned int prodidx;
  unsigned int considx;
  unsigned int pktlen;

  /* Get the current producer and consumer indices */

  considx = lpc17_getreg(LPC17_ETH_RXCONSIDX) & ETH_RXCONSIDX_MASK;
  prodidx = lpc17_getreg(LPC17_ETH_RXPRODIDX) & ETH_RXPRODIDX_MASK;

  /* Loop while there are incoming packets to be processed, that is, while
   * the producer index is not equal to the consumer index.
   */

  fragment = false;
  while (considx != prodidx)
    {
      /* Update statistics */

      NETDEV_RXPACKETS(&priv->lp_dev);

      /* Get the Rx status and packet length (-4+1).  Each status entry is
       * two 32-bit words, hence the << 3 scaling of the consumer index.
       */

      rxstat = (uint32_t *)(LPC17_RXSTAT_BASE + (considx << 3));
      pktlen = (*rxstat & RXSTAT_INFO_RXSIZE_MASK) - 3;

      /* Check for errors.  NOTE:  The DMA engine reports bogus length errors,
       * making this a pretty useless check.  Note also that an error does
       * not skip the subsequent size/fragment checks (the 'else' below is
       * intentionally commented out).
       */

      if ((*rxstat & RXSTAT_INFO_ERROR) != 0)
        {
          nllerr("Error. considx: %08x prodidx: %08x rxstat: %08x\n",
                 considx, prodidx, *rxstat);
          NETDEV_RXERRORS(&priv->lp_dev);
        }

      /* If the pktlen is greater then the buffer, then we cannot accept
       * the packet.  Also, since the DMA packet buffers are set up to
       * be the same size as our max packet size, any fragments also
       * imply that the packet is too big.
       */

      /* else */ if (pktlen > CONFIG_NET_ETH_MTU + CONFIG_NET_GUARDSIZE)
        {
          nllerr("Too big. considx: %08x prodidx: %08x pktlen: %d rxstat: %08x\n",
                 considx, prodidx, pktlen, *rxstat);
          NETDEV_RXERRORS(&priv->lp_dev);
        }
      else if ((*rxstat & RXSTAT_INFO_LASTFLAG) == 0)
        {
          nllerr("Fragment. considx: %08x prodidx: %08x pktlen: %d rxstat: %08x\n",
                 considx, prodidx, pktlen, *rxstat);
          NETDEV_RXFRAGMENTS(&priv->lp_dev);
          fragment = true;
        }
      else if (fragment)
        {
          nllerr("Last fragment. considx: %08x prodidx: %08x pktlen: %d rxstat: %08x\n",
                 considx, prodidx, pktlen, *rxstat);
          NETDEV_RXFRAGMENTS(&priv->lp_dev);
          fragment = false;
        }
      else
        {
          uint32_t *rxdesc;
          void *rxbuffer;

          /* Get the Rx buffer address from the Rx descriptor */

          rxdesc   = (uint32_t *)(LPC17_RXDESC_BASE + (considx << 3));
          rxbuffer = (void *)*rxdesc;

          /* Copy the data from the EMAC DMA RAM to priv->lp_dev.d_buf.
           * Set amount of data in priv->lp_dev.d_len
           *
           * Here would be a great performance improvement:  Remove the
           * buffer from the lp_dev structure and replace it with a pointer
           * directly into the EMAC DMA memory.  This could eliminate the
           * following, costly memcpy.
           */

          memcpy(priv->lp_dev.d_buf, rxbuffer, pktlen);
          priv->lp_dev.d_len = pktlen;

          lpc17_dumppacket("Received packet",
                           priv->lp_dev.d_buf, priv->lp_dev.d_len);

#ifdef CONFIG_NET_PKT
          /* When packet sockets are enabled, feed the frame into the packet
           * tap.
           */

          pkt_input(&priv->lp_dev);
#endif

          /* We only accept IP packets of the configured type and ARP packets */

#ifdef CONFIG_NET_IPv4
          if (BUF->type == HTONS(ETHTYPE_IP))
            {
              nllinfo("IPv4 frame\n");
              NETDEV_RXIPV4(&priv->lp_dev);

              /* Handle ARP on input then give the IPv4 packet to the
               * network layer
               */

              arp_ipin(&priv->lp_dev);
              ipv4_input(&priv->lp_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the field  d_len will
               * set to a value > 0.
               */

              if (priv->lp_dev.d_len > 0)
                {
                  /* Update the Ethernet header with the correct MAC address */

#ifdef CONFIG_NET_IPv6
                  if (IFF_IS_IPv4(priv->lp_dev.d_flags))
#endif
                    {
                      arp_out(&priv->lp_dev);
                    }
#ifdef CONFIG_NET_IPv6
                  else
                    {
                      neighbor_out(&priv->lp_dev);
                    }
#endif

                  /* And send the packet */

                  lpc17_response(priv);
                }
            }
          else
#endif
#ifdef CONFIG_NET_IPv6
          if (BUF->type == HTONS(ETHTYPE_IP6))
            {
              nllinfo("Iv6 frame\n");
              NETDEV_RXIPV6(&priv->lp_dev);

              /* Give the IPv6 packet to the network layer */

              ipv6_input(&priv->lp_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the field  d_len will
               * set to a value > 0.
               */

              if (priv->lp_dev.d_len > 0)
                {
                  /* Update the Ethernet header with the correct MAC address */

#ifdef CONFIG_NET_IPv4
                  if (IFF_IS_IPv4(priv->lp_dev.d_flags))
                    {
                      arp_out(&priv->lp_dev);
                    }
                  else
#endif
#ifdef CONFIG_NET_IPv6
                    {
                      neighbor_out(&priv->lp_dev);
                    }
#endif

                  /* And send the packet */

                  lpc17_response(priv);
                }
            }
          else
#endif
#ifdef CONFIG_NET_ARP
          if (BUF->type == htons(ETHTYPE_ARP))
            {
              NETDEV_RXARP(&priv->lp_dev);
              arp_arpin(&priv->lp_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the field  d_len will
               * set to a value > 0.
               */

              if (priv->lp_dev.d_len > 0)
                {
                  lpc17_response(priv);
                }
            }
          else
#endif
            {
              /* Unrecognized... drop it. */

              NETDEV_RXDROPPED(&priv->lp_dev);
            }
        }

      /* Bump up the consumer index and resample the producer index (which
       * might also have gotten bumped up by the hardware).
       */

      if (++considx >= CONFIG_NET_NRXDESC)
        {
          /* Wrap back to index zero */

          considx = 0;
        }

      lpc17_putreg(considx, LPC17_ETH_RXCONSIDX);
      prodidx = lpc17_getreg(LPC17_ETH_RXPRODIDX) & ETH_RXPRODIDX_MASK;
    }
}
/****************************************************************************
* Function: lpc17_txdone_process
*
* Description:
* An interrupt was received indicating that the last TX packet(s) is done
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by interrupt handling logic.
*
****************************************************************************/
static void lpc17_txdone_process(struct lpc17_driver_s *priv)
{
  /* A TX transfer just completed, so a TX descriptor must be free now. */

  DEBUGASSERT(lpc17_txdesc(priv) == OK);

  if (!priv->lp_txpending)
    {
      /* No deferred transmission; ask the network layer for new XMIT data. */

      (void)devif_poll(&priv->lp_dev, lpc17_txpoll);
      return;
    }

  /* RX handling deferred a transmission while the TX logic was busy.  Send
   * that pending packet now, then turn RX interrupts back on.
   */

  priv->lp_txpending = false;
  lpc17_transmit(priv);

  priv->lp_inten |= ETH_RXINTS;
  lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
}
/****************************************************************************
* Function: lpc17_txdone_work and lpc17_rxdone_work
*
* Description:
* Perform interrupt handling logic outside of the interrupt handler (on
* the work queue thread).
*
* Parameters:
 * arg - The reference to the driver structure (cast to void*)
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
#ifdef CONFIG_NET_NOINTS
static void lpc17_txdone_work(FAR void *arg)
{
  FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)arg;
  net_lock_t lockstate;

  DEBUGASSERT(priv != NULL);

  /* Run the deferred TX-done handling with the network locked.  TX
   * interrupts were disabled before this work was scheduled; they may be
   * re-enabled again by lpc17_txdone_process().
   */

  lockstate = net_lock();
  lpc17_txdone_process(priv);
  net_unlock(lockstate);
}
static void lpc17_rxdone_work(FAR void *arg)
{
FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)arg;
irqstate_t flags;
net_lock_t state;
DEBUGASSERT(priv);
/* Perform pending RX work. RX interrupts were disabled prior to
 * scheduling this work to prevent work queue overruns.
 */
state = net_lock();
lpc17_rxdone_process(priv);
net_unlock(state);
/* Re-enable RX interrupts (this must be atomic). Skip this step if the
 * lp-txpending TX underrun state is in effect; in that case
 * lpc17_txdone_process() restores the RX interrupts after the deferred
 * transmission has been sent.
 */
flags = enter_critical_section();
if (!priv->lp_txpending)
{
priv->lp_inten |= ETH_RXINTS;
lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
}
leave_critical_section(flags);
}
#endif /* CONFIG_NET_NOINTS */
/****************************************************************************
* Function: lpc17_interrupt
*
* Description:
* Hardware interrupt handler
*
* Parameters:
* irq - Number of the IRQ that generated the interrupt
* context - Interrupt register state save info (architecture-specific)
*
* Returned Value:
* OK on success
*
* Assumptions:
*
****************************************************************************/
static int lpc17_interrupt(int irq, void *context)
{
register struct lpc17_driver_s *priv;
uint32_t status;
#if CONFIG_LPC17_NINTERFACES > 1
# error "A mechanism to associate and interface with an IRQ is needed"
#else
priv = &g_ethdrvr[0];
#endif
/* Get the interrupt status (zero means no interrupts pending). */
status = lpc17_getreg(LPC17_ETH_INTST);
if (status != 0)
{
/* Clear all pending interrupts */
lpc17_putreg(status, LPC17_ETH_INTCLR);
/* Handle each pending interrupt **************************************/
/* Check for Wake-Up on LAN *******************************************/
#ifdef CONFIG_NET_WOL
if ((status & ETH_INT_WKUP) != 0)
{
# warning "Missing logic"
}
else
#endif
/* Fatal Errors *******************************************************/
/* RX OVERRUN -- Fatal overrun error in the receive queue. The fatal
 * interrupt should be resolved by a Rx soft-reset. The bit is not
 * set when there is a nonfatal overrun error.
 *
 * TX UNDERRUN -- Interrupt set on a fatal underrun error in the
 * transmit queue. The fatal interrupt should be resolved by a Tx
 * soft-reset. The bit is not set when there is a nonfatal underrun
 * error.
 */
if ((status & (ETH_INT_RXOVR | ETH_INT_TXUNR)) != 0)
{
if ((status & ETH_INT_RXOVR) != 0)
{
nllerr("RX Overrun. status: %08x\n", status);
NETDEV_RXERRORS(&priv->lp_dev);
}
if ((status & ETH_INT_TXUNR) != 0)
{
nllerr("TX Underrun. status: %08x\n", status);
NETDEV_TXERRORS(&priv->lp_dev);
}
/* ifup() will reset the EMAC and bring it back up */
(void)lpc17_ifup(&priv->lp_dev);
}
else
{
/* Check for receive events ***************************************/
/* RX ERROR -- Triggered on receive errors: AlignmentError,
 * RangeError, LengthError, SymbolError, CRCError or NoDescriptor
 * or Overrun. NOTE: (1) We will still need to call lpc17_rxdone_process
 * on RX errors to bump the considx over the bad packet. (2) The
 * DMA engine reports bogus length errors, making this a pretty
 * useless check anyway.
 */
if ((status & ETH_INT_RXERR) != 0)
{
nllerr("RX Error. status: %08x\n", status);
NETDEV_RXERRORS(&priv->lp_dev);
}
/* RX FINISHED -- Triggered when all receive descriptors have
 * been processed i.e. on the transition to the situation
 * where ProduceIndex == ConsumeIndex.
 *
 * Treated as INT_RX_DONE if ProduceIndex != ConsumeIndex so the
 * packets are processed anyway.
 *
 * RX DONE -- Triggered when a receive descriptor has been
 * processed while the Interrupt bit in the Control field of
 * the descriptor was set.
 */
if ((status & ETH_INT_RXFIN) != 0 || (status & ETH_INT_RXDONE) != 0)
{
/* We have received at least one new incoming packet. */
#ifdef CONFIG_NET_NOINTS
/* Disable further RX interrupts for now. RX interrupts will
 * be re-enabled after the work has been processed (in
 * lpc17_rxdone_work()).
 */
priv->lp_inten &= ~ETH_RXINTS;
lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
/* Cancel any pending RX done work */
work_cancel(HPWORK, &priv->lp_rxwork);
/* Schedule RX-related work to be performed on the work thread */
work_queue(HPWORK, &priv->lp_rxwork, (worker_t)lpc17_rxdone_work,
priv, 0);
#else /* CONFIG_NET_NOINTS */
lpc17_rxdone_process(priv);
#endif /* CONFIG_NET_NOINTS */
}
/* Check for Tx events ********************************************/
/* TX ERROR -- Triggered on transmit errors: LateCollision,
 * ExcessiveCollision and ExcessiveDefer, NoDescriptor or Underrun.
 * NOTE: We will still need to call lpc17_txdone_process() in order to
 * clean up after the failed transmit.
 */
if ((status & ETH_INT_TXERR) != 0)
{
nllerr("TX Error. status: %08x\n", status);
NETDEV_TXERRORS(&priv->lp_dev);
}
#if 0
/* TX FINISHED -- Triggered when all transmit descriptors have
 * been processed i.e. on the transition to the situation
 * where ProduceIndex == ConsumeIndex.
 */
if ((status & ETH_INT_TXFIN) != 0)
{
}
#endif
/* TX DONE -- Triggered when a descriptor has been transmitted
 * while the Interrupt bit in the Control field of the
 * descriptor was set.
 */
if ((status & ETH_INT_TXDONE) != 0)
{
NETDEV_TXDONE(&priv->lp_dev);
/* A packet transmission just completed */
/* Cancel the pending Tx timeout */
wd_cancel(priv->lp_txtimeout);
/* Disable further Tx interrupts. Tx interrupts may be
 * re-enabled again depending upon the actions of
 * lpc17_txdone_process()
 */
priv->lp_inten &= ~ETH_TXINTS;
lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
#ifdef CONFIG_NET_NOINTS
/* Cancel any pending TX done work (to prevent overruns and also
 * to avoid race conditions with the TX timeout work)
 */
work_cancel(HPWORK, &priv->lp_txwork);
/* Then make sure that the TX poll timer is running (if it is
 * already running, the following would restart it). This is
 * necessary to avoid certain race conditions where the polling
 * sequence can be interrupted.
 */
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY, lpc17_poll_expiry,
1, priv);
/* Schedule TX-related work to be performed on the work thread */
work_queue(HPWORK, &priv->lp_txwork, (worker_t)lpc17_txdone_work,
priv, 0);
#else /* CONFIG_NET_NOINTS */
/* Perform the TX work at the interrupt level */
lpc17_txdone_process(priv);
#endif /* CONFIG_NET_NOINTS */
}
}
}
/* Clear the pending interrupt */
#if 0 /* Apparently not necessary */
# if CONFIG_LPC17_NINTERFACES > 1
lpc17_clrpend(priv->irq);
# else
lpc17_clrpend(LPC17_IRQ_ETH);
# endif
#endif
return OK;
}
/****************************************************************************
* Function: lpc17_txtimeout_process
*
* Description:
 * Process a TX timeout. Called from either the watchdog timer
* expiration logic or from the worker thread, depending upon the
* configuration. The timeout means that the last TX never completed.
* Reset the hardware and start again.
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
****************************************************************************/
static void lpc17_txtimeout_process(FAR struct lpc17_driver_s *priv)
{
  /* Count the timeout in the device statistics */

  NETDEV_TXTIMEOUTS(&priv->lp_dev);

  if (!priv->lp_ifup)
    {
      /* Nothing more to do unless the interface is up */

      return;
    }

  /* Reset the hardware; lpc17_ifup() resets and restarts the interface.
   * Then kick off a fresh TX poll of the network layer.
   */

  (void)lpc17_ifup(&priv->lp_dev);
  (void)devif_poll(&priv->lp_dev, lpc17_txpoll);
}
/****************************************************************************
* Function: lpc17_txtimeout_work
*
* Description:
* Perform TX timeout related work from the worker thread
*
* Parameters:
 * arg - The argument passed when work_queue() was called.
*
* Returned Value:
* OK on success
*
* Assumptions:
* Ethernet interrupts are disabled
*
****************************************************************************/
#ifdef CONFIG_NET_NOINTS
static void lpc17_txtimeout_work(FAR void *arg)
{
  FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)arg;
  net_lock_t lockstate;

  /* Handle the TX timeout while holding the network lock */

  lockstate = net_lock();
  lpc17_txtimeout_process(priv);
  net_unlock(lockstate);
}
#endif
/****************************************************************************
* Function: lpc17_txtimeout_expiry
*
* Description:
* Our TX watchdog timed out. Called from the timer interrupt handler.
* The last TX never completed. Reset the hardware and start again.
*
* Parameters:
* argc - The number of available arguments
* arg - The first argument
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by the watchdog logic.
*
****************************************************************************/
static void lpc17_txtimeout_expiry(int argc, uint32_t arg, ...)
{
struct lpc17_driver_s *priv = (struct lpc17_driver_s *)arg;
/* Disable further Tx interrupts. Tx interrupts may be re-enabled again
 * depending upon the actions of lpc17_poll_process()
 */
priv->lp_inten &= ~ETH_TXINTS;
lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
#ifdef CONFIG_NET_NOINTS
/* Is the single TX work structure available? If not, then there is
 * pending TX work to be done and this must be a false alarm TX timeout.
 */
if (work_available(&priv->lp_txwork))
{
/* Schedule to perform the timeout processing on the worker thread. */
work_queue(HPWORK, &priv->lp_txwork, lpc17_txtimeout_work, priv, 0);
}
#else
/* Process the timeout now, directly from the watchdog expiration */
lpc17_txtimeout_process(priv);
#endif
}
/****************************************************************************
* Function: lpc17_poll_process
*
* Description:
* Perform the periodic poll. This may be called either from watchdog
* timer logic or from the worker thread, depending upon the configuration.
*
* Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
static void lpc17_poll_process(FAR struct lpc17_driver_s *priv)
{
unsigned int prodidx;
unsigned int considx;
/* Check if there is room to send another TX packet. We cannot perform
 * the TX poll if we are unable to accept another packet for transmission.
 */
if (lpc17_txdesc(priv) == OK)
{
/* If so, update TCP timing states and poll the network layer for new
 * XMIT data. Hmmm.. might be a bug here. Does this mean if there is a
 * transmit in progress, we will miss TCP time state updates?
 */
(void)devif_timer(&priv->lp_dev, lpc17_txpoll);
}
/* Simulate a fake receive to relaunch the data exchanges when a receive
 * interrupt has been lost and all the receive buffers are used.
 */
/* Get the current producer and consumer indices */
considx = lpc17_getreg(LPC17_ETH_RXCONSIDX) & ETH_RXCONSIDX_MASK;
prodidx = lpc17_getreg(LPC17_ETH_RXPRODIDX) & ETH_RXPRODIDX_MASK;
if (considx != prodidx)
{
/* The hardware holds unprocessed RX descriptors; handle them as if an
 * RX interrupt had occurred.
 */
#ifdef CONFIG_NET_NOINTS
work_queue(HPWORK, &priv->lp_rxwork, (worker_t)lpc17_rxdone_work,
priv, 0);
#else /* CONFIG_NET_NOINTS */
lpc17_rxdone_process(priv);
#endif /* CONFIG_NET_NOINTS */
}
/* Setup the watchdog poll timer again */
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY, lpc17_poll_expiry,
1, priv);
}
/****************************************************************************
* Function: lpc17_poll_work
*
* Description:
* Perform periodic polling from the worker thread
*
* Parameters:
 * arg - The argument passed when work_queue() was called.
*
* Returned Value:
* OK on success
*
* Assumptions:
* Ethernet interrupts are disabled
*
****************************************************************************/
#ifdef CONFIG_NET_NOINTS
static void lpc17_poll_work(FAR void *arg)
{
  FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)arg;
  net_lock_t lockstate;

  /* Run the periodic poll while holding the network lock */

  lockstate = net_lock();
  lpc17_poll_process(priv);
  net_unlock(lockstate);
}
#endif
/****************************************************************************
* Function: lpc17_poll_expiry
*
* Description:
* Periodic timer handler. Called from the timer interrupt handler.
*
* Parameters:
* argc - The number of available arguments
* arg - The first argument
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by the watchdog logic.
*
****************************************************************************/
static void lpc17_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)arg;
DEBUGASSERT(arg);
#ifdef CONFIG_NET_NOINTS
/* Is our single work structure available? It may not be if there are
 * pending interrupt actions.  NOTE: lp_pollwork is shared with
 * lpc17_txavail(), so a pending out-of-cycle poll also blocks it.
 */
if (work_available(&priv->lp_pollwork))
{
/* Schedule to perform the poll processing on the worker thread. */
work_queue(HPWORK, &priv->lp_pollwork, lpc17_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
 * cycle.
 */
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY, lpc17_poll_expiry, 1, arg);
}
#else
/* Process the poll now, directly from the watchdog expiration */
lpc17_poll_process(priv);
#endif
}
/****************************************************************************
* Function: lpc17_ipv6multicast
*
* Description:
* Configure the IPv6 multicast MAC address.
*
* Parameters:
* priv - A reference to the private driver state structure
*
* Returned Value:
* OK on success; Negated errno on failure.
*
* Assumptions:
*
****************************************************************************/
#ifdef CONFIG_NET_ICMPv6
static void lpc17_ipv6multicast(FAR struct lpc17_driver_s *priv)
{
struct net_driver_s *dev;
uint16_t tmp16;
uint8_t mac[6];
/* For ICMPv6, we need to add the IPv6 multicast address
 *
 * For IPv6 multicast addresses, the Ethernet MAC is derived by
 * the four low-order octets OR'ed with the MAC 33:33:00:00:00:00,
 * so for example the IPv6 address FF02:DEAD:BEEF::1:3 would map
 * to the Ethernet MAC address 33:33:00:01:00:03.
 *
 * NOTES: This appears correct for the ICMPv6 Router Solicitation
 * Message, but the ICMPv6 Neighbor Solicitation message seems to
 * use 33:33:ff:01:00:03.  The code below builds that latter,
 * solicited-node style address by forcing mac[2] to 0xff.
 */
mac[0] = 0x33;
mac[1] = 0x33;
dev = &priv->lp_dev;
tmp16 = dev->d_ipv6addr[6];
mac[2] = 0xff;
mac[3] = tmp16 >> 8;
tmp16 = dev->d_ipv6addr[7];
mac[4] = tmp16 & 0xff;
mac[5] = tmp16 >> 8;
ninfo("IPv6 Multicast: %02x:%02x:%02x:%02x:%02x:%02x\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
(void)lpc17_addmac(dev, mac);
#ifdef CONFIG_NET_ICMPv6_AUTOCONF
/* Add the IPv6 all link-local nodes Ethernet address. This is the
 * address that we expect to receive ICMPv6 Router Advertisement
 * packets.
 */
(void)lpc17_addmac(dev, g_ipv6_ethallnodes.ether_addr_octet);
#endif /* CONFIG_NET_ICMPv6_AUTOCONF */
#ifdef CONFIG_NET_ICMPv6_ROUTER
/* Add the IPv6 all link-local routers Ethernet address. This is the
 * address that we expect to receive ICMPv6 Router Solicitation
 * packets.
 */
(void)lpc17_addmac(dev, g_ipv6_ethallrouters.ether_addr_octet);
#endif /* CONFIG_NET_ICMPv6_ROUTER */
}
#endif /* CONFIG_NET_ICMPv6 */
/****************************************************************************
* Function: lpc17_ifup
*
* Description:
* NuttX Callback: Bring up the Ethernet interface when an IP address is
* provided
*
* Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
static int lpc17_ifup(struct net_driver_s *dev)
{
struct lpc17_driver_s *priv = (struct lpc17_driver_s *)dev->d_private;
uint32_t regval;
int ret;
/* NOTE(review): informational message, but logged at error level */
nerr("Bringing up: %d.%d.%d.%d\n",
dev->d_ipaddr & 0xff, (dev->d_ipaddr >> 8) & 0xff,
(dev->d_ipaddr >> 16) & 0xff, dev->d_ipaddr >> 24);
/* Reset the Ethernet controller (again) */
lpc17_ethreset(priv);
/* Initialize the PHY and wait for the link to be established */
ret = lpc17_phyinit(priv);
if (ret != 0)
{
nerr("lpc17_phyinit failed: %d\n", ret);
return ret;
}
/* Configure the MAC station address.  The address is loaded as three
 * 16-bit register pairs: SA0 holds octets 5:4 and SA2 holds octets 1:0.
 */
regval = (uint32_t)priv->lp_dev.d_mac.ether_addr_octet[5] << 8 |
(uint32_t)priv->lp_dev.d_mac.ether_addr_octet[4];
lpc17_putreg(regval, LPC17_ETH_SA0);
regval = (uint32_t)priv->lp_dev.d_mac.ether_addr_octet[3] << 8 |
(uint32_t)priv->lp_dev.d_mac.ether_addr_octet[2];
lpc17_putreg(regval, LPC17_ETH_SA1);
regval = (uint32_t)priv->lp_dev.d_mac.ether_addr_octet[1] << 8 |
(uint32_t)priv->lp_dev.d_mac.ether_addr_octet[0];
lpc17_putreg(regval, LPC17_ETH_SA2);
#ifdef CONFIG_NET_ICMPv6
/* Set up the IPv6 multicast address */
lpc17_ipv6multicast(priv);
#endif
/* Initialize Ethernet interface for the PHY setup */
lpc17_macmode(priv->lp_mode);
/* Initialize EMAC DMA memory -- descriptors, status, packet buffers, etc. */
lpc17_txdescinit(priv);
lpc17_rxdescinit(priv);
/* Configure to pass all received frames */
regval = lpc17_getreg(LPC17_ETH_MAC1);
regval |= ETH_MAC1_PARF;
lpc17_putreg(regval, LPC17_ETH_MAC1);
/* Set up RX filter and configure to accept broadcast addresses, multicast
 * addresses, and perfect station address matches. We should also accept
 * perfect matches and, most likely, broadcast (for example, for ARP requests).
 * Other RX filter options will only be enabled if so selected. NOTE: There
 * is a selection CONFIG_NET_BROADCAST, but this enables receipt of UDP
 * broadcast packets inside of the stack.
 */
regval = ETH_RXFLCTRL_PERFEN | ETH_RXFLCTRL_BCASTEN;
#ifdef CONFIG_LPC17_MULTICAST
regval |= (ETH_RXFLCTRL_MCASTEN | ETH_RXFLCTRL_UCASTEN);
#endif
#ifdef CONFIG_NET_HASH
regval |= (ETH_RXFLCTRL_MCASTHASHEN | ETH_RXFLCTRL_UCASTHASHEN);
#endif
lpc17_putreg(regval, LPC17_ETH_RXFLCTRL);
/* Clear any pending interrupts (shouldn't be any) */
lpc17_putreg(0xffffffff, LPC17_ETH_INTCLR);
/* Configure interrupts. The Ethernet interrupt was attached during one-time
 * initialization, so we only need to set the interrupt priority, configure
 * interrupts, and enable them.
 */
/* Set the interrupt to the highest priority */
#ifdef CONFIG_ARCH_IRQPRIO
#if CONFIG_LPC17_NINTERFACES > 1
(void)up_prioritize_irq(priv->irq, CONFIG_NET_PRIORITY);
#else
(void)up_prioritize_irq(LPC17_IRQ_ETH, CONFIG_NET_PRIORITY);
#endif
#endif
/* Enable Ethernet interrupts. The way we do this depends on whether or
 * not Wakeup on Lan (WoL) has been configured.
 */
#ifdef CONFIG_NET_WOL
/* Configure WoL: Clear all receive filter WoLs and enable the perfect
 * match WoL interrupt. We will wait until the Wake-up to finish
 * bringing things up.
 */
lpc17_putreg(0xffffffff, LPC17_ETH_RXFLWOLCLR);
lpc17_putreg(ETH_RXFLCTRL_RXFILEN, LPC17_ETH_RXFLCTRL);
priv->lp_inten = ETH_INT_WKUP;
lpc17_putreg(ETH_INT_WKUP, LPC17_ETH_INTEN);
#else
/* Otherwise, enable all Rx interrupts. Tx interrupts, SOFTINT and WoL are
 * excluded. Tx interrupts will not be enabled until there is data to be
 * sent.
 */
priv->lp_inten = ETH_RXINTS;
lpc17_putreg(ETH_RXINTS, LPC17_ETH_INTEN);
#endif
/* Enable Rx. "Enabling of the receive function is located in two places.
 * The receive DMA manager needs to be enabled and the receive data path
 * of the MAC needs to be enabled. To prevent overflow in the receive
 * DMA engine the receive DMA engine should be enabled by setting the
 * RxEnable bit in the Command register before enabling the receive data
 * path in the MAC by setting the RECEIVE ENABLE bit in the MAC1 register."
 */
regval = lpc17_getreg(LPC17_ETH_CMD);
regval |= ETH_CMD_RXEN;
lpc17_putreg(regval, LPC17_ETH_CMD);
regval = lpc17_getreg(LPC17_ETH_MAC1);
regval |= ETH_MAC1_RE;
lpc17_putreg(regval, LPC17_ETH_MAC1);
/* Enable Tx */
regval = lpc17_getreg(LPC17_ETH_CMD);
regval |= ETH_CMD_TXEN;
lpc17_putreg(regval, LPC17_ETH_CMD);
/* Set and activate the periodic TX poll timer process */
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY, lpc17_poll_expiry, 1,
(uint32_t)priv);
/* Finally, make the interface up and enable the Ethernet interrupt at
 * the interrupt controller
 */
priv->lp_ifup = true;
#if CONFIG_LPC17_NINTERFACES > 1
up_enable_irq(priv->irq);
#else
up_enable_irq(LPC17_IRQ_ETH);
#endif
return OK;
}
/****************************************************************************
* Function: lpc17_ifdown
*
* Description:
* NuttX Callback: Stop the interface.
*
* Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
static int lpc17_ifdown(struct net_driver_s *dev)
{
  struct lpc17_driver_s *priv = (struct lpc17_driver_s *)dev->d_private;
  irqstate_t irqflags;

  /* Block interrupts while taking the interface down */

  irqflags = enter_critical_section();

  /* Mask the Ethernet IRQ at the interrupt controller */

  up_disable_irq(LPC17_IRQ_ETH);

  /* Stop the periodic poll and the TX timeout watchdogs */

  wd_cancel(priv->lp_txpoll);
  wd_cancel(priv->lp_txtimeout);

  /* Put the EMAC back into the reset state and mark the interface down */

  lpc17_ethreset(priv);
  priv->lp_ifup = false;

  leave_critical_section(irqflags);
  return OK;
}
/****************************************************************************
* Function: lpc17_txavail_process
*
* Description:
* Perform an out-of-cycle poll.
*
* Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Called in normal user mode
*
****************************************************************************/
static inline void lpc17_txavail_process(FAR struct lpc17_driver_s *priv)
{
  net_lock_t lockstate;

  /* Serialize with the network layer */

  lockstate = net_lock();

  /* Ignore the notification unless the interface is up AND the hardware
   * can accept another outgoing packet; in that case poll the network
   * layer for new XMIT data.
   */

  if (priv->lp_ifup && lpc17_txdesc(priv) == OK)
    {
      (void)devif_poll(&priv->lp_dev, lpc17_txpoll);
    }

  net_unlock(lockstate);
}
/****************************************************************************
* Function: lpc17_txavail_work
*
* Description:
* Perform an out-of-cycle poll on the worker thread.
*
* Parameters:
* arg - Reference to the NuttX driver state structure (cast to void*)
*
* Returned Value:
* None
*
* Assumptions:
* Called on the higher priority worker thread.
*
****************************************************************************/
#ifdef CONFIG_NET_NOINTS
static void lpc17_txavail_work(FAR void *arg)
{
  /* Run the out-of-cycle TX poll on the worker thread */

  lpc17_txavail_process((FAR struct lpc17_driver_s *)arg);
}
#endif
/****************************************************************************
* Function: lpc17_txavail
*
* Description:
* Driver callback invoked when new TX data is available. This is a
* stimulus perform an out-of-cycle poll and, thereby, reduce the TX
* latency.
*
* Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Called in normal user mode
*
****************************************************************************/
static int lpc17_txavail(struct net_driver_s *dev)
{
FAR struct lpc17_driver_s *priv = (FAR struct lpc17_driver_s *)dev->d_private;
#ifdef CONFIG_NET_NOINTS
/* Is our single poll work structure available? It may not be if there
 * are pending polling actions and we will have to ignore the Tx
 * availability action (which is okay because all poll actions have,
 * ultimately, the same effect).
 */
if (work_available(&priv->lp_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(HPWORK, &priv->lp_pollwork, lpc17_txavail_work, priv, 0);
}
#else
/* Perform the out-of-cycle poll now */
lpc17_txavail_process(priv);
#endif
return OK;
}
/****************************************************************************
* Function: lpc17_calcethcrc
*
* Description:
* Function to calculate the CRC used by LPC17 to check an Ethernet frame
*
* Algorithm adapted from LPC17xx sample code that contains this notice:
*
* Software that is described herein is for illustrative purposes only
* which provides customers with programming information regarding the
* products. This software is supplied "AS IS" without any warranties.
* NXP Semiconductors assumes no responsibility or liability for the
* use of the software, conveys no license or title under any patent,
* copyright, or mask work right to the product. NXP Semiconductors
* reserves the right to make changes in the software without
* notification. NXP Semiconductors also make no representation or
* warranty that such application will be suitable for the specified
* use without further testing or modification.
*
* Parameters:
* data - the data to be checked
* length - length of the data
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
#if defined(CONFIG_NET_IGMP) || defined(CONFIG_NET_ICMPv6)
static uint32_t lpc17_calcethcrc(const uint8_t *data, size_t length)
{
  /* Compute the Ethernet CRC-32 (polynomial 0x04c11db7) over 'data' as
   * the EMAC hardware does, processing each byte as two 4-bit nibbles,
   * least-significant bits first.  Bits [28:23] of the result form the
   * hash filter index (see lpc17_addmac()/lpc17_rmmac()).
   *
   * FIXES vs. the original NXP sample: the CRC accumulator is now
   * unsigned (left-shifting a negative signed int is undefined behavior)
   * and the loop counter is size_t (the original 'int i' vs 'size_t
   * length' comparison was signed/unsigned mismatched).  Only the low
   * bit of each shift was ever consumed, so the results are unchanged.
   *
   * data   - the data to be checked
   * length - length of the data in bytes
   *
   * Returns the 32-bit CRC value (no final inversion or reflection).
   */

  uint32_t crc = 0xffffffff;    /* Standard CRC-32 seed */
  size_t i;
  int j;

  for (i = 0; i < length; i++)
    {
      uint8_t byte = *data++;

      /* Two nibbles per byte, low nibble first */

      for (j = 0; j < 2; j++)
        {
          /* Each qN is the polynomial contribution of one input bit */

          uint32_t q3 = (((crc >> 28) ^ (byte >> 3)) & 1) ? 0x04c11db7 : 0;
          uint32_t q2 = (((crc >> 29) ^ (byte >> 2)) & 1) ? 0x09823b6e : 0;
          uint32_t q1 = (((crc >> 30) ^ (byte >> 1)) & 1) ? 0x130476dc : 0;
          uint32_t q0 = (((crc >> 31) ^ (byte >> 0)) & 1) ? 0x2608edb8 : 0;

          crc = (crc << 4) ^ q3 ^ q2 ^ q1 ^ q0;
          byte >>= 4;
        }
    }

  return crc;
}
#endif /* CONFIG_NET_IGMP || CONFIG_NET_ICMPv6 */
/****************************************************************************
* Function: lpc17_addmac
*
* Description:
* NuttX Callback: Add the specified MAC address to the hardware multicast
* address filtering
*
* Parameters:
* dev - Reference to the NuttX driver state structure
* mac - The MAC address to be added
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
#if defined(CONFIG_NET_IGMP) || defined(CONFIG_NET_ICMPv6)
static int lpc17_addmac(struct net_driver_s *dev, const uint8_t *mac)
{
uintptr_t regaddr;
uint32_t regval;
uint32_t crc;
unsigned int ndx;
nllinfo("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
/* Hash function:
 *
 * The standard Ethernet cyclic redundancy check (CRC) function is
 * calculated from the 6 byte MAC address. Bits [28:23] out of the 32-bit
 * CRC result are taken to form the hash. The 6-bit hash is used to access
 * the hash table: it is used as an index in the 64-bit HashFilter register
 * that has been programmed with accept values. If the selected accept value
 * is 1, the frame is accepted.
 */
crc = lpc17_calcethcrc(mac, 6);
ndx = (crc >> 23) & 0x3f;
/* Add the MAC address to the hardware multicast hash table.  Indices
 * 32-63 live in the MSBs register; 0-31 in the LSBs register.
 */
if (ndx > 31)
{
regaddr = LPC17_ETH_HASHFLH; /* Hash filter table MSBs register */
ndx -= 32;
}
else
{
regaddr = LPC17_ETH_HASHFLL; /* Hash filter table LSBs register */
}
regval = lpc17_getreg(regaddr);
regval |= 1 << ndx;
lpc17_putreg(regval, regaddr);
/* Enable multicast address filtering in the RxFilterControl register.
 * Note that unicast hash matching is deliberately disabled here; only
 * the multicast hash filter is enabled:
 *
 * AcceptUnicastHashEn: When set to '1', unicast frames that pass the
 * imperfect hash filter are accepted.
 * AcceptMulticastHashEn When set to '1', multicast frames that pass the
 * imperfect hash filter are accepted.
 */
regval = lpc17_getreg(LPC17_ETH_RXFLCTRL);
regval &= ~ETH_RXFLCTRL_UCASTHASHEN;
regval |= ETH_RXFLCTRL_MCASTHASHEN;
lpc17_putreg(regval, LPC17_ETH_RXFLCTRL);
return OK;
}
#endif /* CONFIG_NET_IGMP || CONFIG_NET_ICMPv6 */
/****************************************************************************
* Function: lpc17_rmmac
*
* Description:
* NuttX Callback: Remove the specified MAC address from the hardware multicast
* address filtering
*
* Parameters:
* dev - Reference to the NuttX driver state structure
* mac - The MAC address to be removed
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
#ifdef CONFIG_NET_IGMP
static int lpc17_rmmac(struct net_driver_s *dev, const uint8_t *mac)
{
uintptr_t regaddr1;
uintptr_t regaddr2;
uint32_t regval;
uint32_t crc;
unsigned int ndx;
nllinfo("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
/* Hash function:
 *
 * The standard Ethernet cyclic redundancy check (CRC) function is
 * calculated from the 6 byte MAC address. Bits [28:23] out of the 32-bit
 * CRC result are taken to form the hash. The 6-bit hash is used to access
 * the hash table: it is used as an index in the 64-bit HashFilter register
 * that has been programmed with accept values. If the selected accept value
 * is 1, the frame is accepted.
 */
crc = lpc17_calcethcrc(mac, 6);
ndx = (crc >> 23) & 0x3f;
/* Remove the MAC address from the hardware multicast hash table.
 * regaddr1 is the half being modified; regaddr2 is the other half,
 * checked below to decide if the whole table is now empty.
 */
if (ndx > 31)
{
regaddr1 = LPC17_ETH_HASHFLH; /* Hash filter table MSBs register */
regaddr2 = LPC17_ETH_HASHFLL; /* Hash filter table LSBs register */
ndx -= 32;
}
else
{
regaddr1 = LPC17_ETH_HASHFLL; /* Hash filter table LSBs register */
regaddr2 = LPC17_ETH_HASHFLH; /* Hash filter table MSBs register */
}
regval = lpc17_getreg(regaddr1);
regval &= ~(1 << ndx);
lpc17_putreg(regval, regaddr1);
/* If there are no longer any addresses being filtered, disable multicast
 * filtering.
 */
if (regval == 0 && lpc17_getreg(regaddr2) == 0)
{
/* AcceptUnicastHashEn: When set to '1', unicast frames that pass the
 * imperfect hash filter are accepted.
 * AcceptMulticastHashEn When set to '1', multicast frames that pass the
 * imperfect hash filter are accepted.
 */
regval = lpc17_getreg(LPC17_ETH_RXFLCTRL);
regval &= ~(ETH_RXFLCTRL_UCASTHASHEN | ETH_RXFLCTRL_MCASTHASHEN);
lpc17_putreg(regval, LPC17_ETH_RXFLCTRL);
}
return OK;
}
#endif
/****************************************************************************
 * Name: lpc17_showpins
 *
 * Description:
 *   Dump the state of the GPIO port used by the Ethernet pins (debug aid,
 *   compiled in only with CONFIG_NET_REGDEBUG and CONFIG_DEBUG_GPIO).
 *
 * Parameters:
 *   None
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
#if defined(CONFIG_NET_REGDEBUG) && defined(CONFIG_DEBUG_GPIO)
static void lpc17_showpins(void)
{
  /* Dump both halves of GPIO port 1 where the EMAC signals are mapped */
  lpc17_dumpgpio(GPIO_PORT1 | GPIO_PIN0, "P1[1-15]");
  lpc17_dumpgpio(GPIO_PORT1 | GPIO_PIN16, "P1[16-31]");
}
#endif
/****************************************************************************
 * Name: lpc17_showmii
 *
 * Description:
 *   Dump PHY MII registers (debug aid, compiled in only with
 *   CONFIG_NET_REGDEBUG when a PHY is present).
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *   msg     - Caption printed before the register dump
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
#if defined(CONFIG_NET_REGDEBUG) && defined(LPC17_HAVE_PHY)
static void lpc17_showmii(uint8_t phyaddr, const char *msg)
{
  /* This dump is informational, not an error report: use the network
   * subsystem ninfo() macro (already used elsewhere in this driver)
   * instead of the legacy generic err() macro for consistency.
   */
  ninfo("PHY " LPC17_PHYNAME ": %s\n", msg);
  ninfo(" MCR: %04x\n", lpc17_phyread(phyaddr, MII_MCR));
  ninfo(" MSR: %04x\n", lpc17_phyread(phyaddr, MII_MSR));
  ninfo(" ADVERTISE: %04x\n", lpc17_phyread(phyaddr, MII_ADVERTISE));
  ninfo(" LPA: %04x\n", lpc17_phyread(phyaddr, MII_LPA));
  ninfo(" EXPANSION: %04x\n", lpc17_phyread(phyaddr, MII_EXPANSION));
#ifdef CONFIG_ETH0_PHY_KS8721
  ninfo(" 10BTCR: %04x\n", lpc17_phyread(phyaddr, MII_KS8721_10BTCR));
#endif
}
#endif
/****************************************************************************
 * Function: lpc17_phywrite
 *
 * Description:
 *   Write a value to an MII PHY register
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *   regaddr - The address of the PHY register to be written
 *   phydata - The data to write to the PHY register
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *   The MII management interface has already been configured (see
 *   lpc17_phyinit()).
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static void lpc17_phywrite(uint8_t phyaddr, uint8_t regaddr, uint16_t phydata)
{
  uint32_t regval;
  /* Set PHY address and PHY register address */
  regval = ((uint32_t)phyaddr << ETH_MADR_PHYADDR_SHIFT) |
           ((uint32_t)regaddr << ETH_MADR_REGADDR_SHIFT);
  lpc17_putreg(regval, LPC17_ETH_MADR);
  /* Set up to write */
  lpc17_putreg(ETH_MCMD_WRITE, LPC17_ETH_MCMD);
  /* Write the register data to the PHY */
  lpc17_putreg((uint32_t)phydata, LPC17_ETH_MWTD);
  /* Wait for the PHY command to complete.  NOTE: this busy-waits with no
   * timeout; a wedged MII management interface would hang here.
   */
  while ((lpc17_getreg(LPC17_ETH_MIND) & ETH_MIND_BUSY) != 0);
}
#endif
/****************************************************************************
 * Function: lpc17_phyread
 *
 * Description:
 *   Read a value from an MII PHY register
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *   regaddr - The address of the PHY register to be read
 *
 * Returned Value:
 *   Data read from the PHY register
 *
 * Assumptions:
 *   The MII management interface has already been configured (see
 *   lpc17_phyinit()).
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static uint16_t lpc17_phyread(uint8_t phyaddr, uint8_t regaddr)
{
  uint32_t regval;
  /* Idle the MII command register before starting a new operation */
  lpc17_putreg(0, LPC17_ETH_MCMD);
  /* Set PHY address and PHY register address */
  regval = ((uint32_t)phyaddr << ETH_MADR_PHYADDR_SHIFT) |
           ((uint32_t)regaddr << ETH_MADR_REGADDR_SHIFT);
  lpc17_putreg(regval, LPC17_ETH_MADR);
  /* Set up to read */
  lpc17_putreg(ETH_MCMD_READ, LPC17_ETH_MCMD);
  /* Wait for the PHY command to complete.  NOTE: busy-wait with no
   * timeout (same caveat as lpc17_phywrite()).
   */
  while ((lpc17_getreg(LPC17_ETH_MIND) & (ETH_MIND_BUSY | ETH_MIND_NVALID)) != 0);
  lpc17_putreg(0, LPC17_ETH_MCMD);
  /* Return the PHY register data */
  return (uint16_t)(lpc17_getreg(LPC17_ETH_MRDD) & ETH_MRDD_MASK);
}
#endif
/****************************************************************************
 * Function: lpc17_phyreset
 *
 * Description:
 *   Reset the PHY
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *
 * Returned Value:
 *   OK on success; -ETIMEDOUT if the reset bit did not self-clear within
 *   MII_BIG_TIMEOUT polls.
 *
 * Assumptions:
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static inline int lpc17_phyreset(uint8_t phyaddr)
{
  int32_t timeout;
  uint16_t phyreg;
  /* Reset the PHY. Needs a minimal 50uS delay after reset. */
  lpc17_phywrite(phyaddr, MII_MCR, MII_MCR_RESET);
  /* Wait for a minimum of 50uS no matter what */
  up_udelay(50);
  /* The MCR reset bit is self-clearing. Wait for it to be clear indicating
   * that the reset is complete.
   */
  for (timeout = MII_BIG_TIMEOUT; timeout > 0; timeout--)
    {
      phyreg = lpc17_phyread(phyaddr, MII_MCR);
      if ((phyreg & MII_MCR_RESET) == 0)
        {
          return OK;
        }
    }
  nerr("Reset failed. MCR: %04x\n", phyreg);
  return -ETIMEDOUT;
}
#endif
/****************************************************************************
 * Function: lpc17_phyautoneg
 *
 * Description:
 *   Enable auto-negotiation.
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *
 * Returned Value:
 *   OK when auto-negotiation completed; -ETIMEDOUT if it did not complete
 *   within MII_BIG_TIMEOUT polls.
 *
 * Assumptions:
 *   The advertisement register has already been configured.
 *
 ****************************************************************************/
#if defined(LPC17_HAVE_PHY) && defined(CONFIG_PHY_AUTONEG)
static inline int lpc17_phyautoneg(uint8_t phyaddr)
{
  int32_t timeout;
  uint16_t phyreg;
  /* Start auto-negotiation */
  lpc17_phywrite(phyaddr, MII_MCR, MII_MCR_ANENABLE | MII_MCR_ANRESTART);
  /* Wait for autonegotiation to complete */
  for (timeout = MII_BIG_TIMEOUT; timeout > 0; timeout--)
    {
      /* Check if auto-negotiation has completed */
      phyreg = lpc17_phyread(phyaddr, MII_MSR);
      if ((phyreg & MII_MSR_ANEGCOMPLETE) != 0)
        {
          /* Yes.. return success */
          return OK;
        }
    }
  nerr("Auto-negotiation failed. MSR: %04x\n", phyreg);
  return -ETIMEDOUT;
}
#endif
/****************************************************************************
 * Function: lpc17_phymode
 *
 * Description:
 *   Set the PHY to operate at a selected speed/duplex mode.
 *
 * Parameters:
 *   phyaddr - The device address where the PHY was discovered
 *   mode - speed/duplex mode (LPC17_SPEED_* | LPC17_DUPLEX_* encoding)
 *
 * Returned Value:
 *   OK once the link is established; -ETIMEDOUT if no link within
 *   MII_BIG_TIMEOUT polls.
 *
 * Assumptions:
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static int lpc17_phymode(uint8_t phyaddr, uint8_t mode)
{
  int32_t timeout;
  uint16_t phyreg;
  /* Disable auto-negotiation and set fixed Speed and Duplex settings:
   *
   *   MII_MCR_UNIDIR      0=Disable unidirectional enable
   *   MII_MCR_SPEED1000   0=Reserved on 10/100
   *   MII_MCR_CTST        0=Disable collision test
   *   MII_MCR_FULLDPLX    ?=Full duplex
   *   MII_MCR_ANRESTART   0=Don't restart auto negotiation
   *   MII_MCR_ISOLATE     0=Don't electronically isolate PHY from MII
   *   MII_MCR_PDOWN       0=Don't powerdown the PHY
   *   MII_MCR_ANENABLE    0=Disable auto negotiation
   *   MII_MCR_SPEED100    ?=Select 100Mbps
   *   MII_MCR_LOOPBACK    0=Disable loopback mode
   *   MII_MCR_RESET       0=No PHY reset
   */
  phyreg = 0;
  if ((mode & LPC17_SPEED_MASK) == LPC17_SPEED_100)
    {
      phyreg = MII_MCR_SPEED100;
    }
  if ((mode & LPC17_DUPLEX_MASK) == LPC17_DUPLEX_FULL)
    {
      phyreg |= MII_MCR_FULLDPLX;
    }
  lpc17_phywrite(phyaddr, MII_MCR, phyreg);
  /* Then wait for the link to be established */
  for (timeout = MII_BIG_TIMEOUT; timeout > 0; timeout--)
    {
      /* REVISIT: This should not depend explicitly on the board configuration.
       * Rather, there should be some additional configuration option to
       * suppress this DP83848C-specific behavior.
       */
#if defined(CONFIG_ETH0_PHY_DP83848C) && !defined(CONFIG_ARCH_BOARD_MBED)
      phyreg = lpc17_phyread(phyaddr, MII_DP83848C_STS);
      if ((phyreg & 0x0001) != 0)
        {
          /* Yes.. return success */
          return OK;
        }
#else
      phyreg = lpc17_phyread(phyaddr, MII_MSR);
      if ((phyreg & MII_MSR_LINKSTATUS) != 0)
        {
          /* Yes.. return success */
          return OK;
        }
#endif
    }
  nerr("Link failed. MSR: %04x\n", phyreg);
  return -ETIMEDOUT;
}
#endif
/****************************************************************************
 * Function: lpc17_phyinit
 *
 * Description:
 *   Initialize the PHY: probe for its address, reset it, configure the
 *   speed/duplex mode (fixed or via auto-negotiation) and record the
 *   results in the driver state.
 *
 * Parameters:
 *   priv - Pointer to EMAC device driver structure
 *
 * Returned Value:
 *   OK on success; a negated errno value on failure. As a side-effect, it
 *   will initialize priv->lp_phyaddr and priv->lp_mode.
 *
 * Assumptions:
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static inline int lpc17_phyinit(struct lpc17_driver_s *priv)
{
  unsigned int phyaddr;
  uint16_t phyreg;
  uint32_t regval;
  int ret;
  /* MII configuration: host clocked divided per board.h, no suppress
   * preamble, no scan increment.
   */
  lpc17_putreg(ETH_MCFG_CLKSEL_DIV, LPC17_ETH_MCFG);
  lpc17_putreg(0, LPC17_ETH_MCMD);
  /* Enter RMII mode and select 100 MBPS support */
  lpc17_putreg(ETH_CMD_RMII, LPC17_ETH_CMD);
  lpc17_putreg(ETH_SUPP_SPEED, LPC17_ETH_SUPP);
  /* Find PHY Address. Because the controller has a pull-up and the
   * PHY has pull-down resistors on RXD lines some times the PHY
   * latches different at different addresses.
   */
  for (phyaddr = 1; phyaddr < 32; phyaddr++)
    {
      /* Check if we can see the selected device ID at this
       * PHY address.
       */
      phyreg = (unsigned int)lpc17_phyread(phyaddr, MII_PHYID1);
      ninfo("Addr: %d PHY ID1: %04x\n", phyaddr, phyreg);
      /* Compare OUI bits 3-18 */
      if (phyreg == LPC17_PHYID1)
        {
          phyreg = lpc17_phyread(phyaddr, MII_PHYID2);
          ninfo("Addr: %d PHY ID2: %04x\n", phyaddr, phyreg);
          /* Compare OUI bits 19-24 and the 6-bit model number (ignoring the
           * 4-bit revision number).
           */
          if ((phyreg & 0xfff0) == LPC17_PHYID2)
            {
              break;
            }
        }
    }
  /* Check if the PHY device address was found */
  if (phyaddr > 31)
    {
      /* Failed to find PHY at any location */
      nerr("No PHY detected\n");
      return -ENODEV;
    }
  ninfo("phyaddr: %d\n", phyaddr);
  /* Save the discovered PHY device address */
  priv->lp_phyaddr = phyaddr;
  /* Reset the PHY */
  ret = lpc17_phyreset(phyaddr);
  if (ret < 0)
    {
      return ret;
    }
  lpc17_showmii(phyaddr, "After reset");
  /* Check for preamble suppression support */
  phyreg = lpc17_phyread(phyaddr, MII_MSR);
  if ((phyreg & MII_MSR_MFRAMESUPPRESS) != 0)
    {
      /* The PHY supports preamble suppression */
      regval = lpc17_getreg(LPC17_ETH_MCFG);
      regval |= ETH_MCFG_SUPPRE;
      lpc17_putreg(regval, LPC17_ETH_MCFG);
    }
  /* Are we configured to do auto-negotiation? */
#ifdef CONFIG_PHY_AUTONEG
  /* Setup the Auto-negotiation advertisement: 100 or 10, and HD or FD */
  lpc17_phywrite(phyaddr, MII_ADVERTISE,
                 (MII_ADVERTISE_100BASETXFULL | MII_ADVERTISE_100BASETXHALF |
                  MII_ADVERTISE_10BASETXFULL | MII_ADVERTISE_10BASETXHALF |
                  MII_ADVERTISE_CSMA));
  /* Then perform the auto-negotiation */
  ret = lpc17_phyautoneg(phyaddr);
  if (ret < 0)
    {
      return ret;
    }
#else
  /* Set up the fixed PHY configuration */
  ret = lpc17_phymode(phyaddr, LPC17_MODE_DEFLT);
  if (ret < 0)
    {
      return ret;
    }
#endif
  /* The link is established */
  lpc17_showmii(phyaddr, "After link established");
  /* Check configuration */
#if defined(CONFIG_ETH0_PHY_KS8721)
  phyreg = lpc17_phyread(phyaddr, MII_KS8721_10BTCR);
  switch (phyreg & KS8721_10BTCR_MODE_MASK)
    {
      case KS8721_10BTCR_MODE_10BTHD: /* 10BASE-T half duplex */
        priv->lp_mode = LPC17_10BASET_HD;
        lpc17_putreg(0, LPC17_ETH_SUPP);
        break;
      case KS8721_10BTCR_MODE_100BTHD: /* 100BASE-T half duplex */
        priv->lp_mode = LPC17_100BASET_HD;
        break;
      case KS8721_10BTCR_MODE_10BTFD: /* 10BASE-T full duplex */
        priv->lp_mode = LPC17_10BASET_FD;
        lpc17_putreg(0, LPC17_ETH_SUPP);
        break;
      case KS8721_10BTCR_MODE_100BTFD: /* 100BASE-T full duplex */
        priv->lp_mode = LPC17_100BASET_FD;
        break;
      default:
        nerr("Unrecognized mode: %04x\n", phyreg);
        return -ENODEV;
    }
#elif defined(CONFIG_ETH0_PHY_KSZ8041)
  phyreg = lpc17_phyread(phyaddr, MII_KSZ8041_PHYCTRL2);
  switch (phyreg & MII_PHYCTRL2_MODE_MASK)
    {
      case MII_PHYCTRL2_MODE_10HDX: /* 10BASE-T half duplex */
        priv->lp_mode = LPC17_10BASET_HD;
        lpc17_putreg(0, LPC17_ETH_SUPP);
        break;
      case MII_PHYCTRL2_MODE_100HDX: /* 100BASE-T half duplex */
        priv->lp_mode = LPC17_100BASET_HD;
        break;
      case MII_PHYCTRL2_MODE_10FDX: /* 10BASE-T full duplex */
        priv->lp_mode = LPC17_10BASET_FD;
        lpc17_putreg(0, LPC17_ETH_SUPP);
        break;
      case MII_PHYCTRL2_MODE_100FDX: /* 100BASE-T full duplex */
        priv->lp_mode = LPC17_100BASET_FD;
        break;
      default:
        nerr("Unrecognized mode: %04x\n", phyreg);
        return -ENODEV;
    }
#elif defined(CONFIG_ETH0_PHY_DP83848C)
  phyreg = lpc17_phyread(phyaddr, MII_DP83848C_STS);
  /* Configure for full/half duplex mode and speed */
  switch (phyreg & 0x0006)
    {
      case 0x0000:
        priv->lp_mode = LPC17_100BASET_HD;
        break;
      case 0x0002:
        priv->lp_mode = LPC17_10BASET_HD;
        break;
      case 0x0004:
        priv->lp_mode = LPC17_100BASET_FD;
        break;
      case 0x0006:
        priv->lp_mode = LPC17_10BASET_FD;
        break;
      default:
        nerr("Unrecognized mode: %04x\n", phyreg);
        return -ENODEV;
    }
#elif defined(CONFIG_ETH0_PHY_LAN8720)
  {
    uint16_t advertise;
    uint16_t lpa;
    up_udelay(500);
    advertise = lpc17_phyread(phyaddr, MII_ADVERTISE);
    lpa = lpc17_phyread(phyaddr, MII_LPA);
    /* Check for 100BASETX full duplex */
    if ((advertise & MII_ADVERTISE_100BASETXFULL) != 0 &&
        (lpa & MII_LPA_100BASETXFULL) != 0)
      {
        priv->lp_mode = LPC17_100BASET_FD;
      }
    /* Check for 100BASETX half duplex */
    else if ((advertise & MII_ADVERTISE_100BASETXHALF) != 0 &&
             (lpa & MII_LPA_100BASETXHALF) != 0)
      {
        priv->lp_mode = LPC17_100BASET_HD;
      }
    /* Check for 10BASETX full duplex */
    else if ((advertise & MII_ADVERTISE_10BASETXFULL) != 0 &&
             (lpa & MII_LPA_10BASETXFULL) != 0)
      {
        priv->lp_mode = LPC17_10BASET_FD;
      }
    /* Check for 10BASETX half duplex */
    else if ((advertise & MII_ADVERTISE_10BASETXHALF) != 0 &&
             (lpa & MII_LPA_10BASETXHALF) != 0)
      {
        priv->lp_mode = LPC17_10BASET_HD;
      }
    else
      {
        /* NOTE: phyreg here still holds the MII_MSR value read during the
         * preamble-suppression check above; the diagnostic value is stale.
         */
        nerr("Unrecognized mode: %04x\n", phyreg);
        return -ENODEV;
      }
  }
#else
# warning "PHY Unknown: speed and duplex are bogus"
#endif
  /* This reports the successfully established link mode: informational
   * output, so use ninfo() rather than the error-level nerr() macro.
   */
  ninfo("%dBase-T %s duplex\n",
        (priv->lp_mode & LPC17_SPEED_MASK) == LPC17_SPEED_100 ? 100 : 10,
        (priv->lp_mode & LPC17_DUPLEX_MASK) == LPC17_DUPLEX_FULL ? "full" : "half");
  /* Disable auto-configuration. Set the fixed speed/duplex mode.
   * (probably more than little redundant).
   *
   * REVISIT: Revisit the following CONFIG_PHY_CEMENT_DISABLE work-around.
   * It is should not needed if CONFIG_PHY_AUTONEG is defined and is known
   * cause a problem for at least one PHY (DP83848I PHY). It might be
   * safe just to remove this elided coded for all PHYs.
   */
#ifndef CONFIG_PHY_CEMENT_DISABLE
  ret = lpc17_phymode(phyaddr, priv->lp_mode);
#endif
  lpc17_showmii(phyaddr, "After final configuration");
  return ret;
}
#else
/* No PHY support: just assume the default mode; nothing can fail */
static inline int lpc17_phyinit(struct lpc17_driver_s *priv)
{
  priv->lp_mode = LPC17_MODE_DEFLT;
  return OK;
}
#endif
/****************************************************************************
 * Function: lpc17_txdescinit
 *
 * Description:
 *   Initialize the EMAC Tx descriptor table
 *
 * Parameters:
 *   priv - Pointer to EMAC device driver structure (currently unused)
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
static inline void lpc17_txdescinit(struct lpc17_driver_s *priv)
{
  uint32_t *txdesc;
  uint32_t *txstat;
  uint32_t pktaddr;
  int i;
  /* Configure Tx descriptor and status tables */
  lpc17_putreg(LPC17_TXDESC_BASE, LPC17_ETH_TXDESC);
  lpc17_putreg(LPC17_TXSTAT_BASE, LPC17_ETH_TXSTAT);
  lpc17_putreg(CONFIG_NET_NTXDESC-1, LPC17_ETH_TXDESCRNO);
  /* Initialize Tx descriptors and link to packet buffers.  Each descriptor
   * is two words: the packet buffer address and the control word.
   */
  txdesc = (uint32_t *)LPC17_TXDESC_BASE;
  pktaddr = LPC17_TXBUFFER_BASE;
  for (i = 0; i < CONFIG_NET_NTXDESC; i++)
    {
      *txdesc++ = pktaddr;
      *txdesc++ = (TXDESC_CONTROL_INT | (LPC17_MAXPACKET_SIZE - 1));
      pktaddr += LPC17_MAXPACKET_SIZE;
    }
  /* Initialize Tx status (one status word per descriptor) */
  txstat = (uint32_t *)LPC17_TXSTAT_BASE;
  for (i = 0; i < CONFIG_NET_NTXDESC; i++)
    {
      *txstat++ = 0;
    }
  /* Point to first Tx descriptor */
  lpc17_putreg(0, LPC17_ETH_TXPRODIDX);
}
/****************************************************************************
 * Function: lpc17_rxdescinit
 *
 * Description:
 *   Initialize the EMAC Rx descriptor table
 *
 * Parameters:
 *   priv - Pointer to EMAC device driver structure (currently unused)
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
static inline void lpc17_rxdescinit(struct lpc17_driver_s *priv)
{
  uint32_t *rxdesc;
  uint32_t *rxstat;
  uint32_t pktaddr;
  int i;
  /* Configure Rx descriptor and status tables */
  lpc17_putreg(LPC17_RXDESC_BASE, LPC17_ETH_RXDESC);
  lpc17_putreg(LPC17_RXSTAT_BASE, LPC17_ETH_RXSTAT);
  lpc17_putreg(CONFIG_NET_NRXDESC-1, LPC17_ETH_RXDESCNO);
  /* Initialize Rx descriptors and link to packet buffers.  Each descriptor
   * is two words: the packet buffer address and the control word.
   */
  rxdesc = (uint32_t *)LPC17_RXDESC_BASE;
  pktaddr = LPC17_RXBUFFER_BASE;
  for (i = 0; i < CONFIG_NET_NRXDESC; i++)
    {
      *rxdesc++ = pktaddr;
      *rxdesc++ = (RXDESC_CONTROL_INT | (LPC17_MAXPACKET_SIZE - 1));
      pktaddr += LPC17_MAXPACKET_SIZE;
    }
  /* Initialize Rx status (two status words per descriptor, unlike Tx) */
  rxstat = (uint32_t *)LPC17_RXSTAT_BASE;
  for (i = 0; i < CONFIG_NET_NRXDESC; i++)
    {
      *rxstat++ = 0;
      *rxstat++ = 0;
    }
  /* Point to first Rx descriptor */
  lpc17_putreg(0, LPC17_ETH_RXCONSIDX);
}
/****************************************************************************
 * Function: lpc17_macmode
 *
 * Description:
 *   Program the EMAC for the selected speed/duplex mode.
 *
 * Parameters:
 *   mode - speed/duplex mode (LPC17_SPEED_* | LPC17_DUPLEX_* encoding)
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
#ifdef LPC17_HAVE_PHY
static void lpc17_macmode(uint8_t mode)
{
  uint32_t regval;
  int fullduplex = ((mode & LPC17_DUPLEX_MASK) == LPC17_DUPLEX_FULL);
  /* Back-to-back inter-packet gap: 21 for full duplex, 18 for half */
  lpc17_putreg(fullduplex ? 21 : 18, LPC17_ETH_IPGT);
  /* MAC2: reflect the duplex selection; CRC and Pad always enabled */
  regval = lpc17_getreg(LPC17_ETH_MAC2);
  if (fullduplex)
    {
      regval |= ETH_MAC2_FD;
    }
  else
    {
      regval &= ~ETH_MAC2_FD;
    }
  regval |= (ETH_MAC2_CRCEN | ETH_MAC2_PADCRCEN);
  lpc17_putreg(regval, LPC17_ETH_MAC2);
  /* Command register: mirror the duplex selection for the controller,
   * keeping RMII mode and PRFRAME set.
   */
  regval = lpc17_getreg(LPC17_ETH_CMD);
  if (fullduplex)
    {
      regval |= ETH_CMD_FD;
    }
  else
    {
      regval &= ~ETH_CMD_FD;
    }
  regval |= (ETH_CMD_RMII | ETH_CMD_PRFRAME);
  lpc17_putreg(regval, LPC17_ETH_CMD);
  /* This is currently done in lpc17_phyinit(). That doesn't
   * seem like the right place. It should be done here.
   */
#if 0
  regval = lpc17_getreg(LPC17_ETH_SUPP);
  if ((mode & LPC17_SPEED_MASK) == LPC17_SPEED_100)
    {
      regval |= ETH_SUPP_SPEED;
    }
  else
    {
      regval &= ~ETH_SUPP_SPEED;
    }
  lpc17_putreg(regval, LPC17_ETH_SUPP);
#endif
}
#endif
/****************************************************************************
 * Function: lpc17_ethreset
 *
 * Description:
 *   Configure and reset the Ethernet module, leaving it in a disabled state.
 *
 * Parameters:
 *   priv - Reference to the driver state structure (currently unused)
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
static void lpc17_ethreset(struct lpc17_driver_s *priv)
{
  irqstate_t flags;
  /* Reset the MAC with interrupts disabled */
  flags = enter_critical_section();
  /* Put the MAC into the reset state */
  lpc17_putreg((ETH_MAC1_TXRST | ETH_MAC1_MCSTXRST | ETH_MAC1_RXRST |
                ETH_MAC1_MCSRXRST | ETH_MAC1_SIMRST | ETH_MAC1_SOFTRST),
               LPC17_ETH_MAC1);
  /* Disable RX/TX, clear modes, reset all control registers */
  lpc17_putreg((ETH_CMD_REGRST | ETH_CMD_TXRST | ETH_CMD_RXRST),
               LPC17_ETH_CMD);
  /* Take the MAC out of the reset state */
  up_udelay(50);
  lpc17_putreg(0, LPC17_ETH_MAC1);
  /* The RMII bit must be set on initialization (I'm not sure this needs
   * to be done here but... oh well).
   */
  lpc17_putreg(ETH_CMD_RMII, LPC17_ETH_CMD);
  /* Set other misc configuration-related registers to default values */
  lpc17_putreg(0, LPC17_ETH_MAC2);
  lpc17_putreg(0, LPC17_ETH_SUPP);
  lpc17_putreg(0, LPC17_ETH_TEST);
  lpc17_putreg(18, LPC17_ETH_IPGR);
  lpc17_putreg(((15 << ETH_CLRT_RMAX_SHIFT) | (55 << ETH_CLRT_COLWIN_SHIFT)),
               LPC17_ETH_CLRT);
  /* Set the Maximum Frame size register. "This field resets to the value
   * 0x0600, which represents a maximum receive frame of 1536 octets. An
   * untagged maximum size Ethernet frame is 1518 octets. A tagged frame adds
   * four octets for a total of 1522 octets. If a shorter maximum length
   * restriction is desired, program this 16-bit field."
   */
  lpc17_putreg(LPC17_MAXPACKET_SIZE, LPC17_ETH_MAXF);
  /* Disable all Ethernet controller interrupts */
  lpc17_putreg(0, LPC17_ETH_INTEN);
  /* Clear any pending interrupts (shouldn't be any) */
  lpc17_putreg(0xffffffff, LPC17_ETH_INTCLR);
  leave_critical_section(flags);
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
 * Function: lpc17_ethinitialize
 *
 * Description:
 *   Initialize one Ethernet controller and driver structure.
 *
 * Parameters:
 *   intf - Selects the interface to be initialized.
 *
 * Returned Value:
 *   OK on success; Negated errno on failure.
 *
 * Assumptions:
 *
 ****************************************************************************/
#if CONFIG_LPC17_NINTERFACES > 1
int lpc17_ethinitialize(int intf)
#else
static inline int lpc17_ethinitialize(int intf)
#endif
{
  struct lpc17_driver_s *priv;
  uint32_t regval;
  int ret;
  int i;
  DEBUGASSERT(intf < CONFIG_LPC17_NINTERFACES);
  priv = &g_ethdrvr[intf];
  /* Turn on the ethernet MAC clock */
  regval = lpc17_getreg(LPC17_SYSCON_PCONP);
  regval |= SYSCON_PCONP_PCENET;
  lpc17_putreg(regval, LPC17_SYSCON_PCONP);
  /* Configure all GPIO pins needed by ENET */
  for (i = 0; i < GPIO_NENET_PINS; i++)
    {
      (void)lpc17_configgpio(g_enetpins[i]);
    }
  /* NOTE(review): presumably lpc17_showpins() degenerates to a no-op macro
   * when CONFIG_NET_REGDEBUG/CONFIG_DEBUG_GPIO are undefined -- confirm in
   * the (not visible here) declarations section.
   */
  lpc17_showpins();
  /* Initialize the driver structure */
  memset(priv, 0, sizeof(struct lpc17_driver_s));
  priv->lp_dev.d_ifup = lpc17_ifup; /* I/F up (new IP address) callback */
  priv->lp_dev.d_ifdown = lpc17_ifdown; /* I/F down callback */
  priv->lp_dev.d_txavail = lpc17_txavail; /* New TX data callback */
#ifdef CONFIG_NET_IGMP
  priv->lp_dev.d_addmac = lpc17_addmac; /* Add multicast MAC address */
  priv->lp_dev.d_rmmac = lpc17_rmmac; /* Remove multicast MAC address */
#endif
  priv->lp_dev.d_private = (void *)priv; /* Used to recover private state from dev */
#if CONFIG_LPC17_NINTERFACES > 1
# error "A mechanism to associate base address an IRQ with an interface is needed"
  priv->lp_base = ??; /* Ethernet controller base address */
  priv->lp_irq = ??; /* Ethernet controller IRQ number */
#endif
  /* Create a watchdog for timing polling for and timing of transmisstions */
  priv->lp_txpoll = wd_create(); /* Create periodic poll timer */
  priv->lp_txtimeout = wd_create(); /* Create TX timeout timer */
  /* Reset the Ethernet controller and leave in the ifdown statue. The
   * Ethernet controller will be properly re-initialized each time
   * lpc17_ifup() is called.
   */
  lpc17_ifdown(&priv->lp_dev);
  /* Attach the IRQ to the driver */
#if CONFIG_LPC17_NINTERFACES > 1
  ret = irq_attach(priv->irq, lpc17_interrupt);
#else
  ret = irq_attach(LPC17_IRQ_ETH, lpc17_interrupt);
#endif
  if (ret != 0)
    {
      /* We could not attach the ISR to the interrupt.
       * NOTE(review): the watchdogs created above are not freed on this
       * error path -- minor resource leak to confirm/fix.
       */
      return -EAGAIN;
    }
  /* Register the device with the OS so that socket IOCTLs can be performed */
  (void)netdev_register(&priv->lp_dev, NET_LL_ETHERNET);
  return OK;
}
/****************************************************************************
 * Name: up_netinitialize
 *
 * Description:
 *   Initialize the first network interface. If there are more than one
 *   interface in the chip, then board-specific logic will have to provide
 *   this function to determine which, if any, Ethernet controllers should
 *   be initialized.
 *
 ****************************************************************************/
#if CONFIG_LPC17_NINTERFACES == 1
void up_netinitialize(void)
{
  /* Single-interface configuration: bring up interface 0 unconditionally;
   * the return value is intentionally ignored here.
   */
  (void)lpc17_ethinitialize(0);
}
#endif
#endif /* LPC17_NETHCONTROLLERS > 0 */
#endif /* CONFIG_NET && CONFIG_LPC17_ETHERNET */
| 28.51658 | 90 | 0.603751 | [
"model"
] |
36913d292975e3db93ab9b8e2b97808d0c379c8e | 2,189 | h | C | Source/Prototype_NinjaPuss/ClimbableStall.h | Chrishayb/Prototype_FPS | 6963a164fa531509bd3f38fb46d158b28a51a76f | [
"MIT"
] | null | null | null | Source/Prototype_NinjaPuss/ClimbableStall.h | Chrishayb/Prototype_FPS | 6963a164fa531509bd3f38fb46d158b28a51a76f | [
"MIT"
] | null | null | null | Source/Prototype_NinjaPuss/ClimbableStall.h | Chrishayb/Prototype_FPS | 6963a164fa531509bd3f38fb46d158b28a51a76f | [
"MIT"
] | null | null | null | // Fill out your copyright notice in the Description page of Project Settings.
#pragma once
#include "CoreMinimal.h"
#include "InteractActor.h"
#include "ClimbableStall.generated.h"
/**
 * An interactable object that lets the player climb precisely up to the top
 * of the stall, by moving the player along a sequence of waypoints.
 */
UCLASS()
class PROTOTYPE_NINJAPUSS_API AClimbableStall : public AInteractActor
{
	GENERATED_BODY()
public:
	// Visual representation of the stall
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
		class UStaticMeshComponent* Mesh;
	// Ordered waypoints the player is moved through while climbing
	UPROPERTY(Category = "Interaction", VisibleAnywhere, BlueprintReadOnly)
		TArray<class USceneComponent*> WayPointArray;
public:
	// Sets default values for this actor's properties
	AClimbableStall();
	// Total time (seconds) for the whole climb across all waypoints
	UPROPERTY(EditDefaultsOnly, BlueprintReadWrite, Category = "Interaction")
		float TotalTransferTime;
	// Sum of the distances between consecutive waypoints (see FindTotalDistance)
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		float TotalDistanceTravel;
	// True once this stall has been used (climbed) before
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		bool bHasUsed;
	/** Determines if the player is currently interacting with this or not */
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		bool bInUse;
	// True while the player is being moved between two waypoints
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		bool bInWayPointTransit;
	// Location of the waypoint the player is currently moving from
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		FVector CurrentWayPointLoc;
	// Location of the waypoint the player is currently moving toward
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		FVector DestinationLoc;
	// Index of the waypoint currently being processed
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Interaction")
		int WayPointCount;
	// True once the current leg (CurrentWayPointLoc -> DestinationLoc) is set up
	UPROPERTY()
		bool bHasSetPath;
	// Length of the current leg
	UPROPERTY()
		float CurrentPathLength;
	// Interpolation alpha accumulated for the current leg
	UPROPERTY()
		float LerpDelta;
	// Time budget for the current leg (fraction of TotalTransferTime)
	UPROPERTY()
		float TimeForCurrentPath;
protected:
	// Called when the game starts or when spawned
	virtual void BeginPlay() override;
	/** Finds out the total distance that needs to be traveled, as soon as the player interacts */
	UFUNCTION()
		float FindTotalDistance();
public:
	// Called every frame
	virtual void Tick(float DeltaTime) override;
	// Called as the player starts interacting with this actor (C++)
	virtual void OnInteractionStart() override;
	// Called as the player stops interacting with this actor (C++)
	virtual void OnInteractionEnd() override;
};
| 25.453488 | 95 | 0.782092 | [
"mesh",
"object"
] |
3693290491f4992d7b1a95af19868f29c3866364 | 1,680 | h | C | src/java_bytecode/java_bytecode_convert_class.h | thk123/cbmc | 392c7656e69eb1fb39e902cfa6c5b079a56288bf | [
"BSD-4-Clause"
] | null | null | null | src/java_bytecode/java_bytecode_convert_class.h | thk123/cbmc | 392c7656e69eb1fb39e902cfa6c5b079a56288bf | [
"BSD-4-Clause"
] | 2 | 2017-01-30T16:40:05.000Z | 2017-07-04T13:52:36.000Z | src/java_bytecode/java_bytecode_convert_class.h | thk123/cbmc | 392c7656e69eb1fb39e902cfa6c5b079a56288bf | [
"BSD-4-Clause"
] | null | null | null | /*******************************************************************\
Module: JAVA Bytecode Language Conversion
Author: Daniel Kroening, kroening@kroening.com
\*******************************************************************/
/// \file
/// JAVA Bytecode Language Conversion
#ifndef CPROVER_JAVA_BYTECODE_JAVA_BYTECODE_CONVERT_CLASS_H
#define CPROVER_JAVA_BYTECODE_JAVA_BYTECODE_CONVERT_CLASS_H
#include <unordered_set>
#include <util/symbol_table.h>
#include <util/message.h>
#include "java_bytecode_parse_tree.h"
#include "java_bytecode_language.h"
/// See class \ref java_bytecode_convert_classt
/// \param parse_trees: parse trees of the class (and overlays) to convert
/// \param symbol_table: global symbol table that receives the converted class
/// \param message_handler: destination for conversion diagnostics
/// \param max_array_length: array-size limit applied during conversion --
///   exact semantics defined by java_bytecode_convert_classt
/// \param string_preprocess: used for String-library-related preprocessing
/// \param no_load_classes: names of classes that must not be loaded
/// \return presumably true on error, per codebase convention -- confirm
///   against callers
bool java_bytecode_convert_class(
  const java_class_loadert::parse_tree_with_overlayst &parse_trees,
  symbol_tablet &symbol_table,
  message_handlert &message_handler,
  size_t max_array_length,
  method_bytecodet &,
  java_string_library_preprocesst &string_preprocess,
  const std::unordered_set<std::string> &no_load_classes);
/// Convert parsed Java annotations into their symbol-table representation.
/// \param parsed_annotations: annotations from the bytecode parse tree
/// \param annotations: [out] converted annotations are appended here
void convert_annotations(
  const java_bytecode_parse_treet::annotationst &parsed_annotations,
  std::vector<java_annotationt> &annotations);
/// Mark the class type named `class_name` in `symbol_table` as implicitly
/// generic (may throw missing_outer_class_symbol_exceptiont, see below).
void mark_java_implicitly_generic_class_type(
  const irep_idt &class_name,
  symbol_tablet &symbol_table);
/// An exception raised while checking whether a class is implicitly generic,
/// when the symbol for one of its outer classes cannot be found.
class missing_outer_class_symbol_exceptiont : public std::logic_error
{
public:
  explicit missing_outer_class_symbol_exceptiont(
    const std::string &outer, const std::string &inner)
    : std::logic_error(format_message(outer, inner))
  {
  }

private:
  /// Build the human-readable what() message for the exception.
  static std::string format_message(
    const std::string &outer, const std::string &inner)
  {
    std::string message = "Missing outer class symbol: ";
    message += outer;
    message += ", for class ";
    message += inner;
    return message;
  }
};
#endif // CPROVER_JAVA_BYTECODE_JAVA_BYTECODE_CONVERT_CLASS_H
| 30.545455 | 72 | 0.733333 | [
"vector"
] |
3694d6dd9463c7e494551a93e5a26d91d3559653 | 252,698 | c | C | dpdk/drivers/net/mlx5/mlx5_flow_dv.c | Wasdns/mtcp | 16ed7ea6da2c9dd86fa950d0feee07026f886e1d | [
"BSD-3-Clause"
] | 2 | 2021-08-13T08:09:20.000Z | 2021-11-02T09:13:36.000Z | dpdk/drivers/net/mlx5/mlx5_flow_dv.c | Wasdns/mtcp | 16ed7ea6da2c9dd86fa950d0feee07026f886e1d | [
"BSD-3-Clause"
] | null | null | null | dpdk/drivers/net/mlx5/mlx5_flow_dv.c | Wasdns/mtcp | 16ed7ea6da2c9dd86fa950d0feee07026f886e1d | [
"BSD-3-Clause"
] | null | null | null | /* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 Mellanox Technologies, Ltd
*/
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif
#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
/*
 * Outermost L3/L4 layer attributes of a flow, collected from its pattern
 * items by flow_dv_attr_init().  The anonymous bit-field struct overlays
 * 'attr' so that all flags can be cleared at once (attr = 0).
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;	/* Set once attributes were computed. */
		uint32_t ipv4:1;	/* Outermost L3 layer is IPv4. */
		uint32_t ipv6:1;	/* Outermost L3 layer is IPv6. */
		uint32_t tcp:1;	/* Outermost L4 layer is TCP. */
		uint32_t udp:1;	/* Outermost L4 layer is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr;	/* All flags viewed as a single word. */
};
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	/*
	 * If layers is already initialized, it means this dev_flow is the
	 * suffix flow, the layers flags is set by the prefix flow. Need to
	 * use the layer flags from prefix flow as the suffix flow may not
	 * have the user defined items as the flow is split.
	 */
	if (dev_flow->layers) {
		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (dev_flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			/* The action targets the inner headers after tunnel
			 * decapsulation - restart layer detection. */
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			/*
			 * Fix: check item->spec as well before dereferencing
			 * it - the original tested only item->mask and would
			 * crash on a pattern item carrying a mask with no
			 * spec. Without a spec the next protocol is unknown,
			 * keep the 0xff sentinel.
			 */
			if (item->mask != NULL && item->spec != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			/* Same spec NULL-check fix as for IPv4 above. */
			if (item->mask != NULL && item->spec != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
/**
 * Translate an rte_mtr color into the matching mlx5 color value.
 *
 * @param[in] rcol
 *   rte_mtr color to translate.
 *
 * @return
 *   The corresponding mlx5 color, MLX5_FLOW_COLOR_UNDEFINED for any
 *   unrecognized input.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	if (rcol == RTE_COLOR_GREEN)
		return MLX5_FLOW_COLOR_GREEN;
	if (rcol == RTE_COLOR_YELLOW)
		return MLX5_FLOW_COLOR_YELLOW;
	if (rcol == RTE_COLOR_RED)
		return MLX5_FLOW_COLOR_RED;
	return MLX5_FLOW_COLOR_UNDEFINED;
}
/* Describes one addressable field of a protocol header for modify-header
 * commands; arrays of these are terminated by a zero 'size' entry. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field ID. */
};
/* Per-protocol tables mapping header fields to HW modification field IDs.
 * Entries are {size, offset, id} with size/offset in bytes unless noted;
 * a zero-sized entry terminates each table (see
 * flow_dv_convert_modify_action()). */
struct field_modify_info modify_eth[] = {
	{4, 0, MLX5_MODI_OUT_DMAC_47_16},
	{2, 4, MLX5_MODI_OUT_DMAC_15_0},
	{4, 6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};
/* IPv4: TTL plus source/destination addresses. */
struct field_modify_info modify_ipv4[] = {
	{1, 8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};
/* IPv6: hop limit plus the 128-bit addresses split into 32-bit words. */
struct field_modify_info modify_ipv6[] = {
	{1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4, 8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};
/* UDP: source/destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};
/* TCP: ports plus sequence/acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
/**
 * Flag IP-in-IP tunnel layers according to the inner protocol.
 *
 * @param[in] item
 *   IPv4/IPv6 pattern item (only consulted by the debug assertion).
 * @param[in] next_protocol
 *   IP protocol number carried by the item.
 * @param[in,out] item_flags
 *   Layer flags to update.
 * @param[out] tunnel
 *   Set to 1 when an IP-over-IP tunnel is detected.
 */
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	switch (next_protocol) {
	case IPPROTO_IPIP:
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
		break;
	case IPPROTO_IPV6:
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
		break;
	default:
		break;
	}
}
/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	int ret;

	/* A single user means no other port can race on the context. */
	if (sh->dv_refcnt <= 1)
		return;
	ret = pthread_mutex_lock(&sh->dv_mutex);
	assert(!ret);
	(void)ret;
}
/**
 * Release the shared dv context lock taken by flow_dv_shared_lock().
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	int ret;

	/* Mirror the lock side: only unlock when the context is shared. */
	if (sh->dv_refcnt <= 1)
		return;
	ret = pthread_mutex_unlock(&sh->dv_mutex);
	assert(!ret);
	(void)ret;
}
/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t tci;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		/* Place the 3-bit PCP value into the PCP field of the TCI. */
		tci = ((const struct rte_flow_action_of_set_vlan_pcp *)
		       action->conf)->vlan_pcp;
		tci <<= MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= tci;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		/* Replace the 12-bit VID field of the TCI. */
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
			(((const struct rte_flow_action_of_set_vlan_vid *)
			  action->conf)->vlan_vid);
		break;
	default:
		break;
	}
}
/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format (0 on unsupported size).
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret = 0;

	if (size == 1) {
		ret = *data;
	} else if (size == 2) {
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
	} else if (size == 3) {
		/* Two high bytes plus one trailing byte. */
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | data[sizeof(uint16_t)];
	} else if (size == 4) {
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
	} else {
		assert(false);
	}
	return ret;
}
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	/* Commands are appended after any already recorded in the resource. */
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	assert(item->mask);
	assert(field->size);
	/* Walk the field table (zero-sized entry terminates it). */
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;
		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* A zero mask means this field is not being modified. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		assert(size_b);
		/* Hardware encodes a full 32-bit width as length 0. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			assert(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset means "mirror the source". */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			assert(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No command was appended: every field mask was zero. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
/**
* Convert modify-header set IPv4 address action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_ipv4
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct rte_flow_action_set_ipv4 *conf =
(const struct rte_flow_action_set_ipv4 *)(action->conf);
struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv4 ipv4_mask;
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
ipv4.hdr.src_addr = conf->ipv4_addr;
ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
} else {
ipv4.hdr.dst_addr = conf->ipv4_addr;
ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
}
item.spec = &ipv4;
item.mask = &ipv4_mask;
return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Convert modify-header set IPv6 address action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_ipv6
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct rte_flow_action_set_ipv6 *conf =
(const struct rte_flow_action_set_ipv6 *)(action->conf);
struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
struct rte_flow_item_ipv6 ipv6;
struct rte_flow_item_ipv6 ipv6_mask;
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
sizeof(ipv6.hdr.src_addr));
memcpy(&ipv6_mask.hdr.src_addr,
&rte_flow_item_ipv6_mask.hdr.src_addr,
sizeof(ipv6.hdr.src_addr));
} else {
memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
sizeof(ipv6.hdr.dst_addr));
memcpy(&ipv6_mask.hdr.dst_addr,
&rte_flow_item_ipv6_mask.hdr.dst_addr,
sizeof(ipv6.hdr.dst_addr));
}
item.spec = &ipv6;
item.mask = &ipv6_mask;
return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Convert modify-header set MAC address action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_mac
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct rte_flow_action_set_mac *conf =
(const struct rte_flow_action_set_mac *)(action->conf);
struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
struct rte_flow_item_eth eth;
struct rte_flow_item_eth eth_mask;
memset(ð, 0, sizeof(eth));
memset(ð_mask, 0, sizeof(eth_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
memcpy(ð.src.addr_bytes, &conf->mac_addr,
sizeof(eth.src.addr_bytes));
memcpy(ð_mask.src.addr_bytes,
&rte_flow_item_eth_mask.src.addr_bytes,
sizeof(eth_mask.src.addr_bytes));
} else {
memcpy(ð.dst.addr_bytes, &conf->mac_addr,
sizeof(eth.dst.addr_bytes));
memcpy(ð_mask.dst.addr_bytes,
&rte_flow_item_eth_mask.dst.addr_bytes,
sizeof(eth_mask.dst.addr_bytes));
}
item.spec = ð
item.mask = ð_mask;
return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	/* Unlike other tables, modify_vlan_out_first_vid stores its size in
	 * bits (12), so it is written directly to the command instead of
	 * going through flow_dv_convert_modify_action(). */
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	/* data0 carries the type/field/offset/length bit-fields; the record
	 * must be handed to HW in big-endian form. */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/* conf->vlan_vid is already big-endian (rte_be16_t); the 16-bit
	 * shift positions it in the upper half of data1. NOTE(review):
	 * correctness relies on data1 being consumed as big-endian with no
	 * further swap - confirm against the PRM modify-header layout. */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
/**
* Convert modify-header set TP action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[in] items
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
* @param[in] dev_flow
* Pointer to the sub flow.
* @param[in] tunnel_decap
* Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_tp
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_tp *conf =
(const struct rte_flow_action_set_tp *)(action->conf);
struct rte_flow_item item;
struct rte_flow_item_udp udp;
struct rte_flow_item_udp udp_mask;
struct rte_flow_item_tcp tcp;
struct rte_flow_item_tcp tcp_mask;
struct field_modify_info *field;
if (!attr->valid)
flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->udp) {
memset(&udp, 0, sizeof(udp));
memset(&udp_mask, 0, sizeof(udp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
udp.hdr.src_port = conf->port;
udp_mask.hdr.src_port =
rte_flow_item_udp_mask.hdr.src_port;
} else {
udp.hdr.dst_port = conf->port;
udp_mask.hdr.dst_port =
rte_flow_item_udp_mask.hdr.dst_port;
}
item.type = RTE_FLOW_ITEM_TYPE_UDP;
item.spec = &udp;
item.mask = &udp_mask;
field = modify_udp;
}
if (attr->tcp) {
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
tcp.hdr.src_port = conf->port;
tcp_mask.hdr.src_port =
rte_flow_item_tcp_mask.hdr.src_port;
} else {
tcp.hdr.dst_port = conf->port;
tcp_mask.hdr.dst_port =
rte_flow_item_tcp_mask.hdr.dst_port;
}
item.type = RTE_FLOW_ITEM_TYPE_TCP;
item.spec = &tcp;
item.mask = &tcp_mask;
field = modify_tcp;
}
return flow_dv_convert_modify_action(&item, field, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Convert modify-header set TTL action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[in] items
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
* @param[in] dev_flow
* Pointer to the sub flow.
* @param[in] tunnel_decap
* Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_ttl
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_ttl *conf =
(const struct rte_flow_action_set_ttl *)(action->conf);
struct rte_flow_item item;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv4 ipv4_mask;
struct rte_flow_item_ipv6 ipv6;
struct rte_flow_item_ipv6 ipv6_mask;
struct field_modify_info *field;
if (!attr->valid)
flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
ipv4.hdr.time_to_live = conf->ttl_value;
ipv4_mask.hdr.time_to_live = 0xFF;
item.type = RTE_FLOW_ITEM_TYPE_IPV4;
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
}
if (attr->ipv6) {
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = conf->ttl_value;
ipv6_mask.hdr.hop_limits = 0xFF;
item.type = RTE_FLOW_ITEM_TYPE_IPV6;
item.spec = &ipv6;
item.mask = &ipv6_mask;
field = modify_ipv6;
}
return flow_dv_convert_modify_action(&item, field, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Convert modify-header decrement TTL action to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[in] items
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
* @param[in] dev_flow
* Pointer to the sub flow.
* @param[in] tunnel_decap
* Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_dec_ttl
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_item *items,
union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
bool tunnel_decap, struct rte_flow_error *error)
{
struct rte_flow_item item;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv4 ipv4_mask;
struct rte_flow_item_ipv6 ipv6;
struct rte_flow_item_ipv6 ipv6_mask;
struct field_modify_info *field;
if (!attr->valid)
flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
ipv4.hdr.time_to_live = 0xFF;
ipv4_mask.hdr.time_to_live = 0xFF;
item.type = RTE_FLOW_ITEM_TYPE_IPV4;
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
}
if (attr->ipv6) {
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = 0xFF;
ipv6_mask.hdr.hop_limits = 0xFF;
item.type = RTE_FLOW_ITEM_TYPE_IPV6;
item.spec = &ipv6;
item.mask = &ipv6_mask;
field = modify_ipv6;
}
return flow_dv_convert_modify_action(&item, field, NULL, resource,
MLX5_MODIFICATION_TYPE_ADD, error);
}
/**
* Convert modify-header increment/decrement TCP Sequence number
* to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_tcp_seq
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
uint64_t value = rte_be_to_cpu_32(*conf);
struct rte_flow_item item;
struct rte_flow_item_tcp tcp;
struct rte_flow_item_tcp tcp_mask;
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
/*
* The HW has no decrement operation, only increment operation.
* To simulate decrement X from Y using increment operation
* we need to add UINT32_MAX X times to Y.
* Each adding of UINT32_MAX decrements Y by 1.
*/
value *= UINT32_MAX;
tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
item.type = RTE_FLOW_ITEM_TYPE_TCP;
item.spec = &tcp;
item.mask = &tcp_mask;
return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
MLX5_MODIFICATION_TYPE_ADD, error);
}
/**
* Convert modify-header increment/decrement TCP Acknowledgment number
* to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_modify_tcp_ack
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
uint64_t value = rte_be_to_cpu_32(*conf);
struct rte_flow_item item;
struct rte_flow_item_tcp tcp;
struct rte_flow_item_tcp tcp_mask;
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
/*
* The HW has no decrement operation, only increment operation.
* To simulate decrement X from Y using increment operation
* we need to add UINT32_MAX X times to Y.
* Each adding of UINT32_MAX decrements Y by 1.
*/
value *= UINT32_MAX;
tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
item.type = RTE_FLOW_ITEM_TYPE_TCP;
item.spec = &tcp;
item.mask = &tcp_mask;
return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
MLX5_MODIFICATION_TYPE_ADD, error);
}
/* Map metadata register IDs (enum modify_reg) to the HW modification
 * field IDs used in modify-header commands. */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NONE] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
/**
* Convert register set to DV specification.
*
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] action
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_set_reg
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
struct mlx5_modification_cmd *actions = resource->actions;
uint32_t i = resource->actions_num;
if (i >= MLX5_MAX_MODIFY_NUM)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
assert(conf->id != REG_NONE);
assert(conf->id < RTE_DIM(reg_to_field));
actions[i] = (struct mlx5_modification_cmd) {
.action_type = MLX5_MODIFICATION_TYPE_SET,
.field = reg_to_field[conf->id],
};
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
actions[i].data1 = rte_cpu_to_be_32(conf->data);
++i;
resource->actions_num = i;
return 0;
}
/**
* Convert SET_TAG action to DV specification.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in,out] resource
* Pointer to the modify-header resource.
* @param[in] conf
* Pointer to action specification.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_convert_action_set_tag
(struct rte_eth_dev *dev,
struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action_set_tag *conf,
struct rte_flow_error *error)
{
rte_be32_t data = rte_cpu_to_be_32(conf->data);
rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
struct rte_flow_item item = {
.spec = &data,
.mask = &mask,
};
struct field_modify_info reg_c_x[] = {
[1] = {0, 0, 0},
};
enum mlx5_modification_field reg_type;
int ret;
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
if (ret < 0)
return ret;
assert(ret != REG_NONE);
assert((unsigned int)ret < RTE_DIM(reg_to_field));
reg_type = reg_to_field[ret];
assert(reg_type > 0);
reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* Full 32-bit copy by default; narrowed below for reg_c[0]. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	/* COPY takes no immediate data, only a mask. */
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	/* size is taken from the source entry; offset may be adjusted. */
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		/* Bits of reg_c[0] the PMD is allowed to touch. */
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
		assert(reg_c0);
		assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Limit the mark to the bits the shared context reports usable. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		{4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
		{0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	/* Resolve the metadata register assigned for MARK values. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	assert(reg > 0);
	if (reg == REG_C_0) {
		/* Only part of reg_c[0] is usable; realign data and mask
		 * into that sub-field (swap to CPU order, shift, swap back).
		 * NOTE(review): the second statement assigns a CPU-order
		 * intermediate to the big-endian 'mask' before the final
		 * swap on the next line - confirm this double conversion
		 * against the reg_c[0] layout. */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0].id = reg_to_field[reg];
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Get metadata register index for specified steering domain.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] attr
* Attributes of flow to determine steering domain.
* @param[out] error
* Pointer to the error structure.
*
* @return
* positive index on success, a negative errno value otherwise
* and rte_errno is set.
*/
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
int reg =
mlx5_flow_get_reg_id(dev, attr->transfer ?
MLX5_METADATA_FDB :
attr->egress ?
MLX5_METADATA_TX :
MLX5_METADATA_RX, 0, error);
if (reg < 0)
return rte_flow_error_set(error,
ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unavailable "
"metadata register");
return reg;
}
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0}, /* [0] filled once the register is known. */
	};
	/* The register depends on the steering domain (FDB/TX/RX). */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		/* Only the reported sub-field of reg_c[0] is usable. */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;
		assert(msk_c0);
		/* Shift amount realigns the value into the usable bits;
		 * it differs per host byte order because the datapath does
		 * not byte-swap the register contents. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		/* The shifted mask must stay inside the usable sub-field. */
		assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
/**
* Validate MARK item.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
* @param[in] attr
* Attributes of flow that includes this item.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	/* Only bits covered by the device mark mask can be matched on. */
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;
	/* Matching on MARK requires the extended metadata mode. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	/* A zero device mark mask means no register bits are usable. */
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	/* Confirm a register is actually allocated for MARK. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	/* The mark id must fit both the API and the device limits. */
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					error);
	if (ret < 0)
		return ret;
	return 0;
}
/**
* Validate META item.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
* @param[in] attr
* Attributes of flow that includes this item.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_item *item,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
const struct rte_flow_item_meta *spec = item->spec;
const struct rte_flow_item_meta *mask = item->mask;
struct rte_flow_item_meta nic_mask = {
.data = UINT32_MAX
};
int reg;
int ret;
if (!spec)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
item->spec,
"data cannot be empty");
if (!spec->data)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
"data cannot be zero");
if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
if (!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"extended metadata register"
" isn't supported");
reg = flow_dv_get_metadata_reg(dev, attr, error);
if (reg < 0)
return reg;
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"match on reg_b "
"isn't supported");
if (reg != REG_A)
nic_mask.data = priv->sh->dv_meta_mask;
}
if (!mask)
mask = &rte_flow_item_meta_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
error);
return ret;
}
/**
* Validate TAG item.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
* @param[in] attr
* Attributes of flow that includes this item.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;
	/* TAG matching relies on extensive metadata registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					error);
	if (ret < 0)
		return ret;
	/* The index selects which register to match; it must be exact. */
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	/* Make sure the selected index maps to an available register. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	assert(ret != REG_NONE);
	return 0;
}
/**
* Validate vport item.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
* @param[in] attr
* Attributes of flow that includes this item.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	/* Only an exact port-id match is supported, hence the full mask. */
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;
	/* PORT_ID is only meaningful for E-Switch (transfer) rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	/* Reject a second PORT_ID item in the same pattern. */
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 error);
	if (ret)
		return ret;
	/* Without a spec any port matches; nothing more to check. */
	if (!spec)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	/* The matched port must belong to the same E-Switch domain. */
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}
/**
* Validate the pop VLAN action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] action
* Pointer to the pop vlan action.
* @param[in] item_flags
* The items found in this flow rule.
* @param[in] attr
* Pointer to flow attributes.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
				 uint64_t action_flags,
				 const struct rte_flow_action *action,
				 uint64_t item_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/*
	 * Fix: the redundant "(void)action;" and "(void)attr;" statements
	 * were removed -- both parameters are genuinely used below
	 * (attr->egress and the action pointer in the error reports).
	 */
	if (!priv->sh->pop_vlan_action)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "pop vlan action is not supported");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "pop vlan action not supported for "
					  "egress");
	/* Only one VLAN manipulation action is allowed per flow. */
	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "no support for multiple VLAN "
					  "actions");
	/* Popping requires an explicit match on the outer VLAN. */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot pop vlan without a "
					  "match on (outer) vlan in the flow");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after pop VLAN action");
	return 0;
}
/**
* Get VLAN default info from vlan match info.
*
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill in.
 *
 * @return
 *   Nothing. The @p vlan structure is updated in place (void function).
*/
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
				  struct rte_vlan_hdr *vlan)
{
	/* Default mask used when the VLAN item carries no explicit mask. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
				MLX5DV_FLOW_VLAN_VID_MASK),
		.inner_type = RTE_BE16(0xffff),
	};
	if (items == NULL)
		return;
	/* Locate the first VLAN item in the pattern, if any. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int type = items->type;
		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
			break;
	}
	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
		const struct rte_flow_item_vlan *vlan_m = items->mask;
		const struct rte_flow_item_vlan *vlan_v = items->spec;
		if (!vlan_m)
			vlan_m = &nic_mask;
		/* Only full match values are accepted */
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
		    MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
			/* Replace only the PCP bits, keep the rest. */
			vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
		}
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
		    MLX5DV_FLOW_VLAN_VID_MASK_BE) {
			/* Replace only the VID bits, keep the rest. */
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
		}
		/* Copy inner ethertype only when fully masked. */
		if (vlan_m->inner_type == nic_mask.inner_type)
			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
							   vlan_m->inner_type);
	}
}
/**
* Validate the push VLAN action.
*
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] item_flags
* The items found in this flow rule.
* @param[in] action
* Pointer to the action structure.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
				  uint64_t item_flags __rte_unused,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

	/*
	 * Fix: the trailing "(void)attr;" statement was removed --
	 * attr is genuinely used in the first check below.
	 */
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "push VLAN action not supported for "
					  "ingress");
	/* Only 802.1Q and 802.1ad tags can be pushed. */
	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "invalid vlan ethertype");
	/* Only one VLAN manipulation action is allowed per flow. */
	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "no support for multiple VLAN "
					  "actions");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after push VLAN");
	return 0;
}
/**
* Validate the set VLAN PCP.
*
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] actions
* Pointer to the list of actions remaining in the flow rule.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_pcp *pcp_conf = action->conf;

	/* PCP is a 3-bit field, so 7 is the largest legal value. */
	if (pcp_conf->vlan_pcp > 7)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "VLAN PCP value is too big");
	/* This action rewrites the header pushed by OF_PUSH_VLAN. */
	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "set VLAN PCP action must follow "
					  "the push VLAN action");
	/* A single PCP modification per flow. */
	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "Multiple VLAN PCP modification are "
					  "not supported");
	/* PORT_ID must terminate the action list. */
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "wrong action order, port_id should "
					  "be after set VLAN PCP");
	return 0;
}
/**
* Validate the set VLAN VID.
*
* @param[in] item_flags
* Holds the items detected in this rule.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] actions
* Pointer to the list of actions remaining in the flow rule.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
				     uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

	/*
	 * Fix: vlan_vid is stored big-endian, so convert to CPU order
	 * before the range comparison.  The previous code compared two
	 * big-endian values with '>', which does not preserve numeric
	 * ordering on little-endian hosts.
	 */
	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN VID value is too big");
	/* VID can be set on a pushed VLAN or on a matched existing one. */
	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set VLAN VID action must follow push"
					  " VLAN action or match on VLAN item");
	/* A single VID modification per flow. */
	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Multiple VLAN VID modifications are "
					  "not supported");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after set VLAN VID");
	return 0;
}
/**
* Validate the FLAG action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	int ret;

	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_flag(action_flags, attr,
						      error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support flag action");
	/* The default flag value must be representable in the register. */
	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* Confirm a register is actually allocated for MARK/FLAG. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	assert(ret > 0);
	/* FLAG and MARK write the same register, hence exclusive. */
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	return 0;
}
/**
* Validate MARK action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] action
* Pointer to action.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_mark(struct rte_eth_dev *dev,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_action_mark *mark = action->conf;
	int ret;

	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_mark(action, action_flags,
						      attr, error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support mark action");
	/* A zero mark mask means no register bits are usable. */
	if (!priv->sh->dv_mark_mask)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* Confirm a register is actually allocated for MARK. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	assert(ret > 0);
	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* The mark id must fit both the API and the device limits. */
	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id exceeds the limit");
	/* MARK and FLAG write the same register, hence exclusive. */
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	return 0;
}
/**
* Validate SET_META action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] action
* Pointer to the action structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
				 const struct rte_flow_action *action,
				 uint64_t action_flags __rte_unused,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_meta *conf;
	uint32_t nic_mask = UINT32_MAX;
	int reg;

	/* Writing metadata requires the extended register support. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "extended metadata register"
					  " isn't supported");
	/* Resolve which register holds metadata for this domain. */
	reg = flow_dv_get_metadata_reg(dev, attr, error);
	if (reg < 0)
		return reg;
	/* REG_A/REG_B carry the full 32 bits; REG_C registers may not. */
	if (reg != REG_A && reg != REG_B) {
		struct mlx5_priv *priv = dev->data->dev_private;

		nic_mask = priv->sh->dv_meta_mask;
	}
	if (action->conf == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	conf = action->conf;
	if (conf->mask == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "zero mask doesn't have any effect");
	/* All requested bits must fit the register's usable bits. */
	if (conf->mask & ~nic_mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "meta data must be within reg C0");
	if ((conf->data & conf->mask) == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "zero value has no effect");
	return 0;
}
/**
* Validate SET_TAG action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] action
* Pointer to the action structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				uint64_t action_flags,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tag *conf;
	/* Fate actions after which a set_tag cannot take effect. */
	const uint64_t term_flags = MLX5_FLOW_ACTION_DROP |
				    MLX5_FLOW_ACTION_QUEUE |
				    MLX5_FLOW_ACTION_RSS;
	int ret;

	/* Writing a tag requires the extensive register support. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "extensive metadata register"
					  " isn't supported");
	if (action->conf == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	conf = action->conf;
	if (conf->mask == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "zero mask doesn't have any effect");
	/* The tag index must map to an available register. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	if (!attr->transfer && attr->ingress &&
	    (action_flags & term_flags))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "set_tag has no effect"
					  " with terminal actions");
	return 0;
}
/**
* Validate count action.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Counters need both DevX and the flow-counter build option. */
	if (priv->config.devx) {
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
		return 0;
#endif
	}
	return rte_flow_error_set
			(error, ENOTSUP,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "count action not supported");
}
/**
* Validate the L2 encap action.
*
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] action
* Pointer to the action structure.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	/* An encap action without configuration data is meaningless. */
	if (action->conf == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	/* Only one encapsulation per flow is supported. */
	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "can only have a single encap action "
					  "in a flow");
	return 0;
}
/**
* Validate a decap action.
*
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_decap(uint64_t action_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	/* A decap may neither repeat nor follow an encap. */
	if (action_flags & MLX5_FLOW_XCAP_ACTIONS) {
		const char *reason =
			(action_flags & MLX5_FLOW_ACTION_DECAP) ?
			"can only have a single decap action" :
			"decap after encap is not supported";

		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, reason);
	}
	/* Header modifications must come after the decap. */
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}
/* Zero-sized raw decap configuration used as a stand-in decap spec.
 * NOTE(review): file-scope without 'static' -- presumably referenced from
 * another translation unit; confirm, otherwise it should be made static.
 */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
/**
* Validate the raw encap and decap actions.
*
* @param[in] decap
* Pointer to the decap action.
* @param[in] encap
* Pointer to the encap action.
* @param[in] attr
* Pointer to flow attributes
* @param[in/out] action_flags
* Holds the actions detected until now.
* @param[out] actions_n
* pointer to the number of actions counter.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_action_raw_encap_decap
	(const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, struct rte_flow_error *error)
{
	int ret;

	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	/*
	 * When both decap and encap are present, the buffer sizes
	 * relative to MLX5_ENCAPSULATION_DECISION_SIZE decide whether
	 * the pair collapses into a single L3 encap/decap or stays as
	 * two distinct L2 actions.
	 */
	if (decap && encap) {
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			 MLX5_ENCAPSULATION_DECISION_SIZE &&
			 decap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE &&
			 decap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	/* Account for whatever survived the collapse above. */
	if (decap) {
		ret = flow_dv_validate_action_decap(*action_flags, attr, error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
/**
* Find existing encap/decap resource or create and register a new one.
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to encap/decap resource.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
* pointer to error structure.
*
* @return
* 0 on success otherwise -errno and errno is set.
*/
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct mlx5dv_dr_domain *domain;

	/* Non-zero flags only for group 0 (root table) flows. */
	resource->flags = dev_flow->group ? 0 : 1;
	/* Pick the DR domain matching the target flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			/* Reuse the cached entry, bump its refcount. */
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the packet-reformat verb action via the glue layer. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
/**
* Find existing table jump resource or create and register a new one.
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] tbl
* Pointer to flow table resource.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
* pointer to error structure.
*
* @return
* 0 on success otherwise -errno and errno is set.
*/
static int
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev __rte_unused,
			 struct mlx5_flow_tbl_resource *tbl,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	/* The jump action is embedded in the table data entry. */
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	int cnt;

	assert(tbl);
	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
	if (!cnt) {
		/* First reference: create the jump action lazily. */
		tbl_data->jump.action =
			mlx5_glue->dr_create_flow_action_dest_flow_tbl
			(tbl->obj);
		if (!tbl_data->jump.action)
			return rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create jump action");
		DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
			(void *)&tbl_data->jump, cnt);
	} else {
		/* Subsequent references reuse the existing action. */
		assert(tbl_data->jump.action);
		DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
			(void *)&tbl_data->jump, cnt);
	}
	rte_atomic32_inc(&tbl_data->jump.refcnt);
	dev_flow->dv.jump = &tbl_data->jump;
	return 0;
}
/**
* Find existing table port ID resource or create and register a new one.
*
* @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to port ID action resource.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
* pointer to error structure.
*
* @return
* 0 on success otherwise -errno and errno is set.
*/
static int
flow_dv_port_id_action_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_port_id_action_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_port_id_action_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
		if (resource->port_id == cache_resource->port_id) {
			/* Fix: dropped duplicated word "resource" in log. */
			DRV_LOG(DEBUG, "port id action resource %p: "
				"refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.port_id_action = cache_resource;
			return 0;
		}
	}
	/* Register new port id action resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/*
	 * Depending on rdma_core version the glue routine calls
	 * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
	 * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
	 */
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_port
			(priv->sh->fdb_domain, resource->port_id);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
	dev_flow->dv.port_id_action = cache_resource;
	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
/**
 * Find existing push VLAN action resource or create and register a new one.
 *
 * Resources live on the shared context's push_vlan_action_list and are
 * reference counted, so identical push-VLAN actions (same tag and table
 * type) share a single rdma-core action object.
 *
 * @param [in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to push VLAN action resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_push_vlan_action_resource_register
(struct rte_eth_dev *dev,
struct mlx5_flow_dv_push_vlan_action_resource *resource,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
if (resource->vlan_tag == cache_resource->vlan_tag &&
resource->ft_type == cache_resource->ft_type) {
/* Hit: reuse the cached action and take one more reference. */
DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
"refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
dev_flow->dv.push_vlan_res = cache_resource;
return 0;
}
}
/* Register new push_vlan action resource. */
cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
/* Select the steering domain matching the requested flow table type. */
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
domain = sh->rx_domain;
else
domain = sh->tx_domain;
cache_resource->action =
mlx5_glue->dr_create_flow_action_push_vlan(domain,
resource->vlan_tag);
if (!cache_resource->action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
/* The new entry starts with one reference held by dev_flow. */
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
dev_flow->dv.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
}
/**
 * Get the size of specific rte_flow_item_type
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   sizeof struct item_type, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
	/* Map each supported item type onto the size of its spec struct. */
	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		return sizeof(struct rte_flow_item_eth);
	case RTE_FLOW_ITEM_TYPE_VLAN:
		return sizeof(struct rte_flow_item_vlan);
	case RTE_FLOW_ITEM_TYPE_IPV4:
		return sizeof(struct rte_flow_item_ipv4);
	case RTE_FLOW_ITEM_TYPE_IPV6:
		return sizeof(struct rte_flow_item_ipv6);
	case RTE_FLOW_ITEM_TYPE_UDP:
		return sizeof(struct rte_flow_item_udp);
	case RTE_FLOW_ITEM_TYPE_TCP:
		return sizeof(struct rte_flow_item_tcp);
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		return sizeof(struct rte_flow_item_vxlan);
	case RTE_FLOW_ITEM_TYPE_GRE:
		return sizeof(struct rte_flow_item_gre);
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		return sizeof(struct rte_flow_item_nvgre);
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		return sizeof(struct rte_flow_item_vxlan_gpe);
	case RTE_FLOW_ITEM_TYPE_MPLS:
		return sizeof(struct rte_flow_item_mpls);
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
	default:
		/* VOID and unknown items contribute no bytes. */
		return 0;
	}
}
#define MLX5_ENCAP_IPV4_VERSION 0x40
#define MLX5_ENCAP_IPV4_IHL_MIN 0x05
#define MLX5_ENCAP_IPV4_TTL_DEF 0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Each item's spec is copied verbatim into @p buf, then protocol-link
 * fields the user left at zero (ether type, IP next protocol, UDP
 * destination port, version/TTL defaults, VXLAN flags) are filled in so
 * the resulting headers form a coherent packet. Item ordering is
 * validated implicitly: each header requires its carrier to have been
 * seen already.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error)
{
/* Pointers into buf, remembered so later items can patch earlier headers. */
struct rte_ether_hdr *eth = NULL;
struct rte_vlan_hdr *vlan = NULL;
struct rte_ipv4_hdr *ipv4 = NULL;
struct rte_ipv6_hdr *ipv6 = NULL;
struct rte_udp_hdr *udp = NULL;
struct rte_vxlan_hdr *vxlan = NULL;
struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
struct rte_gre_hdr *gre = NULL;
size_t len;
size_t temp_size = 0;
if (!items)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "invalid empty data");
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
len = flow_dv_get_item_len(items->type);
if (len + temp_size > MLX5_ENCAP_MAX_LEN)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"items total size is too big"
" for encap action");
/* Copy the item spec as-is; zero fields are defaulted below. */
rte_memcpy((void *)&buf[temp_size], items->spec, len);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
eth = (struct rte_ether_hdr *)&buf[temp_size];
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan = (struct rte_vlan_hdr *)&buf[temp_size];
if (!eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"eth header not found");
if (!eth->ether_type)
eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
if (!vlan && !eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"neither eth nor vlan"
" header found");
/* Patch the innermost L2 header's ether type if unset. */
if (vlan && !vlan->eth_proto)
vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
else if (eth && !eth->ether_type)
eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
if (!ipv4->version_ihl)
ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
MLX5_ENCAP_IPV4_IHL_MIN;
if (!ipv4->time_to_live)
ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
if (!vlan && !eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"neither eth nor vlan"
" header found");
if (vlan && !vlan->eth_proto)
vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
else if (eth && !eth->ether_type)
eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
if (!ipv6->vtc_flow)
ipv6->vtc_flow =
RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
if (!ipv6->hop_limits)
ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp = (struct rte_udp_hdr *)&buf[temp_size];
if (!ipv4 && !ipv6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"ip header not found");
if (ipv4 && !ipv4->next_proto_id)
ipv4->next_proto_id = IPPROTO_UDP;
else if (ipv6 && !ipv6->proto)
ipv6->proto = IPPROTO_UDP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
if (!udp)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"udp header not found");
if (!udp->dst_port)
udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
if (!vxlan->vx_flags)
vxlan->vx_flags =
RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
if (!udp)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"udp header not found");
/* GPE's next protocol cannot be defaulted; user must set it. */
if (!vxlan_gpe->proto)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"next protocol not found");
if (!udp->dst_port)
udp->dst_port =
RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
if (!vxlan_gpe->vx_flags)
vxlan_gpe->vx_flags =
MLX5_ENCAP_VXLAN_GPE_FLAGS;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_NVGRE:
gre = (struct rte_gre_hdr *)&buf[temp_size];
/* GRE's inner protocol cannot be defaulted; user must set it. */
if (!gre->proto)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"next protocol not found");
if (!ipv4 && !ipv6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"ip header not found");
if (ipv4 && !ipv4->next_proto_id)
ipv4->next_proto_id = IPPROTO_GRE;
else if (ipv6 && !ipv6->proto)
ipv6->proto = IPPROTO_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VOID:
break;
default:
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
(void *)items->type,
"unsupported item type");
break;
}
temp_size += len;
}
*size = temp_size;
return 0;
}
/**
 * Zero the UDP checksum inside an encapsulation header buffer.
 *
 * Walks the raw header data (Ethernet, optional VLAN tags, then IP) and,
 * for IPv6/UDP, clears the UDP checksum field so a zero checksum is
 * emitted. IPv4 packets are left untouched.
 *
 * NOTE(review): assumes UDP follows the IPv6 header directly, i.e. no
 * IPv6 extension headers in the encap data — TODO confirm with callers.
 *
 * @param[in] data
 *   Pointer to the raw encapsulation header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
struct rte_ether_hdr *eth = NULL;
struct rte_vlan_hdr *vlan = NULL;
struct rte_ipv6_hdr *ipv6 = NULL;
struct rte_udp_hdr *udp = NULL;
char *next_hdr;
uint16_t proto;
eth = (struct rte_ether_hdr *)data;
next_hdr = (char *)(eth + 1);
/* Byte-swap the wire-order ether type for host-order comparison. */
proto = RTE_BE16(eth->ether_type);
/* VLAN skipping */
while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
vlan = (struct rte_vlan_hdr *)next_hdr;
proto = RTE_BE16(vlan->eth_proto);
next_hdr += sizeof(struct rte_vlan_hdr);
}
/* HW calculates IPv4 csum. no need to proceed */
if (proto == RTE_ETHER_TYPE_IPV4)
return 0;
/* non IPv4/IPv6 header. not supported */
if (proto != RTE_ETHER_TYPE_IPV6) {
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Cannot offload non IPv4/IPv6");
}
ipv6 = (struct rte_ipv6_hdr *)next_hdr;
/* ignore non UDP */
if (ipv6->proto != IPPROTO_UDP)
return 0;
udp = (struct rte_udp_hdr *)(ipv6 + 1);
udp->dgram_cksum = 0;
return 0;
}
/**
 * Convert L2 encap action to DV specification.
 *
 * Builds the raw reformat data either directly from a RAW_ENCAP action's
 * buffer or by converting a VXLAN/NVGRE encap item list, zeroes the UDP
 * checksum in the result, and registers the encap/decap resource.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
struct mlx5_flow *dev_flow,
uint8_t transfer,
struct rte_flow_error *error)
{
const struct rte_flow_item *encap_data;
const struct rte_flow_action_raw_encap *raw_encap_data;
struct mlx5_flow_dv_encap_decap_resource res = {
.reformat_type =
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
};
if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
/* RAW_ENCAP carries ready-made header bytes. */
raw_encap_data =
(const struct rte_flow_action_raw_encap *)action->conf;
res.size = raw_encap_data->size;
memcpy(res.buf, raw_encap_data->data, res.size);
} else {
/* VXLAN/NVGRE encap: convert the item list to raw bytes. */
if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
encap_data =
((const struct rte_flow_action_vxlan_encap *)
action->conf)->definition;
else
encap_data =
((const struct rte_flow_action_nvgre_encap *)
action->conf)->definition;
if (flow_dv_convert_encap_data(encap_data, res.buf,
&res.size, error))
return -rte_errno;
}
if (flow_dv_zero_encap_udp_csum(res.buf, error))
return -rte_errno;
if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "can't create L2 encap action");
return 0;
}
/**
* Convert L2 decap action to DV specification.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
* @param[in] transfer
* Mark if the flow is E-Switch flow.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
uint8_t transfer,
struct rte_flow_error *error)
{
struct mlx5_flow_dv_encap_decap_resource res = {
.size = 0,
.reformat_type =
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
};
if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "can't create L2 decap action");
return 0;
}
/**
* Convert raw decap/encap (L3 tunnel) action to DV specification.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action
* Pointer to action structure.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
* @param[in] attr
* Pointer to the flow attributes.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct rte_flow_action_raw_encap *encap_data;
struct mlx5_flow_dv_encap_decap_resource res;
encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
res.size = encap_data->size;
memcpy(res.buf, encap_data->data, res.size);
res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
if (attr->transfer)
res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
else
res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "can't create encap action");
return 0;
}
/**
* Create action push VLAN.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] vlan
* Pointer to the vlan to push to the Ethernet header.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_vlan_hdr *vlan,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_flow_dv_push_vlan_action_resource res;
res.vlan_tag =
rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
vlan->vlan_tci);
if (attr->transfer)
res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
else
res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
return flow_dv_push_vlan_action_resource_register
(dev, &res, dev_flow, error);
}
/**
 * Validate the modify-header actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   struct rte_flow_error *error)
{
	/* Every modify action except DEC_TTL must carry a configuration. */
	if (!action->conf && action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL,
					  "action configuration not set");
	/* An encap action earlier in the list rules out a later modify. */
	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " modify action");
	return 0;
}
/**
 * Validate the modify-header MAC address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_mac(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);

	if (ret)
		return ret;
	/* A MAC rewrite only makes sense with an L2 item in the pattern. */
	if (!(item_flags & MLX5_FLOW_LAYER_L2))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no L2 item in pattern");
	return 0;
}
/**
 * Validate the modify-header IPv4 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner IPv4 header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L3_IPV4 :
		   MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no ipv4 item in pattern");
	return 0;
}
/**
 * Validate the modify-header IPv6 address actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
				    const struct rte_flow_action *action,
				    const uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner IPv6 header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L3_IPV6 :
		   MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no ipv6 item in pattern");
	return 0;
}
/**
 * Validate the modify-header TP actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tp(const uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const uint64_t item_flags,
				  struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner L4 header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L4 : MLX5_FLOW_LAYER_OUTER_L4;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no transport layer in pattern");
	return 0;
}
/**
 * Validate the modify-header actions of increment/decrement
 * TCP Sequence-number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;
	bool is_inc;
	bool is_dec;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner TCP header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L4_TCP :
		   MLX5_FLOW_LAYER_OUTER_L4_TCP;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no TCP item in pattern");
	/* Increment and decrement of the same field are mutually exclusive. */
	is_inc = action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
	is_dec = action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
	if ((is_inc && (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
	    (is_dec && (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot decrease and increase"
					  " TCP sequence number"
					  " at the same time");
	return 0;
}
/**
 * Validate the modify-header actions of increment/decrement
 * TCP Acknowledgment number.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
				       const struct rte_flow_action *action,
				       const uint64_t item_flags,
				       struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;
	bool is_inc;
	bool is_dec;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner TCP header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L4_TCP :
		   MLX5_FLOW_LAYER_OUTER_L4_TCP;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no TCP item in pattern");
	/* Increment and decrement of the same field are mutually exclusive. */
	is_inc = action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
	is_dec = action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
	if ((is_inc && (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
	    (is_dec && (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot decrease and increase"
					  " TCP acknowledgment number"
					  " at the same time");
	return 0;
}
/**
 * Validate the modify-header TTL actions.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const uint64_t item_flags,
				   struct rte_flow_error *error)
{
	const int ret = flow_dv_validate_action_modify_hdr(action_flags,
							   action, error);
	uint64_t required;

	if (ret)
		return ret;
	/* After a decap the rewrite applies to the inner L3 header. */
	required = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
		   MLX5_FLOW_LAYER_INNER_L3 : MLX5_FLOW_LAYER_OUTER_L3;
	if (!(item_flags & required))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no IP protocol in pattern");
	return 0;
}
/**
 * Validate jump action.
 *
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] external
 *   Action belongs to flow rule created by request external to PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_jump(const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attributes,
			     bool external, struct rte_flow_error *error)
{
	uint32_t target_group;
	uint32_t table;
	int ret;

	/* Jump is a fate action and cannot coexist with another one. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "jump with meter not support");
	if (!action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL,
					  "action configuration not set");
	target_group = ((const struct rte_flow_action_jump *)
			action->conf)->group;
	/* Verify the target group translates to a valid table. */
	ret = mlx5_flow_group_to_table(attributes, external, target_group,
				       true, &table, error);
	if (ret)
		return ret;
	/* Jumping to the flow's own group is not allowed. */
	if (attributes->group == target_group)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be other than"
					  " the current flow group");
	return 0;
}
/**
 * Validate the port_id action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Port_id RTE action structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
uint64_t action_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct rte_flow_action_port_id *port_id;
struct mlx5_priv *act_priv;
struct mlx5_priv *dev_priv;
uint16_t port;
if (!attr->transfer)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"port id action is valid in transfer"
" mode only");
if (!action || !action->conf)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL,
"port id action parameters must be"
" specified");
/* port_id is a fate action and cannot coexist with another one. */
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
MLX5_FLOW_FATE_ESWITCH_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can have only one fate actions in"
" a flow");
dev_priv = mlx5_dev_to_eswitch_info(dev);
if (!dev_priv)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"failed to obtain E-Switch info");
port_id = action->conf;
/* 'original' selects this device's own port instead of the given id. */
port = port_id->original ? dev->data->port_id : port_id->id;
act_priv = mlx5_port_to_eswitch_info(port, false);
if (!act_priv)
return rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
"failed to obtain E-Switch port id for port");
/* Both ports must belong to the same E-Switch domain. */
if (act_priv->domain_id != dev_priv->domain_id)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"port does not belong to"
" E-Switch being configured");
return 0;
}
/**
 * Get the maximum number of modify header actions.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 * @param flags
 *   Flags bits to check if root level.
 *
 * @return
 *   Max number of modify header actions device can support.
 */
static unsigned int
flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev, uint64_t flags)
{
	/*
	 * There is no way to query the max capability directly; it has to
	 * be acquired by iterative trial. It is a safe assumption that more
	 * actions are supported by FW if the extensive metadata register is
	 * supported. (Only relevant in the root table.)
	 */
	if (flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL) {
		if (mlx5_flow_ext_mreg_supported(dev))
			return MLX5_ROOT_TBL_MODIFY_NUM;
		return MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG;
	}
	return MLX5_MAX_MODIFY_NUM;
}
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
uint64_t action_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_meter *am = action->conf;
struct mlx5_flow_meter *fm;
if (!am)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"meter action conf is NULL");
/* Only one meter per flow; meter cannot be combined with jump. */
if (action_flags & MLX5_FLOW_ACTION_METER)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"meter chaining not support");
if (action_flags & MLX5_FLOW_ACTION_JUMP)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"meter with jump not support");
if (!priv->mtr_en)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"meter action not supported");
fm = mlx5_flow_meter_find(priv, am->mtr_id);
if (!fm)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Meter not found");
/*
 * A meter already in use (ref_cnt set) must be attached with
 * direction/transfer attributes compatible with those recorded
 * in the meter itself.
 */
if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
(!fm->attr.ingress && !attr->ingress && attr->egress) ||
(!fm->attr.egress && !attr->egress && attr->ingress))))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Flow attributes are either invalid "
"or have a conflict with current "
"meter attributes");
return 0;
}
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Cached resources on the shared context's modify_cmds list are reference
 * counted; a resource matches when its table type, flags, action count and
 * the raw action array are all identical.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
(struct rte_eth_dev *dev,
struct mlx5_flow_dv_modify_hdr_resource *resource,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
/* Group 0 flows live in the root table and use root-level flags. */
resource->flags =
dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->flags))
return rte_flow_error_set(error, EOVERFLOW,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many modify header items");
/* Select the steering domain matching the requested flow table type. */
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
ns = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
ns = sh->tx_domain;
else
ns = sh->rx_domain;
/* Lookup a matching resource from cache. */
actions_len = resource->actions_num * sizeof(resource->actions[0]);
LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
if (resource->ft_type == cache_resource->ft_type &&
resource->actions_num == cache_resource->actions_num &&
resource->flags == cache_resource->flags &&
!memcmp((const void *)resource->actions,
(const void *)cache_resource->actions,
actions_len)) {
/* Hit: reuse the cached resource and take a reference. */
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
dev_flow->dv.modify_hdr = cache_resource;
return 0;
}
}
/* Register new modify-header resource. */
/* The action array is stored inline after the struct. */
cache_resource = rte_calloc(__func__, 1,
sizeof(*cache_resource) + actions_len, 0);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
rte_memcpy(cache_resource->actions, resource->actions, actions_len);
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_modify_header
(sh->ctx, cache_resource->ft_type, ns,
cache_resource->flags, actions_len,
(uint64_t *)cache_resource->actions);
if (!cache_resource->verbs_action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
/* The new entry starts with one reference held by dev_flow. */
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
dev_flow->dv.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
}
#define MLX5_CNT_CONTAINER_RESIZE 64
/**
 * Get or create a flow counter.
 *
 * Fallback (non-batch) allocation: each counter gets its own DevX object
 * and its own rdma-core count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter *
flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
			       uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt = NULL;
	struct mlx5_devx_obj *dcs = NULL;

	/* Counters are backed by DevX objects; nothing to do without DevX. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	if (shared) {
		/* Reuse an existing shared counter with the same user id. */
		TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
	if (!dcs)
		return NULL;
	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
	if (!cnt) {
		/*
		 * Destroy the DevX object just allocated. Fix: the previous
		 * code dereferenced the NULL 'cnt' here (cnt->dcs).
		 */
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		rte_errno = ENOMEM;
		return NULL;
	}
	struct mlx5_flow_counter tmpl = {
		.shared = shared,
		.ref_cnt = 1,
		.id = id,
		.dcs = dcs,
	};
	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
	if (!tmpl.action) {
		/*
		 * Destroy 'dcs' itself. Fix: cnt->dcs is still NULL at this
		 * point (cnt came from rte_calloc), so destroying it leaked
		 * the DevX object.
		 */
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		rte_errno = errno;
		rte_free(cnt);
		return NULL;
	}
	*cnt = tmpl;
	TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
	return cnt;
}
/**
* Release a flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] counter
* Pointer to the counter handler.
*/
static void
flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
				 struct mlx5_flow_counter *counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!counter)
		return;
	if (--counter->ref_cnt != 0)
		return;
	/* Last reference dropped - unlink, destroy the devx object, free. */
	TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
	claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
	rte_free(counter);
}
/**
* Query a devx flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] cnt
* Pointer to the flow counter.
* @param[out] pkts
* The statistics value of packets.
* @param[out] bytes
* The statistics value of bytes.
*
* @return
* 0 on success, otherwise a negative errno value and rte_errno is set.
*/
static inline int
_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
			      struct mlx5_flow_counter *cnt, uint64_t *pkts,
			      uint64_t *bytes)
{
	int ret;

	/* Synchronous devx query of a single counter, no batch offsets. */
	ret = mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
					       0, NULL, NULL, 0);
	return ret;
}
/**
* Get a pool by a counter.
*
* @param[in] cnt
* Pointer to the counter.
*
* @return
* The counter pool.
*/
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
{
	/*
	 * Non-batch counters live in an array placed directly after their
	 * pool structure and are indexed by devx counter ID, so the pool
	 * address is recovered by pointer arithmetic alone.
	 */
	if (!cnt->batch) {
		/* Step back to the first counter of the pool's array. */
		cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
		/* The pool structure immediately precedes its counters. */
		return (struct mlx5_flow_counter_pool *)cnt - 1;
	}
	/* Batch counters keep an explicit back-pointer to their pool. */
	return cnt->pool;
}
/**
* Get a pool by devx counter ID.
*
* @param[in] cont
* Pointer to the counter container.
* @param[in] id
* The counter devx ID.
*
* @return
* The counter pool pointer if exists, NULL otherwise,
*/
static struct mlx5_flow_counter_pool *
flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
{
	struct mlx5_flow_counter_pool *pool;

	/*
	 * Each pool covers one aligned range of MLX5_COUNTERS_PER_POOL
	 * devx IDs; scan the list for the range containing "id".
	 */
	TAILQ_FOREACH(pool, &cont->pool_list, next) {
		int start = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
			    MLX5_COUNTERS_PER_POOL;
		int end = start + MLX5_COUNTERS_PER_POOL;

		if (id >= start && id < end)
			return pool;
	}
	return NULL;
}
/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] raws_n
* The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
*
* @return
* The new memory management pointer on success, otherwise NULL and rte_errno
* is set.
*/
static struct mlx5_counter_stats_mem_mng *
flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
{
	struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
					(dev->data->dev_private))->sh;
	struct mlx5_devx_mkey_attr mkey_attr;
	struct mlx5_counter_stats_mem_mng *mem_mng;
	volatile struct flow_counter_stats *raw_data;
	/*
	 * Single allocation layout, in order:
	 *   raws_n * MLX5_COUNTERS_PER_POOL counter stats (DMA raw data),
	 *   then raws_n raw descriptors,
	 *   then the management structure itself at the very end.
	 */
	int size = (sizeof(struct flow_counter_stats) *
			MLX5_COUNTERS_PER_POOL +
			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
			sizeof(struct mlx5_counter_stats_mem_mng);
	/* Page alignment is required for the devx umem registration below. */
	uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
	int i;

	if (!mem) {
		rte_errno = ENOMEM;
		return NULL;
	}
	/* The management structure occupies the tail of the allocation. */
	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
	/* From here on "size" is only the raw statistics area to register. */
	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
	mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
						 IBV_ACCESS_LOCAL_WRITE);
	if (!mem_mng->umem) {
		rte_errno = errno;
		rte_free(mem);
		return NULL;
	}
	/* Create the mkey covering exactly the registered raw data. */
	mkey_attr.addr = (uintptr_t)mem;
	mkey_attr.size = size;
	mkey_attr.umem_id = mem_mng->umem->umem_id;
	mkey_attr.pd = sh->pdn;
	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
	if (!mem_mng->dm) {
		mlx5_glue->devx_umem_dereg(mem_mng->umem);
		rte_errno = errno;
		rte_free(mem);
		return NULL;
	}
	/* Raw descriptors start right after the registered data area. */
	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
	raw_data = (volatile struct flow_counter_stats *)mem;
	/* Point each descriptor at its slice of the raw data. */
	for (i = 0; i < raws_n; ++i) {
		mem_mng->raws[i].mem_mng = mem_mng;
		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
	}
	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
	return mem_mng;
}
/**
* Resize a counter container.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
*
* @return
* The new container pointer on success, otherwise NULL and rte_errno is set.
*/
static struct mlx5_pools_container *
flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Currently active (master) container. */
	struct mlx5_pools_container *cont =
			MLX5_CNT_CONTAINER(priv->sh, batch, 0);
	/* The other half of the double buffer, to be filled and flipped in. */
	struct mlx5_pools_container *new_cont =
			MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
	struct mlx5_counter_stats_mem_mng *mem_mng;
	uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
	int i;

	if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
		/* The last resize still hasn't detected by the host thread. */
		rte_errno = EAGAIN;
		return NULL;
	}
	new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
	if (!new_cont->pools) {
		rte_errno = ENOMEM;
		return NULL;
	}
	/* Carry over the existing pool pointer array, if any. */
	if (cont->n)
		memcpy(new_cont->pools, cont->pools, cont->n *
		       sizeof(struct mlx5_flow_counter_pool *));
	/* Extra raws beyond the resize step serve in-flight async queries. */
	mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
		MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
	if (!mem_mng) {
		rte_free(new_cont->pools);
		return NULL;
	}
	/* Queue the spare raws on the free list for the query thread. */
	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
		LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
				 i, next);
	new_cont->n = resize;
	rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
	TAILQ_INIT(&new_cont->pool_list);
	/* Move (not copy) the pool list to the new container. */
	TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
	new_cont->init_mem_mng = mem_mng;
	/* Ensure all the above stores are visible before the flip below. */
	rte_cio_wmb();
	/* Flip the master container. */
	priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
	return new_cont;
}
/**
* Query a devx flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] cnt
* Pointer to the flow counter.
* @param[out] pkts
* The statistics value of packets.
* @param[out] bytes
* The statistics value of bytes.
*
* @return
* 0 on success, otherwise a negative errno value and rte_errno is set.
*/
static inline int
_flow_dv_query_count(struct rte_eth_dev *dev,
		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
		     uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool =
			flow_dv_counter_pool_get(cnt);
	/* Index of this counter inside the pool's counter array. */
	int offset = cnt - &pool->counters_raw[0];

	/* Without batch devx support, fall back to a synchronous query. */
	if (priv->counter_fallback)
		return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);

	/* Serialize against the asynchronous raw-data swap by the host. */
	rte_spinlock_lock(&pool->sl);
	/*
	 * The single counters allocation may allocate smaller ID than the
	 * current allocated in parallel to the host reading.
	 * In this case the new counter values must be reported as 0.
	 */
	if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
		*pkts = 0;
		*bytes = 0;
	} else {
		/* Raw data is device-written big-endian; convert on read. */
		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
	}
	rte_spinlock_unlock(&pool->sl);
	return 0;
}
/**
* Create and initialize a new counter pool.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[out] dcs
* The devX counter handle.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
*
* @return
* A new pool pointer on success, NULL otherwise and rte_errno is set.
*/
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t batch)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
							       0);
	int16_t n_valid = rte_atomic16_read(&cont->n_valid);
	uint32_t size;

	/* Container is full - resize it (flips to the spare container). */
	if (cont->n == n_valid) {
		cont = flow_dv_container_resize(dev, batch);
		if (!cont)
			return NULL;
	}
	/* The counter array is allocated inline, right after the pool. */
	size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
			sizeof(struct mlx5_flow_counter);
	pool = rte_calloc(__func__, 1, size, 0);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->min_dcs = dcs;
	/* Assign the pool its raw statistics slice by pool index. */
	pool->raw = cont->init_mem_mng->raws + n_valid %
			MLX5_CNT_CONTAINER_RESIZE;
	pool->raw_hw = NULL;
	rte_spinlock_init(&pool->sl);
	/*
	 * The generation of the new allocated counters in this pool is 0, 2 in
	 * the pool generation makes all the counters valid for allocation.
	 */
	rte_atomic64_set(&pool->query_gen, 0x2);
	TAILQ_INIT(&pool->counters);
	TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
	cont->pools[n_valid] = pool;
	/* Pool initialization must be updated before host thread access. */
	rte_cio_wmb();
	rte_atomic16_add(&cont->n_valid, 1);
	return pool;
}
/**
* Prepare a new counter and/or a new counter pool.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[out] cnt_free
* Where to put the pointer of a new counter.
* @param[in] batch
* Whether the pool is for counter that was allocated by batch command.
*
* @return
* The free counter pool pointer and @p cnt_free is set on success,
* NULL otherwise and rte_errno is set.
*/
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t batch)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	uint32_t i;

	if (!batch) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* The devx ID may land in an already existing pool's range. */
		pool = flow_dv_find_pool_by_id
			(MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, batch);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		} else if (dcs->id < pool->min_dcs->id) {
			/* Track the smallest devx ID seen for this pool. */
			rte_atomic64_set(&pool->a64_dcs,
					 (int64_t)(uintptr_t)dcs);
		}
		/* The counter slot is fixed by the devx ID within the pool. */
		cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
		cnt->dcs = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* bulk_bitmap is in 128 counters units. */
	if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, batch);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* A batch allocation makes the whole pool's counters free at once. */
	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = &pool->counters_raw[i];
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
	}
	*cnt_free = &pool->counters_raw[0];
	return pool;
}
/**
* Search for existed shared counter.
*
* @param[in] cont
* Pointer to the relevant counter pool container.
* @param[in] id
* The shared counter ID to search.
*
* @return
* NULL if not existed, otherwise pointer to the shared counter.
*/
static struct mlx5_flow_counter *
flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
			      uint32_t id)
{
	/*
	 * Plain automatic variable: the previous "static" storage kept
	 * hidden state between calls for no benefit and made this pure
	 * search helper non-reentrant and thread-unsafe.
	 */
	struct mlx5_flow_counter *cnt;
	struct mlx5_flow_counter_pool *pool;
	int i;

	/* Linear scan of every counter in every pool of the container. */
	TAILQ_FOREACH(pool, &cont->pool_list, next) {
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = &pool->counters_raw[i];
			/* Only an in-use shared counter can match. */
			if (cnt->ref_cnt && cnt->shared && cnt->id == id)
				return cnt;
		}
	}
	return NULL;
}
/**
* Allocate a flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] shared
* Indicate if this counter is shared with other flows.
* @param[in] id
* Counter identifier.
* @param[in] group
* Counter flow group.
*
* @return
* pointer to flow counter on success, NULL otherwise and rte_errno is set.
*/
static struct mlx5_flow_counter *
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
		      uint16_t group)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	/*
	 * Currently group 0 flow counter cannot be assigned to a flow if it is
	 * not the first one in the batch counter allocation, so it is better
	 * to allocate counters one by one for these flows in a separate
	 * container.
	 * A counter can be shared between different groups so need to take
	 * shared counters from the single container.
	 */
	uint32_t batch = (group && !shared) ? 1 : 0;
	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
							       0);

	/* No batch devx counter support - use the synchronous path. */
	if (priv->counter_fallback)
		return flow_dv_counter_alloc_fallback(dev, shared, id);
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	/* A shared counter with the same ID is reused, not reallocated. */
	if (shared) {
		cnt_free = flow_dv_counter_shared_search(cont, id);
		if (cnt_free) {
			/* Guard against reference counter overflow. */
			if (cnt_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			cnt_free->ref_cnt++;
			return cnt_free;
		}
	}
	/* Pools which has a free counters are in the start. */
	TAILQ_FOREACH(pool, &cont->pool_list, next) {
		/*
		 * The free counter reset values must be updated between the
		 * counter release to the counter allocation, so, at least one
		 * query must be done in this time. ensure it by saving the
		 * query generation in the release time.
		 * The free list is sorted according to the generation - so if
		 * the first one is not updated, all the others are not
		 * updated too.
		 */
		cnt_free = TAILQ_FIRST(&pool->counters);
		if (cnt_free && cnt_free->query_gen + 1 <
		    rte_atomic64_read(&pool->query_gen))
			break;
		cnt_free = NULL;
	}
	/* No reusable counter found - allocate a fresh one (maybe a pool). */
	if (!cnt_free) {
		pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
		if (!pool)
			return NULL;
	}
	cnt_free->batch = batch;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;

		if (batch) {
			/* Batch counters index into the pool's bulk object. */
			offset = cnt_free - &pool->counters_raw[0];
			dcs = pool->min_dcs;
		} else {
			/* Single counters own their devx object directly. */
			offset = 0;
			dcs = cnt_free->dcs;
		}
		cnt_free->action = mlx5_glue->dv_create_flow_action_counter
					(dcs->obj, offset);
		if (!cnt_free->action) {
			rte_errno = errno;
			return NULL;
		}
	}
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
				 &cnt_free->bytes))
		return NULL;
	cnt_free->shared = shared;
	cnt_free->ref_cnt = 1;
	cnt_free->id = id;
	if (!priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	TAILQ_REMOVE(&pool->counters, cnt_free, next);
	if (TAILQ_EMPTY(&pool->counters)) {
		/* Move the pool to the end of the container pool list. */
		TAILQ_REMOVE(&cont->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
	}
	return cnt_free;
}
/**
* Release a flow counter.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] counter
* Pointer to the counter handler.
*/
static void
flow_dv_counter_release(struct rte_eth_dev *dev,
			struct mlx5_flow_counter *counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;

	if (!counter)
		return;
	if (priv->counter_fallback) {
		flow_dv_counter_release_fallback(dev, counter);
		return;
	}
	if (--counter->ref_cnt != 0)
		return;
	pool = flow_dv_counter_pool_get(counter);
	/*
	 * Put the counter in the end - the last updated one. The counter
	 * is not destroyed; it stays in the pool's free list tagged with
	 * the current query generation for later reuse.
	 */
	TAILQ_INSERT_TAIL(&pool->counters, counter, next);
	counter->query_gen = rte_atomic64_read(&pool->query_gen);
}
/**
* Verify the @p attributes will be correctly understood by the NIC and store
* them in the @p flow if everything is correct.
*
* @param[in] dev
* Pointer to dev struct.
* @param[in] attributes
* Pointer to flow attributes
* @param[in] external
* This flow rule is created by request external to PMD.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attributes,
			    bool external __rte_unused,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

#ifndef HAVE_MLX5DV_DR
	/* Without DR support only group 0 exists. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table;
	int ret;

	/* With DR, only verify the group translates to a valid table. */
	ret = mlx5_flow_group_to_table(attributes, external,
				       attributes->group, !!priv->fdb_def_rule,
				       &table, error);
	if (ret)
		return ret;
#endif
	/* MLX5_FLOW_PRIO_RSVD means "let the PMD pick the priority". */
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		/* E-Switch (transfer) rules have extra requirements. */
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one direction must be requested. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return 0;
}
/**
* Internal validation function. For validating both actions and items.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[in] external
* This flow rule is created by request external to PMD.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 bool external, struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	/* 0xff means "L4 protocol not constrained by an L3 item yet". */
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	int actions_n = 0;
	uint8_t item_ipv6_proto = 0;
	/* Remembered so a following GRE_KEY item can be checked against it. */
	const struct rte_flow_item *gre_item = NULL;
	const struct rte_flow_action_raw_decap *decap;
	const struct rte_flow_action_raw_encap *encap;
	const struct rte_flow_action_rss *rss;
	struct rte_flow_item_tcp nic_tcp_mask = {
		.hdr = {
			.tcp_flags = 0xFF,
			.src_port = RTE_BE16(UINT16_MAX),
			.dst_port = RTE_BE16(UINT16_MAX),
		}
	};
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	/* 0xFFFF means "no queue/RSS action seen yet". */
	uint16_t queue_index = 0xFFFF;

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, external, error);
	if (ret < 0)
		return ret;
	/* First pass: validate every pattern item, accumulate item_flags. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int type = items->type;

		switch (type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			ret = flow_dv_validate_item_port_id
					(dev, items, attr, item_flags, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_PORT_ID;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			/* Remember the masked EtherType for later L3 items. */
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			/* VLAN inner_type overrides the Ethernet EtherType. */
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/* Track the masked L4 protocol for later L4 items. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				item_ipv6_proto =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			/* Saved so a GRE_KEY item can reference it. */
			gre_item = items;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_NVGRE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			ret = mlx5_flow_validate_item_gre_key
				(items, item_flags, gre_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ret = mlx5_flow_validate_item_geneve(items,
							     item_flags, dev,
							     error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_MARK:
			ret = flow_dv_validate_item_mark(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_MARK;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = mlx5_flow_validate_item_icmp(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			/* Marked so IPv6 header-modify actions get rejected. */
			item_ipv6_proto = IPPROTO_ICMPV6;
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_TAG:
			ret = flow_dv_validate_item_tag(dev, items,
							attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			/* PMD-internal items, trusted - no validation. */
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	/* Second pass: validate every action, accumulate action_flags. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int type = actions->type;
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = flow_dv_validate_action_flag(dev, action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* In extensive metadata mode FLAG becomes a modify. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_FLAG |
						MLX5_FLOW_ACTION_MARK_EXT;
			} else {
				action_flags |= MLX5_FLOW_ACTION_FLAG;
				++actions_n;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, actions,
							   action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* In extensive metadata mode MARK becomes a modify. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
			} else {
				action_flags |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_SET_META:
			ret = flow_dv_validate_action_set_meta(dev, actions,
							       action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= MLX5_FLOW_ACTION_SET_META;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TAG:
			ret = flow_dv_validate_action_set_tag(dev, actions,
							      action_flags,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remembered for the hairpin Xcap check below. */
			queue_index = ((const struct rte_flow_action_queue *)
							(actions->conf))->index;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss = actions->conf;
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			/* First RSS queue stands in for the hairpin check. */
			if (rss != NULL && rss->queue_num)
				queue_index = rss->queue[0];
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			if (flow_dv_validate_action_pop_vlan(dev,
							     action_flags,
							     actions,
							     item_flags, attr,
							     error))
				return -rte_errno;
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			ret = flow_dv_validate_action_push_vlan(action_flags,
								item_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			ret = flow_dv_validate_action_set_vlan_pcp
						(action_flags, actions, error);
			if (ret < 0)
				return ret;
			/* Count PCP with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			ret = flow_dv_validate_action_set_vlan_vid
						(item_flags, action_flags,
						 actions, error);
			if (ret < 0)
				return ret;
			/* Count VID with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_decap(action_flags, attr,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap_decap
				(NULL, actions->conf, attr, &action_flags,
				 &actions_n, error);
			if (ret < 0)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			decap = actions->conf;
			/* Peek past VOIDs for a paired RAW_ENCAP action. */
			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
				;
			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
				encap = NULL;
				actions--;
			} else {
				encap = actions->conf;
			}
			ret = flow_dv_validate_action_raw_encap_decap
					   (decap ? decap : &empty_decap, encap,
					    attr, &action_flags, &actions_n,
					    error);
			if (ret < 0)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* ICMPv6 checksum covers the IPv6 header - reject. */
			if (item_ipv6_proto == IPPROTO_ICMPV6)
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"Can't change header "
					"with ICMPv6 proto");
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			ret = flow_dv_validate_action_jump(actions,
							   action_flags,
							   attr, external,
							   error);
			if (ret)
				return ret;
			++actions_n;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
			ret = flow_dv_validate_action_modify_tcp_seq
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
						MLX5_FLOW_ACTION_INC_TCP_SEQ :
						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
			ret = flow_dv_validate_action_modify_tcp_ack
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
						MLX5_FLOW_ACTION_INC_TCP_ACK :
						MLX5_FLOW_ACTION_DEC_TCP_ACK;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
			/* PMD-internal actions, trusted - no validation. */
			break;
		case RTE_FLOW_ACTION_TYPE_METER:
			ret = mlx5_flow_validate_action_meter(dev,
							      action_flags,
							      actions, attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_METER;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	/* Eswitch has few restrictions on using items and actions */
	if (attr->transfer) {
		if (!mlx5_flow_ext_mreg_supported(dev) &&
		    action_flags & MLX5_FLOW_ACTION_FLAG)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action FLAG");
		if (!mlx5_flow_ext_mreg_supported(dev) &&
		    action_flags & MLX5_FLOW_ACTION_MARK)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action MARK");
		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (action_flags & MLX5_FLOW_ACTION_RSS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action RSS");
		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	} else {
		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	}
	/* Continue validation for Xcap actions.*/
	if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
	    mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
		if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
		    MLX5_FLOW_XCAP_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination aren't supported");
		if (!attr->transfer && attr->ingress && (action_flags &
							MLX5_FLOW_ACTION_ENCAP))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap is not supported"
						  " for ingress traffic");
	}
	return 0;
}
/**
 * Internal preparation function. Allocates the DV flow size,
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item items[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		struct rte_flow_error *error)
{
	struct mlx5_flow *flow;

	/* The DV flow descriptor has a fixed size; allocate it zeroed. */
	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (flow == NULL) {
		/* rte_flow_error_set() also records ENOMEM in rte_errno. */
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow");
		return NULL;
	}
	/* Matcher value buffer always spans the full fte_match_param. */
	flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
	flow->ingress = attr->ingress;
	flow->transfer = attr->transfer;
	return flow;
}
#ifndef NDEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
	const uint8_t *mask = match_mask;
	const uint8_t *value = match_value;
	unsigned int idx;

	/* Every bit set in the value must also be set in the mask. */
	for (idx = 0; idx < MLX5_ST_SZ_BYTES(fte_match_param); ++idx) {
		if ((value[idx] & ~mask[idx]) != 0) {
			DRV_LOG(ERR,
				"match_value differs from match_criteria"
				" %p[%u] != %p[%u]",
				match_value, idx, match_mask, idx);
			return -EINVAL;
		}
	}
	return 0;
}
#endif
/**
 * Add Ethernet item to matcher and to the value.
 *
 * Copies the DMAC/SMAC masks verbatim into the matcher and stores the
 * spec values pre-masked (value AND mask) as required by the firmware.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	void *headers_m;
	void *headers_v;
	char *l24_v;
	unsigned int i;

	/* No spec: nothing to match on this item. */
	if (!eth_v)
		return;
	/* Absent mask means "match fully" - use the NIC default mask. */
	if (!eth_m)
		eth_m = &nic_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->src); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	if (eth_v->type) {
		/* When ethertype is present set mask for tagged VLAN. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
		/* Set value for tagged VLAN if ethertype is 802.1Q. */
		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
				 1);
			/* Return here to avoid setting match on ethertype. */
			return;
		}
	}
	/*
	 * HW supports match on one Ethertype, the Ethertype following the last
	 * VLAN tag of the packet (see PRM).
	 * Set match on ethertype only if ETH header is not followed by VLAN.
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
	/* Both sides are big-endian, so the AND is endianness-neutral. */
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	uint16_t tci_m;
	uint16_t tci_v;

	/* No spec: nothing to match on this item. */
	if (!vlan_v)
		return;
	/* Fall back to the generic rte_flow default VLAN mask. */
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Record the 12-bit VID for VF VLAN insertion handling. */
		dev_flow->dv.vf_vlan.tag =
			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/* Value is pre-masked; TCI layout: PCP[15:13] DEI[12] VID[11:0]. */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
	/* inner_type is the Ethertype following this VLAN tag. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    const uint64_t item_flags,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 uses the full 0xf version mask; other groups use 0x4.
	 * NOTE(review): presumably a root-table matching restriction -
	 * confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* IP version is matched even without a spec; rest needs one. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	/* Addresses stay big-endian; value is stored pre-masked. */
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte: ECN in bits [1:0], DSCP in bits [7:2]. */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
	/*
	 * On outer header (which must contains L2), or inner header with L2,
	 * set cvlan_tag mask bit to mark this packet as untagged.
	 */
	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    const uint64_t item_flags,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	/* Flow label lives in misc parameters, not in the L3 header set. */
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Group 0 uses the full 0xf version mask; other groups use 0x6. */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* IP version is matched even without a spec; rest needs one. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	/* Mask is copied verbatim; value is stored pre-masked bytewise. */
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow: version[31:28] TC[27:20] flow-label[19:0] (CPU order). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	/* MLX5_SET truncates to the 20-bit flow label field width. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/*
	 * On outer header (which must contains L2), or inner header with L2,
	 * set cvlan_tag mask bit to mark this packet as untagged.
	 */
	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
}
/**
 * Add TCP item to matcher and to the value.
 *
 * Always pins ip_protocol to TCP; ports and flags are matched only
 * when a spec is supplied (value stored pre-masked).
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_tcp *mask = item->mask;
	const struct rte_flow_item_tcp *spec = item->spec;
	void *hdrs_m;
	void *hdrs_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The L4 protocol is matched unconditionally. */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ip_protocol, IPPROTO_TCP);
	if (spec == NULL)
		return;
	if (mask == NULL)
		mask = &rte_flow_item_tcp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, tcp_sport,
		 rte_be_to_cpu_16(mask->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, tcp_sport,
		 rte_be_to_cpu_16(spec->hdr.src_port & mask->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, tcp_dport,
		 rte_be_to_cpu_16(mask->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, tcp_dport,
		 rte_be_to_cpu_16(spec->hdr.dst_port & mask->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, tcp_flags,
		 mask->hdr.tcp_flags);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, tcp_flags,
		 (spec->hdr.tcp_flags & mask->hdr.tcp_flags));
}
/**
 * Add UDP item to matcher and to the value.
 *
 * Always pins ip_protocol to UDP; ports are matched only when a spec
 * is supplied (value stored pre-masked).
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_udp *mask = item->mask;
	const struct rte_flow_item_udp *spec = item->spec;
	void *hdrs_m;
	void *hdrs_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The L4 protocol is matched unconditionally. */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ip_protocol, IPPROTO_UDP);
	if (spec == NULL)
		return;
	if (mask == NULL)
		mask = &rte_flow_item_udp_mask;
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, udp_sport,
		 rte_be_to_cpu_16(mask->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, udp_sport,
		 rte_be_to_cpu_16(spec->hdr.src_port & mask->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, udp_dport,
		 rte_be_to_cpu_16(mask->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, udp_dport,
		 rte_be_to_cpu_16(spec->hdr.dst_port & mask->hdr.dst_port));
}
/**
 * Add GRE optional Key item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (spec/mask are big-endian 32-bit keys).
 */
static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
				   const struct rte_flow_item *item)
{
	const rte_be32_t *key_m = item->mask;
	const rte_be32_t *key_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

	/* GRE K bit must be on and should already be validated */
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
	if (!key_v)
		return;
	if (!key_m)
		key_m = &gre_key_default_mask;
	/* The 32-bit key is split: high 24 bits and low 8 bits. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
		 rte_be_to_cpu_32(*key_m) >> 8);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
		 rte_be_to_cpu_32(*key_m) & 0xFF);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
}
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Bit-field view of the GRE c_rsvd0_ver word after byte-swap to
	 * CPU order. NOTE(review): field order assumes little-endian bit
	 * allocation - confirm for big-endian builds.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The IP protocol is matched unconditionally. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	/* C/K/S present bits are matched individually, value pre-masked. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
/**
 * Add NVGRE item to matcher and to the value.
 *
 * NVGRE is translated as a GRE header with fixed C/K/S bits and TEB
 * protocol, plus TNI+flow_id matched through the GRE key fields.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m = (const char *)nvgre_m->tni;
	const char *tni_flow_id_v = (const char *)nvgre_v->tni;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;
	/* For NVGRE, GRE header fields must be set with defined values. */
	/* 0x2000: K bit set; 0xB000 mask: C, K and S bits. */
	const struct rte_flow_item_gre gre_spec = {
		.c_rsvd0_ver = RTE_BE16(0x2000),
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
	};
	const struct rte_flow_item_gre gre_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};
	const struct rte_flow_item gre_item = {
		.spec = &gre_spec,
		.mask = &gre_mask,
		.last = NULL,
	};

	/* Reuse the GRE translator for the common GRE part. */
	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
	/*
	 * NOTE(review): nvgre_m/nvgre_v are dereferenced above before the
	 * NULL checks below - presumably validation guarantees a spec here;
	 * confirm against the caller.
	 */
	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/* TNI (24 bits) and flow_id (8 bits) map onto the GRE key. */
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Pin the well-known UDP port only if no explicit port match set. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_mask;
	/* VNI: 24-bit network identifier, value stored pre-masked. */
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
/**
 * Add VXLAN-GPE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
				 const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	/* GPE fields (VNI, flags, next protocol) live in misc3. */
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;
	/* Default flags value 0xc (P and B bits) with a full mask. */
	uint8_t flags_m = 0xff;
	uint8_t flags_v = 0xc;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * NOTE(review): ternary copied from the VXLAN translator; here
	 * item->type is VXLAN_GPE so this always picks the GPE port.
	 */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Pin the well-known UDP port only if no explicit port match set. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
	/* VNI: 24-bit network identifier, value stored pre-masked. */
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
	/* User-provided flags override the defaults. */
	if (vxlan_m->flags) {
		flags_m = vxlan_m->flags;
		flags_v = vxlan_v->flags;
	}
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
		 vxlan_m->protocol);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
		 vxlan_v->protocol);
}
/**
 * Add Geneve item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t dport;
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m;
	char *vni_v;
	size_t size, i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = MLX5_UDP_PORT_GENEVE;
	/* Pin the well-known UDP port only if no explicit port match set. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!geneve_v)
		return;
	if (!geneve_m)
		geneve_m = &rte_flow_item_geneve_mask;
	/* VNI: 24-bit network identifier, value stored pre-masked. */
	size = sizeof(geneve_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
	/* Extract OAM flag and option length from the first header word. */
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
/**
 * Add MPLS item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* First pin the carrier protocol depending on the previous layer. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 RTE_ETHER_TYPE_MPLS);
		break;
	default:
		/* MPLS directly over IP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Then pick the matcher field matching the carrier protocol. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Label word is copied as-is; value stored pre-masked. */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
/**
 * Add metadata register item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register
 * @param[in] data
 *   Register value
 * @param[in] mask
 *   Register mask
 */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp;

	/* Value is always stored pre-masked. */
	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 */
		/* OR the new mask bits into the existing C0 mask. */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		/* Replace only the masked bits of the existing C0 value. */
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Unknown register: programming error upstream. */
		assert(false);
		break;
	}
}
/**
 * Add MARK item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_mark(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_mark *mark;
	uint32_t value;
	uint32_t mask;

	/* Mask defaults to the generic mark mask, clipped to HW support. */
	mark = item->mask ? (const void *)item->mask :
	      &rte_flow_item_mark_mask;
	mask = mark->id & priv->sh->dv_mark_mask;
	/* Spec is mandatory here (validated earlier); value pre-masked. */
	mark = (const void *)item->spec;
	assert(mark);
	value = mark->id & priv->sh->dv_mark_mask & mask;
	if (mask) {
		enum modify_reg reg;

		/* Get the metadata register index for the mark. */
		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
		assert(reg > 0);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);

			/* C0 is shared; shift into the app-owned bits. */
			mask &= msk_c0;
			mask <<= shl_c0;
			value <<= shl_c0;
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
/**
 * Add META item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		/* Resolve which metadata register carries META here. */
		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		/*
		 * In datapath code there is no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			/* Undo the BE swap offset before shifting into C0. */
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			/* C0 is shared; shift into the app-owned bits. */
			value <<= shl_c0;
			mask <<= shl_c0;
			assert(msk_c0);
			assert(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
/**
 * Add vport metadata Reg C0 item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Metadata value to match in register C0.
 * @param[in] mask
 *   Register C0 bits to take part in the match.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/* Vport metadata is always carried in metadata register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
/**
 * Add tag item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (internal mlx5 tag item, spec mandatory).
 */
static void
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	assert(tag_v);
	value = tag_v->data;
	/* Missing mask means matching on all bits. */
	mask = tag_m ? tag_m->data : UINT32_MAX;
	if (tag_v->id == REG_C_0) {
		/*
		 * Only the bits covered by dv_regc0_mask are usable in
		 * REG_C_0, so confine the mask and shift value/mask into
		 * that sub-field.
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		mask &= msk_c0;
		mask <<= shl_c0;
		value <<= shl_c0;
	}
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}
/**
 * Add TAG item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (application TAG item, spec mandatory).
 */
static void
flow_dv_translate_item_tag(struct rte_eth_dev *dev,
			   void *matcher, void *key,
			   const struct rte_flow_item *item)
{
	const struct rte_flow_item_tag *tag_v = item->spec;
	const struct rte_flow_item_tag *tag_m = item->mask;
	enum modify_reg reg;

	assert(tag_v);
	/* Fall back to the default TAG mask when the item carries none. */
	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
	/* Get the metadata register index for the tag. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
	assert(reg > 0);
	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
}
/**
 * Add source vport match to the specified matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match
 * @param[in] mask
 *   Mask
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_value = MLX5_ADDR_OF(fte_match_param, key,
					misc_parameters);
	void *misc_mask = MLX5_ADDR_OF(fte_match_param, matcher,
				       misc_parameters);

	/* Match on the source vport number in the misc parameters. */
	MLX5_SET(fte_match_set_misc, misc_value, source_port, port);
	MLX5_SET(fte_match_set_misc, misc_mask, source_port, mask);
}
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate. May be NULL to match the device's own port.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* Default to full mask and this device's own port id. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/* Translate to vport field or to metadata, depending on mode. */
	if (priv->vport_meta_mask)
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	else
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	return 0;
}
/**
 * Add ICMP6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp6(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      int inner)
{
	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

	/* Select inner or outer headers for the L3 part of the match. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The item always implies matching on the ICMPv6 IP protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
	/* No spec means a protocol-only match. */
	if (!icmp6_v)
		return;
	if (!icmp6_m)
		icmp6_m = &rte_flow_item_icmp6_mask;
	/*
	 * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
	 * If only the protocol is specified, no need to match the frag.
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
		 icmp6_v->type & icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
		 icmp6_v->code & icmp6_m->code);
}
/**
 * Add ICMP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

	/* Select inner or outer headers for the L3 part of the match. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The item always implies matching on the ICMP IP protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	/* No spec means a protocol-only match. */
	if (!icmp_v)
		return;
	if (!icmp_m)
		icmp_m = &rte_flow_item_icmp_mask;
	/*
	 * Force flow only to match the non-fragmented IPv4 ICMP packets.
	 * If only the protocol is specified, no need to match the frag.
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
}
/* All-zeroes reference block used to detect an unset header mask. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/*
 * Evaluate to non-zero when the given headers sub-struct of the match
 * criteria contains only zero bytes, i.e. that field is not matched on.
 *
 * NOTE: the trailing backslash after the last macro line was removed —
 * it spliced the *following* source line into the #define and only
 * worked by accident because a comment happened to follow.
 */
#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
	uint8_t enable = 0;

	/* Raise a criteria bit only when its header block is non-zero. */
	if (!HEADER_IS_ZERO(match_criteria, outer_headers))
		enable |= 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	if (!HEADER_IS_ZERO(match_criteria, misc_parameters))
		enable |= 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
	if (!HEADER_IS_ZERO(match_criteria, inner_headers))
		enable |= 1 << MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
	if (!HEADER_IS_ZERO(match_criteria, misc_parameters_2))
		enable |= 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	if (!HEADER_IS_ZERO(match_criteria, misc_parameters_3))
		enable |= 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
	return enable;
}
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = table_id,
			.reserved = 0,
			.domain = !!transfer,
			.direction = !!egress,
		}
	};
	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
							 table_key.v64);
	struct mlx5_flow_tbl_data_entry *tbl_data;
	int ret;
	void *domain;

	/* Reuse an already registered table, taking one more reference. */
	if (pos) {
		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
		tbl = &tbl_data->tbl;
		rte_atomic32_inc(&tbl->refcnt);
		return tbl;
	}
	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl = &tbl_data->tbl;
	pos = &tbl_data->entry;
	/* Pick the steering domain matching the table attributes. */
	if (transfer)
		domain = sh->fdb_domain;
	else if (egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
	if (!tbl->obj) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		rte_free(tbl_data);
		return NULL;
	}
	/*
	 * No multi-threads now, but still better to initialize the reference
	 * count before insert it into the hash list.
	 */
	rte_atomic32_init(&tbl->refcnt);
	/* Jump action reference count is initialized here. */
	rte_atomic32_init(&tbl_data->jump.refcnt);
	pos->key = table_key.v64;
	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
	if (ret < 0) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot insert flow table data entry");
		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
		rte_free(tbl_data);
		/*
		 * BUGFIX: previously the code fell through, incremented
		 * the refcnt of the just-freed entry and returned the
		 * dangling pointer (use-after-free). Fail the call.
		 */
		return NULL;
	}
	rte_atomic32_inc(&tbl->refcnt);
	return tbl;
}
/**
 * Release a flow table.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tbl
 *   Table resource to be released. May be NULL (treated as released).
 *
 * @return
 *   Returns 0 if table was released, else return 1;
 */
static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Check for NULL before deriving the enclosing entry from it. */
	if (!tbl)
		return 0;
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
		struct mlx5_hlist_entry *pos = &tbl_data->entry;

		/* Last reference gone: destroy the hardware object. */
		mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
		tbl->obj = NULL;
		/* remove the entry from the hash list and free memory. */
		mlx5_hlist_remove(sh->flow_tbls, pos);
		rte_free(tbl_data);
		return 0;
	}
	return 1;
}
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] key
 *   Pointer to flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Takes one table reference on success. */
	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
				       key->domain, error);
	if (!tbl)
		return -rte_errno;	/* No need to refill the error info */
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"%s group %u priority %hd use %s "
				"matcher %p: refcnt %d++",
				key->domain ? "FDB" : "NIC", key->table_id,
				cache_matcher->priority,
				key->direction ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			/* old matcher should not make the table ref++. */
			flow_dv_tbl_resource_release(dev, tbl);
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher) {
		flow_dv_tbl_resource_release(dev, tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (key->direction)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		flow_dv_tbl_resource_release(dev, tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	/* Save the table information */
	cache_matcher->tbl = tbl;
	rte_atomic32_init(&cache_matcher->refcnt);
	/* only matcher ref++, table ref++ already done above in get API. */
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
		key->domain ? "FDB" : "NIC", key->table_id,
		cache_matcher->priority,
		key->direction ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	return 0;
}
/**
 * Find existing tag resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big endian then R-shift 8.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 uint32_t tag_be24,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_tag_resource *cache_resource;
	struct mlx5_hlist_entry *entry;

	/* Lookup a matching resource from cache. */
	entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
	if (entry) {
		cache_resource = container_of
			(entry, struct mlx5_flow_dv_tag_resource, entry);
		rte_atomic32_inc(&cache_resource->refcnt);
		dev_flow->dv.tag_resource = cache_resource;
		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
			(void *)cache_resource,
			rte_atomic32_read(&cache_resource->refcnt));
		return 0;
	}
	/* Register new resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	cache_resource->entry.key = (uint64_t)tag_be24;
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	/* Roll back the action and memory on insertion failure. */
	if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
		mlx5_glue->destroy_flow_action(cache_resource->action);
		rte_free(cache_resource);
		return rte_flow_error_set(error, EEXIST,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot insert tag");
	}
	dev_flow->dv.tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
/**
 * Release the tag.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param tag
 *   Pointer to the tag resource to release. Must not be NULL.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_tag_release(struct rte_eth_dev *dev,
		    struct mlx5_flow_dv_tag_resource *tag)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	assert(tag);
	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
		dev->data->port_id, (void *)tag,
		rte_atomic32_read(&tag->refcnt));
	if (rte_atomic32_dec_and_test(&tag->refcnt)) {
		/* Last reference: destroy action, unlink entry and free. */
		claim_zero(mlx5_glue->destroy_flow_action(tag->action));
		mlx5_hlist_remove(sh->tag_table, &tag->entry);
		DRV_LOG(DEBUG, "port %u tag %p: removed",
			dev->data->port_id, (void *)tag);
		rte_free(tag);
		return 0;
	}
	return 1;
}
/**
* Translate port ID action to vport.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in] action
* Pointer to the port ID action.
* @param[out] dst_port_id
* The target port ID.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
uint32_t *dst_port_id,
struct rte_flow_error *error)
{
uint32_t port;
struct mlx5_priv *priv;
const struct rte_flow_action_port_id *conf =
(const struct rte_flow_action_port_id *)action->conf;
port = conf->original ? dev->data->port_id : conf->id;
priv = mlx5_port_to_eswitch_info(port, false);
if (!priv)
return rte_flow_error_set(error, -rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"No eswitch info was found for port");
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
/*
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
*dst_port_id = priv->ibv_port;
#else
/*
* Legacy mode, no LAG configurations is supported.
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_vport().
*/
*dst_port_id = priv->vport_id;
#endif
return 0;
}
/**
 * Add Tx queue matcher
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue;

	/* Both mask and spec are required; silently skip otherwise. */
	queue_m = (const void *)item->mask;
	if (!queue_m)
		return;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	/* Translate the Tx queue index into the underlying SQ number. */
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	queue = txq->obj->sq->id;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	/* Drop the reference taken by mlx5_txq_get(). */
	mlx5_txq_release(dev, queue_v->queue);
}
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] dev_flow
* Pointer to the sub flow.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
__flow_dv_translate(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
.size = sizeof(matcher.mask.buf),
},
};
int actions_n = 0;
bool actions_end = false;
union {
struct mlx5_flow_dv_modify_hdr_resource res;
uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
sizeof(struct mlx5_modification_cmd) *
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
union flow_dv_attr flow_attr = { .attr = 0 };
uint32_t tag_be;
union mlx5_flow_tbl_key tbl_key;
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
struct rte_vlan_hdr vlan = { 0 };
uint32_t table;
int ret = 0;
mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
!!priv->fdb_def_rule, &table, error);
if (ret)
return ret;
dev_flow->group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = dev_conf->flow_prio - 1;
/* number of actions must be set to 0 in case of dirty stack. */
mhdr_res->actions_num = 0;
for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
const struct rte_flow_action_count *count = action->conf;
const uint8_t *rss_key;
const struct rte_flow_action_jump *jump_data;
const struct rte_flow_action_meter *mtr;
struct mlx5_flow_tbl_resource *tbl;
uint32_t port_id = 0;
struct mlx5_flow_dv_port_id_action_resource port_id_resource;
int action_type = actions->type;
const struct rte_flow_action *found_action = NULL;
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
port_id_resource.port_id = port_id;
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
};
if (flow_dv_convert_action_mark(dev, &mark,
mhdr_res,
error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
actions->conf;
if (flow_dv_convert_action_mark(dev, mark,
mhdr_res,
error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
break;
}
/* Fall-through */
case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
/* Legacy (non-extensive) MARK action. */
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
(dev, tag_be, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
(dev, mhdr_res, attr,
(const struct rte_flow_action_set_meta *)
actions->conf, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_META;
break;
case RTE_FLOW_ACTION_TYPE_SET_TAG:
if (flow_dv_convert_action_set_tag
(dev, mhdr_res,
(const struct rte_flow_action_set_tag *)
actions->conf, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
action_flags |= MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
assert(flow->rss.queue);
queue = actions->conf;
flow->rss.queue_num = 1;
(*flow->rss.queue)[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
assert(flow->rss.queue);
rss = actions->conf;
if (flow->rss.queue)
memcpy((*flow->rss.queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
/*
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
*/
action_flags |= MLX5_FLOW_ACTION_RSS;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
rte_errno = ENOTSUP;
goto cnt_err;
}
flow->counter = flow_dv_counter_alloc(dev,
count->shared,
count->id,
dev_flow->group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
flow->counter->action;
action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
cnt_err:
if (rte_errno == ENOTSUP)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"count action not supported");
else
return rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
action,
"cannot create counter"
" object.");
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
dev_flow->dv.actions[actions_n++] =
priv->sh->pop_vlan_action;
action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
flow_dev_get_vlan_info_from_items(items, &vlan);
vlan.eth_proto = rte_be_to_cpu_16
((((const struct rte_flow_action_of_push_vlan *)
actions->conf)->ethertype));
found_action = mlx5_flow_find_action
(actions + 1,
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
if (found_action)
mlx5_update_vlan_vid_pcp(found_action, &vlan);
found_action = mlx5_flow_find_action
(actions + 1,
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
if (found_action)
mlx5_update_vlan_vid_pcp(found_action, &vlan);
if (flow_dv_create_action_push_vlan
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
/* of_vlan_push action handled this action */
assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
break;
flow_dev_get_vlan_info_from_items(items, &vlan);
mlx5_update_vlan_vid_pcp(actions, &vlan);
/* If no VLAN push - this is a modify header action */
if (flow_dv_convert_action_modify_vlan_vid
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
if (flow_dv_create_action_l2_encap(dev, actions,
dev_flow,
attr->transfer,
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
if (flow_dv_create_action_l2_decap(dev, dev_flow,
attr->transfer,
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* Handle encap with preceding decap. */
if (action_flags & MLX5_FLOW_ACTION_DECAP) {
if (flow_dv_create_action_raw_encap
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
(dev, actions, dev_flow, attr->transfer,
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
;
if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
if (flow_dv_create_action_l2_decap
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
ret = mlx5_flow_group_to_table(attr, dev_flow->external,
jump_data->group,
!!priv->fdb_def_rule,
&table, error);
if (ret)
return ret;
tbl = flow_dv_tbl_resource_get(dev, table,
attr->egress,
attr->transfer, error);
if (!tbl)
return rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"cannot create jump action.");
if (flow_dv_jump_tbl_resource_register
(dev, tbl, dev_flow, error)) {
flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
if (flow_dv_convert_action_modify_mac
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
MLX5_FLOW_ACTION_SET_MAC_SRC :
MLX5_FLOW_ACTION_SET_MAC_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
if (flow_dv_convert_action_modify_ipv4
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
MLX5_FLOW_ACTION_SET_IPV4_SRC :
MLX5_FLOW_ACTION_SET_IPV4_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
if (flow_dv_convert_action_modify_ipv6
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
MLX5_FLOW_ACTION_SET_IPV6_SRC :
MLX5_FLOW_ACTION_SET_IPV6_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
if (flow_dv_convert_action_modify_tp
(mhdr_res, actions, items,
&flow_attr, dev_flow, !!(action_flags &
MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
MLX5_FLOW_ACTION_SET_TP_SRC :
MLX5_FLOW_ACTION_SET_TP_DST;
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
if (flow_dv_convert_action_modify_dec_ttl
(mhdr_res, items, &flow_attr, dev_flow,
!!(action_flags &
MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
if (flow_dv_convert_action_modify_ttl
(mhdr_res, actions, items, &flow_attr,
dev_flow, !!(action_flags &
MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
if (flow_dv_convert_action_modify_tcp_seq
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
MLX5_FLOW_ACTION_INC_TCP_SEQ :
MLX5_FLOW_ACTION_DEC_TCP_SEQ;
break;
case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
if (flow_dv_convert_action_modify_tcp_ack
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
MLX5_FLOW_ACTION_INC_TCP_ACK :
MLX5_FLOW_ACTION_DEC_TCP_ACK;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
if (flow_dv_convert_action_set_reg
(mhdr_res, actions, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
if (flow_dv_convert_action_copy_mreg
(dev, mhdr_res, actions, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_METER:
mtr = actions->conf;
if (!flow->meter) {
flow->meter = mlx5_flow_meter_attach(priv,
mtr->mtr_id, attr,
error);
if (!flow->meter)
return rte_flow_error_set(error,
rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"meter not found "
"or invalid parameters");
}
/* Set the meter action. */
dev_flow->dv.actions[actions_n++] =
flow->meter->mfts->meter_action;
action_flags |= MLX5_FLOW_ACTION_METER;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
/* create modify action if needed. */
if (flow_dv_modify_hdr_resource_register
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
dev_flow->dv.modify_hdr->verbs_action;
}
break;
default:
break;
}
if (mhdr_res->actions_num &&
modify_action_position == UINT32_MAX)
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
dev_flow->actions = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id(dev, match_mask,
match_value, items);
last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(dev_flow,
match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
(MLX5_FLOW_LAYER_OUTER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
MLX5_IPV4_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
next_protocol &=
((const struct rte_flow_item_ipv4 *)
(items->mask))->hdr.next_proto_id;
} else {
/* Reset for inner layer. */
next_protocol = 0xff;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
items, item_flags, tunnel,
dev_flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
MLX5_IPV6_IBV_RX_HASH);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
next_protocol &=
((const struct rte_flow_item_ipv6 *)
items->mask)->hdr.proto;
} else {
/* Reset for inner layer. */
next_protocol = 0xff;
}
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_TCP,
IBV_RX_HASH_SRC_PORT_TCP |
IBV_RX_HASH_DST_PORT_TCP);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
flow_dv_translate_item_udp(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L4;
dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_UDP,
IBV_RX_HASH_SRC_PORT_UDP |
IBV_RX_HASH_DST_PORT_UDP);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
match_value, items);
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
flow_dv_translate_item_mark(dev, match_mask,
match_value, items);
last_item = MLX5_FLOW_ITEM_MARK;
break;
case RTE_FLOW_ITEM_TYPE_META:
flow_dv_translate_item_meta(dev, match_mask,
match_value, attr, items);
last_item = MLX5_FLOW_ITEM_METADATA;
break;
case RTE_FLOW_ITEM_TYPE_ICMP:
flow_dv_translate_item_icmp(match_mask, match_value,
items, tunnel);
last_item = MLX5_FLOW_LAYER_ICMP;
break;
case RTE_FLOW_ITEM_TYPE_ICMP6:
flow_dv_translate_item_icmp6(match_mask, match_value,
items, tunnel);
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
case RTE_FLOW_ITEM_TYPE_TAG:
flow_dv_translate_item_tag(dev, match_mask,
match_value, items);
last_item = MLX5_FLOW_ITEM_TAG;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
flow_dv_translate_mlx5_item_tag(dev, match_mask,
match_value, items);
last_item = MLX5_FLOW_ITEM_TAG;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
flow_dv_translate_item_tx_queue(dev, match_mask,
match_value,
items);
last_item = MLX5_FLOW_ITEM_TX_QUEUE;
break;
default:
break;
}
item_flags |= last_item;
}
/*
* When E-Switch mode is enabled, we have two cases where we need to
* set the source port manually.
* The first one, is in case of Nic steering rule, and the second is
* E-Switch rule where no port_id item was found. In both cases
* the source port is set according the current port in use.
*/
if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
(priv->representor || priv->master)) {
if (flow_dv_translate_item_port_id(dev, match_mask,
match_value, NULL))
return -rte_errno;
}
assert(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
/*
* Layers may be already initialized from prefix flow if this dev_flow
* is the suffix flow.
*/
dev_flow->layers |= item_flags;
/* Register matcher. */
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
/* reserved field no needs to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
tbl_key.table_id = dev_flow->group;
if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
}
/**
 * Apply the flow to the NIC, lock free,
 * (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

	/* Create one hardware flow rule per device sub-flow. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (dev_flow->transfer) {
				/* E-Switch rules share a single drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				/* NIC rules drop via a dedicated drop queue. */
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (dev_flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			assert(flow->rss.queue);
			/* Reuse a cached hash Rx queue when one matches. */
			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dev_flow->hash_fields,
					     (*flow->rss.queue),
					     flow->rss.queue_num);
			if (!hrxq) {
				/* Otherwise create one for this RSS config. */
				hrxq = mlx5_hrxq_new
					(dev, flow->rss.key,
					 MLX5_RSS_HASH_KEY_LEN,
					 dev_flow->hash_fields,
					 (*flow->rss.queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			}
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dev_flow->dv.vf_vlan.tag &&
		    !dev_flow->dv.vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back hash queues and VLAN interfaces acquired so far. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;

		if (dv->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
		if (dev_flow->dv.vf_vlan.tag &&
		    dev_flow->dv.vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

	assert(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
		rte_atomic32_read(&matcher->refcnt));
	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
		/* Last reference: destroy the HW object, drop cache entry. */
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
		/* table ref-- in release interface. */
		flow_dv_tbl_resource_release(dev, matcher->tbl);
		rte_free(matcher);
		DRV_LOG(DEBUG, "port %u matcher %p: removed",
			dev->data->port_id, (void *)matcher);
		return 0;
	}
	return 1;
}
/**
 * Release an encap/decap resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
						flow->dv.encap_decap;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		/* Last reference: destroy the verbs action, free the entry. */
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Release an jump to table action resource.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
	/* The jump resource is embedded in its table entry. */
	struct mlx5_flow_tbl_data_entry *tbl_data =
			container_of(cache_resource,
				     struct mlx5_flow_tbl_data_entry, jump);

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		/* jump action memory free is inside the table release. */
		flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
		DRV_LOG(DEBUG, "jump table resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Release a modify-header resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
						flow->dv.modify_hdr;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		/* Last reference: destroy the verbs action, free the entry. */
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "modify-header resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Release port ID action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
						flow->dv.port_id_action;

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		/* Last reference: destroy the action, free the cache entry. */
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "port id action resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Release push vlan action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
						flow->dv.push_vlan_res;

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		/* Last reference: destroy the action, free the cache entry. */
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "push vlan action resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}
/**
 * Remove the flow from the NIC but keeps it in memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	/*
	 * Tear down HW state only: destroy the installed rules and release
	 * the hash Rx queues; cached SW resources stay allocated.
	 */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		if (dv->flow) {
			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
			dv->flow = NULL;
		}
		if (dv->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
		/* Release the VLAN interface created for VF vport context. */
		if (dev_flow->dv.vf_vlan.tag &&
		    dev_flow->dv.vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
	}
}
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	/* First remove the HW rules, then release all SW resources. */
	__flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_release(dev, flow->counter);
		flow->counter = NULL;
	}
	if (flow->meter) {
		mlx5_flow_meter_detach(flow->meter);
		flow->meter = NULL;
	}
	/* Drop each sub-flow's references on the shared cached resources. */
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		if (dev_flow->dv.encap_decap)
			flow_dv_encap_decap_resource_release(dev_flow);
		if (dev_flow->dv.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev_flow);
		if (dev_flow->dv.jump)
			flow_dv_jump_tbl_resource_release(dev, dev_flow);
		if (dev_flow->dv.port_id_action)
			flow_dv_port_id_action_resource_release(dev_flow);
		if (dev_flow->dv.push_vlan_res)
			flow_dv_push_vlan_action_resource_release(dev_flow);
		if (dev_flow->dv.tag_resource)
			flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
		rte_free(dev_flow);
	}
}
/**
 * Query a dv flow rule for its statistics via devx.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	/* Counter reads require DevX support. */
	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		uint64_t pkts, bytes;
		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
					       &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		/* Report values relative to the last reset baseline. */
		qc->hits = pkts - flow->counter->hits;
		qc->bytes = bytes - flow->counter->bytes;
		if (qc->reset) {
			/* Record the raw HW values as the new baseline. */
			flow->counter->hits = pkts;
			flow->counter->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
/**
* Query a flow.
*
* @see rte_flow_query()
* @see rte_flow_ops
*/
static int
flow_dv_query(struct rte_eth_dev *dev,
struct rte_flow *flow __rte_unused,
const struct rte_flow_action *actions __rte_unused,
void *data __rte_unused,
struct rte_flow_error *error __rte_unused)
{
int ret = -EINVAL;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_query_count(dev, flow, data, error);
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"action not supported");
}
}
return ret;
}
/**
 * Destroy the meter table set.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] tbl
 *   Pointer to the meter table set.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
			struct mlx5_meter_domains_infos *tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtd =
				(struct mlx5_meter_domains_infos *)tbl;

	if (!mtd || !priv->config.dv_flow_en)
		return 0;
	/* Destroy the default drop rules of all three domains first. */
	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_glue->dv_destroy_flow
			  (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_glue->dv_destroy_flow
			  (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_glue->dv_destroy_flow
			  (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
	/* Then the matchers and tables, per domain (egress/ingress/FDB). */
	if (mtd->egress.color_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->egress.color_matcher));
	if (mtd->egress.any_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->egress.any_matcher));
	if (mtd->egress.tbl)
		claim_zero(flow_dv_tbl_resource_release(dev,
							mtd->egress.tbl));
	if (mtd->ingress.color_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->ingress.color_matcher));
	if (mtd->ingress.any_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->ingress.any_matcher));
	if (mtd->ingress.tbl)
		claim_zero(flow_dv_tbl_resource_release(dev,
							mtd->ingress.tbl));
	if (mtd->transfer.color_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->transfer.color_matcher));
	if (mtd->transfer.any_matcher)
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			  (mtd->transfer.any_matcher));
	if (mtd->transfer.tbl)
		claim_zero(flow_dv_tbl_resource_release(dev,
							mtd->transfer.tbl));
	if (mtd->drop_actn)
		claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
	rte_free(mtd);
	return 0;
}
/* Number of meter flow actions, count and jump or count and drop. */
#define METER_ACTIONS 2

/**
 * Create specify domain meter table and suffix table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtb
 *   Pointer to DV meter table set.
 * @param[in] egress
 *   Table attribute.
 * @param[in] transfer
 *   Table attribute.
 * @param[in] color_reg_c_idx
 *   Reg C index for color match.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *mtb,
			   uint8_t egress, uint8_t transfer,
			   uint32_t color_reg_c_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[METER_ACTIONS];
	struct mlx5_flow_tbl_resource **sfx_tbl;
	struct mlx5_meter_domain_info *dtb;
	struct rte_flow_error error;
	int i = 0;

	/* Select domain: FDB (transfer) takes precedence over egress. */
	if (transfer) {
		sfx_tbl = &sh->fdb_mtr_sfx_tbl;
		dtb = &mtb->transfer;
	} else if (egress) {
		sfx_tbl = &sh->tx_mtr_sfx_tbl;
		dtb = &mtb->egress;
	} else {
		sfx_tbl = &sh->rx_mtr_sfx_tbl;
		dtb = &mtb->ingress;
	}
	/* If the suffix table is missing, create it. */
	if (!(*sfx_tbl)) {
		*sfx_tbl = flow_dv_tbl_resource_get(dev,
						MLX5_FLOW_TABLE_LEVEL_SUFFIX,
						egress, transfer, &error);
		if (!(*sfx_tbl)) {
			DRV_LOG(ERR, "Failed to create meter suffix table.");
			return -1;
		}
	}
	/* Create the meter table with METER level. */
	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
					    egress, transfer, &error);
	if (!dtb->tbl) {
		DRV_LOG(ERR, "Failed to create meter policer table.");
		return -1;
	}
	/* Create matchers, Any and Color. */
	/* "Any" matcher: lowest priority, empty criteria, matches all. */
	dv_attr.priority = 3;
	dv_attr.match_criteria_enable = 0;
	dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
							     &dv_attr,
							     dtb->tbl->obj);
	if (!dtb->any_matcher) {
		DRV_LOG(ERR, "Failed to create meter"
			     " policer default matcher.");
		goto error_exit;
	}
	/* Color matcher: highest priority, matches the color register. */
	dv_attr.priority = 0;
	dv_attr.match_criteria_enable =
				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
	dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
							       &dv_attr,
							       dtb->tbl->obj);
	if (!dtb->color_matcher) {
		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
		goto error_exit;
	}
	/* Count action is optional; drop action is always appended. */
	if (mtb->count_actns[RTE_MTR_DROPPED])
		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
	actions[i++] = mtb->drop_actn;
	/* Default rule: lowest priority, match any, actions: drop. */
	dtb->policer_rules[RTE_MTR_DROPPED] =
			mlx5_glue->dv_create_flow(dtb->any_matcher,
						  (void *)&value, i, actions);
	if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
		goto error_exit;
	}
	return 0;
error_exit:
	/* NOTE(review): partially created objects are not released here —
	 * presumably the caller cleans up via flow_dv_destroy_mtr_tbl(). */
	return -1;
}
/**
 * Create the needed meter and suffix tables.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 *
 * @return
 *   Pointer to table set on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_meter_domains_infos *
flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
		       const struct mlx5_flow_meter *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb;
	int ret;
	int i;

	if (!priv->mtr_en) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
	if (!mtb) {
		DRV_LOG(ERR, "Failed to allocate memory for meter.");
		return NULL;
	}
	/* Create meter count actions */
	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
		if (!fm->policer_stats.cnt[i])
			continue;
		mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
	}
	/* Create drop action. */
	mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
	if (!mtb->drop_actn) {
		DRV_LOG(ERR, "Failed to create drop action.");
		goto error_exit;
	}
	/* Egress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare egress meter table.");
		goto error_exit;
	}
	/* Ingress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
		goto error_exit;
	}
	/* FDB meter table, only when E-Switch DV flow is enabled. */
	if (priv->config.dv_esw_en) {
		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
						 priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
			goto error_exit;
		}
	}
	return mtb;
error_exit:
	/* Releases everything created so far, including mtb itself. */
	flow_dv_destroy_mtr_tbl(dev, mtb);
	return NULL;
}
/**
 * Destroy domain policer rule.
 *
 * @param[in] dt
 *   Pointer to domain table.
 */
static void
flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
{
	int i;

	/*
	 * Per-color rules only (i < RTE_MTR_DROPPED); the default drop
	 * rule at index RTE_MTR_DROPPED belongs to the table set and is
	 * released by flow_dv_destroy_mtr_tbl().
	 */
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		if (dt->policer_rules[i]) {
			claim_zero(mlx5_glue->dv_destroy_flow
				  (dt->policer_rules[i]));
			dt->policer_rules[i] = NULL;
		}
	}
	if (dt->jump_actn) {
		claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
		dt->jump_actn = NULL;
	}
}
/**
 * Destroy policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
			      const struct mlx5_flow_meter *fm,
			      const struct rte_flow_attr *attr)
{
	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;

	if (!mtb)
		return 0;
	/* Tear down only the domains selected by the flow attributes. */
	if (attr->egress)
		flow_dv_destroy_domain_policer_rule(&mtb->egress);
	if (attr->ingress)
		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
	if (attr->transfer)
		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
	return 0;
}
/**
 * Create specify domain meter policer rule.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] mtb
 *   Pointer to DV meter table set.
 * @param[in] sfx_tb
 *   Pointer to suffix table.
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    struct mlx5_flow_tbl_resource *sfx_tb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;

	/* Create jump action. */
	if (!sfx_tb)
		return -1;
	/* Reuse the jump action if an earlier call already created it. */
	if (!dtb->jump_actn)
		dtb->jump_actn =
			mlx5_glue->dr_create_flow_action_dest_flow_tbl
							(sfx_tb->obj);
	if (!dtb->jump_actn) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	/* One rule per color, excluding the RTE_MTR_DROPPED slot. */
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		/* Match the meter color written into the REG_C register. */
		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		/* Policer action: drop the packet or jump to suffix table. */
		if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		dtb->policer_rules[i] =
			mlx5_glue->dv_create_flow(dtb->color_matcher,
						 (void *)&value,
						  j, actions);
		if (!dtb->policer_rules[i]) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	rte_errno = errno;
	return -1;
}
/**
 * Create policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter *fm,
			     const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	int ret;

	/* Create rules only for the domains the flow attributes select. */
	if (attr->egress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
						priv->sh->tx_mtr_sfx_tbl,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create egress policer.");
			goto error;
		}
	}
	if (attr->ingress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
						priv->sh->rx_mtr_sfx_tbl,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create ingress policer.");
			goto error;
		}
	}
	if (attr->transfer) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
						priv->sh->fdb_mtr_sfx_tbl,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create transfer policer.");
			goto error;
		}
	}
	return 0;
error:
	/* Roll back whatever domains were already set up. */
	flow_dv_destroy_policer_rules(dev, fm, attr);
	return -1;
}
/**
 * Query a devx counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] cnt
 *   Pointer to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise return -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev,
		      struct mlx5_flow_counter *cnt, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	/* Counter reads require DevX support. */
	if (!priv->config.devx)
		return -1;
	ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	/* Report values relative to the last clear baseline. */
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		/* Record the raw HW values as the new baseline. */
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}
/*
* Mutex-protected thunk to lock-free __flow_dv_translate().
*/
static int
flow_dv_translate(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
int ret;
flow_dv_shared_lock(dev);
ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
flow_dv_shared_unlock(dev);
return ret;
}
/*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 *
 * Serializes flow installation against other DV operations sharing the
 * same IB context.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int rc;

	flow_dv_shared_lock(dev);
	rc = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other DV operations on the shared context. */
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other DV operations on the shared context. */
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 *
 * Allocates a non-shared, non-batch counter (id 0, shared 0, batch 1)
 * under the shared DV lock.
 */
static struct mlx5_flow_counter *
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	struct mlx5_flow_counter *counter;

	flow_dv_shared_lock(dev);
	counter = flow_dv_counter_alloc(dev, 0, 0, 1);
	flow_dv_shared_unlock(dev);
	return counter;
}
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
{
	/* Serialize against other DV operations on the shared context. */
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}
/* DV (Direct Verbs/Rules) flow engine driver callbacks. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
| 29.496673 | 80 | 0.707449 | [
"object"
] |
3694f417226c53c5fd0925557adeec1fa5272b99 | 3,093 | h | C | src/Transformation.h | ORB-HD/Puppeteer | 6b9cea9e74987932ff8ed3bcb562eaa4abd39355 | [
"MIT"
] | 10 | 2016-06-17T10:41:32.000Z | 2021-06-10T13:14:21.000Z | src/Transformation.h | ORB-HD/Puppeteer | 6b9cea9e74987932ff8ed3bcb562eaa4abd39355 | [
"MIT"
] | 2 | 2019-10-14T09:33:18.000Z | 2021-03-06T19:00:46.000Z | src/Transformation.h | ORB-HD/Puppeteer | 6b9cea9e74987932ff8ed3bcb562eaa4abd39355 | [
"MIT"
] | 3 | 2018-03-31T01:29:13.000Z | 2020-10-21T14:27:15.000Z | /*
* Puppeteer - A Motion Capture Mapping Tool
* Copyright (c) 2013-2015 Martin Felis <martin.felis@iwr.uni-heidelberg.de>.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE*
*/
#ifndef _TRANSFORMATION_H
#define _TRANSFORMATION_H
#include "SimpleMath/SimpleMath.h"
#include "SimpleMath/SimpleMathGL.h"
#include <iostream>
struct Transformation {
Vector3f translation;
SimpleMath::GL::Quaternion rotation;
Vector3f scaling;
Transformation() :
translation (0.f, 0.f, 0.f),
rotation (0.f, 0.f, 0.f, 1.f),
scaling (1.f, 1.f, 1.f)
{}
Transformation ( const Vector3f &translation, const SimpleMath::GL::Quaternion& rotation = SimpleMath::GL::Quaternion(0., 0., 0., 1.)) :
translation (translation),
rotation (rotation),
scaling (1.f, 1.f, 1.f)
{}
Transformation ( const Vector3f &translation, const Matrix33f rotation_mat) :
translation (translation),
rotation (SimpleMath::GL::Quaternion::fromMatrix(rotation_mat)),
scaling (1.f, 1.f, 1.f)
{}
Matrix44f toGLMatrix() const {
return SimpleMath::GL::ScaleMat44 (scaling[0], scaling[1], scaling[2])
* rotation.toGLMatrix()
* SimpleMath::GL::TranslateMat44 (translation[0], translation[1], translation[2]);
}
Transformation operator* (const Transformation &transform) const {
Transformation result (*this);
result.translation = transform.rotation.rotate(translation) + transform.translation ;
result.scaling = Vector3f (
transform.scaling[0] * scaling[0],
transform.scaling[1] * scaling[1],
transform.scaling[2] * scaling[2]
);
result.rotation = transform.rotation * rotation;
return result;
}
Transformation inverse() const {
Transformation result (*this);
result.rotation = rotation.conjugate();
result.translation = result.rotation.rotate(translation) * -1.f;
return result;
}
Vector3f apply(const Vector3f &vec) const {
Vector3f temp = rotation.rotate (vec + translation);
return Vector3f (temp[0] * scaling[0], temp[1] * scaling[1], temp[2] * scaling[2]);
}
};
/* _TRANSFORMATION_H */
#endif
| 34.366667 | 137 | 0.725509 | [
"transform"
] |
369864ccf2c42c24cf83cb98297f0c289e34ada3 | 15,539 | c | C | ApproxSpline/Resources/C-Sources/dierckx/fpopsp.c | modelica-3rdparty/ApproxSpline | 28420f5c1a88c9cd069defbd8c05e4a78a090675 | [
"BSD-3-Clause"
] | 2 | 2020-07-13T17:24:35.000Z | 2021-03-18T16:16:02.000Z | ApproxSpline/Resources/C-Sources/dierckx/fpopsp.c | tbeu/ApproxSpline | 28420f5c1a88c9cd069defbd8c05e4a78a090675 | [
"BSD-3-Clause"
] | 2 | 2019-03-28T09:08:23.000Z | 2022-03-29T07:35:48.000Z | ApproxSpline/Resources/C-Sources/dierckx/fpopsp.c | modelica-3rdparty/ApproxSpline | 28420f5c1a88c9cd069defbd8c05e4a78a090675 | [
"BSD-3-Clause"
] | 3 | 2018-10-21T07:46:49.000Z | 2022-03-29T20:02:06.000Z | /* fpopsp.f -- translated by f2c (version 20061008).
You must link the resulting object file with libf2c:
on Microsoft Windows system, link with libf2c.lib;
on Linux or Unix systems, link with .../path/to/libf2c.a -lm
or, if you install libf2c.a in a standard place, with -lf2c -lm
-- in that order, at the end of the command line, as in
cc *.o -lf2c -lm
Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
http://www.netlib.org/f2c/libf2c.zip
*/
#include "f2c.h"
/* Table of constant values */
static integer c__0 = 0;
static integer c__1 = 1;
/*< >*/
/* Subroutine */ int fpopsp_(integer *ifsu, integer *ifsv, integer *ifbu,
integer *ifbv, doublereal *u, integer *mu, doublereal *v, integer *mv,
doublereal *r__, integer *mr, doublereal *r0, doublereal *r1,
doublereal *dr, integer *iopt, integer *ider, doublereal *tu, integer
*nu, doublereal *tv, integer *nv, integer *nuest, integer *nvest,
doublereal *p, doublereal *step, doublereal *c__, integer *nc,
doublereal *fp, doublereal *fpu, doublereal *fpv, integer *nru,
integer *nrv, doublereal *wrk, integer *lwrk)
{
/* System generated locals */
integer i__1, i__2;
doublereal d__1;
/* Local variables */
static doublereal a[36] /* was [6][6] */, g[6];
static integer i__, j, l, i1, l1, l2, mm, lq, nr[6];
static doublereal sq;
static integer id0, la0, la1, lb0, lb1, lc0, lc1, id1;
static doublereal sq0, sq1;
static integer lau, lbu, lbv, lcs, lri;
static doublereal drr[6], sqq;
static integer lsu, lsv;
static doublereal sum[6];
static integer lav1, lav2, iop0, iop1, mvnu;
static doublereal step1, step2, delta[6], three;
static integer number;
extern /* Subroutine */ int fpgrsp_(integer *, integer *, integer *,
integer *, integer *, doublereal *, integer *, doublereal *,
integer *, doublereal *, integer *, doublereal *, integer *,
integer *, doublereal *, integer *, doublereal *, integer *,
doublereal *, doublereal *, integer *, doublereal *, doublereal *,
doublereal *, doublereal *, integer *, integer *, doublereal *,
doublereal *, doublereal *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *, integer *, integer *),
fpsysy_(doublereal *, integer *, doublereal *);
/* given the set of function values r(i,j) defined on the rectangular */
/* grid (u(i),v(j)),i=1,2,...,mu;j=1,2,...,mv, fpopsp determines a */
/* smooth bicubic spline approximation with given knots tu(i),i=1,..,nu */
/* in the u-direction and tv(j),j=1,2,...,nv in the v-direction. this */
/* spline sp(u,v) will be periodic in the variable v and will satisfy */
/* the following constraints */
/* s(tu(1),v) = dr(1) , tv(4) <=v<= tv(nv-3) */
/* s(tu(nu),v) = dr(4) , tv(4) <=v<= tv(nv-3) */
/* and (if iopt(2) = 1) */
/* d s(tu(1),v) */
/* ------------ = dr(2)*cos(v)+dr(3)*sin(v) , tv(4) <=v<= tv(nv-3) */
/* d u */
/* and (if iopt(3) = 1) */
/* d s(tu(nu),v) */
/* ------------- = dr(5)*cos(v)+dr(6)*sin(v) , tv(4) <=v<= tv(nv-3) */
/* d u */
/* where the parameters dr(i) correspond to the derivative values at the */
/* poles as defined in subroutine spgrid. */
/* the b-spline coefficients of sp(u,v) are determined as the least- */
/* squares solution of an overdetermined linear system which depends */
/* on the value of p and on the values dr(i),i=1,...,6. the correspond- */
/* ing sum of squared residuals sq is a simple quadratic function in */
/* the variables dr(i). these may or may not be provided. the values */
/* dr(i) which are not given will be determined so as to minimize the */
/* resulting sum of squared residuals sq. in that case the user must */
/* provide some initial guess dr(i) and some estimate (dr(i)-step, */
/* dr(i)+step) of the range of possible values for these latter. */
/* sp(u,v) also depends on the parameter p (p>0) in such a way that */
/* - if p tends to infinity, sp(u,v) becomes the least-squares spline */
/* with given knots, satisfying the constraints. */
/* - if p tends to zero, sp(u,v) becomes the least-squares polynomial, */
/* satisfying the constraints. */
/* - the function f(p)=sumi=1,mu(sumj=1,mv((r(i,j)-sp(u(i),v(j)))**2) */
/* is continuous and strictly decreasing for p>0. */
/* ..scalar arguments.. */
/*< >*/
/*< real r0,r1,p,fp >*/
/* ..array arguments.. */
/*< integer ider(4),nru(mu),nrv(mv),iopt(3) >*/
/*< >*/
/* ..local scalars.. */
/*< real res,sq,sqq,sq0,sq1,step1,step2,three >*/
/*< >*/
/* ..local arrays.. */
/*< integer nr(6) >*/
/*< real delta(6),drr(6),sum(6),a(6,6),g(6) >*/
/* ..function references.. */
/*< integer max0 >*/
/* ..subroutine references.. */
/* fpgrsp,fpsysy */
/* .. */
/* set constant */
/*< three = 3 >*/
/* Parameter adjustments */
--nru;
--u;
--nrv;
--v;
--r__;
--dr;
--iopt;
--ider;
--fpu;
--tu;
--fpv;
--tv;
--step;
--c__;
--wrk;
/* Function Body */
three = 3.;
/* we partition the working space */
/*< lsu = 1 >*/
lsu = 1;
/*< lsv = lsu+4*mu >*/
lsv = lsu + (*mu << 2);
/*< lri = lsv+4*mv >*/
lri = lsv + (*mv << 2);
/*< mm = max0(nuest,mv+nvest) >*/
/* Computing MAX */
i__1 = *nuest, i__2 = *mv + *nvest;
mm = max(i__1,i__2);
/*< lq = lri+mm >*/
lq = lri + mm;
/*< mvnu = nuest*(mv+nvest-8) >*/
mvnu = *nuest * (*mv + *nvest - 8);
/*< lau = lq+mvnu >*/
lau = lq + mvnu;
/*< lav1 = lau+5*nuest >*/
lav1 = lau + *nuest * 5;
/*< lav2 = lav1+6*nvest >*/
lav2 = lav1 + *nvest * 6;
/*< lbu = lav2+4*nvest >*/
lbu = lav2 + (*nvest << 2);
/*< lbv = lbu+5*nuest >*/
lbv = lbu + *nuest * 5;
/*< la0 = lbv+5*nvest >*/
la0 = lbv + *nvest * 5;
/*< la1 = la0+2*mv >*/
la1 = la0 + (*mv << 1);
/*< lb0 = la1+2*mv >*/
lb0 = la1 + (*mv << 1);
/*< lb1 = lb0+2*nvest >*/
lb1 = lb0 + (*nvest << 1);
/*< lc0 = lb1+2*nvest >*/
lc0 = lb1 + (*nvest << 1);
/*< lc1 = lc0+nvest >*/
lc1 = lc0 + *nvest;
/*< lcs = lc1+nvest >*/
lcs = lc1 + *nvest;
/* we calculate the smoothing spline sp(u,v) according to the input */
/* values dr(i),i=1,...,6. */
/*< iop0 = iopt(2) >*/
iop0 = iopt[2];
/*< iop1 = iopt(3) >*/
iop1 = iopt[3];
/*< id0 = ider(1) >*/
id0 = ider[1];
/*< id1 = ider(3) >*/
id1 = ider[3];
/*< >*/
fpgrsp_(ifsu, ifsv, ifbu, ifbv, &c__0, &u[1], mu, &v[1], mv, &r__[1], mr,
&dr[1], &iop0, &iop1, &tu[1], nu, &tv[1], nv, p, &c__[1], nc, &sq,
fp, &fpu[1], &fpv[1], &mm, &mvnu, &wrk[lsu], &wrk[lsv], &wrk[lri]
, &wrk[lq], &wrk[lau], &wrk[lav1], &wrk[lav2], &wrk[lbu], &wrk[
lbv], &wrk[la0], &wrk[la1], &wrk[lb0], &wrk[lb1], &wrk[lc0], &wrk[
lc1], &wrk[lcs], &nru[1], &nrv[1]);
/*< sq0 = 0. >*/
sq0 = 0.;
/*< sq1 = 0. >*/
sq1 = 0.;
/*< if(id0.eq.0) sq0 = (r0-dr(1))**2 >*/
if (id0 == 0) {
/* Computing 2nd power */
d__1 = *r0 - dr[1];
sq0 = d__1 * d__1;
}
/*< if(id1.eq.0) sq1 = (r1-dr(4))**2 >*/
if (id1 == 0) {
/* Computing 2nd power */
d__1 = *r1 - dr[4];
sq1 = d__1 * d__1;
}
/*< sq = sq+sq0+sq1 >*/
sq = sq + sq0 + sq1;
/* in case all derivative values dr(i) are given (step<=0) or in case */
/* we have spline interpolation, we accept this spline as a solution. */
/*< if(sq.le.0.) return >*/
if (sq <= 0.) {
return 0;
}
/*< if(step(1).le.0. .and. step(2).le.0.) return >*/
if (step[1] <= 0. && step[2] <= 0.) {
return 0;
}
/*< do 10 i=1,6 >*/
for (i__ = 1; i__ <= 6; ++i__) {
/*< drr(i) = dr(i) >*/
drr[i__ - 1] = dr[i__];
/*< 10 continue >*/
/* L10: */
}
/* number denotes the number of derivative values dr(i) that still must */
/* be optimized. let us denote these parameters by g(j),j=1,...,number. */
/*< number = 0 >*/
number = 0;
/*< if(id0.gt.0) go to 20 >*/
if (id0 > 0) {
goto L20;
}
/*< number = 1 >*/
number = 1;
/*< nr(1) = 1 >*/
nr[0] = 1;
/*< delta(1) = step(1) >*/
delta[0] = step[1];
/*< 20 if(iop0.eq.0) go to 30 >*/
L20:
if (iop0 == 0) {
goto L30;
}
/*< if(ider(2).ne.0) go to 30 >*/
if (ider[2] != 0) {
goto L30;
}
/*< step2 = step(1)*three/(tu(5)-tu(4)) >*/
step2 = step[1] * three / (tu[5] - tu[4]);
/*< nr(number+1) = 2 >*/
nr[number] = 2;
/*< nr(number+2) = 3 >*/
nr[number + 1] = 3;
/*< delta(number+1) = step2 >*/
delta[number] = step2;
/*< delta(number+2) = step2 >*/
delta[number + 1] = step2;
/*< number = number+2 >*/
number += 2;
/*< 30 if(id1.gt.0) go to 40 >*/
L30:
if (id1 > 0) {
goto L40;
}
/*< number = number+1 >*/
++number;
/*< nr(number) = 4 >*/
nr[number - 1] = 4;
/*< delta(number) = step(2) >*/
delta[number - 1] = step[2];
/*< 40 if(iop1.eq.0) go to 50 >*/
L40:
if (iop1 == 0) {
goto L50;
}
/*< if(ider(4).ne.0) go to 50 >*/
if (ider[4] != 0) {
goto L50;
}
/*< step2 = step(2)*three/(tu(nu)-tu(nu-4)) >*/
step2 = step[2] * three / (tu[*nu] - tu[*nu - 4]);
/*< nr(number+1) = 5 >*/
nr[number] = 5;
/*< nr(number+2) = 6 >*/
nr[number + 1] = 6;
/*< delta(number+1) = step2 >*/
delta[number] = step2;
/*< delta(number+2) = step2 >*/
delta[number + 1] = step2;
/*< number = number+2 >*/
number += 2;
/*< 50 if(number.eq.0) return >*/
L50:
if (number == 0) {
return 0;
}
/* the sum of squared residulas sq is a quadratic polynomial in the */
/* parameters g(j). we determine the unknown coefficients of this */
/* polymomial by calculating (number+1)*(number+2)/2 different splines */
/* according to specific values for g(j). */
/*< do 60 i=1,number >*/
i__1 = number;
for (i__ = 1; i__ <= i__1; ++i__) {
/*< l = nr(i) >*/
l = nr[i__ - 1];
/*< step1 = delta(i) >*/
step1 = delta[i__ - 1];
/*< drr(l) = dr(l)+step1 >*/
drr[l - 1] = dr[l] + step1;
/*< >*/
fpgrsp_(ifsu, ifsv, ifbu, ifbv, &c__1, &u[1], mu, &v[1], mv, &r__[1],
mr, drr, &iop0, &iop1, &tu[1], nu, &tv[1], nv, p, &c__[1], nc,
&sum[i__ - 1], fp, &fpu[1], &fpv[1], &mm, &mvnu, &wrk[lsu], &
wrk[lsv], &wrk[lri], &wrk[lq], &wrk[lau], &wrk[lav1], &wrk[
lav2], &wrk[lbu], &wrk[lbv], &wrk[la0], &wrk[la1], &wrk[lb0],
&wrk[lb1], &wrk[lc0], &wrk[lc1], &wrk[lcs], &nru[1], &nrv[1]);
/*< if(id0.eq.0) sq0 = (r0-drr(1))**2 >*/
if (id0 == 0) {
/* Computing 2nd power */
d__1 = *r0 - drr[0];
sq0 = d__1 * d__1;
}
/*< if(id1.eq.0) sq1 = (r1-drr(4))**2 >*/
if (id1 == 0) {
/* Computing 2nd power */
d__1 = *r1 - drr[3];
sq1 = d__1 * d__1;
}
/*< sum(i) = sum(i)+sq0+sq1 >*/
sum[i__ - 1] = sum[i__ - 1] + sq0 + sq1;
/*< drr(l) = dr(l)-step1 >*/
drr[l - 1] = dr[l] - step1;
/*< >*/
fpgrsp_(ifsu, ifsv, ifbu, ifbv, &c__1, &u[1], mu, &v[1], mv, &r__[1],
mr, drr, &iop0, &iop1, &tu[1], nu, &tv[1], nv, p, &c__[1], nc,
&sqq, fp, &fpu[1], &fpv[1], &mm, &mvnu, &wrk[lsu], &wrk[lsv],
&wrk[lri], &wrk[lq], &wrk[lau], &wrk[lav1], &wrk[lav2], &wrk[
lbu], &wrk[lbv], &wrk[la0], &wrk[la1], &wrk[lb0], &wrk[lb1], &
wrk[lc0], &wrk[lc1], &wrk[lcs], &nru[1], &nrv[1]);
/*< if(id0.eq.0) sq0 = (r0-drr(1))**2 >*/
if (id0 == 0) {
/* Computing 2nd power */
d__1 = *r0 - drr[0];
sq0 = d__1 * d__1;
}
/*< if(id1.eq.0) sq1 = (r1-drr(4))**2 >*/
if (id1 == 0) {
/* Computing 2nd power */
d__1 = *r1 - drr[3];
sq1 = d__1 * d__1;
}
/*< sqq = sqq+sq0+sq1 >*/
sqq = sqq + sq0 + sq1;
/*< drr(l) = dr(l) >*/
drr[l - 1] = dr[l];
/*< a(i,i) = (sum(i)+sqq-sq-sq)/step1**2 >*/
/* Computing 2nd power */
d__1 = step1;
a[i__ + i__ * 6 - 7] = (sum[i__ - 1] + sqq - sq - sq) / (d__1 * d__1);
/*< if(a(i,i).le.0.) go to 110 >*/
if (a[i__ + i__ * 6 - 7] <= 0.) {
goto L110;
}
/*< g(i) = (sqq-sum(i))/(step1+step1) >*/
g[i__ - 1] = (sqq - sum[i__ - 1]) / (step1 + step1);
/*< 60 continue >*/
/* L60: */
}
/*< if(number.eq.1) go to 90 >*/
if (number == 1) {
goto L90;
}
/*< do 80 i=2,number >*/
i__1 = number;
for (i__ = 2; i__ <= i__1; ++i__) {
/*< l1 = nr(i) >*/
l1 = nr[i__ - 1];
/*< step1 = delta(i) >*/
step1 = delta[i__ - 1];
/*< drr(l1) = dr(l1)+step1 >*/
drr[l1 - 1] = dr[l1] + step1;
/*< i1 = i-1 >*/
i1 = i__ - 1;
/*< do 70 j=1,i1 >*/
i__2 = i1;
for (j = 1; j <= i__2; ++j) {
/*< l2 = nr(j) >*/
l2 = nr[j - 1];
/*< step2 = delta(j) >*/
step2 = delta[j - 1];
/*< drr(l2) = dr(l2)+step2 >*/
drr[l2 - 1] = dr[l2] + step2;
/*< >*/
fpgrsp_(ifsu, ifsv, ifbu, ifbv, &c__1, &u[1], mu, &v[1], mv, &r__[
1], mr, drr, &iop0, &iop1, &tu[1], nu, &tv[1], nv, p, &
c__[1], nc, &sqq, fp, &fpu[1], &fpv[1], &mm, &mvnu, &wrk[
lsu], &wrk[lsv], &wrk[lri], &wrk[lq], &wrk[lau], &wrk[
lav1], &wrk[lav2], &wrk[lbu], &wrk[lbv], &wrk[la0], &wrk[
la1], &wrk[lb0], &wrk[lb1], &wrk[lc0], &wrk[lc1], &wrk[
lcs], &nru[1], &nrv[1]);
/*< if(id0.eq.0) sq0 = (r0-drr(1))**2 >*/
if (id0 == 0) {
/* Computing 2nd power */
d__1 = *r0 - drr[0];
sq0 = d__1 * d__1;
}
/*< if(id1.eq.0) sq1 = (r1-drr(4))**2 >*/
if (id1 == 0) {
/* Computing 2nd power */
d__1 = *r1 - drr[3];
sq1 = d__1 * d__1;
}
/*< sqq = sqq+sq0+sq1 >*/
sqq = sqq + sq0 + sq1;
/*< a(i,j) = (sq+sqq-sum(i)-sum(j))/(step1*step2) >*/
a[i__ + j * 6 - 7] = (sq + sqq - sum[i__ - 1] - sum[j - 1]) / (
step1 * step2);
/*< drr(l2) = dr(l2) >*/
drr[l2 - 1] = dr[l2];
/*< 70 continue >*/
/* L70: */
}
/*< drr(l1) = dr(l1) >*/
drr[l1 - 1] = dr[l1];
/*< 80 continue >*/
/* L80: */
}
/* the optimal values g(j) are found as the solution of the system */
/* d (sq) / d (g(j)) = 0 , j=1,...,number. */
/*< 90 call fpsysy(a,number,g) >*/
L90:
fpsysy_(a, &number, g);
/*< do 100 i=1,number >*/
i__1 = number;
for (i__ = 1; i__ <= i__1; ++i__) {
/*< l = nr(i) >*/
l = nr[i__ - 1];
/*< dr(l) = dr(l)+g(i) >*/
dr[l] += g[i__ - 1];
/*< 100 continue >*/
/* L100: */
}
/* we determine the spline sp(u,v) according to the optimal values g(j). */
/*< 1 >*/
L110:
fpgrsp_(ifsu, ifsv, ifbu, ifbv, &c__0, &u[1], mu, &v[1], mv, &r__[1], mr,
&dr[1], &iop0, &iop1, &tu[1], nu, &tv[1], nv, p, &c__[1], nc, &sq,
fp, &fpu[1], &fpv[1], &mm, &mvnu, &wrk[lsu], &wrk[lsv], &wrk[lri]
, &wrk[lq], &wrk[lau], &wrk[lav1], &wrk[lav2], &wrk[lbu], &wrk[
lbv], &wrk[la0], &wrk[la1], &wrk[lb0], &wrk[lb1], &wrk[lc0], &wrk[
lc1], &wrk[lcs], &nru[1], &nrv[1]);
/*< if(id0.eq.0) sq0 = (r0-dr(1))**2 >*/
if (id0 == 0) {
/* Computing 2nd power */
d__1 = *r0 - dr[1];
sq0 = d__1 * d__1;
}
/*< if(id1.eq.0) sq1 = (r1-dr(4))**2 >*/
if (id1 == 0) {
/* Computing 2nd power */
d__1 = *r1 - dr[4];
sq1 = d__1 * d__1;
}
/*< sq = sq+sq0+sq1 >*/
sq = sq + sq0 + sq1;
/*< return >*/
return 0;
/*< end >*/
} /* fpopsp_ */
| 32.508368 | 78 | 0.488641 | [
"object"
] |
36a565862176d8c9d102aa2aedc26030a6a937d4 | 4,700 | h | C | aws-cpp-sdk-groundstation/include/aws/groundstation/model/GetMinuteUsageResult.h | Neusoft-Technology-Solutions/aws-sdk-cpp | 88c041828b0dbee18a297c3cfe98c5ecd0706d0b | [
"Apache-2.0"
] | 1 | 2022-02-10T08:06:54.000Z | 2022-02-10T08:06:54.000Z | aws-cpp-sdk-groundstation/include/aws/groundstation/model/GetMinuteUsageResult.h | Neusoft-Technology-Solutions/aws-sdk-cpp | 88c041828b0dbee18a297c3cfe98c5ecd0706d0b | [
"Apache-2.0"
] | 1 | 2022-01-03T23:59:37.000Z | 2022-01-03T23:59:37.000Z | aws-cpp-sdk-groundstation/include/aws/groundstation/model/GetMinuteUsageResult.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-11-09T11:58:03.000Z | 2021-11-09T11:58:03.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/groundstation/GroundStation_EXPORTS.h>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace GroundStation
{
namespace Model
{
/**
* <p/><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/groundstation-2019-05-23/GetMinuteUsageResponse">AWS
* API Reference</a></p>
*/
class AWS_GROUNDSTATION_API GetMinuteUsageResult
{
public:
GetMinuteUsageResult();
GetMinuteUsageResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetMinuteUsageResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Estimated number of minutes remaining for an account, specific to the month
* being requested.</p>
*/
inline int GetEstimatedMinutesRemaining() const{ return m_estimatedMinutesRemaining; }
/**
* <p>Estimated number of minutes remaining for an account, specific to the month
* being requested.</p>
*/
inline void SetEstimatedMinutesRemaining(int value) { m_estimatedMinutesRemaining = value; }
/**
* <p>Estimated number of minutes remaining for an account, specific to the month
* being requested.</p>
*/
inline GetMinuteUsageResult& WithEstimatedMinutesRemaining(int value) { SetEstimatedMinutesRemaining(value); return *this;}
/**
* <p>Returns whether or not an account has signed up for the reserved minutes
* pricing plan, specific to the month being requested.</p>
*/
inline bool GetIsReservedMinutesCustomer() const{ return m_isReservedMinutesCustomer; }
/**
* <p>Returns whether or not an account has signed up for the reserved minutes
* pricing plan, specific to the month being requested.</p>
*/
inline void SetIsReservedMinutesCustomer(bool value) { m_isReservedMinutesCustomer = value; }
/**
* <p>Returns whether or not an account has signed up for the reserved minutes
* pricing plan, specific to the month being requested.</p>
*/
inline GetMinuteUsageResult& WithIsReservedMinutesCustomer(bool value) { SetIsReservedMinutesCustomer(value); return *this;}
/**
* <p>Total number of reserved minutes allocated, specific to the month being
* requested.</p>
*/
inline int GetTotalReservedMinuteAllocation() const{ return m_totalReservedMinuteAllocation; }
/**
* <p>Total number of reserved minutes allocated, specific to the month being
* requested.</p>
*/
inline void SetTotalReservedMinuteAllocation(int value) { m_totalReservedMinuteAllocation = value; }
/**
* <p>Total number of reserved minutes allocated, specific to the month being
* requested.</p>
*/
inline GetMinuteUsageResult& WithTotalReservedMinuteAllocation(int value) { SetTotalReservedMinuteAllocation(value); return *this;}
/**
* <p>Total scheduled minutes for an account, specific to the month being
* requested.</p>
*/
inline int GetTotalScheduledMinutes() const{ return m_totalScheduledMinutes; }
/**
* <p>Total scheduled minutes for an account, specific to the month being
* requested.</p>
*/
inline void SetTotalScheduledMinutes(int value) { m_totalScheduledMinutes = value; }
/**
* <p>Total scheduled minutes for an account, specific to the month being
* requested.</p>
*/
inline GetMinuteUsageResult& WithTotalScheduledMinutes(int value) { SetTotalScheduledMinutes(value); return *this;}
/**
* <p>Upcoming minutes scheduled for an account, specific to the month being
* requested.</p>
*/
inline int GetUpcomingMinutesScheduled() const{ return m_upcomingMinutesScheduled; }
/**
* <p>Upcoming minutes scheduled for an account, specific to the month being
* requested.</p>
*/
inline void SetUpcomingMinutesScheduled(int value) { m_upcomingMinutesScheduled = value; }
/**
* <p>Upcoming minutes scheduled for an account, specific to the month being
* requested.</p>
*/
inline GetMinuteUsageResult& WithUpcomingMinutesScheduled(int value) { SetUpcomingMinutesScheduled(value); return *this;}
private:
int m_estimatedMinutesRemaining;
bool m_isReservedMinutesCustomer;
int m_totalReservedMinuteAllocation;
int m_totalScheduledMinutes;
int m_upcomingMinutesScheduled;
};
} // namespace Model
} // namespace GroundStation
} // namespace Aws
| 31.756757 | 135 | 0.706809 | [
"model"
] |
36aa5687429d87710303b1f59f1aff41f723d837 | 7,438 | h | C | aws-cpp-sdk-awstransfer/include/aws/awstransfer/model/SendWorkflowStepStateRequest.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2022-02-10T08:06:54.000Z | 2022-02-10T08:06:54.000Z | aws-cpp-sdk-awstransfer/include/aws/awstransfer/model/SendWorkflowStepStateRequest.h | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | [
"Apache-2.0"
] | 1 | 2022-01-03T23:59:37.000Z | 2022-01-03T23:59:37.000Z | aws-cpp-sdk-awstransfer/include/aws/awstransfer/model/SendWorkflowStepStateRequest.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-11-09T11:58:03.000Z | 2021-11-09T11:58:03.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/awstransfer/Transfer_EXPORTS.h>
#include <aws/awstransfer/TransferRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/awstransfer/model/CustomStepStatus.h>
#include <utility>
namespace Aws
{
namespace Transfer
{
namespace Model
{
/**
*/
class AWS_TRANSFER_API SendWorkflowStepStateRequest : public TransferRequest
{
public:
SendWorkflowStepStateRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
// Note: this is not true for response, multiple operations may have the same response name,
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "SendWorkflowStepState"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>A unique identifier for the workflow.</p>
*/
inline const Aws::String& GetWorkflowId() const{ return m_workflowId; }
/**
* <p>A unique identifier for the workflow.</p>
*/
inline bool WorkflowIdHasBeenSet() const { return m_workflowIdHasBeenSet; }
/**
* <p>A unique identifier for the workflow.</p>
*/
inline void SetWorkflowId(const Aws::String& value) { m_workflowIdHasBeenSet = true; m_workflowId = value; }
/**
* <p>A unique identifier for the workflow.</p>
*/
inline void SetWorkflowId(Aws::String&& value) { m_workflowIdHasBeenSet = true; m_workflowId = std::move(value); }
/**
* <p>A unique identifier for the workflow.</p>
*/
inline void SetWorkflowId(const char* value) { m_workflowIdHasBeenSet = true; m_workflowId.assign(value); }
/**
* <p>A unique identifier for the workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithWorkflowId(const Aws::String& value) { SetWorkflowId(value); return *this;}
/**
* <p>A unique identifier for the workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithWorkflowId(Aws::String&& value) { SetWorkflowId(std::move(value)); return *this;}
/**
* <p>A unique identifier for the workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithWorkflowId(const char* value) { SetWorkflowId(value); return *this;}
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline const Aws::String& GetExecutionId() const{ return m_executionId; }
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline bool ExecutionIdHasBeenSet() const { return m_executionIdHasBeenSet; }
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline void SetExecutionId(const Aws::String& value) { m_executionIdHasBeenSet = true; m_executionId = value; }
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline void SetExecutionId(Aws::String&& value) { m_executionIdHasBeenSet = true; m_executionId = std::move(value); }
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline void SetExecutionId(const char* value) { m_executionIdHasBeenSet = true; m_executionId.assign(value); }
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithExecutionId(const Aws::String& value) { SetExecutionId(value); return *this;}
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithExecutionId(Aws::String&& value) { SetExecutionId(std::move(value)); return *this;}
/**
* <p>A unique identifier for the execution of a workflow.</p>
*/
inline SendWorkflowStepStateRequest& WithExecutionId(const char* value) { SetExecutionId(value); return *this;}
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline const Aws::String& GetToken() const{ return m_token; }
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline bool TokenHasBeenSet() const { return m_tokenHasBeenSet; }
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline void SetToken(const Aws::String& value) { m_tokenHasBeenSet = true; m_token = value; }
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline void SetToken(Aws::String&& value) { m_tokenHasBeenSet = true; m_token = std::move(value); }
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline void SetToken(const char* value) { m_tokenHasBeenSet = true; m_token.assign(value); }
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline SendWorkflowStepStateRequest& WithToken(const Aws::String& value) { SetToken(value); return *this;}
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline SendWorkflowStepStateRequest& WithToken(Aws::String&& value) { SetToken(std::move(value)); return *this;}
/**
* <p>Used to distinguish between multiple callbacks for multiple Lambda steps
* within the same execution.</p>
*/
inline SendWorkflowStepStateRequest& WithToken(const char* value) { SetToken(value); return *this;}
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline const CustomStepStatus& GetStatus() const{ return m_status; }
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; }
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline void SetStatus(const CustomStepStatus& value) { m_statusHasBeenSet = true; m_status = value; }
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline void SetStatus(CustomStepStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); }
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline SendWorkflowStepStateRequest& WithStatus(const CustomStepStatus& value) { SetStatus(value); return *this;}
/**
* <p>Indicates whether the specified step succeeded or failed.</p>
*/
inline SendWorkflowStepStateRequest& WithStatus(CustomStepStatus&& value) { SetStatus(std::move(value)); return *this;}
private:
Aws::String m_workflowId;
bool m_workflowIdHasBeenSet;
Aws::String m_executionId;
bool m_executionIdHasBeenSet;
Aws::String m_token;
bool m_tokenHasBeenSet;
CustomStepStatus m_status;
bool m_statusHasBeenSet;
};
} // namespace Model
} // namespace Transfer
} // namespace Aws
| 34.276498 | 128 | 0.681635 | [
"model"
] |
36aaa19ab04b3535840a4d84ed2c3beec3a613d8 | 4,709 | h | C | Src/Player/Content/Scenes/Blobs.h | visualizersdotnl/tpb-06-final | 7bd0b0e3fb954381466b2eb89d5edebef9f39ea7 | [
"MIT"
] | 4 | 2015-12-15T23:04:27.000Z | 2018-01-17T23:09:10.000Z | Src/Player/Content/Scenes/Blobs.h | visualizersdotnl/tpb-06-final | 7bd0b0e3fb954381466b2eb89d5edebef9f39ea7 | [
"MIT"
] | null | null | null | Src/Player/Content/Scenes/Blobs.h | visualizersdotnl/tpb-06-final | 7bd0b0e3fb954381466b2eb89d5edebef9f39ea7 | [
"MIT"
] | null | null | null |
#pragma once
class Blobs : public Scene
{
private:
Pimp::Texture2D *bgTile;
Pimp::Texture2D *envMap, *projMap;
Pimp::Texture2D *alien, *alien_a, *guy, *guy_a, *glow, *glow_a, *plek, *plek_a, *punqtured, *punqtured_a,
*shifter, *shifter_a;
const sync_track *st_credit, *st_creditX, *st_creditY;
public:
Blobs()
{
}
~Blobs()
{
}
void ReqRocketTracks()
{
s_syncTracks.push_back(SyncTrack("credit", false, &st_credit));
s_syncTracks.push_back(SyncTrack("creditX", false, &st_creditX));
s_syncTracks.push_back(SyncTrack("creditY", false, &st_creditY));
}
void ReqAssets()
{
Assets::AddTexture2D("textures\\creds\\tile-00.png", &bgTile);
Assets::AddTexture2D("textures\\creds\\envmap.png", &envMap);
Assets::AddTexture2D("textures\\creds\\projmap.png", &projMap);
Assets::AddTexture2D("textures\\creds\\alien.png", &alien, true);
Assets::AddTexture2D("textures\\creds\\alien-a.png", &alien_a, true);
Assets::AddTexture2D("textures\\creds\\glow.png", &glow, true);
Assets::AddTexture2D("textures\\creds\\glow-a.png", &glow_a, true);
Assets::AddTexture2D("textures\\creds\\shifter.png", &shifter, true);
Assets::AddTexture2D("textures\\creds\\shifter-a.png", &shifter_a, true);
Assets::AddTexture2D("textures\\creds\\plek.png", &plek, true);
Assets::AddTexture2D("textures\\creds\\plek-a.png", &plek_a, true);
Assets::AddTexture2D("textures\\creds\\punqtured.png", &punqtured, true);
Assets::AddTexture2D("textures\\creds\\punctured-a.png", &punqtured_a, true);
Assets::AddTexture2D("textures\\creds\\guy.png", &guy, true);
Assets::AddTexture2D("textures\\creds\\guy-a.png", &guy_a, true);
}
void BindToWorld()
{
}
void Tick(double row)
{
SetMainSceneAndCamera(s_defaultCam);
float time = (float) sync_get_val(st_fxTime, row);
s_sprites->AddBackgroundSprite(
bgTile,
Pimp::D3D::Blend::BM_None,
-1,
Vector2(0.f, 0.f),
Vector2(1920.f, 1080.f),
1.f,
0.f,
false,
Vector2(3.f*kTileMul, 3.f),
Vector2(-time*0.4f, -time));
// Credits
const float kCredZ = 1.f;
Pimp::Texture2D *first, *second;
first = second = nullptr;
const int credit = (int) sync_get_val(st_credit, row);
switch (credit)
{
case 1:
first = guy_a;
second = guy;
break;
case 2:
first = glow_a;
second = glow;
break;
case 3:
first = shifter_a;
second = shifter;
break;
case 4:
first = plek_a;
second = plek;
break;
case 5:
first = alien_a;
second = alien;
break;
case 6:
first = punqtured_a;
second = punqtured;
break;
}
if (first != nullptr)
{
const float first_offs = (float) sync_get_val(st_creditX, row);
const float second_offs = (float) sync_get_val(st_creditY, row);
float xoffs1 = 0.f;
float yoffs1 = 0.f;
float xoffs2 = 0.f;
float yoffs2 = 0.f;
xoffs1 = first_offs*1920.f;
yoffs1 = -first_offs*(1080.f);
xoffs2 = second_offs*1920.f;
yoffs2 = -second_offs*(1080.f);
unsigned int fade = AlphaToVtxColor(1.f);
s_sprites->AddSprite(
first,
Pimp::D3D::Blend::BM_AlphaBlend,
// Pimp::D3D::Blend::BM_AlphaPreMul,
fade,
Vector2(xoffs1, yoffs1),
Vector2(1920.f, 1080.f),
kCredZ,
0.f,
true,
false);
s_sprites->AddSprite(
second,
Pimp::D3D::Blend::BM_AlphaBlend,
// Pimp::D3D::Blend::BM_AlphaPreMul,
fade,
Vector2(xoffs2, yoffs2),
Vector2(1920.f, 1080.f),
kCredZ+0.1f,
0.f,
true,
false);
}
// FIXME: parametrize w/Rocket?
// Quaternion rotation = CreateQuaternionFromYawPitchRoll(0.f, 0.f, 0.f);
Quaternion rotation = CreateQuaternionFromYawPitchRoll(time*0.6f, time*0.8f, time*0.4f);
s_pMetaballs->SetRotation(rotation);
// FIXME: make it look interesting (attractors?)
for (unsigned int iBall4 = 0; iBall4 < kNumMetaball4s; ++iBall4)
{
for (unsigned int iBall = 0; iBall < 4; ++iBall)
{
const unsigned int ballCnt = iBall4*4 + iBall;
s_metaball4s[iBall4].X[iBall] = 0.5f*sinf(ballCnt + time*1.1f);
s_metaball4s[iBall4].Y[iBall] = 0.5f*sinf((ballCnt^7) + time*1.35f);
s_metaball4s[iBall4].Z[iBall] = 0.5f*cosf((ballCnt^5) + time*1.4f);
}
}
// Generate geometry (triggers visibility).
s_pMetaballs->Generate(kNumMetaball4s, s_metaball4s, 200.f);
// s_pMetaballs->Generate(kNumMetaball4s, s_metaball4s, 190.f);
// Set maps & lighting.
s_pMetaballs->SetMaps(
envMap, projMap,
(float) sync_get_val(st_blobsProjScrollU, row),
(float) sync_get_val(st_blobsProjScrollV, row));
s_pMetaballs->SetLighting(
(float) sync_get_val(st_blobsShininess, row),
(float) sync_get_val(st_blobsOverbright, row));
s_pMetaballs->SetRim(0.0f);
}
};
| 24.784211 | 107 | 0.660437 | [
"geometry"
] |
36aca7bcac31e2db3dbe9230ad196067667d58db | 2,480 | c | C | cmds/spells/d/_dissolving_weapon.c | Dbevan/SunderingShadows | 6c15ec56cef43c36361899bae6dc08d0ee907304 | [
"MIT"
] | 9 | 2021-07-05T15:24:54.000Z | 2022-02-25T19:44:15.000Z | cmds/spells/d/_dissolving_weapon.c | Dbevan/SunderingShadows | 6c15ec56cef43c36361899bae6dc08d0ee907304 | [
"MIT"
] | 4 | 2021-03-15T18:56:39.000Z | 2021-08-17T17:08:22.000Z | cmds/spells/d/_dissolving_weapon.c | Dbevan/SunderingShadows | 6c15ec56cef43c36361899bae6dc08d0ee907304 | [
"MIT"
] | 10 | 2021-03-13T00:18:03.000Z | 2022-03-29T15:02:42.000Z | /*
_dissolving_weapon.c
Dissolving Weapon from PF SRD.
Adds acid damage to your weapon.
-- Tlaloc --
*/
#include <std.h>
inherit SPELL;
object weapon;
void create()
{
::create();
set_author("tlaloc");
set_spell_name("dissolving weapon");
set_spell_level( ([ "psywarrior" : 2 ]) );
set_spell_sphere("psychometabolism");
set_syntax("cast CLASS dissolving weapon on WEAPON");
set_damage_desc("4d6 Acid damage on enemy on hit.");
set_description("With this power, the caster imbues their weapon with acid. The weapon has a chance on hit to do 4d6 acid damage on the victim.");
set_arg_needed(1);
}
void spell_effect()
{
mapping info;
string pname, wname, pposs;
if(!arg)
{
tell_object(caster, "You need a target for dissolving weapon.");
return;
}
weapon = present(arg, caster);
if(!objectp(weapon))
{
tell_object(caster, "There is no " + arg + " in your possession.");
this_object()->remove();
return;
}
if(!weapon->is_weapon())
{
tell_object(caster, "That is not a weapon!");
this_object()->remove();
return;
}
if(weapon->query_property("temp_hit_bonus"))
{
tell_object(caster, "That weapon is already magically enhanced.");
this_object()->remove();
return;
}
info = ([ ]);
info["file"] = "/d/magic/obj/weap_effects/acidic";
info["func name"] = "acid_func";
info["spell"] = this_object();
weapon->set_property("temp_hit_bonus", info);
pname = caster->query_cap_name();
wname = weapon->query_name();
pposs = caster->query_possessive();
tell_object(caster, "%^GREEN%^BOLD%^You focus your psychic energies onto your " + wname + " and it takes on a coat of acid.%^RESET%^");
place && tell_room(place, "%^GREEN%^BOLD%^" + pname + " focuses " + pposs + " psychic energies onto " + pposs + " weapon and it is soon coated with acid.%^RESET%^", ({ caster }));
caster->set_property("spelled", ({ this_object() }));
weapon->set_property("added short", ({ "%^GREEN%^BOLD%^ {acidic}%^RESET%^" }) );
addSpellToCaster();
}
void dest_effect()
{
if(weapon && objectp(weapon))
load_object("/d/magic/obj/weap_effects/acidic.c")->remove_prop(weapon);
::dest_effect();
if(objectp(this_object()))
this_object()->remove();
}
| 28.181818 | 184 | 0.595161 | [
"object"
] |
36adb71632e36ae766ce58f0a41762e43853a14e | 2,798 | h | C | src/plugins/blogique/interfaces/blogique/ibloggingplatformplugin.h | MellonQ/leechcraft | 71cbb238d2dade56b3865278a6a8e6a58c217fc5 | [
"BSL-1.0"
] | null | null | null | src/plugins/blogique/interfaces/blogique/ibloggingplatformplugin.h | MellonQ/leechcraft | 71cbb238d2dade56b3865278a6a8e6a58c217fc5 | [
"BSL-1.0"
] | null | null | null | src/plugins/blogique/interfaces/blogique/ibloggingplatformplugin.h | MellonQ/leechcraft | 71cbb238d2dade56b3865278a6a8e6a58c217fc5 | [
"BSL-1.0"
] | null | null | null | /**********************************************************************
* LeechCraft - modular cross-platform feature rich internet client.
* Copyright (C) 2010-2012 Oleg Linkin
*
* Boost Software License - Version 1.0 - August 17th, 2003
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
**********************************************************************/
#pragma once
#include <QList>
class QObject;
namespace LeechCraft
{
namespace Blogique
{
/** This is the base interface for plugins providing blogging platforms.
* Since these plugins are plugins for plugins, they
* should also implement IPlugin2 and return the
* "org.LeechCraft.Plugins.Blogique.Plugins.IBloggingPlatformPlugin"
* string, among others, from their IPlugin2::GetPluginClasses()
* method.
*/
class IBloggingPlatformPlugin
{
public:
virtual ~IBloggingPlatformPlugin () {}
/** @brief Returns the protocol plugin object as a QObject.
*
* @return The protocol plugin as a QObject.
*/
virtual QObject* GetQObject () = 0;
/** @brief Returns the blogging platforms list provided by this plugin.
*
* Each object in this list must implement the IBloggingPlatform
* interface.
*
* @return The list of this plugin's blogging platforms.
*
* @sa IBloggingPlatform
*/
virtual QList<QObject*> GetBloggingPlatforms () const = 0;
};
}
}
Q_DECLARE_INTERFACE (LeechCraft::Blogique::IBloggingPlatformPlugin,
"org.Deviant.LeechCraft.Blogique.IBloggingPlatformPlugin/1.0");
| 37.306667 | 78 | 0.71158 | [
"object"
] |
36b05fda02ecfad85d22a53b954a3d5fb240cb53 | 11,485 | h | C | sw_discoveryf4/trunk/lib/libopencm3/stm32/f2/scb.h | GliderWinchCommons/mc | e81878973d3120fb84c12e81bf9ef5a5fe1beec9 | [
"BSD-2-Clause"
] | 1 | 2019-07-18T07:22:19.000Z | 2019-07-18T07:22:19.000Z | sw_discoveryf4/trunk/lib/libopencm3/stm32/f2/scb.h | GliderWinchCommons/mc | e81878973d3120fb84c12e81bf9ef5a5fe1beec9 | [
"BSD-2-Clause"
] | null | null | null | sw_discoveryf4/trunk/lib/libopencm3/stm32/f2/scb.h | GliderWinchCommons/mc | e81878973d3120fb84c12e81bf9ef5a5fe1beec9 | [
"BSD-2-Clause"
] | 2 | 2019-04-03T01:44:46.000Z | 2020-04-01T07:41:41.000Z | /*
* This file is part of the libopencm3 project.
*
* Copyright (C) 2010 Piotr Esden-Tempski <piotr@esden.net>
* Copyright (C) 2010 Thomas Otto <tommi@viadmin.org>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LIBOPENCM3_SCB_H
#define LIBOPENCM3_SCB_H
#include <libopencm3/stm32/memorymap.h>
#include <libopencm3/cm3/common.h>
/* --- SCB: Registers ------------------------------------------------------ */
/* CPUID: CPUID base register */
#define SCB_CPUID MMIO32(SCB_BASE + 0x00)
/* ICSR: Interrupt Control State Register */
#define SCB_ICSR MMIO32(SCB_BASE + 0x04)
/* VTOR: Vector Table Offset Register */
#define SCB_VTOR MMIO32(SCB_BASE + 0x08)
/* AIRCR: Application Interrupt and Reset Control Register */
#define SCB_AIRCR MMIO32(SCB_BASE + 0x0C)
/* SCR: System Control Register */
#define SCB_SCR MMIO32(SCB_BASE + 0x10)
/* CCR: Configuration Control Register */
#define SCB_CCR MMIO32(SCB_BASE + 0x14)
/* SHP: System Handler Priority Registers */
/* Note: 12 8bit registers */
#define SCB_SHPR(shpr_id) MMIO8(SCB_BASE + 0x18 + shpr_id)
#define SCB_SHPR1 MMIO8(SCB_BASE + 0x18 + 1)
#define SCB_SHPR2 MMIO8(SCB_BASE + 0x18 + 2)
#define SCB_SHPR3 MMIO8(SCB_BASE + 0x18 + 3)
/* SHCSR: System Handler Control and State Register */
#define SCB_SHCSR MMIO32(SCB_BASE + 0x24)
/* CFSR: Configurable Fault Status Registers */
#define SCB_CFSR MMIO32(SCB_BASE + 0x28)
/* HFSR: Hard Fault Status Register */
#define SCB_HFSR MMIO32(SCB_BASE + 0x2C)
/* DFSR: Debug Fault Status Register */
#define SCB_DFSR MMIO32(SCB_BASE + 0x30)
/* MMFAR: Memory Manage Fault Address Register */
#define SCB_MMFAR MMIO32(SCB_BASE + 0x34)
/* BFAR: Bus Fault Address Register */
#define SCB_BFAR MMIO32(SCB_BASE + 0x38)
/* AFSR: Auxiliary Fault Status Register */
#define SCB_AFSR MMIO32(SCB_BASE + 0x3C)
/* --- SCB values ---------------------------------------------------------- */
/* --- SCB_CPUID values ---------------------------------------------------- */
/* Implementer[31:24]: Implementer code */
#define SCP_CPUID_IMPLEMENTER_LSB 24
/* Variant[23:20]: Variant number */
#define SCP_CPUID_VARIANT_LSB 20
/* Constant[19:16]: Reads as 0xF */
#define SCP_CPUID_CONSTANT_LSB 16
/* PartNo[15:4]: Part number of the processor */
#define SCP_CPUID_PARTNO_LSB 4
/* Revision[3:0]: Revision number */
#define SCP_CPUID_REVISION_LSB 0
/* --- SCB_ICSR values ----------------------------------------------------- */
/* NMIPENDSET: NMI set-pending bit */
#define SCB_ICSR_NMIPENDSET (1 << 31)
/* Bits [30:29]: reserved - must be kept cleared */
/* PENDSVSET: PendSV set-pending bit */
#define SCB_ICSR_PENDSVSET (1 << 28)
/* PENDSVCLR: PendSV clear-pending bit */
#define SCB_ICSR_PENDSVCLR (1 << 27)
/* PENDSTSET: SysTick exception set-pending bit */
#define SCB_ICSR_PENDSTSET (1 << 26)
/* PENDSTCLR: SysTick exception clear-pending bit */
#define SCB_ICSR_PENDSTCLR (1 << 25)
/* Bit 24: reserved - must be kept cleared */
/* Bit 23: reserved for debug - reads as 0 when not in debug mode */
/* ISRPENDING: Interrupt pending flag, excluding NMI and Faults */
#define SCB_ICSR_ISRPENDING (1 << 22)
/* VECTPENDING[21:12] Pending vector */
#define SCB_ICSR_VECTPENDING_LSB 12
/* RETOBASE: Return to base level */
#define SCB_ICSR_RETOBASE (1 << 11)
/* Bits [10:9]: reserved - must be kept cleared */
/* VECTACTIVE[8:0] Active vector */
#define SCB_ICSR_VECTACTIVE_LSB 0
/* --- SCB_VTOR values ----------------------------------------------------- */
/* Bits [31:30]: reserved - must be kept cleared */
/* TBLOFF[29:9]: Vector table base offset field */
#define SCB_VTOR_TBLOFF_LSB 9 /* inconsistent datasheet - LSB could be 11 */
/* --- SCB_AIRCR values ---------------------------------------------------- */
/* VECTKEYSTAT[31:16]/ VECTKEY[31:16] Register key */
#define SCB_AIRCR_VECTKEYSTAT_LSB 16
#define SCB_AIRCR_VECTKEY 0x05FA0000
/* ENDIANESS Data endianness bit */
#define SCB_AIRCR_ENDIANESS (1 << 15)
/* Bits [14:11]: reserved - must be kept cleared */
/* PRIGROUP[10:8]: Interrupt priority grouping field */
#define SCB_AIRCR_PRIGROUP_GROUP16_NOSUB (0x3 << 8)
#define SCB_AIRCR_PRIGROUP_GROUP8_SUB2 (0x4 << 8)
#define SCB_AIRCR_PRIGROUP_GROUP4_SUB4 (0x5 << 8)
#define SCB_AIRCR_PRIGROUP_GROUP2_SUB8 (0x6 << 8)
#define SCB_AIRCR_PRIGROUP_NOGROUP_SUB16 (0x7 << 8)
#define SCB_AIRCR_PRIGROUP_MASK (0x7 << 8)
#define SCB_AIRCR_PRIGROUP_SHIFT 8
/* Bits [7:3]: reserved - must be kept cleared */
/* SYSRESETREQ System reset request */
#define SCB_AIRCR_SYSRESETREQ (1 << 2)
/* VECTCLRACTIVE */
#define SCB_AIRCR_VECTCLRACTIVE (1 << 1)
/* VECTRESET */
#define SCB_AIRCR_VECTRESET (1 << 0)
/* --- SCB_SCR values ------------------------------------------------------ */
/* Bits [31:5]: reserved - must be kept cleared */
/* SEVEONPEND Send Event on Pending bit */
#define SCB_SCR_SEVEONPEND (1 << 4)
/* Bit 3: reserved - must be kept cleared */
/* SLEEPDEEP */
#define SCB_SCR_SLEEPDEEP (1 << 2)
/* SLEEPONEXIT */
#define SCB_SCR_SLEEPONEXIT (1 << 1)
/* Bit 0: reserved - must be kept cleared */
/* --- SCB_CCR values ------------------------------------------------------ */
/* Bits [31:10]: reserved - must be kept cleared */
/* STKALIGN */
#define SCB_CCR_STKALIGN (1 << 9)
/* BFHFNMIGN */
#define SCB_CCR_BFHFNMIGN (1 << 8)
/* Bits [7:5]: reserved - must be kept cleared */
/* DIV_0_TRP */
#define SCB_CCR_DIV_0_TRP (1 << 4)
/* UNALIGN_TRP */
#define SCB_CCR_UNALIGN_TRP (1 << 3)
/* Bit 2: reserved - must be kept cleared */
/* USERSETMPEND */
#define SCB_CCR_USERSETMPEND (1 << 1)
/* NONBASETHRDENA */
#define SCB_CCR_NONBASETHRDENA (1 << 0)
/* --- SCB_SHPR1 values ---------------------------------------------------- */
/* Bits [31:24]: reserved - must be kept cleared */
/* PRI_6[23:16]: Priority of system handler 6, usage fault */
#define SCB_SHPR1_PRI_6_LSB 16
/* PRI_5[15:8]: Priority of system handler 5, bus fault */
#define SCB_SHPR1_PRI_5_LSB 8
/* PRI_4[7:0]: Priority of system handler 4, memory management fault */
#define SCB_SHPR1_PRI_4_LSB 0
/* --- SCB_SHPR2 values ---------------------------------------------------- */
/* PRI_11[31:24]: Priority of system handler 11, SVCall */
#define SCB_SHPR2_PRI_11_LSB 24
/* Bits [23:0]: reserved - must be kept cleared */
/* --- SCB_SHPR3 values ---------------------------------------------------- */
/* PRI_15[31:24]: Priority of system handler 15, SysTick exception */
#define SCB_SHPR3_PRI_15_LSB 24
/* PRI_14[23:16]: Priority of system handler 14, PendSV */
#define SCB_SHPR3_PRI_14_LSB 16
/* Bits [15:0]: reserved - must be kept cleared */
/* --- SCB_SHCSR values ---------------------------------------------------- */
/* Bits [31:19]: reserved - must be kept cleared */
/* USGFAULTENA: Usage fault enable */
#define SCB_SHCSR_USGFAULTENA (1 << 18)
/* BUSFAULTENA: Bus fault enable */
#define SCB_SHCSR_BUSFAULTENA (1 << 17)
/* MEMFAULTENA: Memory management fault enable */
#define SCB_SHCSR_MEMFAULTENA (1 << 16)
/* SVCALLPENDED: SVC call pending */
#define SCB_SHCSR_SVCALLPENDED (1 << 15)
/* BUSFAULTPENDED: Bus fault exception pending */
#define SCB_SHCSR_BUSFAULTPENDED (1 << 14)
/* MEMFAULTPENDED: Memory management fault exception pending */
#define SCB_SHCSR_MEMFAULTPENDED (1 << 13)
/* USGFAULTPENDED: Usage fault exception pending */
#define SCB_SHCSR_USGFAULTPENDED (1 << 12)
/* SYSTICKACT: SysTick exception active */
#define SCB_SHCSR_SYSTICKACT (1 << 11)
/* PENDSVACT: PendSV exception active */
#define SCB_SHCSR_PENDSVACT (1 << 10)
/* Bit 9: reserved - must be kept cleared */
/* MONITORACT: Debug monitor active */
#define SCB_SHCSR_MONITORACT (1 << 8)
/* SVCALLACT: SVC call active */
#define SCB_SHCSR_SVCALLACT (1 << 7)
/* Bits [6:4]: reserved - must be kept cleared */
/* USGFAULTACT: Usage fault exception active */
#define SCB_SHCSR_USGFAULTACT (1 << 3)
/* Bit 2: reserved - must be kept cleared */
/* BUSFAULTACT: Bus fault exception active */
#define SCB_SHCSR_BUSFAULTACT (1 << 1)
/* MEMFAULTACT: Memory management fault exception active */
#define SCB_SHCSR_MEMFAULTACT (1 << 0)
/* --- SCB_CFSR values ----------------------------------------------------- */
/* Bits [31:26]: reserved - must be kept cleared */
/* DIVBYZERO: Divide by zero usage fault */
#define SCB_CFSR_DIVBYZERO (1 << 25)
/* UNALIGNED: Unaligned access usage fault */
#define SCB_CFSR_UNALIGNED (1 << 24)
/* Bits [23:20]: reserved - must be kept cleared */
/* NOCP: No coprocessor usage fault */
#define SCB_CFSR_NOCP (1 << 19)
/* INVPC: Invalid PC load usage fault */
#define SCB_CFSR_INVPC (1 << 18)
/* INVSTATE: Invalid state usage fault */
#define SCB_CFSR_INVSTATE (1 << 17)
/* UNDEFINSTR: Undefined instruction usage fault */
#define SCB_CFSR_UNDEFINSTR (1 << 16)
/* BFARVALID: Bus Fault Address Register (BFAR) valid flag */
#define SCB_CFSR_BFARVALID (1 << 15)
/* Bits [14:13]: reserved - must be kept cleared */
/* STKERR: Bus fault on stacking for exception entry */
#define SCB_CFSR_STKERR (1 << 12)
/* UNSTKERR: Bus fault on unstacking for a return from exception */
#define SCB_CFSR_UNSTKERR (1 << 11)
/* IMPRECISERR: Imprecise data bus error */
#define SCB_CFSR_IMPRECISERR (1 << 10)
/* PRECISERR: Precise data bus error */
#define SCB_CFSR_PRECISERR (1 << 9)
/* IBUSERR: Instruction bus error */
#define SCB_CFSR_IBUSERR (1 << 8)
/* MMARVALID: Memory Management Fault Address Register (MMAR) valid flag */
#define SCB_CFSR_MMARVALID (1 << 7)
/* Bits [6:5]: reserved - must be kept cleared */
/* MSTKERR: Memory manager fault on stacking for exception entry */
#define SCB_CFSR_MSTKERR (1 << 4)
/* MUNSTKERR: Memory manager fault on unstacking for a return from exception */
#define SCB_CFSR_MUNSTKERR (1 << 3)
/* Bit 2: reserved - must be kept cleared */
/* DACCVIOL: Data access violation flag */
#define SCB_CFSR_DACCVIOL (1 << 1)
/* IACCVIOL: Instruction access violation flag */
#define SCB_CFSR_IACCVIOL (1 << 0)
/* --- SCB_HFSR values ----------------------------------------------------- */
/* DEBUG_VT: reserved for debug use */
#define SCB_HFSR_DEBUG_VT (1 << 31)
/* FORCED: Forced hard fault */
#define SCB_HFSR_FORCED (1 << 30)
/* Bits [29:2]: reserved - must be kept cleared */
/* VECTTBL: Vector table hard fault */
#define SCB_HFSR_VECTTBL (1 << 1)
/* Bit 0: reserved - must be kept cleared */
/* --- SCB_MMFAR values ---------------------------------------------------- */
/* MMFAR [31:0]: Memory management fault address */
/* --- SCB_BFAR values ----------------------------------------------------- */
/* BFAR [31:0]: Bus fault address */
/* --- SCB functions ------------------------------------------------------- */
void scb_reset_core(void);
void scb_reset_system(void);
void scb_set_priority_grouping(u32 prigroup);
/* TODO: */
#endif
| 37.90429 | 79 | 0.645973 | [
"vector"
] |
36b225c4c9eba7290aba95a1e51f593db33d0ce8 | 27,960 | c | C | src/lj_gc.c | isage/luavela | 88a1d72f27a66de384471a179ca8d45ee241ecbc | [
"MIT"
] | 1 | 2019-09-02T20:58:24.000Z | 2019-09-02T20:58:24.000Z | src/lj_gc.c | isage/luavela | 88a1d72f27a66de384471a179ca8d45ee241ecbc | [
"MIT"
] | 1 | 2020-04-27T21:35:55.000Z | 2020-04-27T21:35:55.000Z | src/lj_gc.c | isage/luavela | 88a1d72f27a66de384471a179ca8d45ee241ecbc | [
"MIT"
] | 2 | 2020-04-25T18:20:48.000Z | 2020-08-03T13:00:50.000Z | /*
* Garbage collector.
* Copyright (C) 2015-2019 IPONWEB Ltd. See Copyright Notice in COPYRIGHT
*
* Portions taken verbatim or adapted from LuaJIT.
* Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*
* Major portions taken verbatim or adapted from the Lua interpreter.
* Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/
#include "lj_obj.h"
#include "uj_obj_seal.h"
#include "lj_gc.h"
#include "uj_dispatch.h"
#include "uj_throw.h"
#include "uj_str.h"
#include "uj_sbuf.h"
#include "lj_tab.h"
#include "uj_func.h"
#include "uj_proto.h"
#include "uj_upval.h"
#include "uj_udata.h"
#include "uj_meta.h"
#include "uj_state.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "ffi/lj_ctype.h"
#include "ffi/lj_cdata.h"
#endif
#include "jit/lj_trace.h"
#include "lj_vm.h"
#include "uj_strhash.h"
#define GCSTEPSIZE 1024u
/* Number of regular objects (either dead or alive) to traverse
** during GCSsweep phase in a single gc_onestep.
*/
#define GCSWEEPMAX 40
#define GCSWEEPCOST 10
#define GCFINALIZECOST 100
/* Macros to set GCobj colors and flags. */
#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
/* -- Mark phase ---------------------------------------------------------- */
/* Mark a TValue (if needed). */
#define gc_marktv(g, tv) \
{ lua_assert(!tvisgcv(tv) || (~gettag(tv) == gcval(tv)->gch.gct)); \
if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
{ if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
/* Mark a string object. */
#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o) {
int gct = o->gch.gct;
if (LJ_UNLIKELY(uj_obj_is_sealed(o))) { return; }
lua_assert(iswhite(o) && !isdead(g, o));
white2gray(o);
if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
GCtab *mt = gco2ud(o)->metatable;
gray2black(o); /* Userdata are never gray. */
if (mt) { gc_markobj(g, mt); }
gc_markobj(g, gco2ud(o)->env);
} else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
GCupval *uv = gco2uv(o);
gc_marktv(g, uvval(uv));
if (uv->closed) {
gray2black(o); /* Closed upvalues are never gray. */
}
} else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
lj_gc_push(o, &g->gc.gray);
}
}
/* Mark GC roots. */
static void gc_mark_gcroot(global_State *g) {
ptrdiff_t i;
for (i = 0; i < GCROOT_MAX; i++) {
if (g->gcroot[i] != NULL) {
gc_markobj(g, g->gcroot[i]);
}
}
}
/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
g->gc.gray = NULL;
g->gc.grayagain = NULL;
g->gc.weak = NULL;
gc_markobj(g, mainthread(g));
gc_markobj(g, mainthread(g)->env);
gc_marktv(g, &g->registrytv);
gc_mark_gcroot(g);
g->gc.state = GCSpropagate;
}
/* Mark open upvalues. */
static void gc_mark_uv(global_State *g) {
GCupval *uv;
for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
if (isgray(obj2gco(uv))) {
gc_marktv(g, uvval(uv));
}
}
}
/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g) {
GCobj *root = g->gc.mmudata;
GCobj *u = root;
if (u) {
do {
u = gcnext(u);
makewhite(g, u); /* Could be from previous GC. */
gc_mark(g, u);
} while (u != root);
}
}
/* Separate userdata objects to be finalized to mmudata list.
** Chain traversal stops at the first sealed object.
*/
size_t lj_gc_separateudata(global_State *g, int all) {
size_t m = 0;
GCobj **p = &mainthread(g)->nextgc;
GCobj *o;
while ((o = (*p)) != NULL) {
if (LJ_UNLIKELY(uj_obj_is_sealed(o))) { break; }
if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
p = &o->gch.nextgc; /* Nothing to do. */
} else if (!uj_meta_lookup_mt(g, gco2ud(o)->metatable, MM_gc)) {
markfinalized(o); /* Done, as there's no __gc metamethod. */
p = &o->gch.nextgc;
} else { /* Otherwise move userdata to be finalized to mmudata list. */
m += uj_udata_sizeof(gco2ud(o));
markfinalized(o);
*p = o->gch.nextgc;
if (g->gc.mmudata != NULL) { /* Link to end of mmudata list. */
GCobj *root = g->gc.mmudata;
o->gch.nextgc = root->gch.nextgc;
root->gch.nextgc = o;
g->gc.mmudata = o;
} else { /* Create circular list. */
o->gch.nextgc = o;
g->gc.mmudata = o;
}
}
}
return m;
}
/* -- Propagation phase --------------------------------------------------- */
/* Traverse a table. */
static int gc_traverse_tab(global_State *g, GCtab *t) {
int weak = 0;
const TValue *mode;
GCtab *mt = t->metatable;
if (mt) {
gc_markobj(g, mt);
}
mode = uj_meta_lookup_mt(g, mt, MM_mode);
if (mode && tvisstr(mode)) { /* Valid __mode field? */
const char *modestr = strVdata(mode);
int c;
while ((c = *modestr++)) {
if (c == 'k') { weak |= LJ_GC_WEAKKEY; }
else if (c == 'v') { weak |= LJ_GC_WEAKVAL; }
}
if (weak) { /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
CTState *cts = ctype_ctsG(g);
if (cts && cts->finalizer == t) {
weak = (int)(~0u & ~LJ_GC_WEAKVAL);
} else
#endif
{
t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
lj_gc_push(obj2gco(t), &g->gc.weak);
}
}
}
if (weak == LJ_GC_WEAK) { /* Nothing to mark if both keys/values are weak. */
return 1;
}
if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
size_t i, asize = t->asize;
for (i = 0; i < asize; i++) {
gc_marktv(g, arrayslot(t, i));
}
}
if (t->hmask > 0) { /* Mark hash part. */
Node *node = t->node;
size_t i, hmask = t->hmask;
for (i = 0; i <= hmask; i++) {
Node *n = &node[i];
if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
lua_assert(!tvisnil(&n->key));
if (!(weak & LJ_GC_WEAKKEY)) { gc_marktv(g, &n->key); }
if (!(weak & LJ_GC_WEAKVAL)) { gc_marktv(g, &n->val); }
}
}
}
return weak;
}
/* Traverse a function. */
static void gc_traverse_func(global_State *g, GCfunc *fn) {
gc_markobj(g, fn->c.env);
if (isluafunc(fn)) {
uint32_t i;
lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
gc_markobj(g, funcproto(fn));
for (i = 0; i < fn->l.nupvalues; i++) { /* Mark Lua function upvalues. */
gc_markobj(g, fn->l.uvptr[i]);
}
} else {
uint32_t i;
for (i = 0; i < fn->c.nupvalues; i++) { /* Mark C function upvalues. */
gc_marktv(g, &fn->c.upvalue[i]);
}
}
}
#if LJ_HASJIT
/* Mark a trace. */
static void gc_marktrace(global_State *g, TraceNo traceno) {
GCobj *o = obj2gco(traceref(G2J(g), traceno));
lua_assert(traceno != G2J(g)->cur.traceno);
if (iswhite(o)) {
white2gray(o);
lj_gc_push(o, &g->gc.gray);
}
}
/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T) {
IRRef ref;
if (T->traceno == 0) return;
for (ref = T->nk; ref < REF_TRUE; ref++) {
IRIns *ir = &T->ir[ref];
if (ir->o == IR_KGC) {
gc_markobj(g, ir_kgc(ir));
}
}
if (T->link) { gc_marktrace(g, T->link); }
if (T->nextroot) { gc_marktrace(g, T->nextroot); }
if (T->nextside) { gc_marktrace(g, T->nextside); }
gc_markobj(g, T->startpt);
}
/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g) UNUSED(g)
#endif
/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt) {
ptrdiff_t i;
gc_mark_str(proto_chunkname(pt));
for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) { /* Mark collectable consts. */
gc_markobj(g, proto_kgc(pt, i));
}
#if LJ_HASJIT
if (pt->trace) { gc_marktrace(g, pt->trace); }
#endif
}
static const TValue *max_declared_frame_slot(const TValue *frame) {
const GCfunc *fn = frame_func(frame);
const TValue *slot = frame;
if (isluafunc(fn)) {
slot += funcproto(fn)->framesize;
}
return slot;
}
/* Marks a single stack frame, i.e slots in the range [frame; top - 1]. */
static void gc_mark_stack_frame(lua_State *L, TValue *frame, const TValue *top) {
global_State *g = G(L);
TValue *slot;
lua_assert(top - frame >= 1);
if (!frame_isdummy(L, frame)) {
/* Need to mark hidden function (but not L). */
gc_markobj(g, frame_func(frame));
}
for (slot = frame + 1; slot < top; slot++) {
gc_marktv(g, slot);
}
}
/* Clears all unmarked slots above top - 1 by setting them to nil. */
static void gc_clear_upper_stack_slots(const lua_State *L) {
TValue *slot;
lua_assert(G(L)->gc.state == GCSatomic);
for (slot = L->top; slot < L->stack + L->stacksize; slot++) {
setnilV(slot);
}
}
/* Traverse stack backwards frame by frame marking all slots on each iteration.
** NB! Extra vararg frame not skipped, marks function twice (harmless).
** Returns minimum needed stack size.
**/
static size_t gc_traverse_stack(lua_State *L) {
TValue *frame = L->base - 1; /* link of the current frame */
TValue *top = L->top; /* first slot above the current frame */
const TValue *bottom = L->stack; /* absolute stack bottom */
const TValue *max_slot = L->top - 1; /* max slot that *may* be used */
lua_assert(L->top >= L->base);
lua_assert(L->top > L->stack);
lua_assert(frame_isdummy(L, L->stack));
lua_assert(frame_isbottom(L->stack));
for (;;) {
const TValue *slot;
gc_mark_stack_frame(L, frame, top); /* There is always at least 1 frame */
if (frame == bottom) {
break;
}
slot = max_declared_frame_slot(frame);
if (slot > max_slot) {
max_slot = slot;
}
top = !frame_iscont(frame)? frame : frame - 1; /* cont occupies 2 slots */
frame = frame_prev(frame);
}
max_slot++; /* Correct bias of -1 (frame == base - 1). */
if (max_slot > L->maxstack) {
max_slot = L->maxstack;
}
return (size_t)(max_slot - bottom);
}
/* Traverse a thread object. */
static void gc_traverse_thread(global_State *g, lua_State *L) {
size_t used = gc_traverse_stack(L);
if (g->gc.state == GCSatomic) {
gc_clear_upper_stack_slots(L);
}
gc_markobj(g, L->env);
uj_state_stack_shrink(L, used);
}
/* Propagate one gray object: unlink it from the gray list, turn it black,
** traverse it according to its type, and return a cost estimate (the
** object's size) for the incremental-step accounting.
*/
static size_t propagatemark(global_State *g) {
  GCobj *o = g->gc.gray;
  lua_assert(isgray(o));
  gray2black(o);
  g->gc.gray = o->gch.gclist; /* Remove from gray list. */
  switch (o->gch.gct) {
  case ~LJ_TTAB: {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0) {
      black2gray(o); /* Keep weak tables gray. */
    }
    return lj_tab_sizeof(t);
  }
  case ~LJ_TFUNC: {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return uj_func_sizeof(fn);
  }
  case ~LJ_TPROTO: {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return uj_proto_sizeof(pt);
  }
  case ~LJ_TTHREAD: {
    lua_State *th = gco2th(o);
    lj_gc_push(o, &g->gc.grayagain); /* Re-queue for the atomic phase. */
    black2gray(o); /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  }
  default: {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return lj_trace_sizeof(T);
#else
    lua_assert(0); /* Traces are the only remaining traversable type. */
    return 0;
#endif
  }
  }
}
/* Propagate every pending gray object; returns the summed size estimate
** of all objects traversed.
*/
static size_t gc_propagate_gray(global_State *g) {
  size_t total = 0;
  while (g->gc.gray) {
    total += propagatemark(g);
  }
  return total;
}
/* -- Sweep phase --------------------------------------------------------- */
/* Try to shrink some common data structures. */
/* Releases excess capacity held by the (non-sealed) string hash table and
** the per-state temporary buffer. Invoked at the end of the sweep phase.
*/
static void gc_shrink(global_State *g, lua_State *L) {
  uj_strhash_shrink(gl_strhash(g), L);
  uj_sbuf_shrink_tmp(L);
}
/* Type of GC free functions. */
typedef void (*GCFreeFunc)(global_State *g, GCobj *o);
/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T
** Indexed in gc_sweep as gc_freefunc[gct - ~LJ_TSTR], so the entries here
** must stay in exact LJ_T* tag order. Slots for features compiled out
** (JIT traces, FFI cdata) are NULL and must never be reached at runtime.
*/
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)uj_str_free,
  (GCFreeFunc)uj_upval_free,
  (GCFreeFunc)uj_state_free,
  (GCFreeFunc)uj_proto_free,
  (GCFreeFunc)uj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)uj_udata_free
};
/* Full sweep of a GC list. */
#define gc_fullsweep(g, p) gc_sweep(g, (p), LJ_MAX_MEM)
/* Partial sweep of a GC list.
** @param lim: number of objects (either dead or alive) to traverse
** Note that this should terminate when sealed object
** is encountered. In this case, end of the chain
** return is emulated.
** @return pointer to the link slot where sweeping should resume, so the
** incremental sweeper can pick up where it left off.
*/
static GCobj** gc_sweep(global_State *g, GCobj **p, uint32_t lim) {
  /* Mask with other white and LJ_GC_FIXED. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = (*p)) != NULL && lim-- > 0) {
    if (uj_obj_is_sealed(o)) {
      /* Emulate end of chain sweeping when sealed object is found. */
      /* nullobj always holds NULL, so the caller sees a finished chain. */
      p = &g->nullobj;
      break;
    }
    if (o->gch.gct == ~LJ_TTHREAD) { /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    }
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
      lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
      makewhite(g, o); /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else { /* Otherwise value is dead, free it. */
      lua_assert(isdead(g, o) || g->gc.currentwhite == LJ_GC_WHITES);
      *p = o->gch.nextgc; /* Unlink via the link slot, *p stays consistent. */
      if (o == g->gc.root) {
        g->gc.root = o->gch.nextgc; /* Adjust list anchor. */
      }
      /* Dispatch to the per-type free function; table order must match
      ** the LJ_T* tags (see gc_freefunc above).
      */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    }
  }
  return p;
}
/* Check whether we can clear a key or a value slot from a table. */
static int gc_mayclear(const TValue *o, int val) {
if (!tvisgcv(o)) { /* Only collectable objects can be weak references. */
return 0;
}
if (tvisstr(o)) { /* But strings cannot be used as weak references. */
gc_mark_str(strV(o)); /* And need to be marked. */
return 0;
}
if (uj_obj_is_sealed(gcV(o))) {
return 0; /* Sealed objects are not collected, cannot clear. */
}
if (iswhite(gcV(o))) {
return 1; /* Object is about to be collected. */
}
if (tvisudata(o) && val && isfinalized(udataV(o))) {
return 1; /* Finalized userdata is dropped only from values. */
}
return 0; /* Default: Cannot clear. */
}
/* Walk the list of weak tables and nil out entries whose key or value is
** about to be collected.
*/
static void gc_clearweak(GCobj *o) {
  while (o != NULL) {
    GCtab *t = gco2tab(o);
    lua_assert((t->marked & LJ_GC_WEAK));
    if (t->marked & LJ_GC_WEAKVAL) {
      /* Clear array slots whose values are about to be collected. */
      size_t idx;
      for (idx = 0; idx < t->asize; idx++) {
        TValue *tv = arrayslot(t, idx);
        if (gc_mayclear(tv, 1)) {
          setnilV(tv);
        }
      }
    }
    if (t->hmask > 0) {
      /* Clear hash slots whose key or value is about to be collected. */
      size_t idx;
      for (idx = 0; idx <= (size_t)t->hmask; idx++) {
        Node *n = &t->node[idx];
        if (tvisnil(&n->val)) {
          continue;
        }
        if (gc_mayclear(&n->key, 0) || gc_mayclear(&n->val, 1)) {
          setnilV(&n->val);
        }
      }
    }
    o = t->gclist;
  }
}
/* Call a userdata or cdata finalizer.
** @param mo: the __gc metamethod (or resolved cdata finalizer) to call
** @param o:  the object being finalized
** Runs mo(o) via a protected call with hooks, new traces and further GC
** steps disabled; any error raised by the finalizer is re-thrown after
** the saved state is restored.
*/
static void gc_call_finalizer(global_State *g, lua_State *L,
                              const TValue *mo, GCobj *o) {
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  size_t oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  top = L->top;
  /* Reserve stack space for the callee and its arguments. */
  L->top = top + uj_mm_narg[MM_gc] + 1;
  hook_entergc(g); /* Disable hooks and new traces during __gc. */
  g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
  uj_state_add_event(L, EXTEV_GC_FINALIZER);
  copyTV(L, top, mo); /* Slot layout: |mo|o| */
  setgcV(L, top+1, o, ~o->gch.gct);
  errcode = lj_vm_pcall(L, top+1, 1+0, -1); /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  g->gc.threshold = oldt; /* Restore GC threshold. */
  uj_state_remove_event(L, EXTEV_GC_FINALIZER);
  if (errcode) {
    uj_throw(L, errcode); /* Propagate errors. */
  }
}
/* Finalize one userdata or cdata object from the mmudata list.
** Unlinks the next pending object from the circular mmudata list, resurrects
** it (back onto the live GC list, marked white) and invokes its finalizer
** if one is registered.
*/
static void gc_finalize(lua_State *L) {
  global_State *g = G(L);
  GCobj *o = gcnext(g->gc.mmudata);
  const TValue *mo;
  lua_assert(g->jit_L == NULL); /* Must not be called on trace. */
  /* Unchain from list of userdata to be finalized. */
  if (o == g->gc.mmudata) {
    g->gc.mmudata = NULL; /* That was the last entry. */
  } else {
    g->gc.mmudata->gch.nextgc = o->gch.nextgc;
  }
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    o->gch.nextgc = g->gc.root;
    g->gc.root = o;
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0; /* At least one cdata finalizer actually ran. */
      copyTV(L, &tmp, tv);
      setnilV(tv); /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  o->gch.nextgc = mainthread(g)->nextgc;
  mainthread(g)->nextgc = o;
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = uj_meta_lookup_mt(g, gco2ud(o)->metatable, MM_gc);
  if (mo) { gc_call_finalizer(g, L, mo, o); }
}
/* Finalize all userdata objects from mmudata list. */
void lj_gc_finalize_udata(lua_State *L) {
  global_State *g = G(L);
  while (g->gc.mmudata != NULL) {
    gc_finalize(L);
  }
}
#if LJ_HASFFI
/* Finalize all cdata objects from finalizer table.
** Used during shutdown: disables the finalizer table, then walks its hash
** part back-to-front, resurrecting each pending cdata (made white, FIN flag
** cleared) and running its finalizer.
*/
void lj_gc_finalize_cdata(lua_State *L) {
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = t->node;
    ptrdiff_t i;
    t->metatable = NULL; /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--) {
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
        GCobj *o = gcV(&node[i].key);
        TValue tmp;
        makewhite(g, o);
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
        /* Copy finalizer out before clearing the slot, then call it. */
        copyTV(L, &tmp, &node[i].val);
        setnilV(&node[i].val);
        gc_call_finalizer(g, L, &tmp, o);
      }
    }
  }
}
#endif
/* Free all remaining GC objects (VM shutdown path). */
void lj_gc_freeall(global_State *g) {
  uj_strhash_t *strhash = gl_strhash(g);
  size_t i;
  /* Mark both whites as current so every unsealed object looks dead. */
  g->gc.currentwhite = LJ_GC_WHITES;
  uj_obj_unseal_all(g);
  gc_fullsweep(g, &g->gc.root);
  for (i = 0; i <= strhash->mask; i++) {
    gc_fullsweep(g, &strhash->hash[i]); /* Free all string hash chains. */
  }
  if (!gl_datastate(g)) {
    /* The sealed string hash is ours to manage only without a data state. */
    uj_strhash_t *strhash_sealed = gl_strhash_sealed(g);
    g->strhash_sweep = strhash_sealed;
    for (i = 0; i <= strhash_sealed->mask; i++) {
      gc_fullsweep(g, &strhash_sealed->hash[i]);
    }
  }
}
/* -- Collector ----------------------------------------------------------- */
/* Atomic part of the GC cycle, transitioning from mark to sweep phase.
** Must run without interruption: re-marks everything that may have changed
** since incremental propagation, separates userdata for finalization,
** clears weak tables and flips the current white. Statement order here is
** load-bearing; do not reorder.
*/
static void atomic(global_State *g, lua_State *L) {
  size_t udsize;
  gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g); /* Propagate any left-overs. */
  g->gc.gray = g->gc.weak; /* Empty the list of weak tables. */
  g->gc.weak = NULL;
  lua_assert(!iswhite(obj2gco(mainthread(g))));
  gc_markobj(g, L); /* Mark running thread. */
  gc_traverse_curtrace(g); /* Traverse current trace. */
  gc_mark_gcroot(g); /* Mark GC roots (again). */
  gc_propagate_gray(g); /* Propagate all of the above. */
  g->gc.gray = g->gc.grayagain; /* Empty the 2nd chance list. */
  g->gc.grayagain = NULL;
  gc_propagate_gray(g); /* Propagate it. */
  udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
  gc_mark_mmudata(g); /* Mark them. */
  udsize += gc_propagate_gray(g); /* And propagate the marks. */
  /* All marking done, clear weak tables. */
  gc_clearweak(g->gc.weak);
  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
  flipwhite(obj2gco(&g->strempty_own));
  g->gc.sweep = &g->gc.root;
  g->gc.estimate = uj_mem_total(MEM_G(g)) - (size_t)udsize; /* Initial estimate. */
}
/* GC state machine. Returns a cost estimate for each step performed.
** Phases: pause -> propagate -> atomic -> sweepstring -> sweep -> finalize.
** A step may also return LJ_MAX_MEM to signal "cannot make progress now"
** (e.g. when running on a trace).
*/
static size_t gc_onestep(lua_State *L) {
  global_State *g = G(L);
  g->gc.state_count[g->gc.state]++; /* Per-phase statistics. */
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (g->gc.gray != NULL) {
      return propagatemark(g); /* Propagate one gray object. */
    }
    g->gc.state = GCSatomic; /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (g->jit_L != NULL) { /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;
    }
    atomic(g, L);
    g->gc.state = GCSsweepstring; /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    size_t old = uj_mem_total(MEM(L));
    uj_strhash_t *strhash = gl_strhash(g);
    gc_fullsweep(g, &strhash->hash[g->gc.sweepstr++]); /* Sweep one chain. */
    if (g->gc.sweepstr > strhash->mask) {
      g->gc.state = GCSsweep; /* All string hash chains sweeped. */
    }
    lua_assert(old >= uj_mem_total(MEM(L)));
    g->gc.estimate -= old - uj_mem_total(MEM(L)); /* Deduct freed memory. */
    return GCSWEEPCOST;
  }
  case GCSsweep: {
    size_t old = uj_mem_total(MEM(L));
    g->gc.sweep = gc_sweep(g, g->gc.sweep, GCSWEEPMAX);
    lua_assert(old >= uj_mem_total(MEM(L)));
    g->gc.estimate -= old - uj_mem_total(MEM(L));
    if (*g->gc.sweep == NULL) { /* Reached the end of the object list? */
      gc_shrink(g, L);
      if (g->gc.mmudata != NULL) { /* Need any finalizations? */
        g->gc.state = GCSfinalize;
#if LJ_HASFFI
        g->gc.nocdatafin = 1;
#endif
      } else { /* Otherwise skip this phase to help the JIT. */
        g->gc.state = GCSpause; /* End of GC cycle. */
        g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
  }
  case GCSfinalize:
    if (g->gc.mmudata != NULL) {
      if (g->jit_L != NULL) { /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
      }
      gc_finalize(L); /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST) {
        g->gc.estimate -= GCFINALIZECOST;
      }
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    /* No cdata finalizer ran: rehash to drop stale finalizer entries. */
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause; /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lua_assert(0); /* Unknown GC state: must be unreachable. */
    return 0;
  }
}
/* Perform a limited amount of incremental GC steps.
** Returns 1 when a full GC cycle finished, -1 when the step budget ran out
** with little debt accumulated, 0 otherwise.
*/
int lj_gc_step(lua_State *L) {
  global_State *g = G(L);
  struct vmstate_context vmsc;
  size_t budget;
  int result;
  uj_vmstate_save(g->vmstate, &vmsc);
  uj_vmstate_set(&g->vmstate, UJ_VMST_GC);
  budget = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (budget == 0) { budget = LJ_MAX_MEM; }
  if (uj_mem_total(MEM(L)) > g->gc.threshold) {
    g->gc.debt += uj_mem_total(MEM(L)) - g->gc.threshold;
  }
  for (;;) {
    budget -= gc_onestep(L);
    if (g->gc.state == GCSpause) {
      /* Finished a GC cycle: set the threshold for the next one. */
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      result = 1;
      break;
    }
    if ((int32_t)budget <= 0) {
      /* Budget exhausted mid-cycle: decide how soon to resume. */
      if (g->gc.debt < GCSTEPSIZE) {
        g->gc.threshold = uj_mem_total(MEM(L)) + GCSTEPSIZE;
        result = -1;
      } else {
        g->gc.debt -= GCSTEPSIZE;
        g->gc.threshold = uj_mem_total(MEM(L));
        result = 0;
      }
      break;
    }
  }
  uj_vmstate_restore(&g->vmstate, &vmsc);
  return result;
}
/* Ditto, but fix the stack top first. */
/* Synchronizes L->top with the current frame before stepping, for callers
** whose stack top may be stale (e.g. entry from compiled code).
*/
static LJ_AINLINE void gc_step_fixtop(lua_State *L)
{
  uj_state_stack_sync_top(L);
  lj_gc_step(L);
}
/* Exported (non-inline) wrapper around gc_step_fixtop. */
void lj_gc_step_fixtop(lua_State *L) {
  gc_step_fixtop(L);
}
/* Step the GC (with top fixup) only if the allocation threshold is hit. */
void lj_gc_check_fixtop(lua_State *L) {
  if (LJ_UNLIKELY(uj_mem_total(MEM(L)) >= G(L)->gc.threshold))
    gc_step_fixtop(L);
}
#if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code.
** Returns nonzero (forcing a trace exit) when the GC reached a phase that
** cannot run on a trace.
*/
int lj_gc_step_jit(global_State *g, size_t steps) {
  lua_State *L = g->jit_L;
  size_t i;
  L->base = G(L)->jit_base;
  L->top = curr_topL(L);
  for (i = 0; i < steps; i++) {
    if (lj_gc_step(L) != 0) {
      break; /* Stop early once a cycle finished or the budget stalled. */
    }
  }
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
#endif
/* Perform a full GC cycle.
** If a cycle is already in its mark phase, its partial marking is discarded
** and the sweep is fast-forwarded (everything survives); then one complete
** mark-and-sweep cycle is run to completion.
*/
void lj_gc_fullgc(lua_State *L) {
  global_State *g = G(L);
  struct vmstate_context vmsc;
  uj_vmstate_save(g->vmstate, &vmsc);
  uj_vmstate_set(&g->vmstate, UJ_VMST_GC);
  if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
    g->gc.sweep = &g->gc.root; /* Sweep everything (preserving it). */
    g->gc.gray = NULL; /* Reset lists from partial propagation. */
    g->gc.grayagain = NULL;
    g->gc.weak = NULL;
    g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) {
    gc_onestep(L); /* Finish sweep. */
  }
  lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  uj_vmstate_restore(&g->vmstate, &vmsc);
}
/* -- Write barriers ------------------------------------------------------ */
/* Move the GC propagation frontier forward.
** @param o: black object that was just made to reference v
** @param v: white object now referenced by o
** During propagation the white object is marked to preserve the tri-color
** invariant; outside propagation the black object is reverted to white
** instead, which is cheaper and avoids repeated barriers on o.
*/
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) {
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
  lua_assert(o->gch.gct != ~LJ_TTAB); /* Tables use a different barrier. */
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
    gc_mark(g, v); /* Move frontier forward. */
  } else {
    makewhite(g, o); /* Make it white to avoid the following barrier. */
  }
}
/* Specialized barrier for closed upvalue. Pass &uv->tv.
** The macro recovers the enclosing GCupval's 'marked' byte from the address
** of its embedded TValue, so the upvalue can be recolored without having a
** pointer to the upvalue object itself.
*/
void lj_gc_barrieruv(global_State *g, TValue *tv) {
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
    gc_mark(g, gcV(tv)); /* Keep the frontier consistent while marking. */
  } else {
    /* Outside marking: just recolor the upvalue to the current white. */
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
  }
#undef TV2MARKED
}
/* Close upvalue. Also needs a write barrier.
** Copies the referenced stack slot into the upvalue itself, links the
** upvalue onto the main GC list, and fixes its color: a closed upvalue
** must never stay gray.
*/
void lj_gc_closeuv(global_State *g, GCupval *uv) {
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  uv->v = &uv->tv;
  uv->closed = 1;
  o->gch.nextgc = g->gc.root;
  g->gc.root = o;
  if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o); /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv)) {
        /* The now-black upvalue references a white value: barrier it. */
        lj_gc_barrierf(g, o, gcV(&uv->tv));
      }
    } else {
      makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
      lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    }
  }
}
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase.
** Outside marking no action is needed: the trace will be (re)marked by
** the next cycle's root traversal.
*/
void lj_gc_barriertrace(global_State *g, uint32_t traceno) {
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
    gc_marktrace(g, traceno);
  }
}
#endif
| 30.358306 | 84 | 0.605186 | [
"object"
] |
36b24ce63bbca63d4b61981059a90dd19d0296cb | 36,456 | c | C | tests/mupdf-1.14.0-source/src/thirdparty/freeglut/src/fg_menu.c | sillywalk/grazz | a0adb1a90d41ff9006d8c1476546263f728b3c83 | [
"Apache-2.0"
] | 64 | 2018-07-19T11:34:24.000Z | 2022-03-26T03:06:45.000Z | extern/freeglut/src/fg_menu.c | eshafeeqe/SPlisHSPlasH | 6f9cebfbec4b08e4a83ed5ad3f9b1288c3dfc192 | [
"MIT"
] | 5 | 2017-08-14T16:26:04.000Z | 2022-03-15T07:20:07.000Z | extern/freeglut/src/fg_menu.c | eshafeeqe/SPlisHSPlasH | 6f9cebfbec4b08e4a83ed5ad3f9b1288c3dfc192 | [
"MIT"
] | 22 | 2017-05-16T09:08:11.000Z | 2021-08-21T09:03:52.000Z | /*
* fg_menu.c
*
* Pull-down menu creation and handling.
*
* Copyright (c) 1999-2000 Pawel W. Olszta. All Rights Reserved.
* Written by Pawel W. Olszta, <olszta@sourceforge.net>
* Creation date: Thu Dec 16 1999
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PAWEL W. OLSZTA BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define FREEGLUT_BUILDING_LIB
#include <GL/freeglut.h>
#include "fg_internal.h"
/* -- DEFINITIONS ---------------------------------------------------------- */
/*
* FREEGLUT_MENU_FONT can be any freeglut bitmapped font.
* (Stroked fonts would not be out of the question, but we'd need to alter
* code, since GLUT (hence freeglut) does not quite unify stroked and
* bitmapped font handling.)
* Old UNIX/X11 GLUT (BSD, UNIX, IRIX, LINUX, HPUX, ...) used a system
* font best approximated by an 18-pixel HELVETICA, I think. MS-WINDOWS
* GLUT used something closest to the 8x13 fixed-width font. (Old
* GLUT apparently uses host-system menus rather than building its own.
* freeglut is building its own menus from scratch.)
*
* FREEGLUT_MENUENTRY_HEIGHT gives the height of ONE menu box. This should
* be the distances between two adjacent menu entries. It should scale
* automatically with the font choice, so you needn't alter it---unless you
* use a stroked font.
*
* FREEGLUT_MENU_BORDER says how many pixels to allow around the edge of a
* menu. (It also seems to be the same as the number of pixels used as
* a border around *items* to separate them from neighbors. John says
* that that wasn't the original intent...if not, perhaps we need another
* symbolic constant, FREEGLUT_MENU_ITEM_BORDER, or such.)
*/
/* See platform-specific header files for menu font and color definitions */
#define FREEGLUT_MENUENTRY_HEIGHT(font) (glutBitmapHeight(font) + \
FREEGLUT_MENU_BORDER)
#define FREEGLUT_MENU_BORDER 2
/*
* These variables are for rendering the freeglut menu items.
*
* The choices are fore- and background, with and without h for Highlighting.
* Old GLUT appeared to be system-dependent for its colors (sigh) so we are
* too. These variables should be stuffed into global state and initialized
* via the glutInit*() system.
*/
static float menu_pen_fore [4] = FREEGLUT_MENU_PEN_FORE_COLORS ;
static float menu_pen_back [4] = FREEGLUT_MENU_PEN_BACK_COLORS ;
static float menu_pen_hfore [4] = FREEGLUT_MENU_PEN_HFORE_COLORS;
static float menu_pen_hback [4] = FREEGLUT_MENU_PEN_HBACK_COLORS;
extern GLvoid fgPlatformGetGameModeVMaxExtent( SFG_Window* window, int* x, int* y );
extern void fghPlatformGetCursorPos(const SFG_Window *window, GLboolean client, SFG_XYUse *mouse_pos);
extern SFG_Font* fghFontByID( void* font );
extern void fgPlatformHideWindow( SFG_Window* window );
/* -- PRIVATE FUNCTIONS ---------------------------------------------------- */
/*
* Private function to find a menu entry by index
*/
static SFG_MenuEntry *fghFindMenuEntry( SFG_Menu* menu, int index )
{
SFG_MenuEntry *entry;
int i = 1;
for( entry = (SFG_MenuEntry *)menu->Entries.First;
entry;
entry = (SFG_MenuEntry *)entry->Node.Next )
{
if( i == index )
break;
++i;
}
return entry;
}
/*
 * Deactivates the submenu referenced by the given menu entry: hides its
 * window, clears its active state, and recursively deactivates any
 * active submenus below it.
 */
static void fghDeactivateSubMenu( SFG_MenuEntry *menuEntry )
{
    SFG_Menu *subMenu = menuEntry->SubMenu;
    SFG_MenuEntry *child;

    /* Hide the present menu's window */
    fgPlatformHideWindow( subMenu->Window );

    /* Forget about having that menu active anymore, now: */
    subMenu->Window->ActiveMenu = NULL;
    subMenu->IsActive = GL_FALSE;
    subMenu->ActiveEntry = NULL;

    /* Deactivate every entry, recursing into active submenus. */
    for( child = ( SFG_MenuEntry * )subMenu->Entries.First;
         child;
         child = ( SFG_MenuEntry * )child->Node.Next )
    {
        child->IsActive = GL_FALSE;

        if( child->SubMenu )
            fghDeactivateSubMenu( child );
    }
}
/*
 * Private function to get the virtual maximum screen extent.
 * In game mode the platform layer supplies the extent; otherwise the
 * physical screen dimensions are used.
 */
static GLvoid fghGetVMaxExtent( SFG_Window* window, int* x, int* y )
{
    if( !fgStructure.GameModeWindow )
    {
        *x = fgDisplay.ScreenWidth;
        *y = fgDisplay.ScreenHeight;
    }
    else
        fgPlatformGetGameModeVMaxExtent ( window, x, y );
}
/*
 * Private function to check for the current menu/sub menu activity state.
 * Recursively tests whether the mouse cursor (in menu->Window->State.MouseX/Y,
 * relative to the menu window) lies inside this menu or any active submenu,
 * updating entry highlight state and opening/closing submenus as needed.
 * Returns GL_TRUE when the cursor was "caught" by this menu tree.
 */
static GLboolean fghCheckMenuStatus( SFG_Menu* menu )
{
    SFG_MenuEntry* menuEntry;
    int x, y;

    /* First of all check any of the active sub menus... */
    for( menuEntry = (SFG_MenuEntry *)menu->Entries.First;
         menuEntry;
         menuEntry = (SFG_MenuEntry *)menuEntry->Node.Next )
    {
        if( menuEntry->SubMenu && menuEntry->IsActive )
        {
            /*
             * OK, have the sub-menu checked, too. If it returns GL_TRUE, it
             * will mean that it caught the mouse cursor and we do not need
             * to regenerate the activity list, and so our parents do...
             */
            GLboolean return_status;

            /* Translate the cursor into the submenu's window coordinates. */
            menuEntry->SubMenu->Window->State.MouseX =
                menu->Window->State.MouseX + menu->X - menuEntry->SubMenu->X;
            menuEntry->SubMenu->Window->State.MouseY =
                menu->Window->State.MouseY + menu->Y - menuEntry->SubMenu->Y;
            return_status = fghCheckMenuStatus( menuEntry->SubMenu );

            if ( return_status )
                return GL_TRUE;
        }
    }

    /* That much about our sub menus, let's get to checking the current menu: */
    x = menu->Window->State.MouseX;
    y = menu->Window->State.MouseY;

    /* Check if the mouse cursor is contained within the current menu box */
    if( ( x >= FREEGLUT_MENU_BORDER ) &&
        ( x < menu->Width  - FREEGLUT_MENU_BORDER ) &&
        ( y >= FREEGLUT_MENU_BORDER ) &&
        ( y < menu->Height - FREEGLUT_MENU_BORDER )  )
    {
        /* Entry index derived from the vertical cursor position. */
        int menuID = ( y - FREEGLUT_MENU_BORDER ) / FREEGLUT_MENUENTRY_HEIGHT(menu->Font);

        /* The mouse cursor is somewhere over our box, check it out. */
        menuEntry = fghFindMenuEntry( menu, menuID + 1 );
        FREEGLUT_INTERNAL_ERROR_EXIT( menuEntry, "Cannot find menu entry",
                                      "fghCheckMenuStatus" );

        menuEntry->IsActive = GL_TRUE;
        menuEntry->Ordinal = menuID;

        /*
         * If this is not the same as the last active menu entry, deactivate
         * the previous entry. Specifically, if the previous active entry
         * was a submenu then deactivate it.
         */
        if( menu->ActiveEntry && ( menuEntry != menu->ActiveEntry ) )
            if( menu->ActiveEntry->SubMenu )
                fghDeactivateSubMenu( menu->ActiveEntry );

        if( menuEntry != menu->ActiveEntry )
        {
            /* Highlight changed: request a redraw of the menu window. */
            menu->Window->State.WorkMask |= GLUT_DISPLAY_WORK;
            if( menu->ActiveEntry )
                menu->ActiveEntry->IsActive = GL_FALSE;
        }

        menu->ActiveEntry = menuEntry;
        menu->IsActive = GL_TRUE;

        /*
         * OK, we have marked that entry as active, but it would be also
         * nice to have its contents updated, in case it's a sub menu.
         * Also, ignore the return value of the check function:
         */
        if( menuEntry->SubMenu )
        {
            if ( ! menuEntry->SubMenu->IsActive )
            {
                int max_x, max_y;
                SFG_Window *current_window = fgStructure.CurrentWindow;

                /* Set up the initial menu position now... */
                menuEntry->SubMenu->IsActive = GL_TRUE;

                /* Set up the initial submenu position now: */
                fghGetVMaxExtent(menu->ParentWindow, &max_x, &max_y);
                menuEntry->SubMenu->X = menu->X + menu->Width;
                menuEntry->SubMenu->Y = menu->Y +
                    menuEntry->Ordinal * FREEGLUT_MENUENTRY_HEIGHT(menu->Font);

                /* Flip to the left side if it would run off the screen. */
                if( menuEntry->SubMenu->X + menuEntry->SubMenu->Width > max_x )
                    menuEntry->SubMenu->X = menu->X - menuEntry->SubMenu->Width;

                /* Shift upwards (clamped to 0) if it would run off the bottom. */
                if( menuEntry->SubMenu->Y + menuEntry->SubMenu->Height > max_y )
                {
                    menuEntry->SubMenu->Y -= ( menuEntry->SubMenu->Height -
                                               FREEGLUT_MENUENTRY_HEIGHT(menu->Font) -
                                               2 * FREEGLUT_MENU_BORDER );
                    if( menuEntry->SubMenu->Y < 0 )
                        menuEntry->SubMenu->Y = 0;
                }

                fgSetWindow( menuEntry->SubMenu->Window );
                glutPositionWindow( menuEntry->SubMenu->X,
                                    menuEntry->SubMenu->Y );
                glutReshapeWindow( menuEntry->SubMenu->Width,
                                   menuEntry->SubMenu->Height );
                glutPopWindow( );
                glutShowWindow( );
                menuEntry->SubMenu->Window->ActiveMenu = menuEntry->SubMenu;
                fgSetWindow( current_window );

                /* Give the submenu the cursor position and recheck it once. */
                menuEntry->SubMenu->Window->State.MouseX =
                    x + menu->X - menuEntry->SubMenu->X;
                menuEntry->SubMenu->Window->State.MouseY =
                    y + menu->Y - menuEntry->SubMenu->Y;
                fghCheckMenuStatus( menuEntry->SubMenu );
            }

            /* Activate it because its parent entry is active */
            menuEntry->SubMenu->IsActive = GL_TRUE;
        }

        /* Report back that we have caught the menu cursor */
        return GL_TRUE;
    }

    /* Looks like the menu cursor is somewhere else... */
    if( menu->ActiveEntry && menu->ActiveEntry->IsActive &&
        ( !menu->ActiveEntry->SubMenu ||
          !menu->ActiveEntry->SubMenu->IsActive ) )
    {
        /* Cursor left the menu and no submenu is open: drop the highlight. */
        menu->Window->State.WorkMask |= GLUT_DISPLAY_WORK;
        menu->ActiveEntry->IsActive = GL_FALSE;
        menu->ActiveEntry = NULL;
    }

    return GL_FALSE;
}
/*
 * Displays a menu box and all of its submenus (if they are active).
 * Draws, in immediate mode: a raised 3D border (light top-left, dark
 * bottom-right), the background quad, a highlight quad behind the active
 * entry, each entry's label, and a right-pointing triangle for submenus.
 * Assumes an orthographic projection in window pixel coordinates.
 */
static void fghDisplayMenuBox( SFG_Menu* menu )
{
    SFG_MenuEntry *menuEntry;
    int i;
    int border = FREEGLUT_MENU_BORDER;

    /*
     * Have the menu box drawn first. The +- values are
     * here just to make it more nice-looking...
     */
    /* a non-black dark version of the below. */
    glColor4f( 1.0f, 1.0f, 1.0f, 1.0f );
    glBegin( GL_QUAD_STRIP );
        glVertex2i( menu->Width         , 0                    );
        glVertex2i( menu->Width - border,                border);
        glVertex2i( 0                   , 0                    );
        glVertex2i(               border,                border);
        glVertex2i( 0                   , menu->Height         );
        glVertex2i(               border, menu->Height - border);
    glEnd( );

    /* a non-black dark version of the below. */
    glColor4f( 0.5f, 0.5f, 0.5f, 1.0f );
    glBegin( GL_QUAD_STRIP );
        glVertex2i( 0                   , menu->Height         );
        glVertex2i(               border, menu->Height - border);
        glVertex2i( menu->Width         , menu->Height         );
        glVertex2i( menu->Width - border, menu->Height - border);
        glVertex2i( menu->Width         , 0                    );
        glVertex2i( menu->Width - border,                border);
    glEnd( );

    /* Interior background quad. */
    glColor4fv( menu_pen_back );
    glBegin( GL_QUADS );
        glVertex2i(               border,                border);
        glVertex2i( menu->Width - border,                border);
        glVertex2i( menu->Width - border, menu->Height - border);
        glVertex2i(               border, menu->Height - border);
    glEnd( );

    /* Check if any of the submenus is currently active... */
    for( menuEntry = (SFG_MenuEntry *)menu->Entries.First;
         menuEntry;
         menuEntry = (SFG_MenuEntry *)menuEntry->Node.Next )
    {
        /* Has the menu been marked as active, maybe? */
        if( menuEntry->IsActive )
        {
            /*
             * That's truly right, and we need to have it highlighted.
             * There is an assumption that mouse cursor didn't move
             * since the last check of menu activity state:
             */
            int menuID = menuEntry->Ordinal;

            /* So have the highlight drawn... */
            glColor4fv( menu_pen_hback );
            glBegin( GL_QUADS );
                glVertex2i( border,
                            (menuID + 0)*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border );
                glVertex2i( menu->Width - border,
                            (menuID + 0)*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border );
                glVertex2i( menu->Width - border,
                            (menuID + 1)*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border );
                glVertex2i( border,
                            (menuID + 1)*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border );
            glEnd( );
        }
    }

    /* Print the menu entries now... */

    glColor4fv( menu_pen_fore );

    for( menuEntry = (SFG_MenuEntry *)menu->Entries.First, i = 0;
         menuEntry;
         menuEntry = (SFG_MenuEntry *)menuEntry->Node.Next, ++i )
    {
        /* If the menu entry is active, set the color to white */
        if( menuEntry->IsActive )
            glColor4fv( menu_pen_hfore );

        /* Move the raster into position... */
        /* Try to center the text - JCJ 31 July 2003*/
        glRasterPos2i(
            2 * border,
            ( i + 1 )*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) -
            ( int )( FREEGLUT_MENUENTRY_HEIGHT(menu->Font)*0.3 - border )
        );

        /* Have the label drawn, character after character: */
        glutBitmapString( menu->Font,
                          (unsigned char *)menuEntry->Text);

        /* If it's a submenu, draw a right arrow */
        if( menuEntry->SubMenu )
        {
            /* Width of an underscore approximates the arrow's width. */
            int width = glutBitmapWidth( menu->Font, '_' );
            int x_base = menu->Width - 2 - width;
            int y_base = i*FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border;
            glBegin( GL_TRIANGLES );
                glVertex2i( x_base, y_base + 2*border);
                glVertex2i( menu->Width - 2, y_base +
                            ( FREEGLUT_MENUENTRY_HEIGHT(menu->Font) + border) / 2 );
                glVertex2i( x_base, y_base + FREEGLUT_MENUENTRY_HEIGHT(menu->Font) - border );
            glEnd( );
        }

        /* If the menu entry is active, reset the color */
        if( menuEntry->IsActive )
            glColor4fv( menu_pen_fore );
    }
}
/*
 * Private static function to set the parent window of a submenu and all
 * of its submenus (recursively).
 */
static void fghSetMenuParentWindow( SFG_Window *window, SFG_Menu *menu )
{
    SFG_MenuEntry *entry = ( SFG_MenuEntry * )menu->Entries.First;

    menu->ParentWindow = window;

    while( entry )
    {
        if( entry->SubMenu )
            fghSetMenuParentWindow( window, entry->SubMenu );
        entry = ( SFG_MenuEntry * )entry->Node.Next;
    }
}
/*
 * Displays the currently active menu for the current window.
 * Saves the relevant GL state, sets up a pixel-aligned orthographic
 * projection on the menu's own window, draws the menu box, restores all
 * state, swaps buffers, and finally re-selects the original window.
 */
void fgDisplayMenu( void )
{
    SFG_Window* window = fgStructure.CurrentWindow;
    SFG_Menu* menu = NULL;

    FREEGLUT_INTERNAL_ERROR_EXIT ( fgStructure.CurrentWindow, "Displaying menu in nonexistent window",
                                   "fgDisplayMenu" );

    /* Check if there is an active menu attached to this window... */
    menu = window->ActiveMenu;
    freeglut_return_if_fail( menu );

    fgSetWindow( menu->Window );

    /* Disable anything that could interfere with flat 2D drawing. */
    glPushAttrib( GL_DEPTH_BUFFER_BIT | GL_TEXTURE_BIT | GL_LIGHTING_BIT |
                  GL_POLYGON_BIT );

    glDisable( GL_DEPTH_TEST );
    glDisable( GL_TEXTURE_2D );
    glDisable( GL_LIGHTING   );
    glDisable( GL_CULL_FACE  );

    /* Top-left origin, one unit per pixel. */
    glMatrixMode( GL_PROJECTION );
    glPushMatrix( );
    glLoadIdentity( );
    glOrtho(
         0, glutGet( GLUT_WINDOW_WIDTH  ),
         glutGet( GLUT_WINDOW_HEIGHT ), 0,
        -1, 1
    );

    glMatrixMode( GL_MODELVIEW );
    glPushMatrix( );
    glLoadIdentity( );

    fghDisplayMenuBox( menu );

    glPopAttrib( );

    glMatrixMode( GL_PROJECTION );
    glPopMatrix( );
    glMatrixMode( GL_MODELVIEW );
    glPopMatrix( );

    glutSwapBuffers( );

    fgSetWindow ( window );
}
/*
 * Activates a menu pointed by the function argument.
 * @param window: window the menu is attached to (becomes the menu's parent)
 * @param button: mouse button index the menu is bound to
 * Positions the menu at the current cursor location (clamped to the
 * virtual screen), shows its window, and fires the menu-status callbacks.
 */
static void fghActivateMenu( SFG_Window* window, int button )
{
    int max_x, max_y;
    SFG_XYUse mouse_pos;

    /* We'll be referencing this menu a lot, so remember its address: */
    SFG_Menu* menu = window->Menu[ button ];
    SFG_Window* current_window = fgStructure.CurrentWindow;

    /* If the menu is already active in another window, deactivate it (and any submenus) there */
    if ( menu->ParentWindow )
      fgDeactivateMenu(menu->ParentWindow);

    /* Mark the menu as active, so that it gets displayed: */
    window->ActiveMenu = menu;
    menu->IsActive = GL_TRUE;
    fghSetMenuParentWindow ( window, menu );
    fgState.ActiveMenus++;

    /* Set up the initial menu position now: */
    fghGetVMaxExtent(menu->ParentWindow, &max_x, &max_y);
    fgSetWindow( window );
    /* get mouse position on screen (window->State.MouseX and window->State.MouseY
     * are relative to client area origin), and not easy to correct given that
     * glutGet( GLUT_WINDOW_X ) and glutGet( GLUT_WINDOW_Y ) return relative to parent
     * origin when looking at a child window
     * for parent windows: window->State.MouseX + glutGet( GLUT_WINDOW_X ) == mouse_pos.X
     */
    fghPlatformGetCursorPos(NULL, GL_FALSE, &mouse_pos);
    menu->X = mouse_pos.X;
    menu->Y = mouse_pos.Y;

    /* Make sure the whole menu is on the screen */
    if( menu->X + menu->Width > max_x )
        menu->X -=menu->Width;      /* Flip to the left of the cursor. */

    if( menu->Y + menu->Height > max_y )
    {
        menu->Y -=menu->Height;     /* Flip above the cursor... */
        if( menu->Y < 0 )
            menu->Y = 0;            /* ...but never off the top edge. */
    }

    /* Set position of mouse relative to top-left menu in menu's window state (could as well set 0 at creation time...) */
    menu->Window->State.MouseX = mouse_pos.X - menu->X;
    menu->Window->State.MouseY = mouse_pos.Y - menu->Y;

    /* Menu status callback */
    if (fgState.MenuStateCallback || fgState.MenuStatusCallback)
    {
        fgStructure.CurrentMenu = menu;
        fgStructure.CurrentWindow = window;
        if (fgState.MenuStateCallback)
            fgState.MenuStateCallback(GLUT_MENU_IN_USE);
        if (fgState.MenuStatusCallback)
            /* window->State.MouseX and window->State.MouseY are relative to client area origin, as needed */
            fgState.MenuStatusCallback(GLUT_MENU_IN_USE, window->State.MouseX, window->State.MouseY);
    }

    fgSetWindow( menu->Window );
    glutPositionWindow( menu->X, menu->Y );
    glutReshapeWindow( menu->Width, menu->Height );
    glutPopWindow( );
    glutShowWindow( );
    menu->Window->ActiveMenu = menu;
    /* Compute the initial highlight before returning to the caller's window. */
    fghCheckMenuStatus( menu );
    fgSetWindow( current_window );
}
/*
 * Update Highlight states of the menu
 * NB: Current mouse position is in menu->Window->State.MouseX/Y
 * Thin public wrapper around the recursive status check.
 */
void fgUpdateMenuHighlight ( SFG_Menu *menu )
{
    fghCheckMenuStatus( menu );
}
/*
 * Check whether an active menu absorbs a mouse click
 *
 * window  - the window the click was reported for
 * button  - mouse button index (may be negative / out of range; checked below)
 * pressed - GL_TRUE for a down-click, GL_FALSE for an up-click
 * mouse_x/mouse_y - click position; compared directly against the active
 *     menu's X/Y origin, so presumably in the same (screen) coordinate
 *     space that fghActivateMenu stored there -- TODO confirm at call sites
 *
 * Returns GL_TRUE when the click was relevant to the menu system from this
 * window's perspective (menu activated, entry selected, or menu closed),
 * GL_FALSE otherwise.
 */
GLboolean fgCheckActiveMenu ( SFG_Window *window, int button, GLboolean pressed,
                              int mouse_x, int mouse_y )
{
    GLboolean is_handled = GL_FALSE;
    GLboolean is_clicked = GL_FALSE;
    /*
     * Near as I can tell, this is the menu behaviour:
     *  - Down-click the menu button, menu not active:  activate
     *    the menu with its upper left-hand corner at the mouse
     *    location.
     *  - Down-click any button outside the menu, menu active:
     *    deactivate the menu, and potentially activate a new menu
     *    at the new mouse location. This includes clicks in
     *    different windows of course
     *  - Down-click any button inside the menu, menu active:
     *    select the menu entry and deactivate the menu
     *  - Up-click the menu button, menu not active:  nothing happens
     *  - Up-click the menu button outside the menu, menu active:
     *    nothing happens
     *  - Up-click the menu button inside the menu, menu active:
     *    select the menu entry and deactivate the menu
     * Since menus can have submenus, we need to check this recursively.
     */
    if( window->ActiveMenu )
    {
        /* Refresh the menu window's stored mouse position; only meaningful
         * when the click landed in the menu's own parent window. */
        if( window == window->ActiveMenu->ParentWindow )
        {
            window->ActiveMenu->Window->State.MouseX =
                mouse_x - window->ActiveMenu->X;
            window->ActiveMenu->Window->State.MouseY =
                mouse_y - window->ActiveMenu->Y;
        }
        /* In the menu, deactivate the menu and invoke the callback */
        if( fghCheckMenuStatus( window->ActiveMenu ) )
        {
            /*
             * Save the current window and menu and set the current
             * window to the window whose menu this is
             */
            SFG_Window *save_window = fgStructure.CurrentWindow;
            SFG_Menu *save_menu = fgStructure.CurrentMenu, *active_menu = window->ActiveMenu; /* active menu is always the one with the mouse in it, due to fghCheckMenuStatus */
            SFG_MenuEntry *active_entry = active_menu->ActiveEntry; /* currently highlighted item -> must be the one that was just clicked */
            SFG_Window *parent_window = window->ActiveMenu->ParentWindow;
            /* ignore clicks on the submenu entry */
            if (!active_entry->SubMenu)
            {
                fgSetWindow( parent_window );
                fgStructure.CurrentMenu = active_menu;
                /* Deactivate menu and then call callback (we don't want menu to stay in view while callback is executing, and user should be able to change menus in callback) */
                fgDeactivateMenu( parent_window );
                active_menu->Callback( active_entry->ID );
                /* Restore the current window and menu */
                fgSetWindow( save_window );
                fgStructure.CurrentMenu = save_menu;
            }
            is_clicked = GL_TRUE; /* Don't reopen... */
        }
        else if( pressed )
        /*
         * Outside the menu, deactivate if it's a downclick
         *
         * A downclick outside of the interior of our freeglut windows
         * is dealt with in the WM_KILLFOCUS handler of fgPlatformWindowProc
         */
        {
            fgDeactivateMenu( window->ActiveMenu->ParentWindow );
            /* Could reopen again in different location, as is_clicked remains false */
        }
        /* Either way the click concerned this window's menu: absorbed. */
        is_handled = GL_TRUE;
    }
    else if ( fgState.ActiveMenus ) /* Don't have to check whether this was a downpress or an uppress, there is no way to get an uppress in another window before a downpress... */
    {
        /* if another window than the one clicked in has an open menu, close it */
        SFG_Menu *menu = fgGetActiveMenu();
        if ( menu ) /* any open menu? */
            fgDeactivateMenu( menu->ParentWindow );
        /* Leave is_handled to false, we didn't do anything relevant from the perspective of the window that was clicked */
    }
    /* No active menu, let's check whether we need to activate one. */
    if( !is_clicked &&
        ( 0 <= button ) &&
        ( FREEGLUT_MAX_MENUS > button ) &&
        ( window->Menu[ button ] ) &&
        pressed )
    {
        /* If mouseclick was outside the parent window, ignore. This can
         * happen when another mouse button is already depressed and the
         * window thus has mouse capture
         */
        if (window->State.MouseX>0 && window->State.MouseY>0 &&
            window->State.MouseX<window->State.Width && window->State.MouseY<window->State.Height)
        {
            fghActivateMenu( window, button );
            is_handled = GL_TRUE;
        }
    }
    return is_handled;
}
/*
 * Deactivates a menu pointed by the function argument.
 *
 * `window` is the parent window the active menu is attached to; the call
 * is a no-op if window is NULL or has no active menu.  Hides the menu's
 * window (and recursively any active submenus), clears all activity flags,
 * decrements fgState.ActiveMenus, and finally fires the menu state/status
 * callbacks with GLUT_MENU_NOT_IN_USE.
 */
/* Reentrancy guard: hiding the menu window below can cause this function
 * to be re-entered for the same menu (see the inline note), so remember
 * which menu is currently being torn down. */
static SFG_Menu* menuDeactivating = NULL;
void fgDeactivateMenu( SFG_Window *window )
{
    SFG_Window *parent_window = NULL;
    SFG_Menu* menu;
    SFG_MenuEntry *menuEntry;
    /* Did we find an active window? */
    freeglut_return_if_fail( window );
    /* Check if there is an active menu attached to this window... */
    menu = window->ActiveMenu;
    freeglut_return_if_fail( menu );
    /* Check if we are already deactivating this menu, abort in that case (glutHideWindow below can cause this function to be called again on the same menu..) */
    if (menu==menuDeactivating)
        return;
    menuDeactivating = menu;
    /* Remember the parent before the menu's links are cleared below. */
    parent_window = menu->ParentWindow;
    /* Hide the present menu's window */
    fgPlatformHideWindow( menu->Window );
    /* Forget about having that menu active anymore, now: */
    menu->Window->ActiveMenu = NULL;
    menu->ParentWindow->ActiveMenu = NULL;
    fghSetMenuParentWindow ( NULL, menu );
    menu->IsActive = GL_FALSE;
    menu->ActiveEntry = NULL;
    fgState.ActiveMenus--;
    /* Hide all submenu windows, and the root menu's window. */
    for ( menuEntry = ( SFG_MenuEntry * )menu->Entries.First;
          menuEntry;
          menuEntry = ( SFG_MenuEntry * )menuEntry->Node.Next )
    {
        menuEntry->IsActive = GL_FALSE;
        /* Is that an active submenu by any chance? */
        if( menuEntry->SubMenu )
            fghDeactivateSubMenu( menuEntry );
    }
    /* Done deactivating menu */
    menuDeactivating = NULL;
    /* Menu status callback */
    if (fgState.MenuStateCallback || fgState.MenuStatusCallback)
    {
        fgStructure.CurrentMenu = menu;
        fgStructure.CurrentWindow = parent_window;
        if (fgState.MenuStateCallback)
            fgState.MenuStateCallback(GLUT_MENU_NOT_IN_USE);
        if (fgState.MenuStatusCallback)
        {
            /* Get cursor position relative to parent_window's client area */
            SFG_XYUse mouse_pos;
            fghPlatformGetCursorPos(parent_window, GL_TRUE, &mouse_pos);
            fgState.MenuStatusCallback(GLUT_MENU_NOT_IN_USE, mouse_pos.X, mouse_pos.Y);
        }
    }
}
/*
* Recalculates current menu's box size
*/
void fghCalculateMenuBoxSize( void )
{
SFG_MenuEntry* menuEntry;
int width = 0, height = 0;
/* Make sure there is a current menu set */
freeglut_return_if_fail( fgStructure.CurrentMenu );
/* The menu's box size depends on the menu entries: */
for( menuEntry = ( SFG_MenuEntry * )fgStructure.CurrentMenu->Entries.First;
menuEntry;
menuEntry = ( SFG_MenuEntry * )menuEntry->Node.Next )
{
/* Update the menu entry's width value */
menuEntry->Width = glutBitmapLength(
fgStructure.CurrentMenu->Font,
(unsigned char *)menuEntry->Text
);
/*
* If the entry is a submenu, then it needs to be wider to
* accommodate the arrow.
*/
if (menuEntry->SubMenu)
menuEntry->Width += glutBitmapLength(
fgStructure.CurrentMenu->Font,
(unsigned char *)"_"
);
/* Check if it's the biggest we've found */
if( menuEntry->Width > width )
width = menuEntry->Width;
height += FREEGLUT_MENUENTRY_HEIGHT(fgStructure.CurrentMenu->Font);
}
/* Store the menu's box size now: */
fgStructure.CurrentMenu->Height = height + 2 * FREEGLUT_MENU_BORDER;
fgStructure.CurrentMenu->Width = width + 4 * FREEGLUT_MENU_BORDER;
}
/* -- INTERFACE FUNCTIONS -------------------------------------------------- */
/*
 * Creates a new menu object, adding it to the freeglut structure
 *
 * `callback` is invoked with the entry ID whenever an item of this menu is
 * selected.  Returns the new menu's ID.  Calling while any menu is in use
 * is a fatal error.
 */
int FGAPIENTRY glutCreateMenu( FGCBMenu callback )
{
    SFG_Menu* menu;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutCreateMenu" );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* The menu object creation code resides in fg_structure.c */
    menu = fgCreateMenu( callback );
    return menu->ID;
}
/*
 * Destroys a menu object, removing all references to it
 *
 * Unknown menu IDs are silently ignored; calling while any menu is in use
 * is a fatal error.
 */
void FGAPIENTRY glutDestroyMenu( int menuID )
{
    SFG_Menu* doomed;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutDestroyMenu" );

    /* Resolve the ID first; bail out silently if it is unknown */
    doomed = fgMenuByID( menuID );
    freeglut_return_if_fail( doomed );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* The menu object destruction code resides in fg_structure.c */
    fgDestroyMenu( doomed );
}
/*
 * Returns the ID number of the currently active menu
 *
 * Zero is returned when no menu is current.
 */
int FGAPIENTRY glutGetMenu( void )
{
    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutGetMenu" );
    return fgStructure.CurrentMenu ? fgStructure.CurrentMenu->ID : 0;
}
/*
 * Sets the current menu given its menu ID
 *
 * Unknown IDs are silently ignored and leave the current menu unchanged.
 */
void FGAPIENTRY glutSetMenu( int menuID )
{
    SFG_Menu* found;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutSetMenu" );
    found = fgMenuByID( menuID );
    freeglut_return_if_fail( found );
    fgStructure.CurrentMenu = found;
}
/*
 * Adds a menu entry to the bottom of the current menu
 *
 * `label` is copied (strdup); `value` is the ID passed to the menu callback
 * when this entry is selected.  Silently ignored when no current menu is
 * set; fatal error while any menu is in use.
 */
void FGAPIENTRY glutAddMenuEntry( const char* label, int value )
{
    SFG_MenuEntry* menuEntry;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutAddMenuEntry" );

    /* Validate state BEFORE allocating: the original code calloc'd first,
     * leaking the entry whenever CurrentMenu was unset. */
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    menuEntry = (SFG_MenuEntry *)calloc( sizeof(SFG_MenuEntry), 1 );
    freeglut_return_if_fail( menuEntry );  /* out of memory */

    menuEntry->Text = strdup( label );
    menuEntry->ID = value;

    /* Have the new menu entry attached to the current menu */
    fgListAppend( &fgStructure.CurrentMenu->Entries, &menuEntry->Node );
    fghCalculateMenuBoxSize( );
}
/*
 * Add a sub menu to the bottom of the current menu
 *
 * `label` is copied (strdup); `subMenuID` identifies an existing menu that
 * becomes this entry's submenu (the entry's own ID is set to -1, i.e. no
 * callback value).  Silently ignored when either the current menu or the
 * submenu is missing; fatal error while any menu is in use.
 */
void FGAPIENTRY glutAddSubMenu( const char *label, int subMenuID )
{
    SFG_MenuEntry *menuEntry;
    SFG_Menu *subMenu;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutAddSubMenu" );

    subMenu = fgMenuByID( subMenuID );

    /* Validate everything BEFORE allocating: the original code calloc'd
     * first, leaking the entry on either early return below. */
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");
    freeglut_return_if_fail( subMenu );

    menuEntry = ( SFG_MenuEntry * )calloc( sizeof( SFG_MenuEntry ), 1 );
    freeglut_return_if_fail( menuEntry );  /* out of memory */

    menuEntry->Text = strdup( label );
    menuEntry->SubMenu = subMenu;
    menuEntry->ID = -1;

    fgListAppend( &fgStructure.CurrentMenu->Entries, &menuEntry->Node );
    fghCalculateMenuBoxSize( );
}
/*
 * Changes the given menu's font
 *
 * `menuID` selects the menu; `fontID` must name a bitmap font known to
 * fghFontByID (stroke fonts are rejected with a warning).  Fatal error
 * while any menu is in use.
 */
void FGAPIENTRY glutSetMenuFont( int menuID, void* fontID )
{
    SFG_Font* font;
    SFG_Menu* menu;
    SFG_Menu* savedCurrent;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutSetMenuFont" );
    menu = fgMenuByID( menuID );
    freeglut_return_if_fail( menu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    font = fghFontByID( fontID );
    if (!font)
    {
        /* Message fixed to name this function (was "glutChangeMenuFont") */
        fgWarning("glutSetMenuFont: bitmap font 0x%08x not found. Make sure you're not passing a stroke font. Ignoring...\n",fontID);
        return;
    }

    /* BUG FIX: apply the font to the menu that was looked up -- the
     * original assigned fgStructure.CurrentMenu->Font, ignoring `menu`. */
    menu->Font = fontID;

    /* fghCalculateMenuBoxSize() operates on the current menu; retarget it
     * temporarily so the menu we just changed is the one resized. */
    savedCurrent = fgStructure.CurrentMenu;
    fgStructure.CurrentMenu = menu;
    fghCalculateMenuBoxSize( );
    fgStructure.CurrentMenu = savedCurrent;
}
/*
 * Changes the specified menu item in the current menu into a menu entry
 *
 * `item` is the 1-based position in the current menu; `label` is copied and
 * `value` becomes the callback ID.  Any submenu link the item had is
 * dropped.  Unknown items / missing current menu are silently ignored.
 */
void FGAPIENTRY glutChangeToMenuEntry( int item, const char* label, int value )
{
    SFG_MenuEntry* entry = NULL;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutChangeToMenuEntry" );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* Get n-th menu entry in the current menu, starting from one: */
    entry = fghFindMenuEntry( fgStructure.CurrentMenu, item );
    freeglut_return_if_fail( entry );

    /* Turn the item into a plain entry: swap the label, clear any submenu
     * link, install the callback value, then refit the box. */
    if( entry->Text )
        free( entry->Text );
    entry->Text = strdup( label );
    entry->SubMenu = NULL;
    entry->ID = value;
    fghCalculateMenuBoxSize( );
}
/*
 * Changes the specified menu item in the current menu into a sub-menu trigger.
 *
 * `item` is the 1-based position in the current menu; `label` is copied and
 * the entry is linked to the menu identified by `subMenuID` (its own ID is
 * set to -1).  Unknown items, unknown submenu IDs, or a missing current
 * menu are silently ignored.
 */
void FGAPIENTRY glutChangeToSubMenu( int item, const char* label,
                                     int subMenuID )
{
    SFG_Menu* child;
    SFG_MenuEntry* entry;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutChangeToSubMenu" );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* Resolve the sub menu handle; bail out silently when unknown */
    child = fgMenuByID( subMenuID );
    freeglut_return_if_fail( child );

    /* Get n-th menu entry in the current menu, starting from one: */
    entry = fghFindMenuEntry( fgStructure.CurrentMenu, item );
    freeglut_return_if_fail( entry );

    /* Turn the item into a submenu trigger: swap the label, invalidate the
     * callback ID and hook up the child menu, then refit the box. */
    if( entry->Text )
        free( entry->Text );
    entry->Text = strdup( label );
    entry->ID = -1;
    entry->SubMenu = child;
    fghCalculateMenuBoxSize( );
}
/*
 * Removes the specified menu item from the current menu
 *
 * `item` is the 1-based position in the current menu.  Unknown items or a
 * missing current menu are silently ignored; fatal error while any menu is
 * in use.
 */
void FGAPIENTRY glutRemoveMenuItem( int item )
{
    SFG_MenuEntry* doomed;

    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutRemoveMenuItem" );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* Get n-th menu entry in the current menu, starting from one: */
    doomed = fghFindMenuEntry( fgStructure.CurrentMenu, item );
    freeglut_return_if_fail( doomed );

    /* Unlink the entry, release its label and storage, then refit the box */
    fgListRemove( &fgStructure.CurrentMenu->Entries, &doomed->Node );
    if ( doomed->Text )
        free( doomed->Text );
    free( doomed );
    fghCalculateMenuBoxSize( );
}
/*
 * Attaches a menu to the current window
 *
 * Binds the current menu to mouse `button` of the current window.  Silently
 * ignored when no current window/menu is set or the button index is out of
 * range; fatal error while any menu is in use.
 */
void FGAPIENTRY glutAttachMenu( int button )
{
    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutAttachMenu" );
    freeglut_return_if_fail( fgStructure.CurrentWindow );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* Reject buttons outside the supported range, then bind */
    freeglut_return_if_fail( ( button >= 0 ) && ( button < FREEGLUT_MAX_MENUS ) );
    fgStructure.CurrentWindow->Menu[ button ] = fgStructure.CurrentMenu;
}
/*
 * Detaches a menu from the current window
 *
 * Clears whatever menu is bound to mouse `button` of the current window.
 * Silently ignored when no current window/menu is set or the button index
 * is out of range; fatal error while any menu is in use.
 */
void FGAPIENTRY glutDetachMenu( int button )
{
    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutDetachMenu" );
    freeglut_return_if_fail( fgStructure.CurrentWindow );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    if (fgState.ActiveMenus)
        fgError("Menu manipulation not allowed while menus in use.");

    /* Reject buttons outside the supported range, then clear the binding */
    freeglut_return_if_fail( ( button >= 0 ) && ( button < FREEGLUT_MAX_MENUS ) );
    fgStructure.CurrentWindow->Menu[ button ] = NULL;
}
/*
 * A.Donev: Set and retrieve the menu's user data
 */
/*
 * Returns the user-data pointer of the current menu, or NULL when no menu
 * is current (the original dereferenced fgStructure.CurrentMenu without a
 * guard and would crash in that case).
 */
void* FGAPIENTRY glutGetMenuData( void )
{
    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutGetMenuData" );
    if ( !fgStructure.CurrentMenu )
        return NULL;
    return fgStructure.CurrentMenu->UserData;
}
/*
 * Stores a user-data pointer on the current menu.  Silently ignored when
 * no menu is current (the original dereferenced fgStructure.CurrentMenu
 * without a guard and would crash in that case).
 */
void FGAPIENTRY glutSetMenuData(void* data)
{
    FREEGLUT_EXIT_IF_NOT_INITIALISED ( "glutSetMenuData" );
    freeglut_return_if_fail( fgStructure.CurrentMenu );
    fgStructure.CurrentMenu->UserData = data;
}
/*** END OF FILE ***/
| 34.55545 | 179 | 0.624424 | [
"object"
] |
36b61a9f32a2083eee199eeb73370c1384398c17 | 46,506 | h | C | newton-4.00/sdk/dCore/dVectorArmNeon.h | MADEAPPS/newton-dynamics | e346eec9d19ffb1c995b09417400167d3c52a635 | [
"Zlib"
] | 1,031 | 2015-01-02T14:08:47.000Z | 2022-03-29T02:25:27.000Z | newton-4.00/sdk/dCore/dVectorArmNeon.h | MADEAPPS/newton-dynamics | e346eec9d19ffb1c995b09417400167d3c52a635 | [
"Zlib"
] | 240 | 2015-01-11T04:27:19.000Z | 2022-03-30T00:35:57.000Z | newton-4.00/sdk/dCore/dVectorArmNeon.h | MADEAPPS/newton-dynamics | e346eec9d19ffb1c995b09417400167d3c52a635 | [
"Zlib"
] | 224 | 2015-01-05T06:13:54.000Z | 2022-02-25T14:39:51.000Z | /* Copyright (c) <2003-2021> <Julio Jerez, Newton Game Dynamics>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
#ifndef __D_VECTOR_ARM_NEON_H__
#define __D_VECTOR_ARM_NEON_H__
//#include <arm_neon.h>
#if 0
// *****************************************************************************************
//
// 4 x 1 single precision vector class declaration
//
// *****************************************************************************************
#ifdef D_NEWTON_USE_DOUBLE
#define dVector dBigVector
#else
#if DG_ARCH >= DG_ARCH_NEON_64
A R M 6 4
#define vec_minv vminvq_u32
#define vec_maxv maxvq_u32
#define vec_hadd4 vaddvq_f32
#define vec_floor vrndq_f32
#else
#define inline_FORCEINLINE(type) static inline type
inline_FORCEINLINE(dInt32) vec_minv(uint32x4_t v)
{
uint32x2_t tmp = vpmin_u32(vget_low_u32(v), vget_high_u32(v));
tmp = vpmin_u32(tmp, tmp);
return tmp[0] != 0;
}
inline_FORCEINLINE(dInt32) vec_maxv(uint32x4_t v)
{
uint32x2_t tmp = vpmax_u32(vget_low_u32(v), vget_high_u32(v));
tmp = vpmax_u32(tmp, tmp);
return tmp[0] != 0;
}
inline_FORCEINLINE(float)vec_hadd4(float32x4_t v)
{
float32x4_t tmp = vaddq_f32(v, vrev64q_f32(v));
tmp = vaddq_f32(tmp, vcombine_f32(vget_high_f32(tmp), vget_low_f32(tmp)));
return tmp[0];
}
#endif
inline_FORCEINLINE(float) vec_hadd3(float32x4_t v)
{
float32x2_t temp = vpadd_f32(vget_low_f32(v), vget_low_f32(v));
temp = vadd_f32(temp, vget_high_f32(v));
return vget_lane_f32(temp, 0);
}
#define vec_mul vmulq_f32
#define vec_add vaddq_f32
#define vec_sub vsubq_f32
#define vec_max vmaxq_f32
#define vec_min vminq_f32
#define vec_splat vdupq_n_f32
#define vec_div vdivq_f32
#define vec_rcp vrecpeq_f32
#define vec_store vst1q_f32
#define vec_load vld1q_f32
#define vec_abs vabsq_f32
#define vec_cvt vcvtq_s32_f32
#define vec_sqrt vrsqrtsq_f32
#define vec_recp vrecpsq_f32
#define vec_rsqrt rsqrteq_f32
#define vec_cmpne vceqq_f32
#define vec_cmpgt vcgtq_f32
#define vec_cmpge vcgeq_f32
#define vec_cmpeq vceqq_f32
#define vec_cmplt vcltq_f32
#define vec_cmple vcleq_f32
#define vec_xor veorq_u32
#define vec_or vorrq_u32
#define vec_and vandq_u32
#define vec_not vmvnq_u32
#define vec_andnot vbicq_u32
#if defined __ARM_FEATURE_FMA
// a * b + c (no rounding, better results)
#define vec_madd vfmaq_f32
#define vec_msub vmlsq_f32
#else
#define vec_madd vmlaq_f32
#define vec_msub vmlsq_f32
#endif
static inline float32x4_t vec_set(const float w, const float z, const float y, const float x)
{
float ptr[] = { x, y, z, w };
return vec_load(ptr);
}
class dBigVector;
DG_MSC_VECTOR_ALIGMENT
class dVector
{
public:
inline dVector()
{
}
inline dVector(const float32x4_t type)
: m_type(type) {
}
inline dVector(const uint32x4_t type)
: m_typeInt(type) {
}
inline dVector(dFloat32 val)
: m_type(vmovq_n_f32(val))
{
}
inline dVector (const dVector& v)
: m_type( v.m_type )
{
//dAssert (dCheckVector ((*this)));
}
inline dVector (const dFloat32* const ptr)
:m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w (dFloat32 (0.0f))
{
dAssert (dCheckVector ((*this)));
}
#ifndef D_NEWTON_USE_DOUBLE
inline dVector(const dFloat64* const ptr)
:m_x(dFloat32(ptr[0]))
,m_y(dFloat32(ptr[1]))
,m_z(dFloat32(ptr[2]))
,m_w(dFloat32(0.0f))
{
}
#endif
inline dVector (dFloat32 x, dFloat32 y, dFloat32 z, dFloat32 w)
:m_x(x), m_y(y), m_z(z), m_w(w)
{
dAssert (dCheckVector ((*this)));
}
inline dVector (dInt32 ix, dInt32 iy, dInt32 iz, dInt32 iw)
:m_x(*((dFloat32*)&ix)), m_y(*((dFloat32*)&iy)), m_z(*((dFloat32*)&iz)), m_w(*((dFloat32*)&iw))
{
}
#ifndef D_NEWTON_USE_DOUBLE
inline dVector (const dBigVector& copy)
:m_x(dFloat32 (((dFloat64*)©)[0]))
,m_y(dFloat32 (((dFloat64*)©)[1]))
,m_z(dFloat32 (((dFloat64*)©)[2]))
,m_w(dFloat32 (((dFloat64*)©)[3]))
{
dAssert (dCheckVector ((*this)));
}
#endif
inline dFloat32 GetScalar () const
{
return m_x;
}
inline void Store (dFloat32* const dst) const
{
vec_store(dst, m_type);
}
inline dVector BroadcastX () const
{
return dVector (m_x);
}
inline dVector BroadcastY () const
{
return dVector (m_y);
}
inline dVector BroadcastZ () const
{
return dVector (m_z);
}
inline dVector BroadcastW () const
{
return dVector (m_w);
}
inline dFloat32& operator[] (dInt32 i)
{
dAssert (i < 4);
dAssert (i >= 0);
return (&m_x)[i];
}
inline const dFloat32& operator[] (dInt32 i) const
{
dAssert (i < 4);
dAssert (i >= 0);
return (&m_x)[i];
}
inline dVector operator+ (const dVector& A) const
{
return vec_add(m_type, A.m_type);
}
inline dVector operator- (const dVector& A) const
{
return vec_sub(m_type, A.m_type);
}
inline dVector operator* (const dVector& A) const
{
return vec_mul(m_type, A.m_type);
}
inline dVector& operator+= (const dVector& A)
{
m_type = vec_add(m_type, A.m_type);
return *this;
}
inline dVector& operator-= (const dVector& A)
{
m_type = vec_sub(m_type, A.m_type);
return *this;
}
inline dVector& operator*= (const dVector& A)
{
m_type = vec_mul(m_type, A.m_type);
return *this;
}
inline dVector AddHorizontal () const
{
return vec_hadd3(m_type); // dVector (m_x + m_y + m_z + m_w);
}
inline dVector Scale3 (dFloat32 scale) const
{
return dVector (m_x * scale, m_y * scale, m_z * scale, m_w);
}
inline dVector Scale (dFloat32 scale) const
{
return vec_mul(vmovq_n_f32(scale), m_type);
}
// component wise multiplication
inline dVector CompProduct3 (const dVector& A) const
{
return dVector (m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, A.m_w);
}
// return cross product
inline dVector CrossProduct (const dVector& B) const
{
/*
float32x4_t v1 = m_type;
float32x4_t v2 = B.m_type;
float32x4x2_t v_1203 = vzipq_f32(vcombine_f32(vrev64_f32(vget_low_f32(v1)), vrev64_f32(vget_low_f32(v2))), vcombine_f32(vget_high_f32(v1), vget_high_f32(v2)));
float32x4x2_t v_2013 = vzipq_f32(vcombine_f32(vrev64_f32(vget_low_f32(v_1203.val[0])), vrev64_f32(vget_low_f32(v_1203.val[1]))), vcombine_f32(vget_high_f32(v_1203.val[0]), vget_high_f32(v_1203.val[1])));
inVec->vec = vmlsq_f32(vmulq_f32(v_1203.val[0], v_2013.val[1]), v_1203.val[1], v_2013.val[0]);
*/
return dVector (m_y * B.m_z - m_z * B.m_y,
m_z * B.m_x - m_x * B.m_z,
m_x * B.m_y - m_y * B.m_x, m_w);
}
inline dVector CrossProduct (const dVector& A, const dVector& B) const
{
dFloat32 cofactor[3][3];
dFloat32 array[4][4];
const dVector& me = *this;
for (dInt32 i = 0; i < 4; i ++) {
array[0][i] = me[i];
array[1][i] = A[i];
array[2][i] = B[i];
array[3][i] = dFloat32 (1.0f);
}
dVector normal;
dFloat32 sign = dFloat32 (-1.0f);
for (dInt32 i = 0; i < 4; i ++)
{
for (dInt32 j = 0; j < 3; j ++)
{
dInt32 k0 = 0;
for (dInt32 k = 0; k < 4; k ++)
{
if (k != i)
{
cofactor[j][k0] = array[j][k];
k0 ++;
}
}
}
dFloat32 x = cofactor[0][0] * (cofactor[1][1] * cofactor[2][2] - cofactor[1][2] * cofactor[2][1]);
dFloat32 y = cofactor[0][1] * (cofactor[1][2] * cofactor[2][0] - cofactor[1][0] * cofactor[2][2]);
dFloat32 z = cofactor[0][2] * (cofactor[1][0] * cofactor[2][1] - cofactor[1][1] * cofactor[2][0]);
dFloat32 det = x + y + z;
normal[i] = sign * det;
sign *= dFloat32 (-1.0f);
}
return normal;
}
inline dVector GetInt () const
{
return vcvtq_u32_f32(m_type);
}
inline dVector GetFloat () const
{
return vcvtq_f32_u32(m_type);
}
inline dVector TestZero() const
{
return m_negOne & (*this == m_zero);
}
inline dVector Floor () const
{
#if DG_ARCH >= DG_ARCH_NEON_64
// #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
return vec_floor(m_type);
#else
return dVector (dgFloor (m_x), dgFloor (m_y), dgFloor (m_z), dgFloor (m_w));
#endif
}
inline dVector DotProduct (const dVector &A) const
{
auto tmp = vec_mul(m_type, A.m_type);
return vec_hadd4(tmp);
}
inline dVector InvMagSqrt() const
{
return dVector(dgRsqrt(DotProduct(*this).m_x));
}
inline dVector Reciproc() const
{
float32x4_t reciprocal = vrecpeq_f32(m_type);
reciprocal = vrecpsq_f32(m_type, reciprocal) * reciprocal;
return reciprocal;
}
inline dVector MulAdd(const dVector& A, const dVector& B) const
{
// a * b + this
return vec_madd(A.m_type, B.m_type, m_type);
}
inline dVector MulSub(const dVector& A, const dVector& B) const
{
// a * b - this
return vec_msub(A.m_type, B.m_type, m_type);
}
inline dVector InvSqrt() const
{
float32x4_t sqrt_reciprocal = vrsqrteq_f32(m_type);
return vrsqrtsq_f32(m_type * sqrt_reciprocal, sqrt_reciprocal) * sqrt_reciprocal;
}
inline dVector Sqrt() const
{
float32x4_t sqrt_reciprocal = vrsqrteq_f32(m_type);
float32x4_t tmp = vrsqrtsq_f32(m_type * sqrt_reciprocal, sqrt_reciprocal) * sqrt_reciprocal;
return vec_mul(m_type, tmp);
}
inline dVector Normalize () const
{
dAssert (m_w == dFloat32 (0.0f));
const dVector& me = *this;
return me * InvMagSqrt();
}
dVector Abs () const
{
return vec_abs(m_type);
}
dFloat32 GetMax () const
{
return dMax(dMax(m_x, m_y), dMax(m_z, m_w));
}
dVector GetMax (const dVector& data) const
{
return vec_max(m_type, data.m_type);
}
dVector GetMin (const dVector& data) const
{
return vec_min(m_type, data.m_type);
}
// relational operators
inline dVector operator== (const dVector& data) const
{
return vec_cmpeq(m_typeInt, data.m_typeInt);
}
inline dVector operator!= (const dVector& data) const
{
return vec_cmpne(m_typeInt, data.m_typeInt);
}
inline dVector operator> (const dVector& data) const
{
return vec_cmpgt(m_typeInt, data.m_typeInt);
}
inline dVector operator< (const dVector& data) const
{
return vec_cmplt(m_typeInt, data.m_typeInt);
}
inline dVector operator>= (const dVector& data) const
{
return vec_cmpge(m_typeInt, data.m_typeInt);
}
inline dVector operator<= (const dVector& data) const
{
return vec_cmple(m_typeInt, data.m_typeInt);
}
// logical operations
inline dVector operator& (const dVector& data) const
{
return vec_and(m_typeInt, data.m_typeInt);
}
inline dVector operator| (const dVector& data) const
{
return vec_or(m_typeInt, data.m_typeInt);
}
inline dVector operator^ (const dVector& data) const
{
return vec_xor(m_typeInt, data.m_typeInt);
}
inline dVector AndNot (const dVector& data) const
{
return vec_andnot(m_typeInt, data.m_typeInt);
}
inline dInt32 GetSignMask() const
{
const dInt32* const a = (dInt32*)&m_x;
return (((a[0] & 0x80000000) ? 1 : 0) | ((a[1] & 0x80000000) ? 2 : 0) | ((a[2] & 0x80000000) ? 4 : 0) | ((a[3] & 0x80000000) ? 8 : 0));
}
inline dVector ShiftTripleRight () const
{
return dVector (m_z, m_x, m_y, m_w);
}
inline dVector ShiftTripleLeft () const
{
return dVector (m_y, m_z, m_x, m_w);
}
inline dVector ShiftRightLogical (dInt32 bits) const
{
return dVector (dInt32 (dUnsigned32 (m_ix) >> bits), dInt32 (dUnsigned32 (m_iy) >> bits), dInt32 (dUnsigned32 (m_iz) >> bits), dInt32 (dUnsigned32 (m_iw) >> bits));
}
inline static void Transpose4x4 (dVector& dst0, dVector& dst1, dVector& dst2, dVector& dst3, const dVector& src0, const dVector& src1, const dVector& src2, const dVector& src3)
{
float32x4x2_t vtrn1 = vzipq_f32(src0.m_type, src2.m_type);
float32x4x2_t vtrn2 = vzipq_f32(src1.m_type, src3.m_type);
float32x4x2_t res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
float32x4x2_t res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
dst0.m_type = res1.val[0];
dst1.m_type = res1.val[1];
dst2.m_type = res2.val[0];
dst3.m_type = res2.val[1];
}
DG_CLASS_ALLOCATOR(allocator)
union {
float32x4_t m_type;
uint32x4_t m_typeInt;
dInt32 m_i[4];
struct {
dFloat32 m_x;
dFloat32 m_y;
dFloat32 m_z;
dFloat32 m_w;
};
struct {
dInt32 m_ix;
dInt32 m_iy;
dInt32 m_iz;
dInt32 m_iw;
};
};
D_CORE_API static dVector m_zero;
D_CORE_API static dVector m_one;
D_CORE_API static dVector m_wOne;
D_CORE_API static dVector m_half;
D_CORE_API static dVector m_two;
D_CORE_API static dVector m_three;
D_CORE_API static dVector m_negOne;
D_CORE_API static dVector m_xMask;
D_CORE_API static dVector m_yMask;
D_CORE_API static dVector m_zMask;
D_CORE_API static dVector m_wMask;
D_CORE_API static dVector m_xyzwMask;
D_CORE_API static dVector m_signMask;
D_CORE_API static dVector m_triplexMask;
} DG_GCC_VECTOR_ALIGMENT;
#endif
DG_MSC_VECTOR_ALIGMENT
class dBigVector
{
public:
inline dBigVector()
{
}
inline dBigVector(dFloat64 val)
:m_x(val), m_y(val), m_z(val), m_w(val)
{
}
inline dBigVector(const dBigVector& v)
: m_x(v.m_x), m_y(v.m_y), m_z(v.m_z), m_w(v.m_w)
{
}
#ifndef D_NEWTON_USE_DOUBLE
inline dBigVector(const dVector& v)
: m_x(v.m_x), m_y(v.m_y), m_z(v.m_z), m_w(v.m_w)
{
}
inline dBigVector(const dFloat32* const ptr)
: m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w(dFloat32(0.0f))
{
dAssert(dCheckVector((*this)));
}
#endif
inline dBigVector(const dFloat64* const ptr)
:m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w(dFloat32(0.0f))
{
dAssert(dCheckVector((*this)));
}
inline dBigVector(dFloat64 x, dFloat64 y, dFloat64 z, dFloat64 w)
: m_x(x), m_y(y), m_z(z), m_w(w)
{
dAssert(dCheckVector((*this)));
}
inline dBigVector(dInt32 ix, dInt32 iy, dInt32 iz, dInt32 iw)
: m_ix(ix), m_iy(iy), m_iz(iz), m_iw(iw)
{
}
inline dBigVector(dInt64 ix, dInt64 iy, dInt64 iz, dInt64 iw)
: m_ix(ix), m_iy(iy), m_iz(iz), m_iw(iw)
{
}
inline dFloat64 GetScalar() const
{
return m_x;
}
inline void Store(dFloat64* const dst) const
{
dst[0] = m_x;
dst[1] = m_y;
dst[2] = m_z;
dst[3] = m_w;
}
inline dBigVector BroadcastX() const
{
return dBigVector(m_x);
}
inline dBigVector BroadcastY() const
{
return dBigVector(m_y);
}
inline dBigVector BroadcastZ() const
{
return dBigVector(m_z);
}
inline dBigVector BroadcastW() const
{
return dBigVector(m_w);
}
inline dFloat64& operator[] (dInt32 i)
{
dAssert(i < 4);
dAssert(i >= 0);
return (&m_x)[i];
}
inline const dFloat64& operator[] (dInt32 i) const
{
dAssert(i < 4);
dAssert(i >= 0);
return (&m_x)[i];
}
inline dBigVector operator+ (const dBigVector& A) const
{
return dBigVector(m_x + A.m_x, m_y + A.m_y, m_z + A.m_z, m_w + A.m_w);
}
inline dBigVector operator- (const dBigVector& A) const
{
return dBigVector(m_x - A.m_x, m_y - A.m_y, m_z - A.m_z, m_w - A.m_w);
}
inline dBigVector operator* (const dBigVector& A) const
{
return dBigVector(m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, m_w * A.m_w);
}
inline dBigVector& operator+= (const dBigVector& A)
{
return (*this = dBigVector(m_x + A.m_x, m_y + A.m_y, m_z + A.m_z, m_w + A.m_w));
}
inline dBigVector& operator-= (const dBigVector& A)
{
return (*this = dBigVector(m_x - A.m_x, m_y - A.m_y, m_z - A.m_z, m_w - A.m_w));
}
inline dBigVector& operator*= (const dBigVector& A)
{
return (*this = dBigVector(m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, m_w * A.m_w));
}
inline dBigVector AddHorizontal() const
{
return dBigVector(m_x + m_y + m_z + m_w);
}
inline dBigVector Scale3(dFloat64 scale) const
{
return dBigVector(m_x * scale, m_y * scale, m_z * scale, m_w);
}
inline dBigVector Scale(dFloat64 scale) const
{
return dBigVector(m_x * scale, m_y * scale, m_z * scale, m_w * scale);
}
// component wise multiplication
inline dBigVector CompProduct3(const dBigVector& A) const
{
return dBigVector(m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, A.m_w);
}
// return cross product
inline dBigVector CrossProduct(const dBigVector& B) const
{
return dBigVector(m_y * B.m_z - m_z * B.m_y, m_z * B.m_x - m_x * B.m_z, m_x * B.m_y - m_y * B.m_x, m_w);
}
inline dBigVector CrossProduct(const dBigVector& A, const dBigVector& B) const
{
dFloat64 cofactor[3][3];
dFloat64 array[4][4];
const dBigVector& me = *this;
for (dInt32 i = 0; i < 4; i++) {
array[0][i] = me[i];
array[1][i] = A[i];
array[2][i] = B[i];
array[3][i] = dFloat32(1.0f);
}
dBigVector normal;
dFloat64 sign = dFloat64(-1.0f);
for (dInt32 i = 0; i < 4; i++) {
for (dInt32 j = 0; j < 3; j++) {
dInt32 k0 = 0;
for (dInt32 k = 0; k < 4; k++) {
if (k != i) {
cofactor[j][k0] = array[j][k];
k0++;
}
}
}
dFloat64 x = cofactor[0][0] * (cofactor[1][1] * cofactor[2][2] - cofactor[1][2] * cofactor[2][1]);
dFloat64 y = cofactor[0][1] * (cofactor[1][2] * cofactor[2][0] - cofactor[1][0] * cofactor[2][2]);
dFloat64 z = cofactor[0][2] * (cofactor[1][0] * cofactor[2][1] - cofactor[1][1] * cofactor[2][0]);
dFloat64 det = x + y + z;
normal[i] = sign * det;
sign *= dFloat64(-1.0f);
}
return normal;
}
// ---- dBigVector members (the class declaration begins above this excerpt) ----
// Truncate each lane toward negative infinity and store the results as
// 64-bit integers in the lanes (via the integer constructor).
inline dBigVector GetInt() const
{
return dBigVector(dInt64(floor(m_x)), dInt64(floor(m_y)), dInt64(floor(m_z)), dInt64(floor(m_w)));
}
// Per-lane zero test on the raw 64-bit patterns: a lane whose bits are all
// clear (i.e. +0.0) yields -1.0, any other pattern yields 1.0.
inline dBigVector TestZero() const
{
const dInt64* const a = (dInt64*)&m_x;
return dBigVector((a[0] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
(a[1] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
(a[2] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
(a[3] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f));
}
// Per-lane floor.
inline dBigVector Floor() const
{
return dBigVector(floor(m_x), floor(m_y), floor(m_z), floor(m_w));
}
// Full 4-lane dot product (w participates); result broadcast to all lanes.
inline dBigVector DotProduct(const dBigVector &A) const
{
return dBigVector(m_x * A.m_x + m_y * A.m_y + m_z * A.m_z + m_w * A.m_w);
}
// Per-lane reciprocal; no guard against zero lanes.
inline dBigVector Reciproc() const
{
return dBigVector(dFloat64(1.0f) / m_x, dFloat64(1.0f) / m_y, dFloat64(1.0f) / m_z, dFloat64(1.0f) / m_w);
}
// Per-lane square root.
inline dBigVector Sqrt() const
{
return dBigVector(sqrt(m_x), sqrt(m_y), sqrt(m_z), sqrt(m_w));
}
// Per-lane reciprocal square root.
inline dBigVector InvSqrt() const
{
return dBigVector(dFloat64(1.0f) / sqrt(m_x), dFloat64(1.0f) / sqrt(m_y), dFloat64(1.0f) / sqrt(m_z), dFloat64(1.0f) / sqrt(m_w));
}
// 1 / |v| broadcast to all lanes; built on the 4-lane dot product, so the
// w lane contributes to the magnitude.
inline dBigVector InvMagSqrt() const
{
return dBigVector(dFloat64(1.0f) / sqrt(DotProduct(*this).m_x));
}
// Unit-length vector; asserts w == 0 so the w lane cannot skew the
// magnitude computed by InvMagSqrt().
inline dBigVector Normalize() const
{
dAssert(m_w == dFloat64(0.0f));
//const dBigVector& me = *this;
//return *this * dBigVector (dgRsqrt(DotProduct(*this).m_x));
return *this * InvMagSqrt();
}
// Per-lane absolute value (note: implemented with >, so -0.0 stays -0.0).
dBigVector Abs() const
{
return dBigVector((m_x > dFloat64(0.0f)) ? m_x : -m_x,
(m_y > dFloat64(0.0f)) ? m_y : -m_y,
(m_z > dFloat64(0.0f)) ? m_z : -m_z,
(m_w > dFloat64(0.0f)) ? m_w : -m_w);
}
// Largest of the four lanes.
dFloat64 GetMax() const
{
return dMax(dMax(m_x, m_y), dMax(m_z, m_w));
}
// Per-lane maximum against another vector.
dBigVector GetMax(const dBigVector& data) const
{
return dBigVector((m_x > data.m_x) ? m_x : data.m_x,
(m_y > data.m_y) ? m_y : data.m_y,
(m_z > data.m_z) ? m_z : data.m_z,
(m_w > data.m_w) ? m_w : data.m_w);
}
// Per-lane minimum against another vector.
dBigVector GetMin(const dBigVector& data) const
{
return dBigVector((m_x < data.m_x) ? m_x : data.m_x,
(m_y < data.m_y) ? m_y : data.m_y,
(m_z < data.m_z) ? m_z : data.m_z,
(m_w < data.m_w) ? m_w : data.m_w);
}
// relational operators
// SIMD-style comparisons: each lane becomes the all-ones pattern
// (dInt64(-1)) when the predicate holds, all-zeros otherwise, so the
// result can be used as a bit mask with the logical operators below.
inline dBigVector operator== (const dBigVector& data) const
{
return dBigVector((m_x == data.m_x) ? dInt64(-1) : dInt64(0),
(m_y == data.m_y) ? dInt64(-1) : dInt64(0),
(m_z == data.m_z) ? dInt64(-1) : dInt64(0),
(m_w == data.m_w) ? dInt64(-1) : dInt64(0));
}
inline dBigVector operator> (const dBigVector& data) const
{
return dBigVector((m_x > data.m_x) ? dInt64(-1) : dInt64(0),
(m_y > data.m_y) ? dInt64(-1) : dInt64(0),
(m_z > data.m_z) ? dInt64(-1) : dInt64(0),
(m_w > data.m_w) ? dInt64(-1) : dInt64(0));
}
inline dBigVector operator< (const dBigVector& data) const
{
return dBigVector((m_x < data.m_x) ? dInt64(-1) : dInt64(0),
(m_y < data.m_y) ? dInt64(-1) : dInt64(0),
(m_z < data.m_z) ? dInt64(-1) : dInt64(0),
(m_w < data.m_w) ? dInt64(-1) : dInt64(0));
}
inline dBigVector operator>= (const dBigVector& data) const
{
return dBigVector((m_x >= data.m_x) ? dInt64(-1) : dInt64(0),
(m_y >= data.m_y) ? dInt64(-1) : dInt64(0),
(m_z >= data.m_z) ? dInt64(-1) : dInt64(0),
(m_w >= data.m_w) ? dInt64(-1) : dInt64(0));
}
inline dBigVector operator<= (const dBigVector& data) const
{
return dBigVector((m_x <= data.m_x) ? dInt64(-1) : dInt64(0),
(m_y <= data.m_y) ? dInt64(-1) : dInt64(0),
(m_z <= data.m_z) ? dInt64(-1) : dInt64(0),
(m_w <= data.m_w) ? dInt64(-1) : dInt64(0));
}
// logical operations
// Bitwise operators act on the raw 64-bit patterns of the lanes through
// pointer aliasing; they pair with the mask-producing comparisons above.
inline dBigVector operator& (const dBigVector& data) const
{
const dInt64* const a = (dInt64*)&m_x;
const dInt64* const b = (dInt64*)&data.m_x;
return dBigVector(a[0] & b[0], a[1] & b[1], a[2] & b[2], a[3] & b[3]);
}
inline dBigVector operator| (const dBigVector& data) const
{
const dInt64* const a = (dInt64*)&m_x;
const dInt64* const b = (dInt64*)&data.m_x;
return dBigVector(a[0] | b[0], a[1] | b[1], a[2] | b[2], a[3] | b[3]);
}
inline dBigVector operator^ (const dBigVector& data) const
{
const dInt64* const a = (dInt64*)&m_x;
const dInt64* const b = (dInt64*)&data.m_x;
return dBigVector(a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]);
}
// this & ~data, lane by lane.
inline dBigVector AndNot(const dBigVector& data) const
{
const dInt64* const a = (dInt64*)&m_x;
const dInt64* const b = (dInt64*)&data.m_x;
return dBigVector(a[0] & ~b[0], a[1] & ~b[1], a[2] & ~b[2], a[3] & ~b[3]);
}
// Pack the four sign bits into bits 0..3 of the result (x -> bit 0,
// w -> bit 3), read via an arithmetic shift of the 64-bit lane pattern.
inline dInt32 GetSignMask() const
{
const dInt64* const a = (dInt64*)&m_x;
return (((a[0] >> 63) ? 1 : 0) | ((a[1] >> 63) ? 2 : 0) | ((a[2] >> 63) ? 4 : 0) | ((a[3] >> 63) ? 8 : 0));
}
// Rotate the x, y, z lanes right one position; w is left in place.
inline dBigVector ShiftTripleRight() const
{
return dBigVector(m_z, m_x, m_y, m_w);
}
// Rotate the x, y, z lanes left one position; w is left in place.
inline dBigVector ShiftTripleLeft() const
{
return dBigVector(m_y, m_z, m_x, m_w);
}
// Per-lane logical (zero-fill) right shift of the integer lane views.
inline dBigVector ShiftRightLogical(dInt32 bits) const
{
return dBigVector(dInt64(dUnsigned64(m_ix) >> bits), dInt64(dUnsigned64(m_iy) >> bits), dInt64(dUnsigned64(m_iz) >> bits), dInt64(dUnsigned64(m_iw) >> bits))
}
inline static void Transpose4x4(dBigVector& dst0, dBigVector& dst1, dBigVector& dst2, dBigVector& dst3, const dBigVector& src0, const dBigVector& src1, const dBigVector& src2, const dBigVector& src3)
{
dBigVector tmp0(src0);
dBigVector tmp1(src1);
dBigVector tmp2(src2);
dBigVector tmp3(src3);
dst0 = dBigVector(tmp0.m_x, tmp1.m_x, tmp2.m_x, tmp3.m_x);
dst1 = dBigVector(tmp0.m_y, tmp1.m_y, tmp2.m_y, tmp3.m_y);
dst2 = dBigVector(tmp0.m_z, tmp1.m_z, tmp2.m_z, tmp3.m_z);
dst3 = dBigVector(tmp0.m_w, tmp1.m_w, tmp2.m_w, tmp3.m_w);
}
DG_CLASS_ALLOCATOR(allocator)
// Overlapping views of the 256-bit payload: NEON 64-bit register pairs
// (only on DG_ARCH_NEON_64 builds), raw 64-bit integers, named double
// lanes, and named integer lanes.
union
{
#if DG_ARCH >= DG_ARCH_NEON_64
struct {
float64x2_t m_xy;
float64x2_t m_zw;
};
struct {
int64x2_t m_ixy;
int64x2_t m_izw;
};
#endif
dInt64 m_i[4];
struct
{
dFloat64 m_x;
dFloat64 m_y;
dFloat64 m_z;
dFloat64 m_w;
};
struct
{
dInt64 m_ix;
dInt64 m_iy;
dInt64 m_iz;
dInt64 m_iw;
};
};
// Shared constants; storage is provided by the core library.
D_CORE_API static dBigVector m_zero;
D_CORE_API static dBigVector m_one;
D_CORE_API static dBigVector m_wOne;
D_CORE_API static dBigVector m_half;
D_CORE_API static dBigVector m_two;
D_CORE_API static dBigVector m_three;
D_CORE_API static dBigVector m_negOne;
D_CORE_API static dBigVector m_xMask;
D_CORE_API static dBigVector m_yMask;
D_CORE_API static dBigVector m_zMask;
D_CORE_API static dBigVector m_wMask;
D_CORE_API static dBigVector m_xyzwMask;
D_CORE_API static dBigVector m_signMask;
D_CORE_API static dBigVector m_triplexMask;
} DG_GCC_VECTOR_ALIGMENT;
#endif
// *****************************************************************************************
//
// 4 x 1 single precision vector class declaration
//
// *****************************************************************************************
#ifdef D_NEWTON_USE_DOUBLE
#define dVector dBigVector
#else
class dBigVector;
D_MSV_NEWTON_ALIGN_16
// 4 x 1 single-precision vector backed by an ARM NEON register.
// Lanes are (x, y, z, w); comparison/logical operators return per-lane bit
// masks the way SIMD compare instructions do. Fixes applied: operator+=
// used vsubq_f32 (so a += b subtracted), and the dBigVector conversion
// constructor contained a mangled address-of expression ("&copy" had been
// turned into an HTML entity) that could not compile.
class dVector
{
	public:
	D_OPERATOR_NEW_AND_DELETE
	// Lanes are deliberately left uninitialized.
	inline dVector()
	{
	}
	// Broadcast a scalar into all four lanes.
	inline dVector(dFloat32 val)
		:m_type(vmovq_n_f32(val))
	{
	}
	inline dVector(const dVector& v)
		:m_type(v.m_type)
	{
	}
	// Wrap a raw NEON register.
	inline dVector(const float32x4_t type)
		:m_type(type)
	{
	}
	// Load four contiguous floats.
	inline dVector(const dFloat32* const ptr)
		//: m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w(ptr[3])
		:m_type(vld1q_f32 (ptr))
	{
		dAssert(dCheckVector((*this)));
	}
	// Gather load: lane i comes from baseAddr[index[i]].
	inline dVector(const dFloat32* const baseAddr, const dInt32* const index)
		:m_x(baseAddr[index[0]])
		,m_y(baseAddr[index[1]])
		,m_z(baseAddr[index[2]])
		,m_w(baseAddr[index[3]])
	{
	}
#ifndef D_NEWTON_USE_DOUBLE
	// Narrowing load from four contiguous doubles.
	inline dVector(const dFloat64* const ptr)
		:m_x(dFloat32(ptr[0]))
		,m_y(dFloat32(ptr[1]))
		,m_z(dFloat32(ptr[2]))
		,m_w(dFloat32(ptr[3]))
	{
	}
#endif
	inline dVector(dFloat32 x, dFloat32 y, dFloat32 z, dFloat32 w)
		:m_x(x), m_y(y), m_z(z), m_w(w)
	{
		dAssert(dCheckVector((*this)));
	}
	// Reinterpret four 32-bit integer patterns as the float lanes
	// (used to build bit masks such as m_signMask).
	inline dVector(dInt32 ix, dInt32 iy, dInt32 iz, dInt32 iw)
		:m_x(*((dFloat32*)&ix)), m_y(*((dFloat32*)&iy)), m_z(*((dFloat32*)&iz)), m_w(*((dFloat32*)&iw))
	{
	}
#ifndef D_NEWTON_USE_DOUBLE
	// Narrowing conversion from the double-precision vector.
	// (fixed: "&copy" had been mangled into an HTML entity)
	inline dVector(const dBigVector& copy)
		:m_x(dFloat32(((dFloat64*)&copy)[0]))
		,m_y(dFloat32(((dFloat64*)&copy)[1]))
		,m_z(dFloat32(((dFloat64*)&copy)[2]))
		,m_w(dFloat32(((dFloat64*)&copy)[3]))
	{
		dAssert(dCheckVector((*this)));
	}
#endif
	// First (x) lane as a scalar.
	inline dFloat32 GetScalar() const
	{
		return m_x;
	}
	// Store the four lanes to contiguous memory.
	inline void Store(dFloat32* const dst) const
	{
		vst1q_f32(dst, m_type);
	}
	inline dVector BroadcastX() const
	{
		return dVector(m_x);
	}
	inline dVector BroadcastY() const
	{
		return dVector(m_y);
	}
	inline dVector BroadcastZ() const
	{
		return dVector(m_z);
	}
	inline dVector BroadcastW() const
	{
		return dVector(m_w);
	}
	// Indexed lane access, 0..3 == x..w.
	inline dFloat32& operator[] (dInt32 i)
	{
		dAssert(i < 4);
		dAssert(i >= 0);
		return (&m_x)[i];
	}
	inline const dFloat32& operator[] (dInt32 i) const
	{
		dAssert(i < 4);
		dAssert(i >= 0);
		return (&m_x)[i];
	}
	inline dVector operator+ (const dVector& A) const
	{
		return vaddq_f32(m_type, A.m_type);
	}
	inline dVector operator- (const dVector& A) const
	{
		return vsubq_f32(m_type, A.m_type);
	}
	inline dVector operator* (const dVector& A) const
	{
		return vmulq_f32(m_type, A.m_type);
	}
	inline dVector& operator+= (const dVector& A)
	{
		// fixed: previously used vsubq_f32, which made += subtract
		return (*this = vaddq_f32(m_type, A.m_type));
	}
	inline dVector& operator-= (const dVector& A)
	{
		return (*this = vsubq_f32(m_type, A.m_type));
	}
	inline dVector& operator*= (const dVector& A)
	{
		return (*this = vmulq_f32(m_type, A.m_type));
	}
	// this + A * B, fused per lane.
	inline dVector MulAdd(const dVector& A, const dVector& B) const
	{
		//return *this + A * B;
		//return vfmaq_f32(A.m_type, B.m_type, m_type);
		return vmlaq_f32(m_type, A.m_type, B.m_type);
	}
	// this - A * B, fused per lane.
	inline dVector MulSub(const dVector& A, const dVector& B) const
	{
		//return *this - A * B;
		return vmlsq_f32(m_type, A.m_type, B.m_type);
	}
	// Sum of all four lanes, broadcast to every lane.
	inline dVector AddHorizontal() const
	{
		return dVector(m_x + m_y + m_z + m_w);
		//float32x2_t temp = vpadd_f32(vget_low_f32(m_type), vget_low_f32(m_type));
		//temp = vadd_f32(temp, vget_high_f32(m_type));
		//return vget_lane_f32(temp, 0);
	}
	// Multiply every lane (including w) by a scalar.
	inline dVector Scale(dFloat32 scale) const
	{
		return dVector(m_x * scale, m_y * scale, m_z * scale, m_w * scale);
	}
	// return cross product
	// 3d cross product; the result keeps this vector's w lane.
	inline dVector CrossProduct(const dVector& B) const
	{
		return dVector(m_y * B.m_z - m_z * B.m_y,
			m_z * B.m_x - m_x * B.m_z,
			m_x * B.m_y - m_y * B.m_x, m_w);
	}
	// 4d cross product of three vectors via cofactor expansion of the
	// 4x4 matrix [this; A; B; 1].
	inline dVector CrossProduct(const dVector& A, const dVector& B) const
	{
		dFloat32 cofactor[3][3];
		dFloat32 array[4][4];
		const dVector& me = *this;
		for (dInt32 i = 0; i < 4; i++)
		{
			array[0][i] = me[i];
			array[1][i] = A[i];
			array[2][i] = B[i];
			array[3][i] = dFloat32(1.0f);
		}
		dVector normal;
		dFloat32 sign = dFloat32(-1.0f);
		for (dInt32 i = 0; i < 4; i++)
		{
			// build the 3x3 cofactor of column i
			for (dInt32 j = 0; j < 3; j++)
			{
				dInt32 k0 = 0;
				for (dInt32 k = 0; k < 4; k++)
				{
					if (k != i)
					{
						cofactor[j][k0] = array[j][k];
						k0++;
					}
				}
			}
			dFloat32 x = cofactor[0][0] * (cofactor[1][1] * cofactor[2][2] - cofactor[1][2] * cofactor[2][1]);
			dFloat32 y = cofactor[0][1] * (cofactor[1][2] * cofactor[2][0] - cofactor[1][0] * cofactor[2][2]);
			dFloat32 z = cofactor[0][2] * (cofactor[1][0] * cofactor[2][1] - cofactor[1][1] * cofactor[2][0]);
			dFloat32 det = x + y + z;
			normal[i] = sign * det;
			sign *= dFloat32(-1.0f);
		}
		return normal;
	}
	// Truncate each lane toward -infinity, storing integer bit patterns.
	inline dVector GetInt() const
	{
		return dVector(dInt32(dFloor(m_x)), dInt32(dFloor(m_y)), dInt32(dFloor(m_z)), dInt32(dFloor(m_w)));
	}
	// Per-lane zero test on the raw bit patterns: all-bits-clear (+0.0f)
	// yields -1.0f, anything else 1.0f.
	inline dVector TestZero() const
	{
		const dInt32* const a = (dInt32*)&m_x;
		return dVector((a[0] == 0) ? dFloat32(-1.0f) : dFloat32(1.0f),
			(a[1] == 0) ? dFloat32(-1.0f) : dFloat32(1.0f),
			(a[2] == 0) ? dFloat32(-1.0f) : dFloat32(1.0f),
			(a[3] == 0) ? dFloat32(-1.0f) : dFloat32(1.0f));
	}
	inline dVector Floor() const
	{
		return dVector(dFloor(m_x), dFloor(m_y), dFloor(m_z), dFloor(m_w));
	}
	// Full 4-lane dot product (w participates), broadcast to all lanes.
	inline dVector DotProduct(const dVector &A) const
	{
		return dVector(m_x * A.m_x + m_y * A.m_y + m_z * A.m_z + m_w * A.m_w);
	}
	inline dVector Reciproc() const
	{
		return dVector(dFloat32(1.0f) / m_x, dFloat32(1.0f) / m_y, dFloat32(1.0f) / m_z, dFloat32(1.0f) / m_w);
	}
	inline dVector Sqrt() const
	{
		return dVector(dSqrt(m_x), dSqrt(m_y), dSqrt(m_z), dSqrt(m_w));
	}
	inline dVector InvSqrt() const
	{
		return dVector(dRsqrt(m_x), dRsqrt(m_y), dRsqrt(m_z), dRsqrt(m_w));
	}
	// 1 / |v| broadcast; uses the 4-lane dot product, so w contributes.
	inline dVector InvMagSqrt() const
	{
		return dVector(dRsqrt(DotProduct(*this).m_x));
	}
	// Unit-length vector; asserts w == 0 so it cannot skew the magnitude.
	inline dVector Normalize() const
	{
		dAssert(m_w == dFloat32(0.0f));
		const dVector& me = *this;
		return me * InvMagSqrt();
	}
	dVector Abs() const
	{
		return vabsq_f32(m_type);
	}
	// Largest of the four lanes.
	dFloat32 GetMax() const
	{
		return dMax(dMax(m_x, m_y), dMax(m_z, m_w));
	}
	dVector GetMax(const dVector& data) const
	{
		return vmaxq_f32(m_type, data.m_type);
	}
	dVector GetMin(const dVector& data) const
	{
		return vminq_f32(m_type, data.m_type);
	}
	// relational operators
	// SIMD compares: each lane becomes an all-ones/all-zeros bit mask.
	// NOTE(review): the float compare intrinsics are fed the integer view
	// (m_typeInt); this mirrors the original code and relies on the
	// toolchain's NEON typing — confirm against the project's compilers.
	inline dVector operator== (const dVector& data) const
	{
		return vceqq_f32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator> (const dVector& data) const
	{
		return vcgtq_f32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator< (const dVector& data) const
	{
		return vcltq_f32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator>= (const dVector& data) const
	{
		return vcgeq_f32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator<= (const dVector& data) const
	{
		return vcleq_f32(m_typeInt, data.m_typeInt);
	}
	// logical operations
	// Bitwise operators act on the integer view of the lanes.
	inline dVector operator& (const dVector& data) const
	{
		return vandq_u32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator| (const dVector& data) const
	{
		return vorrq_u32(m_typeInt, data.m_typeInt);
	}
	inline dVector operator^ (const dVector& data) const
	{
		return veorq_u32(m_typeInt, data.m_typeInt);
	}
	// this & ~data, lane by lane.
	inline dVector AndNot(const dVector& data) const
	{
		return vbicq_u32(m_typeInt, data.m_typeInt);
	}
	// Per-lane select: lanes where mask is all-ones come from data,
	// the rest from this vector.
	inline dVector Select(const dVector& data, const dVector& mask) const
	{
		// (((b ^ a) & mask)^a)
		return (*this) ^ (mask & (data ^ (*this)));
	}
	// Pack the four sign bits into bits 0..3 (x -> bit 0, w -> bit 3).
	inline dInt32 GetSignMask() const
	{
		const dInt32* const a = (dInt32*)&m_x;
		return (((a[0] & 0x80000000) ? 1 : 0) | ((a[1] & 0x80000000) ? 2 : 0) | ((a[2] & 0x80000000) ? 4 : 0) | ((a[3] & 0x80000000) ? 8 : 0));
	}
	// Rotate all four lanes right one position.
	inline dVector ShiftRight() const
	{
		return dVector(m_w, m_x, m_y, m_z);
	}
	// Rotate the x, y, z lanes right one position; w stays put.
	inline dVector ShiftTripleRight() const
	{
		return dVector(m_z, m_x, m_y, m_w);
	}
	// Rotate the x, y, z lanes left one position; w stays put.
	inline dVector ShiftTripleLeft() const
	{
		return dVector(m_y, m_z, m_x, m_w);
	}
	// Per-lane logical (zero-fill) right shift of the integer lane views.
	inline dVector ShiftRightLogical(dInt32 bits) const
	{
		return dVector(dInt32(dUnsigned32(m_ix) >> bits), dInt32(dUnsigned32(m_iy) >> bits), dInt32(dUnsigned32(m_iz) >> bits), dInt32(dUnsigned32(m_iw) >> bits));
	}
	// 4x4 matrix transpose using NEON interleave instructions.
	inline static void Transpose4x4(dVector& dst0, dVector& dst1, dVector& dst2, dVector& dst3, const dVector& src0, const dVector& src1, const dVector& src2, const dVector& src3)
	{
		float32x4x2_t vtrn1 = vzipq_f32(src0.m_type, src2.m_type);
		float32x4x2_t vtrn2 = vzipq_f32(src1.m_type, src3.m_type);
		float32x4x2_t res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
		float32x4x2_t res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
		dst0.m_type = res1.val[0];
		dst1.m_type = res1.val[1];
		dst2.m_type = res2.val[0];
		dst3.m_type = res2.val[1];
	}
	// Overlapping views of the 128-bit payload: float array, int array,
	// NEON float/int registers, named float lanes, named integer lanes.
	union
	{
		dFloat32 m_f[4];
		dInt32 m_i[4];
		float32x4_t m_type;
		uint32x4_t m_typeInt;
		struct
		{
			dFloat32 m_x;
			dFloat32 m_y;
			dFloat32 m_z;
			dFloat32 m_w;
		};
		struct
		{
			dInt32 m_ix;
			dInt32 m_iy;
			dInt32 m_iz;
			dInt32 m_iw;
		};
	};
	// Shared constants; storage is provided by the core library.
	D_CORE_API static dVector m_zero;
	D_CORE_API static dVector m_one;
	D_CORE_API static dVector m_wOne;
	D_CORE_API static dVector m_half;
	D_CORE_API static dVector m_two;
	D_CORE_API static dVector m_three;
	D_CORE_API static dVector m_negOne;
	D_CORE_API static dVector m_xMask;
	D_CORE_API static dVector m_yMask;
	D_CORE_API static dVector m_zMask;
	D_CORE_API static dVector m_wMask;
	D_CORE_API static dVector m_xyzwMask;
	D_CORE_API static dVector m_epsilon;
	D_CORE_API static dVector m_signMask;
	D_CORE_API static dVector m_triplexMask;
} D_GCC_NEWTON_ALIGN_32 ;
#endif
D_MSV_NEWTON_ALIGN_32
// 4 x 1 double-precision vector (portable, scalar implementation).
// Lanes are (x, y, z, w); comparison/logical operators produce per-lane
// all-ones/all-zeros bit masks, mirroring the SIMD dVector class. Fixes
// applied: ShiftRight() was declared to return single-precision dVector
// (silently narrowing every lane), and MulSub() took a single-precision
// dVector first argument; both now use dBigVector, matching their
// siblings. Callers passing/receiving dVector still compile through the
// existing converting constructors.
class dBigVector
{
	public:
	D_OPERATOR_NEW_AND_DELETE
	// Lanes are deliberately left uninitialized.
	inline dBigVector()
	{
	}
	// Broadcast a scalar into all four lanes.
	inline dBigVector(dFloat64 val)
		:m_x(val), m_y(val), m_z(val), m_w(val)
	{
	}
	inline dBigVector(const dBigVector& v)
		:m_x(v.m_x), m_y(v.m_y), m_z(v.m_z), m_w(v.m_w)
	{
	}
#ifndef D_NEWTON_USE_DOUBLE
	// Widening conversion from the single-precision vector.
	inline dBigVector(const dVector& v)
		:m_x(v.m_x), m_y(v.m_y), m_z(v.m_z), m_w(v.m_w)
	{
	}
	// Load x, y, z from three contiguous floats; w is set to zero.
	inline dBigVector(const dFloat32* const ptr)
		:m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w(dFloat32(0.0f))
	{
		dAssert(dCheckVector((*this)));
	}
#endif
	// Load four contiguous doubles.
	inline dBigVector(const dFloat64* const ptr)
		:m_x(ptr[0]), m_y(ptr[1]), m_z(ptr[2]), m_w(ptr[3])
	{
		dAssert(dCheckVector((*this)));
	}
	inline dBigVector(dFloat64 x, dFloat64 y, dFloat64 z, dFloat64 w)
		:m_x(x), m_y(y), m_z(z), m_w(w)
	{
		dAssert(dCheckVector((*this)));
	}
	// Integer constructors fill the integer lane views (bit patterns).
	inline dBigVector(dInt32 ix, dInt32 iy, dInt32 iz, dInt32 iw)
		:m_ix(ix), m_iy(iy), m_iz(iz), m_iw(iw)
	{
	}
	inline dBigVector(dInt64 ix, dInt64 iy, dInt64 iz, dInt64 iw)
		:m_ix(ix), m_iy(iy), m_iz(iz), m_iw(iw)
	{
	}
	// First (x) lane as a scalar.
	inline dFloat64 GetScalar() const
	{
		return m_x;
	}
	// Store the four lanes to contiguous memory.
	inline void Store(dFloat64* const dst) const
	{
		dst[0] = m_x;
		dst[1] = m_y;
		dst[2] = m_z;
		dst[3] = m_w;
	}
	inline dBigVector BroadcastX() const
	{
		return dBigVector(m_x);
	}
	inline dBigVector BroadcastY() const
	{
		return dBigVector(m_y);
	}
	inline dBigVector BroadcastZ() const
	{
		return dBigVector(m_z);
	}
	inline dBigVector BroadcastW() const
	{
		return dBigVector(m_w);
	}
	// Indexed lane access, 0..3 == x..w.
	inline dFloat64& operator[] (dInt32 i)
	{
		dAssert(i < 4);
		dAssert(i >= 0);
		return (&m_x)[i];
	}
	inline const dFloat64& operator[] (dInt32 i) const
	{
		dAssert(i < 4);
		dAssert(i >= 0);
		return (&m_x)[i];
	}
	inline dBigVector operator+ (const dBigVector& A) const
	{
		return dBigVector(m_x + A.m_x, m_y + A.m_y, m_z + A.m_z, m_w + A.m_w);
	}
	inline dBigVector operator- (const dBigVector& A) const
	{
		return dBigVector(m_x - A.m_x, m_y - A.m_y, m_z - A.m_z, m_w - A.m_w);
	}
	inline dBigVector operator* (const dBigVector& A) const
	{
		return dBigVector(m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, m_w * A.m_w);
	}
	inline dBigVector& operator+= (const dBigVector& A)
	{
		return (*this = dBigVector(m_x + A.m_x, m_y + A.m_y, m_z + A.m_z, m_w + A.m_w));
	}
	inline dBigVector& operator-= (const dBigVector& A)
	{
		return (*this = dBigVector(m_x - A.m_x, m_y - A.m_y, m_z - A.m_z, m_w - A.m_w));
	}
	inline dBigVector& operator*= (const dBigVector& A)
	{
		return (*this = dBigVector(m_x * A.m_x, m_y * A.m_y, m_z * A.m_z, m_w * A.m_w));
	}
	// this + A * B, per lane.
	inline dBigVector MulAdd(const dBigVector& A, const dBigVector& B) const
	{
		return *this + A * B;
	}
	// this - A * B, per lane.
	// (fixed: A was declared as single-precision dVector, forcing a
	// narrowing conversion inside a double-precision class)
	inline dBigVector MulSub(const dBigVector& A, const dBigVector& B) const
	{
		return *this - A * B;
	}
	// Sum of all four lanes, broadcast to every lane.
	inline dBigVector AddHorizontal() const
	{
		return dBigVector(m_x + m_y + m_z + m_w);
	}
	// Multiply every lane (including w) by a scalar.
	inline dBigVector Scale(dFloat64 scale) const
	{
		return dBigVector(m_x * scale, m_y * scale, m_z * scale, m_w * scale);
	}
	// return cross product
	// 3d cross product; the result keeps this vector's w lane.
	inline dBigVector CrossProduct(const dBigVector& B) const
	{
		return dBigVector(m_y * B.m_z - m_z * B.m_y, m_z * B.m_x - m_x * B.m_z, m_x * B.m_y - m_y * B.m_x, m_w);
	}
	// 4d cross product of three vectors via cofactor expansion of the
	// 4x4 matrix [this; A; B; 1].
	inline dBigVector CrossProduct(const dBigVector& A, const dBigVector& B) const
	{
		dFloat64 cofactor[3][3];
		dFloat64 array[4][4];
		const dBigVector& me = *this;
		for (dInt32 i = 0; i < 4; i++)
		{
			array[0][i] = me[i];
			array[1][i] = A[i];
			array[2][i] = B[i];
			array[3][i] = dFloat64(1.0f);
		}
		dBigVector normal;
		dFloat64 sign = dFloat64(-1.0f);
		for (dInt32 i = 0; i < 4; i++)
		{
			// build the 3x3 cofactor of column i
			for (dInt32 j = 0; j < 3; j++)
			{
				dInt32 k0 = 0;
				for (dInt32 k = 0; k < 4; k++)
				{
					if (k != i)
					{
						cofactor[j][k0] = array[j][k];
						k0++;
					}
				}
			}
			dFloat64 x = cofactor[0][0] * (cofactor[1][1] * cofactor[2][2] - cofactor[1][2] * cofactor[2][1]);
			dFloat64 y = cofactor[0][1] * (cofactor[1][2] * cofactor[2][0] - cofactor[1][0] * cofactor[2][2]);
			dFloat64 z = cofactor[0][2] * (cofactor[1][0] * cofactor[2][1] - cofactor[1][1] * cofactor[2][0]);
			dFloat64 det = x + y + z;
			normal[i] = sign * det;
			sign *= dFloat64(-1.0f);
		}
		return normal;
	}
	// Truncate each lane toward -infinity, storing 64-bit integer patterns.
	inline dBigVector GetInt() const
	{
		return dBigVector(dInt64(floor(m_x)), dInt64(floor(m_y)), dInt64(floor(m_z)), dInt64(floor(m_w)));
	}
	// Per-lane zero test on the raw 64-bit patterns: all-bits-clear (+0.0)
	// yields -1.0, anything else 1.0.
	inline dBigVector TestZero() const
	{
		const dInt64* const a = (dInt64*)&m_x;
		return dBigVector((a[0] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
			(a[1] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
			(a[2] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f),
			(a[3] == 0) ? dFloat64(-1.0f) : dFloat64(1.0f));
	}
	inline dBigVector Floor() const
	{
		return dBigVector(floor(m_x), floor(m_y), floor(m_z), floor(m_w));
	}
	// Full 4-lane dot product (w participates), broadcast to all lanes.
	inline dBigVector DotProduct(const dBigVector &A) const
	{
		return dBigVector(m_x * A.m_x + m_y * A.m_y + m_z * A.m_z + m_w * A.m_w);
	}
	inline dBigVector Reciproc() const
	{
		return dBigVector(dFloat64(1.0f) / m_x, dFloat64(1.0f) / m_y, dFloat64(1.0f) / m_z, dFloat64(1.0f) / m_w);
	}
	inline dBigVector Sqrt() const
	{
		return dBigVector(sqrt(m_x), sqrt(m_y), sqrt(m_z), sqrt(m_w));
	}
	inline dBigVector InvSqrt() const
	{
		return dBigVector(dFloat64(1.0f) / sqrt(m_x), dFloat64(1.0f) / sqrt(m_y), dFloat64(1.0f) / sqrt(m_z), dFloat64(1.0f) / sqrt(m_w));
	}
	// 1 / |v| broadcast; uses the 4-lane dot product, so w contributes.
	inline dBigVector InvMagSqrt() const
	{
		return dBigVector(dFloat64(1.0f) / sqrt(DotProduct(*this).m_x));
	}
	// Unit-length vector; asserts w == 0 so it cannot skew the magnitude.
	inline dBigVector Normalize() const
	{
		dAssert(m_w == dFloat64(0.0f));
		//const dBigVector& me = *this;
		//return *this * dBigVector (dgRsqrt(DotProduct(*this).m_x));
		return *this * InvMagSqrt();
	}
	dBigVector Abs() const
	{
		return dBigVector((m_x > dFloat64(0.0f)) ? m_x : -m_x,
			(m_y > dFloat64(0.0f)) ? m_y : -m_y,
			(m_z > dFloat64(0.0f)) ? m_z : -m_z,
			(m_w > dFloat64(0.0f)) ? m_w : -m_w);
	}
	// Largest of the four lanes.
	dFloat64 GetMax() const
	{
		return dMax(dMax(m_x, m_y), dMax(m_z, m_w));
	}
	dBigVector GetMax(const dBigVector& data) const
	{
		return dBigVector((m_x > data.m_x) ? m_x : data.m_x,
			(m_y > data.m_y) ? m_y : data.m_y,
			(m_z > data.m_z) ? m_z : data.m_z,
			(m_w > data.m_w) ? m_w : data.m_w);
	}
	dBigVector GetMin(const dBigVector& data) const
	{
		return dBigVector((m_x < data.m_x) ? m_x : data.m_x,
			(m_y < data.m_y) ? m_y : data.m_y,
			(m_z < data.m_z) ? m_z : data.m_z,
			(m_w < data.m_w) ? m_w : data.m_w);
	}
	// relational operators
	// Each lane becomes all-ones (dInt64(-1)) when the predicate holds,
	// all-zeros otherwise, so results compose with the bitwise operators.
	inline dBigVector operator== (const dBigVector& data) const
	{
		return dBigVector((m_x == data.m_x) ? dInt64(-1) : dInt64(0),
			(m_y == data.m_y) ? dInt64(-1) : dInt64(0),
			(m_z == data.m_z) ? dInt64(-1) : dInt64(0),
			(m_w == data.m_w) ? dInt64(-1) : dInt64(0));
	}
	inline dBigVector operator> (const dBigVector& data) const
	{
		return dBigVector((m_x > data.m_x) ? dInt64(-1) : dInt64(0),
			(m_y > data.m_y) ? dInt64(-1) : dInt64(0),
			(m_z > data.m_z) ? dInt64(-1) : dInt64(0),
			(m_w > data.m_w) ? dInt64(-1) : dInt64(0));
	}
	inline dBigVector operator< (const dBigVector& data) const
	{
		return dBigVector((m_x < data.m_x) ? dInt64(-1) : dInt64(0),
			(m_y < data.m_y) ? dInt64(-1) : dInt64(0),
			(m_z < data.m_z) ? dInt64(-1) : dInt64(0),
			(m_w < data.m_w) ? dInt64(-1) : dInt64(0));
	}
	inline dBigVector operator>= (const dBigVector& data) const
	{
		return dBigVector((m_x >= data.m_x) ? dInt64(-1) : dInt64(0),
			(m_y >= data.m_y) ? dInt64(-1) : dInt64(0),
			(m_z >= data.m_z) ? dInt64(-1) : dInt64(0),
			(m_w >= data.m_w) ? dInt64(-1) : dInt64(0));
	}
	inline dBigVector operator<= (const dBigVector& data) const
	{
		return dBigVector((m_x <= data.m_x) ? dInt64(-1) : dInt64(0),
			(m_y <= data.m_y) ? dInt64(-1) : dInt64(0),
			(m_z <= data.m_z) ? dInt64(-1) : dInt64(0),
			(m_w <= data.m_w) ? dInt64(-1) : dInt64(0));
	}
	// logical operations
	// Bitwise operators act on the raw 64-bit lane patterns via aliasing.
	inline dBigVector operator& (const dBigVector& data) const
	{
		const dInt64* const a = (dInt64*)&m_x;
		const dInt64* const b = (dInt64*)&data.m_x;
		return dBigVector(a[0] & b[0], a[1] & b[1], a[2] & b[2], a[3] & b[3]);
	}
	inline dBigVector operator| (const dBigVector& data) const
	{
		const dInt64* const a = (dInt64*)&m_x;
		const dInt64* const b = (dInt64*)&data.m_x;
		return dBigVector(a[0] | b[0], a[1] | b[1], a[2] | b[2], a[3] | b[3]);
	}
	inline dBigVector operator^ (const dBigVector& data) const
	{
		const dInt64* const a = (dInt64*)&m_x;
		const dInt64* const b = (dInt64*)&data.m_x;
		return dBigVector(a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]);
	}
	// this & ~data, lane by lane.
	inline dBigVector AndNot(const dBigVector& data) const
	{
		const dInt64* const a = (dInt64*)&m_x;
		const dInt64* const b = (dInt64*)&data.m_x;
		return dBigVector(a[0] & ~b[0], a[1] & ~b[1], a[2] & ~b[2], a[3] & ~b[3]);
	}
	// Per-lane select: lanes where mask is all-ones come from data,
	// the rest from this vector.
	inline dBigVector Select(const dBigVector& data, const dBigVector& mask) const
	{
		// (((b ^ a) & mask)^a)
		return (*this) ^ (mask & (data ^ (*this)));
	}
	// Pack the four sign bits into bits 0..3 (x -> bit 0, w -> bit 3).
	inline dInt32 GetSignMask() const
	{
		const dInt64* const a = (dInt64*)&m_x;
		return (((a[0] >> 63) ? 1 : 0) | ((a[1] >> 63) ? 2 : 0) | ((a[2] >> 63) ? 4 : 0) | ((a[3] >> 63) ? 8 : 0));
	}
	// Rotate all four lanes right one position.
	// (fixed: the return type was single-precision dVector, which
	// silently narrowed every lane; all sibling shifts return dBigVector)
	inline dBigVector ShiftRight() const
	{
		return dBigVector(m_w, m_x, m_y, m_z);
	}
	// Rotate the x, y, z lanes right one position; w stays put.
	inline dBigVector ShiftTripleRight() const
	{
		return dBigVector(m_z, m_x, m_y, m_w);
	}
	// Rotate the x, y, z lanes left one position; w stays put.
	inline dBigVector ShiftTripleLeft() const
	{
		return dBigVector(m_y, m_z, m_x, m_w);
	}
	// Per-lane logical (zero-fill) right shift of the integer lane views.
	inline dBigVector ShiftRightLogical(dInt32 bits) const
	{
		return dBigVector(dInt64(dUnsigned64(m_ix) >> bits), dInt64(dUnsigned64(m_iy) >> bits), dInt64(dUnsigned64(m_iz) >> bits), dInt64(dUnsigned64(m_iw) >> bits));
	}
	// Scalar 4x4 matrix transpose.
	inline static void Transpose4x4(dBigVector& dst0, dBigVector& dst1, dBigVector& dst2, dBigVector& dst3, const dBigVector& src0, const dBigVector& src1, const dBigVector& src2, const dBigVector& src3)
	{
		dBigVector tmp0(src0);
		dBigVector tmp1(src1);
		dBigVector tmp2(src2);
		dBigVector tmp3(src3);
		dst0 = dBigVector(tmp0.m_x, tmp1.m_x, tmp2.m_x, tmp3.m_x);
		dst1 = dBigVector(tmp0.m_y, tmp1.m_y, tmp2.m_y, tmp3.m_y);
		dst2 = dBigVector(tmp0.m_z, tmp1.m_z, tmp2.m_z, tmp3.m_z);
		dst3 = dBigVector(tmp0.m_w, tmp1.m_w, tmp2.m_w, tmp3.m_w);
	}
	// Overlapping views of the 256-bit payload: raw 64-bit integers,
	// named double lanes, and named integer lanes.
	union
	{
		dInt64 m_i[4];
		struct
		{
			dFloat64 m_x;
			dFloat64 m_y;
			dFloat64 m_z;
			dFloat64 m_w;
		};
		struct
		{
			dInt64 m_ix;
			dInt64 m_iy;
			dInt64 m_iz;
			dInt64 m_iw;
		};
	};
	// Shared constants; storage is provided by the core library.
	D_CORE_API static dBigVector m_zero;
	D_CORE_API static dBigVector m_one;
	D_CORE_API static dBigVector m_wOne;
	D_CORE_API static dBigVector m_half;
	D_CORE_API static dBigVector m_two;
	D_CORE_API static dBigVector m_three;
	D_CORE_API static dBigVector m_negOne;
	D_CORE_API static dBigVector m_xMask;
	D_CORE_API static dBigVector m_yMask;
	D_CORE_API static dBigVector m_zMask;
	D_CORE_API static dBigVector m_wMask;
	D_CORE_API static dBigVector m_xyzwMask;
	D_CORE_API static dBigVector m_epsilon;
	D_CORE_API static dBigVector m_signMask;
	D_CORE_API static dBigVector m_triplexMask;
} D_GCC_NEWTON_ALIGN_32 ;
#endif | 24.710946 | 212 | 0.626091 | [
"vector"
] |
36c331be5d689104d0fdebde8fd627212bbb1509 | 9,930 | h | C | lib/common/zbuff.h | wqweto/zstd | 6425a48aa1f70db56136a63bdb2a669175a1daea | [
"BSD-3-Clause"
] | 2 | 2021-03-16T19:21:30.000Z | 2021-03-16T19:54:17.000Z | lib/common/zbuff.h | wqweto/zstd | 6425a48aa1f70db56136a63bdb2a669175a1daea | [
"BSD-3-Clause"
] | null | null | null | lib/common/zbuff.h | wqweto/zstd | 6425a48aa1f70db56136a63bdb2a669175a1daea | [
"BSD-3-Clause"
] | null | null | null | /**
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
/* ***************************************************************
* NOTES/WARNINGS
*****************************************************************/
/* The streaming API defined here will soon be deprecated by the
* new one in 'zstd.h'; consider migrating towards newer streaming
* API. See 'lib/README.md'.
*****************************************************************/
#ifndef ZSTD_BUFFERED_H_23987
#define ZSTD_BUFFERED_H_23987
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Dependencies
***************************************/
#include <stddef.h> /* size_t */
/* ***************************************************************
* Compiler specifics
*****************************************************************/
/* ZSTD_DLL_STDCALL :
* Provide the ability to use stdcall linkage on public interface when building a Windows DLL */
#if defined(_WIN32) && defined(ZSTD_DLL_STDCALL) && (ZSTD_DLL_STDCALL==1)
# define ZSTDLIB_STDCALL __stdcall
#else
# define ZSTDLIB_STDCALL
#endif
/* ZSTD_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL */
#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDLIB_API(T) __declspec(dllexport) T ZSTDLIB_STDCALL
#else
# define ZSTDLIB_API(T) T ZSTDLIB_STDCALL
#endif
/* *************************************
* Streaming functions
***************************************/
/* This is the easier "buffered" streaming API,
* using an internal buffer to lift all restrictions on user-provided buffers
* which can be any size, any place, for both input and output.
* ZBUFF and ZSTD are 100% interoperable,
* frames created by one can be decoded by the other one */
typedef struct ZBUFF_CCtx_s ZBUFF_CCtx;
ZSTDLIB_API(ZBUFF_CCtx*) ZBUFF_createCCtx(void);
ZSTDLIB_API(size_t) ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);
ZSTDLIB_API(size_t) ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
ZSTDLIB_API(size_t) ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_API(size_t) ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
ZSTDLIB_API(size_t) ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
ZSTDLIB_API(size_t) ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
/*-*************************************************
* Streaming compression - howto
*
* A ZBUFF_CCtx object is required to track streaming operation.
* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
* ZBUFF_CCtx objects can be reused multiple times.
*
* Start by initializing ZBUF_CCtx.
* Use ZBUFF_compressInit() to start a new compression operation.
* Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
*
* Use ZBUFF_compressContinue() repetitively to consume input stream.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data.
* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst .
* @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency)
* or an error code, which can be tested using ZBUFF_isError().
*
* At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush().
* The nb of bytes written into `dst` will be reported into *dstCapacityPtr.
* Note that the function cannot output more than *dstCapacityPtr,
* therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small.
* @return : nb of bytes still present into internal buffer (0 if it's empty)
* or an error code, which can be tested using ZBUFF_isError().
*
* ZBUFF_compressEnd() instructs to finish a frame.
* It will perform a flush and write frame epilogue.
* The epilogue is required for decoders to consider a frame completed.
* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
* In which case, call again ZBUFF_compressFlush() to complete the flush.
* @return : nb of bytes still present into internal buffer (0 if it's empty)
* or an error code, which can be tested using ZBUFF_isError().
*
* Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
* input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency)
* output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering.
* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering.
* **************************************************/
typedef struct ZBUFF_DCtx_s ZBUFF_DCtx;
ZSTDLIB_API(ZBUFF_DCtx*) ZBUFF_createDCtx(void);
ZSTDLIB_API(size_t) ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);
ZSTDLIB_API(size_t) ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
ZSTDLIB_API(size_t) ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_API(size_t) ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr);
/*-***************************************************************************
* Streaming decompression howto
*
* A ZBUFF_DCtx object is required to track streaming operations.
* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
* Use ZBUFF_decompressInit() to start a new decompression operation,
* or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFF_DCtx objects can be re-init multiple times.
*
* Use ZBUFF_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
* @return : 0 when a frame is completely decoded and fully flushed,
* 1 when there is still some data left within internal buffer to flush,
* >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency),
* or an error code, which can be tested using ZBUFF_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
* output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded.
* input : ZBUFF_recommendedDInSize == 128KB + 3;
* just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
/* *************************************
* Tool functions
***************************************/
ZSTDLIB_API(unsigned) ZBUFF_isError(size_t errorCode);
ZSTDLIB_API(const char*) ZBUFF_getErrorName(size_t errorCode);
/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
* These sizes are just hints, they tend to offer better latency */
ZSTDLIB_API(size_t) ZBUFF_recommendedCInSize(void);
ZSTDLIB_API(size_t) ZBUFF_recommendedCOutSize(void);
ZSTDLIB_API(size_t) ZBUFF_recommendedDInSize(void);
ZSTDLIB_API(size_t) ZBUFF_recommendedDOutSize(void);
#ifdef ZBUFF_STATIC_LINKING_ONLY
/* ====================================================================================
* The definitions in this section are considered experimental.
* They should never be used in association with a dynamic library, as they may change in the future.
* They are provided for advanced usages.
* Use them only in association with static linking.
* ==================================================================================== */
/*--- Dependency ---*/
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */
#include "zstd.h"
/*--- Custom memory allocator ---*/
/*! ZBUFF_createCCtx_advanced() :
* Create a ZBUFF compression context using external alloc and free functions */
ZSTDLIB_API(ZBUFF_CCtx*) ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);
/*! ZBUFF_createDCtx_advanced() :
* Create a ZBUFF decompression context using external alloc and free functions */
ZSTDLIB_API(ZBUFF_DCtx*) ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);
/*--- Advanced Streaming Initialization ---*/
ZSTDLIB_API(size_t) ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
const void* dict, size_t dictSize,
ZSTD_parameters params, U64 pledgedSrcSize);
#endif /* ZBUFF_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_BUFFERED_H_23987 */
| 49.65 | 158 | 0.676334 | [
"object"
] |
36c520f0a774526cc1ac9fad23dd4eb0b4f68e33 | 663 | h | C | HotDir/hd60/sortcont.h | vividos/OldStuff | dbfcce086d1101b576d99d25ef051efbd8dd117c | [
"BSD-2-Clause"
] | 1 | 2015-03-26T02:35:13.000Z | 2015-03-26T02:35:13.000Z | HotDir/hd60/sortcont.h | vividos/OldStuff | dbfcce086d1101b576d99d25ef051efbd8dd117c | [
"BSD-2-Clause"
] | null | null | null | HotDir/hd60/sortcont.h | vividos/OldStuff | dbfcce086d1101b576d99d25ef051efbd8dd117c | [
"BSD-2-Clause"
] | null | null | null | /* sortcont.h - a container with sorted content
see 'sortcont.h' for details
http://vividos.home.pages.de - vividos@asamnet.de
(c) 1998-1999 Michael Fink
*/
#ifndef __sortcont_h_
#define __sortcont_h_
#include "defs.h"
#include "containr.h"
// A sortable_object can be ordered relative to other sortable_objects;
// derive from it and override is_bigger_than() to define the ordering.
class sortable_object:public object { // the sortable object class
public:
  sortable_object(); // constructor
  virtual bool is_bigger_than(sortable_object *so);
  // Ordering predicate: returns true when this object is "bigger" than 'so'.
  // Presumably used by sorted_container::sorted_insert() to locate the
  // insertion point -- confirm in the implementation file.
};
// A container whose elements are inserted at their sorted position, as
// determined by sortable_object::is_bigger_than().
class sorted_container:public container { // the sorted container
public:
  sorted_container(); // constructor
  void sorted_insert(sortable_object *so); // inserts 'so' at its sorted position
};
#endif
| 22.862069 | 72 | 0.742081 | [
"object"
] |
36c9b96434f7e776b67d791859bdc8e090ad0505 | 3,850 | h | C | opengl_06/src/camera.h | Secgode/OpengGL_exercise | e3a8287eae6a87fe1fcb3a3347e76f4209f3d7da | [
"MIT"
] | null | null | null | opengl_06/src/camera.h | Secgode/OpengGL_exercise | e3a8287eae6a87fe1fcb3a3347e76f4209f3d7da | [
"MIT"
] | null | null | null | opengl_06/src/camera.h | Secgode/OpengGL_exercise | e3a8287eae6a87fe1fcb3a3347e76f4209f3d7da | [
"MIT"
] | null | null | null | //
// Created by 二狗子 on 2020-02-13.
//
#ifndef CAMERA_H
#define CAMERA_H
#include <glad/glad.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <vector>
// Define options for camera movement
// ----------------------------------
enum Camera_Movement {
FORWARD,
BACKWARD,
LEFT,
RIGHT,
};
// Default camera values
// ----------------------------------
const float YAW = -90.0f;
const float PITCH = 0.0f;
const float SPEED = 2.5f;
const float SENSITIVITY = 0.1f;
const float ZOOM = 45.0f;
// Abstract camera class
// ----------------------------------
// An Euler-angle, FPS-style camera. Keyboard input moves the camera on the
// XZ ground plane, mouse movement updates yaw/pitch, and the scroll wheel
// adjusts Zoom (the field of view). GetViewMatrix() builds the lookAt
// matrix from the derived Position/Front/Up vectors.
class Camera
{
public:
    // Camera attributes
    glm::vec3 Position;   // camera location in world space
    glm::vec3 Front;      // normalized viewing direction
    glm::vec3 Up;         // camera-local up vector (derived)
    glm::vec3 Right;      // camera-local right vector (derived)
    glm::vec3 WorldUp;    // world up used to re-derive Right/Up
    // Euler Angles (degrees)
    float Yaw;
    float Pitch;
    // Camera options
    float MovementSpeed;
    float MouseSensitivity;
    float Zoom;           // field of view in degrees, kept within [1, 45]

    // Constructor with vectors.
    explicit Camera (glm::vec3 position = glm::vec3(0.0f), glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f), float yaw = YAW, float pitch = PITCH) :
        Front (glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
    {
        Position = position;
        WorldUp = up;
        Yaw = yaw;
        Pitch = pitch;
        updateCameraVectors();
    }

    // Constructor with scalar values.
    Camera (float posX, float posY, float posZ, float upX, float upY, float upZ, float yaw, float pitch) :
        Front(glm::vec3(0.0f, 0.0f, -1.0f)), MovementSpeed(SPEED), MouseSensitivity(SENSITIVITY), Zoom(ZOOM)
    {
        Position = glm::vec3(posX, posY, posZ);
        WorldUp = glm::vec3(upX, upY, upZ);
        Yaw = yaw;
        Pitch = pitch;
        updateCameraVectors();
    }

    // Returns the view matrix calculated using Euler Angles & a LookAt matrix.
    glm::mat4 GetViewMatrix()
    {
        return glm::lookAt(Position, Position + Front, Up);
    }

    // Processes keyboard input given a movement direction and the frame's
    // delta time (so movement speed is frame-rate independent).
    void ProcessKeyboard (Camera_Movement direction, float deltaTime)
    {
        float velocity = MovementSpeed * deltaTime;
        if (FORWARD == direction)
            Position += Front * velocity;
        if (BACKWARD == direction)
            Position -= Front * velocity;
        if (LEFT == direction)
            Position -= Right * velocity;
        if (RIGHT == direction)
            Position += Right * velocity;
        // FPS-style constraint: keep the camera glued to the XZ ground plane.
        Position.y = 0.0f;
    }

    // Processes mouse movement (offsets in screen pixels). When
    // constrainPitch is set, pitch is clamped so the view cannot flip
    // past straight up/down.
    void ProcessMouseMovement (float xoffset, float yoffset, GLboolean constrainPitch = true)
    {
        xoffset *= MouseSensitivity;
        yoffset *= MouseSensitivity;
        Yaw += xoffset;
        Pitch += yoffset;
        // Make sure when pitch is out of bounds
        if (constrainPitch)
        {
            if (Pitch > 89.0f)
                Pitch = 89.0f;
            if (Pitch < -89.0f)
                Pitch = -89.0f; // bug fix: previously reset to +89.0f, flipping the view
        }
        // Update Front (and Right/Up) from the new angles.
        updateCameraVectors();
    }

    // Processes mouse scroll-wheel input, narrowing/widening the field of
    // view within [1, 45] degrees.
    void ProcessMouseScroll (float yoffset)
    {
        if (Zoom >= 1.0f && Zoom <= 45.0f)
            Zoom -= yoffset;
        if (Zoom < 1.0f)
            Zoom = 1.0f;
        if (Zoom >= 45.0f)
            Zoom = 45.0f;
    }

private:
    // Recomputes the Front, Right and Up vectors from the current Yaw/Pitch.
    void updateCameraVectors()
    {
        // front
        glm::vec3 front;
        front.x = cos(glm::radians(Yaw)) * cos(glm::radians(Pitch));
        front.y = sin(glm::radians(Pitch));
        front.z = sin(glm::radians(Yaw)) * cos(glm::radians(Pitch));
        Front = glm::normalize(front);
        // Right & Up (re-orthogonalized against WorldUp)
        Right = glm::normalize(glm::cross(Front, WorldUp));
        Up = glm::normalize(glm::cross(Right, Front));
    }
};
| 26.369863 | 142 | 0.55039 | [
"vector"
] |
36c9c1a89b7e7c2b946f358c7379f50001ba8d1d | 4,888 | c | C | drivers/dma/idxd/submit.c | Server2356/Sun-Kernel | 63cc54a6948ba572388f829c1fd641ffde0803d8 | [
"MIT"
] | null | null | null | drivers/dma/idxd/submit.c | Server2356/Sun-Kernel | 63cc54a6948ba572388f829c1fd641ffde0803d8 | [
"MIT"
] | null | null | null | drivers/dma/idxd/submit.c | Server2356/Sun-Kernel | 63cc54a6948ba572388f829c1fd641ffde0803d8 | [
"MIT"
] | null | null | null | // SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"
/*
 * __get_desc() - Hand out the pre-allocated descriptor at slot @idx of
 * workqueue @wq.
 *
 * Clears the hardware descriptor and its completion record, records the
 * CPU the sbitmap slot was taken on (needed when the slot is released),
 * and assigns a completion interrupt vector/handle.
 */
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
	struct idxd_desc *desc;
	struct idxd_device *idxd = wq->idxd;

	desc = wq->descs[idx];
	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
	memset(desc->completion, 0, idxd->data->compl_size);
	desc->cpu = cpu;

	/* Tag the descriptor with the PASID when PASID support is enabled. */
	if (device_pasid_enabled(idxd))
		desc->hw->pasid = idxd->pasid;

	/*
	 * Descriptor completion vectors are 1...N for MSIX. We will round
	 * robin through the N vectors.
	 */
	wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
	if (!idxd->int_handles) {
		desc->hw->int_handle = wq->vec_ptr;
	} else {
		/*
		 * int_handles are only for descriptor completion. However for device
		 * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
		 * though we are rotating through 1...N for descriptor interrupts, we
		 * need to acquire the int_handles from 0..N-1.
		 */
		desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
	}

	return desc;
}
/*
 * idxd_alloc_desc() - Allocate a free descriptor slot from @wq.
 *
 * Fast path: take a slot from the sbitmap queue directly. When none is
 * available, IDXD_OP_NONBLOCK callers get ERR_PTR(-EAGAIN); blocking
 * callers wait interruptibly on the sbitmap waitqueue until a slot is
 * freed or a signal arrives.
 *
 * Returns an initialized descriptor on success, ERR_PTR(-EIO) when the
 * device is not enabled, or ERR_PTR(-EAGAIN) on failure/signal.
 */
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	int cpu, idx;
	struct idxd_device *idxd = wq->idxd;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	struct sbitmap_queue *sbq;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	sbq = &wq->sbq;
	idx = sbitmap_queue_get(sbq, &cpu);
	if (idx < 0) {
		if (optype == IDXD_OP_NONBLOCK)
			return ERR_PTR(-EAGAIN);
	} else {
		return __get_desc(wq, idx, cpu);
	}

	ws = &sbq->ws[0];
	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending_state(TASK_INTERRUPTIBLE, current))
			break;
		idx = sbitmap_queue_get(sbq, &cpu);
		/*
		 * Bug fix: sbitmap_queue_get() returns a valid bit index
		 * (>= 0) on success and a negative value on failure. The
		 * previous test "idx > 0" treated slot 0 as a failure,
		 * leaking that slot and calling schedule() while holding it.
		 */
		if (idx >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	if (idx < 0)
		return ERR_PTR(-EAGAIN);

	return __get_desc(wq, idx, cpu);
}
/*
 * idxd_free_desc() - Return @desc to @wq's free pool.
 *
 * The sbitmap slot is cleared against the CPU the slot was originally
 * handed out on (recorded in desc->cpu by __get_desc()), which also wakes
 * any allocator blocked in idxd_alloc_desc().
 */
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	int alloc_cpu = desc->cpu;

	/* Read the CPU hint before invalidating it. */
	desc->cpu = -1;
	sbitmap_queue_clear(&wq->sbq, desc->id, alloc_cpu);
}
/*
 * list_abort_desc() - Remove @desc from @ie's work list if it is there.
 *
 * Caller must hold ie->list_lock. Returns @desc when it was found and
 * unlinked, NULL otherwise.
 */
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
					 struct idxd_desc *desc)
{
	struct idxd_desc *d, *n;

	lockdep_assert_held(&ie->list_lock);

	list_for_each_entry_safe(d, n, &ie->work_list, list) {
		if (d == desc) {
			list_del(&d->list);
			return d;
		}
	}

	/*
	 * At this point, the desc that needs to be aborted is held by the
	 * completion handler, which has taken it off the pending list but has
	 * not yet added it to the work list. It will be cleaned up by the
	 * interrupt handler when it sees IDXD_COMP_DESC_ABORT as the
	 * completion status.
	 */
	return NULL;
}
/*
 * llist_abort_desc() - Abort @desc, which may sit on @ie's lockless
 * pending list or on its work list.
 *
 * Marks the completion record with IDXD_COMP_DESC_ABORT, drains the
 * pending llist while moving every *other* descriptor onto the work list,
 * and completes the aborted descriptor if it was found on either list.
 * If it was on neither list, the interrupt handler owns it and cleans it
 * up when it observes the ABORT status.
 */
static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
			     struct idxd_desc *desc)
{
	struct idxd_desc *d, *t, *found = NULL;
	struct llist_node *head;
	unsigned long flags;

	desc->completion->status = IDXD_COMP_DESC_ABORT;
	/*
	 * Grab the list lock so it will block the irq thread handler. This allows the
	 * abort code to locate the descriptor that needs to be aborted.
	 */
	spin_lock_irqsave(&ie->list_lock, flags);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(d, t, head, llnode) {
			if (d == desc) {
				found = desc;
				continue;
			}
			/*
			 * Bug fix: re-queue the entry being iterated (d), not
			 * the descriptor being aborted (desc). The old code
			 * added &desc->list repeatedly, corrupting the work
			 * list and dropping every other pending descriptor.
			 */
			list_add_tail(&d->list, &ie->work_list);
		}
	}

	if (!found)
		found = list_abort_desc(wq, ie, desc);
	spin_unlock_irqrestore(&ie->list_lock, flags);

	if (found)
		complete_desc(found, IDXD_COMPLETE_ABORT);
}
/*
 * idxd_submit_desc() - Submit @desc to the device through @wq's portal.
 *
 * Returns 0 on success, -EIO when the device is not enabled, -ENXIO when
 * the wq is draining/disabled, or the enqcmds() error for a shared wq.
 *
 * Once percpu_ref_tryget_live() succeeds, every exit path must drop the
 * wq_active reference again so the wq can quiesce.
 */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = NULL;
	void __iomem *portal;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -EIO;

	/* Hold the wq alive across the submission. */
	if (!percpu_ref_tryget_live(&wq->wq_active))
		return -ENXIO;

	portal = wq->portal;

	/*
	 * The wmb() flushes writes to coherent DMA data before
	 * possibly triggering a DMA read. The wmb() is necessary
	 * even on UP because the recipient is a device.
	 */
	wmb();

	/*
	 * Add the descriptor to the lockless pending list of the irq_entry
	 * that will receive its completion interrupt, but only when a
	 * completion interrupt was requested (RCI flag).
	 */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
		ie = &idxd->irq_entries[desc->vector];
		llist_add(&desc->llnode, &ie->pending_llist);
	}

	if (wq_dedicated(wq)) {
		iosubmit_cmds512(portal, desc->hw, 1);
	} else {
		/*
		 * It's not likely that we would receive queue full rejection
		 * since the descriptor allocation gates at wq size. If we
		 * receive a -EAGAIN, that means something went wrong such as the
		 * device is not accepting descriptor at all.
		 */
		rc = enqcmds(portal, desc->hw);
		if (rc < 0) {
			/*
			 * Bug fix: drop the wq_active reference taken above;
			 * the old code leaked it on this error path, which
			 * would prevent the wq from ever draining.
			 */
			percpu_ref_put(&wq->wq_active);
			if (ie)
				llist_abort_desc(wq, ie, desc);
			return rc;
		}
	}

	percpu_ref_put(&wq->wq_active);
	return 0;
}
| 25.066667 | 87 | 0.686784 | [
"vector"
] |
36c9cee5a87e4061446a9f8c6d4b46bbedf840d2 | 646 | h | C | venv/Include/rangeobject.h | unbun/snake.ai | 0c017357608dc7c06af0ca3ca57d870641461207 | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | build/cmd/win32/Python27/include/rangeobject.h | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | build/cmd/win32/Python27/include/rangeobject.h | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z |
/* Range object interface */
#ifndef Py_RANGEOBJECT_H
#define Py_RANGEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* This is about the type 'xrange', not the built-in function range(), which
returns regular lists. */
/*
A range object represents an integer range. This is an immutable object;
a range cannot change its value after creation.
Range objects behave like the corresponding tuple objects except that
they are represented by a start, stop, and step datamembers.
*/
PyAPI_DATA(PyTypeObject) PyRange_Type;
#define PyRange_Check(op) (Py_TYPE(op) == &PyRange_Type)
#ifdef __cplusplus
}
#endif
#endif /* !Py_RANGEOBJECT_H */
| 22.275862 | 76 | 0.758514 | [
"object"
] |
36d49e076c392e15f775c5e8f18b4c7a5de5dde6 | 1,764 | h | C | Resources/KMGraphDoubleSlider/KMGraphDoubleSlider.h | feorean/Snapshooter | 749219642f0fa1091e6b7f0f9796f46ba907bb98 | [
"Apache-2.0"
] | null | null | null | Resources/KMGraphDoubleSlider/KMGraphDoubleSlider.h | feorean/Snapshooter | 749219642f0fa1091e6b7f0f9796f46ba907bb98 | [
"Apache-2.0"
] | null | null | null | Resources/KMGraphDoubleSlider/KMGraphDoubleSlider.h | feorean/Snapshooter | 749219642f0fa1091e6b7f0f9796f46ba907bb98 | [
"Apache-2.0"
] | null | null | null | //
// KMGraphDoubleSlider.h
// GraphSlider
//
// Created by Khalid Mammadov on 01/04/2015.
// Copyright (c) 2015 Mammadov. All rights reserved.
//
#import <Cocoa/Cocoa.h>
// Identifies which knob (if any) is currently selected.
typedef NSInteger NSCurrentSelecedKnob;
enum {
    NSNothing = -1,   // no knob selected
    NSLeftKnob = 0,
    NSRightKnob = 1
};

// Identifies which side of the control was clicked.
typedef NSInteger NSClickedArea;
enum {
    NSIncorrectArea = -1,   // click outside a recognized area
    NSTopSide = 0,
    NSBottomSide = 1
};

// This protocol lets the slider forward knob events to its delegate object
// (typically the controller in the main implementation file).
@protocol DobleSliderKnobEvents <NSObject>
@optional
- (void) leftKnobMoved:(NSEvent *)theEvent;
- (void) rightKnobMoved:(NSEvent *)theEvent;
- (void) leftKnobDragged:(NSEvent *)theEvent;
- (void) rightKnobDragged:(NSEvent *)theEvent;
@end
// A double-ended (range) slider drawn over a graph. Exposes the two knob
// values plus the drawing colors; knob movement/drag events are reported
// through the DobleSliderKnobEvents delegate.
@interface KMGraphDoubleSlider : NSView <DobleSliderKnobEvents> {
    CALayer *leftKnob;                      // layer used to draw the left knob
    CALayer *rightKnob;                     // layer used to draw the right knob
    NSCurrentSelecedKnob currentKnob;       // knob currently being manipulated
    NSClickedArea clickedSide;              // side that received the last click
    NSBezierPath * graphPath;               // path of the background graph
    NSBezierPath * cetralLine;              // central line path
    NSUInteger knobWidth;                   // knob width in points
    NSUInteger halfKnob;                    // presumably knobWidth / 2 -- confirm in .m
    NSMutableArray * _graphArrayConverted;  // presumably graphArray converted for drawing -- confirm in .m
}
- (void) leftKnobMoved:(NSEvent *)theEvent;
- (void) rightKnobMoved:(NSEvent *)theEvent;
- (void) leftKnobDragged:(NSEvent *)theEvent;
- (void) rightKnobDragged:(NSEvent *)theEvent;
// Accessors for the event delegate (see also the `delegate` property below).
- (id) delegate;
- (void) setDelegate: (id) newDelegate;
@property (nonatomic) NSMutableArray * graphArray;   // data points of the graph
@property (nonatomic) NSNumber * leftKnobValue;      // current left knob value
@property (nonatomic) NSNumber * rightKnobValue;     // current right knob value
@property (nonatomic) NSColor * backgroundColor;
@property (nonatomic) NSColor * graphColor;
@property (nonatomic) NSColor * lineColor;
@property id delegate;
@end
| 21.777778 | 95 | 0.67517 | [
"object"
] |
36d7f066194e4c47f9b5cb4991398f28ffde42ae | 6,495 | h | C | camera/QCamera2/stack/mm-lib2d-interface/inc/mm_lib2d.h | radu-v/android_device_nokia_nb1 | 59672ac3c6a18aa69c57df3098dd1b8d3edfcd82 | [
"FTL"
] | 1 | 2021-07-22T18:40:12.000Z | 2021-07-22T18:40:12.000Z | camera/QCamera2/stack/mm-lib2d-interface/inc/mm_lib2d.h | radu-v/android_device_nokia_nb1 | 59672ac3c6a18aa69c57df3098dd1b8d3edfcd82 | [
"FTL"
] | null | null | null | camera/QCamera2/stack/mm-lib2d-interface/inc/mm_lib2d.h | radu-v/android_device_nokia_nb1 | 59672ac3c6a18aa69c57df3098dd1b8d3edfcd82 | [
"FTL"
] | null | null | null | /* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef MM_LIB2D_H_
#define MM_LIB2D_H_
#include "cam_types.h"
#ifdef QCAMERA_REDEFINE_LOG
#ifndef CAM_MODULE
#define CAM_MODULE CAM_NO_MODULE
#endif
// Camera dependencies
#include "mm_camera_dbg.h"
#endif
/** lib2d_error
* @MM_LIB2D_SUCCESS: Success
* @MM_LIB2D_ERR_GENERAL: General Error
* @MM_LIB2D_ERR_MEMORY: Insufficient memory error
* @MM_LIB2D_ERR_BAD_PARAM: Bad params error
**/
typedef enum lib2d_error_t {
MM_LIB2D_SUCCESS,
MM_LIB2D_ERR_GENERAL,
MM_LIB2D_ERR_MEMORY,
MM_LIB2D_ERR_BAD_PARAM,
} lib2d_error;
/** lib2d_mode
* @MM_LIB2D_SYNC_MODE: Synchronous mode
* @MM_LIB2D_ASYNC_MODE: Asynchronous mode
**/
typedef enum mm_lib2d_mode_t {
MM_LIB2D_SYNC_MODE,
MM_LIB2D_ASYNC_MODE,
} lib2d_mode;
/** mm_lib2d_buffer_type
* @MM_LIB2D_BUFFER_TYPE_RGB: RGB Buffer type
* @MM_LIB2D_BUFFER_TYPE_YUV: YUV buffer type
**/
typedef enum mm_lib2d_buffer_type_t {
MM_LIB2D_BUFFER_TYPE_RGB,
MM_LIB2D_BUFFER_TYPE_YUV,
} mm_lib2d_buffer_type;
/** mm_lib2d_rgb_buffer
* @fd: handle to the buffer memory
* @format: RGB color format
* @width: defines width in pixels
* @height: defines height in pixels
* @buffer: pointer to the RGB buffer
* @phys: gpu mapped physical address
* @stride: defines stride in bytes
**/
typedef struct mm_lib2d_rgb_buffer_t {
int32_t fd;
cam_format_t format;
uint32_t width;
uint32_t height;
void *buffer;
void *phys;
int32_t stride;
} mm_lib2d_rgb_buffer;
/** mm_lib2d_yuv_buffer
* @fd: handle to the buffer memory
* @format: YUV color format
* @width: defines width in pixels
* @height: defines height in pixels
* @plane0: holds the whole buffer if YUV format is not planar
* @phys0: gpu mapped physical address
* @stride0: stride in bytes
* @plane1: holds UV or VU plane for planar interleaved
* @phys2: gpu mapped physical address
* @stride1: stride in bytes
* @plane2: holds the 3. plane, ignored if YUV format is not planar
* @phys2: gpu mapped physical address
* @stride2: stride in bytes
**/
typedef struct mm_lib2d_yuv_buffer_t {
int32_t fd;
cam_format_t format;
uint32_t width;
uint32_t height;
void *plane0;
void *phys0;
int32_t stride0;
void *plane1;
void *phys1;
int32_t stride1;
void *plane2;
void *phys2;
int32_t stride2;
} mm_lib2d_yuv_buffer;
/** mm_lib2d_buffer
* @buffer_type: Buffer type. whether RGB or YUV
* @rgb_buffer: RGB buffer handle
* @yuv_buffer: YUV buffer handle
**/
typedef struct mm_lib2d_buffer_t {
mm_lib2d_buffer_type buffer_type;
union {
mm_lib2d_rgb_buffer rgb_buffer;
mm_lib2d_yuv_buffer yuv_buffer;
};
} mm_lib2d_buffer;
/** lib2d_client_cb
* @userdata: App userdata
* @jobid: job id
**/
typedef lib2d_error (*lib2d_client_cb) (void *userdata, int jobid);
/**
* Function: mm_lib2d_init
*
* Description: Initialization function for Lib2D. src_format, dst_format
* are hints to the underlying component to initialize.
*
* Input parameters:
* mode - Mode (sync/async) in which App wants lib2d to run.
* src_format - source surface format
* dst_format - Destination surface format
* my_obj - handle that will be returned on succesful Init. App has to
* call other lib2d functions by passing this handle.
*
* Return values:
* MM_LIB2D_SUCCESS
* MM_LIB2D_ERR_MEMORY
* MM_LIB2D_ERR_BAD_PARAM
* MM_LIB2D_ERR_GENERAL
*
* Notes: none
**/
lib2d_error mm_lib2d_init(lib2d_mode mode, cam_format_t src_format,
cam_format_t dst_format, void **lib2d_obj_handle);
/**
* Function: mm_lib2d_deinit
*
* Description: De-Initialization function for Lib2D
*
* Input parameters:
* lib2d_obj_handle - handle tto the lib2d object
*
* Return values:
* MM_LIB2D_SUCCESS
* MM_LIB2D_ERR_GENERAL
*
* Notes: none
**/
lib2d_error mm_lib2d_deinit(void *lib2d_obj_handle);
/**
* Function: mm_lib2d_start_job
*
* Description: Start executing the job
*
* Input parameters:
* lib2d_obj_handle - handle tto the lib2d object
* src_buffer - pointer to the source buffer
* dst_buffer - pointer to the destination buffer
* jobid - job id of this request
* userdata - userdata that will be pass through callback function
* cb - callback function that will be called on completion of this job
* rotation - rotation to be applied
*
* Return values:
* MM_LIB2D_SUCCESS
* MM_LIB2D_ERR_MEMORY
* MM_LIB2D_ERR_GENERAL
*
* Notes: none
**/
lib2d_error mm_lib2d_start_job(void *lib2d_obj_handle,
mm_lib2d_buffer* src_buffer, mm_lib2d_buffer* dst_buffer,
int jobid, void *userdata, lib2d_client_cb cb, uint32_t rotation);
#endif /* MM_LIB2D_H_ */
| 30.928571 | 74 | 0.708083 | [
"object"
] |
36d9449dadcafdcd35b3dcb0f91a7951b1ebec85 | 689 | h | C | src/fl-derived/ZeroLM.h | talonvoice/w2ldecode | a5b154f5775e16a9908e0db48270a222e70d1c94 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T08:48:17.000Z | 2022-03-31T08:48:17.000Z | src/fl-derived/ZeroLM.h | talonvoice/w2ldecode | a5b154f5775e16a9908e0db48270a222e70d1c94 | [
"BSD-3-Clause"
] | null | null | null | src/fl-derived/ZeroLM.h | talonvoice/w2ldecode | a5b154f5775e16a9908e0db48270a222e70d1c94 | [
"BSD-3-Clause"
] | 2 | 2021-06-18T23:29:17.000Z | 2021-08-15T15:31:10.000Z | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include "fl-derived/LM.h"
namespace w2l {
/**
* ZeroLM is a dummy language model class, which mimics the behavious of a
* uni-gram language model but always returns 0 as score.
*/
class ZeroLM : public LM {
 public:
  // Begin a new sentence; returns the initial LM state.
  LMStatePtr start(bool startWithNothing) override;

  // Extend `state` with token `usrTokenIdx`; per the class contract above,
  // the returned score is always 0.
  std::pair<LMStatePtr, float> score(
      const LMStatePtr& state,
      const int usrTokenIdx) override;

  // Finalize `state` at end of sentence; the score is likewise always 0.
  std::pair<LMStatePtr, float> finish(const LMStatePtr& state) override;
};
} // namespace w2l
| 22.966667 | 74 | 0.714078 | [
"model"
] |
36e025b165656d9e0f0e7f7a48a8e17eef90e1aa | 3,837 | h | C | src/core/SkFontPriv.h | CarbonROM/android_external_skqp | 72c9856641fddcfe46a1c2287550604061f1eefd | [
"BSD-3-Clause"
] | null | null | null | src/core/SkFontPriv.h | CarbonROM/android_external_skqp | 72c9856641fddcfe46a1c2287550604061f1eefd | [
"BSD-3-Clause"
] | null | null | null | src/core/SkFontPriv.h | CarbonROM/android_external_skqp | 72c9856641fddcfe46a1c2287550604061f1eefd | [
"BSD-3-Clause"
] | null | null | null | /*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkFontPriv_DEFINED
#define SkFontPriv_DEFINED
#include "SkFont.h"
#include "SkMatrix.h"
#include "SkTypeface.h"
class SkReadBuffer;
class SkWriteBuffer;
// Internal (non-public) helpers operating on SkFont.
class SkFontPriv {
public:
    /* This is the size we use when we ask for a glyph's path. We then
     * post-transform it as we draw to match the request.
     * This is done to try to re-use cache entries for the path.
     *
     * This value is somewhat arbitrary. In theory, it could be 1, since
     * we store paths as floats. However, we get the path from the font
     * scaler, and it may represent its paths as fixed-point (or 26.6),
     * so we shouldn't ask for something too big (might overflow 16.16)
     * or too small (underflow 26.6).
     *
     * This value could track kMaxSizeForGlyphCache, assuming the above
     * constraints, but since we ask for unhinted paths, the two values
     * need not match per-se.
     */
    static constexpr int kCanonicalTextSizeForPaths = 64;

    // NOTE(review): presumably reports whether the combined device/text
    // matrices would produce glyphs too large to cache (vs maxLimit) --
    // confirm in the implementation.
    static bool TooBigToUseCache(const SkMatrix& ctm, const SkMatrix& textM, SkScalar maxLimit);

    static SkScalar MaxCacheSize2(SkScalar maxLimit);

    /**
     * Return a matrix that applies the paint's text values: size, scale, skew
     */
    static SkMatrix MakeTextMatrix(SkScalar size, SkScalar scaleX, SkScalar skewX) {
        // Scale is applied first; horizontal skew (when nonzero) is post-applied.
        SkMatrix m = SkMatrix::MakeScale(size * scaleX, size);
        if (skewX) {
            m.postSkew(skewX, 0);
        }
        return m;
    }

    // Convenience overload: builds the text matrix from the font's own
    // size/scaleX/skewX values.
    static SkMatrix MakeTextMatrix(const SkFont& font) {
        return MakeTextMatrix(font.getSize(), font.getScaleX(), font.getSkewX());
    }

    static void ScaleFontMetrics(SkFontMetrics*, SkScalar);

    // returns -1 if buffer is invalid for specified encoding
    static int ValidCountText(const void* text, size_t length, SkTextEncoding);

    /**
        Returns the union of bounds of all glyphs.
        Returned dimensions are computed by font manager from font data,
        ignoring SkPaint::Hinting. Includes font metrics, but not fake bold or SkPathEffect.

        If text size is large, text scale is one, and text skew is zero,
        returns the bounds as:
        { SkFontMetrics::fXMin, SkFontMetrics::fTop, SkFontMetrics::fXMax, SkFontMetrics::fBottom }.

        @return union of bounds of all glyphs
    */
    static SkRect GetFontBounds(const SkFont&);

    // True when size, scaleX and skewX are all finite (no NaN/Inf).
    static bool IsFinite(const SkFont& font) {
        return SkScalarIsFinite(font.fSize) &&
               SkScalarIsFinite(font.fScaleX) &&
               SkScalarIsFinite(font.fSkewX);
    }

    // Returns the number of elements (characters or glyphs) in the array.
    static int CountTextElements(const void* text, size_t byteLength, SkTextEncoding);

    static void GlyphsToUnichars(const SkFont&, const uint16_t glyphs[], int count, SkUnichar[]);

    // Serialization of an SkFont to/from a buffer.
    static void Flatten(const SkFont&, SkWriteBuffer& buffer);
    static bool Unflatten(SkFont*, SkReadBuffer& buffer);
};
// Helper that presents an encoded text run as an array of glyph IDs.
// If the text is already glyph IDs the caller's buffer is aliased directly
// (length is in bytes, two bytes per glyph); otherwise the glyphs are
// converted into a small stack-backed array owned by this object.
class SkAutoToGlyphs {
public:
    SkAutoToGlyphs(const SkFont& font, const void* text, size_t length, SkTextEncoding encoding) {
        if (encoding == kGlyphID_SkTextEncoding || length == 0) {
            // Already glyphs (or empty): no conversion, just alias the input.
            fGlyphs = reinterpret_cast<const uint16_t*>(text);
            fCount = length >> 1;   // bytes -> uint16_t glyph count
        } else {
            fCount = font.countText(text, length, encoding);
            fStorage.reset(fCount);
            font.textToGlyphs(text, length, encoding, fStorage.get(), fCount);
            fGlyphs = fStorage.get();
        }
    }

    // Number of glyphs in the run.
    int count() const { return fCount; }
    // Pointer to the glyph IDs; valid only for the lifetime of this object
    // (and of the caller's buffer in the aliasing case).
    const uint16_t* glyphs() const { return fGlyphs; }

private:
    SkAutoSTArray<32, uint16_t> fStorage;   // inline storage for up to 32 glyphs
    const uint16_t* fGlyphs;
    int fCount;
};
#endif
| 34.258929 | 100 | 0.667188 | [
"transform"
] |
36e0883c53643eb455781c70e634cd869390c92d | 4,816 | h | C | src/widgets/modest-webkit-mime-part-view.h | community-ssu/modest | 47e30072309811dc24abc9809952ed1ea846ebfa | [
"BSD-3-Clause"
] | null | null | null | src/widgets/modest-webkit-mime-part-view.h | community-ssu/modest | 47e30072309811dc24abc9809952ed1ea846ebfa | [
"BSD-3-Clause"
] | null | null | null | src/widgets/modest-webkit-mime-part-view.h | community-ssu/modest | 47e30072309811dc24abc9809952ed1ea846ebfa | [
"BSD-3-Clause"
] | 2 | 2020-11-15T13:57:11.000Z | 2021-09-01T16:32:55.000Z | /* Copyright (c) 2009, Nokia Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Nokia Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MODEST_WEBKIT_MIME_PART_VIEW_H__
#define __MODEST_WEBKIT_MIME_PART_VIEW_H__
#include <config.h>
#include <glib.h>
#include <glib-object.h>
#include <webkit/webkit.h>
#include <tny-mime-part-view.h>
#include <widgets/modest-mime-part-view.h>
#include <widgets/modest-zoomable.h>
#include <widgets/modest-isearch-view.h>
G_BEGIN_DECLS
/* convenience macros */
#define MODEST_TYPE_WEBKIT_MIME_PART_VIEW (modest_webkit_mime_part_view_get_type())
#define MODEST_WEBKIT_MIME_PART_VIEW(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),MODEST_TYPE_WEBKIT_MIME_PART_VIEW,ModestWebkitMimePartView))
#define MODEST_WEBKIT_MIME_PART_VIEW_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),MODEST_TYPE_WEBKIT_MIME_PART_VIEW,ModestWebkitMimePartViewClass))
#define MODEST_IS_WEBKIT_MIME_PART_VIEW(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),MODEST_TYPE_WEBKIT_MIME_PART_VIEW))
#define MODEST_IS_WEBKIT_MIME_PART_VIEW_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),MODEST_TYPE_WEBKIT_MIME_PART_VIEW))
#define MODEST_WEBKIT_MIME_PART_VIEW_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj),MODEST_TYPE_WEBKIT_MIME_PART_VIEW,ModestWebkitMimePartViewClass))
typedef struct _ModestWebkitMimePartView ModestWebkitMimePartView;
typedef struct _ModestWebkitMimePartViewClass ModestWebkitMimePartViewClass;
struct _ModestWebkitMimePartView {
WebKitWebView parent;
};
struct _ModestWebkitMimePartViewClass {
WebKitWebViewClass parent_class;
/* TnyMimePartView interface methods */
TnyMimePart* (*get_part_func) (TnyMimePartView *self);
void (*set_part_func) (TnyMimePartView *self, TnyMimePart *part);
void (*clear_func) (TnyMimePartView *self);
/* ModestMimePartView interface methods */
gboolean (*is_empty_func) (ModestMimePartView *self);
gboolean (*get_view_images_func) (ModestMimePartView *self);
void (*set_view_images_func) (ModestMimePartView *self, gboolean view_images);
gboolean (*has_external_images_func) (ModestMimePartView *self);
/* ModestZoomable interface methods */
gdouble (*get_zoom_func) (ModestZoomable *self);
void (*set_zoom_func) (ModestZoomable *self, gdouble value);
gboolean (*zoom_minus_func) (ModestZoomable *self);
gboolean (*zoom_plus_func) (ModestZoomable *self);
/* ModestISearchView interface methods */
gboolean (*search_func) (ModestISearchView *self, const gchar *string);
gboolean (*search_next_func) (ModestISearchView *self);
gboolean (*get_selection_area_func) (ModestISearchView *self, gint *x, gint *y, gint *width, gint *height);
/* signals */
void (*stop_streams) (ModestWebkitMimePartView *self);
void (*limit_error) (ModestWebkitMimePartView *self);
};
/**
*
* modest_webkit_mime_part_view_get_type
*
* get the GType for the this class
*
* Returns: the GType for this class
*/
GType modest_webkit_mime_part_view_get_type (void) G_GNUC_CONST;
/**
* modest_webkit_mime_part_view_new:
*
* create a new #ModestWebkitMimePartView instance, implementing
* interfaces #TnyMimePartView, #ModestMimePartView, #ModestZoomable
* and #ModestISearchView.
*
* Returns: a #ModestWebkitMimePartView
*/
GtkWidget *modest_webkit_mime_part_view_new (void);
G_END_DECLS
#endif /* __MODEST_GTK_HTML_MIME_PART_VIEW_H__ */
| 43.387387 | 152 | 0.78613 | [
"object"
] |
36e29e53adca67724aa02e836293d974cc395ac2 | 18,566 | h | C | GTE/Mathematics/Polynomial1.h | tranthaiphi/GeometricTools | 451e412a0715dbb4fcafbe486ca33d84a404b78d | [
"BSL-1.0"
] | null | null | null | GTE/Mathematics/Polynomial1.h | tranthaiphi/GeometricTools | 451e412a0715dbb4fcafbe486ca33d84a404b78d | [
"BSL-1.0"
] | null | null | null | GTE/Mathematics/Polynomial1.h | tranthaiphi/GeometricTools | 451e412a0715dbb4fcafbe486ca33d84a404b78d | [
"BSL-1.0"
] | null | null | null | // David Eberly, Geometric Tools, Redmond WA 98052
// Copyright (c) 1998-2021
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
// https://www.geometrictools.com/License/Boost/LICENSE_1_0.txt
// Version: 4.0.2019.08.13
#pragma once
#include <Mathematics/Logger.h>
#include <algorithm>
#include <initializer_list>
#include <vector>
namespace gte
{
template <typename Real>
class Polynomial1
{
public:
    // Construction and destruction.  The first constructor creates a
    // polynomial of the specified degree but sets all coefficients to
    // zero (to ensure initialization).  You are responsible for setting
    // the coefficients, presumably with the degree-term set to a nonzero
    // number.  In the second constructor, the degree is the number of
    // initializers minus 1, but then adjusted so that coefficient[degree]
    // is not zero (unless all initializer values are zero).
    Polynomial1(unsigned int degree = 0)
        :
        mCoefficient(degree + 1, (Real)0)
    {
    }

    Polynomial1(std::initializer_list<Real> values)
    {
        // C++ 11 will call the default constructor for
        // Polynomial1<Real> p{}, so it is guaranteed that
        // values.size() > 0.
        mCoefficient.resize(values.size());
        std::copy(values.begin(), values.end(), mCoefficient.begin());
        EliminateLeadingZeros();
    }

    // Support for partial construction, where the default constructor is
    // used when the degree is not yet known.  The coefficients are
    // uninitialized.
    void SetDegree(unsigned int degree)
    {
        mCoefficient.resize(degree + 1);
    }

    // Set all coefficients to the specified value.
    void SetCoefficients(Real value)
    {
        std::fill(mCoefficient.begin(), mCoefficient.end(), value);
    }

    // Member access.
    inline unsigned int GetDegree() const
    {
        // By design, mCoefficient.size() > 0.
        return static_cast<unsigned int>(mCoefficient.size() - 1);
    }

    // Coefficient access by power index; i must be in [0, GetDegree()].
    inline Real const& operator[](unsigned int i) const
    {
        return mCoefficient[i];
    }

    inline Real& operator[](unsigned int i)
    {
        return mCoefficient[i];
    }

    // Comparisons.  These are lexicographic comparisons of the
    // coefficient arrays (std::vector comparisons), suitable for sorted
    // containers; they are not algebraic orderings of the polynomials.
    inline bool operator==(Polynomial1<Real> const& p) const
    {
        return mCoefficient == p.mCoefficient;
    }

    inline bool operator!=(Polynomial1<Real> const& p) const
    {
        return mCoefficient != p.mCoefficient;
    }

    inline bool operator< (Polynomial1<Real> const& p) const
    {
        return mCoefficient < p.mCoefficient;
    }

    inline bool operator<=(Polynomial1<Real> const& p) const
    {
        return mCoefficient <= p.mCoefficient;
    }

    inline bool operator> (Polynomial1<Real> const& p) const
    {
        return mCoefficient > p.mCoefficient;
    }

    inline bool operator>=(Polynomial1<Real> const& p) const
    {
        return mCoefficient >= p.mCoefficient;
    }

    // Evaluate the polynomial at t using Horner's method.  If the
    // polynomial is invalid, the function returns zero.
    Real operator()(Real t) const
    {
        int i = static_cast<int>(mCoefficient.size());
        Real result = mCoefficient[--i];
        for (--i; i >= 0; --i)
        {
            result *= t;
            result += mCoefficient[i];
        }
        return result;
    }

    // Compute the derivative of the polynomial.  The derivative of a
    // degree-0 polynomial is the zero polynomial.
    Polynomial1 GetDerivative() const
    {
        unsigned int const degree = GetDegree();
        if (degree > 0)
        {
            Polynomial1 result(degree - 1);
            for (unsigned int i0 = 0, i1 = 1; i0 < degree; ++i0, ++i1)
            {
                // d/dt (c[i1] * t^{i1}) contributes i1 * c[i1] * t^{i0}.
                result.mCoefficient[i0] = mCoefficient[i1] * (Real)i1;
            }
            return result;
        }
        else
        {
            Polynomial1 result(0);
            result[0] = (Real)0;
            return result;
        }
    }

    // Inversion (invpoly[i] = poly[degree-i] for 0 <= i <= degree).
    Polynomial1 GetInversion() const
    {
        unsigned int const degree = GetDegree();
        Polynomial1 result(degree);
        for (unsigned int i = 0; i <= degree; ++i)
        {
            result.mCoefficient[i] = mCoefficient[degree - i];
        }
        return result;
    }

    // Translation.  If 'this' is p(t), return p(t-t0).  Computed by a
    // Horner-style expansion in the factor (t - t0).
    Polynomial1 GetTranslation(Real t0) const
    {
        Polynomial1<Real> factor{ -t0, (Real)1 };  // f(t) = t - t0
        unsigned int const degree = GetDegree();
        Polynomial1 result{ mCoefficient[degree] };
        for (unsigned int i = 1, j = degree - 1; i <= degree; ++i, --j)
        {
            result = mCoefficient[j] + factor * result;
        }
        return result;
    }

    // Eliminate any leading zeros in the polynomial, except in the case
    // the degree is 0 and the coefficient is 0.  The elimination is
    // necessary when arithmetic operations cause a decrease in the degree
    // of the result.  For example, (1 + x + x^2) + (1 + 2*x - x^2) =
    // (2 + 3*x).  The inputs both have degree 2, so the result is created
    // with degree 2.  After the addition we find that the degree is in
    // fact 1 and resize the array of coefficients.  This function is
    // called internally by the arithmetic operators, but it is exposed in
    // the public interface in case you need it for your own purposes.
    void EliminateLeadingZeros()
    {
        size_t size = mCoefficient.size();
        if (size > 1)
        {
            Real const zero = (Real)0;
            int leading;
            for (leading = static_cast<int>(size) - 1; leading > 0; --leading)
            {
                if (mCoefficient[leading] != zero)
                {
                    break;
                }
            }
            mCoefficient.resize(++leading);
        }
    }

    // If 'this' is P(t) and the divisor is D(t) with
    // degree(P) >= degree(D), then P(t) = Q(t)*D(t)+R(t) where Q(t) is
    // the quotient with degree(Q) = degree(P) - degree(D) and R(t) is the
    // remainder with degree(R) < degree(D).  If this routine is called
    // with degree(P) < degree(D), then Q = 0 and R = P are returned.
    // NOTE(review): the divisor's leading coefficient must be nonzero
    // when degree(P) >= degree(D); no check is made here.
    void Divide(Polynomial1 const& divisor, Polynomial1& quotient, Polynomial1& remainder) const
    {
        Real const zero = (Real)0;
        int divisorDegree = static_cast<int>(divisor.GetDegree());
        int quotientDegree = static_cast<int>(GetDegree()) - divisorDegree;
        if (quotientDegree >= 0)
        {
            quotient.SetDegree(quotientDegree);

            // Temporary storage for the remainder.
            Polynomial1 tmp = *this;

            // Do the division using the Euclidean algorithm.
            Real inv = ((Real)1) / divisor[divisorDegree];
            for (int i = quotientDegree; i >= 0; --i)
            {
                int j = divisorDegree + i;
                quotient[i] = inv * tmp[j];
                for (j--; j >= i; j--)
                {
                    tmp[j] -= quotient[i] * divisor[j - i];
                }
            }

            // Calculate the correct degree for the remainder.
            if (divisorDegree >= 1)
            {
                int remainderDegree = divisorDegree - 1;
                while (remainderDegree > 0 && tmp[remainderDegree] == zero)
                {
                    --remainderDegree;
                }
                remainder.SetDegree(remainderDegree);
                for (int i = 0; i <= remainderDegree; ++i)
                {
                    remainder[i] = tmp[i];
                }
            }
            else
            {
                // Division by a degree-0 polynomial leaves no remainder.
                remainder.SetDegree(0);
                remainder[0] = zero;
            }
        }
        else
        {
            // degree(P) < degree(D): Q = 0 and R = P.
            quotient.SetDegree(0);
            quotient[0] = zero;
            remainder = *this;
        }
    }

    // Scale the polynomial so the highest-degree term has coefficient 1.
    void MakeMonic()
    {
        EliminateLeadingZeros();
        Real const one(1);
        if (mCoefficient.back() != one)
        {
            unsigned int degree = GetDegree();
            Real invLeading = one / mCoefficient.back();
            mCoefficient.back() = one;
            for (unsigned int i = 0; i < degree; ++i)
            {
                mCoefficient[i] *= invLeading;
            }
        }
    }

protected:
    // The class is designed so that mCoefficient.size() >= 1.
    std::vector<Real> mCoefficient;
};
// Compute the greatest common divisor of two polynomials.  The returned
// polynomial has leading coefficient 1 (except when zero-valued
// polynomials are passed to the function).
template <typename Real>
Polynomial1<Real> GreatestCommonDivisor(Polynomial1<Real> const& p0, Polynomial1<Real> const& p1)
{
    // The numerator should be the polynomial of larger degree.
    bool const p0Larger = (p0.GetDegree() >= p1.GetDegree());
    Polynomial1<Real> numer = (p0Larger ? p0 : p1);
    Polynomial1<Real> denom = (p0Larger ? p1 : p0);

    Polynomial1<Real> const zero{ (Real)0 };
    if (numer == zero || denom == zero)
    {
        // gcd with a zero polynomial is the other (possibly zero) input.
        return (numer != zero ? numer : zero);
    }

    // Make the polynomials monic to keep the coefficients reasonable size
    // when computing with floating-point Real.
    numer.MakeMonic();
    denom.MakeMonic();

    // Euclidean algorithm: numer = quot * denom + rem, so
    // gcd(numer, denom) = gcd(denom, rem).
    Polynomial1<Real> quot, rem;
    numer.Divide(denom, quot, rem);
    while (rem != zero)
    {
        numer = denom;
        denom = rem;
        denom.MakeMonic();
        numer.Divide(denom, quot, rem);
    }
    denom.MakeMonic();
    return denom;
}
// Factor f = factor[0]*factor[1]^2*factor[2]^3*...*factor[n-1]^n
// according to the square-free factorization algorithm
// https://en.wikipedia.org/wiki/Square-free_polynomial
template <typename Real>
void SquareFreeFactorization(Polynomial1<Real> const& f, std::vector<Polynomial1<Real>>& factors)
{
    // In the calls to Divide(...), the divisor exactly divides the
    // numerator, so the remainder is zero after each such call.
    Polynomial1<Real> deriv = f.GetDerivative();
    Polynomial1<Real> gcd = GreatestCommonDivisor(f, deriv);
    Polynomial1<Real> w, y, z, quot, rem;
    f.Divide(gcd, w, rem);      // w = f / gcd
    deriv.Divide(gcd, y, rem);  // y = deriv / gcd
    z = y - w.GetDerivative();
    for (;;)
    {
        gcd = GreatestCommonDivisor(w, z);
        factors.emplace_back(gcd);
        w.Divide(gcd, quot, rem);   // quot = w / gcd
        w = std::move(quot);
        z.Divide(gcd, y, rem);      // y = z / gcd
        z = y - w.GetDerivative();
        if (w.GetDegree() == 0)
        {
            break;
        }
    }
}
// Unary operations.
// Unary plus: returns the polynomial unchanged.
template <typename Real>
Polynomial1<Real> operator+(Polynomial1<Real> const& p)
{
    Polynomial1<Real> result = p;
    return result;
}
// Unary minus: negates every coefficient; the degree is unchanged.
template <typename Real>
Polynomial1<Real> operator-(Polynomial1<Real> const& p)
{
    unsigned int const deg = p.GetDegree();
    Polynomial1<Real> negated(deg);
    for (unsigned int k = 0; k <= deg; ++k)
    {
        negated[k] = -p[k];
    }
    return negated;
}
// Linear-algebraic operations.
// Polynomial addition.  Any leading zeros produced by cancellation are
// eliminated from the result.
template <typename Real>
Polynomial1<Real> operator+(Polynomial1<Real> const& p0, Polynomial1<Real> const& p1)
{
    unsigned int const d0 = p0.GetDegree(), d1 = p1.GetDegree();
    unsigned int const dmin = (d0 < d1 ? d0 : d1);
    unsigned int const dmax = (d0 < d1 ? d1 : d0);
    Polynomial1<Real> result(dmax);

    // Sum where both inputs have coefficients.
    unsigned int i = 0;
    for (; i <= dmin; ++i)
    {
        result[i] = p0[i] + p1[i];
    }

    // Copy the tail of the higher-degree input.
    for (; i <= dmax; ++i)
    {
        result[i] = (d0 >= d1 ? p0[i] : p1[i]);
    }

    result.EliminateLeadingZeros();
    return result;
}
// Polynomial subtraction.  Any leading zeros produced by cancellation
// are eliminated from the result.
template <typename Real>
Polynomial1<Real> operator-(Polynomial1<Real> const& p0, Polynomial1<Real> const& p1)
{
    unsigned int const d0 = p0.GetDegree(), d1 = p1.GetDegree();
    unsigned int const dmin = (d0 < d1 ? d0 : d1);
    unsigned int const dmax = (d0 < d1 ? d1 : d0);
    Polynomial1<Real> result(dmax);

    // Difference where both inputs have coefficients.
    unsigned int i = 0;
    for (; i <= dmin; ++i)
    {
        result[i] = p0[i] - p1[i];
    }

    // Tail: p0's coefficients pass through; p1's are negated.
    for (; i <= dmax; ++i)
    {
        result[i] = (d0 >= d1 ? p0[i] : -p1[i]);
    }

    result.EliminateLeadingZeros();
    return result;
}
// Polynomial multiplication via discrete convolution of the coefficient
// sequences.  The accumulation order matches coefficientwise iteration
// over p0 then p1.
template <typename Real>
Polynomial1<Real> operator*(Polynomial1<Real> const& p0, Polynomial1<Real> const& p1)
{
    unsigned int const d0 = p0.GetDegree(), d1 = p1.GetDegree();
    Polynomial1<Real> product(d0 + d1);
    product.SetCoefficients((Real)0);
    for (unsigned int j0 = 0; j0 <= d0; ++j0)
    {
        for (unsigned int j1 = 0; j1 <= d1; ++j1)
        {
            product[j0 + j1] += p0[j0] * p1[j1];
        }
    }
    return product;
}
// Add a scalar to a polynomial; only the constant term is affected.
template <typename Real>
Polynomial1<Real> operator+(Polynomial1<Real> const& p, Real scalar)
{
    Polynomial1<Real> sum = p;
    sum[0] += scalar;
    return sum;
}
// Add a polynomial to a scalar; only the constant term is affected.
template <typename Real>
Polynomial1<Real> operator+(Real scalar, Polynomial1<Real> const& p)
{
    Polynomial1<Real> sum = p;
    sum[0] += scalar;
    return sum;
}
// Subtract a scalar from a polynomial; only the constant term is
// affected.
template <typename Real>
Polynomial1<Real> operator-(Polynomial1<Real> const& p, Real scalar)
{
    Polynomial1<Real> diff = p;
    diff[0] -= scalar;
    return diff;
}
// Subtract a polynomial from a scalar: negate all coefficients, then
// add the scalar to the constant term.
template <typename Real>
Polynomial1<Real> operator-(Real scalar, Polynomial1<Real> const& p)
{
    Polynomial1<Real> result = -p;
    result[0] += scalar;
    return result;
}
// Multiply a polynomial by a scalar.  NOTE: a zero scalar produces a
// polynomial of unchanged degree with all-zero coefficients; leading
// zeros are not eliminated here.
template <typename Real>
Polynomial1<Real> operator*(Polynomial1<Real> const& p, Real scalar)
{
    unsigned int const deg = p.GetDegree();
    Polynomial1<Real> scaled(deg);
    for (unsigned int k = 0; k <= deg; ++k)
    {
        scaled[k] = scalar * p[k];
    }
    return scaled;
}
// Multiply a scalar by a polynomial; delegates to the (poly, scalar)
// overload, which computes the same scalar * p[i] products.
template <typename Real>
Polynomial1<Real> operator*(Real scalar, Polynomial1<Real> const& p)
{
    return p * scalar;
}
// Divide a polynomial by a scalar, implemented as multiplication by the
// reciprocal to avoid a division per coefficient.  Asserts on a zero
// divisor.
template <typename Real>
Polynomial1<Real> operator/(Polynomial1<Real> const& p, Real scalar)
{
    LogAssert(scalar != (Real)0, "Division by zero.");
    Real const invScalar = (Real)1 / scalar;
    unsigned int const deg = p.GetDegree();
    Polynomial1<Real> result(deg);
    for (unsigned int k = 0; k <= deg; ++k)
    {
        result[k] = invScalar * p[k];
    }
    return result;
}
// In-place polynomial addition; delegates to the binary operator.
template <typename Real>
Polynomial1<Real>& operator+=(Polynomial1<Real>& p0, Polynomial1<Real> const& p1)
{
    return p0 = p0 + p1;
}
// In-place polynomial subtraction; delegates to the binary operator.
template <typename Real>
Polynomial1<Real>& operator-=(Polynomial1<Real>& p0, Polynomial1<Real> const& p1)
{
    return p0 = p0 - p1;
}
// In-place polynomial multiplication; delegates to the binary operator.
template <typename Real>
Polynomial1<Real>& operator*=(Polynomial1<Real>& p0, Polynomial1<Real> const& p1)
{
    return p0 = p0 * p1;
}
// In-place scalar addition; only the constant term changes.
template <typename Real>
Polynomial1<Real>& operator+=(Polynomial1<Real>& p, Real scalar)
{
    p[0] += scalar;
    return p;
}
// In-place scalar subtraction; only the constant term changes.
template <typename Real>
Polynomial1<Real>& operator-=(Polynomial1<Real>& p, Real scalar)
{
    p[0] -= scalar;
    return p;
}
// In-place scalar multiplication; delegates to the binary operator.
template <typename Real>
Polynomial1<Real>& operator*=(Polynomial1<Real>& p, Real scalar)
{
    return p = p * scalar;
}
// In-place scalar division; delegates to the binary operator, which
// asserts on a zero divisor.
template <typename Real>
Polynomial1<Real>& operator/=(Polynomial1<Real>& p, Real scalar)
{
    return p = p / scalar;
}
}
| 30.994992 | 101 | 0.5007 | [
"vector"
] |
36e369b0bdfa7dc426af914dd495fea21dabbfbd | 57,529 | h | C | Adobe/Illustrator/plugin/Illustrator2019SDK/illustratorapi/illustrator/IAIUnicodeString.h | MingboPeng/rhino.inside | 9fdb1cf1c72e8f16e2e75509c48d22b15e61bef5 | [
"MIT"
] | 317 | 2018-08-23T19:43:37.000Z | 2022-03-12T04:22:20.000Z | Adobe/Illustrator/plugin/Illustrator2019SDK/illustratorapi/illustrator/IAIUnicodeString.h | hiroyainage/rhino.inside | d4bc5d212a3e7492cc0dd18af541232c24bf9117 | [
"MIT"
] | 90 | 2018-10-23T17:11:15.000Z | 2022-03-12T12:47:39.000Z | Adobe/Illustrator/plugin/Illustrator2019SDK/illustratorapi/illustrator/IAIUnicodeString.h | hiroyainage/rhino.inside | d4bc5d212a3e7492cc0dd18af541232c24bf9117 | [
"MIT"
] | 153 | 2018-09-12T02:16:55.000Z | 2022-03-30T04:30:52.000Z | /*
* Name: IAIUnicodeString.h
* $Revision: 1 $
* Author:
* Date:
* Purpose: Interface to the wrapper class for AIUnicodeStringSuite & the
* primary interface for the AI core to
* the ai::UnicodeString objects.
*
* ADOBE SYSTEMS INCORPORATED
* Copyright 2004-2015 Adobe Systems Incorporated.
* All rights reserved.
*
* NOTICE: Adobe permits you to use, modify, and distribute this file
* in accordance with the terms of the Adobe license agreement
* accompanying it. If you have received this file from a source other
* than Adobe, then your use, modification, or distribution of it
* requires the prior written permission of Adobe.
*
*/
#ifndef _IAIUNICODESTRING_H_
#define _IAIUNICODESTRING_H_
#include "AITypes.h"
#include "AICharacterEncoding.h"
#include "IAIAutoBuffer.h"
#include <string>
#include <iterator>
#include <stdexcept>
#if defined(MAC_ENV)
#import <CoreFoundation/CFString.h>
#if defined _UTF16_BASIC_STRING_EXPORT_H_
#include _UTF16_BASIC_STRING_EXPORT_H_
#endif
#endif // defined(MAC_ENV)
/** @file IAIUnicodeString.h */
class CAIUnicodeStringImpl;
/** @ingroup Errors
An out-of-range index was used.
See \c #ai::UnicodeString */
#define kUnicodeStringBadIndex 'US!I'
/** @ingroup Errors
An attempt to create string longer than maximum allowed length.
See \c #ai::UnicodeString */
#define kUnicodeStringLengthError 'US#L'
/** @ingroup Errors
A string is malformed.
See \c #ai::UnicodeString */
#define kUnicodeStringMalformedError 'US!F'
namespace ai {
/** Constant Pascal string adapter object.  Wraps a non-owning pointer to
    a Pascal string (length-prefixed byte array) and exposes read-only
    access to it.  The wrapped data must outlive this object. */
class const_PStr
{
public:
    /** Constructor. Creates a constant Pascal string object from a Pascal string.
            @param pascalString The Pascal string; the pointer is stored, not copied.
            @return The new object. */
    explicit const_PStr(const unsigned char* pascalString) : fConstStr(pascalString) {};

    /** Destructor */
    virtual ~const_PStr() {}

    /** Copy constructor. Creates a constant Pascal string object that
        shares the same underlying pointer.
            @param p The object to copy.
            @return The new object. */
    const_PStr(const const_PStr& p) : fConstStr(p.fConstStr) {};

    /** Retrieves the wrapped Pascal string pointer.
            @return The pointer. */
    const unsigned char* get() const
    { return fConstStr; }

    /** Retrieves a byte from this string by index (index 0 is the
        Pascal length byte).
            @return The byte. */
    const unsigned char& operator[] (size_t i) const
    { return fConstStr[i]; }

protected:
    const unsigned char* fConstStr;

    // Assignment is declared but intentionally not defined, making the
    // class non-assignable.
    const_PStr& operator=(const const_PStr&);
};
/** Mutable Pascal string adapter object.  Like \c const_PStr, but grants
    write access to the wrapped, non-owning Pascal string pointer. */
class PStr : public const_PStr
{
public:
    /** Constructor. Creates a mutable Pascal string object from a Pascal string.
            @param pascalString The Pascal string; the pointer is stored, not copied.
            @return The new object. */
    explicit PStr(unsigned char* pascalString) : const_PStr(pascalString) {};

    /** Copy constructor. Creates a mutable Pascal string object that
        shares the same underlying pointer.
            @param p The object to copy.
            @return The new object. */
    PStr(const PStr& p) : const_PStr(p) {};

    /** Retrieves the wrapped Pascal string pointer as mutable.
        The const_cast is safe because a PStr can only be constructed
        from a mutable pointer.
            @return The pointer. */
    unsigned char* get() const
    { return const_cast<unsigned char*>(fConstStr); }

    /** Retrieves a mutable byte from this string by index (index 0 is
        the Pascal length byte).
            @return The byte. */
    unsigned char& operator[] (size_t i) const
    { return get()[i]; }

    ///** Convenience operator */
    //operator const const_PStr& ()
    //{ return *this; }
};
#if defined(WIN_ENV)
class UnicodeString;
/** In Windows only. An adapter object that helps move data back and forth
    between Windows functions and the UnicodeString class when the VisualStudio
    compiler's options are set to treat \c wchar_t as an intrinsic type.
    This class requires that \c UTF16 and \c wchar_t be the same size.
    This class provides a very thin object veneer on the Windows string
    pointer types. The object takes a copy of the data with which it is constructed.
*/
class WCHARStr
{
public:
    /** Type equivalent to Windows \c WCHAR type. */
    typedef wchar_t WCHAR;
    /** Type equivalent to Windows \c LPCWSTR type. */
    typedef const WCHAR* LPCWSTR;
    /** Type equivalent to Windows \c LPWSTR type. */
    typedef WCHAR* LPWSTR;

public:
    /** Constructs an empty \c WCHARStr object.
        @return The \c WCHARStr object.
    */
    WCHARStr () : fConstStr() {}

    /** Constructs a \c WCHARStr object from the contents of a UnicodeString.
        @param string The source Unicode string.
        @return The \c WCHARStr object.
    */
    WCHARStr (const ai::UnicodeString& string);

#if defined(_NATIVE_WCHAR_T_DEFINED)
    // This ctor is redundant if wchar_t is not an intrinsic. It cannot be distinguished from the const ASUnicode* ctor.
    /** Constructs a \c WCHARStr object from the contents pointed at by a 0-terminated
        \c LPCWSTR. It is assumed that the contents of the source are UTF-16 code units in
        platform byte order.
        @param wcharString The source string data, UTF-16 code units in platform byte order.
    */
    WCHARStr (LPCWSTR wcharString);
#endif // defined(_NATIVE_WCHAR_T_DEFINED)

    /** Constructs a \c WCHARStr object from the contents pointed at by a 0-terminated array of
        \c #ASUnicode. It is assumed that the contents of the source are UTF-16 code units in
        platform byte order.
        @param string The source string data, UTF-16 code units in platform byte order.
    */
    WCHARStr (const ASUnicode* string);

    /** Copy constructor, creates a copy of a \c WCHARStr object.
        @param p The object to copy.
    */
    WCHARStr (const WCHARStr& p) : fConstStr(p.fConstStr) {}

#if defined(AI_HAS_RVALUE_REFERENCES) && defined(AI_HAS_DEFAULTED_FUNCTIONS)
    /** Move constructor */
    WCHARStr (WCHARStr&&) = default;
#endif

    /** Destructor */
    virtual ~WCHARStr() {}

    /** Copy assignment operator */
    WCHARStr& operator= (const WCHARStr& rhs)
    {
        fConstStr = rhs.fConstStr;
        return *this;
    }

#if defined(AI_HAS_RVALUE_REFERENCES) && defined(AI_HAS_DEFAULTED_FUNCTIONS)
    /** Move assignment operator */
    WCHARStr& operator= (WCHARStr&&) = default;
#endif

    /** Converts the contents of this object to \c LPCWSTR.
        The returned value is only valid for the lifetime of this object.
        @return The \c LPCWSTR value.
    */
    LPCWSTR as_LPCWSTR () const
    {
        return fConstStr.c_str();
    }

    /** Converts the contents of this object to \c #ASUnicode.
        The returned value is only valid for the lifetime of this object.
        @return A pointer to the \c #ASUnicode value.
    */
    const ASUnicode* as_ASUnicode () const;

    /** Computes the length of the string contents of this object.
        @return The number of \c WCHAR characters.
    */
    size_t length() const;

    /* Convenience operators */
#if defined(_NATIVE_WCHAR_T_DEFINED)
    // This operator is redundant if wchar_t is not an intrinsic. It cannot be distinguished from the const ASUnicode* operator.
    operator LPCWSTR () const
    {
        return as_LPCWSTR();
    }
#endif //defined(_NATIVE_WCHAR_T_DEFINED)
    operator const ASUnicode* () const
    {
        return as_ASUnicode();
    }

protected:
    // Owned copy of the string data; see class comment.
    std::basic_string<WCHAR> fConstStr;
};
#endif /* WIN_ENV */
/** \c UnicodeString objects provide basic string functionality for
Unicode-based strings, similar to that provided by the standard C string library.
The contents of the string are treated as if all characters are represented
as full 32-bit Unicode characters, which means you do not need to know the
internal representation of the string, or deal with surrogate pairs during
editing or searching operations.
*/
class UnicodeString {
public:
/** A numeric relative offset type (signed integer). */
typedef ai::sizediff_t offset_type;
/** A numeric value type (unsigned integer). */
typedef size_t size_type;
/** Value type for a UTF-32 character code. */
typedef ASUInt32 UTF32TextChar;
/** Value type for a UTF-16 character code. */
typedef ASUnicode UTF16Char;
typedef UTF32TextChar value_type;
/** The maximum number of characters possible in a string. This also indicates a failure if used as return value. */
static const size_type npos;
/** Normalization forms for use with the \c #normalize() method.
See http://www.unicode.org/reports/tr15/ for more information. */
enum NormalizedForm {
/** Canonical Decomposition */
kForm_NFD = 0,
/** Canonical Decomposition, followed by Canonical Composition */
kForm_NFC,
/** Compatibility Decomposition */
kForm_NFKD,
/** Compatibility Decomposition, followed by Canonical Composition. */
kForm_NFKC,
/** Do not use this */
kForm_DummyValue = 0xFFFFFFFF
};
/** TBD */
class Collator;
// ai::UnicodeString doesn't support non-const iterators because
// its operator[] or at() doesn't return a reference to value_type
// and for the same reason const_iterator doesn't overload operator->()
class const_iterator;
friend class const_iterator;
public:
class const_iterator
: public std::iterator<std::random_access_iterator_tag, UnicodeString::value_type>
{
public:
typedef const_iterator self_type;
typedef UnicodeString::size_type size_type;
typedef const UnicodeString* container_type;
// default construct NULL iterator
// can't be dereferenced, incremented or decremented
const_iterator() : fIndex(0), fStringContainer(nullptr)
{
}
// construct iterator corresponding to a given index in the container
const_iterator(size_type index, container_type container)
: fIndex(index), fStringContainer(container)
{
}
const_iterator(const self_type& rhs) = default; // copy constructor
self_type& operator=(const self_type& rhs) = default; // copy assignment
value_type operator*() const
{
if (!fStringContainer || fIndex >= fStringContainer->size())
{
throw std::out_of_range{"string iterator not dereferencable"};
}
return (*fStringContainer)[fIndex];
}
self_type& operator++() // preincrement
{
++fIndex;
return (*this);
}
self_type operator++(int) // postincrement
{
auto temp = *this;
++(*this);
return temp;
}
self_type& operator--() // predecrement
{
--fIndex;
return (*this);
}
self_type operator--(int) // postdecrement
{
auto temp = *this;
--(*this);
return temp;
}
self_type& operator+=(difference_type offset) // increment by integer
{
fIndex += offset;
return (*this);
}
self_type operator+(difference_type offset) const // return this + integer
{
self_type temp = *this;
return (temp += offset);
}
self_type& operator-=(difference_type offset) // decrement by integer
{
fIndex -= offset;
return (*this);
}
self_type operator-(difference_type offset) const // return this - integer
{
self_type temp = *this;
return (temp -= offset);
}
difference_type operator-(const self_type& rhs) const // return difference of iterators
{
return (fIndex - rhs.fIndex);
}
bool operator==(const self_type& rhs) const // test for iterator equality
{
return (fIndex == rhs.fIndex);
}
bool operator!=(const self_type& rhs) const // test for iterator inequality
{
return !(*this == rhs);
}
bool operator<(const self_type& rhs) const // test if this < rhs
{
return (fIndex < rhs.fIndex);
}
bool operator>(const self_type& rhs) const // test if this > rhs
{
return (rhs < *this);
}
bool operator<=(const self_type& rhs) const // test if this <= rhs
{
return (!(rhs < *this));
}
bool operator>=(const self_type& rhs) const // test if this >= rhs
{
return (!(*this < rhs));
}
private:
bool compatible(const self_type& rhs) const
{
return (fStringContainer->fImpl == rhs.fStringContainer->fImpl);
}
private:
size_type fIndex = 0;
container_type fStringContainer;
};
//----------------------------------------------------------------------
/** @name Constructors & Destructor */
//----------------------------------------------------------------------
//@{
/** Empty string constructor. Creates a valid, empty string. This method is guaranteed
to not throw any exceptions. */
explicit UnicodeString () AINOTHROW;
/** Constructs a UnicodeString from an encoded byte array.
@param string Array of bytes to construct from.
@param srcByteLen Length of the array.
@param encoding Encoding of the contents of the byte array. Default
is the current platform encoding
*/
explicit UnicodeString (const char* string, offset_type srcByteLen,
AICharacterEncoding encoding = kAIPlatformCharacterEncoding);
/** Constructs a UnicodeString using a copies of a single character.
@param count Number of characters to insert in this string.
@param ch The UTF32 code point (character) with which to initialize the string.
*/
explicit UnicodeString (size_type count, UTF32TextChar ch);
/** Convenience constructor from a \c char buffer that is known to be 0 terminated.
@param string Initial contents.
@param encoding Encoding of the contents of the byte array.
*/
explicit UnicodeString (const char* string, AICharacterEncoding encoding = kAIPlatformCharacterEncoding);
/** Convenience constructor from a \c std::string.
@param string Initial contents.
@param encoding Encoding of the contents of the byte array.
*/
explicit UnicodeString (const std::string& string, AICharacterEncoding encoding = kAIPlatformCharacterEncoding);
/** Constructor from 0-terminated, platform-byte-ordered, UTF-16 array.
Surrogate pairs are okay. Exception raised if string is malformed.
@param string Pointer to a valid 0-terminated array of UTF-16 \c ASUnicode.
*/
explicit UnicodeString (const ASUnicode* string);
/** Constructor from the ZString referred to by a ZString key.
@param zStringKey The ZString key.
*/
explicit UnicodeString (const ZRef zStringKey);
/** Constructor from a non-0-terminated platform-byte-ordered, UTF-16 array.
Surrogate pairs are okay. Exception raised if string is malformed.
@param string Pointer to an array of UTF-16 \c ASUnicode.
@param srcUTF16Count The number of UTF-16 code units to be read from string.
*/
explicit UnicodeString (const ASUnicode* string, size_type srcUTF16Count);
/** Constructor from a \c std::basic_string of platform-byte-ordered \c ASUnicode characters.
Surrogate pairs are okay. Exception raised if string is malformed.
@param string A \c std::basic_string<ASUnicode>.
*/
explicit UnicodeString (const std::basic_string<ASUnicode>& string);
/** Copy Constructor, creates a copy of a string.
@param s The string to copy.
*/
UnicodeString (const UnicodeString& s);
#ifdef AI_HAS_RVALUE_REFERENCES
/** Move Constructor
@param other The string to move from.
*/
UnicodeString(UnicodeString&& other) AINOEXCEPT;
#endif // AI_HAS_RVALUE_REFERENCES
/** Destructor */
~UnicodeString ();
//@}
//----------------------------------------------------------------------
/** @name Factory methods */
//----------------------------------------------------------------------
//@{
/** Constructs a UnicodeString from a
non-zero-terminated \c char array whose encoding is ISO Roman/Latin.
@param string The \c char array.
@param count the number of bytes (chars) in the string.
@return The Unicode string object.
*/
static UnicodeString FromRoman (const char* string, size_type count);
/** Constructs a UnicodeString from a
zero-terminated \c char array whose encoding is ISO Roman/Latin.
@param string The \c char array.
@return The Unicode string object.
*/
static UnicodeString FromRoman (const char* string);
/** Constructs a UnicodeString from a
\c std::string whose encoding is ISO Roman/Latin.
@param string The string.
@return The Unicode string object.
*/
static UnicodeString FromRoman (const std::string& string);
/** Constructs a UnicodeString from a
Pascal string whose encoding is ISO Roman/Latin.
@param pascalString The Pascal string.
@return The Unicode string object.
*/
static UnicodeString FromRoman (const const_PStr& pascalString);
/** Constructs a UnicodeString from a
non-zero-terminated \c char array whose encoding is the current platform encoding.
@param string The \c char array.
@param count The number of bytes (chars) in the string.
@return The Unicode string object.
*/
static UnicodeString FromPlatform (const char* string, size_type count);
/** Constructs a UnicodeString from a
zero-terminated \c char array whose encoding is the current platform encoding.
@param string The \c char array.
@return The Unicode string object.
*/
static UnicodeString FromPlatform (const char* string);
/** Constructs a UnicodeString from a
\c std::string whose encoding is the current platform encoding.
@param string The string.
@return The Unicode string object.
*/
static UnicodeString FromPlatform (const std::string& string);
/** Constructs a UnicodeString from a
Pascal string whose encoding is the current platform encoding.
@param pascalString The Pascal string.
@return The Unicode string object.
*/
static UnicodeString FromPlatform (const const_PStr& pascalString);
/** Constructs a UnicodeString from a
zero-terminated \c char array whose encoding is UTF8.
@param string The \c char array.
@return The Unicode string object.
*/
static UnicodeString FromUTF8 (const char* string);
/** Constructs a UnicodeString from a
\c std::string whose encoding is UTF8.
@param string The string.
@return The Unicode string object.
*/
static UnicodeString FromUTF8 (const std::string& string);
/** Constructs a UnicodeString from a
Pascal string whose encoding is UTF8.
@param pascalString The Pascal string.
@return The Unicode string object.
*/
static UnicodeString FromUTF8 (const const_PStr& pascalString);
//@}
/* Basic operations (as in std::basic_string) */
/** Appends a string to this string.
@param str The string to append.
@return A reference to this Unicode string object.
*/
UnicodeString& append (const UnicodeString& str);
/** Appends a substring of a given string to this string.
@param str The source string of the substring.
@param startOffset The 0-based index of the first character in the substring.
@param count The maximum number of characters in the substring.
@return A reference to this Unicode string object.
*/
UnicodeString& append (const UnicodeString& str, size_type startOffset,
size_type count);
/** Appends a given number of copies of a given character to this string.
@param count The number of characters to append.
@param ch The character to append.
@return A reference to this Unicode string object.
*/
UnicodeString& append (size_type count, UTF32TextChar ch)
{ return append(UnicodeString(count, ch)); }
/** Replaces the contents of this string with another string.
@param str The new contents.
@return A reference to this Unicode string object.
*/
UnicodeString& assign (const UnicodeString& str);
/** Replaces the contents of this string with a substring of another string.
	Extracts the requested substring first, then delegates to
	\c #assign(const UnicodeString&).
	@param str The source of the substring.
	@param offset The 0-based index of the first character in the substring.
	@param count The maximum number of characters in the substring.
	@return A reference to this Unicode string object.
*/
UnicodeString& assign (const UnicodeString& str, size_type offset,
size_type count)
{
	const UnicodeString piece = str.substr(offset, count);
	return assign(piece);
}
/** Retrieves the UTF32 code point (character) from a string by position index.
Raises \c #kUnicodeStringBadIndex exception if offset is out of range.
@param offset The 0-based index of the character.
@return The UTF32 value of the character.
*/
UTF32TextChar at (size_type offset) const;
/** Erases all characters in this string. Same as <code>erase(0, npos)</code>.
Uninitialized strings remain unchanged.
@return Nothing.
@see \c #erase()
*/
void clear ();
/** Compares this string and another string for equality.
Uninitialized strings are equal to other uninitialized strings and
to empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param str The string to compare against (right side of compare).
@return Zero if the two strings are equal.
<br> Negative if this string is less than \c str.
<br> Positive if this string is greater than \c str.
*/
ai::int32 compare (const UnicodeString& str) const;
/** Compares this string and a substring of another string for equality.
Uninitialized strings are equal to other uninitialized strings and
to empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param pos The 0-based index of the first character of the substring.
@param num The number of characters to compare.
@param str The source of the substring to compare against (right side of compare).
@return Zero if this string and the substring are equal.
<br> Negative if this string is less than the substring of \c str.
<br> Positive if this string is greater than the substring of \c str.
*/
ai::int32 compare (size_type pos, size_type num, const UnicodeString& str) const;
/** Compares a substring of this string and a substring of another string for equality.
Uninitialized strings are equal to other uninitialized strings and
to empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param pos The 0-based index of the first character of the substring in this string.
@param num The number of characters to compare in this string.
@param str The source of the substring to compare against (right side of compare).
@param startOffset The 0-based index of the first character of the substring of \c str.
@param count The number of characters from \c str to compare.
@return Zero if the two substrings are equal.
<br> Negative if the substring of this string is less than the substring of \c str.
<br> Positive if the substring of this string is greater than the substring of \c str.
*/
ai::int32 compare (size_type pos, size_type num, const UnicodeString& str,
size_type startOffset, size_type count) const;
/** Retrieves the number of characters (UTF code points) in this string.
@return The number of UTF code points in this string.
*/
size_type length () const;
/** Reports whether this string is an empty string.
@return True if the string is empty.
*/
bool empty () const;
/** Removes characters (UTF code points) from this string.
@param pos The 0-based position index of the first character to remove.
@param count The number of characters to remove.
@return A reference to this Unicode string object.
*/
UnicodeString& erase (size_type pos=0, size_type count = npos);
/** Erases the single character addressed by \c position.
	@param position Iterator to the character to remove.
	@return Iterator pointing to the character immediately following the
	character erased, or end() if no such character exists.
*/
const_iterator erase (const_iterator position)
{
	const size_type where = position - begin();
	erase(where, 1);
	return begin() + where;
}
/** Erases the substring [first, last) from this string.
	@param first, last Range of the characters to remove.
	@return Iterator pointing to the character last pointed to before the
	erase, or end() if no such character exists.
*/
const_iterator erase (const_iterator first, const_iterator last)
{
	const size_type where = first - begin();
	const size_type howMany = last - first;
	erase(where, howMany);
	return begin() + where;
}
/** Searches for a character in this string.
@param ch The character to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@return The 0-based position index of the found character, or
the number of characters in this string (\c #npos)
if the character is not found.
*/
size_type find (UTF32TextChar ch, size_type startOffset = 0 ) const;
/** Searches for a string within a subset of this string.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@return The 0-based position index of the first character of the target
string within this string, if found, or the number of
characters in this string (\c #npos)
if the target string is not found.
*/
size_type find (const UnicodeString& target, size_type startOffset = 0) const;
/** Searches for a substring within a subset of this string.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in the target string to search for.
@return The 0-based position index of the first character of the target
substring within this string, if found, or (\c #npos)
if the target substring is not found.
*/
size_type find (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for a character in this string, performing a caseless compare.
	Wraps the character in a one-character string and delegates to the
	three-argument overload of \c #caseFind().
	@param ch The character to search for.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the found character, or (\c #npos)
	if the character is not found.
*/
size_type caseFind (UTF32TextChar ch, size_type startOffset = 0 ) const
{
	const ai::UnicodeString single(1, ch);
	return caseFind(single, startOffset, 1);
}
/** Searches for a string within a subset of this string, performing a caseless compare.
	Delegates to the three-argument overload, comparing the full target.
	@param target The string to search for.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the first character of the target
	string within this string, if found, or (\c #npos)
	if the target string is not found.
*/
size_type caseFind (const UnicodeString& target, size_type startOffset = 0) const
{
	const size_type targetLength = target.length();
	return caseFind(target, startOffset, targetLength);
}
/** Searches for a substring within a subset of this string, performing a caseless compare.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in the target string to search for.
@return The 0-based position index of the first character of the target
substring within this string, if found, or (\c #npos)
if the target substring is not found.
*/
size_type caseFind (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for a character in this string,
moving backward from a point in this string.
@param ch The character to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@return The 0-based position index of the found character, or (\c #npos)
if the character is not found.
*/
size_type rfind (UTF32TextChar ch, size_type startOffset = npos ) const;
/** Searches for a string within a subset of this string,
moving backward from a point in this string.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@return The 0-based position index of the first character of the target
string within this string, if found, or (\c #npos)
if the target string is not found.
*/
size_type rfind (const UnicodeString& target, size_type startOffset = npos) const;
/** Searches for a substring within a subset of this string,
moving backward from a point in this string.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in the target string to search for.
@return The 0-based position index of the first character of the target
substring within this string, if found, or (\c #npos)
if the target substring is not found.
*/
size_type rfind (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for the first character of a string within a subset of this string.
	Delegates to the three-argument overload with an unbounded count.
	NOTE(review): by analogy with \c std::basic_string::find_first_of this
	likely matches any single character of \c target rather than the whole
	string — confirm against the three-argument overload's implementation.
	@param target The string to search for.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the first character of the target
	string within this string, if found, or (\c #npos)
	if the first character of the target string is not found.
*/
size_type find_first_of (const UnicodeString& target, size_type startOffset = 0) const
{ return find_first_of(target, startOffset, npos); }
/** Searches for the first character of a string within a subset of this string.
@param target The string to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in the substring to search.
@return The 0-based position index of the first character of the target
string within this string, if found, or (\c #npos)
if the character is not found.
*/
size_type find_first_of (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for the last character of a string within a subset of this string.
	Delegates to the three-argument overload with an unbounded count.
	@param target The string containing the character to search for.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the last character of the target
	string within this string, if found, or (\c #npos)
	if the character is not found.
*/
size_type find_last_of (const UnicodeString& target, size_type startOffset = npos) const
{ return find_last_of(target, startOffset, npos); }
/** Searches for the last character of a string within a subset of this string.
@param target The string containing the character to search for.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in this string in which to search.
@return The 0-based position index of the last character of the target
string within this string, if found, or (\c #npos)
if the character is not found.
*/
size_type find_last_of (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for the first occurrence in this string of a character
	that is not in a target string.
	Delegates to the three-argument overload with an unbounded count.
	@param target The target string.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the character within this string,
	if found, or (\c #npos) if the character is not found.
*/
size_type find_first_not_of (const UnicodeString& target, size_type startOffset = 0) const
{ return find_first_not_of(target, startOffset, npos); }
/** Searches for the first occurrence in a subset of this string of a character
that is not in a target string.
@param target The target string.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in this string to search.
@return The 0-based position index of the character within this string,
if found, or (\c #npos) if the character is not found.
*/
size_type find_first_not_of (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Searches for the last occurrence in this string of a character
	that is not in a target string.
	Delegates to the three-argument overload with an unbounded count.
	@param target The target string.
	@param startOffset The 0-based position index in this string at which to start the search.
	@return The 0-based position index of the character within this string,
	if found, or (\c #npos) if the character is not found.
*/
size_type find_last_not_of (const UnicodeString& target, size_type startOffset = npos) const
{ return find_last_not_of (target, startOffset, npos); }
/** Searches for the last occurrence in a subset of this string of a character
that is not in a target string.
@param target The target string.
@param startOffset The 0-based position index in this string at which to start the search.
@param count The number of characters in this string to search.
@return The 0-based position index of the character within this string,
if found, or (\c #npos) if the character is not found.
*/
size_type find_last_not_of (const UnicodeString& target, size_type startOffset, size_type count) const;
/** Replaces a substring in this string with another string.
	Delegates to the five-argument overload, using the whole of \c str
	as the replacement.
	@param pos The 0-based position index in this string at which to start the replacement.
	@param num The number of characters in this string to replace.
	@param str The replacement string.
	@return This string object.
*/
UnicodeString& replace (size_type pos, size_type num, const UnicodeString& str)
{ return replace(pos, num, str, 0, npos); }
/** Replaces a substring in this string with a substring of another string.
	Delegates to the five-argument overload, with the replacement substring
	starting at the first character of \c str.
	@param pos The 0-based position index in this string at which to start the replacement.
	@param num The number of characters in this string to replace.
	@param str The replacement string.
	@param count The number of characters in the replacement substring, starting at the first character.
	@return This string object.
*/
UnicodeString& replace (size_type pos, size_type num, const UnicodeString& str, size_type count)
{ return replace(pos, num, str, 0, count); }
/** Replaces a substring in this string with a substring of another string.
@param pos The 0-based position index in this string at which to start the replacement.
@param num The number of characters in this string to replace.
@param str The replacement string.
@param startOffset The 0-based position index of the first character of the replacement substring.
@param count The number of characters in the replacement substring, starting at the offset.
@return This string object.
*/
UnicodeString& replace (size_type pos, size_type num, const UnicodeString& str,
size_type startOffset, size_type count);
/** Inserts a string into this string, inserting as many characters as will fit.
	Delegates to the four-argument overload, inserting the whole of \c str.
	NOTE(review): the out-of-line implementation inserts *at* \c insertOffset
	(before the character currently at that index), not after it — the
	phrasing below follows the rest of this header.
	@param insertOffset The 0-based position index in this string
	after which to start the insertion.
	@param str The string to be partially or wholly inserted.
	@return This string object.
*/
UnicodeString& insert (size_type insertOffset, const UnicodeString& str)
{ return insert(insertOffset, str, 0, npos); }
/** Inserts a substring into this string.
@param insertOffset The 0-based position index in this string
after which to start the insertion.
@param str The source string of the substring to insert.
@param offset The 0-based position index of the first character
of the substring to insert.
@param count The number of characters to insert.
@return This string object.
*/
UnicodeString& insert (size_type insertOffset, const UnicodeString& str,
size_type offset, size_type count);
/** Inserts \c count copies of a character into this string.
	Builds a temporary run of characters and delegates to the substring
	overload of \c #insert().
	@param insertOffset The 0-based position index in this string
	at which to insert.
	@param count The number of copies of the character to insert.
	@param ch The character to insert.
	@return This string object.
*/
UnicodeString& insert (size_type insertOffset, size_type count,
UTF32TextChar ch)
{
	UnicodeString run(count, ch);
	return insert(insertOffset, run, 0, count);
}
/** Appends a single character to the end of this string.
	@param ch The character to append.
	@return Nothing.
*/
void push_back(UTF32TextChar ch)
{
	// Delegates to append(); the returned reference is intentionally unused.
	append(1, ch);
}
/** Resizes this string, truncating it or adding characters as needed.
@param count The new number of characters.
@param ch The UTF code point value with which to initialize new elements,
if the size of the string is increased.
@return Nothing.
*/
void resize (size_type count, UTF32TextChar ch = UTF32TextChar());
/** Retrieves the number of characters (UTF code points) in this string.
	This is the same as \c #length(); provided for
	\c std::basic_string-style compatibility.
	@return The number of UTF code points in this string.
*/
ai::UnicodeString::size_type size () const
{ return length(); }
/** Creates a copy of a substring of this string.
@param offset The 0-based position index of the first character
to be copied to the substring.
@param count The maximum number of characters to copy to the new substring.
@return A \c UnicodeString object containing the requested substring.
*/
UnicodeString substr (size_type offset = 0, size_type count = npos) const;
/** Swaps the contents of this string with another string.
@param str The string to swap.
@return Nothing.
*/
void swap (UnicodeString& str) AINOEXCEPT;
/* Operators */
/** Assignment operator.
@param rhs The Unicode string object to assign into this one.
@return A reference to this string.
*/
UnicodeString& operator= (const UnicodeString& rhs);
#ifdef AI_HAS_RVALUE_REFERENCES
/** Move Assignment operator.
@param rhs The Unicode string object to be moved from.
@return A reference to this string.
*/
UnicodeString& operator= (UnicodeString&& rhs) AINOEXCEPT;
#endif // AI_HAS_RVALUE_REFERENCES
/** Append operator. Appends a single character by delegating to
	\c #append(size_type, UTF32TextChar).
	@param ch The character to append to this string.
	@return A reference to this string.
*/
UnicodeString& operator+= (UTF32TextChar ch)
{ return append(1, ch); }
/** Append operator. Appends a whole string by delegating to
	\c #append(const UnicodeString&).
	@param rhs The Unicode string object to append to this one.
	@return A reference to this string.
*/
UnicodeString& operator+= (const UnicodeString& rhs)
{ return append(rhs); }
/** Retrieves a character from this string.
@param offset The 0-based position index of the character.
@return The character.
@note This behavior differs from \c std::basic_string operator[].
This method returns the character at the given offset.
\c std::basic_string::operator[] returns a writeable reference
to the element at the given offset.
*/
UTF32TextChar operator[] (size_type offset) const;
/** Equality operator. Does a simple, direct, code-point-based
	comparison (via \c #compare()); no normalization or case folding.
	@param rhs The string with which to compare this string.
	@return True if the strings are equal.
*/
bool operator== (const UnicodeString& rhs) const
{ return compare(rhs) == 0; }
/** Non-equality operator. Does a simple, direct, code-point-based
	comparison; defined as the negation of \c #operator==().
	@param rhs The string with which to compare this string.
	@return True if the strings are not equal.
*/
bool operator!= (const UnicodeString& rhs) const
{ return !(*this == rhs); }
/** Less-than operator. Does a simple, direct, code-point-based
	comparison (via \c #compare()); no normalization or case folding.
	@param rhs The string with which to compare this string.
	@return True if this string is less than the comparison string.
*/
bool operator< (const UnicodeString& rhs) const
{ return compare(rhs) < 0; }
/** Iterator support.
	@return A read-only iterator addressing the first character.
*/
const_iterator begin() const
{
return const_iterator(0, this);
}
/** Iterator support.
	@return A read-only iterator addressing one past the last character.
*/
const_iterator end() const
{
return const_iterator(this->size(), this);
}
/* non-std::basic_string based functionality */
/** Convert the characters in this to lower case following the conventions of
the default locale.
@return A reference to this.
*/
UnicodeString& toLower();
/**
Convert the characters in this to UPPER CASE following the conventions of
the default locale.
@return A reference to this.
*/
UnicodeString& toUpper();
/** Compares this string with another string, doing a caseless
	code-point-based comparison. Delegates to the substring overload
	over the full range of both strings.
	Uninitialized strings are equal to other uninitialized strings and
	empty strings. Uninitialized and empty strings are less than initialized,
	non-empty strings.
	@param str The comparison string (right side of compare).
	@return 0 if the strings are equal.
	<br> Positive if this string is greater than the comparison string.
	<br> Negative if this string is less than the comparison string.
*/
ai::int32 caseCompare (const UnicodeString& str) const
{ return caseCompare(0, npos, str, 0, npos); }
/** Compares this string with a substring of another string, doing a caseless
	code-point-based comparison. Delegates to the five-argument overload,
	using the whole of \c str on the right-hand side.
	Uninitialized strings are equal to other uninitialized strings and
	empty strings. Uninitialized and empty strings are less than initialized,
	non-empty strings.
	@param pos The 0-based position index of the first character of the substring.
	@param num The number of characters to compare.
	@param str The comparison string (right side of compare).
	@return 0 if this string and the substring are equal.
	<br> Positive if this string is greater than the substring.
	<br> Negative if this string is less than the substring.
*/
ai::int32 caseCompare (size_type pos, size_type num, const UnicodeString& str) const
{ return caseCompare(pos, num, str, 0, npos); }
/** Compares a substring of this string with a substring of another string, doing a caseless
code-point-based comparison.
Uninitialized strings are equal to other uninitialized strings and
empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param pos The 0-based position index of the first character of
the substring of this string.
@param num The number of characters in the substring of this string.
@param str The comparison string (right side of compare).
@param startOffset The 0-based position index of the first character of
the substring of the comparison string.
@param count The number of characters in the substring of the comparison string.
@return 0 if this substring and the comparison substring are equal.
<br> Positive if this substring is greater than the comparison substring.
<br> Negative if this substring is less than the comparison substring.
*/
ai::int32 caseCompare (size_type pos, size_type num, const UnicodeString& str,
size_type startOffset, size_type count) const;
/** Compares this string with another string for canonical equivalence
of their normalized forms (NFD or NFC). This requires temporary
allocation of memory, and can throw out-of-memory errors.
Uninitialized strings are equal to other uninitialized strings and
empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param str The comparison string (right side of compare).
@return 0 if the strings are equal.
<br> Positive if this string is greater than the comparison string.
<br> Negative if this string is less than the comparison string.
@note Substring comparison of non-normalized strings is not directly available.
You can create substrings and pass them to this method.
*/
ai::int32 canonicalCompare (const UnicodeString& str) const;
/** Compares this string with another string for canonical equivalence
of their normalized forms (NFD or NFC), performing a caseless compare.
This requires temporary allocation of memory, and can throw out-of-memory errors.
Uninitialized strings are equal to other uninitialized strings and
empty strings. Uninitialized and empty strings are less than initialized,
non-empty strings.
@param str The comparison string (right side of compare).
@return 0 if the strings are equal.
<br> Positive if this string is greater than the comparison string.
<br> Negative if this string is less than the comparison string.
@note Substring comparison of non-normalized strings is not directly available.
You can create substrings and pass them to this method.
*/
ai::int32 canonicalCaseCompare (const UnicodeString& str) const;
/** Normalizes this string in place into the specified form.
Normalization may require re-allocation of the string. If this happens,
the method can throw out-of-memory errors. On failure, this
string remains unmodified.
@param form The form with which to normalize the string.
@return A reference to this string.
*/
UnicodeString& normalize (NormalizedForm form);
/** Reports whether this string contains surrogate pairs.
@return True if there are surrogate pairs in this string.
*/
bool hasSurrogates () const;
/** Creates a UTF-16 string from the contents of this string, in platform byte order.
This method does not allocate any memory, and returns in constant time.
@param buffer A reference to a const pointer to \c UTF16Char in which
to return the contents of this string as UTF-16 code units. Can be
0 if this string is empty.
This buffer pointer is only valid at most for the lifetime of this string.
The buffer is not guaranteed to be 0 terminated.
Use \c #as_ASUnicode() to guarantee a 0-terminated buffer.
@return The number of UTF-16 characters in the returned buffer.
*/
size_type utf_16 (const UTF16Char*& buffer) const;
/** Creates a 0-terminated UTF-16 string from the contents of this string,
in platform byte order.
@return The contents of this string as UTF-16 code units.
*/
std::basic_string<ASUnicode> as_ASUnicode ( ) const;
/** Creates a UTF-8 string from the contents of this string.
@return The contents of this string as a UTF-8 encoded \c std::string.
*/
std::string as_UTF8 ( ) const;
/** Creates a platform-encoded version of this string.
@return The contents of this string as a platform-encoded \c std::string. */
std::string as_Platform () const;
/** Creates an ISO Latin/Roman-encoded version of this string.
@return The contents of this string as an ISO Latin/Roman-encoded \c std::string. */
std::string as_Roman () const;
/** Copies the contents of this string to a provided buffer. This method
follows the \c strlcpy paradigm.
@param buffer A pointer to a byte array in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@param encoding The encoding for the result.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type getToBuffer ( char* buffer, size_type bufferMax, AICharacterEncoding encoding ) const;
/** Copies the contents of this string to a Pascal string in a provided buffer.
@param pascalString A pointer to a Pascal string in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@param encoding The encoding for the result.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type getToBuffer ( const PStr& pascalString, size_type bufferMax, AICharacterEncoding encoding ) const;
/** Copies the contents of this string to a Unicode string in a provided buffer.
@param buffer A pointer to a Unicode string in which to return the 0-terminated copy.
@param bufferMax The maximum count of UTF-16 code units (\c ASUnicode) to write to the
buffer including a 0 terminator. Or, pass 0 to request
the required buffer size.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type as_ASUnicode (ASUnicode* buffer, size_type bufferMax ) const;
/** Copies the contents of this string to a platform-encoded Pascal string in a provided buffer.
@param pascalString A pointer to a Pascal string in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type as_Platform ( const PStr& pascalString, size_type bufferMax ) const;
/** Copies the contents of this string to a C string in a provided buffer.
@param buffer A pointer to a C string in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type as_Platform ( char* buffer, size_type bufferMax ) const;
/** Copies the contents of this string to an ISO Latin/Roman-encoded C string in a provided buffer.
@param buffer A pointer to a C string in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type as_Roman ( char* buffer, size_type bufferMax ) const;
/** Copies the contents of this string to an ISO Latin/Roman-encoded Pascal string in a provided buffer.
@param pascalString A pointer to a Pascal string in which to return the 0-terminated copy.
@param bufferMax The maximum byte count to write to the buffer, including the 0 terminator,
or \c NULL to return the required size.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator.
*/
size_type as_Roman ( const PStr& pascalString, size_type bufferMax ) const;
/** Creates a \c std::string from the contents of this string. Can throw
an out-of-memory error.
@param encoding The encoding for the result.
@return The string result.
*/
std::string getInStdString (AICharacterEncoding encoding) const;
/** Copies the contents of this string to an \c ai::AutoBuffer<char>.
@param encoding The encoding for the result.
@param b The buffer in which to return the result. The resulting
contents might not be 0 terminated.
@return The number of characters (bytes) returned in the buffer.
*/
size_type getAs (AICharacterEncoding encoding, ai::AutoBuffer<char>& b) const;
#if defined(MAC_ENV)
//----------------------------------------------------------------------
/** @name Mac OS-specific Methods */
//----------------------------------------------------------------------
//@{
/** Constructs a Unicode string object from a Mac OS \c CFString.
@param cfString The source \c CFString.
*/
explicit UnicodeString (const CFStringRef& cfString);
/** Converts the contents of this string to a Mac OS \c CFString.
@param alloc A valid \c CFAllocatorRef for allocating the \c CFString.
@return An immutable \c CFString object containing chars, or \c NULL if there was a problem
creating the object. You must dispose of this object when it is no longer needed.
*/
CFStringRef as_CFString (CFAllocatorRef alloc) const;
//@}
#endif // defined(MAC_ENV)
#if defined(WIN_ENV)
//----------------------------------------------------------------------
/** @name Windows-specific Methods */
//----------------------------------------------------------------------
//@{
/** Constructs a Unicode string object from a Windows \c WCHARStr wrapper object.
@param string The source \c WCHARStr.
*/
explicit UnicodeString (const WCHARStr& string);
/** Converts the contents of this string to a \c WCHARStr wrapper object.
@return The \c WCHARStr object. Contents are UTF-16 encoded.
*/
WCHARStr as_WCHARStr () const;
/** Copies the contents of this string to a provided buffer.
@param buffer A pointer to a buffer of \c WCHARStr::LPWSTR in which to return the
0-terminated copy.
@param bufferMax The maximum count of \c WCHARStr::LPWSTR to write to the buffer.
@return The size needed to write the entire string contents to a buffer,
including a 0 terminator. Returned size is the count of \c WCHARStr::LPWSTR
needed.
*/
size_type as_WCHARStr (WCHARStr::LPWSTR buffer, size_type bufferMax ) const;
//@}
#endif // defined(WIN_ENV)
public: // internal use public interface
void deleteImpl();
protected:
explicit UnicodeString(class CAIUnicodeStringImpl* impl);
private:
CAIUnicodeStringImpl* fImpl;
};
/** Append operator. Concatenates two Unicode strings into a new string.
	@param lhs The left-hand string; taken by value so the append can work
	on the copy.
	@param rhs The Unicode string object which is to be appended.
	@return A new Unicode string object holding the concatenation.
*/
inline UnicodeString operator+(UnicodeString lhs, const UnicodeString& rhs)
{
	lhs.append(rhs);
	return lhs;
}
///////////////////////////////////////////////////////////
// Inline implementations - Yes, some of these could cause code bloat.
// These could be moved to the IAIUnicodeString.inl file.
//
///////////////////////////////////////////////////////////
// UnicodeString inlines
///////////////////////////////////////////////////////////
// Exchanges the implementation pointers of the two strings.
// O(1) and never throws, which makes it safe to use from the move
// assignment operator above.
inline void UnicodeString::swap(UnicodeString& str) AINOEXCEPT
{
	CAIUnicodeStringImpl* const tmp = fImpl;
	fImpl = str.fImpl;
	str.fImpl = tmp;
}
#ifdef AI_HAS_RVALUE_REFERENCES
// Move Constructor: steals the source's implementation pointer and
// leaves the source empty (null impl), so no allocation takes place.
inline UnicodeString::UnicodeString(UnicodeString&& other) AINOEXCEPT
	: fImpl{other.fImpl}
{
	other.fImpl = nullptr;
}
// Move Assignment operator: implemented as a swap; this string's old
// implementation is handed to \c rhs and released when \c rhs is destroyed.
inline UnicodeString& UnicodeString::operator=(UnicodeString&& rhs) AINOEXCEPT
{
	swap(rhs);
	return *this;
}
#endif // AI_HAS_RVALUE_REFERENCES
// Inserts up to \c count characters of \c str (starting at \c offset)
// into this string at position \c insertOffset.
// Throws ai::Error(kUnicodeStringBadIndex) if either offset lies past
// the end of its respective string. Implemented by building the result
// in a temporary (head + inserted slice + tail) and assigning it back,
// so the operation is all-or-nothing if append throws.
inline UnicodeString& UnicodeString::insert (size_type insertOffset, const UnicodeString& str,
											 size_type offset, size_type count)
{
	if ( insertOffset > length() || offset > str.length() )
		throw ai::Error(kUnicodeStringBadIndex);
	UnicodeString result = substr(0, insertOffset);
	result.append(str, offset, count);
	result.append(substr(insertOffset));
	*this = result;
	return *this;
}
// Replaces \c num characters of this string beginning at \c pos with up
// to \c count characters of \c str starting at \c startOffset.
// Throws ai::Error(kUnicodeStringBadIndex) for out-of-range positions.
// Note: unlike insert(), this mutates in two steps (erase then insert),
// so a throw from insert() would leave the erase already applied.
inline UnicodeString& UnicodeString::replace (size_type pos, size_type num, const UnicodeString& str,
											  size_type startOffset, size_type count)
{
	if ( pos > length() || startOffset > str.length() )
		throw ai::Error(kUnicodeStringBadIndex);
	erase(pos, num);
	insert(pos, str, startOffset, count);
	return *this;
}
/// Returns the string contents as a UTF-8 encoded std::string.
inline std::string UnicodeString::as_UTF8 ( ) const
{
	return getInStdString(kAIUTF8CharacterEncoding);
}
/// Returns the string contents in the platform's default character encoding.
inline std::string UnicodeString::as_Platform () const
{
	return getInStdString(kAIPlatformCharacterEncoding);
}
/// Returns the string contents in the Roman character encoding.
inline std::string UnicodeString::as_Roman () const
{
	return getInStdString(kAIRomanCharacterEncoding);
}
/** Copies this string's UTF-16 code units into \c buffer, 0-terminated.
	@param buffer Destination buffer; may be NULL to only query the size.
	@param bufferMax Capacity of \c buffer in ASUnicode units, including
		the 0 terminator. If 0, nothing is written.
	@return The count of ASUnicode units needed to hold the entire string
		plus its 0 terminator, regardless of how much was actually copied.
*/
inline UnicodeString::size_type UnicodeString::as_ASUnicode ( ASUnicode* buffer, size_type bufferMax ) const
{
	const UTF16Char* bufPtr = 0;
	const size_type kThisUTF16Len = utf_16(bufPtr) + 1;
	// Guard: with a missing or zero-capacity destination the original
	// expression (min(bufferMax, len) - 1) wraps around on the unsigned
	// size_type and memcpy would be asked to copy a huge range. In that
	// case just report the required size without writing anything.
	if ( buffer == 0 || bufferMax == 0 )
		return kThisUTF16Len;
	const size_type kCopyMax = (bufferMax < kThisUTF16Len ? bufferMax : kThisUTF16Len) - 1;
	memcpy(buffer, bufPtr, kCopyMax*sizeof(UTF16Char));
	buffer[kCopyMax] = 0;
	return kThisUTF16Len;
}
#if defined(WIN_ENV)
/** Copies this string as 0-terminated WCHARs into \c buffer.
	Thin wrapper over as_ASUnicode(); the reinterpret_cast is layout-safe
	because the static check guarantees WCHAR and UTF16Char are the same size.
*/
inline UnicodeString::size_type UnicodeString::as_WCHARStr ( WCHARStr::LPWSTR buffer, size_type bufferMax ) const
{
	AI_STATIC_CHECK(sizeof(WCHARStr::WCHAR) == sizeof(ai::UnicodeString::UTF16Char), WCHAR_size_does_not_match_unsigned_short_size); //-V503
	return as_ASUnicode( reinterpret_cast<ASUnicode*>(buffer), bufferMax );
}
#endif // defined(WIN_ENV)
/// Copies the string, platform-encoded, into \c buffer (at most \c bufferMax chars).
inline UnicodeString::size_type UnicodeString::as_Platform ( char* buffer, size_type bufferMax ) const
{
	return getToBuffer( buffer, bufferMax, kAIPlatformCharacterEncoding);
}
/// Copies the string, Roman-encoded, into \c buffer (at most \c bufferMax chars).
inline UnicodeString::size_type UnicodeString::as_Roman ( char* buffer, size_type bufferMax ) const
{
	return getToBuffer( buffer, bufferMax, kAIRomanCharacterEncoding);
}
/// Copies the string, platform-encoded, into a Pascal string of capacity \c bufferMax.
inline UnicodeString::size_type UnicodeString::as_Platform ( const ai::PStr& pascalString, size_type bufferMax ) const
{
	return getToBuffer( pascalString, bufferMax, kAIPlatformCharacterEncoding );
}
/// Copies the string, Roman-encoded, into a Pascal string of capacity \c bufferMax.
inline UnicodeString::size_type UnicodeString::as_Roman ( const ai::PStr& pascalString, size_type bufferMax ) const
{
	return getToBuffer( pascalString, bufferMax, kAIRomanCharacterEncoding );
}
#if defined(WIN_ENV)
///////////////////////////////////////////////////////////
// WCHARStr inlines
///////////////////////////////////////////////////////////
/** Constructs a Windows wide-character wrapper from a Unicode string.
	The UTF-16 code units of \c string are copied and reinterpreted as
	WCHARs (statically checked to be the same size); an empty input
	leaves the wrapped string default-constructed.
	@param string The source Unicode string.
*/
inline WCHARStr::WCHARStr (const UnicodeString& string) : fConstStr()
{
	AI_STATIC_CHECK(sizeof(WCHAR) == sizeof(ai::UnicodeString::UTF16Char), WCHAR_size_does_not_match_unsigned_short_size); //-V503
	const std::basic_string<ASUnicode>& cInput = string.as_ASUnicode();
	const size_t cLen = cInput.length();
	if ( cLen > 0 )
		fConstStr = std::basic_string<WCHAR>(reinterpret_cast<LPCWSTR>(cInput.data()), cLen);
}  // NOTE: dropped the stray ';' that followed this function body (extra-semicolon warning).
#if defined(_NATIVE_WCHAR_T_DEFINED)
// This ctor is redundant if wchar_t is not an intrinsic. This is the same as the const ASUnicode* ctor.
// (Only compiled when /Zc:wchar_t makes wchar_t a distinct built-in type.)
inline WCHARStr::WCHARStr (WCHARStr::LPCWSTR wcharString) : fConstStr( wcharString )
{
}
#endif // defined(_NATIVE_WCHAR_T_DEFINED)
/** Wraps a 0-terminated UTF-16 buffer. A NULL or empty input yields an
	empty wrapper. The static check guarantees WCHAR and UTF16Char have
	identical size, so the reinterpret_cast below is layout-safe.
*/
inline WCHARStr::WCHARStr (const ASUnicode* string)
{
	AI_STATIC_CHECK(sizeof(WCHARStr::WCHAR) == sizeof(ai::UnicodeString::UTF16Char), WCHAR_size_does_not_match_unsigned_short_size); //-V503
	const bool hasContent = (string != 0) && (*string != 0);
	if ( hasContent )
		fConstStr.assign(reinterpret_cast<LPCWSTR>(string));
}
/** Returns the wrapped characters reinterpreted as 0-terminated ASUnicode
	(UTF-16) units; safe because of the size static check. Non-owning —
	presumably only valid while this WCHARStr is alive and unmodified
	(backed by as_LPCWSTR(); verify its lifetime guarantees).
*/
inline const ASUnicode* WCHARStr::as_ASUnicode () const
{
	AI_STATIC_CHECK(sizeof(WCHARStr::WCHAR) == sizeof(ai::UnicodeString::UTF16Char), WCHAR_size_does_not_match_unsigned_short_size); //-V503
	return reinterpret_cast<const ASUnicode*>(this->as_LPCWSTR());
}
/// Number of WCHAR units held by the wrapped string.
inline size_t WCHARStr::length() const
{
	return fConstStr.size();
}
#endif // defined(WIN_ENV)
} // end of namespace ai
#endif // _IAIUNICODESTRING_H_
| 38.023133 | 138 | 0.723183 | [
"object"
] |
36e5c0c8bd1162ff04453eac801a2b0efa2adc83 | 477 | h | C | EU4toV2/Source/Mappers/ReligionMapper/ReligionMapping.h | GregB76/EU4toVic2 | 0a8822101a36a16036fdc315e706d113d9231101 | [
"MIT"
] | 3 | 2018-12-23T17:04:15.000Z | 2021-05-06T14:12:28.000Z | EU4toV2/Source/Mappers/ReligionMapper/ReligionMapping.h | IhateTrains/EU4toVic2 | 061f5e1a0bc1a1f3b54bdfe471b501260149b56b | [
"MIT"
] | null | null | null | EU4toV2/Source/Mappers/ReligionMapper/ReligionMapping.h | IhateTrains/EU4toVic2 | 061f5e1a0bc1a1f3b54bdfe471b501260149b56b | [
"MIT"
] | null | null | null | #ifndef RELIGION_MAPPING_H
#define RELIGION_MAPPING_H
#include "Parser.h"
namespace mappers
{
// One religion mapping rule: several EU4 religions map onto a single
// Vic2 religion. The members are populated by parsing the given stream
// through the commonItems parser base class — presumably one "link"
// entry of the religion_map file; verify against the mapper that owns it.
class ReligionMapping: commonItems::parser
{
  public:
	explicit ReligionMapping(std::istream& theStream); // parses one mapping entry
	[[nodiscard]] const auto& getVic2Religion() const { return vic2Religion; }
	[[nodiscard]] const auto& getEU4Religions() const { return eu4Religions; }
  private:
	std::string vic2Religion;              // target Vic2 religion key
	std::vector<std::string> eu4Religions; // source EU4 religion keys
};
#endif // RELIGION_MAPPING_H | 21.681818 | 76 | 0.746331 | [
"vector"
] |
36e6bc48f4d246430840289949ccf2d52f1aa951 | 10,144 | c | C | examples/noise_model.c | oddstone/aom | 7a3c264606a2f7fc64dfa2b19692adffe55e2801 | [
"BSD-2-Clause"
] | null | null | null | examples/noise_model.c | oddstone/aom | 7a3c264606a2f7fc64dfa2b19692adffe55e2801 | [
"BSD-2-Clause"
] | null | null | null | examples/noise_model.c | oddstone/aom | 7a3c264606a2f7fc64dfa2b19692adffe55e2801 | [
"BSD-2-Clause"
] | null | null | null | /*
* Copyright (c) 2018, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
/*!\file
* \brief This is an sample binary to create noise params from input video.
*
* To allow for external denoising applications, this sample binary illustrates
* how to create a film grain table (film grain params as a function of time)
* from an input video and its corresponding denoised source.
*
* The --output-grain-table file can be passed as input to the encoder (in
* aomenc this is done through the "--film-grain-table" parameter).
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../args.h"
#include "../tools_common.h"
#include "../video_writer.h"
#include "aom/aom_encoder.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/noise_model.h"
#include "aom_dsp/noise_util.h"
#include "aom_dsp/grain_table.h"
#include "aom_mem/aom_mem.h"
static const char *exec_name;
/* Prints the usage banner to stderr and terminates with a failure status.
 * exec_name is the program name captured from argv[0] in main(). */
void usage_exit(void) {
  fprintf(stderr,
          "Usage: %s --input=<input> --input-denoised=<denoised> "
          "--output-grain-table=<outfile> "
          "See comments in noise_model.c for more information.\n",
          exec_name);
  exit(EXIT_FAILURE);
}
static const arg_def_t help =
ARG_DEF(NULL, "help", 0, "Show usage options and exit");
static const arg_def_t width_arg =
ARG_DEF("w", "width", 1, "Input width (if rawvideo)");
static const arg_def_t height_arg =
ARG_DEF("h", "height", 1, "Input height (if rawvideo)");
static const arg_def_t skip_frames_arg =
ARG_DEF("s", "skip-frames", 1, "Number of frames to skip (default = 1)");
static const arg_def_t fps_arg = ARG_DEF(NULL, "fps", 1, "Frame rate");
static const arg_def_t input_arg = ARG_DEF("-i", "input", 1, "Input filename");
static const arg_def_t output_grain_table_arg =
ARG_DEF("n", "output-grain-table", 1, "Output noise file");
static const arg_def_t input_denoised_arg =
ARG_DEF("d", "input-denoised", 1, "Input denoised filename (YUV) only");
static const arg_def_t block_size_arg =
ARG_DEF("b", "block_size", 1, "Block size");
static const arg_def_t use_i420 =
ARG_DEF(NULL, "i420", 0, "Input file (and denoised) is I420 (default)");
static const arg_def_t use_i422 =
ARG_DEF(NULL, "i422", 0, "Input file (and denoised) is I422");
static const arg_def_t use_i444 =
ARG_DEF(NULL, "i444", 0, "Input file (and denoised) is I444");
/* Parsed command-line options for the noise-model example. */
typedef struct {
  int width;  /* input frame width (needed for raw video) */
  int height; /* input frame height (needed for raw video) */
  struct aom_rational fps; /* frame rate as numerator/denominator */
  const char *input;             /* path to the noisy source video */
  const char *input_denoised;    /* path to the matching denoised video */
  const char *output_grain_table; /* where to write the film grain table */
  int img_fmt;    /* AOM_IMG_FMT_* of both inputs (I420/I422/I444) */
  int block_size; /* noise-analysis block size in pixels */
  int run_flat_block_finder; /* nonzero: detect flat blocks instead of treating all as flat */
  int force_flat_psd; /* not referenced in this file -- TODO confirm intended use */
  int skip_frames;    /* analyze every Nth frame */
} noise_model_args_t;
/* Fills *noise_args from the command line. argv points one past the
 * program name; iteration stops at the NULL terminator of argv.
 * --help prints the option table and exits; any unrecognized argument
 * also prints the table and exits.
 * NOTE(review): `argi` is incremented but never read, and skip_frames_arg
 * is matched below yet absent from the main_args help table — confirm
 * whether those are intentional. */
void parse_args(noise_model_args_t *noise_args, int *argc, char **argv) {
  struct arg arg;
  static const arg_def_t *main_args[] = { &help,
                                          &input_arg,
                                          &fps_arg,
                                          &width_arg,
                                          &height_arg,
                                          &block_size_arg,
                                          &output_grain_table_arg,
                                          &input_denoised_arg,
                                          &use_i420,
                                          &use_i422,
                                          &use_i444,
                                          NULL };
  for (int argi = *argc + 1; *argv; argi++, argv++) {
    if (arg_match(&arg, &help, argv)) {
      fprintf(stdout, "\nOptions:\n");
      arg_show_usage(stdout, main_args);
      exit(0);
    } else if (arg_match(&arg, &width_arg, argv)) {
      noise_args->width = atoi(arg.val);
    } else if (arg_match(&arg, &height_arg, argv)) {
      noise_args->height = atoi(arg.val);
    } else if (arg_match(&arg, &input_arg, argv)) {
      noise_args->input = arg.val;
    } else if (arg_match(&arg, &input_denoised_arg, argv)) {
      noise_args->input_denoised = arg.val;
    } else if (arg_match(&arg, &output_grain_table_arg, argv)) {
      noise_args->output_grain_table = arg.val;
    } else if (arg_match(&arg, &block_size_arg, argv)) {
      noise_args->block_size = atoi(arg.val);
    } else if (arg_match(&arg, &fps_arg, argv)) {
      noise_args->fps = arg_parse_rational(&arg);
    } else if (arg_match(&arg, &use_i420, argv)) {
      noise_args->img_fmt = AOM_IMG_FMT_I420;
    } else if (arg_match(&arg, &use_i422, argv)) {
      noise_args->img_fmt = AOM_IMG_FMT_I422;
    } else if (arg_match(&arg, &use_i444, argv)) {
      noise_args->img_fmt = AOM_IMG_FMT_I444;
    } else if (arg_match(&arg, &skip_frames_arg, argv)) {
      noise_args->skip_frames = atoi(arg.val);
    } else {
      fprintf(stdout, "Unknown arg: %s\n\nUsage:\n", *argv);
      arg_show_usage(stdout, main_args);
      exit(0);
    }
  }
}
int main(int argc, char *argv[]) {
noise_model_args_t args = { 0, 0, { 1, 25 }, 0, 0, 0, AOM_IMG_FMT_I420,
32, 0, 0, 1 };
aom_image_t raw, denoised;
FILE *infile = NULL;
AvxVideoInfo info;
memset(&info, 0, sizeof(info));
exec_name = argv[0];
parse_args(&args, &argc, argv + 1);
info.frame_width = args.width;
info.frame_height = args.height;
info.time_base.numerator = args.fps.den;
info.time_base.denominator = args.fps.num;
if (info.frame_width <= 0 || info.frame_height <= 0 ||
(info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!aom_img_alloc(&raw, args.img_fmt, info.frame_width, info.frame_height,
1)) {
die("Failed to allocate image.");
}
if (!aom_img_alloc(&denoised, args.img_fmt, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
infile = fopen(args.input, "r");
if (!infile) {
die("Failed to open input file:", args.input);
}
const int block_size = args.block_size;
aom_flat_block_finder_t block_finder;
aom_flat_block_finder_init(&block_finder, block_size);
const int num_blocks_w = (info.frame_width + block_size - 1) / block_size;
const int num_blocks_h = (info.frame_height + block_size - 1) / block_size;
uint8_t *flat_blocks = (uint8_t *)aom_malloc(num_blocks_w * num_blocks_h);
aom_noise_model_t noise_model;
aom_noise_model_params_t params = { AOM_NOISE_SHAPE_SQUARE, 3 };
aom_noise_model_init(&noise_model, params);
FILE *denoised_file = 0;
if (args.input_denoised) {
denoised_file = fopen(args.input_denoised, "rb");
if (!denoised_file)
die("Unable to open input_denoised: %s", args.input_denoised);
} else {
die("--input-denoised file must be specified");
}
aom_film_grain_table_t grain_table = { 0 };
int64_t prev_timestamp = 0;
int frame_count = 0;
while (aom_img_read(&raw, infile)) {
if (args.input_denoised) {
if (!aom_img_read(&denoised, denoised_file)) {
die("Unable to read input denoised file");
}
}
if (frame_count % args.skip_frames == 0) {
int num_flat_blocks = num_blocks_w * num_blocks_h;
memset(flat_blocks, 1, num_flat_blocks);
if (args.run_flat_block_finder) {
memset(flat_blocks, 0, num_flat_blocks);
num_flat_blocks = aom_flat_block_finder_run(
&block_finder, raw.planes[0], info.frame_width, info.frame_height,
info.frame_width, flat_blocks);
fprintf(stdout, "Num flat blocks %d\n", num_flat_blocks);
}
const uint8_t *planes[3] = { raw.planes[0], raw.planes[1],
raw.planes[2] };
uint8_t *denoised_planes[3] = { denoised.planes[0], denoised.planes[1],
denoised.planes[2] };
int strides[3] = { raw.stride[0], raw.stride[1], raw.stride[2] };
int chroma_sub[3] = { raw.x_chroma_shift, raw.y_chroma_shift, 0 };
fprintf(stdout, "Updating noise model...\n");
aom_noise_status_t status = aom_noise_model_update(
&noise_model, (const uint8_t *const *)planes,
(const uint8_t *const *)denoised_planes, info.frame_width,
info.frame_height, strides, chroma_sub, flat_blocks, block_size);
int64_t cur_timestamp =
frame_count * 10000000ULL * args.fps.den / args.fps.num;
if (status == AOM_NOISE_STATUS_DIFFERENT_NOISE_TYPE) {
fprintf(stdout,
"Noise type is different, updating parameters for time "
"[ %" PRId64 ", %" PRId64 ")\n",
prev_timestamp, cur_timestamp);
aom_film_grain_t grain;
aom_noise_model_get_grain_parameters(&noise_model, &grain);
aom_film_grain_table_append(&grain_table, prev_timestamp, cur_timestamp,
&grain);
aom_noise_model_save_latest(&noise_model);
prev_timestamp = cur_timestamp;
}
fprintf(stdout, "Done noise model update, status = %d\n", status);
}
frame_count++;
}
aom_film_grain_t grain;
aom_noise_model_get_grain_parameters(&noise_model, &grain);
aom_film_grain_table_append(&grain_table, prev_timestamp, INT64_MAX, &grain);
if (args.output_grain_table) {
struct aom_internal_error_info error_info;
if (AOM_CODEC_OK != aom_film_grain_table_write(&grain_table,
args.output_grain_table,
&error_info)) {
die("Unable to write output film grain table");
}
}
aom_film_grain_table_free(&grain_table);
if (infile) fclose(infile);
if (denoised_file) fclose(denoised_file);
aom_img_free(&raw);
aom_img_free(&denoised);
return EXIT_SUCCESS;
}
| 39.166023 | 80 | 0.631802 | [
"model"
] |
36e969dcf0f0a2deedbd8a4d6963fe81a57151e6 | 6,630 | h | C | libCoords/Cartesian.h | lrmcfarland/Coordinates | 2041ec8bfeed870e8f242f19ad517e3adea52897 | [
"MIT"
] | 4 | 2015-09-07T01:32:43.000Z | 2021-05-30T23:29:56.000Z | libCoords/Cartesian.h | lrmcfarland/Coordinates | 2041ec8bfeed870e8f242f19ad517e3adea52897 | [
"MIT"
] | null | null | null | libCoords/Cartesian.h | lrmcfarland/Coordinates | 2041ec8bfeed870e8f242f19ad517e3adea52897 | [
"MIT"
] | 1 | 2017-10-22T02:47:10.000Z | 2017-10-22T02:47:10.000Z | // ================================================================
// Filename: Cartesian.h
//
// Description: This defines a Cartesian coordinate class for physics
// applications. Implemented as classic Cartesian three
// space coordinates.
//
// Author: L.R. McFarland
// Created: 12 May 2004
// Language: C++
//
// Coords is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Coords is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Coords. If not, see <http://www.gnu.org/licenses/>.
// ================================================================
#pragma once
#include <cmath>
#include <deque>
#include <fstream>
#include <vector>
#include <utils.h>
namespace Coords {
class angle;
class spherical;
// Classic three-space Cartesian coordinate/vector with double components.
class Cartesian {
  public:
    // ----- unit vectors -----
    static const Cartesian Uo; // zero
    static const Cartesian Ux;
    static const Cartesian Uy;
    static const Cartesian Uz;
    // ----- ctor and dtor -----
    explicit Cartesian(const double& a = 0.0,
		       const double& b = 0.0,
		       const double& c = 0.0)
      : m_x(a), m_y(b), m_z(c) {}; // ctors, including default.
    explicit Cartesian(const std::string& a, // The ambiguity is in the box.
		       const std::string& b="0",
		       const std::string& c="0");
    explicit Cartesian(const spherical& a); // conversion from spherical coordinates
    ~Cartesian() {};
    Cartesian(const Cartesian& a);
    Cartesian& operator=(const Cartesian& rhs);
    // ----- accessors -----
    // Each component has a setter, a const-ref getter, and a by-value
    // getter used by the boost python wrappers.
    void x(const double& rhs) {m_x = rhs;}
    const double& x() const {return m_x;}
    double getX() const {return m_x;} // for boost python wrappers
    void y(const double& rhs) {m_y = rhs;}
    const double& y() const {return m_y;}
    double getY() const {return m_y;} // for boost python wrappers
    void z(const double& rhs) {m_z = rhs;}
    const double& z() const {return m_z;}
    double getZ() const {return m_z;} // for boost python wrappers
    // ----- bool operators -----
    bool operator==(const Cartesian& rhs) const;
    bool operator!=(const Cartesian& rhs) const;
    // ----- in-place operators -----
    Cartesian& operator+=(const Cartesian& rhs);
    Cartesian& operator-=(const Cartesian& rhs);
    Cartesian& operator*=(const double& rhs); // scale
    Cartesian& operator/=(const double& rhs);
    // ----- other methods -----
    void zero() {x(0.0); y(0.0); z(0.0);};
    double magnitude() const;
    double magnitude2() const;  // presumably magnitude squared -- verify in implementation
    Cartesian normalized() const;
  private:
    // ----- data members -----
    double m_x, m_y, m_z;
};
// ---------------------
// ----- operators -----
// ---------------------
// Non-member arithmetic on Cartesian vectors (defined elsewhere).
Cartesian operator+(const Cartesian& lhs, const Cartesian& rhs);
Cartesian operator-(const Cartesian& lhs, const Cartesian& rhs);
Cartesian operator-(const Cartesian& rhs); // unary minus
// explicit double cast to force scale and not dot product of default Cartesian ctor.
Cartesian operator*(const Cartesian& lhs, const double& rhs); // scale
Cartesian operator*(const double& lhs, const Cartesian& rhs); // scale
Cartesian operator/(const Cartesian& lhs, const double& rhs); // scale
Cartesian operator/(const double& lhs, const Cartesian& rhs); // scale
// vector products
double operator*(const Cartesian& lhs, const Cartesian& rhs); // dot product
double dot(const Cartesian& a, const Cartesian& b); // vector dot product
Cartesian cross(const Cartesian& a, const Cartesian& b); // vector cross product
// -------------------------------
// ----- output operator<<() -----
// -------------------------------
// inline for boost. Use hpp instead?
// Streams the vector as XML-ish markup:
// <Cartesian><x>..</x><y>..</y><z>..</z></Cartesian>
// (inline for boost — see note above in the original header)
inline std::ostream& operator<< (std::ostream& os, const Cartesian& a) {
  os << "<Cartesian>";
  os << "<x>" << a.x() << "</x>";
  os << "<y>" << a.y() << "</y>";
  os << "<z>" << a.z() << "</z>";
  os << "</Cartesian>";
  return os;
}
// -------------------------
// ----- class rotator -----
// -------------------------
// supports rotating Cartesian vectors about Cartesian axies.
class rotator {
public:
rotator(const Cartesian& an_axis=Coords::Cartesian::Uz); // ctor
~rotator() {}; // dtor
const Cartesian& axis() const {return m_axis;}
void axis(const Cartesian& an_axis);
Cartesian rotate(const Cartesian& a_vector, const angle& an_angle);
// Boost needs this
rotator(const rotator& a);
rotator& operator=(const rotator& rhs);
private:
Cartesian m_axis;
std::vector< std::vector<double> > m_rotation_matrix;
// for optimization
bool m_is_new_axis;
angle m_current_angle;
};
// -----------------------------------
// ----- class CartesianRecorder -----
// -----------------------------------
// implements a simple deque to store three Cartesian data. It is
// intended to store and later plot positions and other three
// Cartesian data.
// Error raised by CartesianRecorder::write2R() on file I/O failure.
class CartesianRecorderIOError : public Error {
  public:
    CartesianRecorderIOError(const std::string& msg) : Error(msg) {}
};
// Bounded deque of Cartesian samples (e.g. positions over time) that can
// be dumped to a file for plotting with R via write2R().
class CartesianRecorder {
  public:
    static const unsigned int default_size; /// default size limit for deque
    CartesianRecorder(const unsigned int& a_size_limit=CartesianRecorder::default_size);
    ~CartesianRecorder() {}; // dtor
    CartesianRecorder(const CartesianRecorder& a); // copy ctor
    CartesianRecorder& operator=(const CartesianRecorder& a); // copy assignment
    const unsigned int& sizeLimit() const {return m_size_limit;}
    void sizeLimit(const int& a) {m_size_limit = a;}
    unsigned long size() const {return m_data.size();}
    const Cartesian& get(const unsigned int& idx) {return m_data[idx];} // no bounds check
    void push(Cartesian a); // appends; presumably drops oldest past m_size_limit -- verify impl
    void clear() {m_data.clear();}
    void write2R(const std::string& flnm, bool skip_Uo=true); // may throw CartesianRecorderIOError
  private:
    unsigned int m_size_limit; /// size limit of position deque
    std::deque<Cartesian> m_data; /// data deque
};
} // end namespace Coords
| 28.951965 | 88 | 0.58914 | [
"vector"
] |
d2187b6f36fe980ab1fec917632a56c13f52022b | 7,423 | h | C | Modules/socketmodule.h | eegorov/Python | ed49f3dfcfd6b3ac4358e59381245cb045de9d26 | [
"0BSD"
] | null | null | null | Modules/socketmodule.h | eegorov/Python | ed49f3dfcfd6b3ac4358e59381245cb045de9d26 | [
"0BSD"
] | null | null | null | Modules/socketmodule.h | eegorov/Python | ed49f3dfcfd6b3ac4358e59381245cb045de9d26 | [
"0BSD"
] | null | null | null | /* Socket module header file */
/* Includes needed for the sockaddr_* symbols below */
#ifndef MS_WINDOWS
#ifdef __VMS
# include <socket.h>
# else
# include <sys/socket.h>
# endif
# include <netinet/in.h>
# include <netinet/tcp.h>
#else /* MS_WINDOWS */
# include <winsock2.h>
/* Windows 'supports' CMSG_LEN, but does not follow the POSIX standard
* interface at all, so there is no point including the code that
* attempts to use it.
*/
# ifdef PySocket_BUILDING_SOCKET
# undef CMSG_LEN
# endif
# include <ws2tcpip.h>
/* VC6 is shipped with old platform headers, and does not have MSTcpIP.h
* Separate SDKs have all the functions we want, but older ones don't have
* any version information.
* I use SIO_GET_MULTICAST_FILTER to detect a decent SDK.
*/
# ifdef SIO_GET_MULTICAST_FILTER
# include <mstcpip.h> /* for SIO_RCVALL */
# define HAVE_ADDRINFO
# define HAVE_SOCKADDR_STORAGE
# define HAVE_GETADDRINFO
# define HAVE_GETNAMEINFO
# define ENABLE_IPV6
# else
typedef int socklen_t;
# endif /* IPPROTO_IPV6 */
#endif /* MS_WINDOWS */
#ifdef HAVE_SYS_UN_H
# include <sys/un.h>
#else
# undef AF_UNIX
#endif
#ifdef HAVE_LINUX_NETLINK_H
# ifdef HAVE_ASM_TYPES_H
# include <asm/types.h>
# endif
# include <linux/netlink.h>
#else
# undef AF_NETLINK
#endif
#ifdef HAVE_LINUX_QRTR_H
# ifdef HAVE_ASM_TYPES_H
# include <asm/types.h>
# endif
# include <linux/qrtr.h>
#else
# undef AF_QIPCRTR
#endif
#ifdef HAVE_BLUETOOTH_BLUETOOTH_H
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>
#include <bluetooth/l2cap.h>
#include <bluetooth/sco.h>
#include <bluetooth/hci.h>
#endif
#ifdef HAVE_BLUETOOTH_H
#include <bluetooth.h>
#endif
#ifdef HAVE_NET_IF_H
# include <net/if.h>
#endif
#ifdef HAVE_NETPACKET_PACKET_H
# include <sys/ioctl.h>
# include <netpacket/packet.h>
#endif
#ifdef HAVE_LINUX_TIPC_H
# include <linux/tipc.h>
#endif
#ifdef HAVE_LINUX_CAN_H
# include <linux/can.h>
#else
# undef AF_CAN
# undef PF_CAN
#endif
#ifdef HAVE_LINUX_CAN_RAW_H
#include <linux/can/raw.h>
#endif
#ifdef HAVE_LINUX_CAN_BCM_H
#include <linux/can/bcm.h>
#endif
#ifdef HAVE_SYS_SYS_DOMAIN_H
#include <sys/sys_domain.h>
#endif
#ifdef HAVE_SYS_KERN_CONTROL_H
#include <sys/kern_control.h>
#endif
#ifdef HAVE_LINUX_VM_SOCKETS_H
# include <linux/vm_sockets.h>
#else
# undef AF_VSOCK
#endif
#ifdef HAVE_SOCKADDR_ALG
# include <linux/if_alg.h>
# ifndef AF_ALG
# define AF_ALG 38
# endif
# ifndef SOL_ALG
# define SOL_ALG 279
# endif
/* Linux 3.19 */
# ifndef ALG_SET_AEAD_ASSOCLEN
# define ALG_SET_AEAD_ASSOCLEN 4
# endif
# ifndef ALG_SET_AEAD_AUTHSIZE
# define ALG_SET_AEAD_AUTHSIZE 5
# endif
/* Linux 4.8 */
# ifndef ALG_SET_PUBKEY
# define ALG_SET_PUBKEY 6
# endif
# ifndef ALG_OP_SIGN
# define ALG_OP_SIGN 2
# endif
# ifndef ALG_OP_VERIFY
# define ALG_OP_VERIFY 3
# endif
#endif /* HAVE_SOCKADDR_ALG */
#ifndef Py__SOCKET_H
#define Py__SOCKET_H
#ifdef __cplusplus
extern "C" {
#endif
/* Python module and C API name */
#define PySocket_MODULE_NAME "_socket"
#define PySocket_CAPI_NAME "CAPI"
#define PySocket_CAPSULE_NAME PySocket_MODULE_NAME "." PySocket_CAPI_NAME
/* Abstract the socket file descriptor type */
#ifdef MS_WINDOWS
typedef SOCKET SOCKET_T;
# ifdef MS_WIN64
# define SIZEOF_SOCKET_T 8
# else
# define SIZEOF_SOCKET_T 4
# endif
#else
typedef int SOCKET_T;
# define SIZEOF_SOCKET_T SIZEOF_INT
#endif
#if SIZEOF_SOCKET_T <= SIZEOF_LONG
#define PyLong_FromSocket_t(fd) PyLong_FromLong((SOCKET_T)(fd))
#define PyLong_AsSocket_t(fd) (SOCKET_T)PyLong_AsLong(fd)
#else
#define PyLong_FromSocket_t(fd) PyLong_FromLongLong((SOCKET_T)(fd))
#define PyLong_AsSocket_t(fd) (SOCKET_T)PyLong_AsLongLong(fd)
#endif
/* Socket address */
/* Union of every sockaddr variant this build supports; sized to hold the
   largest enabled member. Code reads .sa.sa_family to pick the view. */
typedef union sock_addr {
    struct sockaddr_in in;           /* IPv4 */
    struct sockaddr sa;              /* generic header (address family) */
#ifdef AF_UNIX
    struct sockaddr_un un;           /* Unix domain */
#endif
#ifdef AF_NETLINK
    struct sockaddr_nl nl;           /* Linux netlink */
#endif
#ifdef ENABLE_IPV6
    struct sockaddr_in6 in6;         /* IPv6 */
    struct sockaddr_storage storage; /* guaranteed large enough for any address */
#endif
#ifdef HAVE_BLUETOOTH_BLUETOOTH_H
    struct sockaddr_l2 bt_l2;        /* Bluetooth L2CAP */
    struct sockaddr_rc bt_rc;        /* Bluetooth RFCOMM */
    struct sockaddr_sco bt_sco;      /* Bluetooth SCO */
    struct sockaddr_hci bt_hci;      /* Bluetooth HCI */
#endif
#ifdef HAVE_NETPACKET_PACKET_H
    struct sockaddr_ll ll;           /* raw packet (AF_PACKET) */
#endif
#ifdef HAVE_LINUX_CAN_H
    struct sockaddr_can can;         /* CAN bus */
#endif
#ifdef HAVE_SYS_KERN_CONTROL_H
    struct sockaddr_ctl ctl;         /* macOS kernel control */
#endif
#ifdef HAVE_SOCKADDR_ALG
    struct sockaddr_alg alg;         /* Linux crypto API (AF_ALG) */
#endif
#ifdef AF_QIPCRTR
    struct sockaddr_qrtr sq;         /* Qualcomm IPC router */
#endif
#ifdef AF_VSOCK
    struct sockaddr_vm vm;           /* VM sockets (host/guest) */
#endif
} sock_addr_t;
/* The object holding a socket. It holds some extra information,
   like the address family, which is used to decode socket address
   arguments properly. This is the C layout behind Python's
   socket.socket instances. */
typedef struct {
    PyObject_HEAD
    SOCKET_T sock_fd;           /* Socket file descriptor */
    int sock_family;            /* Address family, e.g., AF_INET */
    int sock_type;              /* Socket type, e.g., SOCK_STREAM */
    int sock_proto;             /* Protocol type, usually 0 */
    PyObject *(*errorhandler)(void); /* Error handler; checks
                                        errno, returns NULL and
                                        sets a Python exception */
    _PyTime_t sock_timeout;     /* Operation timeout in seconds;
                                   0.0 means non-blocking */
} PySocketSockObject;
/* --- C API ----------------------------------------------------*/
/* Short explanation of what this C API export mechanism does
and how it works:
The _ssl module needs access to the type object defined in
the _socket module. Since cross-DLL linking introduces a lot of
problems on many platforms, the "trick" is to wrap the
C API of a module in a struct which then gets exported to
other modules via a PyCapsule.
The code in socketmodule.c defines this struct (which currently
only contains the type object reference, but could very
well also include other C APIs needed by other modules)
and exports it as PyCapsule via the module dictionary
under the name "CAPI".
Other modules can now include the socketmodule.h file
which defines the needed C APIs to import and set up
a static copy of this struct in the importing module.
After initialization, the importing module can then
access the C APIs from the _socket module by simply
referring to the static struct, e.g.
Load _socket module and its C API; this sets up the global
PySocketModule:
if (PySocketModule_ImportModuleAndAPI())
return;
Now use the C API as if it were defined in the using
module:
if (!PyArg_ParseTuple(args, "O!|zz:ssl",
PySocketModule.Sock_Type,
(PyObject*)&Sock,
&key_file, &cert_file))
return NULL;
Support could easily be extended to export more C APIs/symbols
this way. Currently, only the type object is exported,
other candidates would be socket constructors and socket
access functions.
*/
/* C API for usage by other Python modules */
/* The C API struct exported by _socket through a PyCapsule (see the
   explanation above). Importers get a static copy via
   PySocketModule_ImportModuleAndAPI(). */
typedef struct {
    PyTypeObject *Sock_Type;   /* the socket type object */
    PyObject *error;           /* presumably socket.error -- confirm in socketmodule.c */
    PyObject *timeout_error;   /* presumably socket.timeout -- confirm in socketmodule.c */
} PySocketModule_APIObject;
#define PySocketModule_ImportModuleAndAPI() PyCapsule_Import(PySocket_CAPSULE_NAME, 1)
#ifdef __cplusplus
}
#endif
#endif /* !Py__SOCKET_H */
| 24.66113 | 86 | 0.703354 | [
"object"
] |
d21edf70e8c9f9a083762fc37b9401f6f4234ed3 | 4,690 | h | C | libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h | brycejh/vtr-verilog-to-routing | f61da5eb2d4e008728a01def827d55a0e9f285d0 | [
"MIT"
] | 682 | 2015-07-10T00:39:26.000Z | 2022-03-30T05:24:53.000Z | libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h | a-canela/OpenFPGA | 063c58b6cbe2e01aa5520ec43ec80ff064d7f228 | [
"MIT"
] | 1,399 | 2015-07-24T22:09:09.000Z | 2022-03-29T06:22:48.000Z | libs/EXTERNAL/capnproto/c++/src/kj/threadlocal.h | a-canela/OpenFPGA | 063c58b6cbe2e01aa5520ec43ec80ff064d7f228 | [
"MIT"
] | 311 | 2015-07-09T13:59:48.000Z | 2022-03-28T00:15:20.000Z | // Copyright (c) 2014, Jason Choy <jjwchoy@gmail.com>
// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#if defined(__GNUC__) && !KJ_HEADER_WARNINGS
#pragma GCC system_header
#endif
// This file declares a macro `KJ_THREADLOCAL_PTR` for declaring thread-local pointer-typed
// variables. Use like:
// KJ_THREADLOCAL_PTR(MyType) foo = nullptr;
// This is equivalent to:
// thread_local MyType* foo = nullptr;
// This can only be used at the global scope.
//
// AVOID USING THIS. Use of thread-locals is discouraged because they often have many of the same
// properties as singletons: http://www.object-oriented-security.org/lets-argue/singletons
//
// Also, thread-locals tend to be hostile to event-driven code, which can be particularly
// surprising when using fibers (all fibers in the same thread will share the same threadlocals,
// even though they do not share a stack).
//
// That said, thread-locals are sometimes needed for runtime logistics in the KJ framework. For
// example, the current exception callback and current EventLoop are stored as thread-local
// pointers. Since KJ only ever needs to store pointers, not values, we avoid the question of
// whether these values' destructors need to be run, and we avoid the need for heap allocation.
#include "common.h"
#if !defined(KJ_USE_PTHREAD_THREADLOCAL) && defined(__APPLE__)
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
// iOS apparently does not support __thread (nor C++11 thread_local).
#define KJ_USE_PTHREAD_TLS 1
#endif
#endif
#if KJ_USE_PTHREAD_TLS
#include <pthread.h>
#endif
namespace kj {
#if KJ_USE_PTHREAD_TLS
// If __thread is unavailable, we'll fall back to pthreads.
#define KJ_THREADLOCAL_PTR(type) \
namespace { struct KJ_UNIQUE_NAME(_kj_TlpTag); } \
static ::kj::_::ThreadLocalPtr< type, KJ_UNIQUE_NAME(_kj_TlpTag)>
// Hack: In order to ensure each thread-local results in a unique template instance, we declare
// a one-off dummy type to use as the second type parameter.
namespace _ { // private
template <typename T, typename>
class ThreadLocalPtr {
  // Emulates `__thread T*` on top of pthread TLS.  Each thread-local variable
  // gets its own template instantiation (the anonymous second type parameter
  // guarantees uniqueness), so each one owns a distinct pthread key that is
  // created lazily by the local static inside getKey().  This avoids any
  // global constructor while still initializing the key on first use.
public:
  ThreadLocalPtr() = default;
  constexpr ThreadLocalPtr(decltype(nullptr)) {}
  // The nullptr constructor permits `= nullptr` initialization without
  // requiring any code to run at startup.

  inline ThreadLocalPtr& operator=(T* ptr) {
    pthread_setspecific(getKey(), ptr);
    return *this;
  }

  inline operator T*() const { return fetch(); }

  inline T& operator*() const { return *fetch(); }

  inline T* operator->() const { return fetch(); }

private:
  // Read the per-thread slot; NULL before the first assignment in a thread.
  inline T* fetch() const {
    return reinterpret_cast<T*>(pthread_getspecific(getKey()));
  }

  inline static pthread_key_t getKey() {
    // Local static: created exactly once, thread-safely, on first access.
    static pthread_key_t key = createKey();
    return key;
  }

  static pthread_key_t createKey() {
    pthread_key_t key;
    pthread_key_create(&key, 0);
    return key;
  }
};
} // namespace _ (private)
#elif __GNUC__
#define KJ_THREADLOCAL_PTR(type) static __thread type*
// GCC's __thread is lighter-weight than thread_local and is good enough for our purposes.
#else
#define KJ_THREADLOCAL_PTR(type) static thread_local type*
#endif // KJ_USE_PTHREAD_TLS
} // namespace kj
| 35 | 99 | 0.744136 | [
"object"
] |
d2237ad29d20bfcf7a0ee74e1d6e75a3705415db | 29,705 | c | C | src/options.c | tatokis/compton | cbb859ba3cd639cd898c02010297822b961fcc7c | [
"MIT"
] | null | null | null | src/options.c | tatokis/compton | cbb859ba3cd639cd898c02010297822b961fcc7c | [
"MIT"
] | null | null | null | src/options.c | tatokis/compton | cbb859ba3cd639cd898c02010297822b961fcc7c | [
"MIT"
] | null | null | null | // SPDX-License-Identifier: MPL-2.0
// Copyright (c) Yuxuan Shui <yshuiv7@gmail.com>
#include <getopt.h>
#include <locale.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <xcb/render.h> // for xcb_render_fixed_t, XXX
#include "common.h"
#include "config.h"
#include "log.h"
#include "options.h"
#include "utils.h"
#include "win.h"
#pragma GCC diagnostic error "-Wunused-parameter"
/**
 * Print the command-line usage text.
 *
 * @param ret  exit status the caller intends to use; nonzero sends the text
 *             to stderr (error path), zero sends it to stdout (--help).
 */
static void usage(int ret) {
// Appended to option descriptions whose backing feature was compiled out.
#define WARNING_DISABLED " (DISABLED AT COMPILE TIME)"
	// One single literal (with preprocessor-conditional fragments spliced
	// in) so the whole text is emitted with a single fputs() call.
	static const char *usage_text =
	    "compton (" COMPTON_VERSION ")\n"
	    "This is the maintenance fork of compton, please report\n"
	    "bugs to https://github.com/yshui/compton\n\n"
	    "usage: compton [options]\n"
	    "Options:\n"
	    "\n"
	    "-r radius\n"
	    " The blur radius for shadows. (default 12)\n"
	    "\n"
	    "-o opacity\n"
	    " The translucency for shadows. (default .75)\n"
	    "\n"
	    "-l left-offset\n"
	    " The left offset for shadows. (default -15)\n"
	    "\n"
	    "-t top-offset\n"
	    " The top offset for shadows. (default -15)\n"
	    "\n"
	    "-I fade-in-step\n"
	    " Opacity change between steps while fading in. (default 0.028)\n"
	    "\n"
	    "-O fade-out-step\n"
	    " Opacity change between steps while fading out. (default 0.03)\n"
	    "\n"
	    "-D fade-delta-time\n"
	    " The time between steps in a fade in milliseconds. (default 10)\n"
	    "\n"
	    "-m opacity\n"
	    " The opacity for menus. (default 1.0)\n"
	    "\n"
	    "-c\n"
	    " Enabled client-side shadows on windows.\n"
	    "\n"
	    "-C\n"
	    " Avoid drawing shadows on dock/panel windows.\n"
	    "\n"
	    "-z\n"
	    " Zero the part of the shadow's mask behind the window.\n"
	    "\n"
	    "-f\n"
	    " Fade windows in/out when opening/closing and when opacity\n"
	    " changes, unless --no-fading-openclose is used.\n"
	    "\n"
	    "-F\n"
	    " Equals to -f. Deprecated.\n"
	    "\n"
	    "-i opacity\n"
	    " Opacity of inactive windows. (0.1 - 1.0)\n"
	    "\n"
	    "-e opacity\n"
	    " Opacity of window titlebars and borders. (0.1 - 1.0)\n"
	    "\n"
	    "-G\n"
	    " Don't draw shadows on DND windows\n"
	    "\n"
	    "-b\n"
	    " Daemonize process.\n"
	    "\n"
	    "--show-all-xerrors\n"
	    " Show all X errors (for debugging).\n"
	    "\n"
	    "--config path\n"
	    " Look for configuration file at the path. Use /dev/null to avoid\n"
	    " loading configuration file."
#ifndef CONFIG_LIBCONFIG
	    WARNING_DISABLED
#endif
	    "\n\n"
	    "--write-pid-path path\n"
	    " Write process ID to a file.\n"
	    "\n"
	    "--shadow-red value\n"
	    " Red color value of shadow (0.0 - 1.0, defaults to 0).\n"
	    "\n"
	    "--shadow-green value\n"
	    " Green color value of shadow (0.0 - 1.0, defaults to 0).\n"
	    "\n"
	    "--shadow-blue value\n"
	    " Blue color value of shadow (0.0 - 1.0, defaults to 0).\n"
	    "\n"
	    "--inactive-opacity-override\n"
	    " Inactive opacity set by -i overrides value of _NET_WM_OPACITY.\n"
	    "\n"
	    "--inactive-dim value\n"
	    " Dim inactive windows. (0.0 - 1.0, defaults to 0)\n"
	    "\n"
	    "--active-opacity opacity\n"
	    " Default opacity for active windows. (0.0 - 1.0)\n"
	    "\n"
	    "--mark-wmwin-focused\n"
	    " Try to detect WM windows and mark them as active.\n"
	    "\n"
	    "--shadow-exclude condition\n"
	    " Exclude conditions for shadows.\n"
	    "\n"
	    "--fade-exclude condition\n"
	    " Exclude conditions for fading.\n"
	    "\n"
	    "--mark-ovredir-focused\n"
	    " Mark windows that have no WM frame as active.\n"
	    "\n"
	    "--no-fading-openclose\n"
	    " Do not fade on window open/close.\n"
	    "\n"
	    "--no-fading-destroyed-argb\n"
	    " Do not fade destroyed ARGB windows with WM frame. Workaround of bugs\n"
	    " in Openbox, Fluxbox, etc.\n"
	    "\n"
	    "--shadow-ignore-shaped\n"
	    " Do not paint shadows on shaped windows. (Deprecated, use\n"
	    " --shadow-exclude \'bounding_shaped\' or\n"
	    " --shadow-exclude \'bounding_shaped && !rounded_corners\' instead.)\n"
	    "\n"
	    "--detect-rounded-corners\n"
	    " Try to detect windows with rounded corners and don't consider\n"
	    " them shaped windows. Affects --shadow-ignore-shaped,\n"
	    " --unredir-if-possible, and possibly others. You need to turn this\n"
	    " on manually if you want to match against rounded_corners in\n"
	    " conditions.\n"
	    "\n"
	    "--detect-client-opacity\n"
	    " Detect _NET_WM_OPACITY on client windows, useful for window\n"
	    " managers not passing _NET_WM_OPACITY of client windows to frame\n"
	    " windows.\n"
	    "\n"
	    "--refresh-rate val\n"
	    " Specify refresh rate of the screen. If not specified or 0, compton\n"
	    " will try detecting this with X RandR extension.\n"
	    "\n"
	    "--vsync\n"
	    " Enable VSync\n"
	    "\n"
	    "--paint-on-overlay\n"
	    " Painting on X Composite overlay window.\n"
	    "\n"
	    "--sw-opti\n"
	    " Limit compton to repaint at most once every 1 / refresh_rate\n"
	    " second to boost performance.\n"
	    "\n"
	    "--use-ewmh-active-win\n"
	    " Use _NET_WM_ACTIVE_WINDOW on the root window to determine which\n"
	    " window is focused instead of using FocusIn/Out events.\n"
	    "\n"
	    "--respect-prop-shadow\n"
	    " Respect _COMPTON_SHADOW. This a prototype-level feature, which\n"
	    " you must not rely on.\n"
	    "\n"
	    "--unredir-if-possible\n"
	    " Unredirect all windows if a full-screen opaque window is\n"
	    " detected, to maximize performance for full-screen windows.\n"
	    "\n"
	    "--unredir-if-possible-delay ms\n"
	    " Delay before unredirecting the window, in milliseconds.\n"
	    " Defaults to 0.\n"
	    "\n"
	    "--unredir-if-possible-exclude condition\n"
	    " Conditions of windows that shouldn't be considered full-screen\n"
	    " for unredirecting screen.\n"
	    "\n"
	    "--focus-exclude condition\n"
	    " Specify a list of conditions of windows that should always be\n"
	    " considered focused.\n"
	    "\n"
	    "--inactive-dim-fixed\n"
	    " Use fixed inactive dim value.\n"
	    "\n"
	    "--detect-transient\n"
	    " Use WM_TRANSIENT_FOR to group windows, and consider windows in\n"
	    " the same group focused at the same time.\n"
	    "\n"
	    "--detect-client-leader\n"
	    " Use WM_CLIENT_LEADER to group windows, and consider windows in\n"
	    " the same group focused at the same time. WM_TRANSIENT_FOR has\n"
	    " higher priority if --detect-transient is enabled, too.\n"
	    "\n"
	    "--blur-background\n"
	    " Blur background of semi-transparent / ARGB windows. Bad in\n"
	    " performance. The switch name may change without prior\n"
	    " notifications.\n"
	    "\n"
	    "--blur-background-frame\n"
	    " Blur background of windows when the window frame is not opaque.\n"
	    " Implies --blur-background. Bad in performance. The switch name\n"
	    " may change.\n"
	    "\n"
	    "--blur-background-fixed\n"
	    " Use fixed blur strength instead of adjusting according to window\n"
	    " opacity.\n"
	    "\n"
	    "--blur-kern matrix\n"
	    " Specify the blur convolution kernel, with the following format:\n"
	    " WIDTH,HEIGHT,ELE1,ELE2,ELE3,ELE4,ELE5...\n"
	    " The element in the center must not be included, it will be forever\n"
	    " 1.0 or changing based on opacity, depending on whether you have\n"
	    " --blur-background-fixed.\n"
	    " A 7x7 Gaussian blur kernel looks like:\n"
	    " --blur-kern "
	    "'7,7,0.000003,0.000102,0.000849,0.001723,0.000849,0.000102,0.000003,0."
	    "000102,0.003494,0.029143,0.059106,0.029143,0.003494,0.000102,0.000849,0."
	    "029143,0.243117,0.493069,0.243117,0.029143,0.000849,0.001723,0.059106,0."
	    "493069,0.493069,0.059106,0.001723,0.000849,0.029143,0.243117,0.493069,0."
	    "243117,0.029143,0.000849,0.000102,0.003494,0.029143,0.059106,0.029143,0."
	    "003494,0.000102,0.000003,0.000102,0.000849,0.001723,0.000849,0.000102,0."
	    "000003'\n"
	    " Up to 4 blur kernels may be specified, separated with semicolon, for\n"
	    " multi-pass blur.\n"
	    " May also be one the predefined kernels: 3x3box (default), 5x5box,\n"
	    " 7x7box, 3x3gaussian, 5x5gaussian, 7x7gaussian, 9x9gaussian,\n"
	    " 11x11gaussian.\n"
	    "\n"
	    "--blur-background-exclude condition\n"
	    " Exclude conditions for background blur.\n"
	    "\n"
	    "--resize-damage integer\n"
	    " Resize damaged region by a specific number of pixels. A positive\n"
	    " value enlarges it while a negative one shrinks it. Useful for\n"
	    " fixing the line corruption issues of blur. May or may not\n"
	    " work with --glx-no-stencil. Shrinking doesn't function correctly.\n"
	    "\n"
	    "--invert-color-include condition\n"
	    " Specify a list of conditions of windows that should be painted with\n"
	    " inverted color. Resource-hogging, and is not well tested.\n"
	    "\n"
	    "--opacity-rule opacity:condition\n"
	    " Specify a list of opacity rules, in the format \"PERCENT:PATTERN\",\n"
	    " like \'50:name *= \"Firefox\"'. compton-trans is recommended over\n"
	    " this. Note we do not distinguish 100% and unset, and we don't make\n"
	    " any guarantee about possible conflicts with other programs that set\n"
	    " _NET_WM_WINDOW_OPACITY on frame or client windows.\n"
	    "\n"
	    "--shadow-exclude-reg geometry\n"
	    " Specify a X geometry that describes the region in which shadow\n"
	    " should not be painted in, such as a dock window region.\n"
	    " Use --shadow-exclude-reg \'x10+0-0\', for example, if the 10 pixels\n"
	    " on the bottom of the screen should not have shadows painted on.\n"
	    "\n"
	    "--xinerama-shadow-crop\n"
	    " Crop shadow of a window fully on a particular Xinerama screen to the\n"
	    " screen.\n"
	    "\n"
	    "--backend backend\n"
	    " Choose backend. Possible choices are xrender, glx, and\n"
	    " xr_glx_hybrid."
#ifndef CONFIG_OPENGL
	    " (GLX BACKENDS DISABLED AT COMPILE TIME)"
#endif
	    "\n\n"
	    "--glx-no-stencil\n"
	    " GLX backend: Avoid using stencil buffer. Might cause issues\n"
	    " when rendering transparent content. My tests show a 15% performance\n"
	    " boost.\n"
	    "\n"
	    "--glx-no-rebind-pixmap\n"
	    " GLX backend: Avoid rebinding pixmap on window damage. Probably\n"
	    " could improve performance on rapid window content changes, but is\n"
	    " known to break things on some drivers (LLVMpipe, xf86-video-intel,\n"
	    " etc.).\n"
	    "\n"
	    "--use-damage\n"
	    " Use the damage information to limit rendering to parts of the screen\n"
	    " that has actually changed. Potentially improves the performance.\n"
	    "\n"
	    "--xrender-sync-fence\n"
	    " Additionally use X Sync fence to sync clients' draw calls. Needed\n"
	    " on nvidia-drivers with GLX backend for some users.\n"
	    "\n"
	    "--force-win-blend\n"
	    " Force all windows to be painted with blending. Useful if you have a\n"
	    " --glx-fshader-win that could turn opaque pixels transparent.\n"
	    "\n"
	    "--dbus\n"
	    " Enable remote control via D-Bus. See the D-BUS API section in the\n"
	    " man page for more details."
#ifndef CONFIG_DBUS
	    WARNING_DISABLED
#endif
	    "\n\n"
	    "--benchmark cycles\n"
	    " Benchmark mode. Repeatedly paint until reaching the specified cycles.\n"
	    "\n"
	    "--benchmark-wid window-id\n"
	    " Specify window ID to repaint in benchmark mode. If omitted or is 0,\n"
	    " the whole screen is repainted.\n"
	    "--monitor-repaint\n"
	    " Highlight the updated area of the screen. For debugging the xrender\n"
	    " backend only.\n";
	// Errors go to stderr so the help text does not pollute stdout pipes.
	FILE *f = (ret ? stderr : stdout);
	fputs(usage_text, f);
#undef WARNING_DISABLED
}
// Short option string for getopt_long(); a trailing ':' marks an option
// that takes a required argument (e.g. "D:" for -D <fade-delta>).
static const char *shortopts = "D:I:O:d:r:o:m:l:t:i:e:hscnfFCaSzGb";
// Long option table for getopt_long().  Long-only options use return codes
// >= 256 so they can never collide with single-character short options; the
// codes are matched in get_early_config() and get_cfg() below.
static const struct option longopts[] = {
    {"help", no_argument, NULL, 'h'},
    {"config", required_argument, NULL, 256},
    {"shadow-radius", required_argument, NULL, 'r'},
    {"shadow-opacity", required_argument, NULL, 'o'},
    {"shadow-offset-x", required_argument, NULL, 'l'},
    {"shadow-offset-y", required_argument, NULL, 't'},
    {"fade-in-step", required_argument, NULL, 'I'},
    {"fade-out-step", required_argument, NULL, 'O'},
    {"fade-delta", required_argument, NULL, 'D'},
    {"menu-opacity", required_argument, NULL, 'm'},
    {"shadow", no_argument, NULL, 'c'},
    {"no-dock-shadow", no_argument, NULL, 'C'},
    {"clear-shadow", no_argument, NULL, 'z'},
    {"fading", no_argument, NULL, 'f'},
    {"inactive-opacity", required_argument, NULL, 'i'},
    {"frame-opacity", required_argument, NULL, 'e'},
    {"daemon", no_argument, NULL, 'b'},
    {"no-dnd-shadow", no_argument, NULL, 'G'},
    {"shadow-red", required_argument, NULL, 257},
    {"shadow-green", required_argument, NULL, 258},
    {"shadow-blue", required_argument, NULL, 259},
    {"inactive-opacity-override", no_argument, NULL, 260},
    {"inactive-dim", required_argument, NULL, 261},
    {"mark-wmwin-focused", no_argument, NULL, 262},
    {"shadow-exclude", required_argument, NULL, 263},
    {"mark-ovredir-focused", no_argument, NULL, 264},
    {"no-fading-openclose", no_argument, NULL, 265},
    {"shadow-ignore-shaped", no_argument, NULL, 266},
    {"detect-rounded-corners", no_argument, NULL, 267},
    {"detect-client-opacity", no_argument, NULL, 268},
    {"refresh-rate", required_argument, NULL, 269},
    {"vsync", optional_argument, NULL, 270},
    {"alpha-step", required_argument, NULL, 271},
    {"dbe", no_argument, NULL, 272},
    {"paint-on-overlay", no_argument, NULL, 273},
    {"sw-opti", no_argument, NULL, 274},
    {"vsync-aggressive", no_argument, NULL, 275},
    {"use-ewmh-active-win", no_argument, NULL, 276},
    {"respect-prop-shadow", no_argument, NULL, 277},
    {"unredir-if-possible", no_argument, NULL, 278},
    {"focus-exclude", required_argument, NULL, 279},
    {"inactive-dim-fixed", no_argument, NULL, 280},
    {"detect-transient", no_argument, NULL, 281},
    {"detect-client-leader", no_argument, NULL, 282},
    {"blur-background", no_argument, NULL, 283},
    {"blur-background-frame", no_argument, NULL, 284},
    {"blur-background-fixed", no_argument, NULL, 285},
    {"dbus", no_argument, NULL, 286},
    {"logpath", required_argument, NULL, 287},
    {"invert-color-include", required_argument, NULL, 288},
    {"opengl", no_argument, NULL, 289},
    {"backend", required_argument, NULL, 290},
    {"glx-no-stencil", no_argument, NULL, 291},
    {"glx-copy-from-front", no_argument, NULL, 292},
    {"benchmark", required_argument, NULL, 293},
    {"benchmark-wid", required_argument, NULL, 294},
    {"glx-use-copysubbuffermesa", no_argument, NULL, 295},
    {"blur-background-exclude", required_argument, NULL, 296},
    {"active-opacity", required_argument, NULL, 297},
    {"glx-no-rebind-pixmap", no_argument, NULL, 298},
    {"glx-swap-method", required_argument, NULL, 299},
    {"fade-exclude", required_argument, NULL, 300},
    {"blur-kern", required_argument, NULL, 301},
    {"resize-damage", required_argument, NULL, 302},
    {"glx-use-gpushader4", no_argument, NULL, 303},
    {"opacity-rule", required_argument, NULL, 304},
    {"shadow-exclude-reg", required_argument, NULL, 305},
    {"paint-exclude", required_argument, NULL, 306},
    {"xinerama-shadow-crop", no_argument, NULL, 307},
    {"unredir-if-possible-exclude", required_argument, NULL, 308},
    {"unredir-if-possible-delay", required_argument, NULL, 309},
    {"write-pid-path", required_argument, NULL, 310},
    {"vsync-use-glfinish", no_argument, NULL, 311},
    {"xrender-sync", no_argument, NULL, 312},
    {"xrender-sync-fence", no_argument, NULL, 313},
    {"show-all-xerrors", no_argument, NULL, 314},
    {"no-fading-destroyed-argb", no_argument, NULL, 315},
    {"force-win-blend", no_argument, NULL, 316},
    {"glx-fshader-win", required_argument, NULL, 317},
    {"version", no_argument, NULL, 318},
    {"no-x-selection", no_argument, NULL, 319},
    {"no-name-pixmap", no_argument, NULL, 320},
    {"log-level", required_argument, NULL, 321},
    {"log-file", required_argument, NULL, 322},
    {"use-damage", no_argument, NULL, 323},
    {"experimental-backends", no_argument, NULL, 733},
    {"monitor-repaint", no_argument, NULL, 800},
    {"diagnostics", no_argument, NULL, 801},
    // Must terminate with a NULL entry
    {NULL, 0, NULL, 0},
};
/// First pass over the command line: extract only the options that must be
/// known before logging/configuration is initialized (--config, --help,
/// --version, daemonization, X error verbosity).
///
/// @param[out] config_file  strdup'd --config path, or NULL if not given
/// @param[out] all_xerrors  set when --show-all-xerrors is present
/// @param[out] fork         set when -b/--daemon is present
/// @param[out] exit_code    status the caller should exit with if we return true
/// @return true if the process should quit immediately (help/version printed,
///         or a command-line error was found)
bool get_early_config(int argc, char *const *argv, char **config_file, bool *all_xerrors,
                      bool *fork, int *exit_code) {
	int longopt_idx = -1;
	// Rewind getopt state in case the command line is scanned again later
	// (get_cfg() performs a second full pass).
	optind = 1;
	*config_file = NULL;
	*exit_code = 0;

	int o;
	while ((o = getopt_long(argc, argv, shortopts, longopts, &longopt_idx)) != -1) {
		switch (o) {
		case 256:
			// --config
			*config_file = strdup(optarg);
			break;
		case 'h':
			usage(0);
			return true;
		case 'b':
			*fork = true;
			break;
		case 'd':
			log_warn("-d will be ignored, please use the DISPLAY "
			         "environment variable");
			break;
		case 314:
			// --show-all-xerrors
			*all_xerrors = true;
			break;
		case 318:
			// --version
			printf("%s\n", COMPTON_VERSION);
			return true;
		case 'S':
			log_warn("-S will be ignored");
			break;
		case 320:
			// --no-name-pixmap
			log_warn("--no-name-pixmap will be ignored");
			break;
		case '?':
		case ':':
			// Unknown option or missing required argument.
			usage(1);
			*exit_code = 1;
			return true;
		default:
			// Everything else is handled by get_cfg() on the second pass.
			break;
		}
	}

	// Reject stray positional arguments.
	if (optind < argc) {
		// log is not initialized here yet
		fprintf(stderr, "compton doesn't accept positional arguments.\n");
		*exit_code = 1;
		return true;
	}
	return false;
}
/**
 * Process arguments and configuration files.
 *
 * Second full pass over argv; the configuration file (if any) has already
 * been loaded by the caller, so command-line values override it here.
 *
 * @param opt              option struct to fill in
 * @param shadow_enable    whether shadows were enabled by the config file;
 *                         may be overridden by -c
 * @param fading_enable    whether fading was enabled by the config file;
 *                         may be overridden by -f/-F
 * @param conv_kern_hasneg whether an already-parsed blur kernel contains
 *                         negative values (passed by value; only used for
 *                         the xrender warning at the end)
 * @param winopt_mask      records which per-window-type options were set
 *                         explicitly, so defaults can be applied to the rest
 */
void get_cfg(options_t *opt, int argc, char *const *argv, bool shadow_enable,
             bool fading_enable, bool conv_kern_hasneg, win_option_mask_t *winopt_mask) {
	int o = 0, longopt_idx = -1;
	char *lc_numeric_old = strdup(setlocale(LC_NUMERIC, NULL));
	// Enforce LC_NUMERIC locale "C" here to make sure dots are recognized
	// instead of commas in atof().
	setlocale(LC_NUMERIC, "C");
	// Parse commandline arguments. Range checking will be done later.
	const char *deprecation_message = "has been removed. If you encounter problems "
	                                  "without this feature, please feel free to "
	                                  "open a bug report.";
	optind = 1;
	while (-1 != (o = getopt_long(argc, argv, shortopts, longopts, &longopt_idx))) {
		switch (o) {
// Helper macros: map an option code directly onto a bool/long/int field
// of `opt`.  The integer variants exit on parse failure.
#define P_CASEBOOL(idx, option) \
	case idx: \
		opt->option = true; \
		break
#define P_CASELONG(idx, option) \
	case idx: \
		if (!parse_long(optarg, &opt->option)) { \
			exit(1); \
		} \
		break
#define P_CASEINT(idx, option) \
	case idx: \
		if (!parse_int(optarg, &opt->option)) { \
			exit(1); \
		} \
		break
			// clang-format off
			// Short options
			case 318:
			case 'h':
				// These options should cause compton to exit early,
				// so assert(false) here
				assert(false);
				break;
			case 'd':
			case 'b':
			case 'S':
			case 314:
			case 320:
				// These options are handled by get_early_config()
				break;
			P_CASEINT('D', fade_delta);
			case 'I': opt->fade_in_step = normalize_d(atof(optarg)); break;
			case 'O': opt->fade_out_step = normalize_d(atof(optarg)); break;
			case 'c': shadow_enable = true; break;
			case 'C':
				// -C: disable shadows on dock windows only
				winopt_mask[WINTYPE_DOCK].shadow = true;
				opt->wintype_option[WINTYPE_DOCK].shadow = false;
				break;
			case 'G':
				// -G: disable shadows on drag-and-drop windows only
				winopt_mask[WINTYPE_DND].shadow = true;
				opt->wintype_option[WINTYPE_DND].shadow = false;
				break;
			case 'm':;
				// -m applies to both menu window types
				double tmp;
				tmp = normalize_d(atof(optarg));
				winopt_mask[WINTYPE_DROPDOWN_MENU].opacity = true;
				winopt_mask[WINTYPE_POPUP_MENU].opacity = true;
				opt->wintype_option[WINTYPE_POPUP_MENU].opacity = tmp;
				opt->wintype_option[WINTYPE_DROPDOWN_MENU].opacity = tmp;
				break;
			case 'f':
			case 'F':
				fading_enable = true;
				break;
			P_CASEINT('r', shadow_radius);
			case 'o':
				opt->shadow_opacity = atof(optarg);
				break;
			P_CASEINT('l', shadow_offset_x);
			P_CASEINT('t', shadow_offset_y);
			case 'i':
				opt->inactive_opacity = normalize_d(atof(optarg));
				break;
			case 'e': opt->frame_opacity = atof(optarg); break;
			case 'z':
				log_warn("clear-shadow is removed, shadows are automatically "
				         "cleared now. If you want to prevent shadow from been "
				         "cleared under certain types of windows, you can use "
				         "the \"full-shadow\" per window type option.");
				break;
			case 'n':
			case 'a':
			case 's':
				log_error("-n, -a, and -s have been removed.");
				break;
			// Long options
			case 256:
				// --config
				break;
			case 257:
				// --shadow-red
				opt->shadow_red = atof(optarg);
				break;
			case 258:
				// --shadow-green
				opt->shadow_green = atof(optarg);
				break;
			case 259:
				// --shadow-blue
				opt->shadow_blue = atof(optarg);
				break;
			P_CASEBOOL(260, inactive_opacity_override);
			case 261:
				// --inactive-dim
				opt->inactive_dim = atof(optarg);
				break;
			P_CASEBOOL(262, mark_wmwin_focused);
			case 263:
				// --shadow-exclude
				condlst_add(&opt->shadow_blacklist, optarg);
				break;
			P_CASEBOOL(264, mark_ovredir_focused);
			P_CASEBOOL(265, no_fading_openclose);
			P_CASEBOOL(266, shadow_ignore_shaped);
			P_CASEBOOL(267, detect_rounded_corners);
			P_CASEBOOL(268, detect_client_opacity);
			P_CASEINT(269, refresh_rate);
			case 270:
				// --vsync: boolean now, but still accepts the old
				// method-name argument for compatibility
				if (optarg) {
					opt->vsync = parse_vsync(optarg);
					log_warn("--vsync doesn't take argument anymore. \"%s\" "
					         "is interpreted as \"%s\" for compatibility, but "
					         "this will stop working soon",
					         optarg, opt->vsync ? "true" : "false");
				} else {
					opt->vsync = true;
				}
				break;
			case 271:
				// --alpha-step
				log_warn("--alpha-step has been removed, compton now tries to "
				         "make use of all alpha values");
				break;
			case 272: log_warn("use of --dbe is deprecated"); break;
			case 273:
				log_warn("--paint-on-overlay has been removed, and is enabled "
				         "when possible");
				break;
			P_CASEBOOL(274, sw_opti);
			case 275:
				// --vsync-aggressive
				log_warn("--vsync-aggressive has been deprecated, please remove it"
				         " from the command line options");
				break;
			P_CASEBOOL(276, use_ewmh_active_win);
			P_CASEBOOL(277, respect_prop_shadow);
			P_CASEBOOL(278, unredir_if_possible);
			case 279:
				// --focus-exclude
				condlst_add(&opt->focus_blacklist, optarg);
				break;
			P_CASEBOOL(280, inactive_dim_fixed);
			P_CASEBOOL(281, detect_transient);
			P_CASEBOOL(282, detect_client_leader);
			case 283:
				// --blur_background
				opt->blur_method = BLUR_METHOD_KERNEL;
				break;
			P_CASEBOOL(284, blur_background_frame);
			P_CASEBOOL(285, blur_background_fixed);
			P_CASEBOOL(286, dbus);
			case 287:
				log_warn("Please use --log-file instead of --logpath");
				// fallthrough
			case 322:
				// --logpath, --log-file
				free(opt->logpath);
				opt->logpath = strdup(optarg);
				break;
			case 288:
				// --invert-color-include
				condlst_add(&opt->invert_color_list, optarg);
				break;
			case 289:
				// --opengl
				opt->backend = BKEND_GLX;
				break;
			case 290:
				// --backend
				opt->backend = parse_backend(optarg);
				if (opt->backend >= NUM_BKEND)
					exit(1);
				break;
			P_CASEBOOL(291, glx_no_stencil);
			case 292:
				log_error("--glx-copy-from-front %s", deprecation_message);
				exit(1);
				break;
			P_CASEINT(293, benchmark);
			case 294:
				// --benchmark-wid
				opt->benchmark_wid = (xcb_window_t)strtol(optarg, NULL, 0);
				break;
			case 295:
				log_error("--glx-use-copysubbuffermesa %s", deprecation_message);
				exit(1);
				break;
			case 296:
				// --blur-background-exclude
				condlst_add(&opt->blur_background_blacklist, optarg);
				break;
			case 297:
				// --active-opacity
				opt->active_opacity = normalize_d(atof(optarg));
				break;
			P_CASEBOOL(298, glx_no_rebind_pixmap);
			case 299: {
				// --glx-swap-method: deprecated; anything other than
				// "undefined"/0 is translated into --use-damage
				char *endptr;
				long tmpval = strtol(optarg, &endptr, 10);
				bool should_remove = true;
				if (*endptr || !(*optarg)) {
					// optarg is not a number, or an empty string
					tmpval = -1;
				}
				if (strcmp(optarg, "undefined") != 0 && tmpval != 0) {
					// If not undefined, we will use damage and buffer-age to
					// limit the rendering area.
					opt->use_damage = true;
					should_remove = false;
				}
				log_warn("--glx-swap-method has been deprecated, your setting "
				         "\"%s\" should be %s.",
				         optarg,
				         !should_remove ? "replaced by `--use-damage`" :
				                          "removed");
				break;
			}
			case 300:
				// --fade-exclude
				condlst_add(&opt->fade_blacklist, optarg);
				break;
			case 301:
				// --blur-kern
				if (!parse_blur_kern_lst(optarg, opt->blur_kerns,
				                         MAX_BLUR_PASS, &conv_kern_hasneg))
					exit(1);
				break;
			P_CASEINT(302, resize_damage);
			case 303:
				// --glx-use-gpushader4
				log_warn("--glx-use-gpushader4 is deprecated since v6."
				         " Please remove it from command line options.");
				break;
			case 304:
				// --opacity-rule
				if (!parse_rule_opacity(&opt->opacity_rules, optarg))
					exit(1);
				break;
			case 305:
				// --shadow-exclude-reg
				free(opt->shadow_exclude_reg_str);
				opt->shadow_exclude_reg_str = strdup(optarg);
				log_warn("--shadow-exclude-reg is deprecated. You are likely "
				         "better off using --shadow-exclude anyway");
				break;
			case 306:
				// --paint-exclude
				condlst_add(&opt->paint_blacklist, optarg);
				break;
			P_CASEBOOL(307, xinerama_shadow_crop);
			case 308:
				// --unredir-if-possible-exclude
				condlst_add(&opt->unredir_if_possible_blacklist, optarg);
				break;
			P_CASELONG(309, unredir_if_possible_delay);
			case 310:
				// --write-pid-path
				opt->write_pid_path = strdup(optarg);
				break;
			P_CASEBOOL(311, vsync_use_glfinish);
			case 312:
				// --xrender-sync
				log_warn("Please use --xrender-sync-fence instead of --xrender-sync");
				opt->xrender_sync_fence = true;
				break;
			P_CASEBOOL(313, xrender_sync_fence);
			P_CASEBOOL(315, no_fading_destroyed_argb);
			P_CASEBOOL(316, force_win_blend);
			case 317:
				opt->glx_fshader_win_str = strdup(optarg);
				log_warn("--glx-fshader-win is being deprecated, and might be "
				         "removed in the future. If you really need this "
				         "feature, please report an issue to let us know");
				break;
			case 321: {
				// --log-level
				enum log_level tmp_level = string_to_log_level(optarg);
				if (tmp_level == LOG_LEVEL_INVALID) {
					log_warn("Invalid log level, defaults to WARN");
				} else {
					log_set_level_tls(tmp_level);
				}
				break;
			}
			P_CASEBOOL(319, no_x_selection);
			P_CASEBOOL(323, use_damage);
			P_CASEBOOL(733, experimental_backends);
			P_CASEBOOL(800, monitor_repaint);
			case 801: opt->print_diagnostics = true; break;
			default: usage(1); break;
#undef P_CASEBOOL
		}
		// clang-format on
	}

	// Restore LC_NUMERIC
	setlocale(LC_NUMERIC, lc_numeric_old);
	free(lc_numeric_old);

	if (opt->monitor_repaint && opt->backend != BKEND_XRENDER) {
		log_warn("--monitor-repaint has no effect when backend is not xrender");
	}

	// Range checking and option assignments
	opt->fade_delta = max2(opt->fade_delta, 1);
	opt->shadow_radius = max2(opt->shadow_radius, 0);
	opt->shadow_red = normalize_d(opt->shadow_red);
	opt->shadow_green = normalize_d(opt->shadow_green);
	opt->shadow_blue = normalize_d(opt->shadow_blue);
	opt->inactive_dim = normalize_d(opt->inactive_dim);
	opt->frame_opacity = normalize_d(opt->frame_opacity);
	opt->shadow_opacity = normalize_d(opt->shadow_opacity);
	opt->refresh_rate = normalize_i_range(opt->refresh_rate, 0, 300);

	// Apply default wintype options that are dependent on global options
	set_default_winopts(opt, winopt_mask, shadow_enable, fading_enable);

	// --blur-background-frame implies --blur-background
	if (opt->blur_background_frame && !opt->blur_method) {
		opt->blur_method = BLUR_METHOD_KERNEL;
	}

	// Other variables determined by options

	// Determine whether we track window grouping
	if (opt->detect_transient || opt->detect_client_leader) {
		opt->track_leader = true;
	}

	// Fill default blur kernel
	if (opt->blur_method == BLUR_METHOD_KERNEL && !opt->blur_kerns[0]) {
		CHECK(parse_blur_kern_lst("3x3box", opt->blur_kerns, MAX_BLUR_PASS,
		                          &conv_kern_hasneg));
	}

	if (opt->resize_damage < 0) {
		log_warn("Negative --resize-damage will not work correctly.");
	}

	if (opt->backend == BKEND_XRENDER && conv_kern_hasneg) {
		log_warn("A convolution kernel with negative values may not work "
		         "properly under X Render backend.");
	}
}
// vim: set noet sw=8 ts=8 :
| 35.57485 | 90 | 0.623666 | [
"geometry",
"render"
] |
d226882a583df8cd913d9f3b9806365a35f76f71 | 1,797 | h | C | Applications/VpView/vtkVpInteractionCallback.h | judajake/vivia | ac0bad0dc200b5af25911513edb0ca6fd6e9f622 | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:08:05.000Z | 2017-07-31T07:08:05.000Z | Applications/VpView/vtkVpInteractionCallback.h | judajake/vivia | ac0bad0dc200b5af25911513edb0ca6fd6e9f622 | [
"BSD-3-Clause"
] | null | null | null | Applications/VpView/vtkVpInteractionCallback.h | judajake/vivia | ac0bad0dc200b5af25911513edb0ca6fd6e9f622 | [
"BSD-3-Clause"
] | null | null | null | /*ckwg +5
* Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
* KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
* Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
*/
#ifndef __vtkVpInteractionCallback_h
#define __vtkVpInteractionCallback_h
// VTK includes.
#include <vtkCamera.h>
#include <vtkCommand.h>
#include <vtkSetGet.h>
// VG Application includes.
#include <vpViewCore.h>
// VG VTK includes.
#include "vtkVgInteractorStyleRubberBand2D.h"
class vtkVpInteractionCallback : public vtkCommand
{
public:
static vtkVpInteractionCallback* New()
{
return new vtkVpInteractionCallback();
}
void UpdateViewCoreInstance()
{
this->ViewCoreInstance->updateExtents();
this->ViewCoreInstance->render(false);
}
virtual void Execute(vtkObject* caller, unsigned long eventId,
void* vtkNotUsed(callData))
{
if (!vtkVgInteractorStyleRubberBand2D::SafeDownCast(caller))
{
return;
}
if (eventId == vtkCommand::InteractionEvent)
{
int interaction =
this->ViewCoreInstance->getInteractorStyle()->GetInteraction();
if (interaction == vtkInteractorStyleRubberBand2D::PANNING ||
interaction == vtkInteractorStyleRubberBand2D::ZOOMING)
{
if (interaction == vtkInteractorStyleRubberBand2D::PANNING)
{
this->ViewCoreInstance->setIdOfTrackToFollow(-1);
}
this->UpdateViewCoreInstance();
}
}
else if (eventId == vtkVgInteractorStyleRubberBand2D::ZoomCompleteEvent)
{
this->ViewCoreInstance->setIdOfTrackToFollow(-1);
this->UpdateViewCoreInstance();
}
}
vpViewCore* ViewCoreInstance;
};
#endif // __vtkVpInteractionCallback_h
| 25.671429 | 77 | 0.6867 | [
"render"
] |
d227025d045eddca47e2f0055c559639b9670118 | 4,448 | h | C | catkin_ws/devel/include/pmt/pmt.h | PMinThant/ROS-with-Cpp | 5a63722b4817b02b5e2e66c14a9ff88a4fe2fbd7 | [
"MIT"
] | null | null | null | catkin_ws/devel/include/pmt/pmt.h | PMinThant/ROS-with-Cpp | 5a63722b4817b02b5e2e66c14a9ff88a4fe2fbd7 | [
"MIT"
] | null | null | null | catkin_ws/devel/include/pmt/pmt.h | PMinThant/ROS-with-Cpp | 5a63722b4817b02b5e2e66c14a9ff88a4fe2fbd7 | [
"MIT"
] | null | null | null | // Generated by gencpp from file pmt/pmt.msg
// DO NOT EDIT!
#ifndef PMT_MESSAGE_PMT_H
#define PMT_MESSAGE_PMT_H
#include <string>
#include <vector>
#include <map>
#include <ros/types.h>
#include <ros/serialization.h>
#include <ros/builtin_message_traits.h>
#include <ros/message_operations.h>
namespace pmt
{
// Message struct generated by gencpp from pmt/pmt.msg: three 32-bit signed
// integer fields A, B and C.  (Do not edit by hand.)
template <class ContainerAllocator>
struct pmt_
{
  typedef pmt_<ContainerAllocator> Type;

  // Default constructor: all fields zero-initialized.
  pmt_()
    : A(0)
    , B(0)
    , C(0)  {
    }
  // Allocator-aware constructor required by the ROS serialization API;
  // the allocator is unused because all fields are primitives.
  pmt_(const ContainerAllocator& _alloc)
    : A(0)
    , B(0)
    , C(0)  {
  (void)_alloc;
    }

   typedef int32_t _A_type;
  _A_type A;

   typedef int32_t _B_type;
  _B_type B;

   typedef int32_t _C_type;
  _C_type C;

  // Standard ROS smart-pointer aliases for this message type.
  typedef boost::shared_ptr< ::pmt::pmt_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::pmt::pmt_<ContainerAllocator> const> ConstPtr;

}; // struct pmt_

// Convenience aliases using the default allocator.
typedef ::pmt::pmt_<std::allocator<void> > pmt;

typedef boost::shared_ptr< ::pmt::pmt > pmtPtr;
typedef boost::shared_ptr< ::pmt::pmt const> pmtConstPtr;

// constants requiring out of line definition

// Stream a human-readable rendering via the generated Printer.
template<typename ContainerAllocator>
std::ostream& operator<<(std::ostream& s, const ::pmt::pmt_<ContainerAllocator> & v)
{
ros::message_operations::Printer< ::pmt::pmt_<ContainerAllocator> >::stream(s, "", v);
return s;
}

// Field-wise equality; allocator types may differ between the operands.
template<typename ContainerAllocator1, typename ContainerAllocator2>
bool operator==(const ::pmt::pmt_<ContainerAllocator1> & lhs, const ::pmt::pmt_<ContainerAllocator2> & rhs)
{
  return lhs.A == rhs.A &&
    lhs.B == rhs.B &&
    lhs.C == rhs.C;
}

template<typename ContainerAllocator1, typename ContainerAllocator2>
bool operator!=(const ::pmt::pmt_<ContainerAllocator1> & lhs, const ::pmt::pmt_<ContainerAllocator2> & rhs)
{
  return !(lhs == rhs);
}

} // namespace pmt
namespace ros
{
namespace message_traits
{
template <class ContainerAllocator>
struct IsMessage< ::pmt::pmt_<ContainerAllocator> >
: TrueType
{ };
template <class ContainerAllocator>
struct IsMessage< ::pmt::pmt_<ContainerAllocator> const>
: TrueType
{ };
template <class ContainerAllocator>
struct IsFixedSize< ::pmt::pmt_<ContainerAllocator> >
: TrueType
{ };
template <class ContainerAllocator>
struct IsFixedSize< ::pmt::pmt_<ContainerAllocator> const>
: TrueType
{ };
template <class ContainerAllocator>
struct HasHeader< ::pmt::pmt_<ContainerAllocator> >
: FalseType
{ };
template <class ContainerAllocator>
struct HasHeader< ::pmt::pmt_<ContainerAllocator> const>
: FalseType
{ };
template<class ContainerAllocator>
struct MD5Sum< ::pmt::pmt_<ContainerAllocator> >
{
static const char* value()
{
return "e7a68ce4e0b75a9719b4950a7069c9d4";
}
static const char* value(const ::pmt::pmt_<ContainerAllocator>&) { return value(); }
static const uint64_t static_value1 = 0xe7a68ce4e0b75a97ULL;
static const uint64_t static_value2 = 0x19b4950a7069c9d4ULL;
};
template<class ContainerAllocator>
struct DataType< ::pmt::pmt_<ContainerAllocator> >
{
static const char* value()
{
return "pmt/pmt";
}
static const char* value(const ::pmt::pmt_<ContainerAllocator>&) { return value(); }
};
template<class ContainerAllocator>
struct Definition< ::pmt::pmt_<ContainerAllocator> >
{
static const char* value()
{
return "int32 A\n"
"int32 B\n"
"int32 C\n"
;
}
static const char* value(const ::pmt::pmt_<ContainerAllocator>&) { return value(); }
};
} // namespace message_traits
} // namespace ros
namespace ros
{
namespace serialization
{
template<class ContainerAllocator> struct Serializer< ::pmt::pmt_<ContainerAllocator> >
{
template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
{
stream.next(m.A);
stream.next(m.B);
stream.next(m.C);
}
ROS_DECLARE_ALLINONE_SERIALIZER
}; // struct pmt_
} // namespace serialization
} // namespace ros
namespace ros
{
namespace message_operations
{
template<class ContainerAllocator>
struct Printer< ::pmt::pmt_<ContainerAllocator> >
{
template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::pmt::pmt_<ContainerAllocator>& v)
{
s << indent << "A: ";
Printer<int32_t>::stream(s, indent + " ", v.A);
s << indent << "B: ";
Printer<int32_t>::stream(s, indent + " ", v.B);
s << indent << "C: ";
Printer<int32_t>::stream(s, indent + " ", v.C);
}
};
} // namespace message_operations
} // namespace ros
#endif // PMT_MESSAGE_PMT_H
| 20.592593 | 126 | 0.694694 | [
"vector"
] |
d236f86667ffeb4624222c2d14de626438a59ce6 | 41,026 | c | C | bench-tools/fio/gclient.c | yejr/MyDBAtools | 1f1d8448ee6911a4caf1a25625667b3b7f7f3020 | [
"BSD-3-Clause"
] | 12 | 2016-06-03T14:57:51.000Z | 2021-05-14T00:06:37.000Z | bench-tools/fio/gclient.c | yejr/MyDBAtools | 1f1d8448ee6911a4caf1a25625667b3b7f7f3020 | [
"BSD-3-Clause"
] | null | null | null | bench-tools/fio/gclient.c | yejr/MyDBAtools | 1f1d8448ee6911a4caf1a25625667b3b7f7f3020 | [
"BSD-3-Clause"
] | 16 | 2015-06-09T04:33:55.000Z | 2021-07-20T02:36:22.000Z | #include <malloc.h>
#include <string.h>
#include <glib.h>
#include <cairo.h>
#include <gtk/gtk.h>
#include "fio.h"
#include "gfio.h"
#include "ghelpers.h"
#include "goptions.h"
#include "gerror.h"
#include "graph.h"
#include "gclient.h"
#include "printing.h"
static void gfio_display_ts(struct fio_client *client, struct thread_stat *ts,
struct group_run_stats *rs);
static gboolean results_window_delete(GtkWidget *w, gpointer data)
{
struct gui_entry *ge = (struct gui_entry *) data;
gtk_widget_destroy(w);
ge->results_window = NULL;
ge->results_notebook = NULL;
return TRUE;
}
static void results_close(GtkWidget *w, gpointer *data)
{
struct gui_entry *ge = (struct gui_entry *) data;
gtk_widget_destroy(ge->results_window);
}
static void results_print(GtkWidget *w, gpointer *data)
{
struct gui_entry *ge = (struct gui_entry *) data;
gfio_print_results(ge);
}
static GtkActionEntry results_menu_items[] = {
{ "FileMenuAction", GTK_STOCK_FILE, "File", NULL, NULL, NULL},
{ "GraphMenuAction", GTK_STOCK_FILE, "Graph", NULL, NULL, NULL},
{ "PrintFile", GTK_STOCK_PRINT, "Print", "<Control>P", NULL, G_CALLBACK(results_print) },
{ "CloseFile", GTK_STOCK_CLOSE, "Close", "<Control>W", NULL, G_CALLBACK(results_close) },
};
static gint results_nmenu_items = sizeof(results_menu_items) / sizeof(results_menu_items[0]);
static const gchar *results_ui_string = " \
<ui> \
<menubar name=\"MainMenu\"> \
<menu name=\"FileMenu\" action=\"FileMenuAction\"> \
<menuitem name=\"Print\" action=\"PrintFile\" /> \
<menuitem name=\"Close\" action=\"CloseFile\" /> \
</menu> \
<menu name=\"GraphMenu\" action=\"GraphMenuAction\"> \
</menu>\
</menubar> \
</ui> \
";
static GtkWidget *get_results_menubar(GtkWidget *window, struct gui_entry *ge)
{
GtkActionGroup *action_group;
GtkWidget *widget;
GError *error = 0;
ge->results_uimanager = gtk_ui_manager_new();
action_group = gtk_action_group_new("ResultsMenu");
gtk_action_group_add_actions(action_group, results_menu_items, results_nmenu_items, ge);
gtk_ui_manager_insert_action_group(ge->results_uimanager, action_group, 0);
gtk_ui_manager_add_ui_from_string(GTK_UI_MANAGER(ge->results_uimanager), results_ui_string, -1, &error);
gtk_window_add_accel_group(GTK_WINDOW(window), gtk_ui_manager_get_accel_group(ge->results_uimanager));
widget = gtk_ui_manager_get_widget(ge->results_uimanager, "/MainMenu");
return widget;
}
static GtkWidget *get_results_window(struct gui_entry *ge)
{
GtkWidget *win, *notebook, *vbox;
if (ge->results_window)
return ge->results_notebook;
win = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_window_set_title(GTK_WINDOW(win), "Results");
gtk_window_set_default_size(GTK_WINDOW(win), 1024, 768);
g_signal_connect(win, "delete-event", G_CALLBACK(results_window_delete), ge);
g_signal_connect(win, "destroy", G_CALLBACK(results_window_delete), ge);
vbox = gtk_vbox_new(FALSE, 0);
gtk_container_add(GTK_CONTAINER(win), vbox);
ge->results_menu = get_results_menubar(win, ge);
gtk_box_pack_start(GTK_BOX(vbox), ge->results_menu, FALSE, FALSE, 0);
notebook = gtk_notebook_new();
gtk_notebook_set_scrollable(GTK_NOTEBOOK(notebook), 1);
gtk_notebook_popup_enable(GTK_NOTEBOOK(notebook));
gtk_container_add(GTK_CONTAINER(vbox), notebook);
ge->results_window = win;
ge->results_notebook = notebook;
return ge->results_notebook;
}
static void gfio_text_op(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct cmd_text_pdu *p = (struct cmd_text_pdu *) cmd->payload;
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
struct gui *ui = ge->ui;
GtkTreeIter iter;
struct tm *tm;
time_t sec;
char tmp[64], timebuf[80];
sec = p->log_sec;
tm = localtime(&sec);
strftime(tmp, sizeof(tmp), "%Y-%m-%d %H:%M:%S", tm);
sprintf(timebuf, "%s.%03ld", tmp, (long) p->log_usec / 1000);
gdk_threads_enter();
gtk_list_store_append(ui->log_model, &iter);
gtk_list_store_set(ui->log_model, &iter, 0, timebuf, -1);
gtk_list_store_set(ui->log_model, &iter, 1, client->hostname, -1);
gtk_list_store_set(ui->log_model, &iter, 2, log_get_level(p->level), -1);
gtk_list_store_set(ui->log_model, &iter, 3, p->buf, -1);
if (p->level == FIO_LOG_ERR)
gfio_view_log(ui);
gdk_threads_leave();
}
static void disk_util_destroy(GtkWidget *w, gpointer data)
{
struct gui_entry *ge = (struct gui_entry *) data;
ge->disk_util_vbox = NULL;
gtk_widget_destroy(w);
}
static GtkWidget *gfio_disk_util_get_vbox(struct gui_entry *ge)
{
GtkWidget *vbox, *box, *scroll, *res_notebook;
if (ge->disk_util_vbox)
return ge->disk_util_vbox;
scroll = get_scrolled_window(5);
vbox = gtk_vbox_new(FALSE, 3);
box = gtk_hbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(vbox), box, FALSE, FALSE, 5);
gtk_scrolled_window_add_with_viewport(GTK_SCROLLED_WINDOW(scroll), vbox);
res_notebook = get_results_window(ge);
gtk_notebook_append_page(GTK_NOTEBOOK(res_notebook), scroll, gtk_label_new("Disk utilization"));
ge->disk_util_vbox = box;
g_signal_connect(vbox, "destroy", G_CALLBACK(disk_util_destroy), ge);
return ge->disk_util_vbox;
}
static int __gfio_disk_util_show(GtkWidget *res_notebook,
struct gfio_client *gc, struct cmd_du_pdu *p)
{
GtkWidget *box, *frame, *entry, *vbox, *util_vbox;
struct gui_entry *ge = gc->ge;
double util;
char tmp[16];
util_vbox = gfio_disk_util_get_vbox(ge);
vbox = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(util_vbox), vbox);
frame = gtk_frame_new((char *) p->dus.name);
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 2);
box = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), box);
frame = gtk_frame_new("Read");
gtk_box_pack_start(GTK_BOX(box), frame, FALSE, FALSE, 2);
vbox = gtk_hbox_new(TRUE, 3);
gtk_container_add(GTK_CONTAINER(frame), vbox);
entry = new_info_entry_in_frame(vbox, "IOs");
entry_set_int_value(entry, p->dus.s.ios[0]);
entry = new_info_entry_in_frame(vbox, "Merges");
entry_set_int_value(entry, p->dus.s.merges[0]);
entry = new_info_entry_in_frame(vbox, "Sectors");
entry_set_int_value(entry, p->dus.s.sectors[0]);
entry = new_info_entry_in_frame(vbox, "Ticks");
entry_set_int_value(entry, p->dus.s.ticks[0]);
frame = gtk_frame_new("Write");
gtk_box_pack_start(GTK_BOX(box), frame, FALSE, FALSE, 2);
vbox = gtk_hbox_new(TRUE, 3);
gtk_container_add(GTK_CONTAINER(frame), vbox);
entry = new_info_entry_in_frame(vbox, "IOs");
entry_set_int_value(entry, p->dus.s.ios[1]);
entry = new_info_entry_in_frame(vbox, "Merges");
entry_set_int_value(entry, p->dus.s.merges[1]);
entry = new_info_entry_in_frame(vbox, "Sectors");
entry_set_int_value(entry, p->dus.s.sectors[1]);
entry = new_info_entry_in_frame(vbox, "Ticks");
entry_set_int_value(entry, p->dus.s.ticks[1]);
frame = gtk_frame_new("Shared");
gtk_box_pack_start(GTK_BOX(box), frame, FALSE, FALSE, 2);
vbox = gtk_hbox_new(TRUE, 3);
gtk_container_add(GTK_CONTAINER(frame), vbox);
entry = new_info_entry_in_frame(vbox, "IO ticks");
entry_set_int_value(entry, p->dus.s.io_ticks);
entry = new_info_entry_in_frame(vbox, "Time in queue");
entry_set_int_value(entry, p->dus.s.time_in_queue);
util = 0.0;
if (p->dus.s.msec)
util = (double) 100 * p->dus.s.io_ticks / (double) p->dus.s.msec;
if (util > 100.0)
util = 100.0;
sprintf(tmp, "%3.2f%%", util);
entry = new_info_entry_in_frame(vbox, "Disk utilization");
gtk_entry_set_text(GTK_ENTRY(entry), tmp);
gtk_widget_show_all(ge->results_window);
return 0;
}
static int gfio_disk_util_show(struct gfio_client *gc)
{
struct gui_entry *ge = gc->ge;
GtkWidget *res_notebook;
int i;
if (!gc->nr_du)
return 1;
res_notebook = get_results_window(ge);
for (i = 0; i < gc->nr_du; i++) {
struct cmd_du_pdu *p = &gc->du[i];
__gfio_disk_util_show(res_notebook, gc, p);
}
gtk_widget_show_all(ge->results_window);
return 0;
}
static void gfio_disk_util_op(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct cmd_du_pdu *p = (struct cmd_du_pdu *) cmd->payload;
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
unsigned int nr = gc->nr_du;
gc->du = realloc(gc->du, (nr + 1) * sizeof(struct cmd_du_pdu));
memcpy(&gc->du[nr], p, sizeof(*p));
gc->nr_du++;
gdk_threads_enter();
if (ge->results_window)
__gfio_disk_util_show(ge->results_notebook, gc, p);
else
gfio_disk_util_show(gc);
gdk_threads_leave();
}
extern int sum_stat_clients;
extern struct thread_stat client_ts;
extern struct group_run_stats client_gs;
static int sum_stat_nr;
static void gfio_thread_status_op(struct fio_client *client,
struct fio_net_cmd *cmd)
{
struct cmd_ts_pdu *p = (struct cmd_ts_pdu *) cmd->payload;
gfio_display_ts(client, &p->ts, &p->rs);
if (sum_stat_clients == 1)
return;
sum_thread_stats(&client_ts, &p->ts, sum_stat_nr);
sum_group_stats(&client_gs, &p->rs);
client_ts.members++;
client_ts.thread_number = p->ts.thread_number;
client_ts.groupid = p->ts.groupid;
if (++sum_stat_nr == sum_stat_clients) {
strcpy(client_ts.name, "All clients");
gfio_display_ts(client, &client_ts, &client_gs);
}
}
static void gfio_group_stats_op(struct fio_client *client,
struct fio_net_cmd *cmd)
{
/* We're ignoring group stats for now */
}
static void gfio_update_thread_status(struct gui_entry *ge,
char *status_message, double perc)
{
static char message[100];
const char *m = message;
strncpy(message, status_message, sizeof(message) - 1);
gtk_progress_bar_set_text(GTK_PROGRESS_BAR(ge->thread_status_pb), m);
gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(ge->thread_status_pb), perc / 100.0);
gtk_widget_queue_draw(ge->ui->window);
}
static void gfio_update_thread_status_all(struct gui *ui, char *status_message,
double perc)
{
static char message[100];
const char *m = message;
strncpy(message, status_message, sizeof(message) - 1);
gtk_progress_bar_set_text(GTK_PROGRESS_BAR(ui->thread_status_pb), m);
gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(ui->thread_status_pb), perc / 100.0);
gtk_widget_queue_draw(ui->window);
}
/*
* Client specific ETA
*/
static void gfio_update_client_eta(struct fio_client *client, struct jobs_eta *je)
{
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
static int eta_good;
char eta_str[128];
char output[256];
char tmp[32];
double perc = 0.0;
int i2p = 0;
gdk_threads_enter();
eta_str[0] = '\0';
output[0] = '\0';
if (je->eta_sec != INT_MAX && je->elapsed_sec) {
perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
eta_to_str(eta_str, je->eta_sec);
}
sprintf(tmp, "%u", je->nr_running);
gtk_entry_set_text(GTK_ENTRY(ge->eta.jobs), tmp);
sprintf(tmp, "%u", je->files_open);
gtk_entry_set_text(GTK_ENTRY(ge->eta.files), tmp);
#if 0
if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
if (je->m_rate || je->t_rate) {
char *tr, *mr;
mr = num2str(je->m_rate, 4, 0, i2p);
tr = num2str(je->t_rate, 4, 0, i2p);
gtk_entry_set_text(GTK_ENTRY(ge->eta);
p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
free(tr);
free(mr);
} else if (je->m_iops || je->t_iops)
p += sprintf(p, ", CR=%d/%d IOPS", je->t_iops, je->m_iops);
gtk_entry_set_text(GTK_ENTRY(ge->eta.cr_bw), "---");
gtk_entry_set_text(GTK_ENTRY(ge->eta.cr_iops), "---");
gtk_entry_set_text(GTK_ENTRY(ge->eta.cw_bw), "---");
gtk_entry_set_text(GTK_ENTRY(ge->eta.cw_iops), "---");
#endif
if (je->eta_sec != INT_MAX && je->nr_running) {
char *iops_str[DDIR_RWDIR_CNT];
char *rate_str[DDIR_RWDIR_CNT];
int i;
if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
strcpy(output, "-.-% done");
else {
eta_good = 1;
perc *= 100.0;
sprintf(output, "%3.1f%% done", perc);
}
rate_str[0] = num2str(je->rate[0], 5, 10, i2p, 0);
rate_str[1] = num2str(je->rate[1], 5, 10, i2p, 0);
rate_str[2] = num2str(je->rate[2], 5, 10, i2p, 0);
iops_str[0] = num2str(je->iops[0], 4, 1, 0, 0);
iops_str[1] = num2str(je->iops[1], 4, 1, 0, 0);
iops_str[2] = num2str(je->iops[2], 4, 1, 0, 0);
gtk_entry_set_text(GTK_ENTRY(ge->eta.read_bw), rate_str[0]);
gtk_entry_set_text(GTK_ENTRY(ge->eta.read_iops), iops_str[0]);
gtk_entry_set_text(GTK_ENTRY(ge->eta.write_bw), rate_str[1]);
gtk_entry_set_text(GTK_ENTRY(ge->eta.write_iops), iops_str[1]);
gtk_entry_set_text(GTK_ENTRY(ge->eta.trim_bw), rate_str[2]);
gtk_entry_set_text(GTK_ENTRY(ge->eta.trim_iops), iops_str[2]);
graph_add_xy_data(ge->graphs.iops_graph, ge->graphs.read_iops, je->elapsed_sec, je->iops[0], iops_str[0]);
graph_add_xy_data(ge->graphs.iops_graph, ge->graphs.write_iops, je->elapsed_sec, je->iops[1], iops_str[1]);
graph_add_xy_data(ge->graphs.iops_graph, ge->graphs.trim_iops, je->elapsed_sec, je->iops[2], iops_str[2]);
graph_add_xy_data(ge->graphs.bandwidth_graph, ge->graphs.read_bw, je->elapsed_sec, je->rate[0], rate_str[0]);
graph_add_xy_data(ge->graphs.bandwidth_graph, ge->graphs.write_bw, je->elapsed_sec, je->rate[1], rate_str[1]);
graph_add_xy_data(ge->graphs.bandwidth_graph, ge->graphs.trim_bw, je->elapsed_sec, je->rate[2], rate_str[2]);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(rate_str[i]);
free(iops_str[i]);
}
}
if (eta_str[0]) {
char *dst = output + strlen(output);
sprintf(dst, " - %s", eta_str);
}
gfio_update_thread_status(ge, output, perc);
gdk_threads_leave();
}
/*
* Update ETA in main window for all clients
*/
static void gfio_update_all_eta(struct jobs_eta *je)
{
struct gui *ui = &main_ui;
static int eta_good;
char eta_str[128];
char output[256];
double perc = 0.0;
int i, i2p = 0;
gdk_threads_enter();
eta_str[0] = '\0';
output[0] = '\0';
if (je->eta_sec != INT_MAX && je->elapsed_sec) {
perc = (double) je->elapsed_sec / (double) (je->elapsed_sec + je->eta_sec);
eta_to_str(eta_str, je->eta_sec);
}
#if 0
if (je->m_rate[0] || je->m_rate[1] || je->t_rate[0] || je->t_rate[1]) {
if (je->m_rate || je->t_rate) {
char *tr, *mr;
mr = num2str(je->m_rate, 4, 0, i2p);
tr = num2str(je->t_rate, 4, 0, i2p);
gtk_entry_set_text(GTK_ENTRY(ui->eta);
p += sprintf(p, ", CR=%s/%s KB/s", tr, mr);
free(tr);
free(mr);
} else if (je->m_iops || je->t_iops)
p += sprintf(p, ", CR=%d/%d IOPS", je->t_iops, je->m_iops);
gtk_entry_set_text(GTK_ENTRY(ui->eta.cr_bw), "---");
gtk_entry_set_text(GTK_ENTRY(ui->eta.cr_iops), "---");
gtk_entry_set_text(GTK_ENTRY(ui->eta.cw_bw), "---");
gtk_entry_set_text(GTK_ENTRY(ui->eta.cw_iops), "---");
#endif
entry_set_int_value(ui->eta.jobs, je->nr_running);
if (je->eta_sec != INT_MAX && je->nr_running) {
char *iops_str[3];
char *rate_str[3];
if ((!je->eta_sec && !eta_good) || je->nr_ramp == je->nr_running)
strcpy(output, "-.-% done");
else {
eta_good = 1;
perc *= 100.0;
sprintf(output, "%3.1f%% done", perc);
}
rate_str[0] = num2str(je->rate[0], 5, 10, i2p, 0);
rate_str[1] = num2str(je->rate[1], 5, 10, i2p, 0);
rate_str[2] = num2str(je->rate[2], 5, 10, i2p, 0);
iops_str[0] = num2str(je->iops[0], 4, 1, 0, 0);
iops_str[1] = num2str(je->iops[1], 4, 1, 0, 0);
iops_str[2] = num2str(je->iops[2], 4, 1, 0, 0);
gtk_entry_set_text(GTK_ENTRY(ui->eta.read_bw), rate_str[0]);
gtk_entry_set_text(GTK_ENTRY(ui->eta.read_iops), iops_str[0]);
gtk_entry_set_text(GTK_ENTRY(ui->eta.write_bw), rate_str[1]);
gtk_entry_set_text(GTK_ENTRY(ui->eta.write_iops), iops_str[1]);
gtk_entry_set_text(GTK_ENTRY(ui->eta.trim_bw), rate_str[2]);
gtk_entry_set_text(GTK_ENTRY(ui->eta.trim_iops), iops_str[2]);
graph_add_xy_data(ui->graphs.iops_graph, ui->graphs.read_iops, je->elapsed_sec, je->iops[0], iops_str[0]);
graph_add_xy_data(ui->graphs.iops_graph, ui->graphs.write_iops, je->elapsed_sec, je->iops[1], iops_str[1]);
graph_add_xy_data(ui->graphs.iops_graph, ui->graphs.trim_iops, je->elapsed_sec, je->iops[2], iops_str[2]);
graph_add_xy_data(ui->graphs.bandwidth_graph, ui->graphs.read_bw, je->elapsed_sec, je->rate[0], rate_str[0]);
graph_add_xy_data(ui->graphs.bandwidth_graph, ui->graphs.write_bw, je->elapsed_sec, je->rate[1], rate_str[1]);
graph_add_xy_data(ui->graphs.bandwidth_graph, ui->graphs.trim_bw, je->elapsed_sec, je->rate[2], rate_str[2]);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(rate_str[i]);
free(iops_str[i]);
}
}
if (eta_str[0]) {
char *dst = output + strlen(output);
sprintf(dst, " - %s", eta_str);
}
gfio_update_thread_status_all(ui, output, perc);
gdk_threads_leave();
}
static void gfio_probe_op(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct cmd_probe_reply_pdu *probe = (struct cmd_probe_reply_pdu *) cmd->payload;
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
const char *os, *arch;
os = fio_get_os_string(probe->os);
if (!os)
os = "unknown";
arch = fio_get_arch_string(probe->arch);
if (!arch)
os = "unknown";
if (!client->name)
client->name = strdup((char *) probe->hostname);
gc->client_cpus = le32_to_cpu(probe->cpus);
gc->client_flags = le64_to_cpu(probe->flags);
gdk_threads_enter();
gtk_label_set_text(GTK_LABEL(ge->probe.hostname), (char *) probe->hostname);
gtk_label_set_text(GTK_LABEL(ge->probe.os), os);
gtk_label_set_text(GTK_LABEL(ge->probe.arch), arch);
gtk_label_set_text(GTK_LABEL(ge->probe.fio_ver), (char *) probe->fio_version);
gfio_set_state(ge, GE_STATE_CONNECTED);
gdk_threads_leave();
}
static void gfio_quit_op(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct gfio_client *gc = client->client_data;
gdk_threads_enter();
gfio_set_state(gc->ge, GE_STATE_NEW);
gdk_threads_leave();
}
static struct thread_options *gfio_client_add_job(struct gfio_client *gc,
struct thread_options_pack *top)
{
struct gfio_client_options *gco;
gco = calloc(1, sizeof(*gco));
convert_thread_options_to_cpu(&gco->o, top);
INIT_FLIST_HEAD(&gco->list);
flist_add_tail(&gco->list, &gc->o_list);
gc->o_list_nr = 1;
return &gco->o;
}
static void gfio_add_job_op(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct cmd_add_job_pdu *p = (struct cmd_add_job_pdu *) cmd->payload;
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
struct thread_options *o;
char *c1, *c2, *c3, *c4;
char tmp[80];
p->thread_number = le32_to_cpu(p->thread_number);
p->groupid = le32_to_cpu(p->groupid);
o = gfio_client_add_job(gc, &p->top);
gdk_threads_enter();
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(ge->eta.names), (gchar *) o->name);
gtk_combo_box_set_active(GTK_COMBO_BOX(ge->eta.names), 0);
sprintf(tmp, "%s %s", o->odirect ? "direct" : "buffered", ddir_str(o->td_ddir));
multitext_add_entry(&ge->eta.iotype, tmp);
c1 = fio_uint_to_kmg(o->min_bs[DDIR_READ]);
c2 = fio_uint_to_kmg(o->max_bs[DDIR_WRITE]);
c3 = fio_uint_to_kmg(o->min_bs[DDIR_READ]);
c4 = fio_uint_to_kmg(o->max_bs[DDIR_WRITE]);
sprintf(tmp, "%s-%s/%s-%s", c1, c2, c3, c4);
free(c1);
free(c2);
free(c3);
free(c4);
multitext_add_entry(&ge->eta.bs, tmp);
multitext_add_entry(&ge->eta.ioengine, (const char *) o->ioengine);
sprintf(tmp, "%u", o->iodepth);
multitext_add_entry(&ge->eta.iodepth, tmp);
multitext_set_entry(&ge->eta.iotype, 0);
multitext_set_entry(&ge->eta.bs, 0);
multitext_set_entry(&ge->eta.ioengine, 0);
multitext_set_entry(&ge->eta.iodepth, 0);
gfio_set_state(ge, GE_STATE_JOB_SENT);
gdk_threads_leave();
}
static void gfio_update_job_op(struct fio_client *client,
struct fio_net_cmd *cmd)
{
uint32_t *pdu_error = (uint32_t *) cmd->payload;
struct gfio_client *gc = client->client_data;
gc->update_job_status = le32_to_cpu(*pdu_error);
gc->update_job_done = 1;
}
static void gfio_client_timed_out(struct fio_client *client)
{
struct gfio_client *gc = client->client_data;
char buf[256];
gdk_threads_enter();
gfio_set_state(gc->ge, GE_STATE_NEW);
clear_ge_ui_info(gc->ge);
sprintf(buf, "Client %s: timeout talking to server.\n", client->hostname);
gfio_report_info(gc->ge->ui, "Network timeout", buf);
gdk_threads_leave();
}
static void gfio_client_stop(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct gfio_client *gc = client->client_data;
gdk_threads_enter();
gfio_set_state(gc->ge, GE_STATE_JOB_DONE);
if (gc->err_entry)
entry_set_int_value(gc->err_entry, client->error);
gdk_threads_leave();
}
static void gfio_client_start(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct gfio_client *gc = client->client_data;
gdk_threads_enter();
gfio_set_state(gc->ge, GE_STATE_JOB_STARTED);
gdk_threads_leave();
}
static void gfio_client_job_start(struct fio_client *client, struct fio_net_cmd *cmd)
{
struct gfio_client *gc = client->client_data;
gdk_threads_enter();
gfio_set_state(gc->ge, GE_STATE_JOB_RUNNING);
gdk_threads_leave();
}
static void gfio_client_iolog(struct fio_client *client, struct cmd_iolog_pdu *pdu)
{
printf("got iolog: name=%s, type=%u, entries=%u\n", pdu->name, pdu->log_type, pdu->nr_samples);
free(pdu);
}
static void gfio_add_total_depths_tree(GtkListStore *model,
struct thread_stat *ts, unsigned int len)
{
double io_u_dist[FIO_IO_U_MAP_NR];
GtkTreeIter iter;
/* Bits 1-6, and 8 */
const int add_mask = 0x17e;
int i, j;
stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
gtk_list_store_append(model, &iter);
gtk_list_store_set(model, &iter, 0, "Total", -1);
for (i = 1, j = 0; i < len; i++) {
char fbuf[32];
if (!(add_mask & (1UL << (i - 1))))
sprintf(fbuf, "0.0%%");
else {
sprintf(fbuf, "%3.1f%%", io_u_dist[j]);
j++;
}
gtk_list_store_set(model, &iter, i, fbuf, -1);
}
}
static void gfio_add_end_results(struct gfio_client *gc, struct thread_stat *ts,
struct group_run_stats *rs)
{
unsigned int nr = gc->nr_results;
gc->results = realloc(gc->results, (nr + 1) * sizeof(struct end_results));
memcpy(&gc->results[nr].ts, ts, sizeof(*ts));
memcpy(&gc->results[nr].gs, rs, sizeof(*rs));
gc->nr_results++;
}
static void gfio_add_sc_depths_tree(GtkListStore *model,
struct thread_stat *ts, unsigned int len,
int submit)
{
double io_u_dist[FIO_IO_U_MAP_NR];
GtkTreeIter iter;
/* Bits 0, and 3-8 */
const int add_mask = 0x1f9;
int i, j;
if (submit)
stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
else
stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
gtk_list_store_append(model, &iter);
gtk_list_store_set(model, &iter, 0, submit ? "Submit" : "Complete", -1);
for (i = 1, j = 0; i < len; i++) {
char fbuf[32];
if (!(add_mask & (1UL << (i - 1))))
sprintf(fbuf, "0.0%%");
else {
sprintf(fbuf, "%3.1f%%", io_u_dist[j]);
j++;
}
gtk_list_store_set(model, &iter, i, fbuf, -1);
}
}
static void gfio_show_io_depths(GtkWidget *vbox, struct thread_stat *ts)
{
GtkWidget *frame, *box, *tree_view = NULL;
GtkTreeSelection *selection;
GtkListStore *model;
int i;
const char *labels[] = { "Depth", "0", "1", "2", "4", "8", "16", "32", "64", ">= 64" };
const int nr_labels = ARRAY_SIZE(labels);
GType types[nr_labels];
frame = gtk_frame_new("IO depths");
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 5);
box = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), box);
for (i = 0; i < nr_labels; i++)
types[i] = G_TYPE_STRING;
model = gtk_list_store_newv(nr_labels, types);
tree_view = gtk_tree_view_new_with_model(GTK_TREE_MODEL(model));
gtk_widget_set_can_focus(tree_view, FALSE);
g_object_set(G_OBJECT(tree_view), "headers-visible", TRUE,
"enable-grid-lines", GTK_TREE_VIEW_GRID_LINES_BOTH, NULL);
selection = gtk_tree_view_get_selection(GTK_TREE_VIEW(tree_view));
gtk_tree_selection_set_mode(GTK_TREE_SELECTION(selection), GTK_SELECTION_BROWSE);
for (i = 0; i < nr_labels; i++)
tree_view_column(tree_view, i, labels[i], ALIGN_RIGHT | UNSORTABLE);
gfio_add_total_depths_tree(model, ts, nr_labels);
gfio_add_sc_depths_tree(model, ts, nr_labels, 1);
gfio_add_sc_depths_tree(model, ts, nr_labels, 0);
gtk_box_pack_start(GTK_BOX(box), tree_view, TRUE, TRUE, 3);
}
static void gfio_show_cpu_usage(GtkWidget *vbox, struct thread_stat *ts)
{
GtkWidget *box, *frame, *entry;
double usr_cpu, sys_cpu;
unsigned long runtime;
char tmp[32];
runtime = ts->total_run_time;
if (runtime) {
double runt = (double) runtime;
usr_cpu = (double) ts->usr_time * 100 / runt;
sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
frame = gtk_frame_new("OS resources");
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 5);
box = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), box);
entry = new_info_entry_in_frame(box, "User CPU");
sprintf(tmp, "%3.2f%%", usr_cpu);
gtk_entry_set_text(GTK_ENTRY(entry), tmp);
entry = new_info_entry_in_frame(box, "System CPU");
sprintf(tmp, "%3.2f%%", sys_cpu);
gtk_entry_set_text(GTK_ENTRY(entry), tmp);
entry = new_info_entry_in_frame(box, "Context switches");
entry_set_int_value(entry, ts->ctx);
entry = new_info_entry_in_frame(box, "Major faults");
entry_set_int_value(entry, ts->majf);
entry = new_info_entry_in_frame(box, "Minor faults");
entry_set_int_value(entry, ts->minf);
}
static GtkWidget *gfio_output_lat_buckets(double *lat, const char **labels,
int num)
{
GtkWidget *tree_view;
GtkTreeSelection *selection;
GtkListStore *model;
GtkTreeIter iter;
GType *types;
int i;
types = malloc(num * sizeof(GType));
for (i = 0; i < num; i++)
types[i] = G_TYPE_STRING;
model = gtk_list_store_newv(num, types);
free(types);
types = NULL;
tree_view = gtk_tree_view_new_with_model(GTK_TREE_MODEL(model));
gtk_widget_set_can_focus(tree_view, FALSE);
g_object_set(G_OBJECT(tree_view), "headers-visible", TRUE,
"enable-grid-lines", GTK_TREE_VIEW_GRID_LINES_BOTH, NULL);
selection = gtk_tree_view_get_selection(GTK_TREE_VIEW(tree_view));
gtk_tree_selection_set_mode(GTK_TREE_SELECTION(selection), GTK_SELECTION_BROWSE);
for (i = 0; i < num; i++)
tree_view_column(tree_view, i, labels[i], ALIGN_RIGHT | UNSORTABLE);
gtk_list_store_append(model, &iter);
for (i = 0; i < num; i++) {
char fbuf[32];
if (lat[i] <= 0.0)
sprintf(fbuf, "0.00");
else
sprintf(fbuf, "%3.2f%%", lat[i]);
gtk_list_store_set(model, &iter, i, fbuf, -1);
}
return tree_view;
}
static struct graph *setup_lat_bucket_graph(const char *title, double *lat,
const char **labels,
unsigned int len,
double xdim, double ydim)
{
struct graph *g;
int i;
g = graph_new(xdim, ydim, gfio_graph_font);
graph_title(g, title);
graph_x_title(g, "Buckets");
graph_y_title(g, "Percent");
for (i = 0; i < len; i++) {
graph_label_t l;
l = graph_add_label(g, labels[i]);
graph_add_data(g, l, lat[i]);
}
return g;
}
static int on_expose_lat_drawing_area(GtkWidget *w, GdkEvent *event, gpointer p)
{
struct graph *g = p;
cairo_t *cr;
cr = gdk_cairo_create(gtk_widget_get_window(w));
#if 0
if (graph_has_tooltips(g)) {
g_object_set(w, "has-tooltip", TRUE, NULL);
g_signal_connect(w, "query-tooltip", G_CALLBACK(clat_graph_tooltip), g);
}
#endif
cairo_set_source_rgb(cr, 0, 0, 0);
bar_graph_draw(g, cr);
cairo_destroy(cr);
return FALSE;
}
static gint on_config_lat_drawing_area(GtkWidget *w, GdkEventConfigure *event,
gpointer data)
{
guint width = gtk_widget_get_allocated_width(w);
guint height = gtk_widget_get_allocated_height(w);
struct graph *g = data;
graph_set_size(g, width, height);
graph_set_size(g, width, height);
graph_set_position(g, 0, 0);
return TRUE;
}
static void gfio_show_latency_buckets(struct gfio_client *gc, GtkWidget *vbox,
struct thread_stat *ts)
{
double io_u_lat[FIO_IO_U_LAT_U_NR + FIO_IO_U_LAT_M_NR];
const char *ranges[] = { "2u", "4u", "10u", "20u", "50u", "100u",
"250u", "500u", "750u", "1m", "2m",
"4m", "10m", "20m", "50m", "100m",
"250m", "500m", "750m", "1s", "2s", ">= 2s" };
int start, end, i;
const int total = FIO_IO_U_LAT_U_NR + FIO_IO_U_LAT_M_NR;
GtkWidget *frame, *tree_view, *hbox, *completion_vbox, *drawing_area;
struct gui_entry *ge = gc->ge;
stat_calc_lat_u(ts, io_u_lat);
stat_calc_lat_m(ts, &io_u_lat[FIO_IO_U_LAT_U_NR]);
/*
* Found out which first bucket has entries, and which last bucket
*/
start = end = -1U;
for (i = 0; i < total; i++) {
if (io_u_lat[i] == 0.00)
continue;
if (start == -1U)
start = i;
end = i;
}
/*
* No entries...
*/
if (start == -1U)
return;
tree_view = gfio_output_lat_buckets(&io_u_lat[start], &ranges[start], end - start + 1);
ge->lat_bucket_graph = setup_lat_bucket_graph("Latency Buckets", &io_u_lat[start], &ranges[start], end - start + 1, 700.0, 300.0);
frame = gtk_frame_new("Latency buckets");
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 5);
completion_vbox = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), completion_vbox);
hbox = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(completion_vbox), hbox);
drawing_area = gtk_drawing_area_new();
gtk_widget_set_size_request(GTK_WIDGET(drawing_area), 700, 300);
gtk_widget_modify_bg(drawing_area, GTK_STATE_NORMAL, &gfio_color_white);
gtk_container_add(GTK_CONTAINER(completion_vbox), drawing_area);
g_signal_connect(G_OBJECT(drawing_area), GFIO_DRAW_EVENT, G_CALLBACK(on_expose_lat_drawing_area), ge->lat_bucket_graph);
g_signal_connect(G_OBJECT(drawing_area), "configure_event", G_CALLBACK(on_config_lat_drawing_area), ge->lat_bucket_graph);
gtk_box_pack_start(GTK_BOX(hbox), tree_view, TRUE, TRUE, 3);
}
static void gfio_show_lat(GtkWidget *vbox, const char *name, unsigned long min,
unsigned long max, double mean, double dev)
{
const char *base = "(usec)";
GtkWidget *hbox, *label, *frame;
char *minp, *maxp;
char tmp[64];
if (!usec_to_msec(&min, &max, &mean, &dev))
base = "(msec)";
minp = num2str(min, 6, 1, 0, 0);
maxp = num2str(max, 6, 1, 0, 0);
sprintf(tmp, "%s %s", name, base);
frame = gtk_frame_new(tmp);
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 5);
hbox = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), hbox);
label = new_info_label_in_frame(hbox, "Minimum");
gtk_label_set_text(GTK_LABEL(label), minp);
label = new_info_label_in_frame(hbox, "Maximum");
gtk_label_set_text(GTK_LABEL(label), maxp);
label = new_info_label_in_frame(hbox, "Average");
sprintf(tmp, "%5.02f", mean);
gtk_label_set_text(GTK_LABEL(label), tmp);
label = new_info_label_in_frame(hbox, "Standard deviation");
sprintf(tmp, "%5.02f", dev);
gtk_label_set_text(GTK_LABEL(label), tmp);
free(minp);
free(maxp);
}
static GtkWidget *gfio_output_clat_percentiles(unsigned int *ovals,
fio_fp64_t *plist,
unsigned int len,
const char *base,
unsigned int scale)
{
GType types[FIO_IO_U_LIST_MAX_LEN];
GtkWidget *tree_view;
GtkTreeSelection *selection;
GtkListStore *model;
GtkTreeIter iter;
int i;
for (i = 0; i < len; i++)
types[i] = G_TYPE_INT;
model = gtk_list_store_newv(len, types);
tree_view = gtk_tree_view_new_with_model(GTK_TREE_MODEL(model));
gtk_widget_set_can_focus(tree_view, FALSE);
g_object_set(G_OBJECT(tree_view), "headers-visible", TRUE,
"enable-grid-lines", GTK_TREE_VIEW_GRID_LINES_BOTH, NULL);
selection = gtk_tree_view_get_selection(GTK_TREE_VIEW(tree_view));
gtk_tree_selection_set_mode(GTK_TREE_SELECTION(selection), GTK_SELECTION_BROWSE);
for (i = 0; i < len; i++) {
char fbuf[8];
sprintf(fbuf, "%2.2f%%", plist[i].u.f);
tree_view_column(tree_view, i, fbuf, ALIGN_RIGHT | UNSORTABLE);
}
gtk_list_store_append(model, &iter);
for (i = 0; i < len; i++) {
if (scale)
ovals[i] = (ovals[i] + 999) / 1000;
gtk_list_store_set(model, &iter, i, ovals[i], -1);
}
return tree_view;
}
static struct graph *setup_clat_graph(char *title, unsigned int *ovals,
fio_fp64_t *plist,
unsigned int len,
double xdim, double ydim)
{
struct graph *g;
int i;
g = graph_new(xdim, ydim, gfio_graph_font);
graph_title(g, title);
graph_x_title(g, "Percentile");
graph_y_title(g, "Time");
for (i = 0; i < len; i++) {
graph_label_t l;
char fbuf[8];
sprintf(fbuf, "%2.2f%%", plist[i].u.f);
l = graph_add_label(g, fbuf);
graph_add_data(g, l, (double) ovals[i]);
}
return g;
}
static void gfio_show_clat_percentiles(struct gfio_client *gc,
GtkWidget *vbox, struct thread_stat *ts,
int ddir)
{
unsigned int *io_u_plat = ts->io_u_plat[ddir];
unsigned long nr = ts->clat_stat[ddir].samples;
fio_fp64_t *plist = ts->percentile_list;
unsigned int *ovals, len, minv, maxv, scale_down;
const char *base;
GtkWidget *tree_view, *frame, *hbox, *drawing_area, *completion_vbox;
struct gui_entry *ge = gc->ge;
char tmp[64];
len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
if (!len)
goto out;
/*
* We default to usecs, but if the value range is such that we
* should scale down to msecs, do that.
*/
if (minv > 2000 && maxv > 99999) {
scale_down = 1;
base = "msec";
} else {
scale_down = 0;
base = "usec";
}
sprintf(tmp, "Completion percentiles (%s)", base);
tree_view = gfio_output_clat_percentiles(ovals, plist, len, base, scale_down);
ge->clat_graph = setup_clat_graph(tmp, ovals, plist, len, 700.0, 300.0);
frame = gtk_frame_new(tmp);
gtk_box_pack_start(GTK_BOX(vbox), frame, FALSE, FALSE, 5);
completion_vbox = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), completion_vbox);
hbox = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(completion_vbox), hbox);
drawing_area = gtk_drawing_area_new();
gtk_widget_set_size_request(GTK_WIDGET(drawing_area), 700, 300);
gtk_widget_modify_bg(drawing_area, GTK_STATE_NORMAL, &gfio_color_white);
gtk_container_add(GTK_CONTAINER(completion_vbox), drawing_area);
g_signal_connect(G_OBJECT(drawing_area), GFIO_DRAW_EVENT, G_CALLBACK(on_expose_lat_drawing_area), ge->clat_graph);
g_signal_connect(G_OBJECT(drawing_area), "configure_event", G_CALLBACK(on_config_lat_drawing_area), ge->clat_graph);
gtk_box_pack_start(GTK_BOX(hbox), tree_view, TRUE, TRUE, 3);
out:
if (ovals)
free(ovals);
}
#define GFIO_CLAT 1
#define GFIO_SLAT 2
#define GFIO_LAT 4
static void gfio_show_ddir_status(struct gfio_client *gc, GtkWidget *mbox,
struct group_run_stats *rs,
struct thread_stat *ts, int ddir)
{
const char *ddir_label[3] = { "Read", "Write", "Trim" };
GtkWidget *frame, *label, *box, *vbox, *main_vbox;
unsigned long min[3], max[3], runt;
unsigned long long bw, iops;
unsigned int flags = 0;
double mean[3], dev[3];
char *io_p, *bw_p, *iops_p;
int i2p;
if (!ts->runtime[ddir])
return;
i2p = is_power_of_2(rs->kb_base);
runt = ts->runtime[ddir];
bw = (1000 * ts->io_bytes[ddir]) / runt;
io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);
iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
iops_p = num2str(iops, 6, 1, 0, 0);
box = gtk_hbox_new(FALSE, 3);
gtk_box_pack_start(GTK_BOX(mbox), box, TRUE, FALSE, 3);
frame = gtk_frame_new(ddir_label[ddir]);
gtk_box_pack_start(GTK_BOX(box), frame, TRUE, TRUE, 5);
main_vbox = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), main_vbox);
box = gtk_hbox_new(FALSE, 3);
gtk_box_pack_start(GTK_BOX(main_vbox), box, TRUE, FALSE, 3);
label = new_info_label_in_frame(box, "IO");
gtk_label_set_text(GTK_LABEL(label), io_p);
label = new_info_label_in_frame(box, "Bandwidth");
gtk_label_set_text(GTK_LABEL(label), bw_p);
label = new_info_label_in_frame(box, "IOPS");
gtk_label_set_text(GTK_LABEL(label), iops_p);
label = new_info_label_in_frame(box, "Runtime (msec)");
label_set_int_value(label, ts->runtime[ddir]);
if (calc_lat(&ts->bw_stat[ddir], &min[0], &max[0], &mean[0], &dev[0])) {
double p_of_agg = 100.0;
const char *bw_str = "KB";
char tmp[32];
if (rs->agg[ddir]) {
p_of_agg = mean[0] * 100 / (double) rs->agg[ddir];
if (p_of_agg > 100.0)
p_of_agg = 100.0;
}
if (mean[0] > 999999.9) {
min[0] /= 1000.0;
max[0] /= 1000.0;
mean[0] /= 1000.0;
dev[0] /= 1000.0;
bw_str = "MB";
}
sprintf(tmp, "Bandwidth (%s)", bw_str);
frame = gtk_frame_new(tmp);
gtk_box_pack_start(GTK_BOX(main_vbox), frame, FALSE, FALSE, 5);
box = gtk_hbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), box);
label = new_info_label_in_frame(box, "Minimum");
label_set_int_value(label, min[0]);
label = new_info_label_in_frame(box, "Maximum");
label_set_int_value(label, max[0]);
label = new_info_label_in_frame(box, "Percentage of jobs");
sprintf(tmp, "%3.2f%%", p_of_agg);
gtk_label_set_text(GTK_LABEL(label), tmp);
label = new_info_label_in_frame(box, "Average");
sprintf(tmp, "%5.02f", mean[0]);
gtk_label_set_text(GTK_LABEL(label), tmp);
label = new_info_label_in_frame(box, "Standard deviation");
sprintf(tmp, "%5.02f", dev[0]);
gtk_label_set_text(GTK_LABEL(label), tmp);
}
if (calc_lat(&ts->slat_stat[ddir], &min[0], &max[0], &mean[0], &dev[0]))
flags |= GFIO_SLAT;
if (calc_lat(&ts->clat_stat[ddir], &min[1], &max[1], &mean[1], &dev[1]))
flags |= GFIO_CLAT;
if (calc_lat(&ts->lat_stat[ddir], &min[2], &max[2], &mean[2], &dev[2]))
flags |= GFIO_LAT;
if (flags) {
frame = gtk_frame_new("Latency");
gtk_box_pack_start(GTK_BOX(main_vbox), frame, FALSE, FALSE, 5);
vbox = gtk_vbox_new(FALSE, 3);
gtk_container_add(GTK_CONTAINER(frame), vbox);
if (flags & GFIO_SLAT)
gfio_show_lat(vbox, "Submission latency", min[0], max[0], mean[0], dev[0]);
if (flags & GFIO_CLAT)
gfio_show_lat(vbox, "Completion latency", min[1], max[1], mean[1], dev[1]);
if (flags & GFIO_LAT)
gfio_show_lat(vbox, "Total latency", min[2], max[2], mean[2], dev[2]);
}
if (ts->clat_percentiles)
gfio_show_clat_percentiles(gc, main_vbox, ts, ddir);
free(io_p);
free(bw_p);
free(iops_p);
}
static void __gfio_display_end_results(GtkWidget *win, struct gfio_client *gc,
struct thread_stat *ts,
struct group_run_stats *rs)
{
GtkWidget *box, *vbox, *entry, *scroll;
int i;
scroll = gtk_scrolled_window_new(NULL, NULL);
gtk_container_set_border_width(GTK_CONTAINER(scroll), 5);
gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scroll), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC);
vbox = gtk_vbox_new(FALSE, 3);
box = gtk_hbox_new(FALSE, 0);
gtk_box_pack_start(GTK_BOX(vbox), box, TRUE, FALSE, 5);
gtk_scrolled_window_add_with_viewport(GTK_SCROLLED_WINDOW(scroll), vbox);
gtk_notebook_append_page(GTK_NOTEBOOK(win), scroll, gtk_label_new(ts->name));
entry = new_info_entry_in_frame(box, "Name");
gtk_entry_set_text(GTK_ENTRY(entry), ts->name);
if (strlen(ts->description)) {
entry = new_info_entry_in_frame(box, "Description");
gtk_entry_set_text(GTK_ENTRY(entry), ts->description);
}
entry = new_info_entry_in_frame(box, "Group ID");
entry_set_int_value(entry, ts->groupid);
entry = new_info_entry_in_frame(box, "Jobs");
entry_set_int_value(entry, ts->members);
gc->err_entry = entry = new_info_entry_in_frame(box, "Error");
entry_set_int_value(entry, ts->error);
entry = new_info_entry_in_frame(box, "PID");
entry_set_int_value(entry, ts->pid);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
if (ts->io_bytes[i])
gfio_show_ddir_status(gc, vbox, rs, ts, i);
}
gfio_show_latency_buckets(gc, vbox, ts);
gfio_show_cpu_usage(vbox, ts);
gfio_show_io_depths(vbox, ts);
}
void gfio_display_end_results(struct gfio_client *gc)
{
struct gui_entry *ge = gc->ge;
GtkWidget *res_notebook;
int i;
res_notebook = get_results_window(ge);
for (i = 0; i < gc->nr_results; i++) {
struct end_results *e = &gc->results[i];
__gfio_display_end_results(res_notebook, gc, &e->ts, &e->gs);
}
if (gfio_disk_util_show(gc))
gtk_widget_show_all(ge->results_window);
}
static void gfio_display_ts(struct fio_client *client, struct thread_stat *ts,
struct group_run_stats *rs)
{
struct gfio_client *gc = client->client_data;
struct gui_entry *ge = gc->ge;
gfio_add_end_results(gc, ts, rs);
gdk_threads_enter();
if (ge->results_window)
__gfio_display_end_results(ge->results_notebook, gc, ts, rs);
else
gfio_display_end_results(gc);
gdk_threads_leave();
}
static void gfio_client_removed(struct fio_client *client)
{
struct gfio_client *gc = client->client_data;
assert(gc->client == client);
fio_put_client(gc->client);
gc->client = NULL;
}
struct client_ops gfio_client_ops = {
.text = gfio_text_op,
.disk_util = gfio_disk_util_op,
.thread_status = gfio_thread_status_op,
.group_stats = gfio_group_stats_op,
.jobs_eta = gfio_update_client_eta,
.eta = gfio_update_all_eta,
.probe = gfio_probe_op,
.quit = gfio_quit_op,
.add_job = gfio_add_job_op,
.update_job = gfio_update_job_op,
.timed_out = gfio_client_timed_out,
.stop = gfio_client_stop,
.start = gfio_client_start,
.job_start = gfio_client_job_start,
.iolog = gfio_client_iolog,
.removed = gfio_client_removed,
.eta_msec = FIO_CLIENT_DEF_ETA_MSEC,
.stay_connected = 1,
.client_type = FIO_CLIENT_TYPE_GUI,
};
| 29.262482 | 131 | 0.706259 | [
"model"
] |
d2370b17a73b69324b5e5a7a060283c25c92ec66 | 1,653 | h | C | services/HistoryService/HistoryServiceTools.h | knowac/tizen-browser-30 | 0ea06a4cd6bdca3dc3da674dd8189bf528c166f8 | [
"Apache-2.0"
] | 1 | 2019-01-31T21:44:00.000Z | 2019-01-31T21:44:00.000Z | services/HistoryService/HistoryServiceTools.h | knowac/tizen-browser-1.6.4 | a37a3ea5b8c01d86bd3dac00d228800e5eed4619 | [
"Apache-2.0"
] | null | null | null | services/HistoryService/HistoryServiceTools.h | knowac/tizen-browser-1.6.4 | a37a3ea5b8c01d86bd3dac00d228800e5eed4619 | [
"Apache-2.0"
] | 1 | 2019-01-31T21:44:04.000Z | 2019-01-31T21:44:04.000Z | /*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HISTORYMATCHFINDER_H_
#define HISTORYMATCHFINDER_H_
#include <string>
#include <vector>
#include "HistoryItemTypedef.h"
using namespace std;
namespace tizen_browser {
namespace services {
/**
* @brief Removes history items not matching given keywords
* @param historyItems Vector from which mismatching items will be removed
* @param keywords Keywords (history item is a match, when all keywords are
* matching)
*/
void removeMismatches(std::shared_ptr<HistoryItemVector> historyItems,
const vector<string>& keywords);
/**
* @brief Returns true, if vector contains at least two items with the same url.
*/
bool containsDuplicates(std::shared_ptr<HistoryItemVector> vec,
std::shared_ptr<HistoryItem> checked);
/**
* @brief Removes history items with urls duplicating other items.
* In the end, vector has items with unique URLs.
*/
void removeUrlDuplicates(std::shared_ptr<HistoryItemVector> historyItems);
} /* namespace services */
} /* namespace tizen_browser */
#endif /* HISTORYMATCHFINDER_H_ */
| 30.054545 | 80 | 0.751966 | [
"vector"
] |
d237606f77a95798b720839b7294641772d32e31 | 13,271 | c | C | source/spike.c | dos-games/vanilla-shadow_warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | 18 | 2015-07-21T03:53:29.000Z | 2021-12-20T18:42:56.000Z | source/spike.c | Azarien/shadow-warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | null | null | null | source/spike.c | Azarien/shadow-warrior | bf781c586c7e9cda0cfb0b3bc56983f535cb75c4 | [
"Unlicense"
] | 6 | 2016-10-17T09:06:22.000Z | 2022-02-11T10:02:17.000Z | //-------------------------------------------------------------------------
/*
Copyright (C) 1997, 2005 - 3D Realms Entertainment
This file is part of Shadow Warrior version 1.2
Shadow Warrior is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
Original Source: 1997 - Frank Maddin and Jim Norwood
Prepared for public release: 03/28/2005 - Charlie Wiederhold, 3D Realms
*/
//-------------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include "build.h"
#include "names2.h"
#include "panel.h"
#include "game.h"
#include "tags.h"
#include "def.h"
// CTW MODIFICATION ENTIRE FILE
// Converted all "false" and "true" to "FALSE" and "TRUE"
// CTW MODIFICATION ENTIRE FILE END
extern CHARp KeyDoorMessage[];
short DoSpikeMatch(PLAYERp pp, short match);
BOOL TestSpikeMatchActive(short match);
int DoVatorMove(short SpriteNum, long *lptr);
VOID InterpSectorSprites(short sectnum, BOOL state);
void ReverseSpike(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
// if paused go ahead and start it up again
if (u->Tics)
{
u->Tics = 0;
SetSpikeActive(SpriteNum);
return;
}
// moving toward to OFF pos
if (u->z_tgt == u->oz)
{
if (sp->z == u->oz)
u->z_tgt = u->sz;
else
if (u->sz == u->oz)
u->z_tgt = sp->z;
}
else
if (u->z_tgt == u->sz)
{
if (sp->z == u->oz)
u->z_tgt = sp->z;
else
if (u->sz == u->oz)
u->z_tgt = u->sz;
}
u->vel_rate = -u->vel_rate;
}
BOOL
SpikeSwitch(short match, short setting)
{
SPRITEp sp;
short i,nexti;
BOOL found = FALSE;
TRAVERSE_SPRITE_STAT(headspritestat[STAT_DEFAULT], i, nexti)
{
sp = &sprite[i];
if (sp->lotag == TAG_SPRITE_SWITCH_VATOR && sp->hitag == match)
{
found = TRUE;
AnimateSwitch(sp, setting);
}
}
return(found);
}
void SetSpikeActive(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
SECTORp sectp = §or[sp->sectnum];
if (TEST(sp->cstat, CSTAT_SPRITE_YFLIP))
short_setinterpolation(§p->ceilingheinum);
else
short_setinterpolation(§p->floorheinum);
InterpSectorSprites(sp->sectnum, ON);
// play activate sound
DoSoundSpotMatch(SP_TAG2(sp), 1, SOUND_OBJECT_TYPE);
SET(u->Flags, SPR_ACTIVE);
u->Tics = 0;
// moving to the ON position
if (u->z_tgt == sp->z)
VatorSwitch(SP_TAG2(sp), ON);
else
// moving to the OFF position
if (u->z_tgt == u->sz)
VatorSwitch(SP_TAG2(sp), OFF);
}
void SetSpikeInactive(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
SECTORp sectp = §or[sp->sectnum];
if (TEST(sp->cstat, CSTAT_SPRITE_YFLIP))
short_stopinterpolation(§p->ceilingheinum);
else
short_stopinterpolation(§p->floorheinum);
InterpSectorSprites(sp->sectnum, OFF);
// play activate sound
DoSoundSpotMatch(SP_TAG2(sp), 2, SOUND_OBJECT_TYPE);
RESET(u->Flags, SPR_ACTIVE);
}
// called for operation from the space bar
short DoSpikeOperate(PLAYERp pp, short sectnum)
{
USERp fu;
SPRITEp fsp;
short match;
short i,nexti;
TRAVERSE_SPRITE_SECT(headspritesect[sectnum], i, nexti)
{
fsp = &sprite[i];
if (fsp->statnum == STAT_SPIKE && SP_TAG1(fsp) == SECT_SPIKE && SP_TAG3(fsp) == 0)
{
fu = User[i];
sectnum = fsp->sectnum;
match = SP_TAG2(fsp);
if (match > 0)
{
if (TestSpikeMatchActive(match))
return(-1);
else
return(DoSpikeMatch(pp, match));
}
SetSpikeActive(i);
break;
}
}
return(i);
}
// called from switches and triggers
// returns first spike found
short
DoSpikeMatch(PLAYERp pp, short match)
{
USERp fu;
SPRITEp fsp;
short sectnum;
short first_spike = -1;
short i,nexti;
//SpikeSwitch(match, ON);
TRAVERSE_SPRITE_STAT(headspritestat[STAT_SPIKE], i, nexti)
{
fsp = &sprite[i];
if (SP_TAG1(fsp) == SECT_SPIKE && SP_TAG2(fsp) == match)
{
fu = User[i];
if (first_spike == -1)
first_spike = i;
sectnum = fsp->sectnum;
if (TEST(fu->Flags, SPR_ACTIVE))
{
ReverseSpike(i);
continue;
}
SetSpikeActive(i);
}
}
return(first_spike);
}
BOOL
TestSpikeMatchActive(short match)
{
USERp fu;
SPRITEp fsp;
short sectnum;
short i,nexti;
TRAVERSE_SPRITE_STAT(headspritestat[STAT_SPIKE], i, nexti)
{
fsp = &sprite[i];
if (SP_TAG1(fsp) == SECT_SPIKE && SP_TAG2(fsp) == match)
{
fu = User[i];
// door war
if (TEST_BOOL6(fsp))
continue;
if (TEST(fu->Flags, SPR_ACTIVE) || fu->Tics)
return(TRUE);
}
}
return(FALSE);
}
int DoSpikeMove(short SpriteNum, long *lptr)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
SECTORp sectp = §or[sp->sectnum];
long zval;
zval = *lptr;
// if LESS THAN goal
if (zval < u->z_tgt)
{
// move it DOWN
zval += (synctics * u->jump_speed);
u->jump_speed += u->vel_rate * synctics;
// if the other way make it equal
if (zval > u->z_tgt)
zval = u->z_tgt;
}
// if GREATER THAN goal
if (zval > u->z_tgt)
{
// move it UP
zval -= (synctics * u->jump_speed);
u->jump_speed += u->vel_rate * synctics;
if (zval < u->z_tgt)
zval = u->z_tgt;
}
*lptr = zval;
return(0);
}
VOID SpikeAlign(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
// either work on single sector or all tagged in SOBJ
if ((CHAR)SP_TAG7(sp) < 0)
{
if (TEST(sp->cstat, CSTAT_SPRITE_YFLIP))
alignceilslope(sp->sectnum, sp->x, sp->y, u->zclip);
else
alignflorslope(sp->sectnum, sp->x, sp->y, u->zclip);
}
else
{
if (TEST(sp->cstat, CSTAT_SPRITE_YFLIP))
SOBJ_AlignCeilingToPoint(&SectorObject[SP_TAG7(sp)], sp->x, sp->y, u->zclip);
else
SOBJ_AlignFloorToPoint(&SectorObject[SP_TAG7(sp)], sp->x, sp->y, u->zclip);
}
}
VOID MoveSpritesWithSpike(short sectnum)
{
SECTORp sectp = §or[sectnum];
SPRITEp sp;
short i,nexti;
long cz,fz;
TRAVERSE_SPRITE_SECT(headspritesect[sectnum], i, nexti)
{
sp = &sprite[i];
if (User[i])
continue;
if (TEST(sp->extra, SPRX_STAY_PUT_VATOR))
continue;
getzsofslope(sectnum, sp->x, sp->y, &cz, &fz);
sp->z = fz;
}
}
int DoSpike(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
SECTORp sectp = §or[sp->sectnum];
long *lptr;
long amt;
// zclip = floor or ceiling z
// oz = original z
// z_tgt = target z - on pos
// sz = starting z - off pos
lptr = &u->zclip;
DoSpikeMove(SpriteNum, lptr);
MoveSpritesWithSpike(sp->sectnum);
SpikeAlign(SpriteNum);
// EQUAL this entry has finished
if (*lptr == u->z_tgt)
{
// in the ON position
if (u->z_tgt == sp->z)
{
// change target
u->z_tgt = u->sz;
u->vel_rate = -u->vel_rate;
SetSpikeInactive(SpriteNum);
if (SP_TAG6(sp))
DoMatchEverything(NULL, SP_TAG6(sp), -1);
}
else
// in the OFF position
if (u->z_tgt == u->sz)
{
short match = SP_TAG2(sp);
// change target
u->jump_speed = u->vel_tgt;
u->vel_rate = labs(u->vel_rate);
u->z_tgt = sp->z;
SetSpikeInactive(SpriteNum);
// set owner swith back to OFF
// only if ALL spikes are inactive
if (!TestSpikeMatchActive(match))
{
//SpikeSwitch(match, OFF);
}
if (SP_TAG6(sp) && TEST_BOOL5(sp))
DoMatchEverything(NULL, SP_TAG6(sp), -1);
}
// operate only once
if (TEST_BOOL2(sp))
{
SetSpikeInactive(SpriteNum);
KillSprite(SpriteNum);
return(0);
}
// setup to go back to the original z
if (*lptr != u->oz)
{
if (u->WaitTics)
u->Tics = u->WaitTics;
}
}
else // if (*lptr == u->z_tgt)
{
// if heading for the OFF (original) position and should NOT CRUSH
if (TEST_BOOL3(sp) && u->z_tgt == u->oz)
{
int i,nexti;
SPRITEp bsp;
USERp bu;
BOOL found = FALSE;
TRAVERSE_SPRITE_SECT(headspritesect[sp->sectnum], i, nexti)
{
bsp = &sprite[i];
bu = User[i];
if (bu && TEST(bsp->cstat, CSTAT_SPRITE_BLOCK) && TEST(bsp->extra, SPRX_PLAYER_OR_ENEMY))
{
ReverseSpike(SpriteNum);
found = TRUE;
break;
}
}
if (!found)
{
short pnum;
PLAYERp pp;
// go ahead and look for players clip box bounds
TRAVERSE_CONNECT(pnum)
{
pp = Player + pnum;
if (pp->lo_sectp == §or[sp->sectnum] ||
pp->hi_sectp == §or[sp->sectnum])
{
ReverseSpike(SpriteNum);
found = TRUE;
}
}
}
}
}
return(0);
}
int DoSpikeAuto(short SpriteNum)
{
USERp u = User[SpriteNum];
SPRITEp sp = u->SpriteP;
SECTORp sectp = §or[sp->sectnum];
long zval;
long *lptr;
long amt;
lptr = &u->zclip;
DoSpikeMove(SpriteNum, lptr);
MoveSpritesWithSpike(sp->sectnum);
SpikeAlign(SpriteNum);
// EQUAL this entry has finished
if (*lptr == u->z_tgt)
{
// in the UP position
if (u->z_tgt == sp->z)
{
// change target
u->z_tgt = u->sz;
u->vel_rate = -u->vel_rate;
u->Tics = u->WaitTics;
if (SP_TAG6(sp))
DoMatchEverything(NULL, SP_TAG6(sp), -1);
}
else
// in the DOWN position
if (u->z_tgt == u->sz)
{
// change target
u->jump_speed = u->vel_tgt;
u->vel_rate = labs(u->vel_rate);
u->z_tgt = sp->z;
u->Tics = u->WaitTics;
if (SP_TAG6(sp) && TEST_BOOL5(sp))
DoMatchEverything(NULL, SP_TAG6(sp), -1);
}
}
return(0);
}
| 26.383698 | 106 | 0.4653 | [
"3d"
] |
d23ccc8e4d1c937477f6009c5a5a434659ee6542 | 2,612 | h | C | Engine/RHI/Base/Include/RHI/GraphicsPipeline.h | bluesky013/Explosion | 1950121c48280fc5749e490c86acd75960ae8494 | [
"MIT"
] | null | null | null | Engine/RHI/Base/Include/RHI/GraphicsPipeline.h | bluesky013/Explosion | 1950121c48280fc5749e490c86acd75960ae8494 | [
"MIT"
] | null | null | null | Engine/RHI/Base/Include/RHI/GraphicsPipeline.h | bluesky013/Explosion | 1950121c48280fc5749e490c86acd75960ae8494 | [
"MIT"
] | null | null | null | //
// Created by John Kindem on 2021/5/16 0016.
//
#ifndef EXPLOSION_GRAPHICSPIPELINE_H
#define EXPLOSION_GRAPHICSPIPELINE_H
#include <vector>
#include <RHI/Enum.h>
namespace Explosion::RHI {
class RenderPass;
class Shader;
class GraphicsPipeline {
public:
struct ShaderConfig {
std::vector<Shader*> shaderModules;
};
struct VertexBinding {
uint32_t binding;
uint32_t stride;
VertexInputRate inputRate;
};
struct VertexAttribute {
uint32_t binding;
uint32_t location;
Format format;
uint32_t offset;
};
struct VertexConfig {
std::vector<VertexBinding> vertexBindings;
std::vector<VertexAttribute> vertexAttributes;
};
struct DescriptorAttribute {
uint32_t binding;
DescriptorType type;
ShaderStageFlags shaderStages;
};
struct DescriptorConfig {
std::vector<DescriptorAttribute> descriptorAttributes;
};
struct Viewport {
float x;
float y;
float width;
float height;
float minDepth;
float maxDepth;
};
struct Scissor {
int32_t x;
int32_t y;
uint32_t width;
uint32_t height;
};
struct ViewportScissorConfig {
Viewport viewport;
Scissor scissor;
};
struct RasterizerConfig {
bool depthClamp;
bool discard;
CullModeFlags cullModes;
FrontFace frontFace;
};
struct DepthStencilConfig {
bool depthTest;
bool depthWrite;
bool stencilTest;
};
struct ColorBlendConfig {
bool enabled;
};
struct AssemblyConfig {
PrimitiveTopology topology;
};
struct Config {
RenderPass* renderPass;
ShaderConfig shaderConfig;
VertexConfig vertexConfig;
DescriptorConfig descriptorConfig;
ViewportScissorConfig viewportScissorConfig;
RasterizerConfig rasterizerConfig;
DepthStencilConfig depthStencilConfig;
ColorBlendConfig colorBlendConfig;
AssemblyConfig assemblyConfig;
};
virtual ~GraphicsPipeline();
protected:
explicit GraphicsPipeline(Config config);
Config config;
};
}
#endif //EXPLOSION_GRAPHICSPIPELINE_H
| 22.912281 | 66 | 0.558959 | [
"vector"
] |
d242fc007055dd6d2c51196a5bd715fec85d4eef | 5,279 | h | C | src/qt/src/gui/widgets/qmdisubwindow.h | ant0ine/phantomjs | 8114d44a28134b765ab26b7e13ce31594fa81253 | [
"BSD-3-Clause"
] | 46 | 2015-01-08T14:32:34.000Z | 2022-02-05T16:48:26.000Z | src/qt/src/gui/widgets/qmdisubwindow.h | ant0ine/phantomjs | 8114d44a28134b765ab26b7e13ce31594fa81253 | [
"BSD-3-Clause"
] | 7 | 2015-01-20T14:28:12.000Z | 2017-01-18T17:21:44.000Z | src/qt/src/gui/widgets/qmdisubwindow.h | ant0ine/phantomjs | 8114d44a28134b765ab26b7e13ce31594fa81253 | [
"BSD-3-Clause"
] | 14 | 2015-10-27T06:17:48.000Z | 2020-03-03T06:15:50.000Z | /****************************************************************************
**
** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the QtGui module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
**
** $QT_END_LICENSE$
**
****************************************************************************/
#ifndef QMDISUBWINDOW_H
#define QMDISUBWINDOW_H
#include <QtGui/qwidget.h>
QT_BEGIN_HEADER
QT_BEGIN_NAMESPACE
QT_MODULE(Gui)
#ifndef QT_NO_MDIAREA
class QMenu;
class QMdiArea;
namespace QMdi { class ControlContainer; }
class QMdiSubWindowPrivate;
class Q_GUI_EXPORT QMdiSubWindow : public QWidget
{
Q_OBJECT
Q_PROPERTY(int keyboardSingleStep READ keyboardSingleStep WRITE setKeyboardSingleStep)
Q_PROPERTY(int keyboardPageStep READ keyboardPageStep WRITE setKeyboardPageStep)
public:
enum SubWindowOption {
AllowOutsideAreaHorizontally = 0x1, // internal
AllowOutsideAreaVertically = 0x2, // internal
RubberBandResize = 0x4,
RubberBandMove = 0x8
};
Q_DECLARE_FLAGS(SubWindowOptions, SubWindowOption)
QMdiSubWindow(QWidget *parent = 0, Qt::WindowFlags flags = 0);
~QMdiSubWindow();
QSize sizeHint() const;
QSize minimumSizeHint() const;
void setWidget(QWidget *widget);
QWidget *widget() const;
QWidget *maximizedButtonsWidget() const; // internal
QWidget *maximizedSystemMenuIconWidget() const; // internal
bool isShaded() const;
void setOption(SubWindowOption option, bool on = true);
bool testOption(SubWindowOption) const;
void setKeyboardSingleStep(int step);
int keyboardSingleStep() const;
void setKeyboardPageStep(int step);
int keyboardPageStep() const;
#ifndef QT_NO_MENU
void setSystemMenu(QMenu *systemMenu);
QMenu *systemMenu() const;
#endif
QMdiArea *mdiArea() const;
Q_SIGNALS:
void windowStateChanged(Qt::WindowStates oldState, Qt::WindowStates newState);
void aboutToActivate();
public Q_SLOTS:
#ifndef QT_NO_MENU
void showSystemMenu();
#endif
void showShaded();
protected:
bool eventFilter(QObject *object, QEvent *event);
bool event(QEvent *event);
void showEvent(QShowEvent *showEvent);
void hideEvent(QHideEvent *hideEvent);
void changeEvent(QEvent *changeEvent);
void closeEvent(QCloseEvent *closeEvent);
void leaveEvent(QEvent *leaveEvent);
void resizeEvent(QResizeEvent *resizeEvent);
void timerEvent(QTimerEvent *timerEvent);
void moveEvent(QMoveEvent *moveEvent);
void paintEvent(QPaintEvent *paintEvent);
void mousePressEvent(QMouseEvent *mouseEvent);
void mouseDoubleClickEvent(QMouseEvent *mouseEvent);
void mouseReleaseEvent(QMouseEvent *mouseEvent);
void mouseMoveEvent(QMouseEvent *mouseEvent);
void keyPressEvent(QKeyEvent *keyEvent);
#ifndef QT_NO_CONTEXTMENU
void contextMenuEvent(QContextMenuEvent *contextMenuEvent);
#endif
void focusInEvent(QFocusEvent *focusInEvent);
void focusOutEvent(QFocusEvent *focusOutEvent);
void childEvent(QChildEvent *childEvent);
private:
Q_DISABLE_COPY(QMdiSubWindow)
Q_DECLARE_PRIVATE(QMdiSubWindow)
Q_PRIVATE_SLOT(d_func(), void _q_updateStaysOnTopHint())
Q_PRIVATE_SLOT(d_func(), void _q_enterInteractiveMode())
Q_PRIVATE_SLOT(d_func(), void _q_processFocusChanged(QWidget *, QWidget *))
friend class QMdiAreaPrivate;
#ifndef QT_NO_TABBAR
friend class QMdiAreaTabBar;
#endif
friend class QMdi::ControlContainer;
};
Q_DECLARE_OPERATORS_FOR_FLAGS(QMdiSubWindow::SubWindowOptions)
QT_END_NAMESPACE
QT_END_HEADER
#endif // QT_NO_MDIAREA
#endif // QMDISUBWINDOW_H
| 32.99375 | 90 | 0.734988 | [
"object"
] |
d247086ce6c320cad4bd9c44265bbb1c0752dfab | 670 | h | C | renderer/core/include/resources/model/Model.h | wookie41/blitz | 7f587bee9b6189c32f0f60c69316bc8deca23c16 | [
"MIT"
] | 1 | 2020-01-04T21:04:52.000Z | 2020-01-04T21:04:52.000Z | renderer/core/include/resources/model/Model.h | wookie41/blitz | 7f587bee9b6189c32f0f60c69316bc8deca23c16 | [
"MIT"
] | 15 | 2019-08-26T20:54:31.000Z | 2020-03-15T14:11:44.000Z | renderer/core/include/resources/model/Model.h | wookie41/blitz | 7f587bee9b6189c32f0f60c69316bc8deca23c16 | [
"MIT"
] | null | null | null | #ifndef BLITZ_MODEL_H
#define BLITZ_MODEL_H
#include <core/Precompiled.h>
namespace blitz
{
class VertexArray;
class TextureSampler;
struct Mesh
{
uint64 facesCount;
uint64 verticesCount;
TextureSampler* diffuseSampler;
TextureSampler* specularSampler;
TextureSampler* normalMapSampler;
};
struct Model
{
uint8 verticesInFace;
uint64 totalFacesCount;
uint64 totalVerticesCount;
VertexArray* vertexArray;
Array<Mesh>* meshes;
Array<Model*>* children;
uint32 totalNodesCount; //includes children of children...
};
}
#endif //BLITZ_MODEL_H
| 20.30303 | 66 | 0.652239 | [
"mesh",
"model"
] |
d248c6f195be05123f2a94063d8b349ccb05a896 | 6,125 | h | C | include/nncase/kernels/cpu/reference/tensor_compute.h | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | include/nncase/kernels/cpu/reference/tensor_compute.h | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | include/nncase/kernels/cpu/reference/tensor_compute.h | louareg/nncase | 0125654eb57b7ff753fe9c396c84b264c01f34d3 | [
"Apache-2.0"
] | null | null | null | /* Copyright 2019-2021 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "runtime_types.h"
#include <nncase/kernels/kernel_context.h>
BEGIN_NS_NNCASE_KERNELS_CPU_REF
// Reference (unoptimized) CPU implementations of tensor operators; the
// namespace is opened by BEGIN_NS_NNCASE_KERNELS_CPU_REF above.

/// Rearranges blocks from the batch dimension into spatial dimensions.
NNCASE_API result<void> batch_to_space(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &block_shape, const runtime_paddings_t &crops, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides,
    kernel_context &context) noexcept;

/// Broadcasts the input tensor to the given output shape.
NNCASE_API result<void> broadcast(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_shape, const runtime_shape_t &out_strides, kernel_context &context) noexcept;

/// Concatenates the inputs along `axis` into a single output tensor.
NNCASE_API result<void> concat(datatype_t type, gsl::span<const gsl::byte *const> inputs, gsl::byte *output, const runtime_shape_t &out_shape,
    gsl::span<const runtime_shape_t> in_strides, const runtime_shape_t &out_strides, size_t axis, const runtime_shape_t &concat_dims,
    kernel_context &context) noexcept;

/// Converts elements from in_type to out_type.
NNCASE_API result<void> convert(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
    const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context) noexcept;

/// Copies src to dest, honoring possibly different source/destination strides.
NNCASE_API result<void> copy(datatype_t type, const gsl::byte *src, gsl::byte *dest,
    const runtime_shape_t &shape, const runtime_shape_t &src_strides, const runtime_shape_t &dest_strides, kernel_context &context) noexcept;

/// Permutes the input's dimensions according to `perm`.
NNCASE_API result<void> transpose(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &perm, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context) noexcept;

/// Elementwise binary op on float tensors, with a fused-activation value range.
NNCASE_API result<void> binary(binary_op_t op, const float *input_a, const float *input_b, float *output,
    const runtime_shape_t &in_a_shape, const runtime_shape_t &in_a_strides, const runtime_shape_t &in_b_shape,
    const runtime_shape_t &in_b_strides, const runtime_shape_t &out_strides, value_range<float> fused_activation, kernel_context &context) noexcept;

/// Dequantizes input using the affine parameters scale and bias.
NNCASE_API result<void> dequantize(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
    const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, float scale, float bias,
    kernel_context &context) noexcept;

/// 1-D lookup-table transform using `table`, over the value range [min, max].
NNCASE_API result<void> lut1d(datatype_t type, const gsl::byte *input, const gsl::byte *table, gsl::byte *output, const runtime_shape_t &shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const scalar &min, const scalar &max) noexcept;

/// Pads the input per `paddings` using the given mode and constant value.
NNCASE_API result<void> pad(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const runtime_paddings_t &paddings, pad_mode_t mode, const scalar &pad_value,
    kernel_context &context) noexcept;

/// Quantizes input using the affine parameters scale and bias.
NNCASE_API result<void> quantize(datatype_t in_type, datatype_t out_type, const gsl::byte *input, gsl::byte *output,
    const runtime_shape_t &in_shape, const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, float scale, float bias,
    kernel_context &context) noexcept;

/// Elementwise unary op on a float tensor.
NNCASE_API result<void> unary(unary_op_t op, const float *input, float *output, const runtime_shape_t &shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, kernel_context &context) noexcept;

/// Reduction over `axis`, seeded with init_value; keep_dims keeps reduced axes.
NNCASE_API result<void> reduce(reduce_op_t op, float init_value, const float *input, float *output, const runtime_shape_t &in_shape, const runtime_shape_t &axis,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, bool keep_dims, kernel_context &context) noexcept;

/// Bilinear resize to out_h x out_w.
NNCASE_API result<void> resize_bilinear(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, int32_t out_h, int32_t out_w, bool align_corners, bool half_pixel_centers,
    kernel_context &context) noexcept;

/// Nearest-neighbor resize to out_h x out_w.
NNCASE_API result<void> resize_nearest_neighbor(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, int32_t out_h, int32_t out_w, bool align_corners, bool half_pixel_centers,
    kernel_context &context) noexcept;

/// Strided slice described by begins/ends/strides.
NNCASE_API result<void> slice(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const runtime_shape_t &begins, const runtime_shape_t &ends, const runtime_axis_t &strides,
    kernel_context &context) noexcept;

/// Gathers slices along `axis` selected by int32 indices.
NNCASE_API result<void> gather(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape, const runtime_shape_t &out_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const int32_t *indices, const runtime_shape_t &indices_shape, size_t axis, kernel_context &context) noexcept;

/// N-dimensional gather; the leading batch_dims axes are treated as batch dims.
NNCASE_API result<void> gather_nd(datatype_t type, const gsl::byte *input, gsl::byte *output, const runtime_shape_t &in_shape, const runtime_shape_t &out_shape,
    const runtime_shape_t &in_strides, const runtime_shape_t &out_strides, const int32_t *indices, const runtime_shape_t &indices_shape, size_t batch_dims, kernel_context &context) noexcept;
END_NS_NNCASE_KERNELS_CPU_REF
| 72.058824 | 190 | 0.798367 | [
"shape"
] |
d24a8a278025da497ff379bf7bba681016755645 | 23,291 | c | C | src/platform/lm3s/driverlib/interrupt.c | amit08thakare/Elua | e104a9adf3dcafe6f871191ac008cd438a189c0a | [
"MIT"
] | 641 | 2015-01-02T04:41:47.000Z | 2022-03-31T03:15:13.000Z | src/platform/lm3s/driverlib/interrupt.c | amit08thakare/Elua | e104a9adf3dcafe6f871191ac008cd438a189c0a | [
"MIT"
] | 58 | 2015-03-11T08:05:27.000Z | 2021-12-31T11:49:55.000Z | src/platform/lm3s/driverlib/interrupt.c | darren1713/elua | c280894885218858806b68cf04fbafa66a52c36a | [
"MIT"
] | 237 | 2015-01-04T07:49:39.000Z | 2022-03-11T03:31:35.000Z | //*****************************************************************************
//
// interrupt.c - Driver for the NVIC Interrupt Controller.
//
// Copyright (c) 2005-2011 Texas Instruments Incorporated. All rights reserved.
// Software License Agreement
//
// Texas Instruments (TI) is supplying this software for use solely and
// exclusively on TI's microcontroller products. The software is owned by
// TI and/or its suppliers, and is protected under applicable copyright
// laws. You may not combine this software with "viral" open-source
// software in order to form a larger program.
//
// THIS SOFTWARE IS PROVIDED "AS IS" AND WITH ALL FAULTS.
// NO WARRANTIES, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT
// NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. TI SHALL NOT, UNDER ANY
// CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
// DAMAGES, FOR ANY REASON WHATSOEVER.
//
// This is part of revision 7611 of the Stellaris Peripheral Driver Library.
//
//*****************************************************************************
//*****************************************************************************
//
//! \addtogroup interrupt_api
//! @{
//
//*****************************************************************************
#include "inc/hw_ints.h"
#include "inc/hw_nvic.h"
#include "inc/hw_types.h"
#include "driverlib/cpu.h"
#include "driverlib/debug.h"
#include "driverlib/interrupt.h"
//*****************************************************************************
//
// This is a mapping between priority grouping encodings and the number of
// preemption priority bits.
//
//*****************************************************************************
static const unsigned long g_pulPriority[] =
{
    //
    // Entry N is the APINT PRIGROUP encoding that provides N bits of
    // preemptable priority (the remaining bits become subpriority).
    //
    NVIC_APINT_PRIGROUP_0_8, NVIC_APINT_PRIGROUP_1_7, NVIC_APINT_PRIGROUP_2_6,
    NVIC_APINT_PRIGROUP_3_5, NVIC_APINT_PRIGROUP_4_4, NVIC_APINT_PRIGROUP_5_3,
    NVIC_APINT_PRIGROUP_6_2, NVIC_APINT_PRIGROUP_7_1
};
//*****************************************************************************
//
// This is a mapping between interrupt number and the register that contains
// the priority encoding for that interrupt.
//
//*****************************************************************************
static const unsigned long g_pulRegs[] =
{
    //
    // Entry N is the priority register covering interrupts 4*N .. 4*N + 3
    // (see the ">> 2" indexing in IntPrioritySet/IntPriorityGet).  Entry 0
    // is a placeholder: IntPrioritySet asserts ulInterrupt >= 4, so
    // interrupts 0-3 never index this table.
    //
    0, NVIC_SYS_PRI1, NVIC_SYS_PRI2, NVIC_SYS_PRI3, NVIC_PRI0, NVIC_PRI1,
    NVIC_PRI2, NVIC_PRI3, NVIC_PRI4, NVIC_PRI5, NVIC_PRI6, NVIC_PRI7,
    NVIC_PRI8, NVIC_PRI9, NVIC_PRI10, NVIC_PRI11, NVIC_PRI12, NVIC_PRI13
};
//*****************************************************************************
//
//! \internal
//! The default interrupt handler.
//!
//! This is the default interrupt handler for all interrupts. It simply loops
//! forever so that the system state is preserved for observation by a
//! debugger. Since interrupts should be disabled before unregistering the
//! corresponding handler, this should never be called.
//!
//! \return None.
//
//*****************************************************************************
static void
IntDefaultHandler(void)
{
    //
    // Spin forever.  This preserves the system state for a debugger, and
    // since interrupts are disabled before their handlers are unregistered,
    // this handler should never actually run.
    //
    for(;;)
    {
    }
}
//*****************************************************************************
//
// The processor vector table.
//
// This contains a list of the handlers for the various interrupt sources in
// the system. The layout of this list is defined by the hardware; assertion
// of an interrupt causes the processor to start executing directly at the
// address given in the corresponding location in this list.
//
//*****************************************************************************
//
// The SRAM copy of the vector table.  Each branch expresses the required
// 1 kB alignment (see the 0x3ff check in IntRegister) and section placement
// in the dialect of one toolchain: IAR (ewarm), Sourcery G++, TI CCS (and
// Doxygen), then GCC-style attributes as the fallback.
//
#if defined(ewarm)
#pragma data_alignment=1024
static __no_init void (*g_pfnRAMVectors[NUM_INTERRUPTS])(void) @ "VTABLE";
#elif defined(sourcerygxx)
static __attribute__((section(".cs3.region-head.ram")))
void (*g_pfnRAMVectors[NUM_INTERRUPTS])(void) __attribute__ ((aligned(1024)));
#elif defined(ccs) || defined(DOXYGEN)
#pragma DATA_ALIGN(g_pfnRAMVectors, 1024)
#pragma DATA_SECTION(g_pfnRAMVectors, ".vtable")
void (*g_pfnRAMVectors[NUM_INTERRUPTS])(void);
#else
static __attribute__((section("vtable")))
void (*g_pfnRAMVectors[NUM_INTERRUPTS])(void) __attribute__ ((aligned(1024)));
#endif
//*****************************************************************************
//
//! Enables the processor interrupt.
//!
//! Allows the processor to respond to interrupts. This does not affect the
//! set of interrupts enabled in the interrupt controller; it just gates the
//! single interrupt from the controller to the processor.
//!
//! \note Previously, this function had no return value. As such, it was
//! possible to include <tt>interrupt.h</tt> and call this function without
//! having included <tt>hw_types.h</tt>. Now that the return is a
//! <tt>tBoolean</tt>, a compiler error will occur in this case. The solution
//! is to include <tt>hw_types.h</tt> before including <tt>interrupt.h</tt>.
//!
//! \return Returns \b true if interrupts were disabled when the function was
//! called or \b false if they were initially enabled.
//
//*****************************************************************************
tBoolean
IntMasterEnable(void)
{
    // Open the CPU-level interrupt gate; the return value reports whether
    // interrupts had been disabled before this call.
    return(CPUcpsie());
}
//*****************************************************************************
//
//! Disables the processor interrupt.
//!
//! Prevents the processor from receiving interrupts. This does not affect the
//! set of interrupts enabled in the interrupt controller; it just gates the
//! single interrupt from the controller to the processor.
//!
//! \note Previously, this function had no return value. As such, it was
//! possible to include <tt>interrupt.h</tt> and call this function without
//! having included <tt>hw_types.h</tt>. Now that the return is a
//! <tt>tBoolean</tt>, a compiler error will occur in this case. The solution
//! is to include <tt>hw_types.h</tt> before including <tt>interrupt.h</tt>.
//!
//! \return Returns \b true if interrupts were already disabled when the
//! function was called or \b false if they were initially enabled.
//
//*****************************************************************************
tBoolean
IntMasterDisable(void)
{
    // Close the CPU-level interrupt gate; the return value reports whether
    // interrupts were already disabled before this call.
    return(CPUcpsid());
}
//*****************************************************************************
//
//! Registers a function to be called when an interrupt occurs.
//!
//! \param ulInterrupt specifies the interrupt in question.
//! \param pfnHandler is a pointer to the function to be called.
//!
//! This function is used to specify the handler function to be called when the
//! given interrupt is asserted to the processor. When the interrupt occurs,
//! if it is enabled (via IntEnable()), the handler function will be called in
//! interrupt context. Since the handler function can preempt other code, care
//! must be taken to protect memory or peripherals that are accessed by the
//! handler and other non-handler code.
//!
//! \note The use of this function (directly or indirectly via a peripheral
//! driver interrupt register function) moves the interrupt vector table from
//! flash to SRAM. Therefore, care must be taken when linking the application
//! to ensure that the SRAM vector table is located at the beginning of SRAM;
//! otherwise NVIC will not look in the correct portion of memory for the
//! vector table (it requires the vector table be on a 1 kB memory alignment).
//! Normally, the SRAM vector table is so placed via the use of linker scripts.
//! See the discussion of compile-time versus run-time interrupt handler
//! registration in the introduction to this chapter.
//!
//! \return None.
//
//*****************************************************************************
void
IntRegister(unsigned long ulInterrupt, void (*pfnHandler)(void))
{
    unsigned long ulIdx, ulValue;

    //
    // Validate the interrupt number and the 1 kB alignment that NVIC
    // requires of the RAM vector table.
    //
    ASSERT(ulInterrupt < NUM_INTERRUPTS);
    ASSERT(((unsigned long)g_pfnRAMVectors & 0x000003ff) == 0);

    //
    // On first use, clone the currently active vector table (in flash) into
    // the SRAM copy, then repoint NVIC at the SRAM table.
    //
    ulValue = HWREG(NVIC_VTABLE);
    if(ulValue != (unsigned long)g_pfnRAMVectors)
    {
        for(ulIdx = 0; ulIdx < NUM_INTERRUPTS; ulIdx++)
        {
            g_pfnRAMVectors[ulIdx] =
                (void (*)(void))HWREG(ulValue + (ulIdx * 4));
        }
        HWREG(NVIC_VTABLE) = (unsigned long)g_pfnRAMVectors;
    }

    //
    // Install the new handler into its slot.
    //
    g_pfnRAMVectors[ulInterrupt] = pfnHandler;
}
//*****************************************************************************
//
//! Unregisters the function to be called when an interrupt occurs.
//!
//! \param ulInterrupt specifies the interrupt in question.
//!
//! This function is used to indicate that no handler should be called when the
//! given interrupt is asserted to the processor. The interrupt source will be
//! automatically disabled (via IntDisable()) if necessary.
//!
//! \sa IntRegister() for important information about registering interrupt
//! handlers.
//!
//! \return None.
//
//*****************************************************************************
void
IntUnregister(unsigned long ulInterrupt)
{
    ASSERT(ulInterrupt < NUM_INTERRUPTS);

    //
    // Point this vector back at the default spin-forever handler.
    //
    g_pfnRAMVectors[ulInterrupt] = IntDefaultHandler;
}
//*****************************************************************************
//
//! Sets the priority grouping of the interrupt controller.
//!
//! \param ulBits specifies the number of bits of preemptable priority.
//!
//! This function specifies the split between preemptable priority levels and
//! subpriority levels in the interrupt priority specification. The range of
//! the grouping values are dependent upon the hardware implementation; on
//! the Stellaris family, three bits are available for hardware interrupt
//! prioritization and therefore priority grouping values of three through
//! seven have the same effect.
//!
//! \return None.
//
//*****************************************************************************
void
IntPriorityGroupingSet(unsigned long ulBits)
{
    ASSERT(ulBits < NUM_PRIORITY);

    //
    // Write the PRIGROUP encoding for the requested bit count; the vector
    // key must accompany every write to APINT.
    //
    HWREG(NVIC_APINT) = NVIC_APINT_VECTKEY | g_pulPriority[ulBits];
}
//*****************************************************************************
//
//! Gets the priority grouping of the interrupt controller.
//!
//! This function returns the split between preemptable priority levels and
//! subpriority levels in the interrupt priority specification.
//!
//! \return The number of bits of preemptable priority.
//
//*****************************************************************************
unsigned long
IntPriorityGroupingGet(void)
{
    unsigned long ulLoop, ulValue;

    //
    // Extract the current grouping field from the APINT register.
    //
    ulValue = HWREG(NVIC_APINT) & NVIC_APINT_PRIGROUP_M;

    //
    // Translate the encoding back into a bit count by scanning the table;
    // NUM_PRIORITY is returned when no entry matches.
    //
    ulLoop = 0;
    while((ulLoop < NUM_PRIORITY) && (g_pulPriority[ulLoop] != ulValue))
    {
        ulLoop++;
    }
    return(ulLoop);
}
//*****************************************************************************
//
//! Sets the priority of an interrupt.
//!
//! \param ulInterrupt specifies the interrupt in question.
//! \param ucPriority specifies the priority of the interrupt.
//!
//! This function is used to set the priority of an interrupt. When multiple
//! interrupts are asserted simultaneously, the ones with the highest priority
//! are processed before the lower priority interrupts. Smaller numbers
//! correspond to higher interrupt priorities; priority 0 is the highest
//! interrupt priority.
//!
//! The hardware priority mechanism will only look at the upper N bits of the
//! priority level (where N is 3 for the Stellaris family), so any
//! prioritization must be performed in those bits. The remaining bits can be
//! used to sub-prioritize the interrupt sources, and may be used by the
//! hardware priority mechanism on a future part. This arrangement allows
//! priorities to migrate to different NVIC implementations without changing
//! the gross prioritization of the interrupts.
//!
//! \return None.
//
//*****************************************************************************
void
IntPrioritySet(unsigned long ulInterrupt, unsigned char ucPriority)
{
    unsigned long ulTemp, ulShift;

    //
    // Check the arguments.
    //
    ASSERT((ulInterrupt >= 4) && (ulInterrupt < NUM_INTERRUPTS));

    //
    // Replace this interrupt's 8-bit priority field in its priority
    // register.  The shift amount reaches 24 for the last interrupt in a
    // register, so unsigned operands (0xFFul and an unsigned long cast) are
    // used: the original "0xFF << 24" left-shifted a signed int into the
    // sign bit, which is undefined behavior in C.
    //
    ulShift = 8 * (ulInterrupt & 3);
    ulTemp = HWREG(g_pulRegs[ulInterrupt >> 2]);
    ulTemp &= ~(0xFFul << ulShift);
    ulTemp |= (unsigned long)ucPriority << ulShift;
    HWREG(g_pulRegs[ulInterrupt >> 2]) = ulTemp;
}
//*****************************************************************************
//
//! Gets the priority of an interrupt.
//!
//! \param ulInterrupt specifies the interrupt in question.
//!
//! This function gets the priority of an interrupt. See IntPrioritySet() for
//! a definition of the priority value.
//!
//! \return Returns the interrupt priority, or -1 if an invalid interrupt was
//! specified.
//
//*****************************************************************************
long
IntPriorityGet(unsigned long ulInterrupt)
{
    unsigned long ulWord, ulShift;

    ASSERT((ulInterrupt >= 4) && (ulInterrupt < NUM_INTERRUPTS));

    //
    // Pull this interrupt's 8-bit priority field out of its priority
    // register.
    //
    ulWord = HWREG(g_pulRegs[ulInterrupt >> 2]);
    ulShift = 8 * (ulInterrupt & 3);
    return((ulWord >> ulShift) & 0xFF);
}
//*****************************************************************************
//
//! Enables an interrupt.
//!
//! \param ulInterrupt specifies the interrupt to be enabled.
//!
//! The specified interrupt is enabled in the interrupt controller. Other
//! enables for the interrupt (such as at the peripheral level) are unaffected
//! by this function.
//!
//! \return None.
//
//*****************************************************************************
void
IntEnable(unsigned long ulInterrupt)
{
    ASSERT(ulInterrupt < NUM_INTERRUPTS);

    //
    // System exceptions each have a dedicated enable bit; handle those
    // first, returning as soon as the matching one is set.
    //
    if(ulInterrupt == FAULT_MPU)
    {
        // MemManage fault.
        HWREG(NVIC_SYS_HND_CTRL) |= NVIC_SYS_HND_CTRL_MEM;
        return;
    }
    if(ulInterrupt == FAULT_BUS)
    {
        // Bus fault.
        HWREG(NVIC_SYS_HND_CTRL) |= NVIC_SYS_HND_CTRL_BUS;
        return;
    }
    if(ulInterrupt == FAULT_USAGE)
    {
        // Usage fault.
        HWREG(NVIC_SYS_HND_CTRL) |= NVIC_SYS_HND_CTRL_USAGE;
        return;
    }
    if(ulInterrupt == FAULT_SYSTICK)
    {
        // System tick timer.
        HWREG(NVIC_ST_CTRL) |= NVIC_ST_CTRL_INTEN;
        return;
    }

    //
    // General interrupts are enabled through the two NVIC enable registers;
    // the plain assignment works because writing a one bit enables that
    // interrupt while zero bits leave the others untouched.
    //
    if((ulInterrupt >= 16) && (ulInterrupt <= 47))
    {
        HWREG(NVIC_EN0) = 1 << (ulInterrupt - 16);
        return;
    }
    if(ulInterrupt >= 48)
    {
        HWREG(NVIC_EN1) = 1 << (ulInterrupt - 48);
    }
}
//*****************************************************************************
//
//! Disables an interrupt.
//!
//! \param ulInterrupt specifies the interrupt to be disabled.
//!
//! The specified interrupt is disabled in the interrupt controller. Other
//! enables for the interrupt (such as at the peripheral level) are unaffected
//! by this function.
//!
//! \return None.
//
//*****************************************************************************
void
IntDisable(unsigned long ulInterrupt)
{
    ASSERT(ulInterrupt < NUM_INTERRUPTS);

    //
    // System exceptions each have a dedicated enable bit; clear the one
    // that matches and return.
    //
    if(ulInterrupt == FAULT_MPU)
    {
        // MemManage fault.
        HWREG(NVIC_SYS_HND_CTRL) &= ~(NVIC_SYS_HND_CTRL_MEM);
        return;
    }
    if(ulInterrupt == FAULT_BUS)
    {
        // Bus fault.
        HWREG(NVIC_SYS_HND_CTRL) &= ~(NVIC_SYS_HND_CTRL_BUS);
        return;
    }
    if(ulInterrupt == FAULT_USAGE)
    {
        // Usage fault.
        HWREG(NVIC_SYS_HND_CTRL) &= ~(NVIC_SYS_HND_CTRL_USAGE);
        return;
    }
    if(ulInterrupt == FAULT_SYSTICK)
    {
        // System tick timer.
        HWREG(NVIC_ST_CTRL) &= ~(NVIC_ST_CTRL_INTEN);
        return;
    }

    //
    // General interrupts: write a one bit into the matching disable
    // (clear-enable) register.
    //
    if((ulInterrupt >= 16) && (ulInterrupt <= 47))
    {
        HWREG(NVIC_DIS0) = 1 << (ulInterrupt - 16);
        return;
    }
    if(ulInterrupt >= 48)
    {
        HWREG(NVIC_DIS1) = 1 << (ulInterrupt - 48);
    }
}
//*****************************************************************************
//
//! Pends an interrupt.
//!
//! \param ulInterrupt specifies the interrupt to be pended.
//!
//! The specified interrupt is pended in the interrupt controller. This will
//! cause the interrupt controller to execute the corresponding interrupt
//! handler at the next available time, based on the current interrupt state
//! priorities. For example, if called by a higher priority interrupt handler,
//! the specified interrupt handler will not be called until after the current
//! interrupt handler has completed execution. The interrupt must have been
//! enabled for it to be called.
//!
//! \return None.
//
//*****************************************************************************
void
IntPendSet(unsigned long ulInterrupt)
{
    ASSERT(ulInterrupt < NUM_INTERRUPTS);

    //
    // The NMI, PendSV, and SysTick pend bits live in the INT_CTRL register.
    //
    if(ulInterrupt == FAULT_NMI)
    {
        HWREG(NVIC_INT_CTRL) |= NVIC_INT_CTRL_NMI_SET;
        return;
    }
    if(ulInterrupt == FAULT_PENDSV)
    {
        HWREG(NVIC_INT_CTRL) |= NVIC_INT_CTRL_PEND_SV;
        return;
    }
    if(ulInterrupt == FAULT_SYSTICK)
    {
        HWREG(NVIC_INT_CTRL) |= NVIC_INT_CTRL_PENDSTSET;
        return;
    }

    //
    // General interrupts: write a one bit into the matching set-pending
    // register.
    //
    if((ulInterrupt >= 16) && (ulInterrupt <= 47))
    {
        HWREG(NVIC_PEND0) = 1 << (ulInterrupt - 16);
        return;
    }
    if(ulInterrupt >= 48)
    {
        HWREG(NVIC_PEND1) = 1 << (ulInterrupt - 48);
    }
}
//*****************************************************************************
//
//! Unpends an interrupt.
//!
//! \param ulInterrupt specifies the interrupt to be unpended.
//!
//! The specified interrupt is unpended in the interrupt controller. This will
//! cause any previously generated interrupts that have not been handled yet
//! (due to higher priority interrupts or the interrupt no having been enabled
//! yet) to be discarded.
//!
//! \return None.
//
//*****************************************************************************
void
IntPendClear(unsigned long ulInterrupt)
{
    ASSERT(ulInterrupt < NUM_INTERRUPTS);

    //
    // The PendSV and SysTick unpend bits live in the INT_CTRL register.
    //
    if(ulInterrupt == FAULT_PENDSV)
    {
        HWREG(NVIC_INT_CTRL) |= NVIC_INT_CTRL_UNPEND_SV;
        return;
    }
    if(ulInterrupt == FAULT_SYSTICK)
    {
        HWREG(NVIC_INT_CTRL) |= NVIC_INT_CTRL_PENDSTCLR;
        return;
    }

    //
    // General interrupts: write a one bit into the matching clear-pending
    // register.
    //
    if((ulInterrupt >= 16) && (ulInterrupt <= 47))
    {
        HWREG(NVIC_UNPEND0) = 1 << (ulInterrupt - 16);
        return;
    }
    if(ulInterrupt >= 48)
    {
        HWREG(NVIC_UNPEND1) = 1 << (ulInterrupt - 48);
    }
}
//*****************************************************************************
//
//! Sets the priority masking level
//!
//! \param ulPriorityMask is the priority level that will be masked.
//!
//! This function sets the interrupt priority masking level so that all
//! interrupts at the specified or lesser priority level is masked. This
//! can be used to globally disable a set of interrupts with priority below
//! a predetermined threshold. A value of 0 disables priority
//! masking.
//!
//! Smaller numbers correspond to higher interrupt priorities. So for example
//! a priority level mask of 4 will allow interrupts of priority level 0-3,
//! and interrupts with a numerical priority of 4 and greater will be blocked.
//!
//! The hardware priority mechanism will only look at the upper N bits of the
//! priority level (where N is 3 for the Stellaris family), so any
//! prioritization must be performed in those bits.
//!
//! \return None.
//
//*****************************************************************************
void
IntPriorityMaskSet(unsigned long ulPriorityMask)
{
    // Write BASEPRI: interrupts at this priority value and numerically
    // greater (i.e. lower priority) are masked; zero disables masking.
    CPUbasepriSet(ulPriorityMask);
}
//*****************************************************************************
//
//! Gets the priority masking level
//!
//! This function gets the current setting of the interrupt priority masking
//! level. The value returned is the priority level such that all interrupts
//! of that and lesser priority are masked. A value of 0 means that priority
//! masking is disabled.
//!
//! Smaller numbers correspond to higher interrupt priorities. So for example
//! a priority level mask of 4 will allow interrupts of priority level 0-3,
//! and interrupts with a numerical priority of 4 and greater will be blocked.
//!
//! The hardware priority mechanism will only look at the upper N bits of the
//! priority level (where N is 3 for the Stellaris family), so any
//! prioritization must be performed in those bits.
//!
//! \return Returns the value of the interrupt priority level mask.
//
//*****************************************************************************
unsigned long
IntPriorityMaskGet(void)
{
    // Read the current BASEPRI masking level; zero means masking disabled.
    return(CPUbasepriGet());
}
//*****************************************************************************
//
// Close the Doxygen group.
//! @}
//
//*****************************************************************************
| 32.081267 | 80 | 0.572109 | [
"vector"
] |
d24bbc82bc93ef5eb9cb746032ff37c938c3ce18 | 2,091 | c | C | CVector/main.c | ryandw11/CVector | 54e8574b97cb2bf83379b589324f301b9d917b8f | [
"MIT"
] | null | null | null | CVector/main.c | ryandw11/CVector | 54e8574b97cb2bf83379b589324f301b9d917b8f | [
"MIT"
] | null | null | null | CVector/main.c | ryandw11/CVector | 54e8574b97cb2bf83379b589324f301b9d917b8f | [
"MIT"
] | null | null | null | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#define CVEC_UPPERCASE
#include "CVector.h"
InitializeVector(int);
InitializeVector(float);
// Resizes the backing buffer of an int vector to exactly `resizeTo`
// elements.  NOTE(review): despite the "_f" suffix this helper operates on
// Vector_int -- confirm the intended name/type.
//
// Fix: the original updated vec->bufferSize *before* calling realloc and
// did not roll it back on failure, leaving the vector claiming a capacity
// its buffer does not have.  Now the vector is only mutated once realloc
// has succeeded, so on failure it is left fully unchanged.
//
// NOTE(review): count is not clamped when shrinking below vec->count, and
// realloc(ptr, 0) has implementation-defined behavior -- confirm callers
// never pass sizes below the element count or zero.
void CVec_Resize_f(Vector_int* vec, int resizeTo) {
    if(resizeTo < 0) return;
    int* newBuffer = (int*)realloc(vec->buffer, sizeof(int) * resizeTo);
    if(newBuffer != NULL) {
        vec->buffer = newBuffer;
        vec->bufferSize = resizeTo;
    }
}
/*
 * Demo / smoke-test driver for the CVector macro library: exercises the
 * int and float vector instantiations (add, pop, get, bulk add, insert,
 * remove, print, iterate, free).
 *
 * Fix: the original declared `int main(char** args, int argv)`, swapping
 * the standard parameter order -- the C runtime invokes main as
 * (int, char**), so that signature was undefined behavior.  Neither
 * parameter was used, so the standard signature is restored unchanged
 * otherwise.
 */
int main(int argc, char** argv) {
	(void)argc;
	(void)argv;

	// Build an int vector via both the generic macro and the generated
	// per-type function, then read an element back.
	Vector(int) vec = CreateVector(int);
	CVecAdd(int, &vec, 10);
	CVec_Add_int(&vec, 9);
	CVec_Add_int(&vec, 8);
	CVec_Add_int(&vec, 7);
	CVec_Add_int(&vec, 6);
	CVec_Add_int(&vec, 5);
	CVec_Add_int(&vec, 4);
	CVec_Add_int(&vec, 3);
	CVec_Add_int(&vec, 2);
	CVec_Add_int(&vec, 1);
	CVec_Add_int(&vec, 0);
	CVec_Add_int(&vec, -1);
	printf("%d\n", *CVec_Get_int(&vec, 11));

	// Pop variants: discard, discard via NULL out-param, and pop-with-value.
	CVecPop(int, &vec);
	CVec_Pop_int(&vec, NULL);
	int value = 0;
	CVecPopV(int, &vec, &value);
	printf("Popped Value: %d\n", value);
	printf("%d\n", *CVec_Get_int(&vec, 4));

	// Bulk add, print, remove by index, and report size vs. capacity.
	CVecAddAll(int, &vec, 5, 43, 2, 32, 43, 32);
	CVecPrintPrimitive(int, &vec, " %d ");
	CVecRemove(int, &vec, 1);
	CVecPrintPrimitive(int, &vec, " %d ");
	printf("Vec Size: %d, Vec Buffer Size: %d", vec.count, vec.bufferSize);

	// Float instantiation: add, insert at index, print, free.
	Vector(float) floatVec = CreateVector(float);
	CVecAdd(float, &floatVec, 25.5);
	CVecAdd(float, &floatVec, 29.5);
	CVecAdd(float, &floatVec, 110.232);
	CVec_Insert_float(&floatVec, 3, 420.69);
	CVecPrintPrimitive(float, &floatVec, " %.2f ");
	CVecFree(float, &floatVec);
	printf("Vector Add All Test:");
	CVecPrintPrimitive(int, &vec, " %d ");

	// Heap-allocated vector: append another vector and a raw array.
	Vector(int)* intVec = MallocVector(int);
	CVecAdd(int, intVec, 20);
	CVecAdd(int, intVec, 10);
	CVecPrintPrimitive(int, intVec, " %d ");
	CVecAddVector(int, intVec, &vec);
	CVecPrintPrimitive(int, intVec, " %d ");
	int testArr[] = {-23, -43, -3, 4};
	CVecAddArray(int, intVec, testArr, 4);
	CVecPrintPrimitive(int, intVec, " %d ");

	// Iteration macro.
	printf("For Each Print: \n");
	CVEC_FOR_EACH(int, intVec, vecData,
		printf(" %d \n", vecData);
	);

	// Free vector internals, then the heap-allocated vector struct itself.
	CVecFree(int, &vec);
	CVecFree(int, intVec);
	free(intVec);
	return 0;
}
"vector"
] |
d24d4ba91ee5dd816c3e97876f270e17ac294afd | 5,787 | h | C | cTORS/include/Plan.h | AlgTUDelft/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | 5 | 2021-04-25T10:40:55.000Z | 2022-02-24T14:07:28.000Z | cTORS/include/Plan.h | UtrechtUniversity/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | null | null | null | cTORS/include/Plan.h | UtrechtUniversity/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | 1 | 2022-03-04T05:08:05.000Z | 2022-03-04T05:08:05.000Z | /** \file Plan.h
* Describes the POSPlan (Partial Order Schedule Plan), POSAction, POSMatch and POSPrecedenceConstraint class
*/
#ifndef PLAN_H
#define PLAN_H
#include "Engine.h"
class Engine;
class LocationEngine;
/**
* A POSAction is an action in a Partial Order Schedule
*
* This class is used for serializing actions to protobuf
*/
class POSAction {
private:
static int newUID;
int id;
int suggestedStart, suggestedEnd, minDuration;
const SimpleAction* action;
public:
POSAction() = delete;
/** Construct a POSAction from the given parameters */
POSAction(int suggestedStart, int suggestedEnd, int minDuration, const SimpleAction* action) :
id(newUID++), suggestedStart(suggestedStart), suggestedEnd(suggestedEnd), minDuration(minDuration), action(action) {}
/** Copy constructor */
POSAction(const POSAction& pa) : id(pa.id), suggestedStart(pa.suggestedStart), suggestedEnd(pa.suggestedEnd),
minDuration(pa.minDuration), action(pa.action->Clone()) {}
/** POSAction destructor */
~POSAction() { delete action; }
/** Assignment operator */
POSAction& operator=(const POSAction& pa);
/** Get the unique id of the action */
inline int GetID() const { return id; }
/** Get the suggested start time of the action */
inline int GetSuggestedStart() const { return suggestedStart; }
/** Get the suggested end time of the action */
inline int GetSuggestedEnd() const { return suggestedEnd; }
/** Get the suggested minimum duration time of the action */
inline int GetMinimumDuration() const { return minDuration; }
/** Get the SimpleAction of this POSAction */
inline const SimpleAction* GetAction() const { return action; }
/** Serialize this POSAction to a protobuf object */
void Serialize(const LocationEngine& engine, const State* state, PBAction* pb_action) const;
/** Construct a POSAction from a protobuf action */
static POSAction CreatePOSAction(const Location* location, const Scenario* scenario, const PBAction& pb_action);
};
/**
* A POSMatch describes a match between Outgoing trains and Train%s (not yet implemented)
*/
class POSMatch {
private:
const Train* train;
const Outgoing* out;
int position;
};
/**
* A POSPrecedenceConstraint describes a precedence constraint between two POSAction%s (not yet implemented)
*/
class POSPrecedenceConstraint {
const POSAction *firstAction, *secondAction;
int minimumTimeLag;
};
/**
* A POPlan describes a Partial Order Schedule
*
* A POSPlan consists of a list of POSAction%s, POSMatch%es and POSPrecedenceConstraint%s
*/
class POSPlan {
private:
vector<POSAction> actions;
vector<POSMatch> matching;
vector<POSPrecedenceConstraint> graph;
bool feasible;
public:
/** Construct an empty POSPlan */
POSPlan() = default;
/** Construct a POSPlan based on the list of POSAction%s */
POSPlan(vector<POSAction> actions) : actions(actions) {}
/** Get the list of POSAction%s */
inline const vector<POSAction>& GetActions() const { return actions; }
/** Add a POSAction to the list of POSAction%s */
inline void AddAction(const POSAction& action) { actions.push_back(action); }
/** Serialize this plan to a protobuf object */
void Serialize(LocationEngine& engine, const Scenario& scenario, PBPOSPlan* pb_plan) const;
/** Serialize this plan to a protobuf file */
void SerializeToFile(LocationEngine& engine, const Scenario& scenario, const string& outfile) const;
/** Construct a POSPlan from a protobuf object */
static POSPlan CreatePOSPlan(const Location* location, const Scenario* scenario, const PBPOSPlan& pb_plan);
};
/**
* A RunResult describes a TORS session
*
* A TORS session is run at a Location, given a certain Scenario.
* In this context a POSPlan will be, or was run
*/
class RunResult {
private:
Scenario scenario;
POSPlan plan;
string location;
bool feasible;
public:
RunResult() = delete;
/** Construct a RunResult for a location and a Scenario with an empty plan */
RunResult(const string& location, const Scenario& scenario) : location(location), scenario(scenario), feasible(false) {}
/** Construct a RunResult for a location and a Scenario and a plan */
RunResult(const string& location, const Scenario& scenario, const POSPlan& plan, bool feasible)
: location(location), scenario(scenario), plan(plan), feasible(feasible) {}
/** Default copy constructor */
RunResult(const RunResult& rr) = default;
/** Default destructor */
~RunResult() = default;
/** Get the actions in the plan */
inline const vector<POSAction>& GetActions() const { return plan.GetActions(); }
/** Add a POSAction to the plan */
inline void AddAction(const POSAction& action) { plan.AddAction(action); }
/** Get the Scenario for this run */
inline const Scenario& GetScenario() const { return scenario; }
/** Get the POSPlan for this run */
inline const POSPlan& GetPlan() const { return plan; }
/** Get the location string for this run */
inline const string& GetLocation() const { return location; }
/** Serialize this object to a protobuf object */
void Serialize(LocationEngine& engine, PBRun* pb_run) const;
/** Serialize this object to a protobuf file */
void SerializeToFile(LocationEngine& engine, const string& outfile) const;
/** Construct a RunResult from a protobuf object by using the provided Engine */
static RunResult* CreateRunResult(const Engine& engine, const PBRun& pb_run);
/** Construct a RunResult from a protobuf object by using the provided Location */
static RunResult* CreateRunResult(const Location* location, const PBRun& pb_run);
};
#endif | 41.042553 | 125 | 0.707621 | [
"object",
"vector"
] |
d259fb8d9cdc07e796a498358feedf7a6cc32407 | 47,566 | h | C | backend/vendor/node/include/node/openssl/objects.h | somnathbm/horizon-energymeter-iocl | f9fe2afd272fbfa7eb670caa80ccd15d70e77837 | [
"MIT"
] | null | null | null | backend/vendor/node/include/node/openssl/objects.h | somnathbm/horizon-energymeter-iocl | f9fe2afd272fbfa7eb670caa80ccd15d70e77837 | [
"MIT"
] | null | null | null | backend/vendor/node/include/node/openssl/objects.h | somnathbm/horizon-energymeter-iocl | f9fe2afd272fbfa7eb670caa80ccd15d70e77837 | [
"MIT"
] | null | null | null |
/* crypto/objects/objects.h */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#ifndef HEADER_OBJECTS_H
# define HEADER_OBJECTS_H
# define USE_OBJ_MAC
# ifdef USE_OBJ_MAC
# include <openssl/obj_mac.h>
# else
# define SN_undef "UNDEF"
# define LN_undef "undefined"
# define NID_undef 0
# define OBJ_undef 0L
# define SN_Algorithm "Algorithm"
# define LN_algorithm "algorithm"
# define NID_algorithm 38
# define OBJ_algorithm 1L,3L,14L,3L,2L
# define LN_rsadsi "rsadsi"
# define NID_rsadsi 1
# define OBJ_rsadsi 1L,2L,840L,113549L
# define LN_pkcs "pkcs"
# define NID_pkcs 2
# define OBJ_pkcs OBJ_rsadsi,1L
# define SN_md2 "MD2"
# define LN_md2 "md2"
# define NID_md2 3
# define OBJ_md2 OBJ_rsadsi,2L,2L
# define SN_md5 "MD5"
# define LN_md5 "md5"
# define NID_md5 4
# define OBJ_md5 OBJ_rsadsi,2L,5L
# define SN_rc4 "RC4"
# define LN_rc4 "rc4"
# define NID_rc4 5
# define OBJ_rc4 OBJ_rsadsi,3L,4L
# define LN_rsaEncryption "rsaEncryption"
# define NID_rsaEncryption 6
# define OBJ_rsaEncryption OBJ_pkcs,1L,1L
# define SN_md2WithRSAEncryption "RSA-MD2"
# define LN_md2WithRSAEncryption "md2WithRSAEncryption"
# define NID_md2WithRSAEncryption 7
# define OBJ_md2WithRSAEncryption OBJ_pkcs,1L,2L
# define SN_md5WithRSAEncryption "RSA-MD5"
# define LN_md5WithRSAEncryption "md5WithRSAEncryption"
# define NID_md5WithRSAEncryption 8
# define OBJ_md5WithRSAEncryption OBJ_pkcs,1L,4L
# define SN_pbeWithMD2AndDES_CBC "PBE-MD2-DES"
# define LN_pbeWithMD2AndDES_CBC "pbeWithMD2AndDES-CBC"
# define NID_pbeWithMD2AndDES_CBC 9
# define OBJ_pbeWithMD2AndDES_CBC OBJ_pkcs,5L,1L
# define SN_pbeWithMD5AndDES_CBC "PBE-MD5-DES"
# define LN_pbeWithMD5AndDES_CBC "pbeWithMD5AndDES-CBC"
# define NID_pbeWithMD5AndDES_CBC 10
# define OBJ_pbeWithMD5AndDES_CBC OBJ_pkcs,5L,3L
# define LN_X500 "X500"
# define NID_X500 11
# define OBJ_X500 2L,5L
# define LN_X509 "X509"
# define NID_X509 12
# define OBJ_X509 OBJ_X500,4L
# define SN_commonName "CN"
# define LN_commonName "commonName"
# define NID_commonName 13
# define OBJ_commonName OBJ_X509,3L
# define SN_countryName "C"
# define LN_countryName "countryName"
# define NID_countryName 14
# define OBJ_countryName OBJ_X509,6L
# define SN_localityName "L"
# define LN_localityName "localityName"
# define NID_localityName 15
# define OBJ_localityName OBJ_X509,7L
/* Postal Address? PA */
/* should be "ST" (rfc1327) but MS uses 'S' */
# define SN_stateOrProvinceName "ST"
# define LN_stateOrProvinceName "stateOrProvinceName"
# define NID_stateOrProvinceName 16
# define OBJ_stateOrProvinceName OBJ_X509,8L
# define SN_organizationName "O"
# define LN_organizationName "organizationName"
# define NID_organizationName 17
# define OBJ_organizationName OBJ_X509,10L
# define SN_organizationalUnitName "OU"
# define LN_organizationalUnitName "organizationalUnitName"
# define NID_organizationalUnitName 18
# define OBJ_organizationalUnitName OBJ_X509,11L
# define SN_rsa "RSA"
# define LN_rsa "rsa"
# define NID_rsa 19
# define OBJ_rsa OBJ_X500,8L,1L,1L
# define LN_pkcs7 "pkcs7"
# define NID_pkcs7 20
# define OBJ_pkcs7 OBJ_pkcs,7L
# define LN_pkcs7_data "pkcs7-data"
# define NID_pkcs7_data 21
# define OBJ_pkcs7_data OBJ_pkcs7,1L
# define LN_pkcs7_signed "pkcs7-signedData"
# define NID_pkcs7_signed 22
# define OBJ_pkcs7_signed OBJ_pkcs7,2L
# define LN_pkcs7_enveloped "pkcs7-envelopedData"
# define NID_pkcs7_enveloped 23
# define OBJ_pkcs7_enveloped OBJ_pkcs7,3L
# define LN_pkcs7_signedAndEnveloped "pkcs7-signedAndEnvelopedData"
# define NID_pkcs7_signedAndEnveloped 24
# define OBJ_pkcs7_signedAndEnveloped OBJ_pkcs7,4L
# define LN_pkcs7_digest "pkcs7-digestData"
# define NID_pkcs7_digest 25
# define OBJ_pkcs7_digest OBJ_pkcs7,5L
# define LN_pkcs7_encrypted "pkcs7-encryptedData"
# define NID_pkcs7_encrypted 26
# define OBJ_pkcs7_encrypted OBJ_pkcs7,6L
# define LN_pkcs3 "pkcs3"
# define NID_pkcs3 27
# define OBJ_pkcs3 OBJ_pkcs,3L
# define LN_dhKeyAgreement "dhKeyAgreement"
# define NID_dhKeyAgreement 28
# define OBJ_dhKeyAgreement OBJ_pkcs3,1L
# define SN_des_ecb "DES-ECB"
# define LN_des_ecb "des-ecb"
# define NID_des_ecb 29
# define OBJ_des_ecb OBJ_algorithm,6L
# define SN_des_cfb64 "DES-CFB"
# define LN_des_cfb64 "des-cfb"
# define NID_des_cfb64 30
/* IV + num */
# define OBJ_des_cfb64 OBJ_algorithm,9L
# define SN_des_cbc "DES-CBC"
# define LN_des_cbc "des-cbc"
# define NID_des_cbc 31
/* IV */
# define OBJ_des_cbc OBJ_algorithm,7L
# define SN_des_ede "DES-EDE"
# define LN_des_ede "des-ede"
# define NID_des_ede 32
/* ?? */
# define OBJ_des_ede OBJ_algorithm,17L
# define SN_des_ede3 "DES-EDE3"
# define LN_des_ede3 "des-ede3"
# define NID_des_ede3 33
# define SN_idea_cbc "IDEA-CBC"
# define LN_idea_cbc "idea-cbc"
# define NID_idea_cbc 34
# define OBJ_idea_cbc 1L,3L,6L,1L,4L,1L,188L,7L,1L,1L,2L
# define SN_idea_cfb64 "IDEA-CFB"
# define LN_idea_cfb64 "idea-cfb"
# define NID_idea_cfb64 35
# define SN_idea_ecb "IDEA-ECB"
# define LN_idea_ecb "idea-ecb"
# define NID_idea_ecb 36
# define SN_rc2_cbc "RC2-CBC"
# define LN_rc2_cbc "rc2-cbc"
# define NID_rc2_cbc 37
# define OBJ_rc2_cbc OBJ_rsadsi,3L,2L
# define SN_rc2_ecb "RC2-ECB"
# define LN_rc2_ecb "rc2-ecb"
# define NID_rc2_ecb 38
# define SN_rc2_cfb64 "RC2-CFB"
# define LN_rc2_cfb64 "rc2-cfb"
# define NID_rc2_cfb64 39
# define SN_rc2_ofb64 "RC2-OFB"
# define LN_rc2_ofb64 "rc2-ofb"
# define NID_rc2_ofb64 40
# define SN_sha "SHA"
# define LN_sha "sha"
# define NID_sha 41
# define OBJ_sha OBJ_algorithm,18L
# define SN_shaWithRSAEncryption "RSA-SHA"
# define LN_shaWithRSAEncryption "shaWithRSAEncryption"
# define NID_shaWithRSAEncryption 42
# define OBJ_shaWithRSAEncryption OBJ_algorithm,15L
# define SN_des_ede_cbc "DES-EDE-CBC"
# define LN_des_ede_cbc "des-ede-cbc"
# define NID_des_ede_cbc 43
# define SN_des_ede3_cbc "DES-EDE3-CBC"
# define LN_des_ede3_cbc "des-ede3-cbc"
# define NID_des_ede3_cbc 44
# define OBJ_des_ede3_cbc OBJ_rsadsi,3L,7L
# define SN_des_ofb64 "DES-OFB"
# define LN_des_ofb64 "des-ofb"
# define NID_des_ofb64 45
# define OBJ_des_ofb64 OBJ_algorithm,8L
# define SN_idea_ofb64 "IDEA-OFB"
# define LN_idea_ofb64 "idea-ofb"
# define NID_idea_ofb64 46
# define LN_pkcs9 "pkcs9"
# define NID_pkcs9 47
# define OBJ_pkcs9 OBJ_pkcs,9L
# define SN_pkcs9_emailAddress "Email"
# define LN_pkcs9_emailAddress "emailAddress"
# define NID_pkcs9_emailAddress 48
# define OBJ_pkcs9_emailAddress OBJ_pkcs9,1L
# define LN_pkcs9_unstructuredName "unstructuredName"
# define NID_pkcs9_unstructuredName 49
# define OBJ_pkcs9_unstructuredName OBJ_pkcs9,2L
# define LN_pkcs9_contentType "contentType"
# define NID_pkcs9_contentType 50
# define OBJ_pkcs9_contentType OBJ_pkcs9,3L
# define LN_pkcs9_messageDigest "messageDigest"
# define NID_pkcs9_messageDigest 51
# define OBJ_pkcs9_messageDigest OBJ_pkcs9,4L
# define LN_pkcs9_signingTime "signingTime"
# define NID_pkcs9_signingTime 52
# define OBJ_pkcs9_signingTime OBJ_pkcs9,5L
# define LN_pkcs9_countersignature "countersignature"
# define NID_pkcs9_countersignature 53
# define OBJ_pkcs9_countersignature OBJ_pkcs9,6L
# define LN_pkcs9_challengePassword "challengePassword"
# define NID_pkcs9_challengePassword 54
# define OBJ_pkcs9_challengePassword OBJ_pkcs9,7L
# define LN_pkcs9_unstructuredAddress "unstructuredAddress"
# define NID_pkcs9_unstructuredAddress 55
# define OBJ_pkcs9_unstructuredAddress OBJ_pkcs9,8L
# define LN_pkcs9_extCertAttributes "extendedCertificateAttributes"
# define NID_pkcs9_extCertAttributes 56
# define OBJ_pkcs9_extCertAttributes OBJ_pkcs9,9L
# define SN_netscape "Netscape"
# define LN_netscape "Netscape Communications Corp."
# define NID_netscape 57
# define OBJ_netscape 2L,16L,840L,1L,113730L
# define SN_netscape_cert_extension "nsCertExt"
# define LN_netscape_cert_extension "Netscape Certificate Extension"
# define NID_netscape_cert_extension 58
# define OBJ_netscape_cert_extension OBJ_netscape,1L
# define SN_netscape_data_type "nsDataType"
# define LN_netscape_data_type "Netscape Data Type"
# define NID_netscape_data_type 59
# define OBJ_netscape_data_type OBJ_netscape,2L
# define SN_des_ede_cfb64 "DES-EDE-CFB"
# define LN_des_ede_cfb64 "des-ede-cfb"
# define NID_des_ede_cfb64 60
# define SN_des_ede3_cfb64 "DES-EDE3-CFB"
# define LN_des_ede3_cfb64 "des-ede3-cfb"
# define NID_des_ede3_cfb64 61
# define SN_des_ede_ofb64 "DES-EDE-OFB"
# define LN_des_ede_ofb64 "des-ede-ofb"
# define NID_des_ede_ofb64 62
# define SN_des_ede3_ofb64 "DES-EDE3-OFB"
# define LN_des_ede3_ofb64 "des-ede3-ofb"
# define NID_des_ede3_ofb64 63
/* I'm not sure about the object ID */
# define SN_sha1 "SHA1"
# define LN_sha1 "sha1"
# define NID_sha1 64
# define OBJ_sha1 OBJ_algorithm,26L
/* 28 Jun 1996 - eay */
/* #define OBJ_sha1 1L,3L,14L,2L,26L,05L <- wrong */
# define SN_sha1WithRSAEncryption "RSA-SHA1"
# define LN_sha1WithRSAEncryption "sha1WithRSAEncryption"
# define NID_sha1WithRSAEncryption 65
# define OBJ_sha1WithRSAEncryption OBJ_pkcs,1L,5L
# define SN_dsaWithSHA "DSA-SHA"
# define LN_dsaWithSHA "dsaWithSHA"
# define NID_dsaWithSHA 66
# define OBJ_dsaWithSHA OBJ_algorithm,13L
# define SN_dsa_2 "DSA-old"
# define LN_dsa_2 "dsaEncryption-old"
# define NID_dsa_2 67
# define OBJ_dsa_2 OBJ_algorithm,12L
/* proposed by microsoft to RSA */
# define SN_pbeWithSHA1AndRC2_CBC "PBE-SHA1-RC2-64"
# define LN_pbeWithSHA1AndRC2_CBC "pbeWithSHA1AndRC2-CBC"
# define NID_pbeWithSHA1AndRC2_CBC 68
# define OBJ_pbeWithSHA1AndRC2_CBC OBJ_pkcs,5L,11L
/*
* proposed by microsoft to RSA as pbeWithSHA1AndRC4: it is now defined
* explicitly in PKCS#5 v2.0 as id-PBKDF2 which is something completely
* different.
*/
# define LN_id_pbkdf2 "PBKDF2"
# define NID_id_pbkdf2 69
# define OBJ_id_pbkdf2 OBJ_pkcs,5L,12L
# define SN_dsaWithSHA1_2 "DSA-SHA1-old"
# define LN_dsaWithSHA1_2 "dsaWithSHA1-old"
# define NID_dsaWithSHA1_2 70
/* Got this one from 'sdn706r20.pdf' which is actually an NSA document :-) */
# define OBJ_dsaWithSHA1_2 OBJ_algorithm,27L
# define SN_netscape_cert_type "nsCertType"
# define LN_netscape_cert_type "Netscape Cert Type"
# define NID_netscape_cert_type 71
# define OBJ_netscape_cert_type OBJ_netscape_cert_extension,1L
# define SN_netscape_base_url "nsBaseUrl"
# define LN_netscape_base_url "Netscape Base Url"
# define NID_netscape_base_url 72
# define OBJ_netscape_base_url OBJ_netscape_cert_extension,2L
# define SN_netscape_revocation_url "nsRevocationUrl"
# define LN_netscape_revocation_url "Netscape Revocation Url"
# define NID_netscape_revocation_url 73
# define OBJ_netscape_revocation_url OBJ_netscape_cert_extension,3L
# define SN_netscape_ca_revocation_url "nsCaRevocationUrl"
# define LN_netscape_ca_revocation_url "Netscape CA Revocation Url"
# define NID_netscape_ca_revocation_url 74
# define OBJ_netscape_ca_revocation_url OBJ_netscape_cert_extension,4L
# define SN_netscape_renewal_url "nsRenewalUrl"
# define LN_netscape_renewal_url "Netscape Renewal Url"
# define NID_netscape_renewal_url 75
# define OBJ_netscape_renewal_url OBJ_netscape_cert_extension,7L
# define SN_netscape_ca_policy_url "nsCaPolicyUrl"
# define LN_netscape_ca_policy_url "Netscape CA Policy Url"
# define NID_netscape_ca_policy_url 76
# define OBJ_netscape_ca_policy_url OBJ_netscape_cert_extension,8L
# define SN_netscape_ssl_server_name "nsSslServerName"
# define LN_netscape_ssl_server_name "Netscape SSL Server Name"
# define NID_netscape_ssl_server_name 77
# define OBJ_netscape_ssl_server_name OBJ_netscape_cert_extension,12L
# define SN_netscape_comment "nsComment"
# define LN_netscape_comment "Netscape Comment"
# define NID_netscape_comment 78
# define OBJ_netscape_comment OBJ_netscape_cert_extension,13L
# define SN_netscape_cert_sequence "nsCertSequence"
# define LN_netscape_cert_sequence "Netscape Certificate Sequence"
# define NID_netscape_cert_sequence 79
# define OBJ_netscape_cert_sequence OBJ_netscape_data_type,5L
# define SN_desx_cbc "DESX-CBC"
# define LN_desx_cbc "desx-cbc"
# define NID_desx_cbc 80
# define SN_id_ce "id-ce"
# define NID_id_ce 81
# define OBJ_id_ce 2L,5L,29L
# define SN_subject_key_identifier "subjectKeyIdentifier"
# define LN_subject_key_identifier "X509v3 Subject Key Identifier"
# define NID_subject_key_identifier 82
# define OBJ_subject_key_identifier OBJ_id_ce,14L
# define SN_key_usage "keyUsage"
# define LN_key_usage "X509v3 Key Usage"
# define NID_key_usage 83
# define OBJ_key_usage OBJ_id_ce,15L
# define SN_private_key_usage_period "privateKeyUsagePeriod"
# define LN_private_key_usage_period "X509v3 Private Key Usage Period"
# define NID_private_key_usage_period 84
# define OBJ_private_key_usage_period OBJ_id_ce,16L
# define SN_subject_alt_name "subjectAltName"
# define LN_subject_alt_name "X509v3 Subject Alternative Name"
# define NID_subject_alt_name 85
# define OBJ_subject_alt_name OBJ_id_ce,17L
# define SN_issuer_alt_name "issuerAltName"
# define LN_issuer_alt_name "X509v3 Issuer Alternative Name"
# define NID_issuer_alt_name 86
# define OBJ_issuer_alt_name OBJ_id_ce,18L
# define SN_basic_constraints "basicConstraints"
# define LN_basic_constraints "X509v3 Basic Constraints"
# define NID_basic_constraints 87
# define OBJ_basic_constraints OBJ_id_ce,19L
# define SN_crl_number "crlNumber"
# define LN_crl_number "X509v3 CRL Number"
# define NID_crl_number 88
# define OBJ_crl_number OBJ_id_ce,20L
# define SN_certificate_policies "certificatePolicies"
# define LN_certificate_policies "X509v3 Certificate Policies"
# define NID_certificate_policies 89
# define OBJ_certificate_policies OBJ_id_ce,32L
# define SN_authority_key_identifier "authorityKeyIdentifier"
# define LN_authority_key_identifier "X509v3 Authority Key Identifier"
# define NID_authority_key_identifier 90
# define OBJ_authority_key_identifier OBJ_id_ce,35L
# define SN_bf_cbc "BF-CBC"
# define LN_bf_cbc "bf-cbc"
# define NID_bf_cbc 91
# define OBJ_bf_cbc 1L,3L,6L,1L,4L,1L,3029L,1L,2L
# define SN_bf_ecb "BF-ECB"
# define LN_bf_ecb "bf-ecb"
# define NID_bf_ecb 92
# define SN_bf_cfb64 "BF-CFB"
# define LN_bf_cfb64 "bf-cfb"
# define NID_bf_cfb64 93
# define SN_bf_ofb64 "BF-OFB"
# define LN_bf_ofb64 "bf-ofb"
# define NID_bf_ofb64 94
# define SN_mdc2 "MDC2"
# define LN_mdc2 "mdc2"
# define NID_mdc2 95
# define OBJ_mdc2 2L,5L,8L,3L,101L
/* An alternative? 1L,3L,14L,3L,2L,19L */
# define SN_mdc2WithRSA "RSA-MDC2"
# define LN_mdc2WithRSA "mdc2withRSA"
# define NID_mdc2WithRSA 96
# define OBJ_mdc2WithRSA 2L,5L,8L,3L,100L
# define SN_rc4_40 "RC4-40"
# define LN_rc4_40 "rc4-40"
# define NID_rc4_40 97
# define SN_rc2_40_cbc "RC2-40-CBC"
# define LN_rc2_40_cbc "rc2-40-cbc"
# define NID_rc2_40_cbc 98
# define SN_givenName "G"
# define LN_givenName "givenName"
# define NID_givenName 99
# define OBJ_givenName OBJ_X509,42L
# define SN_surname "S"
# define LN_surname "surname"
# define NID_surname 100
# define OBJ_surname OBJ_X509,4L
# define SN_initials "I"
# define LN_initials "initials"
# define NID_initials 101
# define OBJ_initials OBJ_X509,43L
# define SN_uniqueIdentifier "UID"
# define LN_uniqueIdentifier "uniqueIdentifier"
# define NID_uniqueIdentifier 102
# define OBJ_uniqueIdentifier OBJ_X509,45L
# define SN_crl_distribution_points "crlDistributionPoints"
# define LN_crl_distribution_points "X509v3 CRL Distribution Points"
# define NID_crl_distribution_points 103
# define OBJ_crl_distribution_points OBJ_id_ce,31L
# define SN_md5WithRSA "RSA-NP-MD5"
# define LN_md5WithRSA "md5WithRSA"
# define NID_md5WithRSA 104
# define OBJ_md5WithRSA OBJ_algorithm,3L
# define SN_serialNumber "SN"
# define LN_serialNumber "serialNumber"
# define NID_serialNumber 105
# define OBJ_serialNumber OBJ_X509,5L
# define SN_title "T"
# define LN_title "title"
# define NID_title 106
# define OBJ_title OBJ_X509,12L
# define SN_description "D"
# define LN_description "description"
# define NID_description 107
# define OBJ_description OBJ_X509,13L
/* CAST5 is CAST-128, I'm just sticking with the documentation */
# define SN_cast5_cbc "CAST5-CBC"
# define LN_cast5_cbc "cast5-cbc"
# define NID_cast5_cbc 108
# define OBJ_cast5_cbc 1L,2L,840L,113533L,7L,66L,10L
# define SN_cast5_ecb "CAST5-ECB"
# define LN_cast5_ecb "cast5-ecb"
# define NID_cast5_ecb 109
# define SN_cast5_cfb64 "CAST5-CFB"
# define LN_cast5_cfb64 "cast5-cfb"
# define NID_cast5_cfb64 110
# define SN_cast5_ofb64 "CAST5-OFB"
# define LN_cast5_ofb64 "cast5-ofb"
# define NID_cast5_ofb64 111
# define LN_pbeWithMD5AndCast5_CBC "pbeWithMD5AndCast5CBC"
# define NID_pbeWithMD5AndCast5_CBC 112
# define OBJ_pbeWithMD5AndCast5_CBC 1L,2L,840L,113533L,7L,66L,12L
/*-
* This is one sun will soon be using :-(
* id-dsa-with-sha1 ID ::= {
* iso(1) member-body(2) us(840) x9-57 (10040) x9cm(4) 3 }
*/
# define SN_dsaWithSHA1 "DSA-SHA1"
# define LN_dsaWithSHA1 "dsaWithSHA1"
# define NID_dsaWithSHA1 113
# define OBJ_dsaWithSHA1 1L,2L,840L,10040L,4L,3L
# define NID_md5_sha1 114
# define SN_md5_sha1 "MD5-SHA1"
# define LN_md5_sha1 "md5-sha1"
# define SN_sha1WithRSA "RSA-SHA1-2"
# define LN_sha1WithRSA "sha1WithRSA"
# define NID_sha1WithRSA 115
# define OBJ_sha1WithRSA OBJ_algorithm,29L
# define SN_dsa "DSA"
# define LN_dsa "dsaEncryption"
# define NID_dsa 116
# define OBJ_dsa 1L,2L,840L,10040L,4L,1L
# define SN_ripemd160 "RIPEMD160"
# define LN_ripemd160 "ripemd160"
# define NID_ripemd160 117
# define OBJ_ripemd160 1L,3L,36L,3L,2L,1L
/*
* The name should actually be rsaSignatureWithripemd160, but I'm going to
* continue using the convention I'm using with the other ciphers
*/
# define SN_ripemd160WithRSA "RSA-RIPEMD160"
# define LN_ripemd160WithRSA "ripemd160WithRSA"
# define NID_ripemd160WithRSA 119
# define OBJ_ripemd160WithRSA 1L,3L,36L,3L,3L,1L,2L
/*-
* Taken from rfc2040
* RC5_CBC_Parameters ::= SEQUENCE {
* version INTEGER (v1_0(16)),
* rounds INTEGER (8..127),
* blockSizeInBits INTEGER (64, 128),
* iv OCTET STRING OPTIONAL
* }
*/
# define SN_rc5_cbc "RC5-CBC"
# define LN_rc5_cbc "rc5-cbc"
# define NID_rc5_cbc 120
# define OBJ_rc5_cbc OBJ_rsadsi,3L,8L
# define SN_rc5_ecb "RC5-ECB"
# define LN_rc5_ecb "rc5-ecb"
# define NID_rc5_ecb 121
# define SN_rc5_cfb64 "RC5-CFB"
# define LN_rc5_cfb64 "rc5-cfb"
# define NID_rc5_cfb64 122
# define SN_rc5_ofb64 "RC5-OFB"
# define LN_rc5_ofb64 "rc5-ofb"
# define NID_rc5_ofb64 123
# define SN_rle_compression "RLE"
# define LN_rle_compression "run length compression"
# define NID_rle_compression 124
# define OBJ_rle_compression 1L,1L,1L,1L,666L,1L
# define SN_zlib_compression "ZLIB"
# define LN_zlib_compression "zlib compression"
# define NID_zlib_compression 125
# define OBJ_zlib_compression 1L,1L,1L,1L,666L,2L
# define SN_ext_key_usage "extendedKeyUsage"
# define LN_ext_key_usage "X509v3 Extended Key Usage"
# define NID_ext_key_usage 126
# define OBJ_ext_key_usage OBJ_id_ce,37
# define SN_id_pkix "PKIX"
# define NID_id_pkix 127
# define OBJ_id_pkix 1L,3L,6L,1L,5L,5L,7L
# define SN_id_kp "id-kp"
# define NID_id_kp 128
# define OBJ_id_kp OBJ_id_pkix,3L
/* PKIX extended key usage OIDs */
# define SN_server_auth "serverAuth"
# define LN_server_auth "TLS Web Server Authentication"
# define NID_server_auth 129
# define OBJ_server_auth OBJ_id_kp,1L
# define SN_client_auth "clientAuth"
# define LN_client_auth "TLS Web Client Authentication"
# define NID_client_auth 130
# define OBJ_client_auth OBJ_id_kp,2L
# define SN_code_sign "codeSigning"
# define LN_code_sign "Code Signing"
# define NID_code_sign 131
# define OBJ_code_sign OBJ_id_kp,3L
# define SN_email_protect "emailProtection"
# define LN_email_protect "E-mail Protection"
# define NID_email_protect 132
# define OBJ_email_protect OBJ_id_kp,4L
# define SN_time_stamp "timeStamping"
# define LN_time_stamp "Time Stamping"
# define NID_time_stamp 133
# define OBJ_time_stamp OBJ_id_kp,8L
/* Additional extended key usage OIDs: Microsoft */
# define SN_ms_code_ind "msCodeInd"
# define LN_ms_code_ind "Microsoft Individual Code Signing"
# define NID_ms_code_ind 134
# define OBJ_ms_code_ind 1L,3L,6L,1L,4L,1L,311L,2L,1L,21L
# define SN_ms_code_com "msCodeCom"
# define LN_ms_code_com "Microsoft Commercial Code Signing"
# define NID_ms_code_com 135
# define OBJ_ms_code_com 1L,3L,6L,1L,4L,1L,311L,2L,1L,22L
# define SN_ms_ctl_sign "msCTLSign"
# define LN_ms_ctl_sign "Microsoft Trust List Signing"
# define NID_ms_ctl_sign 136
# define OBJ_ms_ctl_sign 1L,3L,6L,1L,4L,1L,311L,10L,3L,1L
# define SN_ms_sgc "msSGC"
# define LN_ms_sgc "Microsoft Server Gated Crypto"
# define NID_ms_sgc 137
# define OBJ_ms_sgc 1L,3L,6L,1L,4L,1L,311L,10L,3L,3L
# define SN_ms_efs "msEFS"
# define LN_ms_efs "Microsoft Encrypted File System"
# define NID_ms_efs 138
# define OBJ_ms_efs 1L,3L,6L,1L,4L,1L,311L,10L,3L,4L
/* Additional usage: Netscape */
# define SN_ns_sgc "nsSGC"
# define LN_ns_sgc "Netscape Server Gated Crypto"
# define NID_ns_sgc 139
# define OBJ_ns_sgc OBJ_netscape,4L,1L
# define SN_delta_crl "deltaCRL"
# define LN_delta_crl "X509v3 Delta CRL Indicator"
# define NID_delta_crl 140
# define OBJ_delta_crl OBJ_id_ce,27L
# define SN_crl_reason "CRLReason"
# define LN_crl_reason "CRL Reason Code"
# define NID_crl_reason 141
# define OBJ_crl_reason OBJ_id_ce,21L
# define SN_invalidity_date "invalidityDate"
# define LN_invalidity_date "Invalidity Date"
# define NID_invalidity_date 142
# define OBJ_invalidity_date OBJ_id_ce,24L
# define SN_sxnet "SXNetID"
# define LN_sxnet "Strong Extranet ID"
# define NID_sxnet 143
# define OBJ_sxnet 1L,3L,101L,1L,4L,1L
/* PKCS12 and related OBJECT IDENTIFIERS */
# define OBJ_pkcs12 OBJ_pkcs,12L
# define OBJ_pkcs12_pbeids OBJ_pkcs12, 1
# define SN_pbe_WithSHA1And128BitRC4 "PBE-SHA1-RC4-128"
# define LN_pbe_WithSHA1And128BitRC4 "pbeWithSHA1And128BitRC4"
# define NID_pbe_WithSHA1And128BitRC4 144
# define OBJ_pbe_WithSHA1And128BitRC4 OBJ_pkcs12_pbeids, 1L
# define SN_pbe_WithSHA1And40BitRC4 "PBE-SHA1-RC4-40"
# define LN_pbe_WithSHA1And40BitRC4 "pbeWithSHA1And40BitRC4"
# define NID_pbe_WithSHA1And40BitRC4 145
# define OBJ_pbe_WithSHA1And40BitRC4 OBJ_pkcs12_pbeids, 2L
# define SN_pbe_WithSHA1And3_Key_TripleDES_CBC "PBE-SHA1-3DES"
# define LN_pbe_WithSHA1And3_Key_TripleDES_CBC "pbeWithSHA1And3-KeyTripleDES-CBC"
# define NID_pbe_WithSHA1And3_Key_TripleDES_CBC 146
# define OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC OBJ_pkcs12_pbeids, 3L
# define SN_pbe_WithSHA1And2_Key_TripleDES_CBC "PBE-SHA1-2DES"
# define LN_pbe_WithSHA1And2_Key_TripleDES_CBC "pbeWithSHA1And2-KeyTripleDES-CBC"
# define NID_pbe_WithSHA1And2_Key_TripleDES_CBC 147
# define OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC OBJ_pkcs12_pbeids, 4L
# define SN_pbe_WithSHA1And128BitRC2_CBC "PBE-SHA1-RC2-128"
# define LN_pbe_WithSHA1And128BitRC2_CBC "pbeWithSHA1And128BitRC2-CBC"
# define NID_pbe_WithSHA1And128BitRC2_CBC 148
# define OBJ_pbe_WithSHA1And128BitRC2_CBC OBJ_pkcs12_pbeids, 5L
# define SN_pbe_WithSHA1And40BitRC2_CBC "PBE-SHA1-RC2-40"
# define LN_pbe_WithSHA1And40BitRC2_CBC "pbeWithSHA1And40BitRC2-CBC"
# define NID_pbe_WithSHA1And40BitRC2_CBC 149
# define OBJ_pbe_WithSHA1And40BitRC2_CBC OBJ_pkcs12_pbeids, 6L
# define OBJ_pkcs12_Version1 OBJ_pkcs12, 10L
# define OBJ_pkcs12_BagIds OBJ_pkcs12_Version1, 1L
# define LN_keyBag "keyBag"
# define NID_keyBag 150
# define OBJ_keyBag OBJ_pkcs12_BagIds, 1L
# define LN_pkcs8ShroudedKeyBag "pkcs8ShroudedKeyBag"
# define NID_pkcs8ShroudedKeyBag 151
# define OBJ_pkcs8ShroudedKeyBag OBJ_pkcs12_BagIds, 2L
# define LN_certBag "certBag"
# define NID_certBag 152
# define OBJ_certBag OBJ_pkcs12_BagIds, 3L
# define LN_crlBag "crlBag"
# define NID_crlBag 153
# define OBJ_crlBag OBJ_pkcs12_BagIds, 4L
# define LN_secretBag "secretBag"
# define NID_secretBag 154
# define OBJ_secretBag OBJ_pkcs12_BagIds, 5L
# define LN_safeContentsBag "safeContentsBag"
# define NID_safeContentsBag 155
# define OBJ_safeContentsBag OBJ_pkcs12_BagIds, 6L
# define LN_friendlyName "friendlyName"
# define NID_friendlyName 156
# define OBJ_friendlyName OBJ_pkcs9, 20L
# define LN_localKeyID "localKeyID"
# define NID_localKeyID 157
# define OBJ_localKeyID OBJ_pkcs9, 21L
# define OBJ_certTypes OBJ_pkcs9, 22L
# define LN_x509Certificate "x509Certificate"
# define NID_x509Certificate 158
# define OBJ_x509Certificate OBJ_certTypes, 1L
# define LN_sdsiCertificate "sdsiCertificate"
# define NID_sdsiCertificate 159
# define OBJ_sdsiCertificate OBJ_certTypes, 2L
# define OBJ_crlTypes OBJ_pkcs9, 23L
# define LN_x509Crl "x509Crl"
# define NID_x509Crl 160
# define OBJ_x509Crl OBJ_crlTypes, 1L
/* PKCS#5 v2 OIDs */
# define LN_pbes2 "PBES2"
# define NID_pbes2 161
# define OBJ_pbes2 OBJ_pkcs,5L,13L
# define LN_pbmac1 "PBMAC1"
# define NID_pbmac1 162
# define OBJ_pbmac1 OBJ_pkcs,5L,14L
# define LN_hmacWithSHA1 "hmacWithSHA1"
# define NID_hmacWithSHA1 163
# define OBJ_hmacWithSHA1 OBJ_rsadsi,2L,7L
/* Policy Qualifier Ids */
# define LN_id_qt_cps "Policy Qualifier CPS"
# define SN_id_qt_cps "id-qt-cps"
# define NID_id_qt_cps 164
# define OBJ_id_qt_cps OBJ_id_pkix,2L,1L
# define LN_id_qt_unotice "Policy Qualifier User Notice"
# define SN_id_qt_unotice "id-qt-unotice"
# define NID_id_qt_unotice 165
# define OBJ_id_qt_unotice OBJ_id_pkix,2L,2L
# define SN_rc2_64_cbc "RC2-64-CBC"
# define LN_rc2_64_cbc "rc2-64-cbc"
# define NID_rc2_64_cbc 166
# define SN_SMIMECapabilities "SMIME-CAPS"
# define LN_SMIMECapabilities "S/MIME Capabilities"
# define NID_SMIMECapabilities 167
# define OBJ_SMIMECapabilities OBJ_pkcs9,15L
# define SN_pbeWithMD2AndRC2_CBC "PBE-MD2-RC2-64"
# define LN_pbeWithMD2AndRC2_CBC "pbeWithMD2AndRC2-CBC"
# define NID_pbeWithMD2AndRC2_CBC 168
# define OBJ_pbeWithMD2AndRC2_CBC OBJ_pkcs,5L,4L
# define SN_pbeWithMD5AndRC2_CBC "PBE-MD5-RC2-64"
# define LN_pbeWithMD5AndRC2_CBC "pbeWithMD5AndRC2-CBC"
# define NID_pbeWithMD5AndRC2_CBC 169
# define OBJ_pbeWithMD5AndRC2_CBC OBJ_pkcs,5L,6L
# define SN_pbeWithSHA1AndDES_CBC "PBE-SHA1-DES"
# define LN_pbeWithSHA1AndDES_CBC "pbeWithSHA1AndDES-CBC"
# define NID_pbeWithSHA1AndDES_CBC 170
# define OBJ_pbeWithSHA1AndDES_CBC OBJ_pkcs,5L,10L
/* Extension request OIDs */
# define LN_ms_ext_req "Microsoft Extension Request"
# define SN_ms_ext_req "msExtReq"
# define NID_ms_ext_req 171
# define OBJ_ms_ext_req 1L,3L,6L,1L,4L,1L,311L,2L,1L,14L
# define LN_ext_req "Extension Request"
# define SN_ext_req "extReq"
# define NID_ext_req 172
# define OBJ_ext_req OBJ_pkcs9,14L
# define SN_name "name"
# define LN_name "name"
# define NID_name 173
# define OBJ_name OBJ_X509,41L
# define SN_dnQualifier "dnQualifier"
# define LN_dnQualifier "dnQualifier"
# define NID_dnQualifier 174
# define OBJ_dnQualifier OBJ_X509,46L
# define SN_id_pe "id-pe"
# define NID_id_pe 175
# define OBJ_id_pe OBJ_id_pkix,1L
# define SN_id_ad "id-ad"
# define NID_id_ad 176
# define OBJ_id_ad OBJ_id_pkix,48L
# define SN_info_access "authorityInfoAccess"
# define LN_info_access "Authority Information Access"
# define NID_info_access 177
# define OBJ_info_access OBJ_id_pe,1L
# define SN_ad_OCSP "OCSP"
# define LN_ad_OCSP "OCSP"
# define NID_ad_OCSP 178
# define OBJ_ad_OCSP OBJ_id_ad,1L
# define SN_ad_ca_issuers "caIssuers"
# define LN_ad_ca_issuers "CA Issuers"
# define NID_ad_ca_issuers 179
# define OBJ_ad_ca_issuers OBJ_id_ad,2L
# define SN_OCSP_sign "OCSPSigning"
# define LN_OCSP_sign "OCSP Signing"
# define NID_OCSP_sign 180
# define OBJ_OCSP_sign OBJ_id_kp,9L
# endif /* USE_OBJ_MAC */
# include <openssl/bio.h>
# include <openssl/asn1.h>
# define OBJ_NAME_TYPE_UNDEF 0x00
# define OBJ_NAME_TYPE_MD_METH 0x01
# define OBJ_NAME_TYPE_CIPHER_METH 0x02
# define OBJ_NAME_TYPE_PKEY_METH 0x03
# define OBJ_NAME_TYPE_COMP_METH 0x04
# define OBJ_NAME_TYPE_NUM 0x05
# define OBJ_NAME_ALIAS 0x8000
# define OBJ_BSEARCH_VALUE_ON_NOMATCH 0x01
# define OBJ_BSEARCH_FIRST_VALUE_ON_MATCH 0x02
#ifdef __cplusplus
extern "C" {
#endif
/* One entry in the OBJ_NAME name table: associates a string name of a
 * given type with an opaque data pointer. */
typedef struct obj_name_st {
    int type;               /* presumably one of the OBJ_NAME_TYPE_* values
                               defined above -- confirm against OBJ_NAME_add
                               callers */
    int alias;              /* nonzero for alias entries (see OBJ_NAME_ALIAS) */
    const char *name;       /* lookup key */
    const char *data;       /* associated value (opaque to this table) */
} OBJ_NAME;
# define OBJ_create_and_add_object(a,b,c) OBJ_create(a,b,c)
int OBJ_NAME_init(void);
int OBJ_NAME_new_index(unsigned long (*hash_func) (const char *),
int (*cmp_func) (const char *, const char *),
void (*free_func) (const char *, int, const char *));
const char *OBJ_NAME_get(const char *name, int type);
int OBJ_NAME_add(const char *name, int type, const char *data);
int OBJ_NAME_remove(const char *name, int type);
void OBJ_NAME_cleanup(int type); /* -1 for everything */
void OBJ_NAME_do_all(int type, void (*fn) (const OBJ_NAME *, void *arg),
void *arg);
void OBJ_NAME_do_all_sorted(int type,
void (*fn) (const OBJ_NAME *, void *arg),
void *arg);
ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o);
ASN1_OBJECT *OBJ_nid2obj(int n);
const char *OBJ_nid2ln(int n);
const char *OBJ_nid2sn(int n);
int OBJ_obj2nid(const ASN1_OBJECT *o);
ASN1_OBJECT *OBJ_txt2obj(const char *s, int no_name);
int OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name);
int OBJ_txt2nid(const char *s);
int OBJ_ln2nid(const char *s);
int OBJ_sn2nid(const char *s);
int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b);
const void *OBJ_bsearch_(const void *key, const void *base, int num, int size,
int (*cmp) (const void *, const void *));
const void *OBJ_bsearch_ex_(const void *key, const void *base, int num,
int size,
int (*cmp) (const void *, const void *),
int flags);
/* Declaration helpers for the type-checked binary search machinery below:
 * they declare the raw void*-based comparison thunk, the typed comparison
 * function, and the typed OBJ_bsearch_<nm>() wrapper.  The matching
 * definitions are produced by the IMPLEMENT_OBJ_BSEARCH_* macros. */
# define _DECLARE_OBJ_BSEARCH_CMP_FN(scope, type1, type2, nm)    \
  static int nm##_cmp_BSEARCH_CMP_FN(const void *, const void *); \
  static int nm##_cmp(type1 const *, type2 const *); \
  scope type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num)
/* file-local (static) variant */
# define DECLARE_OBJ_BSEARCH_CMP_FN(type1, type2, cmp)   \
  _DECLARE_OBJ_BSEARCH_CMP_FN(static, type1, type2, cmp)
/* externally visible variant */
# define DECLARE_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm)     \
  type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num)
/*-
 * Unsolved problem: if a type is actually a pointer type, like
 * nid_triple is, then it's impossible to get a const where you need
 * it. Consider:
 *
 * typedef int nid_triple[3];
 * const void *a_;
 * const nid_triple const *a = a_;
 *
 * The assignment discards a const because what you really want is:
 *
 * const int const * const *a = a_;
 *
 * But if you do that, you lose the fact that a is an array of 3 ints,
 * which breaks comparison functions.
 *
 * Thus we end up having to cast, sadly, or unpack the
 * declarations. Or, as I finally did in this case, declare nid_triple
 * to be a struct, which it should have been in the first place.
 *
 * Ben, August 2008.
 *
 * Also, strictly speaking not all types need be const, but handling
 * the non-constness means a lot of complication, and in practice
 * comparison routines never modify their arguments anyway.
 */
/* Define the void*-based comparison thunk plus a file-local (static)
 * OBJ_bsearch_<nm>() wrapper over OBJ_bsearch_().  The trailing
 * dummy_prototype declaration exists only so the macro invocation can be
 * terminated with a semicolon. */
# define IMPLEMENT_OBJ_BSEARCH_CMP_FN(type1, type2, nm)  \
  static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_)    \
      { \
      type1 const *a = a_; \
      type2 const *b = b_; \
      return nm##_cmp(a,b); \
      } \
  static type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \
      { \
      return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \
                                   nm##_cmp_BSEARCH_CMP_FN); \
      } \
  extern void dummy_prototype(void)
/* Same as IMPLEMENT_OBJ_BSEARCH_CMP_FN but the OBJ_bsearch_<nm>() wrapper
 * gets external linkage (pairs with DECLARE_OBJ_BSEARCH_GLOBAL_CMP_FN). */
# define IMPLEMENT_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm)   \
  static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_)    \
      { \
      type1 const *a = a_; \
      type2 const *b = b_; \
      return nm##_cmp(a,b); \
      } \
  type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \
      { \
      return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \
                                   nm##_cmp_BSEARCH_CMP_FN); \
      } \
  extern void dummy_prototype(void)
/* Type-checked call of OBJ_bsearch_(): the CHECKED_PTR_OF casts on
 * cmp##_type_1 / cmp##_type_2 are compile-time checks (evaluated and
 * discarded via the comma operator) that the key/base types match the
 * comparison function's expected types. */
# define OBJ_bsearch(type1,key,type2,base,num,cmp)                              \
  ((type2 *)OBJ_bsearch_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \
                         num,sizeof(type2),                             \
                         ((void)CHECKED_PTR_OF(type1,cmp##_type_1),     \
                          (void)CHECKED_PTR_OF(type2,cmp##_type_2),     \
                          cmp##_BSEARCH_CMP_FN)))
/* Type-checked call of OBJ_bsearch_ex_(), mirroring OBJ_bsearch above but
 * with an extra OBJ_BSEARCH_* 'flags' argument.
 * BUGFIX: the historical form of this macro was unusable -- it referenced
 * an undeclared identifier 'type_2' and placed 'flags' outside the
 * OBJ_bsearch_ex_() argument list, so any expansion failed to compile.
 * Both defects are corrected here; 'flags' is now passed as the sixth
 * argument of OBJ_bsearch_ex_(). */
# define OBJ_bsearch_ex(type1,key,type2,base,num,cmp,flags)                      \
  ((type2 *)OBJ_bsearch_ex_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \
                         num,sizeof(type2),                             \
                         ((void)CHECKED_PTR_OF(type1,cmp##_type_1),     \
                          (void)CHECKED_PTR_OF(type2,cmp##_type_2),     \
                          cmp##_BSEARCH_CMP_FN),flags))
int OBJ_new_nid(int num);
int OBJ_add_object(const ASN1_OBJECT *obj);
int OBJ_create(const char *oid, const char *sn, const char *ln);
void OBJ_cleanup(void);
int OBJ_create_objects(BIO *in);
int OBJ_find_sigid_algs(int signid, int *pdig_nid, int *ppkey_nid);
int OBJ_find_sigid_by_algs(int *psignid, int dig_nid, int pkey_nid);
int OBJ_add_sigid(int signid, int dig_id, int pkey_id);
void OBJ_sigid_free(void);
extern int obj_cleanup_defer;
void check_defer(int nid);
/* BEGIN ERROR CODES */
/*
* The following lines are auto generated by the script mkerr.pl. Any changes
* made after this point may be overwritten when the script is next run.
*/
void ERR_load_OBJ_strings(void);
/* Error codes for the OBJ functions. */
/* Function codes. */
# define OBJ_F_OBJ_ADD_OBJECT 105
# define OBJ_F_OBJ_CREATE 100
# define OBJ_F_OBJ_DUP 101
# define OBJ_F_OBJ_NAME_NEW_INDEX 106
# define OBJ_F_OBJ_NID2LN 102
# define OBJ_F_OBJ_NID2OBJ 103
# define OBJ_F_OBJ_NID2SN 104
/* Reason codes. */
# define OBJ_R_MALLOC_FAILURE 100
# define OBJ_R_UNKNOWN_NID 101
#ifdef __cplusplus
}
#endif
#endif
| 41.506108 | 84 | 0.601648 | [
"object"
] |
d25a4f88d7de9cf95bb2775ddcdc63bfbaf3bbb0 | 2,234 | h | C | include/exec/agg_node.h | phantom9999/BaikalDB | 094a7fc457cb345d8e9834a052d7bc17e996999f | [
"Apache-2.0"
] | 1 | 2018-08-07T13:33:47.000Z | 2018-08-07T13:33:47.000Z | include/exec/agg_node.h | MinHaoSun/BaikalDB | 191870058f1956d8b50258526aef92c24fc0262f | [
"Apache-2.0"
] | null | null | null | include/exec/agg_node.h | MinHaoSun/BaikalDB | 191870058f1956d8b50258526aef92c24fc0262f | [
"Apache-2.0"
] | null | null | null | // Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <map>
#include <unordered_map>
#ifdef BAIDU_INTERNAL
#include <base/containers/flat_map.h>
#else
#include <butil/containers/flat_map.h>
#endif
#include <vector>
#include "exec_node.h"
#include "agg_fn_call.h"
#include "mut_table_key.h"
namespace baikaldb {
// Execution-plan node for aggregation: rows are grouped by the encoded
// values of the group-by expressions and each group's aggregate state is
// kept in a MemRow stored in _hash_map.
class AggNode : public ExecNode {
public:
    AggNode() {
    }
    // Releases the expression trees this node owns (group-by expressions
    // and aggregate function calls).
    virtual ~AggNode() {
        for (auto expr : _group_exprs) {
            ExprNode::destory_tree(expr);
        }
        for (auto agg : _agg_fn_calls) {
            ExprNode::destory_tree(agg);
        }
    }
    virtual int init(const pb::PlanNode& node);
    virtual int expr_optimize(std::vector<pb::TupleDescriptor>* tuple_descs);
    virtual int open(RuntimeState* state);
    virtual int get_next(RuntimeState* state, RowBatch* batch, bool* eos);
    virtual void close(RuntimeState* state);
    virtual void transfer_pb(pb::PlanNode* pb_node);
    // Builds the grouping key for 'row' (presumably by evaluating
    // _group_exprs into 'key' -- confirm in the implementation).
    void encode_agg_key(MemRow* row, MutTableKey& key);
    // Folds each row of 'batch' into its group's aggregate state.
    void process_row_batch(RowBatch& batch);
private:
    // (translated) The types of the slots inside _group_tuple_id /
    // _agg_tuple_id need to be inferred.
    std::vector<ExprNode*> _group_exprs;
    //int32_t _group_tuple_id;
    int32_t _agg_tuple_id;
    pb::TupleDescriptor* _group_tuple_desc;
    std::vector<AggFnCall*> _agg_fn_calls;
    //std::vector<int32_t> _intermediate_slot_ids;
    //std::vector<int32_t> _final_slot_ids;
    bool _is_merger = false;        // true when merging partial aggregates
    MemRowDescriptor* _mem_row_desc;
    // (translated) Used for grouping and for positioning in get_next();
    // an ordered map here would keep results consistent with MySQL.
    butil::FlatMap<std::string, MemRow*> _hash_map;
    butil::FlatMap<std::string, MemRow*>::iterator _iter;
};
/* vim: set ts=4 sw=4 sts=4 tw=100 */
| 32.852941 | 77 | 0.70949 | [
"vector"
] |
d25ad1b39e2b8eca0512502960a39ec3ff6b91b6 | 26,853 | c | C | src/s_audio.c | fdch/pure-data | e7003d747339a2fb637c56b59ee53508b5f5be4e | [
"TCL"
] | null | null | null | src/s_audio.c | fdch/pure-data | e7003d747339a2fb637c56b59ee53508b5f5be4e | [
"TCL"
] | null | null | null | src/s_audio.c | fdch/pure-data | e7003d747339a2fb637c56b59ee53508b5f5be4e | [
"TCL"
] | null | null | null | /* Copyright (c) 2003, Miller Puckette and others.
* For information on usage and redistribution, and for a DISCLAIMER OF ALL
* WARRANTIES, see the file, "LICENSE.txt," in this distribution. */
/* machine-independent (well, mostly!) audio layer. Stores and recalls
audio settings from argparse routine and from dialog window.
LATER: save audio settings for various APIs for easier switching
*/
#include "m_pd.h"
#include "s_stuff.h"
#include <stdio.h>
#ifdef _WIN32
#include <time.h>
#else
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#endif /* _WIN32 */
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#ifdef _MSC_VER
#define snprintf _snprintf
#endif
#define SYS_DEFAULTCH 2
#define MAXNDEV 128
#define DEVDESCSIZE 128
#define MAXBLOCKSIZE 2048
/* exported variables */
int sys_schedadvance; /* scheduler advance in microseconds */
static int sys_audioapiopened; /* what API is open, API_NONE if none */
static int audio_callback_is_open; /* true if we're open in callback mode */
/* current parameters (if an API is open) or requested ones otherwise: */
static t_audiosettings audio_nextsettings;
void sched_audio_callbackfn(void);
void sched_reopenmeplease(void);
/* true if some audio API is currently open */
int audio_isopen(void)
{
    if (sys_audioapiopened > 0)
        return (1);
    else return (0);
}
/* true if the given API dictates its own sample rate (only JACK does) */
static int audio_isfixedsr(int api)
{
    int fixed = 0;
#ifdef USEAPI_JACK
    /* the JACK server sets its own sample rate */
    if (api == API_JACK)
        fixed = 1;
#endif
    return (fixed);
}
/* true if the given API dictates its own block size (only JACK does) */
static int audio_isfixedblocksize(int api)
{
    int fixed = 0;
#ifdef USEAPI_JACK
    /* the JACK server sets its own block size */
    if (api == API_JACK)
        fixed = 1;
#endif
    return (fixed);
}
#ifdef USEAPI_JACK
int jack_get_blocksize(void);
#endif
/* block size imposed by the API, or 0 if the user may choose one freely */
static int audio_getfixedblocksize(int api)
{
    int blocksize = 0;
#ifdef USEAPI_JACK
    /* JACK dictates the block size; ask the backend for it */
    if (api == API_JACK)
        blocksize = jack_get_blocksize();
#endif
    return (blocksize);
}
/* inform rest of Pd of current channels and sample rate. Do this when
opening audio device. This is also called from alsamm but I think that
is no longer in use, so in principle this could be static. */
/* Inform the rest of Pd of the current channel counts and sample rate and
   (re)allocate the global DSP sample buffers accordingly.  Called when
   opening an audio device.  Zero channel counts still allocate 2-channel
   buffers so DSP can run with no device open. */
void sys_setchsr(int chin, int chout, int sr)
{
        /* buffer sizes for one DSP tick at the NEW channel counts */
    int inbytes = (chin ? chin : 2) *
                (DEFDACBLKSIZE*sizeof(t_sample));
    int outbytes = (chout ? chout : 2) *
                (DEFDACBLKSIZE*sizeof(t_sample));
        /* free the old buffers, sized by the OLD channel counts: this must
           happen before st_inchannels/st_outchannels are overwritten below */
    if (STUFF->st_soundin)
        freebytes(STUFF->st_soundin,
            (STUFF->st_inchannels? STUFF->st_inchannels : 2) *
                (DEFDACBLKSIZE*sizeof(t_sample)));
    if (STUFF->st_soundout)
        freebytes(STUFF->st_soundout,
            (STUFF->st_outchannels? STUFF->st_outchannels : 2) *
                (DEFDACBLKSIZE*sizeof(t_sample)));
    STUFF->st_inchannels = chin;
    STUFF->st_outchannels = chout;
        /* APIs like JACK impose their own sample rate; don't override it */
    if (!audio_isfixedsr(sys_audioapiopened))
        STUFF->st_dacsr = sr;
    STUFF->st_soundin = (t_sample *)getbytes(inbytes);
    memset(STUFF->st_soundin, 0, inbytes);
    STUFF->st_soundout = (t_sample *)getbytes(outbytes);
    memset(STUFF->st_soundout, 0, outbytes);
    logpost(NULL, PD_VERBOSE, "input channels = %d, output channels = %d",
        STUFF->st_inchannels, STUFF->st_outchannels);
        /* suspend/resume DSP so the graph picks up the new settings */
    canvas_resume_dsp(canvas_suspend_dsp());
}
/* Sanitize a device-list / channel-list pair in place.  On entry *ndev and
   *nchan may each be -1 ("unspecified").  On exit both lists have the same
   length, unused device slots are -1 and unused channel slots 0:
     - neither specified: request zero devices (defaults come from elsewhere);
     - only channels specified: use consecutive device numbers 0,1,2,...;
     - only devices specified: give each device SYS_DEFAULTCH channels;
     - lists of different length: extend the shorter one (device numbers
       count up from the last one; channel counts repeat the last one).
   Channel counts may legitimately remain zero here; callers that actually
   open audio compact those entries out afterward. */
static void audio_make_sane(int *ndev, int *devvec,
    int *nchan, int *chanvec, int maxdev)
{
    int i;
    if (*ndev == -1)
    {           /* no audio devices specified */
        if (*nchan == -1)
        {
                /* nothing specified at all: request zero devices.
                   (BUGFIX: the previous code had an unreachable
                   "if (*ndev >= 1)" branch here -- *ndev is known to be -1
                   at this point -- which has been removed; the observable
                   behavior is unchanged.) */
            *ndev = *nchan = 0;
        }
        else
        {
                /* channels given but no devices: use devices 0,1,2,... */
            for (i = 0; i < maxdev; i++)
                devvec[i] = i;
            *ndev = *nchan;
        }
    }
    else
    {
        if (*nchan == -1)
        {
                /* devices given but no channels: default channel count */
            *nchan = *ndev;
            for (i = 0; i < *ndev; i++)
                chanvec[i] = SYS_DEFAULTCH;
        }
        else if (*nchan > *ndev)
        {
                /* more channel counts than devices: invent devices,
                   counting up from the previous one */
            for (i = *ndev; i < *nchan; i++)
            {
                if (i == 0)
                    devvec[0] = DEFAULTAUDIODEV;
                else devvec[i] = devvec[i-1] + 1;
            }
            *ndev = *nchan;
        }
        else if (*nchan < *ndev)
        {
                /* more devices than channel counts: repeat the last one */
            for (i = *nchan; i < *ndev; i++)
            {
                if (i == 0)
                    chanvec[0] = SYS_DEFAULTCH;
                else chanvec[i] = chanvec[i-1];
            }
            *ndev = *nchan;
        }
    }
        /* mark the unused tails of both vectors */
    for (i = *ndev; i < maxdev; i++)
        devvec[i] = -1;
    for (i = *nchan; i < maxdev; i++)
        chanvec[i] = 0;
}
/* compact the list of audio devices by skipping those whose channel
counts are zero, and add up all channel counts. Assumes you've already
called make_sane above */
/* Compact a device/channel list pair in place by dropping entries whose
   channel count is zero, and report the total of the remaining channel
   counts in *totalchans.  Assumes the lists were already normalized by
   audio_make_sane() above.  'maxdev' is accepted for symmetry with
   audio_make_sane() but is not needed here. */
static void audio_compact_and_count_channels(int *ndev, int *devvec,
    int *chanvec, int *totalchans, int maxdev)
{
    int src, dst = 0, sum = 0;
    for (src = 0; src < *ndev; src++)
    {
        if (chanvec[src] <= 0)
            continue;           /* skip devices with no channels */
        chanvec[dst] = chanvec[src];
        devvec[dst] = devvec[src];
        sum += chanvec[src];
        dst++;
    }
    *totalchans = sum;
    *ndev = dst;
}
/* ----------------------- public routines ----------------------- */
/* Copy the requested ("next") audio settings into *a, lazily installing
   defaults on the first call.  For APIs that impose their own sample rate
   or block size (JACK), the returned values reflect what the API imposes
   rather than what was requested. */
void sys_get_audio_settings(t_audiosettings *a)
{
    static int initted;     /* one-shot guard for default initialization */
    if (!initted)
    {
        audio_nextsettings.a_api = API_DEFAULT;
        audio_nextsettings.a_srate = DEFAULTSRATE;
            /* one input and one output device, default device number and
               default channel count each */
        audio_nextsettings.a_nindev = audio_nextsettings.a_nchindev =
            audio_nextsettings.a_noutdev = audio_nextsettings.a_nchoutdev
                = 1;
        audio_nextsettings.a_indevvec[0] =
            audio_nextsettings.a_outdevvec[0] = DEFAULTAUDIODEV;
        audio_nextsettings.a_chindevvec[0] =
            audio_nextsettings.a_choutdevvec[0] = SYS_DEFAULTCH;
        audio_nextsettings.a_advance = DEFAULTADVANCE;
        audio_nextsettings.a_blocksize = DEFDACBLKSIZE;
        initted = 1;
    }
    *a = audio_nextsettings;
        /* fixed-rate / fixed-blocksize APIs override the request */
    if (audio_isfixedsr(a->a_api))
        a->a_srate = STUFF->st_dacsr;
    if (audio_isfixedblocksize(a->a_api))
        a->a_blocksize = audio_getfixedblocksize(a->a_api);
}
/* Since the channel vector might be longer than the
audio device vector, or vice versa, we fill the shorter one
in to match the longer one. Also, if both are empty, we fill in
one device (the default) and two channels. This function can leave number
of channels at zero which is appropriate for the dialog window but before
starting audio we also call audio_compact_and_count_channels below.*/
/* set audio device settings (after cleaning up the specified device and
channel vectors). The audio devices are "zero based" (i.e. "0" means the
first one.) We can later re-open audio and/or show the settings on a
dialog window. */
/* Install new audio settings as the ones to be used next time audio is
   (re)opened.  The settings are sanitized first: sample rate and advance
   are forced positive, the block size is clipped to a power of two in
   [DEFDACBLKSIZE, MAXBLOCKSIZE], and the device/channel vectors are
   normalized by audio_make_sane().  Also updates the scheduler advance and
   tells the GUI which API is selected; does NOT itself reopen audio.
   (Cleanup: removed the unused local variable 'i'.) */
void sys_set_audio_settings(t_audiosettings *a)
{
    char indevlist[MAXNDEV*DEVDESCSIZE], outdevlist[MAXNDEV*DEVDESCSIZE];
    int indevs = 0, outdevs = 0, canmulti = 0, cancallback = 0;
        /* query the device lists for this API; the results are unused here,
           presumably the call is made for its side effects -- TODO confirm */
    sys_get_audio_devs(indevlist, &indevs, outdevlist, &outdevs, &canmulti,
        &cancallback, MAXNDEV, DEVDESCSIZE, a->a_api);
    if (a->a_srate < 1)
        a->a_srate = DEFAULTSRATE;
    if (a->a_advance < 0)
        a->a_advance = DEFAULTADVANCE;
        /* round block size down to a power of two and range-check it */
    a->a_blocksize = 1 << ilog2(a->a_blocksize);
    if (a->a_blocksize < DEFDACBLKSIZE || a->a_blocksize > MAXBLOCKSIZE)
        a->a_blocksize = DEFDACBLKSIZE;
    audio_make_sane(&a->a_noutdev, a->a_outdevvec,
        &a->a_nchoutdev, a->a_choutdevvec, MAXAUDIOOUTDEV);
    audio_make_sane(&a->a_nindev, a->a_indevvec,
        &a->a_nchindev, a->a_chindevvec, MAXAUDIOINDEV);
    sys_schedadvance = a->a_advance * 1000;     /* msec -> microseconds */
    audio_nextsettings = *a;
    sys_log_error(ERR_NOTHING);
    sys_vgui("set pd_whichapi %d\n", audio_nextsettings.a_api);
}
/* Close whichever audio API is currently open.  A no-op if no API is open
   or if an external scheduler library is in charge.  The #ifdef'd
   "if ... else" fragments below chain into one long if/else ladder across
   the compiled-in APIs.  Afterwards audio is marked closed for the
   scheduler and the GUI. */
void sys_close_audio(void)
{
    if (sys_externalschedlib)
    {
        return;
    }
    if (!audio_isopen())
        return;
#ifdef USEAPI_PORTAUDIO
    if (sys_audioapiopened == API_PORTAUDIO)
        pa_close_audio();
    else
#endif
#ifdef USEAPI_JACK
    if (sys_audioapiopened == API_JACK)
        jack_close_audio();
    else
#endif
#ifdef USEAPI_OSS
    if (sys_audioapiopened == API_OSS)
        oss_close_audio();
    else
#endif
#ifdef USEAPI_ALSA
    if (sys_audioapiopened == API_ALSA)
        alsa_close_audio();
    else
#endif
#ifdef USEAPI_MMIO
    if (sys_audioapiopened == API_MMIO)
        mmio_close_audio();
    else
#endif
#ifdef USEAPI_AUDIOUNIT
    if (sys_audioapiopened == API_AUDIOUNIT)
        audiounit_close_audio();
    else
#endif
#ifdef USEAPI_ESD
    if (sys_audioapiopened == API_ESD)
        esd_close_audio();
    else
#endif
#ifdef USEAPI_DUMMY
    if (sys_audioapiopened == API_DUMMY)
        dummy_close_audio();
    else
#endif
        post("sys_close_audio: unknown API %d", sys_audioapiopened);
        /* record the closed state and tell the GUI */
    sys_audioapiopened = API_NONE;
    sched_set_using_audio(SCHED_AUDIO_NONE);
    audio_callback_is_open = 0;
    sys_vgui("set pd_whichapi 0\n");
}
/* Allocate the DSP buffers for the currently requested settings without
   opening any device: compact the device lists, total the channel counts
   and hand them to sys_setchsr(). */
void sys_init_audio(void)
{
    t_audiosettings as;
    int totalinchans, totaloutchans;
    sys_get_audio_settings(&as);
    audio_compact_and_count_channels(&as.a_nindev, as.a_indevvec,
        as.a_chindevvec, &totalinchans, MAXAUDIOINDEV);
    audio_compact_and_count_channels(&as.a_noutdev, as.a_outdevvec,
        as.a_choutdevvec, &totaloutchans, MAXAUDIOOUTDEV);
    sys_setchsr(totalinchans, totaloutchans, as.a_srate);
}
/* open audio using currently requested parameters */
/* Open audio using the currently requested ("next") parameters.  The
   device lists are compacted (zero-channel entries dropped), the DSP
   buffers resized via sys_setchsr(), and the request is dispatched to the
   selected API; the #ifdef'd fragments chain into one if/else ladder.  On
   failure the API is marked closed; either way the GUI is told which API
   ended up open. */
void sys_reopen_audio(void)
{
    t_audiosettings as;
    int outcome = 0, totalinchans, totaloutchans;
    sys_get_audio_settings(&as);
        /* fprintf(stderr, "audio in ndev %d, dev %d; out ndev %d, dev %d\n",
            as.a_nindev, as.a_indevvec[0], as.a_noutdev, as.a_outdevvec[0]); */
    audio_compact_and_count_channels(&as.a_nindev, as.a_indevvec,
        as.a_chindevvec, &totalinchans, MAXAUDIOINDEV);
    audio_compact_and_count_channels(&as.a_noutdev, as.a_outdevvec,
        as.a_choutdevvec, &totaloutchans, MAXAUDIOOUTDEV);
    sys_setchsr(totalinchans, totaloutchans, as.a_srate);
    if (!as.a_nindev && !as.a_noutdev)
    {
            /* nothing to open: run the scheduler without audio */
        sched_set_using_audio(SCHED_AUDIO_NONE);
        return;
    }
#ifdef USEAPI_PORTAUDIO
    if (as.a_api == API_PORTAUDIO)
    {
        int blksize = (as.a_blocksize ? as.a_blocksize : 64);
            /* enough buffers to cover the requested scheduler advance */
        int nbufs = (double)sys_schedadvance / 1000000. * as.a_srate / blksize;
        if (nbufs < 1) nbufs = 1;
        outcome = pa_open_audio((as.a_nindev > 0 ? as.a_chindevvec[0] : 0),
        (as.a_noutdev > 0 ? as.a_choutdevvec[0] : 0), as.a_srate,
        STUFF->st_soundin, STUFF->st_soundout, blksize, nbufs,
        (as.a_nindev > 0 ? as.a_indevvec[0] : 0),
        (as.a_noutdev > 0 ? as.a_outdevvec[0] : 0),
            (as.a_callback ? sched_audio_callbackfn : 0));
    }
    else
#endif
#ifdef USEAPI_JACK
    if (as.a_api == API_JACK)
        outcome = jack_open_audio((as.a_nindev > 0 ? as.a_chindevvec[0] : 0),
            (as.a_noutdev > 0 ? as.a_choutdevvec[0] : 0),
                (as.a_callback ? sched_audio_callbackfn : 0));
    else
#endif
#ifdef USEAPI_OSS
    if (as.a_api == API_OSS)
        outcome = oss_open_audio(as.a_nindev, as.a_indevvec,
            as.a_nindev, as.a_chindevvec, as.a_noutdev, as.a_outdevvec,
            as.a_noutdev, as.a_choutdevvec, as.a_srate,
            as.a_blocksize);
    else
#endif
#ifdef USEAPI_ALSA
    if (as.a_api == API_ALSA)
        outcome = alsa_open_audio(as.a_nindev, as.a_indevvec,
            as.a_nindev, as.a_chindevvec, as.a_noutdev,
            as.a_outdevvec, as.a_noutdev, as.a_choutdevvec, as.a_srate,
            as.a_blocksize);
    else
#endif
#ifdef USEAPI_MMIO
    if (as.a_api == API_MMIO)
        outcome = mmio_open_audio(as.a_nindev, as.a_indevvec,
            as.a_nindev, as.a_chindevvec, as.a_noutdev,
            as.a_outdevvec, as.a_noutdev, as.a_choutdevvec, as.a_srate,
            as.a_blocksize);
    else
#endif
#ifdef USEAPI_AUDIOUNIT
    if (as.a_api == API_AUDIOUNIT)
            /* BUGFIX: this branch referenced the nonexistent fields
               a_chindev/a_choutdev and gated the output channel count on
               a_nindev; corrected to the t_audiosettings fields used by
               every other branch */
        outcome = audiounit_open_audio(
            (as.a_nindev > 0 ? as.a_chindevvec[0] : 0),
            (as.a_noutdev > 0 ? as.a_choutdevvec[0] : 0), as.a_srate);
    else
#endif
#ifdef USEAPI_ESD
        /* BUGFIX: this branch tested API_ALSA (copy-paste error), so ESD
           could never be opened; esd_close_audio()/esd_send_dacs() above
           and below both test API_ESD */
    if (as.a_api == API_ESD)
        outcome = esd_open_audio(as.a_nindev, as.a_indevvec,
            as.a_nindev, as.a_chindevvec, as.a_noutdev,
            as.a_outdevvec, as.a_noutdev, as.a_choutdevvec, as.a_srate);
    else
#endif
#ifdef USEAPI_DUMMY
    if (as.a_api == API_DUMMY)
        outcome = dummy_open_audio(as.a_nindev, as.a_noutdev,
            as.a_srate);
    else
#endif
    if (as.a_api == API_NONE)
        ;
    else post("unknown audio API specified");
    if (outcome)    /* failed */
    {
        sys_audioapiopened = API_NONE;
        sched_set_using_audio(SCHED_AUDIO_NONE);
        audio_callback_is_open = 0;
    }
    else
    {
        sys_audioapiopened = as.a_api;
        sched_set_using_audio(
            (as.a_callback ? SCHED_AUDIO_CALLBACK : SCHED_AUDIO_POLL));
        audio_callback_is_open = as.a_callback;
    }
    sys_vgui("set pd_whichapi %d\n", sys_audioapiopened);
}
/* Transfer one scheduler tick's worth of samples to/from whichever audio
   API is open, dispatching on sys_audioapiopened (same #ifdef if/else
   ladder pattern as sys_close_audio above).  Returns the backend's status
   code, or 0 if no known API is open. */
int sys_send_dacs(void)
{
#ifdef USEAPI_PORTAUDIO
    if (sys_audioapiopened == API_PORTAUDIO)
        return (pa_send_dacs());
    else
#endif
#ifdef USEAPI_JACK
    if (sys_audioapiopened == API_JACK)
        return (jack_send_dacs());
    else
#endif
#ifdef USEAPI_OSS
    if (sys_audioapiopened == API_OSS)
        return (oss_send_dacs());
    else
#endif
#ifdef USEAPI_ALSA
    if (sys_audioapiopened == API_ALSA)
        return (alsa_send_dacs());
    else
#endif
#ifdef USEAPI_MMIO
    if (sys_audioapiopened == API_MMIO)
        return (mmio_send_dacs());
    else
#endif
#ifdef USEAPI_AUDIOUNIT
    if (sys_audioapiopened == API_AUDIOUNIT)
        return (audiounit_send_dacs());
    else
#endif
#ifdef USEAPI_ESD
    if (sys_audioapiopened == API_ESD)
        return (esd_send_dacs());
    else
#endif
#ifdef USEAPI_DUMMY
    if (sys_audioapiopened == API_DUMMY)
        return (dummy_send_dacs());
    else
#endif
    post("unknown API");
    return (0);
}
/* current DAC sample rate (as last set by sys_setchsr()) */
t_float sys_getsr(void)
{
    return (STUFF->st_dacsr);
}
/* current number of output channels */
int sys_get_outchannels(void)
{
    return (STUFF->st_outchannels);
}
/* current number of input channels */
int sys_get_inchannels(void)
{
    return (STUFF->st_inchannels);
}
/* this could later be set by a preference but for now it seems OK to just
keep jack audio open but close unused audio devices for any other API */
/* Whether the open API should be kept open even when unused: true only for
   JACK; other APIs get their unused devices closed. */
int audio_shouldkeepopen(void)
{
    if (sys_audioapiopened == API_JACK)
        return (1);
    return (0);
}
/* get names of available audio devices for the specified API */
/* Get the names of the available input and output audio devices for the
   specified API.  'indevlist'/'outdevlist' are arrays of 'maxndev' slots
   of 'devdescsize' bytes each; *canmulti reports whether the API can open
   several devices at once and *cancallback whether it supports callback
   mode.  An unknown API gets three placeholder device names.  Same
   #ifdef if/else ladder pattern as the functions above. */
void sys_get_audio_devs(char *indevlist, int *nindevs,
    char *outdevlist, int *noutdevs, int *canmulti, int *cancallback,
        int maxndev, int devdescsize, int api)
{
    *cancallback = 0;   /* may be overridden by specific API implementation */
#ifdef USEAPI_PORTAUDIO
    if (api == API_PORTAUDIO)
    {
        pa_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
        *cancallback = 1;
    }
    else
#endif
#ifdef USEAPI_JACK
    if (api == API_JACK)
    {
        jack_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
        *cancallback = 1;
    }
    else
#endif
#ifdef USEAPI_OSS
    if (api == API_OSS)
    {
        oss_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
    }
    else
#endif
#ifdef USEAPI_ALSA
    if (api == API_ALSA)
    {
        alsa_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
    }
    else
#endif
#ifdef USEAPI_MMIO
    if (api == API_MMIO)
    {
        mmio_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
    }
    else
#endif
#ifdef USEAPI_AUDIOUNIT
    if (api == API_AUDIOUNIT)
    {
    }
    else
#endif
#ifdef USEAPI_ESD
    if (api == API_ESD)
    {
        esd_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
    }
    else
#endif
#ifdef USEAPI_DUMMY
    if (api == API_DUMMY)
    {
        dummy_getdevs(indevlist, nindevs, outdevlist, noutdevs, canmulti,
            maxndev, devdescsize);
    }
    else
#endif
    {
            /* this shouldn't happen once all the above get filled in. */
        int i;
        *nindevs = *noutdevs = 3;
        for (i = 0; i < 3; i++)
        {
            sprintf(indevlist + i * devdescsize, "input device #%d", i+1);
            sprintf(outdevlist + i * devdescsize, "output device #%d", i+1);
        }
        *canmulti = 0;
    }
}
/* start an audio settings dialog window */
/* Open the audio-settings dialog: push the available device names to the
   GUI as Tcl list variables, then invoke pdtk_audio_dialog with the
   current settings.  Values the user must not change (API-imposed sample
   rate / block size, callback support) are prefixed with '!' so the GUI
   shows them disabled. */
void glob_audio_properties(t_pd *dummy, t_floatarg flongform)
{
    char buf[MAXPDSTRING];
    t_audiosettings as;
        /* these are all the devices on your system: */
    char indevlist[MAXNDEV*DEVDESCSIZE], outdevlist[MAXNDEV*DEVDESCSIZE];
    char device[MAXPDSTRING];
    int nindevs = 0, noutdevs = 0, canmulti = 0, cancallback = 0, i;
    sys_get_audio_devs(indevlist, &nindevs, outdevlist, &noutdevs, &canmulti,
        &cancallback, MAXNDEV, DEVDESCSIZE, audio_nextsettings.a_api);
        /* hand the device name lists to the GUI (brace-escaped for Tcl) */
    sys_gui("global audio_indevlist; set audio_indevlist {}\n");
    for (i = 0; i < nindevs; i++)
        sys_vgui("lappend audio_indevlist {%s}\n",
            pdgui_strnescape(device, MAXPDSTRING, indevlist + i * DEVDESCSIZE, 0));
    sys_gui("global audio_outdevlist; set audio_outdevlist {}\n");
    for (i = 0; i < noutdevs; i++)
        sys_vgui("lappend audio_outdevlist {%s}\n",
            pdgui_strnescape(device, MAXPDSTRING, outdevlist + i * DEVDESCSIZE, 0));
    sys_get_audio_settings(&as);
        /* multi-device setups always get the long form of the dialog */
    if (as.a_nindev > 1 || as.a_noutdev > 1)
        flongform = 1;
        /* values that are fixed and must not be changed by the GUI are
           prefixed with '!'; * the GUI will then display these values but
           disable their widgets */
    snprintf(buf, MAXPDSTRING,
        "pdtk_audio_dialog %%s \
        %d %d %d %d %d %d %d %d \
        %d %d %d %d %d %d %d %d \
        %s%d %d %d %s%d %d %s%d\n",
        as.a_indevvec[0], as.a_indevvec[1],
        as.a_indevvec[2], as.a_indevvec[3],
        as.a_chindevvec[0], as.a_chindevvec[1],
        as.a_chindevvec[2], as.a_chindevvec[3],
        as.a_outdevvec[0], as.a_outdevvec[1],
        as.a_outdevvec[2], as.a_outdevvec[3],
        as.a_choutdevvec[0], as.a_choutdevvec[1],
        as.a_choutdevvec[2], as.a_choutdevvec[3],
        audio_isfixedsr(as.a_api)?"!":"", as.a_srate, as.a_advance, canmulti,
        cancallback?"":"!", as.a_callback,
        (flongform != 0), audio_isfixedblocksize(as.a_api)?"!":"", as.a_blocksize);
    gfxstub_deleteforkey(0);
    gfxstub_new(&glob_pdobject, (void *)glob_audio_properties, buf);
}
/* new values from dialog window */
/* Accept new values back from the audio dialog window.  Atom layout:
   0-3 input devices, 4-7 input channel counts, 8-11 output devices,
   12-15 output channel counts, 16 sample rate, 17 advance, 18 callback
   flag, 19 block size.  The lists are compacted (zero-channel entries
   dropped), sanitized, stored via sys_set_audio_settings() and audio is
   reopened -- deferred to the scheduler when callback mode is involved. */
void glob_audio_dialog(t_pd *dummy, t_symbol *s, int argc, t_atom *argv)
{
    int i;
    t_audiosettings as;
    as.a_api = audio_nextsettings.a_api;
    as.a_srate = atom_getfloatarg(16, argc, argv);
    as.a_advance = atom_getfloatarg(17, argc, argv);
    as.a_callback = atom_getfloatarg(18, argc, argv);
    as.a_blocksize = atom_getfloatarg(19, argc, argv);
    for (i = 0; i < 4; i++)
    {
        as.a_indevvec[i] = atom_getfloatarg(i, argc, argv);
        as.a_chindevvec[i] = atom_getfloatarg(i+4, argc, argv);
        as.a_outdevvec[i] = atom_getfloatarg(i+8, argc, argv);
        as.a_choutdevvec[i] = atom_getfloatarg(i+12, argc, argv);
    }
        /* compact out any zeros and count nonzero entries */
    for (i = 0, as.a_nindev = 0; i < 4; i++)
    {
        if (as.a_chindevvec[i])
        {
            as.a_indevvec[as.a_nindev] = as.a_indevvec[i];
            as.a_chindevvec[as.a_nindev] = as.a_chindevvec[i];
            as.a_nindev++;
        }
    }
    for (i = 0, as.a_noutdev = 0; i < 4; i++)
    {
        if (as.a_choutdevvec[i])
        {
            as.a_outdevvec[as.a_noutdev] = as.a_outdevvec[i];
            as.a_choutdevvec[as.a_noutdev] = as.a_choutdevvec[i];
            as.a_noutdev++;
        }
    }
    as.a_nchindev = as.a_nindev;
    as.a_nchoutdev = as.a_noutdev;
    if (as.a_callback < 0)
        as.a_callback = 0;
        /* force block size to a legal power of two */
    as.a_blocksize = (1<<ilog2(as.a_blocksize));
    if (as.a_blocksize < DEFDACBLKSIZE || as.a_blocksize > MAXBLOCKSIZE)
        as.a_blocksize = DEFDACBLKSIZE;
        /* when neither the old nor new state uses callbacks we can close
           and reopen right here; otherwise ask the scheduler to do it */
    if (!audio_callback_is_open && !as.a_callback)
        sys_close_audio();
    sys_set_audio_settings(&as);
    if (!audio_callback_is_open && !as.a_callback)
        sys_reopen_audio();
    else sched_reopenmeplease();
}
/* Post the available audio devices (and then the MIDI devices) to the Pd
   console.  Numbering normally starts at 1 to agree with the command-line
   flags, except for MMIO whose list starts at 0 (the "mapper" device). */
void sys_listdevs(void)
{
    char indevlist[MAXNDEV*DEVDESCSIZE], outdevlist[MAXNDEV*DEVDESCSIZE];
    int nindevs = 0, noutdevs = 0, i, canmulti = 0, cancallback = 0;
    sys_get_audio_devs(indevlist, &nindevs, outdevlist, &noutdevs,
        &canmulti, &cancallback, MAXNDEV, DEVDESCSIZE,
            audio_nextsettings.a_api);
    if (!nindevs)
        post("no audio input devices found");
    else
    {
            /* To agree with command line flags, normally start at 1 */
            /* But microsoft "MMIO" device list starts at 0 (the "mapper"). */
            /* (see also sys_mmio variable in s_main.c) */
        post("audio input devices:");
        for (i = 0; i < nindevs; i++)
            post("%d. %s", i + (audio_nextsettings.a_api != API_MMIO),
                indevlist + i * DEVDESCSIZE);
    }
    if (!noutdevs)
        post("no audio output devices found");
    else
    {
        post("audio output devices:");
        for (i = 0; i < noutdevs; i++)
            post("%d. %s", i + (audio_nextsettings.a_api != API_MMIO),
                outdevlist + i * DEVDESCSIZE);
    }
    post("API number %d\n", audio_nextsettings.a_api);
    sys_listmididevs();
}
/* GUI callback: select audio API number 'f'.  A nonzero API that matches
   the current one just reopens audio if needed (for APIs like JACK that
   want to stay open); a different API closes audio, resets the device
   parameters to defaults, reopens and shows the properties dialog.  API
   zero closes audio. */
void glob_audio_setapi(void *dummy, t_floatarg f)
{
    int newapi = f;
    if (newapi)
    {
        if (newapi == audio_nextsettings.a_api)
        {
            if (!audio_isopen() && audio_shouldkeepopen())
                sys_reopen_audio();
        }
        else
        {
            sys_close_audio();
            audio_nextsettings.a_api = newapi;
                /* bash device params back to default */
            audio_nextsettings.a_nindev = audio_nextsettings.a_nchindev =
                audio_nextsettings.a_noutdev = audio_nextsettings.a_nchoutdev
                    = 1;
            audio_nextsettings.a_indevvec[0] =
                audio_nextsettings.a_outdevvec[0] = DEFAULTAUDIODEV;
            audio_nextsettings.a_chindevvec[0] =
                audio_nextsettings.a_choutdevvec[0] = SYS_DEFAULTCH;
            audio_nextsettings.a_blocksize = DEFDACBLKSIZE;
            sys_reopen_audio();
        }
        glob_audio_properties(0, 0);
    }
    else if (audio_isopen())
    {
        sys_close_audio();
    }
}
/* start or stop the audio hardware */
/* Start or stop the audio hardware: nonzero 'onoff' opens audio unless it
   is already open; zero closes it if it is open. */
void sys_set_audio_state(int onoff)
{
    int isopen = audio_isopen();
    if (onoff)
    {
        if (!isopen)
            sys_reopen_audio();
    }
    else if (isopen)
        sys_close_audio();
}
#define MAXAPIENTRY 10 /* NOTE(review): appears unused in this chunk -- confirm before removing */
/* one entry of the compiled-in audio API table: a human-readable name
   plus the numeric API identifier */
typedef struct _apientry
{
    char a_name[30];
    int a_id;
} t_apientry;
/* table of the audio APIs compiled into this binary; each USEAPI_*
   macro is enabled (or not) by the build configuration */
static t_apientry audio_apilist[] = {
#ifdef USEAPI_OSS
    {"OSS", API_OSS},
#endif
#ifdef USEAPI_MMIO
    {"\"standard (MMIO)\"", API_MMIO},
#endif
#ifdef USEAPI_ALSA
    {"ALSA", API_ALSA},
#endif
#ifdef USEAPI_PORTAUDIO
#ifdef _WIN32
    {"\"ASIO (portaudio)\"", API_PORTAUDIO},
#else
#ifdef __APPLE__
    {"\"standard (portaudio)\"", API_PORTAUDIO},
#else
    {"portaudio", API_PORTAUDIO},
#endif
#endif
#endif /* USEAPI_PORTAUDIO */
#ifdef USEAPI_JACK
    {"jack", API_JACK},
#endif
#ifdef USEAPI_AUDIOUNIT
    {"Audiounit", API_AUDIOUNIT},
#endif
#ifdef USEAPI_ESD
    {"ESD", API_ESD},
#endif
#ifdef USEAPI_DUMMY
    {"dummy", API_DUMMY},
#endif
};
/* Write into "buf" a Tcl-style list of the compiled-in audio APIs,
   e.g. "{ {OSS 1} {ALSA 2} }".  With fewer than two APIs available the
   list is left empty ("{}").  The caller supplies a large-enough buffer. */
void sys_get_audio_apis(char *buf)
{
    const unsigned int napis = sizeof(audio_apilist)/sizeof(t_apientry);
    unsigned int n;
    if (napis < 2)
        strcpy(buf, "{}");
    else
    {
        char *tail = buf;
        tail += sprintf(tail, "{ ");
        for (n = 0; n < napis; n++)
            tail += sprintf(tail, "{%s %d} ",
                audio_apilist[n].a_name, audio_apilist[n].a_id);
        strcpy(tail, "}");
    }
}
/* convert a device name to a device number for the current API (output
   device if 'output' is true, otherwise input device).  Tries an exact
   match first, then a match up to the end of the shorter string.
   Negative on failure. */
int sys_audiodevnametonumber(int output, const char *name)
{
    char indevlist[MAXNDEV*DEVDESCSIZE], outdevlist[MAXNDEV*DEVDESCSIZE];
    int nindevs = 0, noutdevs = 0, i, canmulti, cancallback;
    const char *devlist;
    int ndevs;
    sys_get_audio_devs(indevlist, &nindevs, outdevlist, &noutdevs,
        &canmulti, &cancallback, MAXNDEV, DEVDESCSIZE,
        audio_nextsettings.a_api);
    devlist = output ? outdevlist : indevlist;
    ndevs = output ? noutdevs : nindevs;
    /* try first for exact match */
    for (i = 0; i < ndevs; i++)
        if (!strcmp(name, devlist + i * DEVDESCSIZE))
            return (i);
    /* failing that, a match up to end of shorter string */
    for (i = 0; i < ndevs; i++)
    {
        unsigned long comp = strlen(name);
        if (comp > strlen(devlist + i * DEVDESCSIZE))
            comp = strlen(devlist + i * DEVDESCSIZE);
        if (!strncmp(name, devlist + i * DEVDESCSIZE, comp))
            return (i);
    }
    return (-1);
}
/* convert a (1-based) device number to a device name. (Output device if
   'output' parameter is true, otherwise input device). Empty string on
   failure.  If namesize < 1 there is no room even for the terminating
   null, so nothing is written. */
void sys_audiodevnumbertoname(int output, int devno, char *name, int namesize)
{
    char indevlist[MAXNDEV*DEVDESCSIZE], outdevlist[MAXNDEV*DEVDESCSIZE];
    int nindevs = 0, noutdevs = 0, canmulti, cancallback;
    if (namesize < 1)   /* was unchecked: name[namesize-1] wrote out of bounds */
        return;
    if (devno < 0)
    {
        *name = 0;
        return;
    }
    sys_get_audio_devs(indevlist, &nindevs, outdevlist, &noutdevs,
        &canmulti, &cancallback, MAXNDEV, DEVDESCSIZE,
        audio_nextsettings.a_api);
    if (output && (devno < noutdevs))
        strncpy(name, outdevlist + devno * DEVDESCSIZE, namesize);
    else if (!output && (devno < nindevs))
        strncpy(name, indevlist + devno * DEVDESCSIZE, namesize);
    else *name = 0;
    /* strncpy does not null-terminate on truncation; force it */
    name[namesize-1] = 0;
}
| 29.903118 | 84 | 0.616356 | [
"vector"
] |
d25e8c93d622daeb063d52705efb0f9583b1cf98 | 102,906 | c | C | front-end/qemu-2.3/hw/net/rtl8139.c | zheli-1/crete-dev | a226c245f51347b88ba9a95448a694bf1997a080 | [
"BSD-2-Clause-FreeBSD"
] | 52 | 2016-11-03T06:48:16.000Z | 2021-03-30T07:22:41.000Z | front-end/qemu-2.3/hw/net/rtl8139.c | zheli-1/crete-dev | a226c245f51347b88ba9a95448a694bf1997a080 | [
"BSD-2-Clause-FreeBSD"
] | 46 | 2016-11-16T02:07:38.000Z | 2020-04-01T06:17:33.000Z | front-end/qemu-2.3/hw/net/rtl8139.c | zheli-1/crete-dev | a226c245f51347b88ba9a95448a694bf1997a080 | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2016-11-06T00:41:27.000Z | 2020-06-01T07:18:48.000Z | /**
* QEMU RTL8139 emulation
*
* Copyright (c) 2006 Igor Kovalenko
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* Modifications:
* 2006-Jan-28 Mark Malakanov : TSAD and CSCR implementation (for Windows driver)
*
* 2006-Apr-28 Juergen Lock : EEPROM emulation changes for FreeBSD driver
* HW revision ID changes for FreeBSD driver
*
* 2006-Jul-01 Igor Kovalenko : Implemented loopback mode for FreeBSD driver
* Corrected packet transfer reassembly routine for 8139C+ mode
* Rearranged debugging print statements
* Implemented PCI timer interrupt (disabled by default)
* Implemented Tally Counters, increased VM load/save version
* Implemented IP/TCP/UDP checksum task offloading
*
* 2006-Jul-04 Igor Kovalenko : Implemented TCP segmentation offloading
* Fixed MTU=1500 for produced ethernet frames
*
* 2006-Jul-09 Igor Kovalenko : Fixed TCP header length calculation while processing
* segmentation offloading
* Removed slirp.h dependency
* Added rx/tx buffer reset when enabling rx/tx operation
*
* 2010-Feb-04 Frediano Ziglio: Rewrote timer support using QEMU timer only
* when strictly needed (required for for
* Darwin)
* 2011-Mar-22 Benjamin Poirier: Implemented VLAN offloading
*/
/* For crc32 */
#include <zlib.h>
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "sysemu/dma.h"
#include "qemu/timer.h"
#include "net/net.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "qemu/iov.h"
/* debug RTL8139 card */
//#define DEBUG_RTL8139 1
#define PCI_FREQUENCY 33000000L /* PCI bus clock in Hz (33 MHz) */
/* write "input" into a register while keeping, from the current value
   "curr", the bits selected by "mask" (i.e. mask = read-only bits) */
#define SET_MASKED(input, mask, curr) \
    ( ( (input) & ~(mask) ) | ( (curr) & (mask) ) )
/* arg % size for size which is a power of 2 */
#define MOD2(input, size) \
    ( ( input ) & ( size - 1 ) )
/* Ethernet framing constants */
#define ETHER_ADDR_LEN 6
#define ETHER_TYPE_LEN 2
#define ETH_HLEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
#define ETH_MTU 1500
#define VLAN_TCI_LEN 2
#define VLAN_HLEN (ETHER_TYPE_LEN + VLAN_TCI_LEN)
/* debug tracing: a real macro when enabled, otherwise an inline no-op
   that still lets the compiler type-check the printf arguments */
#if defined (DEBUG_RTL8139)
# define DPRINTF(fmt, ...) \
do { fprintf(stderr, "RTL8139: " fmt, ## __VA_ARGS__); } while (0)
#else
static inline GCC_FMT_ATTR(1, 2) int DPRINTF(const char *fmt, ...)
{
    return 0;
}
#endif
/* QOM type name and cast helper for this device */
#define TYPE_RTL8139 "rtl8139"
#define RTL8139(obj) \
OBJECT_CHECK(RTL8139State, (obj), TYPE_RTL8139)
/* Symbolic offsets to registers (byte offsets into the I/O/MMIO BAR). */
enum RTL8139_registers {
    MAC0 = 0, /* Ethernet hardware address. */
    MAR0 = 8, /* Multicast filter. */
    TxStatus0 = 0x10,/* Transmit status (Four 32bit registers). C mode only */
    /* Dump Tally Counter control register(64bit). C+ mode only */
    TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
    RxBuf = 0x30,
    ChipCmd = 0x37,
    RxBufPtr = 0x38,
    RxBufAddr = 0x3A,
    IntrMask = 0x3C,
    IntrStatus = 0x3E,
    TxConfig = 0x40,
    RxConfig = 0x44,
    Timer = 0x48, /* A general-purpose counter. */
    RxMissed = 0x4C, /* 24 bits valid, write clears. */
    Cfg9346 = 0x50,
    Config0 = 0x51,
    Config1 = 0x52,
    FlashReg = 0x54,
    MediaStatus = 0x58,
    Config3 = 0x59,
    Config4 = 0x5A, /* absent on RTL-8139A */
    HltClk = 0x5B,
    MultiIntr = 0x5C,
    PCIRevisionID = 0x5E,
    TxSummary = 0x60, /* TSAD register. Transmit Status of All Descriptors*/
    BasicModeCtrl = 0x62,
    BasicModeStatus = 0x64,
    NWayAdvert = 0x66,
    NWayLPAR = 0x68,
    NWayExpansion = 0x6A,
    /* Undocumented registers, but required for proper operation. */
    FIFOTMS = 0x70, /* FIFO Control and test. */
    CSCR = 0x74, /* Chip Status and Configuration Register. */
    PARA78 = 0x78,
    PARA7c = 0x7c, /* Magic transceiver parameter register. */
    Config5 = 0xD8, /* absent on RTL-8139A */
    /* C+ mode */
    TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
    RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
    CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
    IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
    RxRingAddrLO = 0xE4, /* 64-bit start addr of Rx ring */
    RxRingAddrHI = 0xE8, /* 64-bit start addr of Rx ring */
    TxThresh = 0xEC, /* Early Tx threshold */
};
/* per-register masks of the bits that software may actually change */
enum ClearBitMasks {
    MultiIntrClear = 0xF000,
    ChipCmdClear = 0xE2,
    Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
};
/* bits of the ChipCmd register */
enum ChipCmdBits {
    CmdReset = 0x10,
    CmdRxEnb = 0x08,
    CmdTxEnb = 0x04,
    RxBufEmpty = 0x01,
};
/* C+ mode: bits of the CpCmd register */
enum CplusCmdBits {
    CPlusRxVLAN = 0x0040, /* enable receive VLAN detagging */
    CPlusRxChkSum = 0x0020, /* enable receive checksum offloading */
    CPlusRxEnb = 0x0002,
    CPlusTxEnb = 0x0001,
};
/* Interrupt register bits, using my own meaningful names. */
enum IntrStatusBits {
    PCIErr = 0x8000,
    PCSTimeout = 0x4000,
    RxFIFOOver = 0x40,
    RxUnderrun = 0x20, /* Packet Underrun / Link Change */
    RxOverflow = 0x10,
    TxErr = 0x08,
    TxOK = 0x04,
    RxErr = 0x02,
    RxOK = 0x01,
    RxAckBits = RxFIFOOver | RxOverflow | RxOK,
};
/* bits of the per-descriptor TxStatus registers (C mode) */
enum TxStatusBits {
    TxHostOwns = 0x2000,
    TxUnderrun = 0x4000,
    TxStatOK = 0x8000,
    TxOutOfWindow = 0x20000000,
    TxAborted = 0x40000000,
    TxCarrierLost = 0x80000000,
};
/* status bits of the per-packet Rx header written to the ring */
enum RxStatusBits {
    RxMulticast = 0x8000,
    RxPhysical = 0x4000,
    RxBroadcast = 0x2000,
    RxBadSymbol = 0x0020,
    RxRunt = 0x0010,
    RxTooLong = 0x0008,
    RxCRCErr = 0x0004,
    RxBadAlign = 0x0002,
    RxStatusOK = 0x0001,
};
/* Bits in RxConfig. */
enum rx_mode_bits {
    AcceptErr = 0x20,
    AcceptRunt = 0x10,
    AcceptBroadcast = 0x08,
    AcceptMulticast = 0x04,
    AcceptMyPhys = 0x02,
    AcceptAllPhys = 0x01,
};
/* Bits in TxConfig. */
enum tx_config_bits {
    /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
    TxIFGShift = 24,
    TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */
    TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */
    TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */
    TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */
    TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
    TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
    TxClearAbt = (1 << 0), /* Clear abort (WO) */
    TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */
    TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */
    TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
};
/* Transmit Status of All Descriptors (TSAD) Register */
enum TSAD_bits {
    TSAD_TOK3 = 1<<15, // TOK bit of Descriptor 3
    TSAD_TOK2 = 1<<14, // TOK bit of Descriptor 2
    TSAD_TOK1 = 1<<13, // TOK bit of Descriptor 1
    TSAD_TOK0 = 1<<12, // TOK bit of Descriptor 0
    TSAD_TUN3 = 1<<11, // TUN bit of Descriptor 3
    TSAD_TUN2 = 1<<10, // TUN bit of Descriptor 2
    TSAD_TUN1 = 1<<9, // TUN bit of Descriptor 1
    TSAD_TUN0 = 1<<8, // TUN bit of Descriptor 0
    TSAD_TABT3 = 1<<07, // TABT bit of Descriptor 3
    TSAD_TABT2 = 1<<06, // TABT bit of Descriptor 2
    TSAD_TABT1 = 1<<05, // TABT bit of Descriptor 1
    TSAD_TABT0 = 1<<04, // TABT bit of Descriptor 0
    TSAD_OWN3 = 1<<03, // OWN bit of Descriptor 3
    TSAD_OWN2 = 1<<02, // OWN bit of Descriptor 2
    TSAD_OWN1 = 1<<01, // OWN bit of Descriptor 1
    TSAD_OWN0 = 1<<00, // OWN bit of Descriptor 0
};
/* Bits in Config1 */
enum Config1Bits {
    Cfg1_PM_Enable = 0x01,
    Cfg1_VPD_Enable = 0x02,
    Cfg1_PIO = 0x04,
    Cfg1_MMIO = 0x08,
    LWAKE = 0x10, /* not on 8139, 8139A */
    Cfg1_Driver_Load = 0x20,
    Cfg1_LED0 = 0x40,
    Cfg1_LED1 = 0x80,
    SLEEP = (1 << 1), /* only on 8139, 8139A */
    PWRDN = (1 << 0), /* only on 8139, 8139A */
};
/* Bits in Config3 */
enum Config3Bits {
    Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */
    Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
    Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
    Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */
    Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */
    Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
    Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */
    Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
};
/* Bits in Config4 */
enum Config4Bits {
    LWPTN = (1 << 2), /* not on 8139, 8139A */
};
/* Bits in Config5 */
enum Config5Bits {
    Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */
    Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */
    Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */
    Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */
    Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */
    Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */
    Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */
};
/* Bits in RxConfig beyond the accept-filter bits above */
enum RxConfigBits {
    /* rx fifo threshold */
    RxCfgFIFOShift = 13,
    RxCfgFIFONone = (7 << RxCfgFIFOShift),
    /* Max DMA burst */
    RxCfgDMAShift = 8,
    RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
    /* rx ring buffer length */
    RxCfgRcv8K = 0,
    RxCfgRcv16K = (1 << 11),
    RxCfgRcv32K = (1 << 12),
    RxCfgRcv64K = (1 << 11) | (1 << 12),
    /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */
    RxNoWrap = (1 << 7),
};
/* Twister tuning parameters from RealTek.
   Completely undocumented, but required to tune bad links on some boards. */
/*
enum CSCRBits {
    CSCR_LinkOKBit = 0x0400,
    CSCR_LinkChangeBit = 0x0800,
    CSCR_LinkStatusBits = 0x0f000,
    CSCR_LinkDownOffCmd = 0x003c0,
    CSCR_LinkDownCmd = 0x0f3c0,
*/
enum CSCRBits {
    CSCR_Testfun = 1<<15, /* 1 = Auto-neg speeds up internal timer, WO, def 0 */
    CSCR_LD = 1<<9, /* Active low TPI link disable signal. When low, TPI still transmits link pulses and TPI stays in good link state. def 1*/
    CSCR_HEART_BIT = 1<<8, /* 1 = HEART BEAT enable, 0 = HEART BEAT disable. HEART BEAT function is only valid in 10Mbps mode. def 1*/
    CSCR_JBEN = 1<<7, /* 1 = enable jabber function. 0 = disable jabber function, def 1*/
    CSCR_F_LINK_100 = 1<<6, /* Used to login force good link in 100Mbps for diagnostic purposes. 1 = DISABLE, 0 = ENABLE. def 1*/
    CSCR_F_Connect = 1<<5, /* Assertion of this bit forces the disconnect function to be bypassed. def 0*/
    CSCR_Con_status = 1<<3, /* This bit indicates the status of the connection. 1 = valid connected link detected; 0 = disconnected link detected. RO def 0*/
    CSCR_Con_status_En = 1<<2, /* Assertion of this bit configures LED1 pin to indicate connection status. def 0*/
    CSCR_PASS_SCR = 1<<0, /* Bypass Scramble, def 0*/
};
/* values of the Cfg9346 EEPROM-control/lock register */
enum Cfg9346Bits {
    Cfg9346_Normal = 0x00,
    Cfg9346_Autoload = 0x40,
    Cfg9346_Programming = 0x80,
    Cfg9346_ConfigWrite = 0xC0,
};
/* RTL8139 family member identifiers */
typedef enum {
    CH_8139 = 0,
    CH_8139_K,
    CH_8139A,
    CH_8139A_G,
    CH_8139B,
    CH_8130,
    CH_8139C,
    CH_8100,
    CH_8100B_8139D,
    CH_8101,
} chip_t;
/* per-chip capability flags */
enum chip_flags {
    HasHltClk = (1 << 0),
    HasLWake = (1 << 1),
};
/* pack the hardware revision bits (TxConfig bits 30-26, 23, 22) */
#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \
(b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22)
#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
/* PCI revision IDs: plain 8139 vs. 8139C+ (the model we report) */
#define RTL8139_PCI_REVID_8139 0x10
#define RTL8139_PCI_REVID_8139CPLUS 0x20
#define RTL8139_PCI_REVID RTL8139_PCI_REVID_8139CPLUS
/* Size is 64 * 16bit words */
#define EEPROM_9346_ADDR_BITS 6
#define EEPROM_9346_SIZE (1 << EEPROM_9346_ADDR_BITS)
#define EEPROM_9346_ADDR_MASK (EEPROM_9346_SIZE - 1)
/* 93C46 serial opcodes: 2 command bits followed by 6 address bits */
enum Chip9346Operation
{
    Chip9346_op_mask = 0xc0, /* 10 zzzzzz */
    Chip9346_op_read = 0x80, /* 10 AAAAAA */
    Chip9346_op_write = 0x40, /* 01 AAAAAA D(15)..D(0) */
    Chip9346_op_ext_mask = 0xf0, /* 11 zzzzzz */
    Chip9346_op_write_enable = 0x30, /* 00 11zzzz */
    Chip9346_op_write_all = 0x10, /* 00 01zzzz */
    Chip9346_op_write_disable = 0x00, /* 00 00zzzz */
};
/* states of the EEPROM bit-serial state machine */
enum Chip9346Mode
{
    Chip9346_none = 0,
    Chip9346_enter_command_mode,
    Chip9346_read_command,
    Chip9346_data_read, /* from output register */
    Chip9346_data_write, /* to input register, then to contents at specified address */
    Chip9346_data_write_all, /* to input register, then filling contents */
};
/* 93C46 serial EEPROM emulation state (driven by the prom9346_* helpers) */
typedef struct EEprom9346
{
    uint16_t contents[EEPROM_9346_SIZE]; /* the 64 16-bit data words */
    int mode; /* current Chip9346Mode state */
    uint32_t tick; /* bits clocked since entering the current mode */
    uint8_t address; /* latched word address */
    uint16_t input; /* shift register for incoming bits */
    uint16_t output; /* shift register for outgoing bits */
    /* serial wire levels: chip select, clock, data in, data out */
    uint8_t eecs;
    uint8_t eesk;
    uint8_t eedi;
    uint8_t eedo;
} EEprom9346;
/* hardware statistics counters; see RTL8139TallyCounters_dma_write */
typedef struct RTL8139TallyCounters
{
    /* Tally counters */
    uint64_t TxOk;
    uint64_t RxOk;
    uint64_t TxERR;
    uint32_t RxERR;
    uint16_t MissPkt;
    uint16_t FAE;
    uint32_t Tx1Col;
    uint32_t TxMCol;
    uint64_t RxOkPhy;
    uint64_t RxOkBrd;
    uint32_t RxOkMul;
    uint16_t TxAbt;
    uint16_t TxUndrn;
} RTL8139TallyCounters;
/* Clears all tally counters */
static void RTL8139TallyCounters_clear(RTL8139TallyCounters* counters);
/* Run-time state of one emulated RTL8139 device. */
typedef struct RTL8139State {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/
    uint8_t phys[8]; /* mac address */
    uint8_t mult[8]; /* multicast mask array */
    uint32_t TxStatus[4]; /* TxStatus0 in C mode*/ /* also DTCCR[0] and DTCCR[1] in C+ mode */
    uint32_t TxAddr[4]; /* TxAddr0 */
    uint32_t RxBuf; /* Receive buffer */
    uint32_t RxBufferSize;/* internal variable, receive ring buffer size in C mode */
    uint32_t RxBufPtr; /* guest read offset into the ring */
    uint32_t RxBufAddr; /* device write offset into the ring */
    uint16_t IntrStatus;
    uint16_t IntrMask;
    uint32_t TxConfig;
    uint32_t RxConfig;
    uint32_t RxMissed;
    uint16_t CSCR;
    uint8_t Cfg9346;
    uint8_t Config0;
    uint8_t Config1;
    uint8_t Config3;
    uint8_t Config4;
    uint8_t Config5;
    uint8_t clock_enabled; /* 0 = clock stopped: receive path bails out */
    uint8_t bChipCmdState; /* ChipCmd register (CmdRxEnb/CmdTxEnb etc.) */
    uint16_t MultiIntr;
    uint16_t BasicModeCtrl;
    uint16_t BasicModeStatus;
    uint16_t NWayAdvert;
    uint16_t NWayLPAR;
    uint16_t NWayExpansion;
    uint16_t CpCmd;
    uint8_t TxThresh;
    NICState *nic;
    NICConf conf;
    /* C ring mode */
    uint32_t currTxDesc;
    /* C+ mode */
    uint32_t cplus_enabled;
    uint32_t currCPlusRxDesc;
    uint32_t currCPlusTxDesc;
    uint32_t RxRingAddrLO;
    uint32_t RxRingAddrHI;
    EEprom9346 eeprom;
    uint32_t TCTR;
    uint32_t TimerInt;
    int64_t TCTR_base;
    /* Tally counters */
    RTL8139TallyCounters tally_counters;
    /* Non-persistent data */
    uint8_t *cplus_txbuffer;
    int cplus_txbuffer_len;
    int cplus_txbuffer_offset;
    /* PCI interrupt timer */
    QEMUTimer *timer;
    MemoryRegion bar_io;
    MemoryRegion bar_mem;
    /* Support migration to/from old versions */
    int rtl8139_mmio_io_addr_dummy;
} RTL8139State;
/* Writes tally counters to memory via DMA */
static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr);
static void rtl8139_set_next_tctr_time(RTL8139State *s);
/* Decode an 8-bit EEPROM opcode (2 command bits + 6 address bits) that
 * has just been shifted in, and move the 93C46 state machine to the
 * corresponding mode.  A read latches the addressed word into the
 * output shift register. */
static void prom9346_decode_command(EEprom9346 *eeprom, uint8_t command)
{
    DPRINTF("eeprom command 0x%02x\n", command);
    switch (command & Chip9346_op_mask)
    {
        case Chip9346_op_read:
        {
            eeprom->address = command & EEPROM_9346_ADDR_MASK;
            eeprom->output = eeprom->contents[eeprom->address];
            eeprom->eedo = 0;
            eeprom->tick = 0;
            eeprom->mode = Chip9346_data_read;
            DPRINTF("eeprom read from address 0x%02x data=0x%04x\n",
                eeprom->address, eeprom->output);
        }
        break;
        case Chip9346_op_write:
        {
            eeprom->address = command & EEPROM_9346_ADDR_MASK;
            eeprom->input = 0;
            eeprom->tick = 0;
            /* NOTE(review): mode stays Chip9346_none (data_write commented
               out), so the data half of a write command is ignored here —
               confirm whether this is intentional */
            eeprom->mode = Chip9346_none; /* Chip9346_data_write */
            DPRINTF("eeprom begin write to address 0x%02x\n",
                eeprom->address);
        }
        break;
        default:
            /* extended opcodes only toggle write-enable state */
            eeprom->mode = Chip9346_none;
            switch (command & Chip9346_op_ext_mask)
            {
                case Chip9346_op_write_enable:
                    DPRINTF("eeprom write enabled\n");
                    break;
                case Chip9346_op_write_all:
                    DPRINTF("eeprom begin write all\n");
                    break;
                case Chip9346_op_write_disable:
                    DPRINTF("eeprom write disabled\n");
                    break;
            }
            break;
    }
}
/* Clock one bit (from eedi) through the EEPROM state machine; called on
 * each rising edge of the serial clock while chip select is high. */
static void prom9346_shift_clock(EEprom9346 *eeprom)
{
    int bit = eeprom->eedi?1:0;
    ++ eeprom->tick;
    DPRINTF("eeprom: tick %d eedi=%d eedo=%d\n", eeprom->tick, eeprom->eedi,
        eeprom->eedo);
    switch (eeprom->mode)
    {
        case Chip9346_enter_command_mode:
            /* wait for the start bit (first 1) before reading a command */
            if (bit)
            {
                eeprom->mode = Chip9346_read_command;
                eeprom->tick = 0;
                eeprom->input = 0;
                DPRINTF("eeprom: +++ synchronized, begin command read\n");
            }
            break;
        case Chip9346_read_command:
            /* accumulate 8 opcode+address bits, MSB first */
            eeprom->input = (eeprom->input << 1) | (bit & 1);
            if (eeprom->tick == 8)
            {
                prom9346_decode_command(eeprom, eeprom->input & 0xff);
            }
            break;
        case Chip9346_data_read:
            /* shift the latched word out on eedo, MSB first */
            eeprom->eedo = (eeprom->output & 0x8000)?1:0;
            eeprom->output <<= 1;
            if (eeprom->tick == 16)
            {
#if 1
                // the FreeBSD drivers (rl and re) don't explicitly toggle
                // CS between reads (or does setting Cfg9346 to 0 count too?),
                // so we need to enter wait-for-command state here
                eeprom->mode = Chip9346_enter_command_mode;
                eeprom->input = 0;
                eeprom->tick = 0;
                DPRINTF("eeprom: +++ end of read, awaiting next command\n");
#else
                // original behaviour
                ++eeprom->address;
                eeprom->address &= EEPROM_9346_ADDR_MASK;
                eeprom->output = eeprom->contents[eeprom->address];
                eeprom->tick = 0;
                DPRINTF("eeprom: +++ read next address 0x%02x data=0x%04x\n",
                    eeprom->address, eeprom->output);
#endif
            }
            break;
        case Chip9346_data_write:
            /* collect 16 data bits, then commit to the latched address */
            eeprom->input = (eeprom->input << 1) | (bit & 1);
            if (eeprom->tick == 16)
            {
                DPRINTF("eeprom write to address 0x%02x data=0x%04x\n",
                    eeprom->address, eeprom->input);
                eeprom->contents[eeprom->address] = eeprom->input;
                eeprom->mode = Chip9346_none; /* waiting for next command after CS cycle */
                eeprom->tick = 0;
                eeprom->input = 0;
            }
            break;
        case Chip9346_data_write_all:
            /* collect 16 data bits, then fill the entire array with them */
            eeprom->input = (eeprom->input << 1) | (bit & 1);
            if (eeprom->tick == 16)
            {
                int i;
                for (i = 0; i < EEPROM_9346_SIZE; i++)
                {
                    eeprom->contents[i] = eeprom->input;
                }
                DPRINTF("eeprom filled with data=0x%04x\n", eeprom->input);
                eeprom->mode = Chip9346_enter_command_mode;
                eeprom->tick = 0;
                eeprom->input = 0;
            }
            break;
        default:
            break;
    }
}
/* Sample the EEPROM data-out (DO) wire; it reads as 0 whenever chip
   select is low. */
static int prom9346_get_wire(RTL8139State *s)
{
    const EEprom9346 *eeprom = &s->eeprom;
    return eeprom->eecs ? eeprom->eedo : 0;
}
/* FIXME: This should be merged into/replaced by eeprom93xx.c. */
/* Drive the EEPROM serial wires (chip select, clock, data-in).  A
 * rising edge on CS re-synchronizes the chip into command mode; while
 * CS is high, each rising edge on SK clocks one bit through the state
 * machine. */
static void prom9346_set_wire(RTL8139State *s, int eecs, int eesk, int eedi)
{
    EEprom9346 *eeprom = &s->eeprom;
    int cs_rising = eecs && !eeprom->eecs;
    int sk_rising = eesk && !eeprom->eesk;
    eeprom->eecs = eecs;
    eeprom->eesk = eesk;
    eeprom->eedi = eedi;
    DPRINTF("eeprom: +++ wires CS=%d SK=%d DI=%d DO=%d\n", eeprom->eecs,
        eeprom->eesk, eeprom->eedi, eeprom->eedo);
    if (cs_rising)
    {
        /* Synchronize start */
        eeprom->tick = 0;
        eeprom->input = 0;
        eeprom->output = 0;
        eeprom->mode = Chip9346_enter_command_mode;
        DPRINTF("=== eeprom: begin access, enter command mode\n");
    }
    if (!eecs)
    {
        DPRINTF("=== eeprom: end access\n");
        return;
    }
    if (sk_rising)
    {
        /* SK front rules */
        prom9346_shift_clock(eeprom);
    }
}
/* Recompute the interrupt line level from IntrStatus & IntrMask and
 * forward it to the PCI core. */
static void rtl8139_update_irq(RTL8139State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int isr;
    isr = (s->IntrStatus & s->IntrMask) & 0xffff;
    DPRINTF("Set IRQ to %d (%04x %04x)\n", isr ? 1 : 0, s->IntrStatus,
        s->IntrMask);
    pci_set_irq(d, (isr != 0));
}
/* Nonzero when RxConfig bit 7 is set (named RxNoWrap in RxConfigBits;
 * see rtl8139_write_buffer for how it affects ring wrapping). */
static int rtl8139_RxWrap(RTL8139State *s)
{
    /* wrapping enabled; assume 1.5k more buffer space if size < 65536 */
    return (s->RxConfig & (1 << 7));
}
/* C-mode receiver/transmitter enable bits from the ChipCmd register */
static int rtl8139_receiver_enabled(RTL8139State *s)
{
    return s->bChipCmdState & CmdRxEnb;
}
static int rtl8139_transmitter_enabled(RTL8139State *s)
{
    return s->bChipCmdState & CmdTxEnb;
}
/* C+ mode receiver/transmitter enable bits from the CpCmd register */
static int rtl8139_cp_receiver_enabled(RTL8139State *s)
{
    return s->CpCmd & CPlusRxEnb;
}
static int rtl8139_cp_transmitter_enabled(RTL8139State *s)
{
    return s->CpCmd & CPlusTxEnb;
}
/* DMA "size" bytes into the legacy receive ring at the current write
 * offset (RxBufAddr) and advance it.  When the data would cross the end
 * of the ring, it is either split into two writes (wrap) or — if the
 * overwrap mode applies (RxWrap bit set and ring < 64k) — written
 * contiguously past the nominal ring end. */
static void rtl8139_write_buffer(RTL8139State *s, const void *buf, int size)
{
    PCIDevice *d = PCI_DEVICE(s);
    if (s->RxBufAddr + size > s->RxBufferSize)
    {
        int wrapped = MOD2(s->RxBufAddr + size, s->RxBufferSize);
        /* write packet data */
        if (wrapped && !(s->RxBufferSize < 65536 && rtl8139_RxWrap(s)))
        {
            DPRINTF(">>> rx packet wrapped in buffer at %d\n", size - wrapped);
            if (size > wrapped)
            {
                /* first fragment: up to the end of the ring */
                pci_dma_write(d, s->RxBuf + s->RxBufAddr,
                    buf, size-wrapped);
            }
            /* reset buffer pointer */
            s->RxBufAddr = 0;
            /* second fragment: remainder at the start of the ring */
            pci_dma_write(d, s->RxBuf + s->RxBufAddr,
                buf + (size-wrapped), wrapped);
            s->RxBufAddr = wrapped;
            return;
        }
    }
    /* non-wrapping path or overwrapping enabled */
    pci_dma_write(d, s->RxBuf + s->RxBufAddr, buf, size);
    s->RxBufAddr += size;
}
#define MIN_BUF_SIZE 60 /* frames shorter than this are padded before delivery */
/* combine the low/high 32-bit register halves into one DMA address */
static inline dma_addr_t rtl8139_addr64(uint32_t low, uint32_t high)
{
    return low | ((uint64_t)high << 32);
}
/* Workaround for buggy guest driver such as linux who allocates rx
 * rings after the receiver were enabled. */
static bool rtl8139_cp_rx_valid(RTL8139State *s)
{
    return !(s->RxRingAddrLO == 0 && s->RxRingAddrHI == 0);
}
/* Net layer query: can the device accept a packet right now?  Returns
 * nonzero also when the card is disabled, so incoming packets get
 * delivered to rtl8139_do_receive and dropped there. */
static int rtl8139_can_receive(NetClientState *nc)
{
    RTL8139State *s = qemu_get_nic_opaque(nc);
    int avail;
    /* Receive (drop) packets if card is disabled. */
    if (!s->clock_enabled)
        return 1;
    if (!rtl8139_receiver_enabled(s))
        return 1;
    if (rtl8139_cp_receiver_enabled(s) && rtl8139_cp_rx_valid(s)) {
        /* ??? Flow control not implemented in c+ mode.
           This is a hack to work around slirp deficiencies anyway. */
        return 1;
    } else {
        /* legacy mode: require room for a max-size frame in the ring */
        avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr,
            s->RxBufferSize);
        return (avail == 0 || avail >= 1514 || (s->IntrMask & RxOverflow));
    }
}
/* Deliver one incoming ethernet frame to the guest.  Returns -1 when
 * the device cannot accept it (clock stopped or receiver disabled),
 * otherwise size_ whether the frame was stored or dropped.  The frame
 * is first matched against the receive filter (promiscuous, broadcast,
 * multicast hash, own MAC); it is then DMA'd either into the current C+
 * descriptor (with optional VLAN detagging) or into the legacy ring
 * buffer, a CRC32 is appended, and RxOK is raised — with an interrupt
 * if do_interrupt is set. */
static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t size_, int do_interrupt)
{
    RTL8139State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    /* size is the length of the buffer passed to the driver */
    int size = size_;
    const uint8_t *dot1q_buf = NULL;
    uint32_t packet_header = 0;
    uint8_t buf1[MIN_BUF_SIZE + VLAN_HLEN];
    static const uint8_t broadcast_macaddr[6] =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    DPRINTF(">>> received len=%d\n", size);
    /* test if board clock is stopped */
    if (!s->clock_enabled)
    {
        DPRINTF("stopped ==========================\n");
        return -1;
    }
    /* first check if receiver is enabled */
    if (!rtl8139_receiver_enabled(s))
    {
        DPRINTF("receiver disabled ================\n");
        return -1;
    }
    /* XXX: check this */
    if (s->RxConfig & AcceptAllPhys) {
        /* promiscuous: receive all */
        DPRINTF(">>> packet received in promiscuous mode\n");
    } else {
        if (!memcmp(buf, broadcast_macaddr, 6)) {
            /* broadcast address */
            if (!(s->RxConfig & AcceptBroadcast))
            {
                DPRINTF(">>> broadcast packet rejected\n");
                /* update tally counter */
                ++s->tally_counters.RxERR;
                return size;
            }
            packet_header |= RxBroadcast;
            DPRINTF(">>> broadcast packet received\n");
            /* update tally counter */
            ++s->tally_counters.RxOkBrd;
        } else if (buf[0] & 0x01) {
            /* multicast */
            if (!(s->RxConfig & AcceptMulticast))
            {
                DPRINTF(">>> multicast packet rejected\n");
                /* update tally counter */
                ++s->tally_counters.RxERR;
                return size;
            }
            /* check the 64-bit multicast hash filter (mult[]) */
            int mcast_idx = compute_mcast_idx(buf);
            if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))))
            {
                DPRINTF(">>> multicast address mismatch\n");
                /* update tally counter */
                ++s->tally_counters.RxERR;
                return size;
            }
            packet_header |= RxMulticast;
            DPRINTF(">>> multicast packet received\n");
            /* update tally counter */
            ++s->tally_counters.RxOkMul;
        } else if (s->phys[0] == buf[0] &&
                   s->phys[1] == buf[1] &&
                   s->phys[2] == buf[2] &&
                   s->phys[3] == buf[3] &&
                   s->phys[4] == buf[4] &&
                   s->phys[5] == buf[5]) {
            /* match */
            if (!(s->RxConfig & AcceptMyPhys))
            {
                DPRINTF(">>> rejecting physical address matching packet\n");
                /* update tally counter */
                ++s->tally_counters.RxERR;
                return size;
            }
            packet_header |= RxPhysical;
            DPRINTF(">>> physical address matching packet received\n");
            /* update tally counter */
            ++s->tally_counters.RxOkPhy;
        } else {
            DPRINTF(">>> unknown packet\n");
            /* update tally counter */
            ++s->tally_counters.RxERR;
            return size;
        }
    }
    /* if too small buffer, then expand it
     * Include some tailroom in case a vlan tag is later removed. */
    if (size < MIN_BUF_SIZE + VLAN_HLEN) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE + VLAN_HLEN - size);
        buf = buf1;
        if (size < MIN_BUF_SIZE) {
            size = MIN_BUF_SIZE;
        }
    }
    if (rtl8139_cp_receiver_enabled(s))
    {
        if (!rtl8139_cp_rx_valid(s)) {
            return size;
        }
        DPRINTF("in C+ Rx mode ================\n");
        /* begin C+ receiver mode */
/* w0 ownership flag */
#define CP_RX_OWN (1<<31)
/* w0 end of ring flag */
#define CP_RX_EOR (1<<30)
/* w0 bits 0...12 : buffer size */
#define CP_RX_BUFFER_SIZE_MASK ((1<<13) - 1)
/* w1 tag available flag */
#define CP_RX_TAVA (1<<16)
/* w1 bits 0...15 : VLAN tag */
#define CP_RX_VLAN_TAG_MASK ((1<<16) - 1)
        /* w2 low 32bit of Rx buffer ptr */
        /* w3 high 32bit of Rx buffer ptr */
        int descriptor = s->currCPlusRxDesc;
        dma_addr_t cplus_rx_ring_desc;
        cplus_rx_ring_desc = rtl8139_addr64(s->RxRingAddrLO, s->RxRingAddrHI);
        /* each descriptor is four 32-bit words */
        cplus_rx_ring_desc += 16 * descriptor;
        DPRINTF("+++ C+ mode reading RX descriptor %d from host memory at "
            "%08x %08x = "DMA_ADDR_FMT"\n", descriptor, s->RxRingAddrHI,
            s->RxRingAddrLO, cplus_rx_ring_desc);
        /* fetch the four descriptor words from guest memory */
        uint32_t val, rxdw0,rxdw1,rxbufLO,rxbufHI;
        pci_dma_read(d, cplus_rx_ring_desc, &val, 4);
        rxdw0 = le32_to_cpu(val);
        pci_dma_read(d, cplus_rx_ring_desc+4, &val, 4);
        rxdw1 = le32_to_cpu(val);
        pci_dma_read(d, cplus_rx_ring_desc+8, &val, 4);
        rxbufLO = le32_to_cpu(val);
        pci_dma_read(d, cplus_rx_ring_desc+12, &val, 4);
        rxbufHI = le32_to_cpu(val);
        DPRINTF("+++ C+ mode RX descriptor %d %08x %08x %08x %08x\n",
            descriptor, rxdw0, rxdw1, rxbufLO, rxbufHI);
        if (!(rxdw0 & CP_RX_OWN))
        {
            /* guest has not given us this descriptor: signal overflow */
            DPRINTF("C+ Rx mode : descriptor %d is owned by host\n",
                descriptor);
            s->IntrStatus |= RxOverflow;
            ++s->RxMissed;
            /* update tally counter */
            ++s->tally_counters.RxERR;
            ++s->tally_counters.MissPkt;
            rtl8139_update_irq(s);
            return size_;
        }
        uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK;
        /* write VLAN info to descriptor variables. */
        if (s->CpCmd & CPlusRxVLAN && be16_to_cpup((uint16_t *)
                &buf[ETHER_ADDR_LEN * 2]) == ETH_P_8021Q) {
            dot1q_buf = &buf[ETHER_ADDR_LEN * 2];
            size -= VLAN_HLEN;
            /* if too small buffer, use the tailroom added during expansion */
            if (size < MIN_BUF_SIZE) {
                size = MIN_BUF_SIZE;
            }
            rxdw1 &= ~CP_RX_VLAN_TAG_MASK;
            /* BE + ~le_to_cpu()~ + cpu_to_le() = BE */
            rxdw1 |= CP_RX_TAVA | le16_to_cpup((uint16_t *)
                &dot1q_buf[ETHER_TYPE_LEN]);
            DPRINTF("C+ Rx mode : extracted vlan tag with tci: ""%u\n",
                be16_to_cpup((uint16_t *)&dot1q_buf[ETHER_TYPE_LEN]));
        } else {
            /* reset VLAN tag flag */
            rxdw1 &= ~CP_RX_TAVA;
        }
        /* TODO: scatter the packet over available receive ring descriptors space */
        if (size+4 > rx_space)
        {
            /* packet (plus CRC) does not fit into this descriptor's buffer */
            DPRINTF("C+ Rx mode : descriptor %d size %d received %d + 4\n",
                descriptor, rx_space, size);
            s->IntrStatus |= RxOverflow;
            ++s->RxMissed;
            /* update tally counter */
            ++s->tally_counters.RxERR;
            ++s->tally_counters.MissPkt;
            rtl8139_update_irq(s);
            return size_;
        }
        dma_addr_t rx_addr = rtl8139_addr64(rxbufLO, rxbufHI);
        /* receive/copy to target memory */
        if (dot1q_buf) {
            /* copy around the stripped 802.1Q header */
            pci_dma_write(d, rx_addr, buf, 2 * ETHER_ADDR_LEN);
            pci_dma_write(d, rx_addr + 2 * ETHER_ADDR_LEN,
                buf + 2 * ETHER_ADDR_LEN + VLAN_HLEN,
                size - 2 * ETHER_ADDR_LEN);
        } else {
            pci_dma_write(d, rx_addr, buf, size);
        }
        if (s->CpCmd & CPlusRxChkSum)
        {
            /* do some packet checksumming */
        }
        /* write checksum */
        val = cpu_to_le32(crc32(0, buf, size_));
        pci_dma_write(d, rx_addr+size, (uint8_t *)&val, 4);
/* first segment of received packet flag */
#define CP_RX_STATUS_FS (1<<29)
/* last segment of received packet flag */
#define CP_RX_STATUS_LS (1<<28)
/* multicast packet flag */
#define CP_RX_STATUS_MAR (1<<26)
/* physical-matching packet flag */
#define CP_RX_STATUS_PAM (1<<25)
/* broadcast packet flag */
#define CP_RX_STATUS_BAR (1<<24)
/* runt packet flag */
#define CP_RX_STATUS_RUNT (1<<19)
/* crc error flag */
#define CP_RX_STATUS_CRC (1<<18)
/* IP checksum error flag */
#define CP_RX_STATUS_IPF (1<<15)
/* UDP checksum error flag */
#define CP_RX_STATUS_UDPF (1<<14)
/* TCP checksum error flag */
#define CP_RX_STATUS_TCPF (1<<13)
        /* transfer ownership to target */
        rxdw0 &= ~CP_RX_OWN;
        /* set first segment bit */
        rxdw0 |= CP_RX_STATUS_FS;
        /* set last segment bit */
        rxdw0 |= CP_RX_STATUS_LS;
        /* set received packet type flags */
        if (packet_header & RxBroadcast)
            rxdw0 |= CP_RX_STATUS_BAR;
        if (packet_header & RxMulticast)
            rxdw0 |= CP_RX_STATUS_MAR;
        if (packet_header & RxPhysical)
            rxdw0 |= CP_RX_STATUS_PAM;
        /* set received size */
        rxdw0 &= ~CP_RX_BUFFER_SIZE_MASK;
        rxdw0 |= (size+4);
        /* update ring data: write back w0 and w1 (ownership last-ish) */
        val = cpu_to_le32(rxdw0);
        pci_dma_write(d, cplus_rx_ring_desc, (uint8_t *)&val, 4);
        val = cpu_to_le32(rxdw1);
        pci_dma_write(d, cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
        /* update tally counter */
        ++s->tally_counters.RxOk;
        /* seek to next Rx descriptor */
        if (rxdw0 & CP_RX_EOR)
        {
            s->currCPlusRxDesc = 0;
        }
        else
        {
            ++s->currCPlusRxDesc;
        }
        DPRINTF("done C+ Rx mode ----------------\n");
    }
    else
    {
        DPRINTF("in ring Rx mode ================\n");
        /* begin ring receiver mode */
        int avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, s->RxBufferSize);
        /* if receiver buffer is empty then avail == 0 */
        if (avail != 0 && size + 8 >= avail)
        {
            /* header (4) + CRC (4) + data would not fit: overflow */
            DPRINTF("rx overflow: rx buffer length %d head 0x%04x "
                "read 0x%04x === available 0x%04x need 0x%04x\n",
                s->RxBufferSize, s->RxBufAddr, s->RxBufPtr, avail, size + 8);
            s->IntrStatus |= RxOverflow;
            ++s->RxMissed;
            rtl8139_update_irq(s);
            return size_;
        }
        /* status in the low word, total length (incl. CRC) in the high */
        packet_header |= RxStatusOK;
        packet_header |= (((size+4) << 16) & 0xffff0000);
        /* write header */
        uint32_t val = cpu_to_le32(packet_header);
        rtl8139_write_buffer(s, (uint8_t *)&val, 4);
        rtl8139_write_buffer(s, buf, size);
        /* write checksum */
        val = cpu_to_le32(crc32(0, buf, size));
        rtl8139_write_buffer(s, (uint8_t *)&val, 4);
        /* correct buffer write pointer: align up to a 4-byte boundary */
        s->RxBufAddr = MOD2((s->RxBufAddr + 3) & ~0x3, s->RxBufferSize);
        /* now we can signal we have received something */
        DPRINTF("received: rx buffer length %d head 0x%04x read 0x%04x\n",
            s->RxBufferSize, s->RxBufAddr, s->RxBufPtr);
    }
    s->IntrStatus |= RxOK;
    if (do_interrupt)
    {
        rtl8139_update_irq(s);
    }
    return size_;
}
/* NetClientState .receive callback: forward the frame to the common RX
 * path with interrupt delivery enabled (do_interrupt = 1). */
static ssize_t rtl8139_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    return rtl8139_do_receive(nc, buf, size, 1);
}
/* Reset the ring-mode (non-C+) receive buffer: set its size and clear both
 * the write head (RxBufAddr) and the read pointer (RxBufPtr), i.e. empty. */
static void rtl8139_reset_rxring(RTL8139State *s, uint32_t bufferSize)
{
    s->RxBufferSize = bufferSize;
    s->RxBufPtr = 0;
    s->RxBufAddr = 0;
}
/* Full device reset: restore the MAC address, clear interrupt state,
 * reinitialize TX/RX ring bookkeeping, and load power-on register
 * defaults.  Also invoked by software resets (ChipCmd CmdReset and the
 * Cfg9346 reset opcode). */
static void rtl8139_reset(DeviceState *d)
{
    RTL8139State *s = RTL8139(d);
    int i;
    /* restore MAC address */
    memcpy(s->phys, s->conf.macaddr.a, 6);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->phys);
    /* reset interrupt mask */
    s->IntrStatus = 0;
    s->IntrMask = 0;
    rtl8139_update_irq(s);
    /* mark all status registers as owned by host */
    for (i = 0; i < 4; ++i)
    {
        s->TxStatus[i] = TxHostOwns;
    }
    /* rewind both the legacy TX ring and the C+ TX/RX rings */
    s->currTxDesc = 0;
    s->currCPlusRxDesc = 0;
    s->currCPlusTxDesc = 0;
    s->RxRingAddrLO = 0;
    s->RxRingAddrHI = 0;
    s->RxBuf = 0;
    /* power-on RX ring buffer size is 8K */
    rtl8139_reset_rxring(s, 8192);
    /* ACK the reset */
    s->TxConfig = 0;
#if 0
// s->TxConfig |= HW_REVID(1, 0, 0, 0, 0, 0, 0); // RTL-8139 HasHltClk
    s->clock_enabled = 0;
#else
    s->TxConfig |= HW_REVID(1, 1, 1, 0, 1, 1, 0); // RTL-8139C+ HasLWake
    s->clock_enabled = 1;
#endif
    s->bChipCmdState = CmdReset; /* RxBufEmpty bit is calculated on read from ChipCmd */;
    /* set initial state data */
    s->Config0 = 0x0; /* No boot ROM */
    s->Config1 = 0xC; /* IO mapped and MEM mapped registers available */
    s->Config3 = 0x1; /* fast back-to-back compatible */
    s->Config5 = 0x0;
    s->CSCR = CSCR_F_LINK_100 | CSCR_HEART_BIT | CSCR_LD;
    s->CpCmd = 0x0; /* reset C+ mode */
    s->cplus_enabled = 0;
// s->BasicModeCtrl = 0x3100; // 100Mbps, full duplex, autonegotiation
// s->BasicModeCtrl = 0x2100; // 100Mbps, full duplex
    s->BasicModeCtrl = 0x1000; // autonegotiation
    s->BasicModeStatus = 0x7809;
    //s->BasicModeStatus |= 0x0040; /* UTP medium */
    s->BasicModeStatus |= 0x0020; /* autonegotiation completed */
    /* preserve link state */
    s->BasicModeStatus |= qemu_get_queue(s->nic)->link_down ? 0 : 0x04;
    s->NWayAdvert = 0x05e1; /* all modes, full duplex */
    s->NWayLPAR = 0x05e1; /* all modes, full duplex */
    s->NWayExpansion = 0x0001; /* autonegotiation supported */
    /* also reset timer and disable timer interrupt */
    s->TCTR = 0;
    s->TimerInt = 0;
    s->TCTR_base = 0;
    rtl8139_set_next_tctr_time(s);
    /* reset tally counters */
    RTL8139TallyCounters_clear(&s->tally_counters);
}
/* Zero every hardware statistics (tally) counter.
 * The assignments are independent, so ordering is irrelevant; they are
 * grouped here by direction for readability. */
static void RTL8139TallyCounters_clear(RTL8139TallyCounters* counters)
{
    /* transmit-side counters */
    counters->TxOk = 0;
    counters->TxERR = 0;
    counters->Tx1Col = 0;
    counters->TxMCol = 0;
    counters->TxAbt = 0;
    counters->TxUndrn = 0;
    /* receive-side counters */
    counters->RxOk = 0;
    counters->RxERR = 0;
    counters->RxOkPhy = 0;
    counters->RxOkBrd = 0;
    counters->RxOkMul = 0;
    /* miscellaneous error counters */
    counters->MissPkt = 0;
    counters->FAE = 0;
}
/* Dump the tally counters to guest memory at tc_addr (DTCCR command).
 * The offsets and field widths below form the hardware's 64-byte dump
 * layout, so their order must not change. */
static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr)
{
    PCIDevice *d = PCI_DEVICE(s);
    RTL8139TallyCounters *tally_counters = &s->tally_counters;
    uint16_t val16;
    uint32_t val32;
    uint64_t val64;
    val64 = cpu_to_le64(tally_counters->TxOk);
    pci_dma_write(d, tc_addr + 0, (uint8_t *)&val64, 8);
    val64 = cpu_to_le64(tally_counters->RxOk);
    pci_dma_write(d, tc_addr + 8, (uint8_t *)&val64, 8);
    val64 = cpu_to_le64(tally_counters->TxERR);
    pci_dma_write(d, tc_addr + 16, (uint8_t *)&val64, 8);
    val32 = cpu_to_le32(tally_counters->RxERR);
    pci_dma_write(d, tc_addr + 24, (uint8_t *)&val32, 4);
    val16 = cpu_to_le16(tally_counters->MissPkt);
    pci_dma_write(d, tc_addr + 28, (uint8_t *)&val16, 2);
    val16 = cpu_to_le16(tally_counters->FAE);
    pci_dma_write(d, tc_addr + 30, (uint8_t *)&val16, 2);
    val32 = cpu_to_le32(tally_counters->Tx1Col);
    pci_dma_write(d, tc_addr + 32, (uint8_t *)&val32, 4);
    val32 = cpu_to_le32(tally_counters->TxMCol);
    pci_dma_write(d, tc_addr + 36, (uint8_t *)&val32, 4);
    val64 = cpu_to_le64(tally_counters->RxOkPhy);
    pci_dma_write(d, tc_addr + 40, (uint8_t *)&val64, 8);
    val64 = cpu_to_le64(tally_counters->RxOkBrd);
    pci_dma_write(d, tc_addr + 48, (uint8_t *)&val64, 8);
    val32 = cpu_to_le32(tally_counters->RxOkMul);
    pci_dma_write(d, tc_addr + 56, (uint8_t *)&val32, 4);
    val16 = cpu_to_le16(tally_counters->TxAbt);
    pci_dma_write(d, tc_addr + 60, (uint8_t *)&val16, 2);
    val16 = cpu_to_le16(tally_counters->TxUndrn);
    pci_dma_write(d, tc_addr + 62, (uint8_t *)&val16, 2);
}
/* Migration state for the tally (statistics) counters.
 *
 * The field list must cover every counter written out by
 * RTL8139TallyCounters_dma_write(); RxOkMul was missing here, which
 * silently dropped the multicast-RX counter across save/restore.  It is
 * added back (matching upstream QEMU's field list for this section). */
static const VMStateDescription vmstate_tally_counters = {
    .name = "tally_counters",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(TxOk, RTL8139TallyCounters),
        VMSTATE_UINT64(RxOk, RTL8139TallyCounters),
        VMSTATE_UINT64(TxERR, RTL8139TallyCounters),
        VMSTATE_UINT32(RxERR, RTL8139TallyCounters),
        VMSTATE_UINT16(MissPkt, RTL8139TallyCounters),
        VMSTATE_UINT16(FAE, RTL8139TallyCounters),
        VMSTATE_UINT32(Tx1Col, RTL8139TallyCounters),
        VMSTATE_UINT32(TxMCol, RTL8139TallyCounters),
        VMSTATE_UINT64(RxOkPhy, RTL8139TallyCounters),
        VMSTATE_UINT64(RxOkBrd, RTL8139TallyCounters),
        VMSTATE_UINT32(RxOkMul, RTL8139TallyCounters),
        VMSTATE_UINT16(TxAbt, RTL8139TallyCounters),
        VMSTATE_UINT16(TxUndrn, RTL8139TallyCounters),
        VMSTATE_END_OF_LIST()
    }
};
/* Handle a write to the ChipCmd (command) register.
 * CmdReset performs a full device reset; CmdRxEnb/CmdTxEnb rewind the C+
 * RX/TX ring cursors.  The stored value keeps only the writable bits, and
 * the reset bit self-clears so the next read sees reset completed. */
static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val)
{
    DeviceState *d = DEVICE(s);
    val &= 0xff;
    DPRINTF("ChipCmd write val=0x%08x\n", val);
    if (val & CmdReset)
    {
        DPRINTF("ChipCmd reset\n");
        rtl8139_reset(d);
    }
    if (val & CmdRxEnb)
    {
        DPRINTF("ChipCmd enable receiver\n");
        /* restart the C+ RX ring from descriptor 0 */
        s->currCPlusRxDesc = 0;
    }
    if (val & CmdTxEnb)
    {
        DPRINTF("ChipCmd enable transmitter\n");
        /* restart the C+ TX ring from descriptor 0 */
        s->currCPlusTxDesc = 0;
    }
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xe3, s->bChipCmdState);
    /* Deassert reset pin before next read */
    val &= ~CmdReset;
    s->bChipCmdState = val;
}
/* Return 1 when the ring-mode receive buffer holds no unread data,
 * 0 when there is at least one unread byte. */
static int rtl8139_RxBufferEmpty(RTL8139State *s)
{
    /* bytes written but not yet consumed, computed modulo the ring size */
    int unread = MOD2(s->RxBufferSize + s->RxBufAddr - s->RxBufPtr, s->RxBufferSize);
    if (unread == 0)
    {
        DPRINTF("receiver buffer is empty\n");
        return 1;
    }
    DPRINTF("receiver buffer data available 0x%04x\n", unread);
    return 0;
}
/* Read the ChipCmd register; the RxBufEmpty status bit is synthesized on
 * each read from the current ring-buffer fill state. */
static uint32_t rtl8139_ChipCmd_read(RTL8139State *s)
{
    uint32_t ret = s->bChipCmdState | (rtl8139_RxBufferEmpty(s) ? RxBufEmpty : 0);
    DPRINTF("ChipCmd read val=0x%04x\n", ret);
    return ret;
}
/* Handle a write to the C+ command register.  Any write marks the device
 * as operating in C+ mode (cplus_enabled), matching driver behavior of
 * touching this register only when using C+ mode. */
static void rtl8139_CpCmd_write(RTL8139State *s, uint32_t val)
{
    val &= 0xffff;
    DPRINTF("C+ command register write(w) val=0x%04x\n", val);
    s->cplus_enabled = 1;
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xff84, s->CpCmd);
    s->CpCmd = val;
}
/* Read the C+ command register. */
static uint32_t rtl8139_CpCmd_read(RTL8139State *s)
{
    uint32_t ret = s->CpCmd;
    DPRINTF("C+ command register read(w) val=0x%04x\n", ret);
    return ret;
}
/* Interrupt mitigation is not implemented: writes are accepted and
 * ignored (only logged). */
static void rtl8139_IntrMitigate_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("C+ IntrMitigate register write(w) val=0x%04x\n", val);
}
/* Interrupt mitigation is not implemented: reads always return 0. */
static uint32_t rtl8139_IntrMitigate_read(RTL8139State *s)
{
    uint32_t ret = 0;
    DPRINTF("C+ IntrMitigate register read(w) val=0x%04x\n", ret);
    return ret;
}
/* The Config registers are writable only after the Cfg9346 register has
 * been set to the "config write enable" opcode.  Returns 1 when unlocked,
 * 0 (with a diagnostic) when write-protected. */
static int rtl8139_config_writable(RTL8139State *s)
{
    if ((s->Cfg9346 & Chip9346_op_mask) != Cfg9346_ConfigWrite)
    {
        DPRINTF("Configuration registers are write-protected\n");
        return 0;
    }
    return 1;
}
/* Handle a write to the MII BasicModeCtrl (BMCR) register.
 * NOTE: the "1 ||" below makes the branch unconditional, so the speed,
 * autonegotiation-enable and duplex bits are always treated as read-only
 * regardless of the Cfg9346 unlock state — this looks deliberate (the
 * emulated link is fixed), but confirm before changing. */
static void rtl8139_BasicModeCtrl_write(RTL8139State *s, uint32_t val)
{
    val &= 0xffff;
    DPRINTF("BasicModeCtrl register write(w) val=0x%04x\n", val);
    /* mask unwritable bits */
    uint32_t mask = 0x4cff;
    if (1 || !rtl8139_config_writable(s))
    {
        /* Speed setting and autonegotiation enable bits are read-only */
        mask |= 0x3000;
        /* Duplex mode setting is read-only */
        mask |= 0x0100;
    }
    val = SET_MASKED(val, mask, s->BasicModeCtrl);
    s->BasicModeCtrl = val;
}
/* Read the MII BasicModeCtrl (BMCR) register. */
static uint32_t rtl8139_BasicModeCtrl_read(RTL8139State *s)
{
    uint32_t ret = s->BasicModeCtrl;
    DPRINTF("BasicModeCtrl register read(w) val=0x%04x\n", ret);
    return ret;
}
/* Handle a write to the MII BasicModeStatus (BMSR) register; most bits
 * are hardware status and therefore masked read-only. */
static void rtl8139_BasicModeStatus_write(RTL8139State *s, uint32_t val)
{
    val &= 0xffff;
    DPRINTF("BasicModeStatus register write(w) val=0x%04x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xff3f, s->BasicModeStatus);
    s->BasicModeStatus = val;
}
/* Read the MII BasicModeStatus (BMSR) register. */
static uint32_t rtl8139_BasicModeStatus_read(RTL8139State *s)
{
    uint32_t ret = s->BasicModeStatus;
    DPRINTF("BasicModeStatus register read(w) val=0x%04x\n", ret);
    return ret;
}
/* Handle a write to the Cfg9346 (93C46 EEPROM command) register.
 * The top two bits select the operating mode: 0x80 = bit-banged EEPROM
 * access (the low nibble carries the CS/SK/DI wires), 0x40 = software
 * reset.  Other modes just latch the (masked) value, which includes the
 * config-write-unlock opcode tested by rtl8139_config_writable(). */
static void rtl8139_Cfg9346_write(RTL8139State *s, uint32_t val)
{
    DeviceState *d = DEVICE(s);
    val &= 0xff;
    DPRINTF("Cfg9346 write val=0x%02x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0x31, s->Cfg9346);
    uint32_t opmode = val & 0xc0;
    uint32_t eeprom_val = val & 0xf;
    if (opmode == 0x80) {
        /* eeprom access: decode the chip-select, clock and data-in wires */
        int eecs = (eeprom_val & 0x08)?1:0;
        int eesk = (eeprom_val & 0x04)?1:0;
        int eedi = (eeprom_val & 0x02)?1:0;
        prom9346_set_wire(s, eecs, eesk, eedi);
    } else if (opmode == 0x40) {
        /* Reset. */
        val = 0;
        rtl8139_reset(d);
    }
    s->Cfg9346 = val;
}
/* Read the Cfg9346 register.  While in EEPROM-programming mode (top two
 * bits == 0x80), bit 0 mirrors the live EEDO output wire of the 93C46. */
static uint32_t rtl8139_Cfg9346_read(RTL8139State *s)
{
    uint32_t ret = s->Cfg9346;
    if ((ret & 0xc0) == 0x80)
    {
        /* eeprom access mode: reflect the data-out wire in bit 0 */
        if (prom9346_get_wire(s))
        {
            ret |= 0x01;
        }
        else
        {
            ret &= ~0x01;
        }
    }
    DPRINTF("Cfg9346 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to Config0; ignored unless the Cfg9346 unlock opcode is
 * active, and even then only the writable bits are latched. */
static void rtl8139_Config0_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;
    DPRINTF("Config0 write val=0x%02x\n", val);
    if (!rtl8139_config_writable(s)) {
        return;
    }
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xf8, s->Config0);
    s->Config0 = val;
}
/* Read the Config0 register. */
static uint32_t rtl8139_Config0_read(RTL8139State *s)
{
    uint32_t ret = s->Config0;
    DPRINTF("Config0 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to Config1; gated on the Cfg9346 unlock opcode. */
static void rtl8139_Config1_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;
    DPRINTF("Config1 write val=0x%02x\n", val);
    if (!rtl8139_config_writable(s)) {
        return;
    }
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xC, s->Config1);
    s->Config1 = val;
}
/* Read the Config1 register. */
static uint32_t rtl8139_Config1_read(RTL8139State *s)
{
    uint32_t ret = s->Config1;
    DPRINTF("Config1 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to Config3; gated on the Cfg9346 unlock opcode. */
static void rtl8139_Config3_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;
    DPRINTF("Config3 write val=0x%02x\n", val);
    if (!rtl8139_config_writable(s)) {
        return;
    }
    /* mask unwritable bits */
    val = SET_MASKED(val, 0x8F, s->Config3);
    s->Config3 = val;
}
/* Read the Config3 register. */
static uint32_t rtl8139_Config3_read(RTL8139State *s)
{
    uint32_t ret = s->Config3;
    DPRINTF("Config3 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to Config4; gated on the Cfg9346 unlock opcode. */
static void rtl8139_Config4_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;
    DPRINTF("Config4 write val=0x%02x\n", val);
    if (!rtl8139_config_writable(s)) {
        return;
    }
    /* mask unwritable bits */
    val = SET_MASKED(val, 0x0a, s->Config4);
    s->Config4 = val;
}
/* Read the Config4 register. */
static uint32_t rtl8139_Config4_read(RTL8139State *s)
{
    uint32_t ret = s->Config4;
    DPRINTF("Config4 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to Config5.  Unlike Config0/1/3/4 there is no
 * rtl8139_config_writable() gate here — this register is writable
 * regardless of the Cfg9346 lock state. */
static void rtl8139_Config5_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;
    DPRINTF("Config5 write val=0x%02x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0x80, s->Config5);
    s->Config5 = val;
}
/* Read the Config5 register. */
static uint32_t rtl8139_Config5_read(RTL8139State *s)
{
    uint32_t ret = s->Config5;
    DPRINTF("Config5 read val=0x%02x\n", ret);
    return ret;
}
/* Handle a write to TxConfig; ignored while the transmitter is disabled.
 * The hardware revision ID bits and other read-only bits are masked. */
static void rtl8139_TxConfig_write(RTL8139State *s, uint32_t val)
{
    if (!rtl8139_transmitter_enabled(s))
    {
        DPRINTF("transmitter disabled; no TxConfig write val=0x%08x\n", val);
        return;
    }
    DPRINTF("TxConfig write val=0x%08x\n", val);
    val = SET_MASKED(val, TxVersionMask | 0x8070f80f, s->TxConfig);
    s->TxConfig = val;
}
/* Byte-wide TxConfig write (RTL8139C): replace only the low byte of the
 * current TxConfig value, then route through the word-wide handler so the
 * usual masking applies. */
static void rtl8139_TxConfig_writeb(RTL8139State *s, uint32_t val)
{
    DPRINTF("RTL8139C TxConfig via write(b) val=0x%02x\n", val);
    rtl8139_TxConfig_write(s, (s->TxConfig & 0xFFFFFF00) | (val & 0x000000FF));
}
/* Read the TxConfig register. */
static uint32_t rtl8139_TxConfig_read(RTL8139State *s)
{
    uint32_t ret = s->TxConfig;
    DPRINTF("TxConfig read val=0x%04x\n", ret);
    return ret;
}
/* Handle a write to RxConfig.  Bits 11-12 select the ring RX buffer size
 * (8K << n), so the ring is reset to the newly selected size — this also
 * discards any unread ring contents. */
static void rtl8139_RxConfig_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("RxConfig write val=0x%08x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xf0fc0040, s->RxConfig);
    s->RxConfig = val;
    /* reset buffer size and read/write pointers */
    rtl8139_reset_rxring(s, 8192 << ((s->RxConfig >> 11) & 0x3));
    DPRINTF("RxConfig write reset buffer size to %d\n", s->RxBufferSize);
}
/* Read the RxConfig register. */
static uint32_t rtl8139_RxConfig_read(RTL8139State *s)
{
    uint32_t ret = s->RxConfig;
    DPRINTF("RxConfig read val=0x%08x\n", ret);
    return ret;
}
/* Send an assembled ethernet frame out of the device.
 *
 * If dot1q_buf is non-NULL it points to a 4-byte 802.1Q tag that is
 * spliced in after the two MAC addresses via a 3-element iovec, leaving
 * the caller's frame buffer untouched.  (If the frame is too short to
 * even hold two MAC addresses, the tag is silently dropped and the frame
 * is sent as-is.)  In TX loopback mode the frame is flattened and fed
 * back into the RX path — note this recurses into rtl8139_do_receive();
 * otherwise it is handed to the QEMU net layer. */
static void rtl8139_transfer_frame(RTL8139State *s, uint8_t *buf, int size,
    int do_interrupt, const uint8_t *dot1q_buf)
{
    struct iovec *iov = NULL;
    struct iovec vlan_iov[3];
    if (!size)
    {
        DPRINTF("+++ empty ethernet frame\n");
        return;
    }
    if (dot1q_buf && size >= ETHER_ADDR_LEN * 2) {
        /* build [dst+src MACs][802.1Q tag][rest of frame] without copying */
        iov = (struct iovec[3]) {
            { .iov_base = buf, .iov_len = ETHER_ADDR_LEN * 2 },
            { .iov_base = (void *) dot1q_buf, .iov_len = VLAN_HLEN },
            { .iov_base = buf + ETHER_ADDR_LEN * 2,
                .iov_len = size - ETHER_ADDR_LEN * 2 },
        };
        /* the compound literal above is block-scoped; keep a stable copy */
        memcpy(vlan_iov, iov, sizeof(vlan_iov));
        iov = vlan_iov;
    }
    if (TxLoopBack == (s->TxConfig & TxLoopBack))
    {
        size_t buf2_size;
        uint8_t *buf2;
        if (iov) {
            /* RX path needs a contiguous buffer: flatten the iovec */
            buf2_size = iov_size(iov, 3);
            buf2 = g_malloc(buf2_size);
            iov_to_buf(iov, 3, 0, buf2, buf2_size);
            buf = buf2;
        }
        DPRINTF("+++ transmit loopback mode\n");
        rtl8139_do_receive(qemu_get_queue(s->nic), buf, size, do_interrupt);
        if (iov) {
            g_free(buf2);
        }
    }
    else
    {
        if (iov) {
            qemu_sendv_packet(qemu_get_queue(s->nic), iov, 3);
        } else {
            qemu_send_packet(qemu_get_queue(s->nic), buf, size);
        }
    }
}
/* Transmit one packet from a legacy-mode TX descriptor.
 *
 * Returns 1 if the descriptor was transmitted, 0 if nothing could be
 * sent (transmitter disabled or descriptor still owned by the host). */
static int rtl8139_transmit_one(RTL8139State *s, int descriptor)
{
    if (!rtl8139_transmitter_enabled(s))
    {
        DPRINTF("+++ cannot transmit from descriptor %d: transmitter "
            "disabled\n", descriptor);
        return 0;
    }
    if (s->TxStatus[descriptor] & TxHostOwns)
    {
        DPRINTF("+++ cannot transmit from descriptor %d: owned by host "
            "(%08x)\n", descriptor, s->TxStatus[descriptor]);
        return 0;
    }
    DPRINTF("+++ transmitting from descriptor %d\n", descriptor);
    PCIDevice *d = PCI_DEVICE(s);
    /* size is the low 13 bits of TxStatus, so at most 0x1fff — always
     * fits in the 0x2000-byte stack buffer below */
    int txsize = s->TxStatus[descriptor] & 0x1fff;
    uint8_t txbuffer[0x2000];
    DPRINTF("+++ transmit reading %d bytes from host memory at 0x%08x\n",
        txsize, s->TxAddr[descriptor]);
    pci_dma_read(d, s->TxAddr[descriptor], txbuffer, txsize);
    /* Mark descriptor as transferred */
    s->TxStatus[descriptor] |= TxHostOwns;
    s->TxStatus[descriptor] |= TxStatOK;
    rtl8139_transfer_frame(s, txbuffer, txsize, 0, NULL);
    DPRINTF("+++ transmitted %d bytes from descriptor %d\n", txsize,
        descriptor);
    /* update interrupt */
    s->IntrStatus |= TxOK;
    rtl8139_update_irq(s);
    return 1;
}
/* structures and macros for task offloading.
 * These overlay raw frame bytes, so field order/size must match the wire
 * format exactly; multi-byte fields are big-endian on the wire and are
 * accessed through be16/be32_to_cpu by the offload code. */
typedef struct ip_header
{
    uint8_t  ip_ver_len;    /* version and header length */
    uint8_t  ip_tos;          /* type of service */
    uint16_t ip_len;         /* total length */
    uint16_t ip_id;          /* identification */
    uint16_t ip_off;          /* fragment offset field */
    uint8_t  ip_ttl;            /* time to live */
    uint8_t  ip_p;            /* protocol */
    uint16_t ip_sum;         /* checksum */
    uint32_t ip_src,ip_dst;  /* source and dest address */
} ip_header;
#define IP_HEADER_VERSION_4 4
#define IP_HEADER_VERSION(ip) ((ip->ip_ver_len >> 4)&0xf)
#define IP_HEADER_LENGTH(ip) (((ip->ip_ver_len)&0xf) << 2)
typedef struct tcp_header
{
    uint16_t th_sport; /* source port */
    uint16_t th_dport; /* destination port */
    uint32_t th_seq; /* sequence number */
    uint32_t th_ack; /* acknowledgement number */
    uint16_t th_offset_flags; /* data offset, reserved 6 bits, TCP protocol flags */
    uint16_t th_win; /* window */
    uint16_t th_sum; /* checksum */
    uint16_t th_urp; /* urgent pointer */
} tcp_header;
typedef struct udp_header
{
    uint16_t uh_sport; /* source port */
    uint16_t uh_dport; /* destination port */
    uint16_t uh_ulen; /* udp length */
    uint16_t uh_sum; /* udp checksum */
} udp_header;
/* 12-byte TCP/UDP pseudo-header used for checksum computation (RFC 793);
 * the offload code materializes it in place just before the L4 header */
typedef struct ip_pseudo_header
{
    uint32_t ip_src;
    uint32_t ip_dst;
    uint8_t  zeros;
    uint8_t  ip_proto;
    uint16_t ip_payload;
} ip_pseudo_header;
#define IP_PROTO_TCP 6
#define IP_PROTO_UDP 17
#define TCP_HEADER_DATA_OFFSET(tcp) (((be16_to_cpu(tcp->th_offset_flags) >> 12)&0xf) << 2)
#define TCP_FLAGS_ONLY(flags) ((flags)&0x3f)
#define TCP_HEADER_FLAGS(tcp) TCP_FLAGS_ONLY(be16_to_cpu(tcp->th_offset_flags))
#define TCP_HEADER_CLEAR_FLAGS(tcp, off) ((tcp)->th_offset_flags &= cpu_to_be16(~TCP_FLAGS_ONLY(off)))
#define TCP_FLAG_FIN  0x01
#define TCP_FLAG_PUSH 0x08
/* Produces the ones' complement sum of len bytes at data (RFC 1071 style:
 * sum native-endian 16-bit words, pad a trailing odd byte with zero, then
 * fold the carries back into 16 bits).
 *
 * The 16-bit loads go through memcpy instead of *(uint16_t*)data: data is
 * not guaranteed to be 2-byte aligned (callers point it at arbitrary
 * offsets inside a frame, e.g. eth_payload_data + hlen - 12), and the
 * direct cast is both a strict-aliasing violation and an unaligned access
 * — undefined behavior that faults on strict-alignment targets.  memcpy
 * compiles to the same single load where that load is legal. */
static uint16_t ones_complement_sum(uint8_t *data, size_t len)
{
    uint32_t result = 0;
    uint16_t word;
    for (; len > 1; data+=2, len-=2)
    {
        memcpy(&word, data, 2);
        result += word;
    }
    /* add the remainder byte, zero-padded to a full word */
    if (len)
    {
        uint8_t odd[2] = {*data, 0};
        memcpy(&word, odd, 2);
        result += word;
    }
    /* fold any carries out of the low 16 bits back in */
    while (result>>16)
        result = (result & 0xffff) + (result >> 16);
    return result;
}
/* Internet checksum (RFC 1071): the ones' complement of the ones'
 * complement sum of the data. */
static uint16_t ip_checksum(void *data, size_t len)
{
    uint16_t sum = ones_complement_sum((uint8_t*)data, len);
    return ~sum;
}
/* Transmit one descriptor from the C+ mode TX ring.
 *
 * Reads the 16-byte descriptor at TxAddr + 16 * currCPlusTxDesc, DMAs the
 * segment into the internal assembly buffer, and when the descriptor
 * carries the last-segment flag (CP_TX_LS) performs the requested
 * offloads — 802.1Q tag insertion, IP/TCP/UDP checksum, and TCP
 * segmentation (TSO) — before handing the frame(s) to
 * rtl8139_transfer_frame().
 *
 * The offload code mutates the assembled frame in place (it overwrites
 * the 12 bytes before the L4 header with a pseudo-header for checksumming
 * and restores the IP header from a saved copy afterwards), so statement
 * order here is load-bearing.
 *
 * Returns 1 if a descriptor was consumed, 0 if the ring is stalled
 * (transmitter disabled, C+ mode disabled, or descriptor owned by the
 * host/driver). */
static int rtl8139_cplus_transmit_one(RTL8139State *s)
{
    if (!rtl8139_transmitter_enabled(s))
    {
        DPRINTF("+++ C+ mode: transmitter disabled\n");
        return 0;
    }
    if (!rtl8139_cp_transmitter_enabled(s))
    {
        DPRINTF("+++ C+ mode: C+ transmitter disabled\n");
        return 0 ;
    }
    PCIDevice *d = PCI_DEVICE(s);
    int descriptor = s->currCPlusTxDesc;
    dma_addr_t cplus_tx_ring_desc = rtl8139_addr64(s->TxAddr[0], s->TxAddr[1]);
    /* Normal priority ring */
    cplus_tx_ring_desc += 16 * descriptor;
    DPRINTF("+++ C+ mode reading TX descriptor %d from host memory at "
        "%08x %08x = 0x"DMA_ADDR_FMT"\n", descriptor, s->TxAddr[1],
        s->TxAddr[0], cplus_tx_ring_desc);
    /* descriptor words are little-endian in guest memory */
    uint32_t val, txdw0,txdw1,txbufLO,txbufHI;
    pci_dma_read(d, cplus_tx_ring_desc,    (uint8_t *)&val, 4);
    txdw0 = le32_to_cpu(val);
    pci_dma_read(d, cplus_tx_ring_desc+4,  (uint8_t *)&val, 4);
    txdw1 = le32_to_cpu(val);
    pci_dma_read(d, cplus_tx_ring_desc+8,  (uint8_t *)&val, 4);
    txbufLO = le32_to_cpu(val);
    pci_dma_read(d, cplus_tx_ring_desc+12, (uint8_t *)&val, 4);
    txbufHI = le32_to_cpu(val);
    DPRINTF("+++ C+ mode TX descriptor %d %08x %08x %08x %08x\n", descriptor,
        txdw0, txdw1, txbufLO, txbufHI);
    /* w0 ownership flag */
#define CP_TX_OWN (1<<31)
    /* w0 end of ring flag */
#define CP_TX_EOR (1<<30)
    /* first segment of received packet flag */
#define CP_TX_FS (1<<29)
    /* last segment of received packet flag */
#define CP_TX_LS (1<<28)
    /* large send packet flag */
#define CP_TX_LGSEN (1<<27)
    /* large send MSS mask, bits 16...25 */
#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1)
    /* IP checksum offload flag */
#define CP_TX_IPCS (1<<18)
    /* UDP checksum offload flag */
#define CP_TX_UDPCS (1<<17)
    /* TCP checksum offload flag */
#define CP_TX_TCPCS (1<<16)
    /* w0 bits 0...15 : buffer size */
#define CP_TX_BUFFER_SIZE (1<<16)
#define CP_TX_BUFFER_SIZE_MASK (CP_TX_BUFFER_SIZE - 1)
    /* w1 add tag flag */
#define CP_TX_TAGC (1<<17)
    /* w1 bits 0...15 : VLAN tag (big endian) */
#define CP_TX_VLAN_TAG_MASK ((1<<16) - 1)
    /* w2 low  32bit of Rx buffer ptr */
    /* w3 high 32bit of Rx buffer ptr */
    /* set after transmission */
    /* FIFO underrun flag */
#define CP_TX_STATUS_UNF (1<<25)
    /* transfer error summary flag, valid if set any of three below */
#define CP_TX_STATUS_TES (1<<23)
    /* out-of-window collision flag */
#define CP_TX_STATUS_OWC (1<<22)
    /* link failure flag */
#define CP_TX_STATUS_LNKF (1<<21)
    /* excessive collisions flag */
#define CP_TX_STATUS_EXC (1<<20)
    if (!(txdw0 & CP_TX_OWN))
    {
        DPRINTF("C+ Tx mode : descriptor %d is owned by host\n", descriptor);
        return 0 ;
    }
    DPRINTF("+++ C+ Tx mode : transmitting from descriptor %d\n", descriptor);
    if (txdw0 & CP_TX_FS)
    {
        DPRINTF("+++ C+ Tx mode : descriptor %d is first segment "
            "descriptor\n", descriptor);
        /* reset internal buffer offset */
        s->cplus_txbuffer_offset = 0;
    }
    int txsize = txdw0 & CP_TX_BUFFER_SIZE_MASK;
    dma_addr_t tx_addr = rtl8139_addr64(txbufLO, txbufHI);
    /* make sure we have enough space to assemble the packet */
    if (!s->cplus_txbuffer)
    {
        s->cplus_txbuffer_len = CP_TX_BUFFER_SIZE;
        s->cplus_txbuffer = g_malloc(s->cplus_txbuffer_len);
        s->cplus_txbuffer_offset = 0;
        DPRINTF("+++ C+ mode transmission buffer allocated space %d\n",
            s->cplus_txbuffer_len);
    }
    if (s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len)
    {
        /* The spec didn't tell the maximum size, stick to CP_TX_BUFFER_SIZE */
        txsize = s->cplus_txbuffer_len - s->cplus_txbuffer_offset;
        DPRINTF("+++ C+ mode transmission buffer overrun, truncated descriptor"
            "length to %d\n", txsize);
    }
    /* append more data to the packet */
    DPRINTF("+++ C+ mode transmit reading %d bytes from host memory at "
        DMA_ADDR_FMT" to offset %d\n", txsize, tx_addr,
        s->cplus_txbuffer_offset);
    pci_dma_read(d, tx_addr,
                 s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize);
    s->cplus_txbuffer_offset += txsize;
    /* seek to next Rx descriptor */
    if (txdw0 & CP_TX_EOR)
    {
        s->currCPlusTxDesc = 0;
    }
    else
    {
        ++s->currCPlusTxDesc;
        if (s->currCPlusTxDesc >= 64)
            s->currCPlusTxDesc = 0;
    }
    /* transfer ownership to target */
    /* NOTE(review): clears CP_RX_OWN rather than CP_TX_OWN — presumably
     * both are bit 31 (CP_TX_OWN is (1<<31) above); confirm CP_RX_OWN's
     * definition before touching this. */
    txdw0 &= ~CP_RX_OWN;
    /* reset error indicator bits */
    txdw0 &= ~CP_TX_STATUS_UNF;
    txdw0 &= ~CP_TX_STATUS_TES;
    txdw0 &= ~CP_TX_STATUS_OWC;
    txdw0 &= ~CP_TX_STATUS_LNKF;
    txdw0 &= ~CP_TX_STATUS_EXC;
    /* update ring data */
    val = cpu_to_le32(txdw0);
    pci_dma_write(d, cplus_tx_ring_desc, (uint8_t *)&val, 4);
    /* Now decide if descriptor being processed is holding the last segment of packet */
    if (txdw0 & CP_TX_LS)
    {
        uint8_t dot1q_buffer_space[VLAN_HLEN];
        uint16_t *dot1q_buffer;
        DPRINTF("+++ C+ Tx mode : descriptor %d is last segment descriptor\n",
            descriptor);
        /* can transfer fully assembled packet */
        uint8_t *saved_buffer  = s->cplus_txbuffer;
        int      saved_size    = s->cplus_txbuffer_offset;
        int      saved_buffer_len = s->cplus_txbuffer_len;
        /* create vlan tag */
        if (txdw1 & CP_TX_TAGC) {
            /* the vlan tag is in BE byte order in the descriptor
             * BE + le_to_cpu() + ~swap()~ = cpu */
            DPRINTF("+++ C+ Tx mode : inserting vlan tag with ""tci: %u\n",
                bswap16(txdw1 & CP_TX_VLAN_TAG_MASK));
            dot1q_buffer = (uint16_t *) dot1q_buffer_space;
            dot1q_buffer[0] = cpu_to_be16(ETH_P_8021Q);
            /* BE + le_to_cpu() + ~cpu_to_le()~ = BE */
            dot1q_buffer[1] = cpu_to_le16(txdw1 & CP_TX_VLAN_TAG_MASK);
        } else {
            dot1q_buffer = NULL;
        }
        /* reset the card space to protect from recursive call */
        s->cplus_txbuffer = NULL;
        s->cplus_txbuffer_offset = 0;
        s->cplus_txbuffer_len = 0;
        if (txdw0 & (CP_TX_IPCS | CP_TX_UDPCS | CP_TX_TCPCS | CP_TX_LGSEN))
        {
            DPRINTF("+++ C+ mode offloaded task checksum\n");
            /* Large enough for Ethernet and IP headers? */
            if (saved_size < ETH_HLEN + sizeof(ip_header)) {
                goto skip_offload;
            }
            /* ip packet header */
            ip_header *ip = NULL;
            int hlen = 0;
            uint8_t  ip_protocol = 0;
            uint16_t ip_data_len = 0;
            uint8_t *eth_payload_data = NULL;
            size_t   eth_payload_len  = 0;
            /* ethertype lives at offset 12 of the frame */
            int proto = be16_to_cpu(*(uint16_t *)(saved_buffer + 12));
            if (proto != ETH_P_IP)
            {
                goto skip_offload;
            }
            DPRINTF("+++ C+ mode has IP packet\n");
            /* not aligned */
            eth_payload_data = saved_buffer + ETH_HLEN;
            eth_payload_len  = saved_size - ETH_HLEN;
            ip = (ip_header*)eth_payload_data;
            if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) {
                DPRINTF("+++ C+ mode packet has bad IP version %d "
                    "expected %d\n", IP_HEADER_VERSION(ip),
                    IP_HEADER_VERSION_4);
                goto skip_offload;
            }
            /* bounds-check all guest-controlled lengths before use */
            hlen = IP_HEADER_LENGTH(ip);
            if (hlen < sizeof(ip_header) || hlen > eth_payload_len) {
                goto skip_offload;
            }
            ip_protocol = ip->ip_p;
            ip_data_len = be16_to_cpu(ip->ip_len);
            if (ip_data_len < hlen || ip_data_len > eth_payload_len) {
                goto skip_offload;
            }
            ip_data_len -= hlen;
            if (txdw0 & CP_TX_IPCS)
            {
                DPRINTF("+++ C+ mode need IP checksum\n");
                /* checksum is computed with the sum field zeroed */
                ip->ip_sum = 0;
                ip->ip_sum = ip_checksum(ip, hlen);
                DPRINTF("+++ C+ mode IP header len=%d checksum=%04x\n",
                    hlen, ip->ip_sum);
            }
            if ((txdw0 & CP_TX_LGSEN) && ip_protocol == IP_PROTO_TCP)
            {
                /* Large enough for the TCP header? */
                if (ip_data_len < sizeof(tcp_header)) {
                    goto skip_offload;
                }
                int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK;
                DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d "
                    "frame data %d specified MSS=%d\n", ETH_MTU,
                    ip_data_len, saved_size - ETH_HLEN, large_send_mss);
                int tcp_send_offset = 0;
                int send_count = 0;
                /* maximum IP header length is 60 bytes */
                uint8_t saved_ip_header[60];
                /* save IP header template; data area is used in tcp checksum calculation */
                memcpy(saved_ip_header, eth_payload_data, hlen);
                /* a placeholder for checksum calculation routine in tcp case */
                uint8_t *data_to_checksum     = eth_payload_data + hlen - 12;
                //                    size_t   data_to_checksum_len = eth_payload_len  - hlen + 12;
                /* pointer to TCP header */
                tcp_header *p_tcp_hdr = (tcp_header*)(eth_payload_data + hlen);
                int tcp_hlen = TCP_HEADER_DATA_OFFSET(p_tcp_hdr);
                /* Invalid TCP data offset? */
                if (tcp_hlen < sizeof(tcp_header) || tcp_hlen > ip_data_len) {
                    goto skip_offload;
                }
                /* ETH_MTU = ip header len + tcp header len + payload */
                int tcp_data_len = ip_data_len - tcp_hlen;
                int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen;
                DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP "
                    "data len %d TCP chunk size %d\n", ip_data_len,
                    tcp_hlen, tcp_data_len, tcp_chunk_size);
                /* note the cycle below overwrites IP header data,
                   but restores it from saved_ip_header before sending packet */
                int is_last_frame = 0;
                /* segment the TCP payload into MTU-sized frames */
                for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size)
                {
                    uint16_t chunk_size = tcp_chunk_size;
                    /* check if this is the last frame */
                    if (tcp_send_offset + tcp_chunk_size >= tcp_data_len)
                    {
                        is_last_frame = 1;
                        chunk_size = tcp_data_len - tcp_send_offset;
                    }
                    DPRINTF("+++ C+ mode TSO TCP seqno %08x\n",
                        be32_to_cpu(p_tcp_hdr->th_seq));
                    /* add 4 TCP pseudoheader fields */
                    /* copy IP source and destination fields */
                    memcpy(data_to_checksum, saved_ip_header + 12, 8);
                    DPRINTF("+++ C+ mode TSO calculating TCP checksum for "
                        "packet with %d bytes data\n", tcp_hlen +
                        chunk_size);
                    if (tcp_send_offset)
                    {
                        /* slide this chunk's payload up to just after the TCP header */
                        memcpy((uint8_t*)p_tcp_hdr + tcp_hlen, (uint8_t*)p_tcp_hdr + tcp_hlen + tcp_send_offset, chunk_size);
                    }
                    /* keep PUSH and FIN flags only for the last frame */
                    if (!is_last_frame)
                    {
                        TCP_HEADER_CLEAR_FLAGS(p_tcp_hdr, TCP_FLAG_PUSH|TCP_FLAG_FIN);
                    }
                    /* recalculate TCP checksum */
                    ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
                    p_tcpip_hdr->zeros      = 0;
                    p_tcpip_hdr->ip_proto   = IP_PROTO_TCP;
                    p_tcpip_hdr->ip_payload = cpu_to_be16(tcp_hlen + chunk_size);
                    p_tcp_hdr->th_sum = 0;
                    int tcp_checksum = ip_checksum(data_to_checksum, tcp_hlen + chunk_size + 12);
                    DPRINTF("+++ C+ mode TSO TCP checksum %04x\n",
                        tcp_checksum);
                    p_tcp_hdr->th_sum = tcp_checksum;
                    /* restore IP header */
                    memcpy(eth_payload_data, saved_ip_header, hlen);
                    /* set IP data length and recalculate IP checksum */
                    ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size);
                    /* increment IP id for subsequent frames */
                    ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id));
                    ip->ip_sum = 0;
                    ip->ip_sum = ip_checksum(eth_payload_data, hlen);
                    DPRINTF("+++ C+ mode TSO IP header len=%d "
                        "checksum=%04x\n", hlen, ip->ip_sum);
                    int tso_send_size = ETH_HLEN + hlen + tcp_hlen + chunk_size;
                    DPRINTF("+++ C+ mode TSO transferring packet size "
                        "%d\n", tso_send_size);
                    rtl8139_transfer_frame(s, saved_buffer, tso_send_size,
                        0, (uint8_t *) dot1q_buffer);
                    /* add transferred count to TCP sequence number */
                    p_tcp_hdr->th_seq = cpu_to_be32(chunk_size + be32_to_cpu(p_tcp_hdr->th_seq));
                    ++send_count;
                }
                /* Stop sending this frame */
                saved_size = 0;
            }
            else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS))
            {
                DPRINTF("+++ C+ mode need TCP or UDP checksum\n");
                /* maximum IP header length is 60 bytes */
                uint8_t saved_ip_header[60];
                memcpy(saved_ip_header, eth_payload_data, hlen);
                uint8_t *data_to_checksum     = eth_payload_data + hlen - 12;
                //                    size_t   data_to_checksum_len = eth_payload_len  - hlen + 12;
                /* add 4 TCP pseudoheader fields */
                /* copy IP source and destination fields */
                memcpy(data_to_checksum, saved_ip_header + 12, 8);
                if ((txdw0 & CP_TX_TCPCS) && ip_protocol == IP_PROTO_TCP)
                {
                    DPRINTF("+++ C+ mode calculating TCP checksum for "
                        "packet with %d bytes data\n", ip_data_len);
                    ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
                    p_tcpip_hdr->zeros      = 0;
                    p_tcpip_hdr->ip_proto   = IP_PROTO_TCP;
                    p_tcpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
                    tcp_header* p_tcp_hdr = (tcp_header *) (data_to_checksum+12);
                    p_tcp_hdr->th_sum = 0;
                    int tcp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
                    DPRINTF("+++ C+ mode TCP checksum %04x\n",
                        tcp_checksum);
                    p_tcp_hdr->th_sum = tcp_checksum;
                }
                else if ((txdw0 & CP_TX_UDPCS) && ip_protocol == IP_PROTO_UDP)
                {
                    DPRINTF("+++ C+ mode calculating UDP checksum for "
                        "packet with %d bytes data\n", ip_data_len);
                    ip_pseudo_header *p_udpip_hdr = (ip_pseudo_header *)data_to_checksum;
                    p_udpip_hdr->zeros      = 0;
                    p_udpip_hdr->ip_proto   = IP_PROTO_UDP;
                    p_udpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
                    udp_header *p_udp_hdr = (udp_header *) (data_to_checksum+12);
                    p_udp_hdr->uh_sum = 0;
                    int udp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
                    DPRINTF("+++ C+ mode UDP checksum %04x\n",
                        udp_checksum);
                    p_udp_hdr->uh_sum = udp_checksum;
                }
                /* restore IP header */
                memcpy(eth_payload_data, saved_ip_header, hlen);
            }
        }
skip_offload:
        /* update tally counter */
        ++s->tally_counters.TxOk;
        DPRINTF("+++ C+ mode transmitting %d bytes packet\n", saved_size);
        /* saved_size is 0 after the TSO path (already sent per-segment) */
        rtl8139_transfer_frame(s, saved_buffer, saved_size, 1,
            (uint8_t *) dot1q_buffer);
        /* restore card space if there was no recursion and reset offset */
        if (!s->cplus_txbuffer)
        {
            s->cplus_txbuffer        = saved_buffer;
            s->cplus_txbuffer_len    = saved_buffer_len;
            s->cplus_txbuffer_offset = 0;
        }
        else
        {
            g_free(saved_buffer);
        }
    }
    else
    {
        DPRINTF("+++ C+ mode transmission continue to next descriptor\n");
    }
    return 1;
}
/* Drain the C+ TX ring: keep transmitting descriptors until one cannot
 * be sent (host-owned or transmitter disabled).  Raises TxOK only if at
 * least one descriptor actually went out. */
static void rtl8139_cplus_transmit(RTL8139State *s)
{
    int txcount;
    for (txcount = 0; rtl8139_cplus_transmit_one(s); ++txcount)
    {
        /* keep draining the ring */
    }
    if (txcount == 0)
    {
        /* nothing was sent: the queue is stalled on a host-owned descriptor */
        DPRINTF("C+ mode : transmitter queue stalled, current TxDesc = %d\n",
            s->currCPlusTxDesc);
    }
    else
    {
        /* update interrupt status */
        s->IntrStatus |= TxOK;
        rtl8139_update_irq(s);
    }
}
/* Legacy-mode transmit: attempt to send the current descriptor and, on
 * success, advance to the next of the 4 descriptors.  The commented-out
 * "while" marks that this deliberately sends at most ONE descriptor per
 * call (each TxStatus write triggers another attempt). */
static void rtl8139_transmit(RTL8139State *s)
{
    int descriptor = s->currTxDesc, txcount = 0;
    /*while*/
    if (rtl8139_transmit_one(s, descriptor))
    {
        ++s->currTxDesc;
        s->currTxDesc %= 4;
        ++txcount;
    }
    /* Mark transfer completed */
    if (!txcount)
    {
        DPRINTF("transmitter queue stalled, current TxDesc = %d\n",
            s->currTxDesc);
    }
}
/* Handle a write to one of the 4 TxStatus registers (txRegOffset selects
 * which).  In C+ mode this register block doubles as DTCCR: writing bit 3
 * to the first dword triggers a DMA dump of the tally counters to the
 * 64-byte-aligned address formed from TxStatus[0]/TxStatus[1].  In legacy
 * mode it latches the descriptor status and kicks off a transmission. */
static void rtl8139_TxStatus_write(RTL8139State *s, uint32_t txRegOffset, uint32_t val)
{
    int descriptor = txRegOffset/4;
    /* handle C+ transmit mode register configuration */
    if (s->cplus_enabled)
    {
        DPRINTF("RTL8139C+ DTCCR write offset=0x%x val=0x%08x "
            "descriptor=%d\n", txRegOffset, val, descriptor);
        /* handle Dump Tally Counters command */
        s->TxStatus[descriptor] = val;
        if (descriptor == 0 && (val & 0x8))
        {
            hwaddr tc_addr = rtl8139_addr64(s->TxStatus[0] & ~0x3f, s->TxStatus[1]);
            /* dump tally counters to specified memory location */
            RTL8139TallyCounters_dma_write(s, tc_addr);
            /* mark dump completed */
            s->TxStatus[0] &= ~0x8;
        }
        return;
    }
    DPRINTF("TxStatus write offset=0x%x val=0x%08x descriptor=%d\n",
        txRegOffset, val, descriptor);
    /* mask only reserved bits */
    val &= ~0xff00c000; /* these bits are reset on write */
    val = SET_MASKED(val, 0x00c00000, s->TxStatus[descriptor]);
    s->TxStatus[descriptor] = val;
    /* attempt to start transmission */
    rtl8139_transmit(s);
}
/* Generic sub-dword read helper for the TxStatus/TxAddr register banks.
 * 'regs' is the 4-entry register array, 'base' its first register offset,
 * 'addr' the accessed offset and 'size' the access width (1/2/4 bytes).
 * Accesses must be naturally aligned; misaligned reads return 0.
 * ('s' is unused here; it is kept for signature symmetry with the other
 * register accessors.) */
static uint32_t rtl8139_TxStatus_TxAddr_read(RTL8139State *s, uint32_t regs[],
                                             uint32_t base, uint8_t addr,
                                             int size)
{
    uint32_t reg = (addr - base) / 4;
    uint32_t offset = addr & 0x3;
    uint32_t ret = 0;
    if (addr & (size - 1)) {
        DPRINTF("not implemented read for TxStatus/TxAddr "
                "addr=0x%x size=0x%x\n", addr, size);
        return ret;
    }
    switch (size) {
    case 1: /* fall through */
    case 2: /* fall through */
    case 4:
        /* extract 'size' bytes starting at the byte offset within the dword */
        ret = (regs[reg] >> offset * 8) & (((uint64_t)1 << (size * 8)) - 1);
        DPRINTF("TxStatus/TxAddr[%d] read addr=0x%x size=0x%x val=0x%08x\n",
                reg, addr, size, ret);
        break;
    default:
        DPRINTF("unsupported size 0x%x of TxStatus/TxAddr reading\n", size);
        break;
    }
    return ret;
}
/* Synthesize the read-only TSAD (Transmit Status of All Descriptors)
 * register: one summary bit per descriptor for each of the TOK, TUN,
 * TABT and OWN conditions, derived from the four TxStatus words. */
static uint16_t rtl8139_TSAD_read(RTL8139State *s)
{
    uint16_t ret = 0;
    /* Simulate TSAD, it is read only anyway */
    ret = ((s->TxStatus[3] & TxStatOK )?TSAD_TOK3:0)
         |((s->TxStatus[2] & TxStatOK )?TSAD_TOK2:0)
         |((s->TxStatus[1] & TxStatOK )?TSAD_TOK1:0)
         |((s->TxStatus[0] & TxStatOK )?TSAD_TOK0:0)
         |((s->TxStatus[3] & TxUnderrun)?TSAD_TUN3:0)
         |((s->TxStatus[2] & TxUnderrun)?TSAD_TUN2:0)
         |((s->TxStatus[1] & TxUnderrun)?TSAD_TUN1:0)
         |((s->TxStatus[0] & TxUnderrun)?TSAD_TUN0:0)
         |((s->TxStatus[3] & TxAborted )?TSAD_TABT3:0)
         |((s->TxStatus[2] & TxAborted )?TSAD_TABT2:0)
         |((s->TxStatus[1] & TxAborted )?TSAD_TABT1:0)
         |((s->TxStatus[0] & TxAborted )?TSAD_TABT0:0)
         |((s->TxStatus[3] & TxHostOwns )?TSAD_OWN3:0)
         |((s->TxStatus[2] & TxHostOwns )?TSAD_OWN2:0)
         |((s->TxStatus[1] & TxHostOwns )?TSAD_OWN1:0)
         |((s->TxStatus[0] & TxHostOwns )?TSAD_OWN0:0) ;
    DPRINTF("TSAD read val=0x%04x\n", ret);
    return ret;
}
static uint16_t rtl8139_CSCR_read(RTL8139State *s)
{
    /* Return the current CS Configuration Register value. */
    const uint16_t val = s->CSCR;

    DPRINTF("CSCR read val=0x%04x\n", val);

    return val;
}
static void rtl8139_TxAddr_write(RTL8139State *s, uint32_t txAddrOffset, uint32_t val)
{
    /* Program the transmit DMA address for the descriptor selected by the
     * register offset (each TSAD register is 4 bytes wide). */
    DPRINTF("TxAddr write offset=0x%x val=0x%08x\n", txAddrOffset, val);

    s->TxAddr[txAddrOffset / 4] = val;
}
static uint32_t rtl8139_TxAddr_read(RTL8139State *s, uint32_t txAddrOffset)
{
    /* Read back the transmit DMA address for this register offset. */
    const uint32_t val = s->TxAddr[txAddrOffset / 4];

    DPRINTF("TxAddr read offset=0x%x val=0x%08x\n", txAddrOffset, val);

    return val;
}
/* Guest write to CAPR (current address of packet read).  Real hardware
 * stores the value biased by 16 bytes; advancing the read pointer frees
 * ring space, so queued packets are re-tried afterwards. */
static void rtl8139_RxBufPtr_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("RxBufPtr write val=0x%04x\n", val);
    /* this value is off by 16 */
    s->RxBufPtr = MOD2(val + 0x10, s->RxBufferSize);
    /* more buffer space may be available so try to receive */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
    DPRINTF(" CAPR write: rx buffer length %d head 0x%04x read 0x%04x\n",
        s->RxBufferSize, s->RxBufAddr, s->RxBufPtr);
}
static uint32_t rtl8139_RxBufPtr_read(RTL8139State *s)
{
    /* CAPR reads back 16 bytes below the stored pointer, mirroring the
     * bias applied on write. */
    const uint32_t val = s->RxBufPtr - 0x10;

    DPRINTF("RxBufPtr read val=0x%04x\n", val);

    return val;
}
static uint32_t rtl8139_RxBufAddr_read(RTL8139State *s)
{
    /* CBA (current buffer address); unlike CAPR it carries no 16-byte bias. */
    const uint32_t val = s->RxBufAddr;

    DPRINTF("RxBufAddr read val=0x%04x\n", val);

    return val;
}
static void rtl8139_RxBuf_write(RTL8139State *s, uint32_t val)
{
    /* Latch the guest-programmed receive buffer start address (RBSTART).
     * A reset of the rx ring state may be needed here as well. */
    DPRINTF("RxBuf write val=0x%08x\n", val);

    s->RxBuf = val;
}
static uint32_t rtl8139_RxBuf_read(RTL8139State *s)
{
    /* Read back the receive buffer start address (RBSTART). */
    const uint32_t val = s->RxBuf;

    DPRINTF("RxBuf read val=0x%08x\n", val);

    return val;
}
/* Guest write to IMR.  Reserved bits 0x1e00 keep their previous value;
 * changing the mask may raise or lower the interrupt line immediately. */
static void rtl8139_IntrMask_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("IntrMask write(w) val=0x%04x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0x1e00, s->IntrMask);
    s->IntrMask = val;
    rtl8139_update_irq(s);
}
static uint32_t rtl8139_IntrMask_read(RTL8139State *s)
{
    /* Read back the interrupt mask register (IMR). */
    const uint32_t val = s->IntrMask;

    DPRINTF("IntrMask read(w) val=0x%04x\n", val);

    return val;
}
/* Guest write to ISR: writing 1 to a status bit clears it (W1C), with
 * reserved bits 0x1e00 preserved.  The status is momentarily forced to 0
 * with an irq update in between so the interrupt line sees a falling
 * edge even if some bits remain set afterwards -- do not reorder. */
static void rtl8139_IntrStatus_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("IntrStatus write(w) val=0x%04x\n", val);
#if 0
    /* writing to ISR has no effect */
    return;
#else
    uint16_t newStatus = s->IntrStatus & ~val;
    /* mask unwritable bits */
    newStatus = SET_MASKED(newStatus, 0x1e00, s->IntrStatus);
    /* writing 1 to interrupt status register bit clears it */
    s->IntrStatus = 0;
    rtl8139_update_irq(s);
    s->IntrStatus = newStatus;
    rtl8139_set_next_tctr_time(s);
    rtl8139_update_irq(s);
#endif
}
/* Guest read of ISR.  Reading does not clear interrupts on this device
 * model (the read-to-clear alternative is intentionally disabled). */
static uint32_t rtl8139_IntrStatus_read(RTL8139State *s)
{
    uint32_t ret = s->IntrStatus;
    DPRINTF("IntrStatus read(w) val=0x%04x\n", ret);
#if 0
    /* reading ISR clears all interrupts */
    s->IntrStatus = 0;
    rtl8139_update_irq(s);
#endif
    return ret;
}
/* Guest write to MULINT (multiple interrupt select).  The upper nibble
 * (0xf000) is read-only and keeps its previous value. */
static void rtl8139_MultiIntr_write(RTL8139State *s, uint32_t val)
{
    DPRINTF("MultiIntr write(w) val=0x%04x\n", val);
    /* mask unwritable bits */
    val = SET_MASKED(val, 0xf000, s->MultiIntr);
    s->MultiIntr = val;
}
static uint32_t rtl8139_MultiIntr_read(RTL8139State *s)
{
    /* Read back the multiple interrupt select register (MULINT). */
    const uint32_t val = s->MultiIntr;

    DPRINTF("MultiIntr read(w) val=0x%04x\n", val);

    return val;
}
/* Byte-wide register write dispatcher for the 256-byte I/O bank.
 * Unknown offsets are logged and ignored. */
static void rtl8139_io_writeb(void *opaque, uint8_t addr, uint32_t val)
{
    RTL8139State *s = opaque;
    switch (addr)
    {
        case MAC0 ... MAC0+4:
            s->phys[addr - MAC0] = val;
            break;
        case MAC0+5:
            /* last MAC byte: refresh the backend's info string too */
            s->phys[addr - MAC0] = val;
            qemu_format_nic_info_str(qemu_get_queue(s->nic), s->phys);
            break;
        case MAC0+6 ... MAC0+7:
            /* reserved */
            break;
        case MAR0 ... MAR0+7:
            s->mult[addr - MAR0] = val;
            break;
        case ChipCmd:
            rtl8139_ChipCmd_write(s, val);
            break;
        case Cfg9346:
            rtl8139_Cfg9346_write(s, val);
            break;
        case TxConfig: /* windows driver sometimes writes using byte-length call */
            rtl8139_TxConfig_writeb(s, val);
            break;
        case Config0:
            rtl8139_Config0_write(s, val);
            break;
        case Config1:
            rtl8139_Config1_write(s, val);
            break;
        case Config3:
            rtl8139_Config3_write(s, val);
            break;
        case Config4:
            rtl8139_Config4_write(s, val);
            break;
        case Config5:
            rtl8139_Config5_write(s, val);
            break;
        case MediaStatus:
            /* ignore */
            DPRINTF("not implemented write(b) to MediaStatus val=0x%02x\n",
                val);
            break;
        case HltClk:
            /* 'R' runs the clock, 'H' halts it (per the datasheet protocol) */
            DPRINTF("HltClk write val=0x%08x\n", val);
            if (val == 'R')
            {
                s->clock_enabled = 1;
            }
            else if (val == 'H')
            {
                s->clock_enabled = 0;
            }
            break;
        case TxThresh:
            DPRINTF("C+ TxThresh write(b) val=0x%02x\n", val);
            s->TxThresh = val;
            break;
        case TxPoll:
            DPRINTF("C+ TxPoll write(b) val=0x%02x\n", val);
            if (val & (1 << 7))
            {
                DPRINTF("C+ TxPoll high priority transmission (not "
                    "implemented)\n");
                //rtl8139_cplus_transmit(s);
            }
            if (val & (1 << 6))
            {
                DPRINTF("C+ TxPoll normal priority transmission\n");
                rtl8139_cplus_transmit(s);
            }
            break;
        default:
            DPRINTF("not implemented write(b) addr=0x%x val=0x%02x\n", addr,
                val);
            break;
    }
}
/* Word-wide register write dispatcher.  Offsets without a native 16-bit
 * handler are decomposed into two byte writes. */
static void rtl8139_io_writew(void *opaque, uint8_t addr, uint32_t val)
{
    RTL8139State *s = opaque;
    switch (addr)
    {
        case IntrMask:
            rtl8139_IntrMask_write(s, val);
            break;
        case IntrStatus:
            rtl8139_IntrStatus_write(s, val);
            break;
        case MultiIntr:
            rtl8139_MultiIntr_write(s, val);
            break;
        case RxBufPtr:
            rtl8139_RxBufPtr_write(s, val);
            break;
        case BasicModeCtrl:
            rtl8139_BasicModeCtrl_write(s, val);
            break;
        case BasicModeStatus:
            rtl8139_BasicModeStatus_write(s, val);
            break;
        case NWayAdvert:
            DPRINTF("NWayAdvert write(w) val=0x%04x\n", val);
            s->NWayAdvert = val;
            break;
        case NWayLPAR:
            /* link partner ability is read-only */
            DPRINTF("forbidden NWayLPAR write(w) val=0x%04x\n", val);
            break;
        case NWayExpansion:
            DPRINTF("NWayExpansion write(w) val=0x%04x\n", val);
            s->NWayExpansion = val;
            break;
        case CpCmd:
            rtl8139_CpCmd_write(s, val);
            break;
        case IntrMitigate:
            rtl8139_IntrMitigate_write(s, val);
            break;
        default:
            /* split the access into two little-endian byte writes */
            DPRINTF("ioport write(w) addr=0x%x val=0x%04x via write(b)\n",
                addr, val);
            rtl8139_io_writeb(opaque, addr, val & 0xff);
            rtl8139_io_writeb(opaque, addr + 1, (val >> 8) & 0xff);
            break;
    }
}
/* Re-arm (or cancel) the TCTR timeout timer.  The 32-bit tick counter is
 * modelled as (now - TCTR_base) scaled to PCI_FREQUENCY; the base is
 * advanced here in whole 2^32-tick periods so the visible counter value
 * is unchanged while keeping the arithmetic in range. */
static void rtl8139_set_next_tctr_time(RTL8139State *s)
{
    const uint64_t ns_per_period =
        muldiv64(0x100000000LL, get_ticks_per_sec(), PCI_FREQUENCY);
    DPRINTF("entered rtl8139_set_next_tctr_time\n");
    /* This function is called at least once per period, so it is a good
     * place to update the timer base.
     *
     * After one iteration of this loop the value in the Timer register does
     * not change, but the device model is counting up by 2^32 ticks (approx.
     * 130 seconds).
     */
    while (s->TCTR_base + ns_per_period <= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) {
        s->TCTR_base += ns_per_period;
    }
    if (!s->TimerInt) {
        /* TimerInt == 0 disables the timeout interrupt */
        timer_del(s->timer);
    } else {
        uint64_t delta = muldiv64(s->TimerInt, get_ticks_per_sec(), PCI_FREQUENCY);
        /* if the programmed tick already passed this period, fire next period */
        if (s->TCTR_base + delta <= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) {
            delta += ns_per_period;
        }
        timer_mod(s->timer, s->TCTR_base + delta);
    }
}
/* Long-wide register write dispatcher.  Offsets without a native 32-bit
 * handler are decomposed into four byte writes. */
static void rtl8139_io_writel(void *opaque, uint8_t addr, uint32_t val)
{
    RTL8139State *s = opaque;
    switch (addr)
    {
        case RxMissed:
            /* any write clears the missed-packet counter */
            DPRINTF("RxMissed clearing on write\n");
            s->RxMissed = 0;
            break;
        case TxConfig:
            rtl8139_TxConfig_write(s, val);
            break;
        case RxConfig:
            rtl8139_RxConfig_write(s, val);
            break;
        case TxStatus0 ... TxStatus0+4*4-1:
            rtl8139_TxStatus_write(s, addr-TxStatus0, val);
            break;
        case TxAddr0 ... TxAddr0+4*4-1:
            rtl8139_TxAddr_write(s, addr-TxAddr0, val);
            break;
        case RxBuf:
            rtl8139_RxBuf_write(s, val);
            break;
        case RxRingAddrLO:
            DPRINTF("C+ RxRing low bits write val=0x%08x\n", val);
            s->RxRingAddrLO = val;
            break;
        case RxRingAddrHI:
            DPRINTF("C+ RxRing high bits write val=0x%08x\n", val);
            s->RxRingAddrHI = val;
            break;
        case Timer:
            /* any write resets TCTR to zero by rebasing it at "now" */
            DPRINTF("TCTR Timer reset on write\n");
            s->TCTR_base = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            rtl8139_set_next_tctr_time(s);
            break;
        case FlashReg:
            DPRINTF("FlashReg TimerInt write val=0x%08x\n", val);
            if (s->TimerInt != val) {
                s->TimerInt = val;
                rtl8139_set_next_tctr_time(s);
            }
            break;
        default:
            /* split the access into four little-endian byte writes */
            DPRINTF("ioport write(l) addr=0x%x val=0x%08x via write(b)\n",
                addr, val);
            rtl8139_io_writeb(opaque, addr, val & 0xff);
            rtl8139_io_writeb(opaque, addr + 1, (val >> 8) & 0xff);
            rtl8139_io_writeb(opaque, addr + 2, (val >> 16) & 0xff);
            rtl8139_io_writeb(opaque, addr + 3, (val >> 24) & 0xff);
            break;
    }
}
/* Byte-wide register read dispatcher for the 256-byte I/O bank.
 * Unknown offsets are logged and read as 0.
 * (Fix: use uint32_t for the accumulator, matching the return type and
 * the sibling rtl8139_io_readw/readl dispatchers, instead of int.) */
static uint32_t rtl8139_io_readb(void *opaque, uint8_t addr)
{
    RTL8139State *s = opaque;
    uint32_t ret;
    switch (addr)
    {
        case MAC0 ... MAC0+5:
            ret = s->phys[addr - MAC0];
            break;
        case MAC0+6 ... MAC0+7:
            /* reserved MAC bytes read as zero */
            ret = 0;
            break;
        case MAR0 ... MAR0+7:
            ret = s->mult[addr - MAR0];
            break;
        case TxStatus0 ... TxStatus0+4*4-1:
            ret = rtl8139_TxStatus_TxAddr_read(s, s->TxStatus, TxStatus0,
                                               addr, 1);
            break;
        case ChipCmd:
            ret = rtl8139_ChipCmd_read(s);
            break;
        case Cfg9346:
            ret = rtl8139_Cfg9346_read(s);
            break;
        case Config0:
            ret = rtl8139_Config0_read(s);
            break;
        case Config1:
            ret = rtl8139_Config1_read(s);
            break;
        case Config3:
            ret = rtl8139_Config3_read(s);
            break;
        case Config4:
            ret = rtl8139_Config4_read(s);
            break;
        case Config5:
            ret = rtl8139_Config5_read(s);
            break;
        case MediaStatus:
            /* The LinkDown bit of MediaStatus is inverse with link status */
            ret = 0xd0 | (~s->BasicModeStatus & 0x04);
            DPRINTF("MediaStatus read 0x%x\n", ret);
            break;
        case HltClk:
            ret = s->clock_enabled;
            DPRINTF("HltClk read 0x%x\n", ret);
            break;
        case PCIRevisionID:
            ret = RTL8139_PCI_REVID;
            DPRINTF("PCI Revision ID read 0x%x\n", ret);
            break;
        case TxThresh:
            ret = s->TxThresh;
            DPRINTF("C+ TxThresh read(b) val=0x%02x\n", ret);
            break;
        case 0x43: /* Part of TxConfig register. Windows driver tries to read it */
            ret = s->TxConfig >> 24;
            DPRINTF("RTL8139C TxConfig at 0x43 read(b) val=0x%02x\n", ret);
            break;
        default:
            DPRINTF("not implemented read(b) addr=0x%x\n", addr);
            ret = 0;
            break;
    }
    return ret;
}
/* Word-wide register read dispatcher.  Offsets without a native 16-bit
 * handler are assembled from two byte reads. */
static uint32_t rtl8139_io_readw(void *opaque, uint8_t addr)
{
    RTL8139State *s = opaque;
    uint32_t ret;
    switch (addr)
    {
        case TxAddr0 ... TxAddr0+4*4-1:
            ret = rtl8139_TxStatus_TxAddr_read(s, s->TxAddr, TxAddr0, addr, 2);
            break;
        case IntrMask:
            ret = rtl8139_IntrMask_read(s);
            break;
        case IntrStatus:
            ret = rtl8139_IntrStatus_read(s);
            break;
        case MultiIntr:
            ret = rtl8139_MultiIntr_read(s);
            break;
        case RxBufPtr:
            ret = rtl8139_RxBufPtr_read(s);
            break;
        case RxBufAddr:
            ret = rtl8139_RxBufAddr_read(s);
            break;
        case BasicModeCtrl:
            ret = rtl8139_BasicModeCtrl_read(s);
            break;
        case BasicModeStatus:
            ret = rtl8139_BasicModeStatus_read(s);
            break;
        case NWayAdvert:
            ret = s->NWayAdvert;
            DPRINTF("NWayAdvert read(w) val=0x%04x\n", ret);
            break;
        case NWayLPAR:
            ret = s->NWayLPAR;
            DPRINTF("NWayLPAR read(w) val=0x%04x\n", ret);
            break;
        case NWayExpansion:
            ret = s->NWayExpansion;
            DPRINTF("NWayExpansion read(w) val=0x%04x\n", ret);
            break;
        case CpCmd:
            ret = rtl8139_CpCmd_read(s);
            break;
        case IntrMitigate:
            ret = rtl8139_IntrMitigate_read(s);
            break;
        case TxSummary:
            /* TSAD is synthesized from the TxStatus words */
            ret = rtl8139_TSAD_read(s);
            break;
        case CSCR:
            ret = rtl8139_CSCR_read(s);
            break;
        default:
            /* assemble the value from two little-endian byte reads */
            DPRINTF("ioport read(w) addr=0x%x via read(b)\n", addr);
            ret = rtl8139_io_readb(opaque, addr);
            ret |= rtl8139_io_readb(opaque, addr + 1) << 8;
            DPRINTF("ioport read(w) addr=0x%x val=0x%04x\n", addr, ret);
            break;
    }
    return ret;
}
/* Long-wide register read dispatcher.  Offsets without a native 32-bit
 * handler are assembled from four byte reads. */
static uint32_t rtl8139_io_readl(void *opaque, uint8_t addr)
{
    RTL8139State *s = opaque;
    uint32_t ret;
    switch (addr)
    {
        case RxMissed:
            ret = s->RxMissed;
            DPRINTF("RxMissed read val=0x%08x\n", ret);
            break;
        case TxConfig:
            ret = rtl8139_TxConfig_read(s);
            break;
        case RxConfig:
            ret = rtl8139_RxConfig_read(s);
            break;
        case TxStatus0 ... TxStatus0+4*4-1:
            ret = rtl8139_TxStatus_TxAddr_read(s, s->TxStatus, TxStatus0,
                                               addr, 4);
            break;
        case TxAddr0 ... TxAddr0+4*4-1:
            ret = rtl8139_TxAddr_read(s, addr-TxAddr0);
            break;
        case RxBuf:
            ret = rtl8139_RxBuf_read(s);
            break;
        case RxRingAddrLO:
            ret = s->RxRingAddrLO;
            DPRINTF("C+ RxRing low bits read val=0x%08x\n", ret);
            break;
        case RxRingAddrHI:
            ret = s->RxRingAddrHI;
            DPRINTF("C+ RxRing high bits read val=0x%08x\n", ret);
            break;
        case Timer:
            /* TCTR value is derived on the fly from the virtual clock */
            ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->TCTR_base,
                           PCI_FREQUENCY, get_ticks_per_sec());
            DPRINTF("TCTR Timer read val=0x%08x\n", ret);
            break;
        case FlashReg:
            ret = s->TimerInt;
            DPRINTF("FlashReg TimerInt read val=0x%08x\n", ret);
            break;
        default:
            /* assemble the value from four little-endian byte reads */
            DPRINTF("ioport read(l) addr=0x%x via read(b)\n", addr);
            ret = rtl8139_io_readb(opaque, addr);
            ret |= rtl8139_io_readb(opaque, addr + 1) << 8;
            ret |= rtl8139_io_readb(opaque, addr + 2) << 16;
            ret |= rtl8139_io_readb(opaque, addr + 3) << 24;
            DPRINTF("read(l) addr=0x%x val=%08x\n", addr, ret);
            break;
    }
    return ret;
}
/* */
/* MMIO byte write: the MMIO BAR aliases the 256-byte I/O register bank. */
static void rtl8139_mmio_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    rtl8139_io_writeb(opaque, addr & 0xFF, val);
}
/* MMIO word write, forwarded to the I/O register bank. */
static void rtl8139_mmio_writew(void *opaque, hwaddr addr, uint32_t val)
{
    rtl8139_io_writew(opaque, addr & 0xFF, val);
}
/* MMIO long write, forwarded to the I/O register bank. */
static void rtl8139_mmio_writel(void *opaque, hwaddr addr, uint32_t val)
{
    rtl8139_io_writel(opaque, addr & 0xFF, val);
}
/* MMIO byte read, forwarded to the I/O register bank. */
static uint32_t rtl8139_mmio_readb(void *opaque, hwaddr addr)
{
    return rtl8139_io_readb(opaque, addr & 0xFF);
}
static uint32_t rtl8139_mmio_readw(void *opaque, hwaddr addr)
{
    /* MMIO word read, forwarded to the I/O register bank. */
    return rtl8139_io_readw(opaque, addr & 0xFF);
}
static uint32_t rtl8139_mmio_readl(void *opaque, hwaddr addr)
{
    /* MMIO long read, forwarded to the I/O register bank. */
    return rtl8139_io_readl(opaque, addr & 0xFF);
}
/* Migration post-load fixup: re-arm the TCTR timer, reconstruct state
 * that older stream versions did not carry, and re-derive the backend
 * link state.  Returns 0 on success (required by VMState). */
static int rtl8139_post_load(void *opaque, int version_id)
{
    RTL8139State* s = opaque;
    rtl8139_set_next_tctr_time(s);
    if (version_id < 4) {
        /* cplus_enabled was not migrated before v4; infer it from CpCmd */
        s->cplus_enabled = s->CpCmd != 0;
    }
    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in BasicModeStatus */
    qemu_get_queue(s->nic)->link_down = (s->BasicModeStatus & 0x04) == 0;
    return 0;
}
/* Subsection predicate: only send the hotplug_ready marker when the
 * machine has been modified (i.e. devices were hot-plugged). */
static bool rtl8139_hotplug_ready_needed(void *opaque)
{
    return qdev_machine_modified();
}
/* Empty migration subsection whose mere presence tells the destination
 * that the device was hot-plugged and is ready. */
static const VMStateDescription vmstate_rtl8139_hotplug_ready ={
    .name = "rtl8139/hotplug_ready",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    }
};
/* Migration pre-save hook: materialize fields that exist only for
 * compatibility with older stream versions. */
static void rtl8139_pre_save(void *opaque)
{
    RTL8139State* s = opaque;
    int64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /* for migration to older versions */
    s->TCTR = muldiv64(current_time - s->TCTR_base, PCI_FREQUENCY,
                       get_ticks_per_sec());
    s->rtl8139_mmio_io_addr_dummy = 0;
}
/* Migration description for the whole device.  Field order and types
 * define the wire format -- do not reorder or retype entries; append
 * only, with version guards (see cplus_enabled, added in v4). */
static const VMStateDescription vmstate_rtl8139 = {
    .name = "rtl8139",
    .version_id = 4,
    .minimum_version_id = 3,
    .post_load = rtl8139_post_load,
    .pre_save  = rtl8139_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, RTL8139State),
        VMSTATE_PARTIAL_BUFFER(phys, RTL8139State, 6),
        VMSTATE_BUFFER(mult, RTL8139State),
        VMSTATE_UINT32_ARRAY(TxStatus, RTL8139State, 4),
        VMSTATE_UINT32_ARRAY(TxAddr, RTL8139State, 4),
        VMSTATE_UINT32(RxBuf, RTL8139State),
        VMSTATE_UINT32(RxBufferSize, RTL8139State),
        VMSTATE_UINT32(RxBufPtr, RTL8139State),
        VMSTATE_UINT32(RxBufAddr, RTL8139State),
        VMSTATE_UINT16(IntrStatus, RTL8139State),
        VMSTATE_UINT16(IntrMask, RTL8139State),
        VMSTATE_UINT32(TxConfig, RTL8139State),
        VMSTATE_UINT32(RxConfig, RTL8139State),
        VMSTATE_UINT32(RxMissed, RTL8139State),
        VMSTATE_UINT16(CSCR, RTL8139State),
        VMSTATE_UINT8(Cfg9346, RTL8139State),
        VMSTATE_UINT8(Config0, RTL8139State),
        VMSTATE_UINT8(Config1, RTL8139State),
        VMSTATE_UINT8(Config3, RTL8139State),
        VMSTATE_UINT8(Config4, RTL8139State),
        VMSTATE_UINT8(Config5, RTL8139State),
        VMSTATE_UINT8(clock_enabled, RTL8139State),
        VMSTATE_UINT8(bChipCmdState, RTL8139State),
        VMSTATE_UINT16(MultiIntr, RTL8139State),
        VMSTATE_UINT16(BasicModeCtrl, RTL8139State),
        VMSTATE_UINT16(BasicModeStatus, RTL8139State),
        VMSTATE_UINT16(NWayAdvert, RTL8139State),
        VMSTATE_UINT16(NWayLPAR, RTL8139State),
        VMSTATE_UINT16(NWayExpansion, RTL8139State),
        VMSTATE_UINT16(CpCmd, RTL8139State),
        VMSTATE_UINT8(TxThresh, RTL8139State),
        VMSTATE_UNUSED(4),
        VMSTATE_MACADDR(conf.macaddr, RTL8139State),
        VMSTATE_INT32(rtl8139_mmio_io_addr_dummy, RTL8139State),
        VMSTATE_UINT32(currTxDesc, RTL8139State),
        VMSTATE_UINT32(currCPlusRxDesc, RTL8139State),
        VMSTATE_UINT32(currCPlusTxDesc, RTL8139State),
        VMSTATE_UINT32(RxRingAddrLO, RTL8139State),
        VMSTATE_UINT32(RxRingAddrHI, RTL8139State),
        VMSTATE_UINT16_ARRAY(eeprom.contents, RTL8139State, EEPROM_9346_SIZE),
        VMSTATE_INT32(eeprom.mode, RTL8139State),
        VMSTATE_UINT32(eeprom.tick, RTL8139State),
        VMSTATE_UINT8(eeprom.address, RTL8139State),
        VMSTATE_UINT16(eeprom.input, RTL8139State),
        VMSTATE_UINT16(eeprom.output, RTL8139State),
        VMSTATE_UINT8(eeprom.eecs, RTL8139State),
        VMSTATE_UINT8(eeprom.eesk, RTL8139State),
        VMSTATE_UINT8(eeprom.eedi, RTL8139State),
        VMSTATE_UINT8(eeprom.eedo, RTL8139State),
        VMSTATE_UINT32(TCTR, RTL8139State),
        VMSTATE_UINT32(TimerInt, RTL8139State),
        VMSTATE_INT64(TCTR_base, RTL8139State),
        VMSTATE_STRUCT(tally_counters, RTL8139State, 0,
                       vmstate_tally_counters, RTL8139TallyCounters),
        VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_rtl8139_hotplug_ready,
            .needed = rtl8139_hotplug_ready_needed,
        }, {
            /* empty */
        }
    }
};
/***********************************************************/
/* PCI RTL8139 definitions */
/* MemoryRegionOps write callback for the I/O BAR: route by access size.
 * Sizes other than 1/2/4 are silently ignored (the ops' .impl limits
 * accesses to at most 4 bytes). */
static void rtl8139_ioport_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    switch (size) {
    case 1:
        rtl8139_io_writeb(opaque, addr, val);
        break;
    case 2:
        rtl8139_io_writew(opaque, addr, val);
        break;
    case 4:
        rtl8139_io_writel(opaque, addr, val);
        break;
    }
}
/* MemoryRegionOps read callback for the I/O BAR: route by access size.
 * Unsupported sizes read as all-ones. */
static uint64_t rtl8139_ioport_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    switch (size) {
    case 1:
        return rtl8139_io_readb(opaque, addr);
    case 2:
        return rtl8139_io_readw(opaque, addr);
    case 4:
        return rtl8139_io_readl(opaque, addr);
    }
    return -1;
}
/* Memory region ops for the PIO BAR: 1-4 byte little-endian accesses,
 * dispatched through the size-routing callbacks above. */
static const MemoryRegionOps rtl8139_io_ops = {
    .read = rtl8139_ioport_read,
    .write = rtl8139_ioport_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* Memory region ops for the MMIO BAR, using the legacy per-size
 * .old_mmio callback table (byte/word/long slots). */
static const MemoryRegionOps rtl8139_mmio_ops = {
    .old_mmio = {
        .read = {
            rtl8139_mmio_readb,
            rtl8139_mmio_readw,
            rtl8139_mmio_readl,
        },
        .write = {
            rtl8139_mmio_writeb,
            rtl8139_mmio_writew,
            rtl8139_mmio_writel,
        },
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* TCTR timeout callback: raise the PCS timeout interrupt and re-arm the
 * timer.  Does nothing while the chip clock is halted (HltClk = 'H'). */
static void rtl8139_timer(void *opaque)
{
    RTL8139State *s = opaque;
    if (!s->clock_enabled)
    {
        DPRINTF(">>> timer: clock is not running\n");
        return;
    }
    s->IntrStatus |= PCSTimeout;
    rtl8139_update_irq(s);
    rtl8139_set_next_tctr_time(s);
}
/* Device teardown: release the C+ transmit bounce buffer, the TCTR
 * timer, and the NIC backend. */
static void pci_rtl8139_uninit(PCIDevice *dev)
{
    RTL8139State *s = RTL8139(dev);

    /* g_free(NULL) is a documented no-op, so the NULL guard is redundant. */
    g_free(s->cplus_txbuffer);
    s->cplus_txbuffer = NULL;
    timer_del(s->timer);
    timer_free(s->timer);
    qemu_del_nic(s->nic);
}
/* Backend link-state callback: mirror the link into the BasicModeStatus
 * link bit (0x04) and signal the guest via the RxUnderrun/LinkChange
 * interrupt status bit. */
static void rtl8139_set_link_status(NetClientState *nc)
{
    RTL8139State *s = qemu_get_nic_opaque(nc);
    if (nc->link_down) {
        s->BasicModeStatus &= ~0x04;
    } else {
        s->BasicModeStatus |= 0x04;
    }
    s->IntrStatus |= RxUnderrun;
    rtl8139_update_irq(s);
}
/* Backend callback table wiring this NIC model into the net layer. */
static NetClientInfo net_rtl8139_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = rtl8139_can_receive,
    .receive = rtl8139_receive,
    .link_status_changed = rtl8139_set_link_status,
};
/* Realize: set up PCI config space, register the PIO and MMIO BARs,
 * seed the 93C46 EEPROM (including the MAC address words), create the
 * NIC backend and the TCTR timer. */
static void pci_rtl8139_realize(PCIDevice *dev, Error **errp)
{
    RTL8139State *s = RTL8139(dev);
    DeviceState *d = DEVICE(dev);
    uint8_t *pci_conf;
    pci_conf = dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */
    /* TODO: start of capability list, but no capability
     * list bit in status register, and offset 0xdc seems unused. */
    pci_conf[PCI_CAPABILITY_LIST] = 0xdc;
    memory_region_init_io(&s->bar_io, OBJECT(s), &rtl8139_io_ops, s,
                          "rtl8139", 0x100);
    memory_region_init_io(&s->bar_mem, OBJECT(s), &rtl8139_mmio_ops, s,
                          "rtl8139", 0x100);
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->bar_io);
    pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar_mem);
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    /* prepare eeprom */
    s->eeprom.contents[0] = 0x8129;
#if 1
    /* PCI vendor and device ID should be mirrored here */
    s->eeprom.contents[1] = PCI_VENDOR_ID_REALTEK;
    s->eeprom.contents[2] = PCI_DEVICE_ID_REALTEK_8139;
#endif
    /* MAC address is stored little-endian in EEPROM words 7..9 */
    s->eeprom.contents[7] = s->conf.macaddr.a[0] | s->conf.macaddr.a[1] << 8;
    s->eeprom.contents[8] = s->conf.macaddr.a[2] | s->conf.macaddr.a[3] << 8;
    s->eeprom.contents[9] = s->conf.macaddr.a[4] | s->conf.macaddr.a[5] << 8;
    s->nic = qemu_new_nic(&net_rtl8139_info, &s->conf,
                          object_get_typename(OBJECT(dev)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
    s->cplus_txbuffer = NULL;
    s->cplus_txbuffer_len = 0;
    s->cplus_txbuffer_offset = 0;
    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, rtl8139_timer, s);
}
/* Instance init: expose the "bootindex" property so the NIC can be
 * selected as a boot device. */
static void rtl8139_instance_init(Object *obj)
{
    RTL8139State *s = RTL8139(obj);
    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}
/* qdev properties: just the standard NIC configuration set. */
static Property rtl8139_properties[] = {
    DEFINE_NIC_PROPERTIES(RTL8139State, conf),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init: wire up realize/exit, PCI identity and the migration,
 * reset and property descriptors. */
static void rtl8139_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    k->realize = pci_rtl8139_realize;
    k->exit = pci_rtl8139_uninit;
    k->romfile = "efi-rtl8139.rom";
    k->vendor_id = PCI_VENDOR_ID_REALTEK;
    k->device_id = PCI_DEVICE_ID_REALTEK_8139;
    k->revision = RTL8139_PCI_REVID; /* >=0x20 is for 8139C+ */
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = rtl8139_reset;
    dc->vmsd = &vmstate_rtl8139;
    dc->props = rtl8139_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
/* QOM type registration record for the rtl8139 PCI device. */
static const TypeInfo rtl8139_info = {
    .name          = TYPE_RTL8139,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(RTL8139State),
    .class_init    = rtl8139_class_init,
    .instance_init = rtl8139_instance_init,
};
/* Register the type with QOM at module-init time. */
static void rtl8139_register_types(void)
{
    type_register_static(&rtl8139_info);
}
type_init(rtl8139_register_types)
| 28.873737 | 157 | 0.583756 | [
"object",
"model"
] |
d260a7ed2ee00cfab92012c759a5c2243841cfa4 | 4,434 | h | C | dds/include/alibabacloud/dds/model/DescribeDBInstancesRequest.h | sdk-team/aliyun-openapi-cpp-sdk | d0e92f6f33126dcdc7e40f60582304faf2c229b7 | [
"Apache-2.0"
] | 3 | 2020-01-06T08:23:14.000Z | 2022-01-22T04:41:35.000Z | dds/include/alibabacloud/dds/model/DescribeDBInstancesRequest.h | sdk-team/aliyun-openapi-cpp-sdk | d0e92f6f33126dcdc7e40f60582304faf2c229b7 | [
"Apache-2.0"
] | null | null | null | dds/include/alibabacloud/dds/model/DescribeDBInstancesRequest.h | sdk-team/aliyun-openapi-cpp-sdk | d0e92f6f33126dcdc7e40f60582304faf2c229b7 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ALIBABACLOUD_DDS_MODEL_DESCRIBEDBINSTANCESREQUEST_H_
#define ALIBABACLOUD_DDS_MODEL_DESCRIBEDBINSTANCESREQUEST_H_
#include <string>
#include <vector>
#include <alibabacloud/core/RpcServiceRequest.h>
#include <alibabacloud/dds/DdsExport.h>
namespace AlibabaCloud
{
namespace Dds
{
namespace Model
{
class ALIBABACLOUD_DDS_EXPORT DescribeDBInstancesRequest : public RpcServiceRequest
{
struct Tag
{
std::string value;
std::string key;
};
public:
DescribeDBInstancesRequest();
~DescribeDBInstancesRequest();
long getResourceOwnerId()const;
void setResourceOwnerId(long resourceOwnerId);
std::string getEngineVersion()const;
void setEngineVersion(const std::string& engineVersion);
std::string getNetworkType()const;
void setNetworkType(const std::string& networkType);
int getPageNumber()const;
void setPageNumber(int pageNumber);
std::string getReplicationFactor()const;
void setReplicationFactor(const std::string& replicationFactor);
std::string getAccessKeyId()const;
void setAccessKeyId(const std::string& accessKeyId);
std::string getExpired()const;
void setExpired(const std::string& expired);
std::string getSecurityToken()const;
void setSecurityToken(const std::string& securityToken);
std::string getRegionId()const;
void setRegionId(const std::string& regionId);
std::string getEngine()const;
void setEngine(const std::string& engine);
int getPageSize()const;
void setPageSize(int pageSize);
std::string getDBInstanceId()const;
void setDBInstanceId(const std::string& dBInstanceId);
std::string getDBInstanceDescription()const;
void setDBInstanceDescription(const std::string& dBInstanceDescription);
std::string getDBInstanceStatus()const;
void setDBInstanceStatus(const std::string& dBInstanceStatus);
std::vector<Tag> getTag()const;
void setTag(const std::vector<Tag>& tag);
std::string getExpireTime()const;
void setExpireTime(const std::string& expireTime);
std::string getResourceOwnerAccount()const;
void setResourceOwnerAccount(const std::string& resourceOwnerAccount);
std::string getOwnerAccount()const;
void setOwnerAccount(const std::string& ownerAccount);
long getOwnerId()const;
void setOwnerId(long ownerId);
std::string getDBInstanceType()const;
void setDBInstanceType(const std::string& dBInstanceType);
std::string getDBInstanceClass()const;
void setDBInstanceClass(const std::string& dBInstanceClass);
std::string getVSwitchId()const;
void setVSwitchId(const std::string& vSwitchId);
std::string getVpcId()const;
void setVpcId(const std::string& vpcId);
std::string getZoneId()const;
void setZoneId(const std::string& zoneId);
std::string getChargeType()const;
void setChargeType(const std::string& chargeType);
private:
long resourceOwnerId_;
std::string engineVersion_;
std::string networkType_;
int pageNumber_;
std::string replicationFactor_;
std::string accessKeyId_;
std::string expired_;
std::string securityToken_;
std::string regionId_;
std::string engine_;
int pageSize_;
std::string dBInstanceId_;
std::string dBInstanceDescription_;
std::string dBInstanceStatus_;
std::vector<Tag> tag_;
std::string expireTime_;
std::string resourceOwnerAccount_;
std::string ownerAccount_;
long ownerId_;
std::string dBInstanceType_;
std::string dBInstanceClass_;
std::string vSwitchId_;
std::string vpcId_;
std::string zoneId_;
std::string chargeType_;
};
}
}
}
#endif // !ALIBABACLOUD_DDS_MODEL_DESCRIBEDBINSTANCESREQUEST_H_ | 35.472 | 86 | 0.721019 | [
"vector",
"model"
] |
d2652dd3ef0088cf17a962783721cd9786019f76 | 4,772 | h | C | src/HBHS.h | shiggs90/2HDMC-NLO-master-wei | 76de26638d30e2840bbb6c680e4108b81a2a8feb | [
"MIT"
] | null | null | null | src/HBHS.h | shiggs90/2HDMC-NLO-master-wei | 76de26638d30e2840bbb6c680e4108b81a2a8feb | [
"MIT"
] | null | null | null | src/HBHS.h | shiggs90/2HDMC-NLO-master-wei | 76de26638d30e2840bbb6c680e4108b81a2a8feb | [
"MIT"
] | 1 | 2019-11-27T14:59:29.000Z | 2019-11-27T14:59:29.000Z | #if !defined(HBHS_H)
#define HBHS_H
#include "SM.h"
#include "THDM.h"
#include "DecayTable.h"
using namespace std;
// --------------- HiggsBounds -----------------
void HB_init();
void HB_set_input(THDM model);
void HB_set_input_effC(THDM model);
void HB_run_full(int hbres[],int hbchan[], double hbobs[], int hbcomb[]);
double HB_get_gammah(double m);
void HB_finish();
// --------------- HiggsSignals ----------------
void HS_init();
void HS_set_pdf(int pdf);
void HS_set_nparam(int nparam);
void HS_set_assignment_range(double range);
void HS_setup_assignment_range_massobservables(double range);
void HS_set_mass_uncertainties(double dM[]);
void HS_set_rate_uncertainties(double dCS[], double dBR[]);
void HS_set_output_level(int level);
void HS_run(double *csqmu, double *csqmh, double *csqtot, int *nobs, double *pval);
void HS_get_Rvalues(int i, int collider, double *R_H_WW, double *R_H_ZZ, double *R_H_gaga, double *R_H_tautau, double *R_H_bb, double *R_VH_bb);
void HS_finish();
// Structs used by the Wrapper to HiggsBounds Fortran subroutines and common blocks
extern "C"
{
extern void initialize_higgsbounds_int_(int *nH0, int *nHp, int *flag);
extern void higgsbounds_neutral_input_effc_(
double Mh[3],
double GammaTotal[3],
double g2hjss_s[3],
double g2hjss_p[3],
double g2hjcc_s[3],
double g2hjcc_p[3],
double g2hjbb_s[3],
double g2hjbb_p[3],
double g2hjtt_s[3],
double g2hjtt_p[3],
double g2hjmumu_s[3],
double g2hjmumu_p[3],
double g2hjtautau_s[3],
double g2hjtautau_p[3],
double g2hjWW[3],
double g2hjZZ[3],
double g2hjZga[3],
double g2hjgaga[3],
double g2hjgg[3],
double g2hjggZ[3],
double g2hjhiZ[3][3],
double BR_hjinvisible[3],
double BR_hjhihi[3][3]);
extern void higgsbounds_neutral_input_part_(
double Mh[3],
double MhGammaTot[3],
int CP[3],
double CS_lep_hjZ_ratio[3],
double CS_lep_bbhj_ratio[3],
double CS_lep_tautauhj_ratio[3],
double CS_lep_hjhi_ratio[3][3],
double CS_tev_gg_hj_ratio[3],
double CS_tev_bb_hj_ratio[3],
double CS_tev_bg_hjb_ratio[3],
double CS_tev_ud_hjWp_ratio[3],
double CS_tev_cs_hjWp_ratio[3],
double CS_tev_ud_hjWm_ratio[3],
double CS_tev_cs_hjWm_ratio[3],
double CS_tev_gg_hjZ_ratio[3],
double CS_tev_dd_hjZ_ratio[3],
double CS_tev_uu_hjZ_ratio[3],
double CS_tev_ss_hjZ_ratio[3],
double CS_tev_cc_hjZ_ratio[3],
double CS_tev_bb_hjZ_ratio[3],
double CS_tev_pp_vbf_ratio[3],
double CS_tev_pp_tthj_ratio[3],
double CS_lhc7_pp_vbf_ratio[3],
double CS_lhc7_pp_tthj_ratio[3],
double CS_lhc8_pp_vbf_ratio[3],
double CS_lhc8_pp_tthj_ratio[3],
double BR_hjss[3],
double BR_hjcc[3],
double BR_hjbb[3],
double BR_hjtautau[3],
double BR_hjmumu[3],
double BR_hjWW[3],
double BR_hjZZ[3],
double BR_hjZga[3],
double BR_hjgaga[3],
double BR_hjgg[3],
double BR_hjinvisible[3],
double BR_hjhihi[3][3]);
extern void higgsbounds_charged_input_(
double MHplus[1],
double MHplusGammaTot[1],
double CS_lep_HpjHmi_ratio[1],
double BR_tWpb[1],
double BR_tHpjb[1],
double BR_Hpjcs[1],
double BR_Hpjcb[1],
double BR_Hptaunu[1]);
extern void run_higgsbounds_(int *HBresult, int *chan,
double *obsratio, int *ncombined);
extern void run_higgsbounds_full_(int HBresult[6], int chan[6],
double obsratio[6], int ncombined[6]);
extern double smgamma_h_(double *Mh);
extern double smbr_hww_(double *Mh);
extern double smbr_hzz_(double *Mh);
extern double smbr_hgg_(double *Mh);
extern double smbr_htoptop_(double *Mh);
extern double smbr_hbb_(double *Mh);
extern double smbr_hcc_(double *Mh);
extern double smbr_hss_(double *Mh);
extern double smbr_htautau_(double *Mh);
extern double smbr_hmumu_(double *Mh);
extern double smbr_hzgam_(double *Mh);
extern double smbr_hgamgam_(double *Mh);
extern void finish_higgsbounds_();
extern void initialize_higgssignals_latestresults_(int *nHzero, int *nHplus);
extern void setup_pdf_(int *pdf);
extern void setup_output_level_(int *level);
extern void setup_nparam_(int *npara);
extern void higgssignals_neutral_input_massuncertainty_(double dMh[3]);
extern void setup_rate_uncertainties_(double dCS[5], double dBR[5]);
extern void setup_assignmentrange_(double *range);
extern void setup_assignmentrange_massobservables_(double *range);
extern void run_higgssignals_(int *mode, double *csqmu, double *csqmh, double *csqtot, int *nobs, double *pval);
extern void finish_higgssignals_();
extern void get_rvalues_(int *i, int *collider, double *R_H_WW, double *R_H_ZZ, double *R_H_gaga, double *R_H_tautau, double *R_H_bb, double *R_VH_bb);
}
#endif
| 28.57485 | 155 | 0.720034 | [
"model"
] |
d26964cd1c3efdbe7b3fa4563dcc0fb653e4b924 | 3,470 | c | C | src/Util/vector.c | kiat/star | dab5951266cb3d226344a9860d443e312fa23062 | [
"Apache-2.0"
] | null | null | null | src/Util/vector.c | kiat/star | dab5951266cb3d226344a9860d443e312fa23062 | [
"Apache-2.0"
] | null | null | null | src/Util/vector.c | kiat/star | dab5951266cb3d226344a9860d443e312fa23062 | [
"Apache-2.0"
] | 1 | 2018-12-05T09:37:49.000Z | 2018-12-05T09:37:49.000Z | //
// Created by Francis McCabe on 7/31/18.
//
// Vector structures
#include "vectorP.h"
#include "utils.h"
#include <assert.h>
#include <stdlib.h>
static integer vectorHash(objectPo o);
static logical vectorEquality(objectPo o1, objectPo o2);
static void vectorInit(objectPo o, va_list *args);
static void vectorDestroy(objectPo o);
/* Class descriptor for the vector type: wires the vector's lifecycle hooks
 * (init/destroy), hashing and equality into the generic object system.
 * Slots marked O_INHERIT_DEF fall back to the parent ObjectClass behavior. */
VectorClassRec VectorClass = {
  {
    (classPo) &ObjectClass,       /* parent class */
    "vector",                     /* class name */
    O_INHERIT_DEF,
    O_INHERIT_DEF,
    O_INHERIT_DEF,
    vectorDestroy,                /* releases element references */
    O_INHERIT_DEF,
    vectorInit,                   /* zeroes count/size/data */
    sizeof(VectorObjRecord),
    vectorHash,
    vectorEquality,
    NULL,
    PTHREAD_ONCE_INIT, /* not yet initialized */
    PTHREAD_MUTEX_INITIALIZER
  },
  {}
};

/* Public handle to the vector class, viewed as a generic classPo. */
classPo vectorClass = (classPo) &VectorClass;
/* Object-system init hook: a fresh vector starts empty with no backing store.
 * The constructor varargs are ignored here; elements are appended later. */
void vectorInit(objectPo o, va_list *args) {
  vectorPo v = O_VECT(o);
  v->vect.data = Null;
  v->vect.size = 0;
  v->vect.count = 0;
}
/* Object-system teardown hook: drop our reference on every stored element.
 * The element array itself is reclaimed elsewhere by the object system. */
void vectorDestroy(objectPo o) {
  vectorPo v = O_VECT(o);
  integer count = vectLength(v);
  for (integer i = 0; i < count; i++)
    decReference(getVectEl(v, i));
}
/* Order-sensitive polynomial hash (base 37) over the elements' hash codes. */
static integer vectorHash(objectPo o) {
  vectorPo v = O_VECT(o);
  integer acc = 0;
  for (integer i = 0; i < vectLength(v); i++)
    acc = acc * 37 + hashCode(getVectEl(v, i));
  return acc;
}
/* Two vectors are equal when they have the same length and pairwise-equal
 * elements (element equality delegated to the generic equals()). */
static logical vectorEquality(objectPo o1, objectPo o2) {
  vectorPo a = O_VECT(o1);
  vectorPo b = O_VECT(o2);

  if (vectLength(a) != vectLength(b))
    return False;

  for (integer i = 0; i < vectLength(a); i++) {
    if (!equals(getVectEl(a, i), getVectEl(b, i)))
      return False;
  }
  return True;
}
/* Construct a vector holding the `count` objectPo arguments that follow.
 * NOTE(review): `args` is handed to makeObject (which forwards it to
 * vectorInit) and then read again here for the elements — this relies on
 * vectorInit not consuming any varargs; confirm before changing either side. */
vectorPo vector(int count, ...) {
  va_list args;
  va_start(args, count);

  vectorPo v = O_VECT(makeObject(vectorClass, &args));

  /* Append each supplied element in argument order. */
  for (int ix = 0; ix < count; ix++) {
    objectPo arg = va_arg(args, objectPo);
    appendVectEl(v, arg);
  }

  va_end(args);
  return v;
}
/* Return the element at index ix. Bounds are enforced only by assert,
 * so callers must pass a valid index (0 <= ix < count). */
objectPo getVectEl(vectorPo v, integer ix) {
  assert(ix >= 0 && ix < v->vect.count && ix < v->vect.size);
  return v->vect.data[ix];
}
/* Insert `el` at position `off`, shifting later elements right.
 * An offset at or beyond the current count appends instead.
 * Returns Error (leaving the vector untouched) if growth allocation fails.
 *
 * Fix: the shift loop previously ran down to ix == off, which read
 * data[off - 1] — an out-of-bounds access when off == 0. It must stop
 * at off + 1. */
retCode addVectEl(vectorPo v, integer off, objectPo el) {
  assert(off >= 0 && el != Null);

  /* Grow geometrically (~x1.5) when full. */
  if (v->vect.count == v->vect.size) {
    integer nSize = ((v->vect.size + 1) * 3) / 2;
    objectPo *nvect = realloc(v->vect.data, nSize * sizeof(objectPo));
    if (nvect == Null)
      return Error;
    else {
      v->vect.data = nvect;
      v->vect.size = nSize;
    }
  }

  if (off >= v->vect.count) {
    /* Past the end: plain append. */
    v->vect.data[v->vect.count++] = el;
  } else {
    /* Shift the tail [off, count) one slot right; stop before reading
       data[off - 1]. */
    for (integer ix = v->vect.count; ix > off; ix--) {
      v->vect.data[ix] = v->vect.data[ix - 1];
    }
    v->vect.data[off] = el;
    v->vect.count++;
  }
  return Ok;
}
/* Append `el` at the end of the vector, growing the backing store (~x1.5)
 * when full. Returns Error and leaves the vector untouched if allocation
 * fails.
 *
 * Fix: the realloc result was previously assigned unconditionally, so an
 * allocation failure overwrote data with NULL (leaking the old buffer and
 * guaranteeing a later NULL dereference). Now checked, matching addVectEl. */
retCode appendVectEl(vectorPo v, objectPo el) {
  if (v->vect.count == v->vect.size) {
    integer nSize = ((v->vect.size + 1) * 3) / 2;
    objectPo *nvect = realloc(v->vect.data, nSize * sizeof(objectPo));
    if (nvect == Null)
      return Error;
    v->vect.data = nvect;
    v->vect.size = nSize;
  }
  v->vect.data[v->vect.count++] = el;
  return Ok;
}
/* Stack-style push: alias for appendVectEl (top of stack = end of vector). */
retCode pushVectEl(vectorPo v, objectPo el) {
  return appendVectEl(v, el);
}
/* Stack-style pop: remove and return the last element.
 * Precondition (via removeVectEl's assert): the vector is non-empty. */
objectPo popVectEl(vectorPo v) {
  return removeVectEl(v, v->vect.count - 1);
}
/* Remove and return the element at position `off`, shifting later elements
 * left to close the gap. Does NOT drop the element's reference; ownership
 * passes to the caller.
 *
 * Fix: the count decrement and `return el` were inside the shift loop, so
 * removing a middle element shifted only one slot (corrupting the tail) and
 * removing the LAST element skipped the loop entirely, returning Null with
 * count unchanged — which also broke popVectEl. */
objectPo removeVectEl(vectorPo v, integer off) {
  assert(v != Null && off >= 0 && off < v->vect.count);

  objectPo el = v->vect.data[off];

  /* Shift the tail (off, count) one slot left. */
  for (integer ix = off + 1; ix < v->vect.count; ix++) {
    v->vect.data[ix - 1] = v->vect.data[ix];
  }
  v->vect.count--;
  return el;
}
/* Number of elements currently stored (not the allocated capacity). */
integer vectLength(vectorPo v) {
  return v->vect.count;
}
| 21.288344 | 70 | 0.60951 | [
"vector"
] |
d269fa9405b6e79856af175cc668303174cb0e07 | 6,077 | h | C | src/components/store/pmstore/src/pm_store.h | ghsecuritylab/comanche | a8862eaed59045377874b95b120832a0cba42193 | [
"Apache-2.0"
] | 19 | 2017-10-03T16:01:49.000Z | 2021-06-07T10:21:46.000Z | src/components/store/pmstore/src/pm_store.h | dnbaker/comanche | 121cd0fa16e55d461b366e83511d3810ea2b11c9 | [
"Apache-2.0"
] | 25 | 2018-02-21T23:43:03.000Z | 2020-09-02T08:47:32.000Z | src/components/store/pmstore/src/pm_store.h | dnbaker/comanche | 121cd0fa16e55d461b366e83511d3810ea2b11c9 | [
"Apache-2.0"
] | 19 | 2017-10-24T17:41:40.000Z | 2022-02-22T02:17:18.000Z | /*
* (C) Copyright IBM Corporation 2018. All rights reserved.
*
*/
/*
* Authors:
*
* Daniel G. Waddington (daniel.waddington@ibm.com)
*
*/
#ifndef __KVSTORE_COMPONENT_H__
#define __KVSTORE_COMPONENT_H__
#include <unordered_map>
#include <pthread.h>
#include <common/rwlock.h>
#include <tbb/concurrent_hash_map.h>
#include <api/kvstore_itf.h>
/* Persistent-memory key-value store component implementing the IKVStore
 * interface. Thread model is one thread per pool (see thread_safety());
 * per-object read/write locking is handled internally via State_map. */
class PM_store : public Component::IKVStore
{
private:
  const unsigned _debug_level;   // verbosity level for diagnostic output

public:
  /**
   * Constructor
   *
   * @param debug_level Diagnostic verbosity level
   * @param owner Owner identifier
   * @param name Store instance name
   *
   */
  PM_store(unsigned int debug_level, const std::string& owner, const std::string& name);

  /**
   * Destructor
   *
   */
  virtual ~PM_store();

  /**
   * Component/interface management
   *
   */
  DECLARE_VERSION(0.1);
  DECLARE_COMPONENT_UUID(0x59564581,0x9e1b,0x4811,0xbdb2,0x19,0x57,0xa0,0xa6,0x84,0x57);

  void * query_interface(Component::uuid_t& itf_uuid) override {
    if(itf_uuid == Component::IKVStore::iid()) {
      return (void *) static_cast<Component::IKVStore*>(this);
    }
    else return NULL; // we don't support this interface
  }

  void unload() override {
    delete this;
  }

public:
  /* IKVStore */
  virtual status_t thread_safety() const { return THREAD_MODEL_SINGLE_PER_POOL; }

  virtual int get_capability(Capability cap) const;

  /* Pool lifecycle: create/open/delete/close a named pool. */
  virtual pool_t create_pool(const std::string& name,
                             const size_t size,
                             unsigned int flags,
                             uint64_t expected_obj_count = 0
                             ) override;

  virtual pool_t open_pool(const std::string& name,
                           unsigned int flags) override;

  virtual status_t delete_pool(const std::string& name) override;

  virtual status_t close_pool(const pool_t pid) override;

  virtual status_t get_pool_regions(const pool_t pool, std::vector<::iovec>& out_regions) override;

  /* Key-value operations. The *_direct variants operate on caller-provided
     buffers, optionally pre-registered via register_direct_memory(). */
  virtual status_t put(const pool_t pool,
                       const std::string& key,
                       const void * value,
                       const size_t value_len,
                       unsigned int flags = FLAGS_NONE) override;

  virtual status_t get(const pool_t pool,
                       const std::string& key,
                       void*& out_value,
                       size_t& out_value_len) override;

  virtual status_t get_direct(const pool_t pool,
                              const std::string& key,
                              void* out_value,
                              size_t& out_value_len,
                              Component::IKVStore::memory_handle_t handle = nullptr) override;

  virtual status_t put_direct(const pool_t pool,
                              const std::string& key,
                              const void * value,
                              const size_t value_len,
                              memory_handle_t handle = HANDLE_NONE,
                              unsigned flags = FLAGS_NONE) override;

  virtual Component::IKVStore::memory_handle_t register_direct_memory(void * vaddr, size_t len) override;

  /* Per-object locking: lock() returns the value in place; unlock() releases
     the handle obtained from lock(). */
  virtual status_t lock(const pool_t pool,
                        const std::string& key,
                        lock_type_t type,
                        void*& out_value,
                        size_t& out_value_len, IKVStore::key_t &out_key) override;

  virtual status_t unlock(const pool_t pool,
                          Component::IKVStore::key_t key_handle) override;

  virtual status_t erase(const pool_t pool,
                         const std::string& key) override;

  virtual size_t count(const pool_t pool) override;

  //  virtual status_t map(const pool_t pool,
  //                       std::function<int(const std::string& key,
  //                       const void * value,
  //                       const size_t value_len)> function) P

  virtual void debug(const pool_t pool, unsigned cmd, uint64_t arg) override;

private:

  /* Volatile (non-persistent) per-object state: a reader/writer lock. */
  struct volatile_state_t
  {
    Common::RWLock _lock;
  };

  /* Maps (pool, object pointer) -> volatile lock state.  */
  class State_map
  {
    using pool_state_map_t = std::unordered_map<const void*, volatile_state_t>;

    /* we use a concurrent/thread-safe map so we can support multiple
       threads on different pools
       TODO: cleaning up out pool entries? */
    using state_map_t = tbb::concurrent_hash_map<const pool_t, pool_state_map_t>;

  public:
    bool state_get_read_lock(const pool_t pool, const void * ptr);
    bool state_get_write_lock(const pool_t pool, const void * ptr);
    void state_unlock(const pool_t pool, const void * ptr);
    void state_remove(const pool_t pool, const void * ptr);

  private:
    state_map_t _state_map;
  };

  State_map _sm;   // per-object lock state across all pools
};
/* Factory component that instantiates PM_store objects through the
 * IKVStore_factory interface. */
class PM_store_factory : public Component::IKVStore_factory
{
public:

  /**
   * Component/interface management
   *
   */
  DECLARE_VERSION(0.1);
  DECLARE_COMPONENT_UUID(0xfac64581,0x9e1b,0x4811,0xbdb2,0x19,0x57,0xa0,0xa6,0x84,0x57);

  void * query_interface(Component::uuid_t& itf_uuid) override {
    if(itf_uuid == Component::IKVStore_factory::iid()) {
      return (void *) static_cast<Component::IKVStore_factory*>(this);
    }
    else return NULL; // we don't support this interface
  }

  void unload() override {
    delete this;
  }

  /* Create a PM_store instance. The returned object carries an extra
     reference (add_ref) which the caller must release. param2 is unused. */
  virtual Component::IKVStore * create(unsigned int debug_level,
                                       const std::string& owner,
                                       const std::string& name,
                                       const std::string& param2) override
  {
    Component::IKVStore * obj = static_cast<Component::IKVStore*>(new PM_store(debug_level, owner, name));
    obj->add_ref();
    return obj;
  }

  // virtual Component::IKVStore * create(const std::string& owner,
  //                                      const std::string& name) override
  // {
  //   Component::IKVStore * obj = static_cast<Component::IKVStore*>(new PM_store(debug_level, owner, name));
  //   obj->add_ref();
  //   return obj;
  // }

};
#endif
| 29.789216 | 109 | 0.594372 | [
"vector"
] |
d26b2c7b893c8a5195b105472affafc45955ae60 | 3,522 | h | C | contrib/su4/cpp2tex/include/io/format.h | agrishutin/teambook | 7e9ca28cd10241edbf9cd04ebdc1df3fa1c4b107 | [
"MIT"
] | 13 | 2017-07-04T14:58:47.000Z | 2022-03-23T09:04:41.000Z | contrib/su4/cpp2tex/include/io/format.h | agrishutin/teambook | 7e9ca28cd10241edbf9cd04ebdc1df3fa1c4b107 | [
"MIT"
] | null | null | null | contrib/su4/cpp2tex/include/io/format.h | agrishutin/teambook | 7e9ca28cd10241edbf9cd04ebdc1df3fa1c4b107 | [
"MIT"
] | 5 | 2017-10-14T21:48:20.000Z | 2018-06-18T12:12:15.000Z | /*
* This file [io/format.h] is part of the “libtamias” library
* Copyright (c) 2007-2010 Oleg Davydov, Yury Petrov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Our contacts:
* mailto: burunduk3@gmail.com (Oleg Davydov)
* mailto: jpsbur@gmail.com (Yury Petrov)
*/
#pragma once
namespace tamias {
class Format;
}
/*
* about conversions
* the generic form is: %<flags><width><precision><modificators><specifier>
* specifiers:
* c — character
* d, i — decimal integer
* e — float number, exponential format
* f — float number, fixed format
* g — float number, automatic format
* o — octal integer
* s — string
* x — hex integer
* % — special, outputs '%' into output stream
*/
#include "../basic/exception.h"
#include "../basic/string.h"
#include "../basic/utilities.h"
#include "../data/pair.h"
#include "../data/vector.h"
/* printf-style string formatter: constructed from a format string, fed
 * values via operator<<, and converted to a String once every conversion
 * specifier has been consumed (see ready()). Subclasses can customize
 * individual conversions by overriding the handle() family. */
class tamias::Format {
  public:
    Format( String const &format );
    Format( Format const &format );
    Format& operator = ( Format const &format );
    virtual ~Format();

    /* Feed the next value to the next conversion specifier. */
    Format& operator << ( int value );
    Format& operator << ( long value );
    Format& operator << ( long long value );
    Format& operator << ( unsigned int value );
    Format& operator << ( unsigned long value );
    Format& operator << ( unsigned long long value );
    Format& operator << ( char value );
    Format& operator << ( char const *value );
    Format& operator << ( String const &value );

    /* True once all specifiers have received a value. */
    bool ready();
    operator String() const;
    String output() const;

    /* Convenience decimal conversions, usable without a Format instance. */
    static String intToString( int value );
    static String intToString( long value );
    static String intToString( long long value );
    static String intToString( unsigned int value );
    static String intToString( unsigned long value );
    static String intToString( unsigned long long value );

  protected:
    /* Conversion kinds corresponding to the %c/%d/%o/%x/%f/%e/%s specifiers;
       TYPE_ECHO is literal text copied through unchanged. */
    enum ValueType {
      TYPE_ECHO, TYPE_CHARACTER, TYPE_DECIMAL, TYPE_OCTAL, TYPE_HEX, TYPE_FLOAT, TYPE_EXPONENT, TYPE_STRING
    };
    /* Render one value for one conversion; overridable per value type. */
    virtual String handle( ValueType type, String const &spec, int value );
    virtual String handle( ValueType type, String const &spec, long value );
    virtual String handle( ValueType type, String const &spec, long long value );
    virtual String handle( ValueType type, String const &spec, unsigned int value );
    virtual String handle( ValueType type, String const &spec, unsigned long value );
    virtual String handle( ValueType type, String const &spec, unsigned long long value );
    virtual String handle( ValueType type, String const &spec, char value );
    virtual String handle( ValueType type, String const &spec, char const *value );
    virtual String handle( ValueType type, String const &spec, String const &value );

  private:
    Vector <Pair <ValueType, String> > mData;   // parsed format: (kind, spec/literal) pairs
    tamias::sizetype mIndex;                    // index of the next unfilled conversion
    void skip();
};
| 35.22 | 107 | 0.690233 | [
"vector"
] |
d26b6be828fc789e30ad4991fc41c4852ca8406c | 3,652 | h | C | src/libQtl/QtlTranslator.h | qtlmovie/qtlmovie | 082ad5ea6522a02d5ac0d86f23cdd6152edff613 | [
"BSD-2-Clause"
] | 8 | 2016-08-09T14:05:58.000Z | 2020-09-05T14:43:36.000Z | src/libQtl/QtlTranslator.h | qtlmovie/qtlmovie | 082ad5ea6522a02d5ac0d86f23cdd6152edff613 | [
"BSD-2-Clause"
] | 15 | 2016-08-09T14:11:21.000Z | 2022-01-15T23:39:07.000Z | src/libQtl/QtlTranslator.h | qtlmovie/qtlmovie | 082ad5ea6522a02d5ac0d86f23cdd6152edff613 | [
"BSD-2-Clause"
] | 1 | 2017-08-26T22:08:58.000Z | 2017-08-26T22:08:58.000Z | //----------------------------------------------------------------------------
//
// Copyright (c) 2013-2017, Thierry Lelegard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------
//!
//! @file QtlTranslator.h
//!
//! Declare the class QtlTranslator.
//! Qtl, Qt utility library.
//!
//----------------------------------------------------------------------------
#ifndef QTLTRANSLATOR_H
#define QTLTRANSLATOR_H
#include "QtlCore.h"
//!
//! A subclass of QTranslator with extended file search capabilities.
//!
//!
//! A subclass of QTranslator with extended file search capabilities.
//! Keeps a process-wide record of every locale loaded through this class
//! (see loadedLocales()), which searchLocaleFile() uses to pick locale
//! variants of arbitrary files.
//!
class QtlTranslator : public QTranslator
{
    Q_OBJECT

public:
    //!
    //! Constructor.
    //! The translator is automatically installed in the application.
    //! @param [in] fileNamePrefix Same as the @a filename parameter in QTranslator::load()
    //! without the locale name part.
    //! @param [in] localeName Locale name. Defaults to the system locale.
    //! @param [in] directories List of directories to search for the file. The Qt translation
    //! directory is automatically added at the end of the list.
    //! @param [in] parent Optional parent object.
    //!
    explicit QtlTranslator(const QString& fileNamePrefix,
                           const QString& localeName = QString(),
                           const QStringList& directories = QStringList(),
                           QObject *parent = 0);

    //!
    //! Get the list of all loaded locales using this class.
    //! @return The list of all loaded locales using this class, most recent first.
    //! This list is never empty, its last element is the system locale.
    //!
    static QStringList loadedLocales();

    //!
    //! Search a locale variant of a file, based on all loaded locales.
    //! In the list of all loaded locales, seach the first existing file
    //! in the form {basename}_{locale}.{suffix}.
    //! @param [in] fileName A file name.
    //! @return The first existing locale variant or @a fileName if none is found.
    //!
    static QString searchLocaleFile(const QString& fileName);

private:
    static QStringList _loadedLocales;     //!< List of loaded locales.
    static QMutex      _loadedLocalesMutex; //!< Synchronize access to _loadedLocales.

    // Unaccessible operation.
    QtlTranslator() Q_DECL_EQ_DELETE;
};
#endif // QTLTRANSLATOR_H
| 41.033708 | 94 | 0.659091 | [
"object"
] |
d275b36d4d44247c0821e9050963724b96ea7679 | 6,503 | h | C | src/xbridge/xbridgetransaction.h | Matthelonianxl/BlockDX | c69674cc29a4f676794f6efd4c5acc4726f9073d | [
"MIT"
] | 1 | 2018-10-15T03:52:40.000Z | 2018-10-15T03:52:40.000Z | src/xbridge/xbridgetransaction.h | Matthelonianxl/BlockDX | c69674cc29a4f676794f6efd4c5acc4726f9073d | [
"MIT"
] | null | null | null | src/xbridge/xbridgetransaction.h | Matthelonianxl/BlockDX | c69674cc29a4f676794f6efd4c5acc4726f9073d | [
"MIT"
] | null | null | null | //*****************************************************************************
//*****************************************************************************
#ifndef XBRIDGETRANSACTION_H
#define XBRIDGETRANSACTION_H
#include "uint256.h"
#include "xbridgetransactionmember.h"
#include "xbridgedef.h"
#include "sync.h"
#include <vector>
#include <string>
#include <boost/cstdint.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/date_time/posix_time/ptime.hpp>
//******************************************************************************
//******************************************************************************
namespace xbridge
{
//*****************************************************************************
//*****************************************************************************
//*****************************************************************************
// Server-side record of an atomic-swap order between two parties "A" and "B".
// Tracks the order's lifecycle state machine (see enum State), both sides'
// addresses/amounts/transactions, and timing/expiry rules.
//*****************************************************************************
class Transaction
{
public:
    // see strState when editing
    enum State
    {
        trInvalid = 0,
        trNew,
        trJoined,
        trHold,
        trInitialized,
        trCreated,
        trSigned,
        trCommited,
        trFinished,
        trCancelled,
        trDropped
    };

    // Timing constants (all durations in seconds unless noted).
    enum
    {
        // transaction lock time base, in seconds, 60 sec * 10 min
        lockTime = 60 * 10,

        // pending transaction ttl in seconds, 6 min from last update
        pendingTTL = 60 * 6,

        // transaction ttl in seconds, 60 sec * 60 min
        TTL = 60 * 60,

        // order deadline ttl in seconds, 60 sec * 60 min * 24 hours * 7 days
        deadlineTTL = 60 * 60 * 24 * 7,

        // number of blocks ttl, 1440 blocks per day * 7 days
        blocksTTL = 1440 * 7
    };

public:
    Transaction();
    //! Construct side "A" of an order from its source/destination legs.
    Transaction(const uint256                    & id,
                const std::vector<unsigned char> & sourceAddr,
                const std::string                & sourceCurrency,
                const uint64_t                   & sourceAmount,
                const std::vector<unsigned char> & destAddr,
                const std::string                & destCurrency,
                const uint64_t                   & destAmount,
                const uint64_t                   & created,
                const uint256                    & blockHash,
                const std::vector<unsigned char> & mpubkey);
    ~Transaction();

    //! Order identifier.
    uint256 id() const;

    //! Hash of the block at which the order was created (used for block TTL).
    uint256 blockHash() const;

    //
    /**
     * @brief state
     * @return state of transaction
     */
    State state() const;
    //
    /**
     * @brief increaseStateCounter update state counter and update state
     * @param state
     * @param from
     * @return
     */
    State increaseStateCounter(const State state, const std::vector<unsigned char> & from);

    /**
     * @brief strState
     * @param state - transaction state
     * @return string name of state
     */
    static std::string strState(const State state);
    /**
     * @brief strState
     * @return string name of state
     */
    std::string strState() const;

    /**
     * @brief updateTimestamp - update transaction time
     */
    void updateTimestamp();

    /**
     * @brief createdTime
     * @return time of creation transaction
     */
    boost::posix_time::ptime createdTime() const;

    /**
     * @brief isFinished
     * @return true if transaction finished, canclelled or dropped
     */
    bool isFinished() const;
    /**
     * @brief isValid
     * @return true, if transaction not invalid
     */
    bool isValid() const;
    /**
     * @brief isExpired check time of last transaction update
     * @return true, if la
     */
    bool isExpired() const;

    //! True when the order exceeded its block-count TTL (blocksTTL).
    bool isExpiredByBlockNumber() const;

    /**
     * @brief cancel - set transaction state to trCancelled
     */
    void cancel();
    /**
     * @brief drop - set transaction state to trDropped
     */
    void drop();
    /**
     * @brief finish - set transaction state to finished
     */
    void finish();

    // Side "A" accessors: address, destination, currency/amount, binary tx id,
    // redeem script and public key for the first party.
    // uint256 firstId() const;
    std::vector<unsigned char> a_address() const;
    std::vector<unsigned char> a_destination() const;
    std::string a_currency() const;
    uint64_t a_amount() const;
    std::string a_payTx() const;
    std::string a_refTx() const;
    std::string a_bintxid() const;

    // TODO remove script
    std::vector<unsigned char> a_innerScript() const;

    std::vector<unsigned char> a_pk1() const;

    // Side "B" accessors: mirror of the side "A" group for the counterparty.
    // uint256 secondId() const;
    std::vector<unsigned char> b_address() const;
    std::vector<unsigned char> b_destination() const;
    std::string b_currency() const;
    uint64_t b_amount() const;
    std::string b_payTx() const;
    std::string b_refTx() const;
    std::string b_bintxid() const;

    // TODO remove script
    std::vector<unsigned char> b_innerScript() const;

    std::vector<unsigned char> b_pk1() const;

    //! Try to match this order with a counterparty order (opposite legs).
    bool tryJoin(const TransactionPtr other);

    bool setKeys(const std::vector<unsigned char> & addr,
                 const std::vector<unsigned char> & pk);
    bool setBinTxId(const std::vector<unsigned char> &addr,
                    const std::string & id,
                    const std::vector<unsigned char> & innerScript);

    friend std::ostream & operator << (std::ostream & out, const TransactionPtr & tx);

public:
    CCriticalSection m_lock;   // guards mutable state below

private:
    uint256                    m_id;

    boost::posix_time::ptime   m_created;   // creation time
    boost::posix_time::ptime   m_last;      // last-update time (drives pendingTTL/TTL)

    uint256                    m_blockHash; //hash of block when transaction created

    State                      m_state;

    bool                       m_a_stateChanged;
    bool                       m_b_stateChanged;

    unsigned int               m_confirmationCounter;

    std::string                m_sourceCurrency;
    std::string                m_destCurrency;

    uint64_t                   m_sourceAmount;
    uint64_t                   m_destAmount;

    std::string                m_bintxid1;   // side A binary tx id
    std::string                m_bintxid2;   // side B binary tx id

    std::vector<unsigned char> m_innerScript1;   // side A redeem script
    std::vector<unsigned char> m_innerScript2;   // side B redeem script

    XBridgeTransactionMember   m_a;   // party A
    XBridgeTransactionMember   m_b;   // party B
};
} // namespace xbridge
#endif // XBRIDGETRANSACTION_H
| 28.774336 | 91 | 0.510226 | [
"vector"
] |
d278250a6e55becbb651699a877c2f6b575b576c | 6,983 | h | C | include/CConcurrentObjectCache.h | WieszKto/IrrlichtBAW | bcef8386c2ca7f06ff006b866c397035551a2351 | [
"Apache-2.0"
] | 5 | 2018-01-31T16:15:26.000Z | 2019-08-29T17:53:53.000Z | include/CConcurrentObjectCache.h | WieszKto/IrrlichtBAW | bcef8386c2ca7f06ff006b866c397035551a2351 | [
"Apache-2.0"
] | 27 | 2019-03-01T21:05:15.000Z | 2020-07-08T21:10:01.000Z | include/CConcurrentObjectCache.h | WieszKto/IrrlichtBAW | bcef8386c2ca7f06ff006b866c397035551a2351 | [
"Apache-2.0"
] | 3 | 2018-08-28T12:26:54.000Z | 2020-03-03T20:36:07.000Z | #ifndef __C_CONCURRENT_OBJECT_CACHE_H_INCLUDED__
#define __C_CONCURRENT_OBJECT_CACHE_H_INCLUDED__
#include "CObjectCache.h"
#include "../source/Irrlicht/FW_Mutex.h"
namespace irr { namespace core
{
namespace impl
{
/* Non-copyable, non-movable base holding the readers-writer lock used by the
 * concurrent cache wrapper. The lock is implemented over a single atomic
 * counter: readers increment/decrement it, while a writer blocks it
 * (FW_AtomicCounterBlock) until unblocked — so reads are shared and writes
 * are exclusive. */
struct IRR_FORCE_EBO CConcurrentObjectCacheBase
{
    CConcurrentObjectCacheBase() = default;
    // explicitely making concurrent caches non-copy-and-move-constructible and non-copy-and-move-assignable
    CConcurrentObjectCacheBase(const CConcurrentObjectCacheBase&) = delete;
    CConcurrentObjectCacheBase(CConcurrentObjectCacheBase&&) = delete;
    CConcurrentObjectCacheBase& operator=(const CConcurrentObjectCacheBase&) = delete;
    CConcurrentObjectCacheBase& operator=(CConcurrentObjectCacheBase&&) = delete;

    struct
    {
        void lockRead() const { FW_AtomicCounterIncr(ctr); }      // shared: count one more reader
        void unlockRead() const { FW_AtomicCounterDecr(ctr); }    // shared: reader done
        void lockWrite() const { FW_AtomicCounterBlock(ctr); }    // exclusive: block out readers
        void unlockWrite() const { FW_AtomicCounterUnBlock(ctr); }
    private:
        mutable FW_AtomicCounter ctr = 0;
    } m_lock;
};
/* Thread-safety adapter: wraps any CObjectCache/CMultiObjectCache type and
 * guards every operation with the base readers-writer lock. Read-only
 * queries take the shared lock; anything that can mutate the cache takes
 * the exclusive lock. The exact lock/unlock bracketing around each
 * BaseCache call is the whole contract of this class — do not reorder. */
template<typename CacheT>
class CMakeCacheConcurrent : private impl::CConcurrentObjectCacheBase, private CacheT
{
    using BaseCache = CacheT;
    using K = typename BaseCache::KeyType_impl;
    using T = typename BaseCache::CachedType;

public:
    using IteratorType = typename BaseCache::IteratorType;
    using ConstIteratorType = typename BaseCache::ConstIteratorType;
    using RevIteratorType = typename BaseCache::RevIteratorType;
    using ConstRevIteratorType = typename BaseCache::ConstRevIteratorType;
    using RangeType = typename BaseCache::RangeType;
    using ConstRangeType = typename BaseCache::ConstRangeType;
    using PairType = typename BaseCache::PairType;
    using MutablePairType = typename BaseCache::MutablePairType;
    using CachedType = T;
    using KeyType = typename BaseCache::KeyType;

    using BaseCache::BaseCache;

    template<typename RngT>
    static bool isNonZeroRange(const RngT& _rng) { return BaseCache::isNonZeroRange(_rng); }

    // Mutating: exclusive lock.
    inline bool insert(const typename BaseCache::KeyType_impl& _key, const typename BaseCache::ValueType_impl& _val)
    {
        this->m_lock.lockWrite();
        const bool r = BaseCache::insert(_key, _val);
        this->m_lock.unlockWrite();
        return r;
    }

    // Read-only: shared lock.
    inline bool contains(typename BaseCache::ImmutableValueType_impl& _object) const
    {
        this->m_lock.lockRead();
        const bool r = BaseCache::contains(_object);
        this->m_lock.unlockRead();
        return r;
    }

    inline size_t getSize() const
    {
        this->m_lock.lockRead();
        const size_t r = BaseCache::getSize();
        this->m_lock.unlockRead();
        return r;
    }

    inline void clear()
    {
        this->m_lock.lockWrite();
        BaseCache::clear();
        this->m_lock.unlockWrite();
    }

    //! Returns true if had to insert
    bool swapObjectValue(const typename BaseCache::KeyType_impl& _key, const typename BaseCache::ImmutableValueType_impl& _obj, const typename BaseCache::ValueType_impl& _val)
    {
        this->m_lock.lockWrite();
        bool r = BaseCache::swapObjectValue(_key, _obj, _val);
        this->m_lock.unlockWrite();
        return r;
    }

    // Write lock: may reserve (insert) entries even on the lookup path.
    bool getAndStoreKeyRangeOrReserve(const typename BaseCache::KeyType_impl& _key, size_t& _inOutStorageSize, typename BaseCache::ValueType_impl* _out, bool* _gotAll)
    {
        this->m_lock.lockWrite();
        const bool r = BaseCache::getAndStoreKeyRangeOrReserve(_key, _inOutStorageSize, _out, _gotAll);
        this->m_lock.unlockWrite();
        return r;
    }

    inline bool removeObject(const typename BaseCache::ValueType_impl& _obj, const typename BaseCache::KeyType_impl& _key)
    {
        this->m_lock.lockWrite();
        const bool r = BaseCache::removeObject(_obj, _key);
        this->m_lock.unlockWrite();
        return r;
    }

    // Lookup variants (const and non-const, pair and value outputs): shared lock.
    inline bool findAndStoreRange(const typename BaseCache::KeyType_impl& _key, size_t& _inOutStorageSize, typename BaseCache::MutablePairType* _out)
    {
        m_lock.lockRead();
        const bool r = BaseCache::findAndStoreRange(_key, _inOutStorageSize, _out);
        m_lock.unlockRead();
        return r;
    }

    inline bool findAndStoreRange(const typename BaseCache::KeyType_impl& _key, size_t& _inOutStorageSize, typename BaseCache::MutablePairType* _out) const
    {
        m_lock.lockRead();
        const bool r = BaseCache::findAndStoreRange(_key, _inOutStorageSize, _out);
        m_lock.unlockRead();
        return r;
    }

    inline bool findAndStoreRange(const typename BaseCache::KeyType_impl& _key, size_t& _inOutStorageSize, typename BaseCache::ValueType_impl* _out)
    {
        m_lock.lockRead();
        const bool r = BaseCache::findAndStoreRange(_key, _inOutStorageSize, _out);
        m_lock.unlockRead();
        return r;
    }

    inline bool findAndStoreRange(const typename BaseCache::KeyType_impl& _key, size_t& _inOutStorageSize, typename BaseCache::ValueType_impl* _out) const
    {
        m_lock.lockRead();
        const bool r = BaseCache::findAndStoreRange(_key, _inOutStorageSize, _out);
        m_lock.unlockRead();
        return r;
    }

    inline bool outputAll(size_t& _inOutStorageSize, MutablePairType* _out) const
    {
        m_lock.lockRead();
        const bool r = BaseCache::outputAll(_inOutStorageSize, _out);
        m_lock.unlockRead();
        return r;
    }

    inline bool changeObjectKey(const typename BaseCache::ValueType_impl& _obj, const typename BaseCache::KeyType_impl& _key, const typename BaseCache::KeyType_impl& _newKey)
    {
        m_lock.lockWrite();
        const bool r = BaseCache::changeObjectKey(_obj, _key, _newKey);
        m_lock.unlockWrite();
        return r;
    }
};
}
/* Public aliases: the concurrent (lock-guarded) versions of CObjectCache and
 * CMultiObjectCache, with the same template parameters as the underlying
 * caches (key, value, container template, allocator). */
template<
    typename K,
    typename T,
    template<typename...> class ContainerT_T = std::vector,
    typename Alloc = core::allocator<typename impl::key_val_pair_type_for<ContainerT_T, K, T>::type>
>
using CConcurrentObjectCache =
    impl::CMakeCacheConcurrent<
        CObjectCache<K, T, ContainerT_T, Alloc>
    >;

template<
    typename K,
    typename T,
    template<typename...> class ContainerT_T = std::vector,
    typename Alloc = core::allocator<typename impl::key_val_pair_type_for<ContainerT_T, K, T>::type>
>
using CConcurrentMultiObjectCache =
    impl::CMakeCacheConcurrent<
        CMultiObjectCache<K, T, ContainerT_T, Alloc>
    >;
}}
#endif
| 37.143617 | 179 | 0.647 | [
"vector"
] |
d27aa81956508a243db27c08d3359e25a5adb618 | 15,406 | h | C | contrib/depends/SDKs/MacOSX10.11.sdk/usr/include/sys/signal.h | Amity-Network/amity | 3e57379f01af4ca851079c8e469fbda7c8165b47 | [
"MIT"
] | 416 | 2016-08-20T03:40:59.000Z | 2022-03-30T14:27:47.000Z | contrib/depends/SDKs/MacOSX10.11.sdk/usr/include/sys/signal.h | Amity-Network/amity | 3e57379f01af4ca851079c8e469fbda7c8165b47 | [
"MIT"
] | 41 | 2016-08-22T14:41:42.000Z | 2022-02-25T11:38:16.000Z | contrib/depends/SDKs/MacOSX10.11.sdk/usr/include/sys/signal.h | Amity-Network/amity | 3e57379f01af4ca851079c8e469fbda7c8165b47 | [
"MIT"
] | 173 | 2016-08-28T15:09:18.000Z | 2022-03-23T15:42:52.000Z | /*
* Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)signal.h 8.2 (Berkeley) 1/21/94
*/
#ifndef _SYS_SIGNAL_H_
#define _SYS_SIGNAL_H_
#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
#include <Availability.h>
#define __DARWIN_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */
#if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
#define NSIG __DARWIN_NSIG
#endif
#include <machine/signal.h> /* sigcontext; codes for SIGILL, SIGFPE */
#define SIGHUP 1 /* hangup */
#define SIGINT 2 /* interrupt */
#define SIGQUIT 3 /* quit */
#define SIGILL 4 /* illegal instruction (not reset when caught) */
#define SIGTRAP 5 /* trace trap (not reset when caught) */
#define SIGABRT 6 /* abort() */
#if (defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE))
#define SIGPOLL 7 /* pollable event ([XSR] generated, not supported) */
#else /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
#define SIGIOT SIGABRT /* compatibility */
#define SIGEMT 7 /* EMT instruction */
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
#define SIGFPE 8 /* floating point exception */
#define SIGKILL 9 /* kill (cannot be caught or ignored) */
#define SIGBUS 10 /* bus error */
#define SIGSEGV 11 /* segmentation violation */
#define SIGSYS 12 /* bad argument to system call */
#define SIGPIPE 13 /* write on a pipe with no one to read it */
#define SIGALRM 14 /* alarm clock */
#define SIGTERM 15 /* software termination signal from kill */
#define SIGURG 16 /* urgent condition on IO channel */
#define SIGSTOP 17 /* sendable stop signal not from tty */
#define SIGTSTP 18 /* stop signal from tty */
#define SIGCONT 19 /* continue a stopped process */
#define SIGCHLD 20 /* to parent on child stop or exit */
#define SIGTTIN 21 /* to readers pgrp upon background tty read */
#define SIGTTOU 22 /* like TTIN for output if (tp->t_local<OSTOP) */
#if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
#define SIGIO 23 /* input/output possible signal */
#endif
#define SIGXCPU 24 /* exceeded CPU time limit */
#define SIGXFSZ 25 /* exceeded file size limit */
#define SIGVTALRM 26 /* virtual time alarm */
#define SIGPROF 27 /* profiling time alarm */
#if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
#define SIGWINCH 28 /* window size changes */
#define SIGINFO 29 /* information request */
#endif
#define SIGUSR1 30 /* user defined signal 1 */
#define SIGUSR2 31 /* user defined signal 2 */
#if defined(_ANSI_SOURCE) || __DARWIN_UNIX03 || defined(__cplusplus)
/*
* Language spec sez we must list exactly one parameter, even though we
* actually supply three. Ugh!
* SIG_HOLD is chosen to avoid KERN_SIG_* values in <sys/signalvar.h>
*/
#define SIG_DFL (void (*)(int))0
#define SIG_IGN (void (*)(int))1
#define SIG_HOLD (void (*)(int))5
#define SIG_ERR ((void (*)(int))-1)
#else
/* DO NOT REMOVE THE COMMENTED OUT int: fixincludes needs to see them */
#define SIG_DFL (void (*)(/*int*/))0
#define SIG_IGN (void (*)(/*int*/))1
#define SIG_HOLD (void (*)(/*int*/))5
#define SIG_ERR ((void (*)(/*int*/))-1)
#endif
#ifndef _ANSI_SOURCE
#include <sys/_types.h>
#include <machine/_mcontext.h>
#include <sys/_pthread/_pthread_attr_t.h>
#include <sys/_types/_sigaltstack.h>
#include <sys/_types/_ucontext.h>
#include <sys/_types/_pid_t.h>
#include <sys/_types/_sigset_t.h>
#include <sys/_types/_size_t.h>
#include <sys/_types/_uid_t.h>
/*
 * Value delivered with a realtime signal or asynchronous notification:
 * either a plain integer or an application-supplied pointer.
 */
union sigval {
	/* Members as suggested by Annex C of POSIX 1003.1b. */
	int sival_int;
	void *sival_ptr;
};
#define SIGEV_NONE 0 /* No async notification */
#define SIGEV_SIGNAL 1 /* aio - completion notification */
#define SIGEV_THREAD 3 /* [NOTIMP] [RTS] call notification function */
/*
 * Describes how an application wants to be notified of an asynchronous
 * event (e.g. aio completion): not at all, via a signal, or via a
 * notification function (SIGEV_THREAD is marked not implemented here).
 */
struct sigevent {
	int sigev_notify; /* Notification type */
	int sigev_signo; /* Signal number */
	union sigval sigev_value; /* Signal value */
	void (*sigev_notify_function)(union sigval); /* Notification function */
	pthread_attr_t *sigev_notify_attributes; /* Notification attributes */
};
/*
 * Per-signal information passed to SA_SIGINFO handlers.  Which fields
 * are meaningful depends on the signal; see the explanatory comment
 * that follows this definition in the original header.
 */
typedef struct __siginfo {
	int si_signo; /* signal number */
	int si_errno; /* errno association */
	int si_code; /* signal code */
	pid_t si_pid; /* sending process */
	uid_t si_uid; /* sender's ruid */
	int si_status; /* exit value */
	void *si_addr; /* faulting instruction */
	union sigval si_value; /* signal value */
	long si_band; /* band event for SIGPOLL */
	unsigned long __pad[7]; /* Reserved for Future Use */
} siginfo_t;
/*
* When the signal is SIGILL or SIGFPE, si_addr contains the address of
* the faulting instruction.
* When the signal is SIGSEGV or SIGBUS, si_addr contains the address of
* the faulting memory reference. Although for x86 there are cases of SIGSEGV
* for which si_addr cannot be determined and is NULL.
* If the signal is SIGCHLD, the si_pid field will contain the child process ID,
* si_status contains the exit value or signal and
* si_uid contains the real user ID of the process that sent the signal.
*/
/* Values for si_code */
/* Codes for SIGILL */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define ILL_NOOP 0 /* if only I knew... */
#endif
#define ILL_ILLOPC 1 /* [XSI] illegal opcode */
#define ILL_ILLTRP 2 /* [XSI] illegal trap */
#define ILL_PRVOPC 3 /* [XSI] privileged opcode */
#define ILL_ILLOPN 4 /* [XSI] illegal operand -NOTIMP */
#define ILL_ILLADR 5 /* [XSI] illegal addressing mode -NOTIMP */
#define ILL_PRVREG 6 /* [XSI] privileged register -NOTIMP */
#define ILL_COPROC 7 /* [XSI] coprocessor error -NOTIMP */
#define ILL_BADSTK 8 /* [XSI] internal stack error -NOTIMP */
/* Codes for SIGFPE */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define FPE_NOOP 0 /* if only I knew... */
#endif
#define FPE_FLTDIV 1 /* [XSI] floating point divide by zero */
#define FPE_FLTOVF 2 /* [XSI] floating point overflow */
#define FPE_FLTUND 3 /* [XSI] floating point underflow */
#define FPE_FLTRES 4 /* [XSI] floating point inexact result */
#define FPE_FLTINV 5 /* [XSI] invalid floating point operation */
#define FPE_FLTSUB 6 /* [XSI] subscript out of range -NOTIMP */
#define FPE_INTDIV 7 /* [XSI] integer divide by zero */
#define FPE_INTOVF 8 /* [XSI] integer overflow */
/* Codes for SIGSEGV */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define SEGV_NOOP 0 /* if only I knew... */
#endif
#define SEGV_MAPERR 1 /* [XSI] address not mapped to object */
#define SEGV_ACCERR 2 /* [XSI] invalid permission for mapped object */
/* Codes for SIGBUS */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define BUS_NOOP 0 /* if only I knew... */
#endif
#define BUS_ADRALN 1 /* [XSI] Invalid address alignment */
#define BUS_ADRERR 2 /* [XSI] Nonexistent physical address -NOTIMP */
#define BUS_OBJERR 3 /* [XSI] Object-specific HW error - NOTIMP */
/* Codes for SIGTRAP */
#define TRAP_BRKPT 1 /* [XSI] Process breakpoint -NOTIMP */
#define TRAP_TRACE 2 /* [XSI] Process trace trap -NOTIMP */
/* Codes for SIGCHLD */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define CLD_NOOP 0 /* if only I knew... */
#endif
#define CLD_EXITED 1 /* [XSI] child has exited */
#define CLD_KILLED 2 /* [XSI] terminated abnormally, no core file */
#define CLD_DUMPED 3 /* [XSI] terminated abnormally, core file */
#define CLD_TRAPPED 4 /* [XSI] traced child has trapped */
#define CLD_STOPPED 5 /* [XSI] child has stopped */
#define CLD_CONTINUED 6 /* [XSI] stopped child has continued */
/* Codes for SIGPOLL */
#define POLL_IN 1 /* [XSR] Data input available */
#define POLL_OUT 2 /* [XSR] Output buffers available */
#define POLL_MSG 3 /* [XSR] Input message available */
#define POLL_ERR 4 /* [XSR] I/O error */
#define POLL_PRI 5 /* [XSR] High priority input available */
#define POLL_HUP 6 /* [XSR] Device disconnected */
/* union for signal handlers */
/*
 * A handler is installed either as the classic one-argument form
 * (__sa_handler) or the three-argument SA_SIGINFO form
 * (__sa_sigaction); SA_SIGINFO in sa_flags selects which member is
 * active.
 */
union __sigaction_u {
	void (*__sa_handler)(int);
	void (*__sa_sigaction)(int, struct __siginfo *,
	    void *);
};
/* Signal vector template for Kernel user boundary */
/* Differs from the public struct sigaction by carrying sa_tramp,
 * the user-space trampoline the kernel invokes to enter the handler. */
struct __sigaction {
	union __sigaction_u __sigaction_u; /* signal handler */
	void (*sa_tramp)(void *, int, int, siginfo_t *, void *);
	sigset_t sa_mask; /* signal mask to apply */
	int sa_flags; /* see signal options below */
};
/*
 * Signal vector "template" used in sigaction call.
 */
struct sigaction {
	union __sigaction_u __sigaction_u; /* signal handler */
	sigset_t sa_mask; /* signal mask to apply */
	int sa_flags; /* see signal options below */
};
/* if SA_SIGINFO is set, sa_sigaction is to be used instead of sa_handler. */
#define sa_handler __sigaction_u.__sa_handler
#define sa_sigaction __sigaction_u.__sa_sigaction
#define SA_ONSTACK 0x0001 /* take signal on signal stack */
#define SA_RESTART 0x0002 /* restart system on signal return */
#define SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */
#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */
#define SA_NODEFER 0x0010 /* don't mask the signal we're delivering */
#define SA_NOCLDWAIT 0x0020 /* don't keep zombies around */
#define SA_SIGINFO 0x0040 /* signal handler with SA_SIGINFO args */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
#define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */
/* This will provide 64bit register set in a 32bit user address space */
#define SA_64REGSET 0x0200 /* signal handler with SA_SIGINFO args with 64bit regs information */
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
/* the following are the only bits we support from user space, the
* rest are for kernel use only.
*/
#define SA_USERSPACE_MASK (SA_ONSTACK | SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER | SA_NOCLDWAIT | SA_SIGINFO)
/*
* Flags for sigprocmask:
*/
#define SIG_BLOCK 1 /* block specified signal set */
#define SIG_UNBLOCK 2 /* unblock specified signal set */
#define SIG_SETMASK 3 /* set specified signal set */
/* POSIX 1003.1b required values. */
#define SI_USER 0x10001 /* [CX] signal from kill() */
#define SI_QUEUE 0x10002 /* [CX] signal from sigqueue() */
#define SI_TIMER 0x10003 /* [CX] timer expiration */
#define SI_ASYNCIO 0x10004 /* [CX] aio request completion */
#define SI_MESGQ 0x10005 /* [CX] from message arrival on empty queue */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
typedef void (*sig_t)(int); /* type of signal function */
#endif
/*
* Structure used in sigaltstack call.
*/
#define SS_ONSTACK 0x0001 /* take signal on signal stack */
#define SS_DISABLE 0x0004 /* disable taking signals on alternate stack */
#define MINSIGSTKSZ 32768 /* (32K)minimum allowable stack */
#define SIGSTKSZ 131072 /* (128K)recommended stack size */
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
/*
* 4.3 compatibility:
* Signal vector "template" used in sigvec call.
*/
/* 4.3BSD-compatibility structure accepted by the legacy sigvec() call. */
struct sigvec {
	void (*sv_handler)(int); /* signal handler */
	int sv_mask; /* signal mask to apply */
	int sv_flags; /* see signal options below */
};
/* The SV_* flags are aliases for the corresponding SA_* sigaction flags. */
#define SV_ONSTACK SA_ONSTACK
#define SV_INTERRUPT SA_RESTART /* same bit, opposite sense */
#define SV_RESETHAND SA_RESETHAND
#define SV_NODEFER SA_NODEFER
#define SV_NOCLDSTOP SA_NOCLDSTOP
#define SV_SIGINFO SA_SIGINFO
#define sv_onstack sv_flags /* isn't compatibility wonderful! */
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
/*
 * Structure used in sigstack call.
 */
struct sigstack {
	char *ss_sp; /* signal stack pointer */
	int ss_onstack; /* current status */
};
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
/*
* Macro for converting signal number to a mask suitable for
* sigblock().
*/
#define sigmask(m) (1 << ((m)-1))
#define BADSIG SIG_ERR
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
#endif /* !_ANSI_SOURCE */
/*
* For historical reasons; programs expect signal's return value to be
* defined by <sys/signal.h>.
*/
__BEGIN_DECLS
void (*signal(int, void (*)(int)))(int);
__END_DECLS
#endif /* !_SYS_SIGNAL_H_ */
| 39.201018 | 122 | 0.725172 | [
"object",
"vector"
] |
cf9acbb61a0b000f035092ef945da031d6fdfedf | 3,315 | h | C | ttk-0.9.7/ttk_install/include/ttk/base/Debug.h | DaVisLab/PersistenceCycles | b68ae3ebc218ed69babeee5c1e4ac7f5a89564cd | [
"MIT"
] | null | null | null | ttk-0.9.7/ttk_install/include/ttk/base/Debug.h | DaVisLab/PersistenceCycles | b68ae3ebc218ed69babeee5c1e4ac7f5a89564cd | [
"MIT"
] | null | null | null | ttk-0.9.7/ttk_install/include/ttk/base/Debug.h | DaVisLab/PersistenceCycles | b68ae3ebc218ed69babeee5c1e4ac7f5a89564cd | [
"MIT"
] | 1 | 2021-12-24T15:05:31.000Z | 2021-12-24T15:05:31.000Z | /// \namespace ttk The Topology ToolKit
/// \mainpage TTK 0.9.7 Documentation
/// \image html "../img/splash.png"
/// Useful links:
/// - TTK Home:
/// <a href="https://topology-tool-kit.github.io/"
/// target="new">https://topology-tool-kit.github.io/</a>
/// \defgroup base base
/// \brief The Topology ToolKit - Base code processing packages.
/// @{
/// \class ttk::Debug
/// \author Julien Tierny <julien.tierny@lip6.fr>
/// \date February 2011.
///
/// \brief Minimalist debugging class.
///
/// %Debug provides a few mechanisms to handle debugging messages at a global
/// and local scope, time and memory measurements, etc.
/// Each ttk class should inheritate from it.
#ifndef _DEBUG_H
#define _DEBUG_H
#include <cerrno>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <BaseClass.h>
namespace ttk{
extern bool welcomeMsg_;
extern bool goodbyeMsg_;
extern int globalDebugLevel_;
class Debug: public BaseClass{
  public:
    // 1) constructors, destructors, operators, etc.
    Debug();
    virtual ~Debug();
    // Message priorities: lower value means higher priority.  Priority 0
    // (fatalMsg) is reserved for fatal errors so a debug level of 0 keeps
    // the program completely quiet.
    enum debugPriority{
      fatalMsg, // 0
      timeMsg, // 1
      memoryMsg, // 2
      infoMsg, // 3
      detailedInfoMsg, // 4
      advancedInfoMsg // 5
    };
    // 2) functions
    /// Send a debug message to a stream with a priority debugLevel (lower
    /// means higher priority).
    /// If the global debug level for the program is set to 0, the program
    /// should be completely quiet. So the '0' priority should only be
    /// reserved for fatal errors.
    /// \param stream Output stream.
    /// \param msg %Debug message (can contain std::endl characters).
    /// \param debugLevel Priority of the message.
    /// \return Returns 0 upon success, negative values otherwise.
    /// \sa msg(), err()
    virtual int dMsg(std::ostream &stream, std::string msg,
      const int &debugLevel = infoMsg) const;
    /// Wrapper for dMsg() that sends a debug message to the standard error
    /// output stream.
    /// \return Returns 0 upon success, negative values otherwise.
    /// \sa dMsg(), msg()
    int err(const std::string msg, const int &debugLevel = fatalMsg) const;
    /// Wrapper for dMsg() that sends a debug message to the standard
    /// output stream.
    /// \return Returns 0 upon success, negative values otherwise.
    /// \sa dMsg(), msg()
    int msg(const char *msg, const int &debugLevel = infoMsg) const;
    /// Set the debug level of a particular object. The global variable
    /// globalDebugLevel_ will over-ride this setting if it has a lower value.
    /// \return Returns 0 upon success, negative values otherwise.
    virtual int setDebugLevel(const int &debugLevel);
  protected:
    // Per-instance priority threshold; mutable so that const member
    // functions (e.g. dMsg) can use it — presumably to adjust or cache
    // it; confirm against the implementation file.
    mutable int debugLevel_;
};
#include <Os.h>
namespace ttk{
/// \brief Legacy backward compatibility
class DebugTimer : public Timer{};
/// \brief Legacy backward compatibility.
class DebugMemory : public Memory{};
}
#endif
/// @}
| 31.571429 | 80 | 0.600905 | [
"object",
"vector"
] |
cf9c23ccadc3ad255ec0d5397674b78a0cdd7320 | 32,329 | c | C | src/usr.sbin/nandsim/nandsim.c | dnybz/MeshBSD | 5c6c0539ce13d7cda9e2645e2e9e916e371f87b2 | [
"BSD-3-Clause"
] | null | null | null | src/usr.sbin/nandsim/nandsim.c | dnybz/MeshBSD | 5c6c0539ce13d7cda9e2645e2e9e916e371f87b2 | [
"BSD-3-Clause"
] | null | null | null | src/usr.sbin/nandsim/nandsim.c | dnybz/MeshBSD | 5c6c0539ce13d7cda9e2645e2e9e916e371f87b2 | [
"BSD-3-Clause"
] | null | null | null | /*-
* Copyright (C) 2009-2012 Semihalf
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Control application for the NAND simulator.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/usr.sbin/nandsim/nandsim.c 289677 2015-10-21 05:37:09Z eadler $");
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dev/nand/nandsim.h>
#include <dev/nand/nand_dev.h>
#include <ctype.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <stdlib.h>
#include <limits.h>
#include <sysexits.h>
#include "nandsim_cfgparse.h"
#define SIMDEVICE "/dev/nandsim.ioctl"
#define error(fmt, args...) do { \
printf("ERROR: " fmt "\n", ##args); } while (0)
#define warn(fmt, args...) do { \
printf("WARNING: " fmt "\n", ##args); } while (0)
#define DEBUG
#undef DEBUG
#ifdef DEBUG
#define debug(fmt, args...) do { \
printf("NANDSIM_CONF:" fmt "\n", ##args); } while (0)
#else
#define debug(fmt, args...) do {} while(0)
#endif
#define NANDSIM_RAM_LOG_SIZE 16384
#define MSG_NOTRUNNING "Controller#%d is not running.Please start" \
" it first."
#define MSG_RUNNING "Controller#%d is already running!"
#define MSG_CTRLCHIPNEEDED "You have to specify ctrl_no:cs_no pair!"
#define MSG_STATUSACQCTRLCHIP "Could not acquire status for ctrl#%d chip#%d"
#define MSG_STATUSACQCTRL "Could not acquire status for ctrl#%d"
#define MSG_NOCHIP "There is no such chip configured (chip#%d "\
"at ctrl#%d)!"
#define MSG_NOCTRL "Controller#%d is not configured!"
#define MSG_NOTCONFIGDCTRLCHIP "Chip connected to ctrl#%d at cs#%d " \
"is not configured."
typedef int (commandfunc_t)(int , char **);
static struct nandsim_command *getcommand(char *);
static int parse_devstring(char *, int *, int *);
static void printchip(struct sim_chip *, uint8_t);
static void printctrl(struct sim_ctrl *);
static int opendev(int *);
static commandfunc_t cmdstatus;
static commandfunc_t cmdconf;
static commandfunc_t cmdstart;
static commandfunc_t cmdstop;
static commandfunc_t cmdmod;
static commandfunc_t cmderror;
static commandfunc_t cmdbb;
static commandfunc_t cmdfreeze;
static commandfunc_t cmdlog;
static commandfunc_t cmdstats;
static commandfunc_t cmddump;
static commandfunc_t cmdrestore;
static commandfunc_t cmddestroy;
static commandfunc_t cmdhelp;
static int checkusage(int, int, char **);
static int is_chip_created(int, int, int *);
static int is_ctrl_created(int, int *);
static int is_ctrl_running(int, int *);
static int assert_chip_connected(int , int);
static int printstats(int, int, uint32_t, int);
struct nandsim_command {
	const char *cmd_name; /* Command name */
	commandfunc_t *commandfunc; /* Ptr to command function */
	uint8_t req_argc; /* Mandatory arguments count */
	const char *usagestring; /* Usage string */
};
/* Dispatch table for all sub-commands; terminated by a NULL entry. */
static struct nandsim_command commands[] = {
	{"status", cmdstatus, 1,
	    "status <ctl_no|--all|-a> [-v]\n" },
	{"conf", cmdconf, 1,
	    "conf <filename>\n" },
	{"start", cmdstart, 1,
	    "start <ctrl_no>\n" },
	{"mod", cmdmod, 2,
	    "mod [-l <loglevel>] | <ctl_no:cs_no> [-p <prog_time>]\n"
	    "\t[-e <erase_time>] [-r <read_time>]\n"
	    "\t[-E <error_ratio>] | [-h]\n" },
	{"stop", cmdstop, 1,
	    "stop <ctrl_no>\n" },
	{"error", cmderror, 5,
	    "error <ctrl_no:cs_no> <page_num> <column> <length> <pattern>\n" },
	{"bb", cmdbb, 2,
	    "bb <ctl_no:cs_no> [blk_num1,blk_num2,..] [-U] [-L]\n" },
	{"freeze", cmdfreeze, 1,
	    "freeze [ctrl_no]\n" },
	{"log", cmdlog, 1,
	    "log <ctrl_no|--all|-a>\n" },
	{"stats", cmdstats, 2,
	    "stats <ctrl_no:cs_no> <pagenumber>\n" },
	{"dump", cmddump, 2,
	    "dump <ctrl_no:cs_no> <filename>\n" },
	{"restore", cmdrestore, 2,
	    "restore <ctrl_no:chip_no> <filename>\n" },
	{"destroy", cmddestroy, 1,
	    "destroy <ctrl_no[:cs_no]|--all|-a>\n" },
	{"help", cmdhelp, 0,
	    "help [-v]" },
	{NULL, NULL, 0, NULL},
};
/*
 * Look up the dispatch-table entry whose name matches arg exactly.
 * Returns NULL when no command of that name exists.
 */
static struct nandsim_command*
getcommand(char *arg)
{
	struct nandsim_command *cmd;

	for (cmd = commands; cmd != NULL && cmd->cmd_name != NULL; cmd++) {
		if (strcmp(arg, cmd->cmd_name) == 0)
			return (cmd);
	}

	return (NULL);
}
/*
 * Parse given string in format <ctrl_no>:<cs_no>, if possible -- set
 * ctrl and/or cs, and return 0 (success) or 1 (in case of error).
 *
 * ctrl == 0xff && chip == 0xff : '--all' flag specified
 * ctrl != 0xff && chip != 0xff : both ctrl & chip were specified
 * ctrl != 0xff && chip == 0xff : only ctrl was specified
 *
 * NOTE: str is modified in place (strtok() writes NUL terminators).
 */
static int
parse_devstring(char *str, int *ctrl, int *cs)
{
	char *tmpstr;
	unsigned int num = 0;

	/*
	 * Ignore white spaces at the beginning.  Cast through unsigned
	 * char: passing a negative plain char to isspace() is undefined
	 * behavior per the C standard.
	 */
	while (isspace((unsigned char)*str) && (*str != '\0'))
		str++;

	*ctrl = 0xff;
	*cs = 0xff;
	if (strcmp(str, "--all") == 0 ||
	    strcmp(str, "-a") == 0) {
		/* If --all or -a is specified, ctl==chip==0xff */
		debug("CTRL=%d CHIP=%d\n", *ctrl, *cs);
		return (0);
	}
	/* Separate token and try to convert it to int */
	tmpstr = (char *)strtok(str, ":");
	if ((tmpstr != NULL) && (*tmpstr != '\0')) {
		if (convert_arguint(tmpstr, &num) != 0)
			return (1);
		if (num > MAX_SIM_DEV - 1) {
			error("Invalid ctrl_no supplied: %s. Valid ctrl_no "
			    "value must lie between 0 and 3!", tmpstr);
			return (1);
		}
		*ctrl = num;
		tmpstr = (char *)strtok(NULL, ":");
		if ((tmpstr != NULL) && (*tmpstr != '\0')) {
			if (convert_arguint(tmpstr, &num) != 0)
				return (1);
			/* Check if chip_no is valid */
			if (num > MAX_CTRL_CS - 1) {
				error("Invalid chip_no supplied: %s. Valid "
				    "chip_no value must lie between 0 and 3!",
				    tmpstr);
				return (1);
			}
			*cs = num;
		}
	} else
		/* Empty devstring supplied */
		return (1);
	debug("CTRL=%d CHIP=%d\n", *ctrl, *cs);
	return (0);
}
/*
 * Open the simulator control device (SIMDEVICE), storing the
 * descriptor in *fd.  Returns EX_OK on success; reports the failure
 * and returns EX_OSFILE otherwise.
 */
static int
opendev(int *fd)
{
	*fd = open(SIMDEVICE, O_RDWR);
	if (*fd != -1)
		return (EX_OK);

	error("Could not open simulator device file (%s)!",
	    SIMDEVICE);
	return (EX_OSFILE);
}
/*
 * Open the character device node of the given controller/chip pair
 * (/dev/nandsim<ctrl>.<chip>) and store the descriptor in *cdevd.
 * Returns EX_OK on success, EX_NOINPUT otherwise.
 */
static int
opencdev(int *cdevd, int ctrl, int chip)
{
	char fname[255];

	/* snprintf cannot overflow fname, unlike the old sprintf. */
	snprintf(fname, sizeof(fname), "/dev/nandsim%d.%d", ctrl, chip);
	*cdevd = open(fname, O_RDWR);
	if (*cdevd == -1)
		return (EX_NOINPUT);
	return (EX_OK);
}
/*
 * Decide whether the usage string should be printed for a command.
 * Returns 1 when fewer than argsreqd command arguments were supplied,
 * or when --help/-h was given as the first argument; 0 otherwise.
 */
static int
checkusage(int gargc, int argsreqd, char **gargv)
{
	int wants_help;

	if (gargc < argsreqd + 2)
		return (1);

	wants_help = (strcmp(gargv[1], "--help") == 0 ||
	    strcmp(gargv[1], "-h") == 0);

	return (wants_help ? 1 : 0);
}
static int
cmdstatus(int gargc, char **gargv)
{
int chip = 0, ctl = 0, err = 0, fd, idx, idx2, start, stop;
uint8_t verbose = 0;
struct sim_ctrl ctrlconf;
struct sim_chip chipconf;
err = parse_devstring(gargv[2], &ctl, &chip);
if (err) {
return (EX_USAGE);
} else if (ctl == 0xff) {
/* Every controller */
start = 0;
stop = MAX_SIM_DEV-1;
} else {
/* Specified controller only */
start = ctl;
stop = ctl;
}
if (opendev(&fd) != EX_OK)
return (EX_OSFILE);
for (idx = 0; idx < gargc; idx ++)
if (strcmp(gargv[idx], "-v") == 0 ||
strcmp(gargv[idx], "--verbose") == 0)
verbose = 1;
for (idx = start; idx <= stop; idx++) {
ctrlconf.num = idx;
err = ioctl(fd, NANDSIM_STATUS_CTRL, &ctrlconf);
if (err) {
err = EX_SOFTWARE;
error(MSG_STATUSACQCTRL, idx);
continue;
}
printctrl(&ctrlconf);
for (idx2 = 0; idx2 < MAX_CTRL_CS; idx2++) {
chipconf.num = idx2;
chipconf.ctrl_num = idx;
err = ioctl(fd, NANDSIM_STATUS_CHIP, &chipconf);
if (err) {
err = EX_SOFTWARE;
error(MSG_STATUSACQCTRL, idx);
continue;
}
printchip(&chipconf, verbose);
}
}
close(fd);
return (err);
}
/*
 * "conf" command: parse the configuration file named in argv[2] and
 * apply it to the simulator control device.
 */
static int
cmdconf(int gargc __unused, char **gargv)
{
	if (parse_config(gargv[2], SIMDEVICE) != 0)
		return (EX_DATAERR);

	return (EX_OK);
}
/*
 * "start" command: bring a previously configured controller on-line.
 * Warns (and succeeds) when the controller is already running.
 */
static int
cmdstart(int gargc __unused, char **gargv)
{
	int chip = 0, ctl = 0, fd, ret, running, state;

	if (parse_devstring(gargv[2], &ctl, &chip) != 0)
		return (EX_USAGE);

	if (is_ctrl_created(ctl, &state) != 0)
		return (EX_SOFTWARE);
	if (state == 0) {
		error(MSG_NOCTRL, ctl);
		return (EX_SOFTWARE);
	}

	if (is_ctrl_running(ctl, &running) != 0)
		return (EX_SOFTWARE);
	if (running) {
		warn(MSG_RUNNING, ctl);
		return (0);
	}

	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_START_CTRL, &ctl);
	close(fd);
	if (ret == 0)
		return (0);

	error("Cannot start controller#%d", ctl);
	return (EX_SOFTWARE);
}
/*
 * "stop" command: take a running controller off-line.  Reports an
 * error (but returns success) when it was not running at all.
 */
static int
cmdstop(int gargc __unused, char **gargv)
{
	int chip = 0, ctl = 0, fd, ret, running;

	if (parse_devstring(gargv[2], &ctl, &chip) != 0)
		return (EX_USAGE);

	if (is_ctrl_running(ctl, &running) != 0)
		return (EX_SOFTWARE);
	if (!running) {
		error(MSG_NOTRUNNING, ctl);
		return (0);
	}

	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_STOP_CTRL, &ctl);
	close(fd);
	if (ret == 0)
		return (0);

	error("Cannot stop controller#%d", ctl);
	return (EX_SOFTWARE);
}
/*
 * "mod" command: modify run-time simulator parameters.  Two forms:
 *   mod -l|--loglevel <level>            global log level
 *   mod <ctrl:chip> [-p|-e|-r|-E <val>]  per-chip timing / error ratio
 * Each flag consumes the following argument as its value.
 */
static int
cmdmod(int gargc __unused, char **gargv)
{
	int chip, ctl, err = 0, fd = -1, i;
	struct sim_mod mods;
	if (gargc >= 4) {
		if (strcmp(gargv[2], "--loglevel") == 0 || strcmp(gargv[2],
		    "-l") == 0) {
			/* Set loglevel (ctrl:chip pair independent) */
			mods.field = SIM_MOD_LOG_LEVEL;
			if (convert_arguint(gargv[3], &mods.new_value) != 0)
				return (EX_SOFTWARE);
			if (opendev(&fd) != EX_OK)
				return (EX_OSFILE);
			err = ioctl(fd, NANDSIM_MODIFY, &mods);
			if (err) {
				error("simulator parameter %s could not be "
				    "modified !", gargv[3]);
				close(fd);
				return (EX_SOFTWARE);
			}
			debug("request : loglevel = %d\n", mods.new_value);
			close(fd);
			return (EX_OK);
		}
	}
	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err)
		return (EX_USAGE);
	else if (chip == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}
	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	/* Find out which flags were passed */
	for (i = 3; i < gargc; i++) {
		/*
		 * Every flag takes a value argument; verify it exists
		 * before touching it.  Previously a trailing flag made
		 * us pass gargv[gargc] (NULL by the C standard) to
		 * convert_arguint().
		 */
		if (i + 1 >= gargc) {
			error("Flag %s requires a value!", gargv[i]);
			close(fd);
			return (EX_USAGE);
		}
		if (convert_arguint(gargv[i + 1], &mods.new_value) != 0)
			continue;
		if (strcmp(gargv[i], "--prog-time") == 0 ||
		    strcmp(gargv[i], "-p") == 0) {
			mods.field = SIM_MOD_PROG_TIME;
			debug("request : progtime = %d\n", mods.new_value);
		} else if (strcmp(gargv[i], "--erase-time") == 0 ||
		    strcmp(gargv[i], "-e") == 0) {
			mods.field = SIM_MOD_ERASE_TIME;
			debug("request : eraseime = %d\n", mods.new_value);
		} else if (strcmp(gargv[i], "--read-time") == 0 ||
		    strcmp(gargv[i], "-r") == 0) {
			mods.field = SIM_MOD_READ_TIME;
			debug("request : read_time = %d\n", mods.new_value);
		} else if (strcmp(gargv[i], "--error-ratio") == 0 ||
		    strcmp(gargv[i], "-E") == 0) {
			mods.field = SIM_MOD_ERROR_RATIO;
			debug("request : error_ratio = %d\n", mods.new_value);
		} else {
			/* Flag not recognized, or nothing specified. */
			error("Unrecognized flag:%s\n", gargv[i]);
			if (fd >= 0)
				close(fd);
			return (EX_USAGE);
		}
		mods.chip_num = chip;
		mods.ctrl_num = ctl;
		/* Call appropriate ioctl */
		err = ioctl(fd, NANDSIM_MODIFY, &mods);
		if (err) {
			error("simulator parameter %s could not be modified! ",
			    gargv[i]);
			continue;
		}
		/* Step past the value argument consumed by this flag. */
		i++;
	}
	close(fd);
	return (EX_OK);
}
/*
 * "error" command: inject an error pattern of <length> bytes at the
 * given page/column of a chip.
 */
static int
cmderror(int gargc __unused, char **gargv)
{
	struct sim_error injection;
	uint32_t column, len, page, pattern;
	int chip = 0, ctl = 0, fd, ret;

	if (parse_devstring(gargv[2], &ctl, &chip) != 0)
		return (EX_USAGE);
	if (chip == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}

	if (convert_arguint(gargv[3], &page) ||
	    convert_arguint(gargv[4], &column) ||
	    convert_arguint(gargv[5], &len) ||
	    convert_arguint(gargv[6], &pattern))
		return (EX_SOFTWARE);

	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);

	injection.page_num = page;
	injection.column = column;
	injection.len = len;
	injection.pattern = pattern;
	injection.ctrl_num = ctl;
	injection.chip_num = chip;

	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_INJECT_ERROR, &injection);
	close(fd);

	if (ret != 0) {
		error("Could not inject error !");
		return (EX_SOFTWARE);
	}
	return (EX_OK);
}
/*
 * "bb" command: manage a chip's bad-block markers.  Marks the blocks
 * listed in the comma-separated argument as bad (or good again with
 * --unmark/-U), and/or lists state and wear count of every block with
 * --list/-L.
 */
static int
cmdbb(int gargc, char **gargv)
{
	struct sim_block_state bs;
	struct chip_param_io cparams;
	uint32_t blkidx;
	int c, cdevd, chip = 0, ctl = 0, err = 0, fd, idx;
	/* flagL/flagU record the argv index at which the flag appeared
	 * (0 means "not given"); the index is later used to validate
	 * argument ordering. */
	uint8_t flagL = 0, flagU = 0;
	int *badblocks = NULL;
	/* Check for --list/-L or --unmark/-U flags */
	for (idx = 3; idx < gargc; idx++) {
		if (strcmp(gargv[idx], "--list") == 0 ||
		    strcmp(gargv[idx], "-L") == 0)
			flagL = idx;
		if (strcmp(gargv[idx], "--unmark") == 0 ||
		    strcmp(gargv[idx], "-U") == 0)
			flagU = idx;
	}
	/* argv[2] must be the ctrl:chip pair and -U must follow a block
	 * list, so neither flag may sit at index 2, nor -U at index 3. */
	if (flagL == 2 || flagU == 2 || flagU == 3)
		return (EX_USAGE);
	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err) {
		return (EX_USAGE);
	}
	if (chip == 0xff || ctl == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}
	bs.ctrl_num = ctl;
	bs.chip_num = chip;
	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);
	/* Read the chip geometry through its own character device. */
	if (opencdev(&cdevd, ctl, chip) != EX_OK)
		return (EX_OSFILE);
	err = ioctl(cdevd, NAND_IO_GET_CHIP_PARAM, &cparams);
	if (err)
		/* NOTE(review): cdevd is leaked on this error path. */
		return (EX_SOFTWARE);
	close(cdevd);
	bs.ctrl_num = ctl;
	bs.chip_num = chip;
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	if (flagL != 3) {
		/*
		 * Flag -L was specified either after blocklist or was not
		 * specified at all.
		 */
		c = parse_intarray(gargv[3], &badblocks);
		for (idx = 0; idx < c; idx++) {
			bs.block_num = badblocks[idx];
			/* Do not change wearout */
			bs.wearout = -1;
			bs.state = (flagU == 0) ? NANDSIM_BAD_BLOCK :
			    NANDSIM_GOOD_BLOCK;
			err = ioctl(fd, NANDSIM_SET_BLOCK_STATE, &bs);
			if (err) {
				error("Could not set bad block(%d) for "
				    "controller (%d)!",
				    badblocks[idx], ctl);
				err = EX_SOFTWARE;
				break;
			}
		}
	}
	if (flagL != 0) {
		/* If flag -L was specified (anywhere) */
		for (blkidx = 0; blkidx < cparams.blocks; blkidx++) {
			bs.block_num = blkidx;
			/* Do not change the wearout */
			bs.wearout = -1;
			err = ioctl(fd, NANDSIM_GET_BLOCK_STATE, &bs);
			if (err) {
				error("Could not acquire block state");
				err = EX_SOFTWARE;
				continue;
			}
			printf("Block#%d: wear count: %d %s\n", blkidx,
			    bs.wearout,
			    (bs.state == NANDSIM_BAD_BLOCK) ? "BAD":"GOOD");
		}
	}
	close(fd);
	return (err);
}
/*
 * "freeze" command: suspend a whole controller or one chip on it.
 * With only <ctrl_no> given, every populated chip select of that
 * controller is frozen.
 */
static int
cmdfreeze(int gargc __unused, char **gargv)
{
	int chip = 0, ctl = 0, err = 0, fd, i, start = 0, state, stop = 0;
	struct sim_ctrl_chip ctrlchip;
	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err)
		return (EX_USAGE);
	if (ctl == 0xff) {
		error("You have to specify at least controller number");
		return (EX_USAGE);
	}
	/* No chip given: walk every chip select of the controller. */
	if (ctl != 0xff && chip == 0xff) {
		start = 0;
		stop = MAX_CTRL_CS - 1;
	} else {
		start = chip;
		stop = chip;
	}
	ctrlchip.ctrl_num = ctl;
	err = is_ctrl_running(ctl, &state);
	if (err)
		return (EX_SOFTWARE);
	if (state == 0) {
		error(MSG_NOTRUNNING, ctl);
		return (EX_SOFTWARE);
	}
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	for (i = start; i <= stop; i++) {
		err = is_chip_created(ctl, i, &state);
		if (err) {
			/* Close the control device; it leaked here before. */
			close(fd);
			return (EX_SOFTWARE);
		} else if (state == 0) {
			/* Unpopulated chip select -- skip it. */
			continue;
		}
		ctrlchip.chip_num = i;
		err = ioctl(fd, NANDSIM_FREEZE, &ctrlchip);
		if (err) {
			error("Could not freeze ctrl#%d chip#%d", ctl, i);
			close(fd);
			return (EX_SOFTWARE);
		}
	}
	close(fd);
	return (EX_OK);
}
/*
 * "log" command: fetch and print the in-kernel RAM log of one
 * controller, or of every controller when --all/-a was given.
 */
static int
cmdlog(int gargc __unused, char **gargv)
{
	struct sim_log log;
	char *buf;
	int chip = 0, ctl = 0, fd, first, last, num;

	if (parse_devstring(gargv[2], &ctl, &chip) != 0)
		return (EX_USAGE);

	buf = malloc(sizeof(char) * NANDSIM_RAM_LOG_SIZE);
	if (buf == NULL) {
		error("Not enough memory to create log buffer");
		return (EX_SOFTWARE);
	}
	memset(buf, 0, NANDSIM_RAM_LOG_SIZE);

	log.log = buf;
	log.len = NANDSIM_RAM_LOG_SIZE;

	if (ctl == 0xff) {
		first = 0;
		last = MAX_SIM_DEV - 1;
	} else {
		first = ctl;
		last = ctl;
	}

	if (opendev(&fd) != EX_OK) {
		free(buf);
		return (EX_OSFILE);
	}

	/* Print logs for selected controller(s) */
	for (num = first; num <= last; num++) {
		log.ctrl_num = num;
		if (ioctl(fd, NANDSIM_PRINT_LOG, &log) != 0) {
			error("Could not get log for controller %d!", num);
			continue;
		}
		printf("Logs for controller#%d:\n%s\n", num, buf);
	}

	free(buf);
	close(fd);
	return (EX_OK);
}
/*
 * "stats" command: print block/page statistics for one page of a fully
 * specified ctrl:chip pair.
 */
static int
cmdstats(int gargc __unused, char **gargv)
{
	int cdevd, chip = 0, ctl = 0, status = 0;
	uint32_t pageno = 0;

	status = parse_devstring(gargv[2], &ctl, &chip);
	if (status)
		return (EX_USAGE);
	/* Both controller and chip must be given explicitly. */
	if (chip == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}
	if (convert_arguint(gargv[3], &pageno) != 0)
		return (EX_USAGE);
	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);
	if (opencdev(&cdevd, ctl, chip) != EX_OK)
		return (EX_OSFILE);

	/* Single close path; any printstats() failure maps to EX_SOFTWARE. */
	status = printstats(ctl, chip, pageno, cdevd);
	close(cdevd);
	return (status ? EX_SOFTWARE : EX_OK);
}
/*
 * "dump" command: save the whole contents of a simulated chip to a file.
 * File layout: struct chip_param_io header, then one sim_block_state per
 * block, then the raw data (page + OOB) of every block.  cmdrestore()
 * consumes exactly this layout.
 */
static int
cmddump(int gargc __unused, char **gargv)
{
	struct sim_dump dump;
	struct sim_block_state bs;
	struct chip_param_io cparams;
	int chip = 0, ctl = 0, err = EX_OK, fd, dumpfd;
	uint32_t blkidx, bwritten = 0, totalwritten = 0;
	void *buf;

	/* Both controller and chip numbers are mandatory for a dump. */
	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err)
		return (EX_USAGE);
	if (chip == 0xff || ctl == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}
	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);
	/* Query chip geometry through the chip's character device. */
	if (opencdev(&fd, ctl, chip) != EX_OK)
		return (EX_OSFILE);
	err = ioctl(fd, NAND_IO_GET_CHIP_PARAM, &cparams);
	if (err) {
		error("Cannot get parameters for chip %d:%d", ctl, chip);
		close(fd);
		return (EX_SOFTWARE);
	}
	close(fd);
	dump.ctrl_num = ctl;
	dump.chip_num = chip;
	/* One transfer unit is a full erase block: data plus OOB areas. */
	dump.len = cparams.pages_per_block * (cparams.page_size +
	    cparams.oob_size);
	buf = malloc(dump.len);
	if (buf == NULL) {
		error("Could not allocate memory!");
		return (EX_SOFTWARE);
	}
	dump.data = buf;
	errno = 0;
	dumpfd = open(gargv[3], O_WRONLY | O_CREAT, 0666);
	if (dumpfd == -1) {
		error("Cannot create dump file.");
		free(buf);
		return (EX_SOFTWARE);
	}
	if (opendev(&fd)) {
		close(dumpfd);
		free(buf);
		return (EX_SOFTWARE);
	}
	bs.ctrl_num = ctl;
	bs.chip_num = chip;
	/*
	 * The dump file starts with a copy of the chip parameters
	 * (struct chip_param_io), not a bare block count as the old
	 * comment claimed; cmdrestore() reads and compares this header.
	 */
	if (write(dumpfd, &cparams, sizeof(cparams)) < (int)sizeof(cparams)) {
		error("Error writing to dumpfile!");
		close(fd);
		close(dumpfd);
		free(buf);
		return (EX_SOFTWARE);
	}
	/*
	 * First loop acquires blocks states and writes them to
	 * the dump file.
	 */
	for (blkidx = 0; blkidx < cparams.blocks; blkidx++) {
		bs.block_num = blkidx;
		err = ioctl(fd, NANDSIM_GET_BLOCK_STATE, &bs);
		if (err) {
			error("Could not get bad block(%d) for "
			    "controller (%d)!", blkidx, ctl);
			close(fd);
			close(dumpfd);
			free(buf);
			return (EX_SOFTWARE);
		}
		bwritten = write(dumpfd, &bs, sizeof(bs));
		if (bwritten != sizeof(bs)) {
			error("Error writing to dumpfile");
			close(fd);
			close(dumpfd);
			free(buf);
			return (EX_SOFTWARE);
		}
	}
	/* Second loop dumps the data */
	for (blkidx = 0; blkidx < cparams.blocks; blkidx++) {
		debug("Block#%d...", blkidx);
		dump.block_num = blkidx;
		err = ioctl(fd, NANDSIM_DUMP, &dump);
		if (err) {
			error("Could not dump ctrl#%d chip#%d "
			    "block#%d", ctl, chip, blkidx);
			err = EX_SOFTWARE;
			break;
		}
		bwritten = write(dumpfd, dump.data, dump.len);
		if (bwritten != dump.len) {
			error("Error writing to dumpfile");
			err = EX_SOFTWARE;
			break;
		}
		debug("OK!\n");
		totalwritten += bwritten;
	}
	/* blkidx here is how many blocks were attempted before stop/end. */
	printf("%d out of %d B written.\n", totalwritten, dump.len * blkidx);
	close(fd);
	close(dumpfd);
	free(buf);
	return (err);
}
/*
 * "restore" command: load a chip image previously written by cmddump()
 * back into the simulator.  The file must match the target chip's
 * geometry exactly (size check plus a header comparison).
 *
 * Returns EX_OK on success, or an EX_* code on failure.
 */
static int
cmdrestore(int gargc __unused, char **gargv)
{
	struct sim_dump dump;
	struct sim_block_state bs;
	struct stat filestat;
	int chip = 0, ctl = 0, err = 0, fd, dumpfd = -1;
	uint32_t blkidx, blksz, fsize = 0, expfilesz;
	void *buf;
	struct chip_param_io cparams, dumpcparams;

	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err)
		return (EX_USAGE);
	else if (ctl == 0xff) {
		error(MSG_CTRLCHIPNEEDED);
		return (EX_USAGE);
	}
	if (!assert_chip_connected(ctl, chip))
		return (EX_SOFTWARE);
	/* Get chip geometry */
	if (opencdev(&fd, ctl, chip) != EX_OK)
		return (EX_OSFILE);
	err = ioctl(fd, NAND_IO_GET_CHIP_PARAM, &cparams);
	if (err) {
		error("Cannot get parameters for chip %d:%d", ctl, chip);
		close(fd);
		return (err);
	}
	close(fd);
	/* Obtain dump file size */
	errno = 0;
	if (stat(gargv[3], &filestat) != 0) {
		error("Could not acquire file size! : %s",
		    strerror(errno));
		return (EX_IOERR);
	}
	fsize = filestat.st_size;
	blksz = cparams.pages_per_block * (cparams.page_size +
	    cparams.oob_size);
	/* Expected dump file size for chip */
	expfilesz = cparams.blocks * (blksz + sizeof(bs)) + sizeof(cparams);
	if (fsize != expfilesz) {
		error("File size does not match chip geometry (file size: %d"
		    ", dump size: %d)", fsize, expfilesz);
		return (EX_SOFTWARE);
	}
	dumpfd = open(gargv[3], O_RDONLY);
	if (dumpfd == -1) {
		error("Could not open dump file!");
		return (EX_IOERR);
	}
	/*
	 * Read the chip params saved in the dumpfile.  The read(2) result
	 * used to be ignored (the old XXX); a short or failed read would
	 * have compared uninitialized garbage below.
	 */
	if (read(dumpfd, &dumpcparams, sizeof(dumpcparams)) !=
	    (ssize_t)sizeof(dumpcparams)) {
		error("Could not read chip parameters from dump file!");
		close(dumpfd);
		return (EX_IOERR);
	}
	if (bcmp(&dumpcparams, &cparams, sizeof(cparams)) != 0) {
		error("Supplied dump is created for a chip with different "
		    "chip configuration!");
		close(dumpfd);
		return (EX_SOFTWARE);
	}
	if (opendev(&fd) != EX_OK) {
		close(dumpfd);
		return (EX_OSFILE);
	}
	buf = malloc(blksz);
	if (buf == NULL) {
		error("Could not allocate memory for block buffer");
		close(dumpfd);
		close(fd);
		return (EX_SOFTWARE);
	}
	dump.ctrl_num = ctl;
	dump.chip_num = chip;
	dump.data = buf;
	/* Restore block states and wearouts */
	for (blkidx = 0; blkidx < cparams.blocks; blkidx++) {
		dump.block_num = blkidx;
		if (read(dumpfd, &bs, sizeof(bs)) != sizeof(bs)) {
			error("Error reading dumpfile");
			close(dumpfd);
			close(fd);
			free(buf);
			return (EX_SOFTWARE);
		}
		bs.ctrl_num = ctl;
		bs.chip_num = chip;
		debug("BLKIDX=%d BLOCKS=%d CTRL=%d CHIP=%d STATE=%d\n"
		    "WEAROUT=%d BS.CTRL_NUM=%d BS.CHIP_NUM=%d\n",
		    blkidx, cparams.blocks, dump.ctrl_num, dump.chip_num,
		    bs.state, bs.wearout, bs.ctrl_num, bs.chip_num);
		err = ioctl(fd, NANDSIM_SET_BLOCK_STATE, &bs);
		if (err) {
			error("Could not set bad block(%d) for "
			    "controller: %d, chip: %d!", blkidx, ctl, chip);
			close(dumpfd);
			close(fd);
			free(buf);
			return (EX_SOFTWARE);
		}
	}
	/* Restore data */
	for (blkidx = 0; blkidx < cparams.blocks; blkidx++) {
		errno = 0;
		dump.len = read(dumpfd, buf, blksz);
		if (errno) {
			error("Failed to read block#%d from dumpfile.", blkidx);
			err = EX_SOFTWARE;
			break;
		}
		dump.block_num = blkidx;
		err = ioctl(fd, NANDSIM_RESTORE, &dump);
		if (err) {
			error("Could not restore block#%d of ctrl#%d chip#%d"
			    ": %s", blkidx, ctl, chip, strerror(errno));
			err = EX_SOFTWARE;
			break;
		}
	}
	free(buf);
	close(dumpfd);
	close(fd);
	return (err);
}
/*
 * "destroy" command: tear down chips and (when no chip was named)
 * controllers.  Accepts ctrl:chip, ctrl alone, or nothing (everything).
 * Controllers must be stopped before they can be destroyed.
 */
static int
cmddestroy(int gargc __unused, char **gargv)
{
	int chip = 0, ctl = 0, err = 0, fd, idx, idx2, state;
	int chipstart, chipstop, ctrlstart, ctrlstop;
	struct sim_chip_destroy chip_destroy;

	err = parse_devstring(gargv[2], &ctl, &chip);
	if (err)
		return (EX_USAGE);
	if (ctl == 0xff) {
		/* Every chip at every controller */
		ctrlstart = chipstart = 0;
		ctrlstop = MAX_SIM_DEV - 1;
		chipstop = MAX_CTRL_CS - 1;
	} else {
		ctrlstart = ctrlstop = ctl;
		if (chip == 0xff) {
			/* Every chip at selected controller */
			chipstart = 0;
			chipstop = MAX_CTRL_CS - 1;
		} else
			/* Selected chip at selected controller */
			chipstart = chipstop = chip;
	}
	debug("CTRLSTART=%d CTRLSTOP=%d CHIPSTART=%d CHIPSTOP=%d\n",
	    ctrlstart, ctrlstop, chipstart, chipstop);
	for (idx = ctrlstart; idx <= ctrlstop; idx++) {
		err = is_ctrl_created(idx, &state);
		if (err) {
			error("Could not acquire ctrl#%d state. Cannot "
			    "destroy controller.", idx);
			return (EX_SOFTWARE);
		}
		if (state == 0) {
			/* Controller was never created -- nothing to do. */
			continue;
		}
		/* A running controller cannot be destroyed. */
		err = is_ctrl_running(idx, &state);
		if (err) {
			error(MSG_STATUSACQCTRL, idx);
			return (EX_SOFTWARE);
		}
		if (state != 0) {
			error(MSG_RUNNING, ctl);
			return (EX_SOFTWARE);
		}
		if (opendev(&fd) != EX_OK)
			return (EX_OSFILE);
		for (idx2 = chipstart; idx2 <= chipstop; idx2++) {
			err = is_chip_created(idx, idx2, &state);
			if (err) {
				error(MSG_STATUSACQCTRLCHIP, idx2, idx);
				continue;
			}
			if (state == 0)
				/* There is no such chip running */
				continue;
			chip_destroy.ctrl_num = idx;
			chip_destroy.chip_num = idx2;
			/*
			 * Best-effort: a failure to destroy one chip must
			 * not stop teardown of the remaining ones.
			 */
			ioctl(fd, NANDSIM_DESTROY_CHIP,
			    &chip_destroy);
		}
		/* If chip isn't explicitly specified -- destroy ctrl */
		if (chip == 0xff) {
			err = ioctl(fd, NANDSIM_DESTROY_CTRL, &idx);
			if (err) {
				error("Could not destroy ctrl#%d", idx);
				/* Bug fix: was leaking fd on this path. */
				close(fd);
				continue;
			}
		}
		close(fd);
	}
	return (err);
}
int
main(int argc, char **argv)
{
struct nandsim_command *cmdopts;
int retcode = 0;
if (argc < 2) {
cmdhelp(argc, argv);
retcode = EX_USAGE;
} else {
cmdopts = getcommand(argv[1]);
if (cmdopts != NULL && cmdopts->commandfunc != NULL) {
if (checkusage(argc, cmdopts->req_argc, argv) == 1) {
/* Print command specific usage */
printf("nandsim %s", cmdopts->usagestring);
return (EX_USAGE);
}
retcode = cmdopts->commandfunc(argc, argv);
if (retcode == EX_USAGE) {
/* Print command-specific usage */
printf("nandsim %s", cmdopts->usagestring);
} else if (retcode == EX_OSFILE) {
error("Could not open device file");
}
} else {
error("Unknown command!");
retcode = EX_USAGE;
}
}
return (retcode);
}
/* Print the one-line usage string of every registered subcommand. */
static int
cmdhelp(int gargc __unused, char **gargv __unused)
{
	struct nandsim_command *entry;

	printf("usage: nandsim <command> [command params] [params]\n\n");
	for (entry = commands; entry != NULL && entry->cmd_name != NULL;
	    entry++)
		printf("nandsim %s", entry->usagestring);
	printf("\n");
	return (EX_OK);
}
/*
 * Pretty-print one chip descriptor.  verbose > 0 dumps every field of
 * struct sim_chip; otherwise only a short summary is shown.  Slots that
 * were never created print nothing.
 */
static void
printchip(struct sim_chip *chip, uint8_t verbose)
{

	if (chip->created == 0)
		return;
	if (verbose > 0) {
		printf("\n[Chip info]\n");
		printf("num= %d\nctrl_num=%d\ndevice_id=%02x"
		    "\tmanufacturer_id=%02x\ndevice_model=%s\nmanufacturer="
		    "%s\ncol_addr_cycles=%d\nrow_addr_cycles=%d"
		    "\npage_size=%d\noob_size=%d\npages_per_block=%d\n"
		    "blocks_per_lun=%d\nluns=%d\n\nprog_time=%d\n"
		    "erase_time=%d\nread_time=%d\n"
		    "error_ratio=%d\nwear_level=%d\nwrite_protect=%c\n"
		    "chip_width=%db\n", chip->num, chip->ctrl_num,
		    chip->device_id, chip->manufact_id,chip->device_model,
		    chip->manufacturer, chip->col_addr_cycles,
		    chip->row_addr_cycles, chip->page_size,
		    chip->oob_size, chip->pgs_per_blk, chip->blks_per_lun,
		    chip->luns,chip->prog_time, chip->erase_time,
		    chip->read_time, chip->error_ratio, chip->wear_level,
		    (chip->is_wp == 0) ? 'N':'Y', chip->width);
	} else {
		printf("[Chip info]\n");
		printf("\tnum=%d\n\tdevice_model=%s\n\tmanufacturer=%s\n"
		    "\tpage_size=%d\n\twrite_protect=%s\n",
		    chip->num, chip->device_model, chip->manufacturer,
		    chip->page_size, (chip->is_wp == 0) ? "NO":"YES");
	}
}
/* Pretty-print one controller descriptor, including its ECC layout. */
static void
printctrl(struct sim_ctrl *ctrl)
{
	int i;

	if (ctrl->created == 0) {
		printf(MSG_NOCTRL "\n", ctrl->num);
		return;
	}
	printf("\n[Controller info]\n");
	printf("\trunning: %s\n", ctrl->running ? "yes" : "no");
	printf("\tnum cs: %d\n", ctrl->num_cs);
	printf("\tecc: %d\n", ctrl->ecc);
	printf("\tlog_filename: %s\n", ctrl->filename);
	printf("\tecc_layout:");
	/* 0xffff terminates the layout table; 16 entries per output line. */
	for (i = 0; i < MAX_ECC_BYTES && ctrl->ecc_layout[i] != 0xffff; i++)
		printf("%c%d", (i % 16) ? ' ' : '\n', ctrl->ecc_layout[i]);
	printf("\n");
}
/*
 * Query whether controller ctrl_no is running; result in *running.
 * Returns 0 on success, EX_OSFILE / ioctl error code on failure.
 */
static int
is_ctrl_running(int ctrl_no, int *running)
{
	struct sim_ctrl ctrl;
	int fd, ret;

	ctrl.num = ctrl_no;
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_STATUS_CTRL, &ctrl);
	close(fd);
	if (ret != 0) {
		error(MSG_STATUSACQCTRL, ctrl_no);
		return (ret);
	}
	*running = ctrl.running;
	return (0);
}
/*
 * Query whether controller ctrl_no exists; result in *created.
 * Returns 0 on success, EX_OSFILE / ioctl error code on failure.
 */
static int
is_ctrl_created(int ctrl_no, int *created)
{
	struct sim_ctrl ctrl;
	int fd, ret;

	ctrl.num = ctrl_no;
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_STATUS_CTRL, &ctrl);
	close(fd);
	if (ret != 0) {
		error("Could not acquire conf for ctrl#%d", ctrl_no);
		return (ret);
	}
	*created = ctrl.created;
	return (0);
}
/*
 * Query whether chip chip_no exists on controller ctrl_no; result in
 * *created.  Returns 0 on success, EX_OSFILE / ioctl error on failure.
 */
static int
is_chip_created(int ctrl_no, int chip_no, int *created)
{
	struct sim_chip chip;
	int fd, ret;

	chip.ctrl_num = ctrl_no;
	chip.num = chip_no;
	if (opendev(&fd) != EX_OK)
		return (EX_OSFILE);
	ret = ioctl(fd, NANDSIM_STATUS_CHIP, &chip);
	close(fd);
	if (ret != 0) {
		error("Could not acquire conf for chip#%d", chip_no);
		return (ret);
	}
	*created = chip.created;
	return (0);
}
/*
 * Verify that ctrl_no exists, chip_no is attached to it, and the
 * controller is running.  Prints a diagnostic for the first failed
 * precondition.  Returns 1 when fully connected, 0 otherwise.
 */
static int
assert_chip_connected(int ctrl_no, int chip_no)
{
	int flag;

	if (is_ctrl_created(ctrl_no, &flag) != 0)
		return (0);
	if (flag == 0) {
		error(MSG_NOCTRL, ctrl_no);
		return (0);
	}
	if (is_chip_created(ctrl_no, chip_no, &flag) != 0)
		return (0);
	if (flag == 0) {
		error(MSG_NOTCONFIGDCTRLCHIP, ctrl_no, chip_no);
		return (0);
	}
	if (is_ctrl_running(ctrl_no, &flag) != 0)
		return (0);
	if (flag == 0) {
		error(MSG_NOTRUNNING, ctrl_no);
		return (0);
	}
	return (1);
}
/*
 * Print erase count for the block containing pageno, plus per-page
 * read/write/ECC statistics, using the already-open chip device cdevd.
 * Returns 0 on success, EX_SOFTWARE or ENXIO on ioctl failure.
 */
static int
printstats(int ctrlno, int chipno, uint32_t pageno, int cdevd)
{
	struct page_stat_io pstats;
	struct block_stat_io bstats;
	struct chip_param_io cparams;
	uint32_t blkidx;
	int err;

	/* Gather information about chip */
	err = ioctl(cdevd, NAND_IO_GET_CHIP_PARAM, &cparams);
	if (err) {
		error("Could not acquire chip info for chip attached to cs#"
		    "%d, ctrl#%d", chipno, ctrlno);
		return (EX_SOFTWARE);
	}
	/* Map the absolute page number onto its containing erase block. */
	blkidx = (pageno / cparams.pages_per_block);
	bstats.block_num = blkidx;
	err = ioctl(cdevd, NAND_IO_BLOCK_STAT, &bstats);
	if (err) {
		error("Could not acquire block#%d statistics!", blkidx);
		/*
		 * NOTE(review): errno-style ENXIO here, while the rest of
		 * the file returns sysexits codes.  Callers only test for
		 * non-zero, so this is benign but inconsistent.
		 */
		return (ENXIO);
	}
	printf("Block #%d erased: %d\n", blkidx, bstats.block_erased);
	pstats.page_num = pageno;
	err = ioctl(cdevd, NAND_IO_PAGE_STAT, &pstats);
	if (err) {
		error("Could not acquire page statistics!");
		return (ENXIO);
	}
	debug("BLOCKIDX = %d PAGENO (REL. TO BLK) = %d\n", blkidx,
	    pstats.page_num);
	printf("Page#%d : reads:%d writes:%d \n\traw reads:%d raw writes:%d "
	    "\n\tecc_succeeded:%d ecc_corrected:%d ecc_failed:%d\n",
	    pstats.page_num, pstats.page_read, pstats.page_written,
	    pstats.page_raw_read, pstats.page_raw_written,
	    pstats.ecc_succeded, pstats.ecc_corrected, pstats.ecc_failed);
	return (0);
}
| 23.125179 | 98 | 0.634755 | [
"geometry"
] |
cf9d83a8282c2c84fbce905d952107189b88d6d7 | 5,884 | h | C | rgbd_slam/ORB_SLAM2_modified/include/System.h | AndyoyoZ/kinect_rgb_calibration | 80b6ff877879224cd3898f69d7f483f44f404079 | [
"Apache-2.0"
] | 1 | 2019-09-24T05:35:02.000Z | 2019-09-24T05:35:02.000Z | rgbd_slam/ORB_SLAM2_modified/include/System.h | AndyoyoZ/kinect_rgb_calibration | 80b6ff877879224cd3898f69d7f483f44f404079 | [
"Apache-2.0"
] | null | null | null | rgbd_slam/ORB_SLAM2_modified/include/System.h | AndyoyoZ/kinect_rgb_calibration | 80b6ff877879224cd3898f69d7f483f44f404079 | [
"Apache-2.0"
] | null | null | null | /**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef SYSTEM_H
#define SYSTEM_H
#include <string>
#include <thread>
#include <opencv2/core/core.hpp>
#include "Tracking.h"
#include "FrameDrawer.h"
#include "MapDrawer.h"
#include "Map.h"
#include "LocalMapping.h"
#include "LoopClosing.h"
#include "KeyFrameDatabase.h"
#include "ORBVocabulary.h"
#include "Viewer.h"
#include "pointcloudmapping.h"
class PointCloudMapping;
namespace ORB_SLAM2
{
class Viewer;
class FrameDrawer;
class Map;
class Tracking;
class LocalMapping;
class LoopClosing;
// Top-level ORB-SLAM2 facade.  Owns the map, vocabulary, keyframe database
// and the Local Mapping / Loop Closing / Viewer threads; the Tracking
// object runs in the caller's thread via the Track*() entry points.
//
// Fix: the timestamp parameters below were corrupted by an HTML-entity
// mangling pass ("&times;tamp", i.e. the multiplication sign U+00D7,
// instead of "&timestamp"); restored to valid C++.
class System
{
public:
    // Input sensor
    enum eSensor{
        MONOCULAR=0,
        STEREO=1,
        RGBD=2
    };

    // point cloud mapping
    // Dense point-cloud builder thread handle; public so external code can
    // access the reconstructed cloud directly.  // add by andyoyo
    shared_ptr<PointCloudMapping> mpPointCloudMapping;

public:

    // Initialize the SLAM system. It launches the Local Mapping, Loop Closing and Viewer threads.
    System(const string &strVocFile, const string &strSettingsFile, const eSensor sensor, const bool bUseViewer = true);

    // Proccess the given stereo frame. Images must be synchronized and rectified.
    // Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
    // Returns the camera pose (empty if tracking fails).
    cv::Mat TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timestamp);

    // Process the given rgbd frame. Depthmap must be registered to the RGB frame.
    // Input image: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
    // Input depthmap: Float (CV_32F).
    // Returns the camera pose (empty if tracking fails).
    cv::Mat TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const double &timestamp);

    // Proccess the given monocular frame
    // Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
    // Returns the camera pose (empty if tracking fails).
    cv::Mat TrackMonocular(const cv::Mat &im, const double &timestamp);

    // This stops local mapping thread (map building) and performs only camera tracking.
    void ActivateLocalizationMode();

    // This resumes local mapping thread and performs SLAM again.
    void DeactivateLocalizationMode();

    // Reset the system (clear map)
    void Reset();

    // All threads will be requested to finish.
    // It waits until all threads have finished.
    // This function must be called before saving the trajectory.
    void Shutdown();

    // Save camera trajectory in the TUM RGB-D dataset format.
    // Call first Shutdown()
    // See format details at: http://vision.in.tum.de/data/datasets/rgbd-dataset
    void SaveTrajectoryTUM(const string &filename);

    // Save camera transform matrices for all frames / keyframes only.
    // NOTE(review): presumably emits the full 4x4 pose matrices alongside the
    // TUM trajectory output -- implementation not in view, confirm format.
    void SaveAllCameraTransformMatrix(const string &filename);
    void SaveKeyFrameCameraTransformMatrix(const string &filename);

    // Save keyframe poses in the TUM RGB-D dataset format.
    // Use this function in the monocular case.
    // Call first Shutdown()
    // See format details at: http://vision.in.tum.de/data/datasets/rgbd-dataset
    void SaveKeyFrameTrajectoryTUM(const string &filename);

    // Save camera trajectory in the KITTI dataset format.
    // Call first Shutdown()
    // See format details at: http://www.cvlibs.net/datasets/kitti/eval_odometry.php
    void SaveTrajectoryKITTI(const string &filename);

    // TODO: Save/Load functions
    // SaveMap(const string &filename);
    // LoadMap(const string &filename);

private:

    // Input sensor
    eSensor mSensor;

    // ORB vocabulary used for place recognition and feature matching.
    ORBVocabulary* mpVocabulary;

    // KeyFrame database for place recognition (relocalization and loop detection).
    KeyFrameDatabase* mpKeyFrameDatabase;

    // Map structure that stores the pointers to all KeyFrames and MapPoints.
    Map* mpMap;

    // Tracker. It receives a frame and computes the associated camera pose.
    // It also decides when to insert a new keyframe, create some new MapPoints and
    // performs relocalization if tracking fails.
    Tracking* mpTracker;

    // Local Mapper. It manages the local map and performs local bundle adjustment.
    LocalMapping* mpLocalMapper;

    // Loop Closer. It searches loops with every new keyframe. If there is a loop it performs
    // a pose graph optimization and full bundle adjustment (in a new thread) afterwards.
    LoopClosing* mpLoopCloser;

    // The viewer draws the map and the current camera pose. It uses Pangolin.
    Viewer* mpViewer;

    FrameDrawer* mpFrameDrawer;
    MapDrawer* mpMapDrawer;

    // System threads: Local Mapping, Loop Closing, Viewer.
    // The Tracking thread "lives" in the main execution thread that creates the System object.
    std::thread* mptLocalMapping;
    std::thread* mptLoopClosing;
    std::thread* mptViewer;

    // Reset flag
    std::mutex mMutexReset;
    bool mbReset;

    // Change mode flags
    std::mutex mMutexMode;
    bool mbActivateLocalizationMode;
    bool mbDeactivateLocalizationMode;

    // // point cloud mapping
    // shared_ptr<PointCloudMapping> mpPointCloudMapping;
};
}// namespace ORB_SLAM
#endif // SYSTEM_H
| 33.816092 | 120 | 0.720598 | [
"object"
] |
cfa6e0853eb6b617c67432192404a1a18f0f393c | 3,630 | h | C | src/net/spdy/hpack/hpack_encoder.h | acmd/GIT-TCC-LIBQUIC-ACMD | f100c1ff7b4c24ed2fbb3ae11d9516f6ce5ccc6d | [
"BSD-3-Clause"
] | 2 | 2019-01-28T08:09:58.000Z | 2021-11-15T15:32:10.000Z | src/net/spdy/hpack/hpack_encoder.h | acmd/GIT-TCC-LIBQUIC-ACMD | f100c1ff7b4c24ed2fbb3ae11d9516f6ce5ccc6d | [
"BSD-3-Clause"
] | null | null | null | src/net/spdy/hpack/hpack_encoder.h | acmd/GIT-TCC-LIBQUIC-ACMD | f100c1ff7b4c24ed2fbb3ae11d9516f6ce5ccc6d | [
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_SPDY_HPACK_ENCODER_H_
#define NET_SPDY_HPACK_ENCODER_H_
#include <stddef.h>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "net/base/net_export.h"
#include "net/spdy/hpack/hpack_header_table.h"
#include "net/spdy/hpack/hpack_output_stream.h"
#include "net/spdy/spdy_protocol.h"
// An HpackEncoder encodes header sets as outlined in
// http://tools.ietf.org/html/rfc7541.
namespace net {
class HpackHuffmanTable;
namespace test {
class HpackEncoderPeer;
} // namespace test
// Encodes SPDY header blocks into the HPACK wire format (RFC 7541),
// maintaining the dynamic header table between calls.
class NET_EXPORT_PRIVATE HpackEncoder {
 public:
  friend class test::HpackEncoderPeer;

  // |table| is an initialized HPACK Huffman table, having an
  // externally-managed lifetime which spans beyond HpackEncoder.
  explicit HpackEncoder(const HpackHuffmanTable& table);
  ~HpackEncoder();

  // Encodes the given header set into the given string. Returns
  // whether or not the encoding was successful.
  bool EncodeHeaderSet(const SpdyHeaderBlock& header_set, std::string* output);

  // Encodes the given header set into the given string. Only non-indexed
  // literal representations are emitted, bypassing the header table. Huffman
  // coding is also not used. Returns whether the encoding was successful.
  bool EncodeHeaderSetWithoutCompression(const SpdyHeaderBlock& header_set,
                                         std::string* output);

  // Called upon a change to SETTINGS_HEADER_TABLE_SIZE. Specifically, this
  // is to be called after receiving (and sending an acknowledgement for) a
  // SETTINGS_HEADER_TABLE_SIZE update from the remote decoding endpoint.
  void ApplyHeaderTableSizeSetting(size_t size_setting);

  // Current upper bound on the dynamic table size, as set by the peer.
  size_t CurrentHeaderTableSizeSetting() const {
    return header_table_.settings_size_bound();
  }

  // Installs a debug observer on the underlying header table (test/tooling).
  void SetHeaderTableDebugVisitor(
      std::unique_ptr<HpackHeaderTable::DebugVisitorInterface> visitor) {
    header_table_.set_debug_visitor(std::move(visitor));
  }

 private:
  // A single name/value pair; both pieces are borrowed views.
  typedef std::pair<base::StringPiece, base::StringPiece> Representation;
  typedef std::vector<Representation> Representations;

  // Emits a static/dynamic indexed representation (Section 7.1).
  void EmitIndex(const HpackEntry* entry);

  // Emits a literal representation (Section 7.2).
  void EmitIndexedLiteral(const Representation& representation);
  void EmitNonIndexedLiteral(const Representation& representation);
  void EmitLiteral(const Representation& representation);

  // Emits a Huffman or identity string (whichever is smaller).
  void EmitString(base::StringPiece str);

  // Emits the current dynamic table size if the table size was recently
  // updated and we have not yet emitted it (Section 6.3).
  void MaybeEmitTableSize();

  // Crumbles a cookie header into ";" delimited crumbs.
  static void CookieToCrumbs(const Representation& cookie,
                             Representations* crumbs_out);

  // Crumbles other header field values at \0 delimiters.
  static void DecomposeRepresentation(const Representation& header_field,
                                      Representations* out);

  HpackHeaderTable header_table_;
  HpackOutputStream output_stream_;

  const HpackHuffmanTable& huffman_table_;
  // Smallest table-size setting received since the last size emission.
  size_t min_table_size_setting_received_;
  bool allow_huffman_compression_;
  bool should_emit_table_size_;

  DISALLOW_COPY_AND_ASSIGN(HpackEncoder);
};
} // namespace net
#endif // NET_SPDY_HPACK_ENCODER_H_
| 33.925234 | 79 | 0.756749 | [
"vector"
] |
cfa782d89f4d16d2b2382049757bd7035fd78e61 | 18,733 | c | C | apps/pic32mz_ef_sk_meb2/audio_player/audio_player_lab5/firmware/src/config/default/peripheral/dmac/plib_dmac.c | SyedThaseemuddin/MPLAB-Harmony-Reference-Apps | 16e4da0b1d071a9c1f454697b275a2577c70a0aa | [
"0BSD"
] | null | null | null | apps/pic32mz_ef_sk_meb2/audio_player/audio_player_lab5/firmware/src/config/default/peripheral/dmac/plib_dmac.c | SyedThaseemuddin/MPLAB-Harmony-Reference-Apps | 16e4da0b1d071a9c1f454697b275a2577c70a0aa | [
"0BSD"
] | null | null | null | apps/pic32mz_ef_sk_meb2/audio_player/audio_player_lab5/firmware/src/config/default/peripheral/dmac/plib_dmac.c | SyedThaseemuddin/MPLAB-Harmony-Reference-Apps | 16e4da0b1d071a9c1f454697b275a2577c70a0aa | [
"0BSD"
] | 1 | 2021-06-16T11:54:03.000Z | 2021-06-16T11:54:03.000Z | /*******************************************************************************
Direct Memory Access Controller (DMAC) PLIB
Company
Microchip Technology Inc.
File Name
plib_dmac.c
Summary
Source for DMAC peripheral library interface Implementation.
Description
This file defines the interface to the DMAC peripheral library. This
library provides access to and control of the DMAC controller.
Remarks:
None.
*******************************************************************************/
// DOM-IGNORE-BEGIN
/*******************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*******************************************************************************/
// DOM-IGNORE-END
#include "plib_dmac.h"
// *****************************************************************************
// *****************************************************************************
// Section: Global Data
// *****************************************************************************
// *****************************************************************************
static DMAC_CHANNEL_OBJECT gDMAChannelObj[8];
#define ConvertToPhysicalAddress(a) ((uint32_t)KVA_TO_PA(a))
#define ConvertToVirtualAddress(a) PA_TO_KVA1(a)
// *****************************************************************************
// *****************************************************************************
// Section: DMAC PLib Local Functions
// *****************************************************************************
// *****************************************************************************
// *****************************************************************************
/* Function:
static void DMAC_ChannelSetAddresses
Summary:
Converter to physical start addresses for DMA address registers to use
Description:
Calculates physical start addresses and stores into source and destination
address registers DCHxSSA and DCHxDSA.
Parameters:
DMAC_CHANNEL channel - DMA channel this function call pertains to
const void *srcAddr - starting address of source buffer to transfer
const void *destAddr - starting address of destination buffer to transfer
Returns:
void
*/
/*
 * Translate the virtual source/destination addresses to physical and
 * program them into the channel's DCHxSSA / DCHxDSA registers.  Each
 * channel's register block is 0xC0 bytes, starting 0x60 past the DMAC
 * module base; SSA is at offset 0x30 and DSA at 0x40 within the block.
 */
static void DMAC_ChannelSetAddresses( DMAC_CHANNEL channel, const void *srcAddr, const void *destAddr)
{
    uint32_t sourceAddress = (uint32_t)srcAddr;
    uint32_t destAddress = (uint32_t)destAddr;
    volatile uint32_t * regs;

    /* Set the source address */
    /* Check if the address lies in the KSEG2 for MZ devices */
    if ((sourceAddress >> 29) == 0x6)
    {
        if ((sourceAddress >> 28)== 0xc)
        {
            // EBI Address translation: virtual 0xCxxxxxxx -> physical 0x2xxxxxxx
            sourceAddress = ((sourceAddress | 0x20000000) & 0x2FFFFFFF);
        }
        else if((sourceAddress >> 28)== 0xD)
        {
            //SQI Address translation: virtual 0xDxxxxxxx -> physical 0x3xxxxxxx
            sourceAddress = ((sourceAddress | 0x30000000) & 0x3FFFFFFF);
        }
    }
    else if ((sourceAddress >> 29) == 0x7)
    {
        /* KSEG3 window */
        if ((sourceAddress >> 28)== 0xE)
        {
            // EBI Address translation: virtual 0xExxxxxxx -> physical 0x2xxxxxxx
            sourceAddress = ((sourceAddress | 0x20000000) & 0x2FFFFFFF);
        }
        else if ((sourceAddress >> 28)== 0xF)
        {
            // SQI Address translation: virtual 0xFxxxxxxx -> physical 0x3xxxxxxx
            sourceAddress = ((sourceAddress | 0x30000000) & 0x3FFFFFFF);
        }
    }
    else
    {
        /* For KSEG0 and KSEG1, The translation is done by KVA_TO_PA */
        sourceAddress = ConvertToPhysicalAddress(sourceAddress);
    }

    /* Set the source address, DCHxSSA */
    regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x30);
    *(volatile uint32_t *)(regs) = sourceAddress;

    /* Set the destination address */
    /* Check if the address lies in the KSEG2 for MZ devices */
    if ((destAddress >> 29) == 0x6)
    {
        // EBI Address translation
        if ((destAddress >> 28)== 0xc)
        {
            destAddress = ((destAddress | 0x20000000) & 0x2FFFFFFF);
        }
        //SQI Address translation
        else if ((destAddress >> 28)== 0xd)
        {
            destAddress = ((destAddress | 0x30000000) & 0x3FFFFFFF);
        }
    }
    else if ((destAddress >> 29) == 0x7)
    {   /* Check if the address lies in the KSEG3 for MZ devices */
        // EBI Address translation
        if ((destAddress >> 28)== 0xe)
        {
            destAddress = ((destAddress | 0x20000000) & 0x2FFFFFFF);
        }
        //SQI Address translation
        else if ((destAddress >> 28)== 0xf)
        {
            destAddress = ((destAddress | 0x30000000) & 0x3FFFFFFF);
        }
    }
    else
    {
        /*For KSEG0 and KSEG1, The translation is done by KVA_TO_PA */
        destAddress = ConvertToPhysicalAddress(destAddress);
    }

    /* Set destination address, DCHxDSA */
    regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x40);
    *(volatile uint32_t *)(regs) = destAddress;
}
// *****************************************************************************
// *****************************************************************************
// Section: DMAC PLib Interface Implementations
// *****************************************************************************
// *****************************************************************************
// *****************************************************************************
/* Function:
void DMAC_Initialize( void )
Summary:
This function initializes the DMAC controller of the device.
Description:
Sets up a DMA controller for subsequent transfer activity.
Parameters:
none
Returns:
void
*/
/*
 * Enable the DMAC module, reset the 8 software channel objects, and apply
 * the generator-produced configuration for channels 0-2 (trigger source,
 * priority, and block-complete / abort / error interrupt enables), then
 * unmask the corresponding interrupt lines in IEC4.
 */
void DMAC_Initialize( void )
{
    uint8_t chanIndex;
    DMAC_CHANNEL_OBJECT *chanObj;

    /* Enable the DMA module */
    DMACONSET = _DMACON_ON_MASK;

    /* Initialize the available channel objects */
    chanObj = (DMAC_CHANNEL_OBJECT *)&gDMAChannelObj[0];

    for(chanIndex = 0; chanIndex < 8; chanIndex++)
    {
        chanObj->inUse          =    false;
        chanObj->pEventCallBack =    NULL;
        chanObj->hClientArg     =    0;
        chanObj->errorInfo      =    DMAC_ERROR_NONE;
        chanObj                 =    chanObj + 1;    /* advance to the next element of the channel-object array */
    }

    /* DMACON register */
    /* ON = 1 */
    DMACON = 0x8000;

    /* DMA channel-level control registers. They will have additional settings made when starting a transfer. */
    /* DMA channel 0 configuration */
    /* CHPRI = 0 */
    DCH0CON = 0x0;
    /* CHSIRQ = 111, SIRQEN = 1 */
    DCH0ECON = 0x6f10;
    /* CHBCIE = 1, CHTAIE=1, CHERIE=1 */
    DCH0INT = 0xB0000;

    /* DMA channel 1 configuration */
    /* CHPRI = 0 */
    DCH1CON = 0x0;
    /* CHSIRQ = 110, SIRQEN = 1 */
    DCH1ECON = 0x6e10;
    /* CHBCIE = 1, CHTAIE=1, CHERIE=1 */
    DCH1INT = 0xB0000;

    /* DMA channel 2 configuration */
    /* CHPRI = 0 */
    DCH2CON = 0x0;
    /* CHSIRQ = 0, SIRQEN = 0 (software-triggered channel) */
    DCH2ECON = 0x0;
    /* CHBCIE = 1, CHTAIE=1, CHERIE=1 */
    DCH2INT = 0xB0000;

    /* Enable DMA channel interrupts */
    IEC4SET = 0 | 0x40 | 0x80 | 0x100 ;
}
// *****************************************************************************
/* Function:
    void DMAC_ChannelCallbackRegister
  Summary:
    Callback function registration function
  Description:
    Registers the callback function (and context pointer, if used) that is
    invoked from the channel's interrupt handler on transfer events.
  Parameters:
    DMAC_CHANNEL channel - DMA channel this callback pertains to
    const DMAC_CHANNEL_CALLBACK eventHandler - pointer to callback function
    const uintptr_t contextHandle - pointer of context information callback is to use (set to NULL if not used)
  Returns:
    void
*/
void DMAC_ChannelCallbackRegister(DMAC_CHANNEL channel, const DMAC_CHANNEL_CALLBACK eventHandler, const uintptr_t contextHandle)
{
    DMAC_CHANNEL_OBJECT *chanObj = &gDMAChannelObj[channel];

    chanObj->pEventCallBack = eventHandler;
    chanObj->hClientArg     = contextHandle;
}
// *****************************************************************************
/* Function:
    bool DMAC_ChannelTransfer
  Summary:
    DMA channel transfer function
  Description:
    Sets up a DMA transfer, and starts the transfer if user specified a
    software-initiated transfer in Harmony.
  Parameters:
    DMAC_CHANNEL channel - DMA channel to use for this transfer
    const void *srcAddr - pointer to source data
    size_t srcSize - source buffer size in bytes (written to DCHxSSIZ)
    const void *destAddr - pointer to where data is to be moved to
    size_t destSize - destination buffer size in bytes (written to DCHxDSIZ)
    size_t cellSize - cell transfer size in bytes (written to DCHxCSIZ)
  Returns:
    false, if DMA already is busy / true, if DMA is not busy before calling function
*/
bool DMAC_ChannelTransfer( DMAC_CHANNEL channel, const void *srcAddr, size_t srcSize, const void *destAddr, size_t destSize, size_t cellSize)
{
    bool returnStatus = false;
    volatile uint32_t *regs;

    if(gDMAChannelObj[channel].inUse == false)
    {
        gDMAChannelObj[channel].inUse = true;
        returnStatus = true;

        /* Set the source / destination addresses, DCHxSSA and DCHxDSA */
        DMAC_ChannelSetAddresses(channel, srcAddr, destAddr);

        /* Each channel owns a 0xC0-byte register block starting at offset 0x60
           from the DMAC base; the constants below (0x50, 0x60, 0x90, ...) are
           register offsets within that block.  The "+1"/"+2" word offsets
           address the atomic CLR/SET companion registers of an SFR
           (PIC32 convention: SFR, CLR, SET, INV at +0x0/+0x4/+0x8/+0xC). */

        /* Set the source size, DCHxSSIZ */
        regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x50);
        *(volatile uint32_t *)(regs) = srcSize;

        /* Set the destination size, DCHxDSIZ */
        regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x60);
        *(volatile uint32_t *)(regs) = destSize;

        /* Set the cell size, DCHxCSIZ */
        regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x90);
        *(volatile uint32_t *)(regs) = cellSize;

        /* Enable the channel: write CHEN mask to DCHxCONSET (+2 words) */
        regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x0)+2;
        *(volatile uint32_t *)(regs) = _DCH0CON_CHEN_MASK;

        /* Check Channel Start IRQ Enable bit - SIRQEN in DCHxECON.
           (The DCH0 mask is used for every channel; the bit position is
           identical across the per-channel register blocks.) */
        regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x10);

        /* Initiate transfer if user did not set up channel for interrupt-initiated transfer. */
        if((*(volatile uint32_t *)(regs) & _DCH0ECON_SIRQEN_MASK) == 0)
        {
            /* Force the transfer: write CFORCE mask to DCHxECONSET (+2 words) */
            regs = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x10)+2;
            *(volatile uint32_t *)(regs) = _DCH0ECON_CFORCE_MASK;
        }
    }

    return returnStatus;
}
// *****************************************************************************
/* Function:
    void DMAC_ChannelDisable (DMAC_CHANNEL channel)
  Summary:
    This function disables the DMA channel.
  Description:
    Disables the DMA channel specified and marks it as free in the driver's
    bookkeeping.  Out-of-range channel numbers are ignored.
  Parameters:
    DMAC_CHANNEL channel - the particular channel to be disabled
  Returns:
    void
*/
void DMAC_ChannelDisable (DMAC_CHANNEL channel)
{
    if(channel >= 8)
    {
        return;
    }

    /* Clear CHEN by writing its mask to DCHxCONCLR: the channel's CON
       register lives at base + 0x60 + channel * 0xC0, and the atomic CLR
       companion register is one word (+0x4) after it. */
    volatile uint32_t *conClr = (volatile uint32_t *)(_DMAC_BASE_ADDRESS + 0x60 + (channel * 0xC0) + 0x0)+1;
    *conClr = _DCH0CON_CHEN_MASK;

    gDMAChannelObj[channel].inUse = false;
}
// *****************************************************************************
/* Function:
    bool DMAC_ChannelIsBusy (DMAC_CHANNEL channel)
  Summary:
    Reads the busy status of a channel.
  Description:
    Returns the driver's software "in use" flag for the channel (set when a
    transfer is started, cleared by the channel's interrupt handler or by
    DMAC_ChannelDisable).
  Parameters:
    DMAC_CHANNEL channel - the particular channel to be interrogated
  Returns:
    true - channel is busy
    false - channel is not busy
*/
bool DMAC_ChannelIsBusy (DMAC_CHANNEL channel)
{
    const DMAC_CHANNEL_OBJECT *chanObj = &gDMAChannelObj[channel];
    return chanObj->inUse;
}
// *****************************************************************************
/* Function:
void DMA0_InterruptHandler (void)
Summary:
Interrupt handler for interrupts from DMA0.
Description:
None
Parameters:
none
Returns:
void
*/
void DMA0_InterruptHandler (void)
{
DMAC_CHANNEL_OBJECT *chanObj;
DMAC_TRANSFER_EVENT dmaEvent = DMAC_TRANSFER_EVENT_NONE;
bool retVal = false;
/* Find out the channel object */
chanObj = (DMAC_CHANNEL_OBJECT *) &gDMAChannelObj[0];
/* Check whether the active DMA channel event has occurred */
retVal = DCH0INTbits.CHBCIF;
if(true == retVal) /* irq due to transfer complete */
{
/* Channel is by default disabled on completion of a block transfer */
/* Clear the Block transfer complete flag */
DCH0INTCLR = _DCH0INT_CHBCIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_COMPLETE;
}
else if(true == DCH0INTbits.CHTAIF) /* irq due to transfer abort */
{
/* Channel is by default disabled on Transfer Abortion */
/* Clear the Abort transfer complete flag */
DCH0INTCLR = _DCH0INT_CHTAIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
else if(true == DCH0INTbits.CHERIF)
{
/* Clear the Block transfer complete flag */
DCH0INTCLR = _DCH0INT_CHERIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_ADDRESS_ERROR;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
chanObj->inUse = false;
IFS4CLR = 0x40;
/* Clear the interrupt flag and call event handler */
if((NULL != chanObj->pEventCallBack) && (DMAC_TRANSFER_EVENT_NONE != dmaEvent))
{
chanObj->pEventCallBack(dmaEvent, chanObj->hClientArg);
}
}
// *****************************************************************************
/* Function:
void DMA1_InterruptHandler (void)
Summary:
Interrupt handler for interrupts from DMA1.
Description:
None
Parameters:
none
Returns:
void
*/
void DMA1_InterruptHandler (void)
{
DMAC_CHANNEL_OBJECT *chanObj;
DMAC_TRANSFER_EVENT dmaEvent = DMAC_TRANSFER_EVENT_NONE;
bool retVal = false;
/* Find out the channel object */
chanObj = (DMAC_CHANNEL_OBJECT *) &gDMAChannelObj[1];
/* Check whether the active DMA channel event has occurred */
retVal = DCH1INTbits.CHBCIF;
if(true == retVal) /* irq due to transfer complete */
{
/* Channel is by default disabled on completion of a block transfer */
/* Clear the Block transfer complete flag */
DCH1INTCLR = _DCH1INT_CHBCIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_COMPLETE;
}
else if(true == DCH1INTbits.CHTAIF) /* irq due to transfer abort */
{
/* Channel is by default disabled on Transfer Abortion */
/* Clear the Abort transfer complete flag */
DCH1INTCLR = _DCH1INT_CHTAIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
else if(true == DCH1INTbits.CHERIF)
{
/* Clear the Block transfer complete flag */
DCH1INTCLR = _DCH1INT_CHERIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_ADDRESS_ERROR;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
chanObj->inUse = false;
IFS4CLR = 0x80;
/* Clear the interrupt flag and call event handler */
if((NULL != chanObj->pEventCallBack) && (DMAC_TRANSFER_EVENT_NONE != dmaEvent))
{
chanObj->pEventCallBack(dmaEvent, chanObj->hClientArg);
}
}
// *****************************************************************************
/* Function:
void DMA2_InterruptHandler (void)
Summary:
Interrupt handler for interrupts from DMA2.
Description:
None
Parameters:
none
Returns:
void
*/
void DMA2_InterruptHandler (void)
{
DMAC_CHANNEL_OBJECT *chanObj;
DMAC_TRANSFER_EVENT dmaEvent = DMAC_TRANSFER_EVENT_NONE;
bool retVal = false;
/* Find out the channel object */
chanObj = (DMAC_CHANNEL_OBJECT *) &gDMAChannelObj[2];
/* Check whether the active DMA channel event has occurred */
retVal = DCH2INTbits.CHBCIF;
if(true == retVal) /* irq due to transfer complete */
{
/* Channel is by default disabled on completion of a block transfer */
/* Clear the Block transfer complete flag */
DCH2INTCLR = _DCH2INT_CHBCIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_COMPLETE;
}
else if(true == DCH2INTbits.CHTAIF) /* irq due to transfer abort */
{
/* Channel is by default disabled on Transfer Abortion */
/* Clear the Abort transfer complete flag */
DCH2INTCLR = _DCH2INT_CHTAIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_NONE;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
else if(true == DCH2INTbits.CHERIF)
{
/* Clear the Block transfer complete flag */
DCH2INTCLR = _DCH2INT_CHERIF_MASK;
/* Update error and event */
chanObj->errorInfo = DMAC_ERROR_ADDRESS_ERROR;
dmaEvent = DMAC_TRANSFER_EVENT_ERROR;
}
chanObj->inUse = false;
IFS4CLR = 0x100;
/* Clear the interrupt flag and call event handler */
if((NULL != chanObj->pEventCallBack) && (DMAC_TRANSFER_EVENT_NONE != dmaEvent))
{
chanObj->pEventCallBack(dmaEvent, chanObj->hClientArg);
}
}
| 31.27379 | 141 | 0.583729 | [
"object"
] |
cfaa8acc710aae96c53152908014bd2649800063 | 3,706 | h | C | tasmota/Tasmota-8.4.0/libesp32/NimBLE-Arduino/src/NimBLEServer.h | zorcec/SARAH | c7936ce9467fb11594b6ae4a937d6766060bec05 | [
"MIT"
] | 2 | 2021-01-03T07:48:53.000Z | 2021-03-13T22:44:16.000Z | tasmota/Tasmota-8.4.0/libesp32/NimBLE-Arduino/src/NimBLEServer.h | zorcec/SARAH | c7936ce9467fb11594b6ae4a937d6766060bec05 | [
"MIT"
] | null | null | null | tasmota/Tasmota-8.4.0/libesp32/NimBLE-Arduino/src/NimBLEServer.h | zorcec/SARAH | c7936ce9467fb11594b6ae4a937d6766060bec05 | [
"MIT"
] | null | null | null | /*
* NimBLEServer.h
*
* Created: on March 2, 2020
* Author H2zero
*
* Originally:
*
* BLEServer.h
*
* Created on: Apr 16, 2017
* Author: kolban
*/
#ifndef MAIN_NIMBLESERVER_H_
#define MAIN_NIMBLESERVER_H_
#include "sdkconfig.h"
#if defined(CONFIG_BT_ENABLED)
#include "nimconfig.h"
#if defined(CONFIG_BT_NIMBLE_ROLE_PERIPHERAL)
#include "NimBLEUtils.h"
#include "NimBLEAddress.h"
#include "NimBLEAdvertising.h"
#include "NimBLEService.h"
#include "NimBLESecurity.h"
class NimBLEService;
class NimBLECharacteristic;
class NimBLEServerCallbacks;
/**
* @brief The model of a %BLE server.
*/
class NimBLEServer {
public:
size_t getConnectedCount();
NimBLEService* createService(const char* uuid);
NimBLEService* createService(const NimBLEUUID &uuid, uint32_t numHandles=15,
uint8_t inst_id=0);
NimBLEAdvertising* getAdvertising();
void setCallbacks(NimBLEServerCallbacks* pCallbacks);
void startAdvertising();
void stopAdvertising();
void start();
NimBLEService* getServiceByUUID(const char* uuid);
NimBLEService* getServiceByUUID(const NimBLEUUID &uuid);
int disconnect(uint16_t connID,
uint8_t reason = BLE_ERR_REM_USER_CONN_TERM);
void updateConnParams(uint16_t conn_handle,
uint16_t minInterval, uint16_t maxInterval,
uint16_t latency, uint16_t timeout);
uint16_t getPeerMTU(uint16_t conn_id);
std::vector<uint16_t> getPeerDevices();
void advertiseOnDisconnect(bool);
private:
NimBLEServer();
friend class NimBLECharacteristic;
friend class NimBLEDevice;
friend class NimBLEAdvertising;
bool m_gattsStarted;
bool m_advertiseOnDisconnect;
NimBLEServerCallbacks* m_pServerCallbacks;
std::vector<uint16_t> m_connectedPeersVec;
// uint16_t m_svcChgChrHdl; // Future use
std::vector<NimBLEService*> m_svcVec;
std::vector<NimBLECharacteristic*> m_notifyChrVec;
static int handleGapEvent(struct ble_gap_event *event, void *arg);
}; // NimBLEServer
/**
* @brief Callbacks associated with the operation of a %BLE server.
*/
class NimBLEServerCallbacks {
public:
virtual ~NimBLEServerCallbacks() {};
/**
* @brief Handle a new client connection.
*
* When a new client connects, we are invoked.
*
* @param [in] pServer A reference to the %BLE server that received the client connection.
*/
virtual void onConnect(NimBLEServer* pServer);
virtual void onConnect(NimBLEServer* pServer, ble_gap_conn_desc* desc);
/**
* @brief Handle an existing client disconnection.
*
* When an existing client disconnects, we are invoked.
*
* @param [in] pServer A reference to the %BLE server that received the existing client disconnection.
*/
virtual void onDisconnect(NimBLEServer* pServer);
virtual uint32_t onPassKeyRequest(); //{return 0;}
virtual void onPassKeyNotify(uint32_t pass_key); //{}
virtual bool onSecurityRequest(); //{return true;}
virtual void onAuthenticationComplete(ble_gap_conn_desc* desc);//{};
virtual bool onConfirmPIN(uint32_t pin);//{return true;}
}; // NimBLEServerCallbacks
#endif // #if defined(CONFIG_BT_NIMBLE_ROLE_PERIPHERAL)
#endif /* CONFIG_BT_ENABLED */
#endif /* MAIN_NIMBLESERVER_H_ */
| 32.226087 | 106 | 0.639234 | [
"vector",
"model"
] |
cfadff9dfe6a382d6e2b016c8d3674aaef04d35f | 2,842 | h | C | vulkan/render_vulkan.h | TheVaffel/ChameleonRT | deb7216ed448b3c59809a0b9a270ea81b48759a7 | [
"MIT"
] | null | null | null | vulkan/render_vulkan.h | TheVaffel/ChameleonRT | deb7216ed448b3c59809a0b9a270ea81b48759a7 | [
"MIT"
] | null | null | null | vulkan/render_vulkan.h | TheVaffel/ChameleonRT | deb7216ed448b3c59809a0b9a270ea81b48759a7 | [
"MIT"
] | null | null | null | #pragma once
#include <memory>
#include <unordered_map>
#include <vulkan/vulkan.h>
#include "render_backend.h"
#include "vulkan_utils.h"
#include "vulkanrt_utils.h"
struct HitGroupParams {
uint64_t vert_buf = 0;
uint64_t idx_buf = 0;
uint64_t normal_buf = 0;
uint64_t uv_buf = 0;
uint32_t num_normals = 0;
uint32_t num_uvs = 0;
uint32_t material_id = 0;
};
struct RenderVulkan : RenderBackend {
std::shared_ptr<vkrt::Device> device;
std::shared_ptr<vkrt::Buffer> view_param_buf, img_readback_buf, mat_params, light_params;
std::shared_ptr<vkrt::Texture2D> render_target, accum_buffer;
#ifdef REPORT_RAY_STATS
std::shared_ptr<vkrt::Texture2D> ray_stats;
std::shared_ptr<vkrt::Buffer> ray_stats_readback_buf;
std::vector<uint16_t> ray_counts;
#endif
std::vector<std::unique_ptr<vkrt::TriangleMesh>> meshes;
std::unique_ptr<vkrt::TopLevelBVH> scene_bvh;
size_t total_geom = 0;
std::vector<std::shared_ptr<vkrt::Texture2D>> textures;
VkSampler sampler = VK_NULL_HANDLE;
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkCommandPool render_cmd_pool = VK_NULL_HANDLE;
VkCommandBuffer render_cmd_buf = VK_NULL_HANDLE;
VkCommandBuffer readback_cmd_buf = VK_NULL_HANDLE;
vkrt::RTPipeline rt_pipeline;
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
VkDescriptorSetLayout desc_layout = VK_NULL_HANDLE;
VkDescriptorSetLayout textures_desc_layout = VK_NULL_HANDLE;
VkDescriptorPool desc_pool = VK_NULL_HANDLE;
// We need a set per varying size array of things we're sending
VkDescriptorSet desc_set = VK_NULL_HANDLE;
VkDescriptorSet textures_desc_set = VK_NULL_HANDLE;
vkrt::ShaderBindingTable shader_table;
VkFence fence = VK_NULL_HANDLE;
size_t frame_id = 0;
bool native_display = false;
RenderVulkan(std::shared_ptr<vkrt::Device> device, bool native_display);
RenderVulkan();
virtual ~RenderVulkan();
std::string name() override;
void initialize(const int fb_width, const int fb_height) override;
void set_scene(const Scene &scene) override;
RenderStats render(const glm::vec3 &pos,
const glm::vec3 &dir,
const glm::vec3 &up,
const float fovy,
const bool camera_changed,
const bool readback_framebuffer) override;
private:
void build_raytracing_pipeline();
void build_shader_descriptor_table();
void build_shader_binding_table();
void update_view_parameters(const glm::vec3 &pos,
const glm::vec3 &dir,
const glm::vec3 &up,
const float fovy);
void record_command_buffers();
};
| 29.298969 | 93 | 0.6886 | [
"render",
"vector"
] |
cfb0bf529ea21c10c3044d4d820b9f05e556dab7 | 1,771 | h | C | Code/Libraries/Audio/src/isoundinstance.h | dphrygian/zeta | 2b32760558cf2b20c626cf46fcf2a382924988fe | [
"Zlib",
"Unlicense"
] | 6 | 2022-01-22T02:18:07.000Z | 2022-02-14T09:30:53.000Z | Code/Libraries/Audio/src/isoundinstance.h | dphrygian/zeta | 2b32760558cf2b20c626cf46fcf2a382924988fe | [
"Zlib",
"Unlicense"
] | null | null | null | Code/Libraries/Audio/src/isoundinstance.h | dphrygian/zeta | 2b32760558cf2b20c626cf46fcf2a382924988fe | [
"Zlib",
"Unlicense"
] | null | null | null | #ifndef ISOUNDINSTANCE_H
#define ISOUNDINSTANCE_H
#include "iaudiosystem.h"
class ISound;
class Sound3DListener;
class View;
class Vector;
class HashedString;
class ISoundInstance
{
public:
virtual ~ISoundInstance() {}
virtual void Play() = 0;
virtual void Stop() = 0;
virtual void SetPaused( bool Paused ) = 0;
virtual void SetVolume( float Volume ) = 0;
virtual void SetPan( float Pan ) = 0;
virtual void SetLocation( const Vector& Location ) = 0;
virtual void SetPriority( ESoundPriority Priority ) const = 0;
virtual void SetPitch( const float Pitch ) = 0;
virtual uint GetPosition() const = 0;
virtual void SetPosition( const uint Position ) = 0;
virtual bool IsPlaying() const = 0;
virtual bool IsFinished() const = 0;
virtual ISound* GetSound() const = 0;
virtual float GetAttenuation() const = 0;
virtual float GetTimeElapsed() const = 0;
virtual void SetBaseVolume( const float BaseVolume ) = 0;
virtual float GetBaseVolume() const = 0;
virtual Vector GetLocation() const = 0;
// Stuff that just gets forwarded up the chain to the sound or audio system
virtual const HashedString& GetCategory() const = 0;
virtual float GetCategoryVolume() const = 0;
virtual const Sound3DListener* Get3DListener() const = 0;
virtual float GetMasterVolume() const = 0;
virtual bool ShouldCalcOcclusion() const = 0;
virtual float GetOcclusionDepthScalar() const = 0;
virtual float GetOccludedFalloffRadius() const = 0;
virtual float GetPitchMin() const = 0;
virtual float GetPitchMax() const = 0;
virtual bool GetShouldSerialize() const = 0;
virtual void Tick() = 0;
virtual void Tick3D() = 0;
};
#endif // ISOUNDINSTANCE_H
| 31.070175 | 76 | 0.6917 | [
"vector"
] |
cfb5aec5a22e7c2663173f51612a7fe59a99ccb4 | 5,076 | h | C | os/arch/arm/include/stm32l4/irq.h | ziyik/TizenRT-1 | d510c03303fcfa605bc12c60f826fa5642bbe406 | [
"Apache-2.0"
] | 511 | 2017-03-29T09:14:09.000Z | 2022-03-30T23:10:29.000Z | os/arch/arm/include/stm32l4/irq.h | ziyik/TizenRT-1 | d510c03303fcfa605bc12c60f826fa5642bbe406 | [
"Apache-2.0"
] | 4,673 | 2017-03-29T10:43:43.000Z | 2022-03-31T08:33:44.000Z | os/arch/arm/include/stm32l4/irq.h | ziyik/TizenRT-1 | d510c03303fcfa605bc12c60f826fa5642bbe406 | [
"Apache-2.0"
] | 642 | 2017-03-30T20:45:33.000Z | 2022-03-24T17:07:33.000Z | /************************************************************************************
* arch/arm/include/stm32l4/irq.h
*
* Copyright (C) 2015 Sebastien Lorquet. All rights reserved.
* Author: Sebastien Lorquet <sebastien@lorquet.fr>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
************************************************************************************/
/* This file should never be included directed but, rather,
* only indirectly through nuttx/irq.h
*/
#ifndef __ARCH_ARM_INCLUDE_STM32L4_IRQ_H
#define __ARCH_ARM_INCLUDE_STM32L4_IRQ_H
/************************************************************************************
* Included Files
************************************************************************************/
#include <tinyara/config.h>
/************************************************************************************
* Pre-processor Definitions
************************************************************************************/
/* IRQ numbers. The IRQ number corresponds vector number and hence map directly to
* bits in the NVIC. This does, however, waste several words of memory in the IRQ
* to handle mapping tables.
*/
/* Processor Exceptions (vectors 0-15) */
#define STM32L4_IRQ_RESERVED (0) /* Reserved vector (only used with CONFIG_DEBUG) */
/* Vector 0: Reset stack pointer value */
/* Vector 1: Reset (not handler as an IRQ) */
#define STM32L4_IRQ_NMI (2) /* Vector 2: Non-Maskable Interrupt (NMI) */
#define STM32L4_IRQ_HARDFAULT (3) /* Vector 3: Hard fault */
#define STM32L4_IRQ_MEMFAULT (4) /* Vector 4: Memory management (MPU) */
#define STM32L4_IRQ_BUSFAULT (5) /* Vector 5: Bus fault */
#define STM32L4_IRQ_USAGEFAULT (6) /* Vector 6: Usage fault */
/* Vectors 7-10: Reserved */
#define STM32L4_IRQ_SVCALL (11) /* Vector 11: SVC call */
#define STM32L4_IRQ_DBGMONITOR (12) /* Vector 12: Debug Monitor */
/* Vector 13: Reserved */
#define STM32L4_IRQ_PENDSV (14) /* Vector 14: Pendable system service request */
#define STM32L4_IRQ_SYSTICK (15) /* Vector 15: System tick */
/* External interrupts (vectors >= 16). These definitions are chip-specific */
#define STM32L4_IRQ_FIRST (16) /* Vector number of the first external interrupt */
#if defined(CONFIG_STM32L4_STM32L4X3)
# include <arch/stm32l4/stm32l4x3xx_irq.h>
#elif defined(CONFIG_STM32L4_STM32L4X5)
# include <arch/stm32l4/stm32l4x5xx_irq.h>
#elif defined(CONFIG_STM32L4_STM32L4X6)
# include <arch/stm32l4/stm32l4x6xx_irq.h>
#elif defined(CONFIG_STM32L4_STM32L4XR)
# include <arch/stm32l4/stm32l4xrxx_irq.h>
#else
# error "Unsupported STM32 L4 chip"
#endif
/************************************************************************************
* Public Types
************************************************************************************/
/************************************************************************************
* Public Data
************************************************************************************/
#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/************************************************************************************
* Public Functions
************************************************************************************/
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif
#endif /* __ARCH_ARM_INCLUDE_STM32L4_IRQ_H */
| 42.3 | 90 | 0.565997 | [
"vector"
] |
cfb796fc2934027c801a14c2dd524d19def5ec9a | 490 | h | C | src/glfoundation/skybox.h | ChinYing-Li/OpenGL-cart | 2625dfb194a65d6b277f6c3c57602319000bce16 | [
"MIT"
] | 2 | 2021-06-03T03:36:35.000Z | 2021-09-18T07:25:24.000Z | src/glfoundation/skybox.h | ChinYing-Li/OpenGL-cart | 2625dfb194a65d6b277f6c3c57602319000bce16 | [
"MIT"
] | null | null | null | src/glfoundation/skybox.h | ChinYing-Li/OpenGL-cart | 2625dfb194a65d6b277f6c3c57602319000bce16 | [
"MIT"
] | null | null | null | #pragma once
#include <experimental/filesystem>
#include <string>
#include "glfoundation/cubemap.h"
#include "glfoundation/texture.h"
namespace fs = std::experimental::filesystem;
namespace Cluster
{
class Skybox: Cubemap
{
public:
Skybox();
Skybox();
~Skybox() = default;
void bind(const GLuint binding_point);
void render(const Shader& shaderID) override;
private:
TextureCubemap m_cubemap;
};
} // namespace Cluster
| 20.416667 | 53 | 0.657143 | [
"render"
] |
cfbc761f495d2ef1145f631ff7b21252d0fc399d | 7,595 | h | C | src/Open3D/ColorMap/ColorMapOptimization.h | arunabhcode/Open3D | 40902ae67947fa27abeb748673bcc78b002180f6 | [
"MIT"
] | 8 | 2021-01-23T13:03:03.000Z | 2022-03-31T12:55:59.000Z | src/Open3D/ColorMap/ColorMapOptimization.h | arunabhcode/Open3D | 40902ae67947fa27abeb748673bcc78b002180f6 | [
"MIT"
] | 1 | 2019-01-24T00:33:28.000Z | 2019-06-11T00:44:32.000Z | src/Open3D/ColorMap/ColorMapOptimization.h | arunabhcode/Open3D | 40902ae67947fa27abeb748673bcc78b002180f6 | [
"MIT"
] | 14 | 2020-12-30T12:19:45.000Z | 2022-02-24T01:28:45.000Z | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include <vector>
namespace open3d {
namespace geometry {
class TriangleMesh;
}
namespace geometry {
class RGBDImage;
}
namespace geometry {
class Image;
}
namespace camera {
class PinholeCameraTrajectory;
}
namespace color_map {
/// \class ColorMapOptimizationOption
///
/// \brief Defines options for color map optimization.
class ColorMapOptimizationOption {
public:
ColorMapOptimizationOption(
// Attention: when you update the defaults, update the docstrings in
// Python/color_map/color_map.cpp
bool non_rigid_camera_coordinate = false,
int number_of_vertical_anchors = 16,
double non_rigid_anchor_point_weight = 0.316,
int maximum_iteration = 300,
double maximum_allowable_depth = 2.5,
double depth_threshold_for_visibility_check = 0.03,
double depth_threshold_for_discontinuity_check = 0.1,
int half_dilation_kernel_size_for_discontinuity_map = 3,
int image_boundary_margin = 10,
int invisible_vertex_color_knn = 3)
: non_rigid_camera_coordinate_(non_rigid_camera_coordinate),
number_of_vertical_anchors_(number_of_vertical_anchors),
non_rigid_anchor_point_weight_(non_rigid_anchor_point_weight),
maximum_iteration_(maximum_iteration),
maximum_allowable_depth_(maximum_allowable_depth),
depth_threshold_for_visibility_check_(
depth_threshold_for_visibility_check),
depth_threshold_for_discontinuity_check_(
depth_threshold_for_discontinuity_check),
half_dilation_kernel_size_for_discontinuity_map_(
half_dilation_kernel_size_for_discontinuity_map),
image_boundary_margin_(image_boundary_margin),
invisible_vertex_color_knn_(invisible_vertex_color_knn) {}
~ColorMapOptimizationOption() {}
public:
/// Set to `true` to enable non-rigid optimization (optimizing camera
/// extrinsic params and image wrapping field for color assignment), set to
/// False to only enable rigid optimization (optimize camera extrinsic
/// params).
bool non_rigid_camera_coordinate_;
/// Number of vertical anchor points for image wrapping field. The number of
/// horizontal anchor points is computed automatically based on the number
/// of vertical anchor points. This option is only used when non-rigid
/// optimization is enabled.
int number_of_vertical_anchors_;
/// Additional regularization terms added to non-rigid regularization. A
/// higher value results gives more conservative updates. If the residual
/// error does not stably decrease, it is mainly because images are being
/// bended abruptly. In this case, consider making iteration more
/// conservative by increasing the value. This option is only used when
/// non-rigid optimization is enabled.
double non_rigid_anchor_point_weight_;
/// Number of iterations for optimization steps.
int maximum_iteration_;
/// Parameter to check the visibility of a point. Points with depth larger
/// than maximum_allowable_depth in a RGB-D will be marked as invisible for
/// the camera producing that RGB-D image. Select a proper value to include
/// necessary points while ignoring unwanted points such as the background.
double maximum_allowable_depth_;
/// Parameter to check the visibility of a point. When the difference of a
/// point’s depth value in the RGB-D image and the point’s depth value in
/// the 3D mesh is greater than depth_threshold_for_visibility_check, the
/// point is marked as invisible to the camera producing the RGB-D image.
double depth_threshold_for_visibility_check_;
/// Parameter to check the visibility of a point. It’s often desirable to
/// ignore points where there is an abrupt change in depth value. First the
/// depth gradient image is computed, points are considered to be invisible
/// if the depth gradient magnitude is larger than
/// depth_threshold_for_discontinuity_check.
double depth_threshold_for_discontinuity_check_;
/// Parameter to check the visibility of a point. Related to
/// depth_threshold_for_discontinuity_check, when boundary points are
/// detected, dilation is performed to ignore points near the object
/// boundary. half_dilation_kernel_size_for_discontinuity_map specifies the
/// half-kernel size for the dilation applied on the visibility mask image.
int half_dilation_kernel_size_for_discontinuity_map_;
/// If a projected 3D point onto a 2D image lies in the image border within
/// image_boundary_margin, the 3D point is considered invisible from the
/// camera producing the image. This parmeter is not used for visibility
/// check, but used when computing the final color assignment after color
/// map optimization.
int image_boundary_margin_;
/// If a vertex is invisible from all images, we assign the averaged color
/// of the k nearest visible vertices to fill the invisible vertex. Set to
/// 0 to disable this feature and all invisible vertices will be black.
int invisible_vertex_color_knn_;
};
/// \brief Function for color mapping of reconstructed scenes via optimization.
///
/// This is implementation of following paper
/// Q.-Y. Zhou and V. Koltun,
/// Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras,
/// SIGGRAPH 2014.
///
/// \param mesh The input geometry mesh.
/// \param imgs_rgbd A list of RGBDImages seen by cameras.
/// \param camera Cameras' parameters.
/// \param option Color map optimization options. Takes the original
/// ColorMapOptimizationOption values by default.
void ColorMapOptimization(
geometry::TriangleMesh& mesh,
const std::vector<std::shared_ptr<geometry::RGBDImage>>& imgs_rgbd,
camera::PinholeCameraTrajectory& camera,
const ColorMapOptimizationOption& option =
ColorMapOptimizationOption());
} // namespace color_map
} // namespace open3d
| 48.685897 | 80 | 0.709809 | [
"mesh",
"geometry",
"object",
"vector",
"3d"
] |
cfbf9a364729649af52d6074a05d8a222e198cf5 | 1,356 | h | C | Source/Engine/Core/CoreForward.h | youngjunh98/Game-Engine | 7ec6356bd2835ba7f7a1119e873acee3e60b35f5 | [
"MIT"
] | null | null | null | Source/Engine/Core/CoreForward.h | youngjunh98/Game-Engine | 7ec6356bd2835ba7f7a1119e873acee3e60b35f5 | [
"MIT"
] | 1 | 2020-11-21T10:05:14.000Z | 2020-11-21T10:05:14.000Z | Source/Engine/Core/CoreForward.h | youngjunh98/GameEngine | 7ec6356bd2835ba7f7a1119e873acee3e60b35f5 | [
"MIT"
] | null | null | null | #ifndef INCLUDE_CORE_FORWARD
#define INCLUDE_CORE_FORWARD
// Forward declarations for the engine's public types. Including this header
// instead of the full class headers keeps translation-unit dependencies
// (and rebuild times) down; pull in the real header where a complete type
// is required.
namespace GameEngine
{
    // Core runtime
    class Engine;
    class Application;
    class Input;
    class Timer;
    // Math primitives
    class Math;
    class Vector2;
    class Vector3;
    class Vector4;
    class Quaternion;
    class Matrix4x4;
    class AABB;
    class Plane;
    // RI (rendering interface abstraction layer)
    class RenderingInterface;
    class RI_Resource;
    class RI_Buffer;
    class RI_VertexBuffer;
    class RI_IndexBuffer;
    class RI_ShaderConstantBuffer;
    class RI_Shader;
    class RI_VertexShader;
    class RI_PixelShader;
    class RI_HullShader;
    class RI_DomainShader;
    class RI_GeometryShader;
    class RI_ShaderResourceView;
    class RI_Sampler;
    class RI_Texture;
    class RI_Texture1D;
    class RI_Texture2D;
    class RI_Texture3D;
    class RI_RasterizerState;
    class RI_DepthStencilState;
    class RI_RenderTargetView;
    class RI_DepthStencilView;
    class RI_InputLayout;
    // High-level rendering
    class GlobalRenderer;
    class Shader;
    class Material;
    class Mesh;
    class Texture;
    class Texture2D;
    class TextureCube;
    class RenderPipeline;
    class RenderPass;
    // Physics
    class Physics;
    class Rigidbody;
    class Collider;
    class BoxCollider;
    class SphereCollider;
    // Audio
    class AudioClip;
    // Scene
    class Scene;
    class SceneManager;
    // Object model
    class Object;
    //class GameObject; // intentionally disabled here — NOTE(review): confirm where GameObject is declared
    class Component;
    class Transform;
    class Camera;
    class Renderer;
    class MeshRenderer;
    class Light;
}
#endif | 16.536585 | 31 | 0.779499 | [
"mesh",
"object",
"transform"
] |
cfc168f40df285ae7538c023cd749bd1982c29ac | 7,669 | h | C | Source/Hamakaze/wdksup.h | Shhoya/KDU | a900341c5fb2efa5967ba0fd60db7597778aaf5e | [
"MIT"
] | null | null | null | Source/Hamakaze/wdksup.h | Shhoya/KDU | a900341c5fb2efa5967ba0fd60db7597778aaf5e | [
"MIT"
] | null | null | null | Source/Hamakaze/wdksup.h | Shhoya/KDU | a900341c5fb2efa5967ba0fd60db7597778aaf5e | [
"MIT"
] | null | null | null | /************************************************************************************
*
* (C) COPYRIGHT AUTHORS, 2018 - 2022, translated from Microsoft sources/debugger
*
* TITLE: WDKSUP.H
*
* VERSION: 1.20
*
* DATE: 15 Feb 2022
*
* Header file for NT WDK definitions.
*
* WARNING: some structures are opaque and incomplete.
*
* THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
* ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
* PARTICULAR PURPOSE.
*
************************************************************************************/
#pragma once
//
// Processor modes.
//
// Processor mode: a signed char holding a MODE value (KernelMode/UserMode),
// matching the kernel's KPROCESSOR_MODE.
typedef CCHAR KPROCESSOR_MODE;
typedef enum _MODE {
    KernelMode,
    UserMode,
    MaximumMode
} MODE;
// UNICODE_STRING-shaped struct with an inline, fixed-capacity buffer
// (MAX_PATH WCHARs) instead of an externally allocated one. Length and
// MaximumLength follow the UNICODE_STRING convention (byte counts per WDK).
#define FIXED_UNICODE_STRING_LENGTH MAX_PATH
typedef struct _FIXED_UNICODE_STRING {
    USHORT Length;
    USHORT MaximumLength;
    WCHAR Buffer[FIXED_UNICODE_STRING_LENGTH];
} FIXED_UNICODE_STRING, * PFIXED_UNICODE_STRING;
// System worker-thread queue levels, consumed by ExQueueWorkItem (see the
// pfnExQueueWorkItem typedef below).
typedef _Enum_is_bitflag_ enum _WORK_QUEUE_TYPE {
    CriticalWorkQueue,
    DelayedWorkQueue,
    HyperCriticalWorkQueue,
    NormalWorkQueue,
    BackgroundWorkQueue,
    RealTimeWorkQueue,
    SuperCriticalWorkQueue,
    MaximumWorkQueue,
    CustomPriorityWorkQueue = 32
} WORK_QUEUE_TYPE;
// Callback signature invoked by a system worker thread for a queued item.
typedef
VOID
WORKER_THREAD_ROUTINE(
    _In_ PVOID Parameter);
typedef WORKER_THREAD_ROUTINE* PWORKER_THREAD_ROUTINE;
// Opaque stand-in for the kernel's ACCESS_STATE (full layout not needed here).
typedef VOID* PACCESS_STATE;
// DriverEntry signature; used as the InitializationFunction parameter of
// pfnIoCreateDriver below.
typedef
NTSTATUS
DRIVER_INITIALIZE(
    _In_ struct _DRIVER_OBJECT* DriverObject,
    _In_ PUNICODE_STRING RegistryPath);
typedef DRIVER_INITIALIZE* PDRIVER_INITIALIZE;
// Work item handed to ExQueueWorkItem; WorkerRoutine(Parameter) runs on a
// system worker thread.
typedef struct _WORK_QUEUE_ITEM {
    LIST_ENTRY List;
    PWORKER_THREAD_ROUTINE WorkerRoutine;
    __volatile PVOID Parameter;
} WORK_QUEUE_ITEM, * PWORK_QUEUE_ITEM;
//
// Function-pointer typedefs mirroring kernel/ntdll API signatures
// (pfn<ExportName> naming convention). Only the pointer types are declared
// here; no import-library linkage is implied.
//
typedef NTSTATUS(NTAPI* pfnDriverEntry)();
typedef BOOLEAN (NTAPI *pfnRtlCreateUnicodeString)(
    _Out_ _At_(DestinationString->Buffer, __drv_allocatesMem(Mem))
    PUNICODE_STRING DestinationString,
    _In_z_ PCWSTR SourceString);
// IoCreateDriver: creates a driver object and invokes InitializationFunction
// (not declared in the public WDK headers).
typedef NTSTATUS (WINAPI *pfnIoCreateDriver)(
    _In_ PUNICODE_STRING DriverName, OPTIONAL
    _In_ PDRIVER_INITIALIZE InitializationFunction);
typedef VOID(NTAPI* pfnExQueueWorkItem)(
    _Inout_ PWORK_QUEUE_ITEM WorkItem,
    _In_ WORK_QUEUE_TYPE QueueType);
typedef NTSTATUS(NTAPI* pfnZwOpenSection)(
    _Out_ PHANDLE SectionHandle,
    _In_ ACCESS_MASK DesiredAccess,
    _In_ POBJECT_ATTRIBUTES ObjectAttributes);
typedef NTSTATUS(NTAPI* pfnZwMapViewOfSection)(
    _In_ HANDLE SectionHandle,
    _In_ HANDLE ProcessHandle,
    _Inout_ _At_(*BaseAddress, _Readable_bytes_(*ViewSize) _Writable_bytes_(*ViewSize) _Post_readable_byte_size_(*ViewSize)) PVOID* BaseAddress,
    _In_ ULONG_PTR ZeroBits,
    _In_ SIZE_T CommitSize,
    _Inout_opt_ PLARGE_INTEGER SectionOffset,
    _Inout_ PSIZE_T ViewSize,
    _In_ SECTION_INHERIT InheritDisposition,
    _In_ ULONG AllocationType,
    _In_ ULONG Win32Protect);
typedef NTSTATUS(NTAPI* pfnZwUnmapViewOfSection)(
    _In_ HANDLE ProcessHandle,
    _In_opt_ PVOID BaseAddress);
typedef ULONG(NTAPI* pfnDbgPrint)(
    _In_ PCHAR Format,
    ...);
// Pool allocation/free. Note: ExAllocatePool (untagged) is deprecated in
// newer WDKs in favor of the tagged/Ex2 variants.
typedef PVOID(NTAPI* pfnExAllocatePool)(
    _In_ POOL_TYPE PoolType,
    _In_ SIZE_T NumberOfBytes);
typedef PVOID(NTAPI* pfnExAllocatePoolWithTag)(
    _In_ POOL_TYPE PoolType,
    _In_ SIZE_T NumberOfBytes,
    _In_ ULONG Tag);
typedef VOID(NTAPI* pfnExFreePoolWithTag)(
    _In_ PVOID P,
    _In_ ULONG Tag);
typedef NTSTATUS(NTAPI* pfnPsCreateSystemThread)(
    _Out_ PHANDLE ThreadHandle,
    _In_ ULONG DesiredAccess,
    _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes,
    _In_opt_ HANDLE ProcessHandle,
    _Out_opt_ PCLIENT_ID ClientId,
    _In_ PKSTART_ROUTINE StartRoutine,
    _In_opt_ PVOID StartContext);
typedef NTSTATUS(NTAPI* pfnZwClose)(
    _In_ HANDLE Handle);
// Irp is typed VOID* here to avoid depending on the (partial) IRP below.
typedef VOID(NTAPI* pfnIofCompleteRequest)(
    _In_ VOID* Irp,
    _In_ CCHAR PriorityBoost);
// Object-manager referencing, creation and insertion.
typedef NTSTATUS(NTAPI* pfnObReferenceObjectByHandle)(
    _In_ HANDLE Handle,
    _In_ ACCESS_MASK DesiredAccess,
    _In_opt_ POBJECT_TYPE ObjectType,
    _In_ KPROCESSOR_MODE AccessMode,
    _Out_ PVOID* Object,
    _Out_opt_ PVOID HandleInformation);
typedef VOID(NTAPI* pfnObfDereferenceObject)(
    _In_ PVOID Object);
// NOTE(review): the WDK declares KeSetEvent as returning LONG (the event's
// previous signal state); it is declared here as NTSTATUS (same width) —
// confirm callers do not interpret the value as a status code.
typedef NTSTATUS(NTAPI* pfnKeSetEvent)(
    _In_ PKEVENT Event,
    _In_ KPRIORITY Increment,
    _In_ _Literal_ BOOLEAN Wait);
typedef NTSTATUS(NTAPI* pfnObCreateObject)(
    _In_ KPROCESSOR_MODE ProbeMode,
    _In_ POBJECT_TYPE ObjectType,
    _In_ POBJECT_ATTRIBUTES ObjectAttributes,
    _In_ KPROCESSOR_MODE OwnershipMode,
    _Inout_opt_ PVOID ParseContext,
    _In_ ULONG ObjectBodySize,
    _In_ ULONG PagedPoolCharge,
    _In_ ULONG NonPagedPoolCharge,
    _Out_ PVOID* Object);
typedef NTSTATUS(NTAPI* pfnObInsertObject)(
    _In_ PVOID Object,
    _Inout_opt_ PACCESS_STATE AccessState,
    _Inout_opt_ ACCESS_MASK DesiredAccess,
    _In_ ULONG ObjectPointerBias,
    _Out_opt_ PVOID* NewObject,
    _Out_opt_ PHANDLE Handle);
typedef VOID(NTAPI* pfnObMakeTemporaryObject)(
    _In_ PVOID Object);
typedef NTSTATUS(NTAPI *pfnZwMakeTemporaryObject)(
    _In_ HANDLE Handle);
// User-mode I/O completion APC callback signature.
typedef
VOID
(NTAPI* PIO_APC_ROUTINE) (
    _In_ PVOID ApcContext,
    _In_ PIO_STATUS_BLOCK IoStatusBlock,
    _In_ ULONG Reserved
    );
// Partial mirror of the kernel's I/O stack location: only the leading header
// bytes are declared; the per-major-function Parameters union is omitted
// (see the "incomplete" marker and the file-header warning).
typedef struct _IO_STACK_LOCATION {
    UCHAR MajorFunction;
    UCHAR MinorFunction;
    UCHAR Flags;
    UCHAR Control;
    //incomplete
} IO_STACK_LOCATION, * PIO_STACK_LOCATION;
// Kernel APC object. NOTE(review): this mirrors a kernel-internal layout;
// field order and sizes must match the target kernel builds — verify against
// the WDK/symbols for the supported OS versions.
typedef struct _KAPC {
    UCHAR Type;
    UCHAR SpareByte0;
    UCHAR Size;
    UCHAR SpareByte1;
    ULONG SpareLong0;
    struct _KTHREAD* Thread;
    LIST_ENTRY ApcListEntry;
    PVOID Reserved[3];
    PVOID NormalContext;
    PVOID SystemArgument1;
    PVOID SystemArgument2;
    CCHAR ApcStateIndex;
    KPROCESSOR_MODE ApcMode;
    BOOLEAN Inserted;
} KAPC, * PKAPC, * PRKAPC;
//
// Partial mirror of the kernel IRP (the file header states these structures
// are opaque and incomplete). DECLSPEC_ALIGN keeps the declared alignment
// consistent with MEMORY_ALLOCATION_ALIGNMENT; layout must match the kernel,
// so do not reorder or "fix" fields here.
//
#pragma warning(push)
#pragma warning(disable:4324) // structure padded due to __declspec(align())
typedef struct DECLSPEC_ALIGN(MEMORY_ALLOCATION_ALIGNMENT) _IRP {
    CSHORT Type;
    USHORT Size;
    PVOID MdlAddress;
    ULONG Flags;
    union {
        struct _IRP* MasterIrp;
        __volatile LONG IrpCount;
        PVOID SystemBuffer;
    } AssociatedIrp;
    LIST_ENTRY ThreadListEntry;
    IO_STATUS_BLOCK IoStatus;
    KPROCESSOR_MODE RequestorMode;
    BOOLEAN PendingReturned;
    CHAR StackCount;
    CHAR CurrentLocation;
    BOOLEAN Cancel;
    KIRQL CancelIrql;
    CCHAR ApcEnvironment;
    UCHAR AllocationFlags;
    PIO_STATUS_BLOCK UserIosb;
    PVOID UserEvent;
    union {
        struct {
            union {
                PIO_APC_ROUTINE UserApcRoutine;
                PVOID IssuingProcess;
            };
            PVOID UserApcContext;
        } AsynchronousParameters;
        LARGE_INTEGER AllocationSize;
    } Overlay;
    __volatile PVOID CancelRoutine;
    PVOID UserBuffer;
    // Tail: queueing bookkeeping overlaid with the current-stack-location
    // pointer used by IoGetCurrentIrpStackLocation() below.
    union {
        struct {
            union {
                KDEVICE_QUEUE_ENTRY DeviceQueueEntry;
                struct {
                    PVOID DriverContext[4];
                };
            };
            PVOID Thread;
            PCHAR AuxiliaryBuffer;
            struct {
                LIST_ENTRY ListEntry;
                union {
                    struct _IO_STACK_LOCATION* CurrentStackLocation;
                    ULONG PacketType;
                };
            };
            PVOID OriginalFileObject;
        } Overlay;
        //incomplete
    } Tail;
} IRP;
#pragma warning(pop)
typedef IRP* PIRP;
//
// Returns the IRP's current I/O stack location pointer, which the kernel
// caches in Irp->Tail.Overlay (same contract as the WDK inline of the
// same name).
//
FORCEINLINE
PIO_STACK_LOCATION
IoGetCurrentIrpStackLocation(
    _In_ PIRP Irp
)
{
    PIO_STACK_LOCATION currentStackLocation;

    currentStackLocation = Irp->Tail.Overlay.CurrentStackLocation;
    return currentStackLocation;
}
| 24.73871 | 144 | 0.701917 | [
"object"
] |
cfc38d928d10db78e690e612597c966c28de8354 | 17,106 | c | C | coders/xps.c | UnshapenTube/Image-Magick | 81e01104fac0180234710c240206ca5000ea9343 | [
"ImageMagick"
] | null | null | null | coders/xps.c | UnshapenTube/Image-Magick | 81e01104fac0180234710c240206ca5000ea9343 | [
"ImageMagick"
] | null | null | null | coders/xps.c | UnshapenTube/Image-Magick | 81e01104fac0180234710c240206ca5000ea9343 | [
"ImageMagick"
] | null | null | null | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% X X PPPP SSSSS %
% X X P P SS %
% X PPPP SSS %
% X X P SS %
% X X P SSSSS %
% %
% %
% Read/Write Microsoft XML Paper Specification Format %
% %
% Software Design %
% Cristy %
% January 2008 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/delegate-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/resource_.h"
#include "magick/pixel-accessor.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "coders/bytebuffer-private.h"
#include "coders/ghostscript-private.h"
/*
  Typedef declarations.
*/
/*
  Per-image bookkeeping for the XPS coder.
  NOTE(review): this struct is not referenced anywhere in the visible reader
  path (ReadXPSImage builds its state from locals); field meanings are
  suggested by their names only — confirm before relying on them.
*/
typedef struct _XPSInfo
{
  MagickBooleanType
    cmyk;
  SegmentInfo
    bounds;
  unsigned long
    columns,
    rows;
  StringInfo
    *icc_profile,
    *photoshop_profile,
    *xmp_profile;
} XPSInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d X P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadXPSImage() reads a Printer Control Language image file and returns it.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSImage method is:
%
% Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadXPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  char
    command[MagickPathExtent],
    *density,
    filename[MagickPathExtent],
    input_filename[MagickPathExtent],
    message[MagickPathExtent],
    *options;
  const char
    *option;
  const DelegateInfo
    *delegate_info;
  GeometryInfo
    geometry_info;
  Image
    *image,
    *next,
    *postscript_image;
  ImageInfo
    *read_info;
  MagickBooleanType
    fitPage,
    status;
  MagickStatusType
    flags;
  PointInfo
    delta;
  RectangleInfo
    page;
  ssize_t
    i;
  unsigned long
    scene;
  /*
    Open image file.  A unique symbolic link to the input is created so the
    external delegate command below can reference it by a safe path.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
        image_info->filename);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Set the page density.  Defaults to PSDensityGeometry when the image does
    not carry a resolution; an explicit -density option overrides both.
  */
  delta.x=DefaultResolution;
  delta.y=DefaultResolution;
  if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0))
    {
      flags=ParseGeometry(PSDensityGeometry,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->density != (char *) NULL)
    {
      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  /*
    Compute the raster page size in pixels from the page geometry (points)
    scaled by the resolution.
  */
  (void) ParseAbsoluteGeometry(PSPageGeometry,&page);
  if (image_info->page != (char *) NULL)
    (void) ParseAbsoluteGeometry(image_info->page,&page);
  page.width=(size_t) ((ssize_t) ceil((double) (page.width*
    image->x_resolution/delta.x)-0.5));
  page.height=(size_t) ((ssize_t) ceil((double) (page.height*
    image->y_resolution/delta.y)-0.5));
  fitPage=MagickFalse;
  option=GetImageOption(image_info,"xps:fit-page");
  if (option != (char *) NULL)
    {
      char
        *page_geometry;
      page_geometry=GetPageGeometry(option);
      flags=ParseMetaGeometry(page_geometry,&page.x,&page.y,&page.width,
        &page.height);
      if (flags == NoValue)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidGeometry","`%s'",option);
          page_geometry=DestroyString(page_geometry);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      page.width=(size_t) ((ssize_t) ceil((double) (page.width*
        image->x_resolution/delta.x)-0.5));
      page.height=(size_t) ((ssize_t) ceil((double) (page.height*
        image->y_resolution/delta.y) -0.5));
      page_geometry=DestroyString(page_geometry);
      fitPage=MagickTrue;
    }
  /*
    Render Postscript with the Ghostscript delegate.  The "xps:color"
    delegate command template is filled in with antialias level, density,
    options, output filename pattern and the input link, then executed.
  */
  delegate_info=GetDelegateInfo("xps:color",(char *) NULL,exception);
  if (delegate_info == (const DelegateInfo *) NULL)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  density=AcquireString("");
  options=AcquireString("");
  (void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
    image->x_resolution,image->y_resolution);
  if (image_info->ping != MagickFalse)
    (void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
  (void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
    page.width,(double) page.height);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  if (read_info->number_scenes != 0)
    {
      char
        pages[MagickPathExtent];
      /* Restrict the delegate to the requested scene range (1-based). */
      (void) FormatLocaleString(pages,MagickPathExtent,"-dFirstPage=%.20g "
        "-dLastPage=%.20g ",(double) read_info->scene+1,(double)
        (read_info->scene+read_info->number_scenes));
      (void) ConcatenateMagickString(options,pages,MagickPathExtent);
      read_info->number_scenes=0;
      if (read_info->scenes != (char *) NULL)
        *read_info->scenes='\0';
    }
  if (*image_info->magick == 'E')
    {
      option=GetImageOption(image_info,"xps:use-cropbox");
      if ((option == (const char *) NULL) ||
          (IsStringTrue(option) != MagickFalse))
        (void) ConcatenateMagickString(options,"-dEPSCrop ",MagickPathExtent);
      if (fitPage != MagickFalse)
        (void) ConcatenateMagickString(options,"-dEPSFitPage ",
          MagickPathExtent);
    }
  /* Reserve a unique output filename pattern ("...%d") for per-page files. */
  (void) AcquireUniqueFilename(read_info->filename);
  (void) RelinquishUniqueFileResource(read_info->filename);
  (void) ConcatenateMagickString(read_info->filename,"%d",MagickPathExtent);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) FormatLocaleString(command,MagickPathExtent,
    GetDelegateCommands(delegate_info),
    read_info->antialias != MagickFalse ? 4 : 1,
    read_info->antialias != MagickFalse ? 4 : 1,density,options,
    read_info->filename,input_filename);
  options=DestroyString(options);
  density=DestroyString(density);
  *message='\0';
  status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
    (char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
  (void) RelinquishUniqueFileResource(input_filename);
  /*
    Read each rendered page file back until the sequence ends, appending to
    the result list.
    NOTE(review): the success and failure branches below are byte-identical
    loops; the split looks like a copy artifact and could be collapsed.
  */
  postscript_image=(Image *) NULL;
  if (status == MagickFalse)
    for (i=1; ; i++)
    {
      (void) InterpretImageFilename(image_info,image,filename,(int) i,
        read_info->filename);
      if (IsGhostscriptRendered(read_info->filename) == MagickFalse)
        break;
      read_info->blob=NULL;
      read_info->length=0;
      next=ReadImage(read_info,exception);
      (void) RelinquishUniqueFileResource(read_info->filename);
      if (next == (Image *) NULL)
        break;
      AppendImageToList(&postscript_image,next);
    }
  else
    for (i=1; ; i++)
    {
      (void) InterpretImageFilename(image_info,image,filename,(int) i,
        read_info->filename);
      if (IsGhostscriptRendered(read_info->filename) == MagickFalse)
        break;
      read_info->blob=NULL;
      read_info->length=0;
      next=ReadImage(read_info,exception);
      (void) RelinquishUniqueFileResource(read_info->filename);
      if (next == (Image *) NULL)
        break;
      AppendImageToList(&postscript_image,next);
    }
  (void) RelinquishUniqueFileResource(filename);
  read_info=DestroyImageInfo(read_info);
  if (postscript_image == (Image *) NULL)
    {
      /*
        NOTE(review): message is zeroed above and never written afterwards
        (ExternalDelegateCommand is not passed it), so this branch appears
        unreachable — confirm against the delegate API.
      */
      if (*message != '\0')
        (void) ThrowMagickException(exception,GetMagickModule(),
          DelegateError,"PostscriptDelegateFailed","`%s'",message);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /* The delegate may emit CMYK as separate BMP planes; consolidate them. */
  if (LocaleCompare(postscript_image->magick,"BMP") == 0)
    {
      Image
        *cmyk_image;
      cmyk_image=ConsolidateCMYKImages(postscript_image,exception);
      if (cmyk_image != (Image *) NULL)
        {
          postscript_image=DestroyImageList(postscript_image);
          postscript_image=cmyk_image;
        }
    }
  if (image_info->number_scenes != 0)
    {
      Image
        *clone_image;
      /*
        Add place holder images to meet the subimage specification requirement.
      */
      for (i=0; i < (ssize_t) image_info->scene; i++)
      {
        clone_image=CloneImage(postscript_image,1,1,MagickTrue,exception);
        if (clone_image != (Image *) NULL)
          PrependImageToList(&postscript_image,clone_image);
      }
    }
  /*
    Propagate filename, magick, page geometry, profiles and properties from
    the original image onto every page in the list.
  */
  do
  {
    (void) CopyMagickString(postscript_image->filename,filename,
      MagickPathExtent);
    (void) CopyMagickString(postscript_image->magick,image->magick,
      MagickPathExtent);
    postscript_image->page=page;
    if (image_info->ping != MagickFalse)
      {
        postscript_image->magick_columns*=image->x_resolution/2.0;
        postscript_image->magick_rows*=image->y_resolution/2.0;
        postscript_image->columns*=image->x_resolution/2.0;
        postscript_image->rows*=image->y_resolution/2.0;
      }
    (void) CloneImageProfiles(postscript_image,image);
    (void) CloneImageProperties(postscript_image,image);
    next=SyncNextImageInList(postscript_image);
    if (next != (Image *) NULL)
      postscript_image=next;
  } while (next != (Image *) NULL);
  image=DestroyImageList(image);
  /* Renumber scenes sequentially from zero. */
  scene=0;
  for (next=GetFirstImageInList(postscript_image); next != (Image *) NULL; )
  {
    next->scene=scene++;
    next=GetNextImageInList(next);
  }
  return(GetFirstImageInList(postscript_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r X P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterXPSImage() adds properties for the PS image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterXPSImage method is:
%
% size_t RegisterXPSImage(void)
%
*/
ModuleExport size_t RegisterXPSImage(void)
{
  MagickInfo
    *magick_info;

  /*
    Describe the XPS coder: decode-only (pages are rasterized through the
    Ghostscript delegate by ReadXPSImage), no multi-frame adjoin, no blob
    support, and a seekable input stream is required.
  */
  magick_info=SetMagickInfo("XPS");
  magick_info->description=ConstantString("Microsoft XML Paper Specification");
  magick_info->magick_module=ConstantString("XPS");
  magick_info->decoder=(DecodeImageHandler *) ReadXPSImage;
  magick_info->thread_support=EncoderThreadSupport;
  magick_info->seekable_stream=MagickTrue;
  magick_info->adjoin=MagickFalse;
  magick_info->blob_support=MagickFalse;
  (void) RegisterMagickInfo(magick_info);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r X P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterXPSImage() removes format registrations made by the
% XPS module from the list of supported formats.
%
% The format of the UnregisterXPSImage method is:
%
% UnregisterXPSImage(void)
%
*/
ModuleExport void UnregisterXPSImage(void)
{
  /* Drop the "XPS" coder entry added by RegisterXPSImage(). */
  (void) UnregisterMagickInfo("XPS");
}
| 36.088608 | 80 | 0.531743 | [
"geometry",
"render",
"transform"
] |
cfc6371a472163d121e8a644c9b553bec3c957b5 | 3,726 | h | C | Modules/Core/Common/include/itkDefaultVectorPixelAccessor.h | floryst/ITK | 321e673bcbac15aae2fcad863fd0977b7fbdb3e9 | [
"Apache-2.0"
] | 1 | 2020-10-09T18:12:53.000Z | 2020-10-09T18:12:53.000Z | Modules/Core/Common/include/itkDefaultVectorPixelAccessor.h | floryst/ITK | 321e673bcbac15aae2fcad863fd0977b7fbdb3e9 | [
"Apache-2.0"
] | 1 | 2017-08-18T19:28:52.000Z | 2017-08-18T19:28:52.000Z | Modules/Core/Common/include/itkDefaultVectorPixelAccessor.h | floryst/ITK | 321e673bcbac15aae2fcad863fd0977b7fbdb3e9 | [
"Apache-2.0"
] | 1 | 2017-08-18T19:07:39.000Z | 2017-08-18T19:07:39.000Z | /*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkDefaultVectorPixelAccessor_h
#define itkDefaultVectorPixelAccessor_h
#include "itkMacro.h"
#include "itkVariableLengthVector.h"
#include "itkIntTypes.h"
namespace itk
{
/** \class DefaultVectorPixelAccessor
* \brief Give access to partial aspects of a type
*
* DefaultVectorPixelAccessor is specifically meant to provide VectorImage
* with the same \c DefaultPixelAccessor interface that
* DefaultPixelAccessor provides to Image.
*
* The template parameters is the type that is contained in by the elements of
* a vector.
*
* The class also contains a m_VectorLength parameter, set with the SetVectorLength
* method to set the length of the vectors. This must be set before the accessor
* can be used. This is the length of each of the vector containers.
*
* \note
* This work is part of the National Alliance for Medical Image Computing
* (NAMIC), funded by the National Institutes of Health through the NIH Roadmap
* for Medical Research, Grant U54 EB005149.
*
* \ingroup ImageAdaptors
* \ingroup ITKCommon
*/
template< typename TType >
class ITK_TEMPLATE_EXPORT DefaultVectorPixelAccessor
{
public:
using VectorLengthType = unsigned int;
/** External type alias. It defines the external aspect
* that this class will exhibit. Here it is an VariableLengthVector. The container does not
* manage the memory. In other words it is an array reference with the contents
* pointing to the actual data in the image. */
using ExternalType = VariableLengthVector< TType >;
/** Internal type alias. It defines the internal real representation of data. */
using InternalType = TType;
/** Set output using the value in input */
inline void Set(InternalType & output, const ExternalType & input,
const unsigned long offset) const
{
InternalType *truePixel = ( &output ) + offset * m_OffsetMultiplier;
for ( VectorLengthType i = 0; i < m_VectorLength; i++ )
{
truePixel[i] = input[i];
}
}
/** Get the value from input */
inline ExternalType Get(const InternalType & input, const SizeValueType offset) const
{
// Do not create a local for this method, to use return value
// optimization.
return ExternalType( ( &input ) + ( offset * m_OffsetMultiplier ), m_VectorLength );
}
/** Set the length of each vector in the VectorImage */
void SetVectorLength(VectorLengthType l)
{
m_VectorLength = l;
m_OffsetMultiplier = ( l - 1 );
}
/** Get Vector lengths */
VectorLengthType GetVectorLength() const { return m_VectorLength; }
DefaultVectorPixelAccessor() {}
/** Constructor to initialize VectorLength at construction time */
DefaultVectorPixelAccessor(VectorLengthType l)
{
m_VectorLength = l;
m_OffsetMultiplier = l - 1;
}
~DefaultVectorPixelAccessor() = default;
private:
VectorLengthType m_VectorLength{0};
VectorLengthType m_OffsetMultiplier{0};
};
} // end namespace itk
#endif
| 32.973451 | 93 | 0.698068 | [
"vector"
] |
cfc85624decda7e593799c5dbd4789eece947469 | 8,485 | h | C | libs/DS4_SDK/include/dzcontentmgr.h | Red54/reality | 510d4f5fde2f4c5535482f1ea199f914102b8a2a | [
"BSD-3-Clause"
] | null | null | null | libs/DS4_SDK/include/dzcontentmgr.h | Red54/reality | 510d4f5fde2f4c5535482f1ea199f914102b8a2a | [
"BSD-3-Clause"
] | null | null | null | libs/DS4_SDK/include/dzcontentmgr.h | Red54/reality | 510d4f5fde2f4c5535482f1ea199f914102b8a2a | [
"BSD-3-Clause"
] | null | null | null | /**********************************************************************
Copyright (C) 2002-2012 DAZ 3D, Inc. All Rights Reserved.
This file is part of the DAZ Studio SDK.
This file may be used only in accordance with the DAZ Studio SDK
license provided with the DAZ Studio SDK.
The contents of this file may not be disclosed to third parties,
copied or duplicated in any form, in whole or in part, without the
prior written permission of DAZ 3D, Inc, except as explicitly
allowed in the DAZ Studio SDK license.
See http://www.daz3d.com to contact DAZ 3D or for more
information about the DAZ Studio SDK.
**********************************************************************/
/**
@sdk
@file
Defines the DzContentMgr class.
**/
#ifndef DAZ_CONTENT_MGR_H
#define DAZ_CONTENT_MGR_H
/****************************
Include files
****************************/
#include <QtCore/QVariantList>
#include "dzbase.h"
#include "dztypes.h"
/****************************
Forward declarations
****************************/
class DzContentDropOptions;
class DzContentFolder;
class DzContentReplaceMgr;
class DzFileLoadFilter;
class DzNode;
class QDir;
class DzSettings;
/****************************
Class definitions
****************************/
class DZ_EXPORT DzContentMgr : public DzBase {
Q_OBJECT
Q_FLAGS( DirectoryTypes )
#ifndef DSON_IO
Q_PROPERTY( QString startupScene READ getStartupScene WRITE setStartupScene )
Q_PROPERTY( bool autoRefresh READ isAutoRefreshOn WRITE setAutoRefreshOn )
Q_PROPERTY( bool showBuiltInContent READ isBuiltInContentShowing WRITE setBuiltInContentShowing )
friend class DzApp;
friend class DzContentUpdateThread;
#else
friend class DzDSONIO;
#endif
private:
//
// CREATORS
//
DzContentMgr();
~DzContentMgr();
public:
enum DirectoryType
{
NativeDirs = 0x01,
PoserDirs = 0x02,
ImportDirs = 0x04,
BuiltInDirs = 0x08,
NativeAndBuiltInDirs = NativeDirs | BuiltInDirs,
AllDirs = NativeDirs | PoserDirs | ImportDirs | BuiltInDirs
};
Q_DECLARE_FLAGS( DirectoryTypes, DirectoryType )
//
// REIMPLEMENTATIONS
//
#ifndef DSON_IO
virtual bool event( QEvent *e );
//
// MANIPULATORS
//
bool startup();
void shutdown();
void setStartupScene( const QString &filepath );
void setOnNewScene( const QString &filepath );
QString getStartupScene() const;
QString getOnNewScene() const;
void setAutoRefreshOn( bool onOff );
bool isAutoRefreshOn() const;
void setBuiltInContentShowing( bool onOff );
bool isBuiltInContentShowing() const;
void setContentDropOptions( DzContentDropOptions *options );
void importPreviouslyMappedDirectories();
bool userFindFile( const QString &filename, QString &foundPath, const QString& emptyName ) const;
#endif
public slots:
#ifndef DSON_IO
void setUseDatabaseSearch( bool useDatabase );
void setCheckCompatibilityOn( bool useCheckCompatibility );
bool usingDatabaseSearch();
bool usingCheckCompatibility();
bool doContentSearch();
#endif
void addContentDirectory( const QString &directory, bool saveSetting = true );
void addImportDirectory( const QString &directory, bool saveSetting = true );
void addPoserDirectory( const QString &directory, bool saveSetting = true );
#ifdef DSON_IO
void replaceContentAndPoserDirectories( const QStringList &dirs );
#endif
#ifndef DSON_IO
bool removeContentDirectory( const QString &directory );
void removeAllContentDirectories();
bool removeImportDirectory( const QString &directory );
void removeAllImportDirectories();
bool removePoserDirectory( const QString &directory );
void removeAllPoserDirectories();
void refresh( bool reloadIcons = false );
bool openFile( const QString &filename, bool merge = true );
// -- File loading / merging (native and imported formats) --
bool mergeFiles( const QStringList &filenames );
bool openNativeFile( const QString &filename, bool isMerge=true );
bool importFile( const QString &path );
// Notifies the manager that a new file has been written to a mapped directory.
void fileCreated( const QString &filename );
bool poserDirectoryIsMapped( const QString &dirPath );
#endif
#ifndef DSON_IO
#ifndef Q_MOC_RUN
// 'static' is hidden from Qt's moc here -- presumably moc cannot parse the
// declaration with the keyword split out like this; confirm before changing.
static
#endif
QString getContentPath( const DzNode *node );
#endif
//
// ACCESSORS
//
// Translation between absolute on-disk paths and paths relative to the mapped
// directories; 'useImportFolders' selects which set of mapped folders is used.
// NOTE(review): parameter name 'perferredPath' below is misspelled in the
// public interface; renaming would be source-compatible but is left as-is.
QString getRelativePath( const QString &absolutePath, bool useImportFolders ) const;
QStringList getRelativePaths( const QString &absolutePath, bool useImportFolders ) const;
QString getMappedPath( const QString &path, bool useImportFolders, bool isRelative ) const;
QString getAbsolutePath( const QString &relativePath, bool useImportFolders, const QString& perferredPath = QString::null ) const;
QStringList getAbsolutePaths( const QString &relativePath, bool useImportFolders ) const;
QString getPoserAbsolutePath( const QString &relativePath ) const;
QString getNativeAbsolutePath( const QString &relativePath ) const;
QString getImportAbsolutePath( const QString &relativePath ) const;
#ifndef DSON_IO
DzContentFolder* getBuiltInContentDirectory() const;
QString getBuiltInContentDirectoryPath() const;
#endif
// Enumeration of the three kinds of mapped directories: content, import, Poser.
int getNumContentDirectories() const;
DzContentFolder* getContentDirectory( int which ) const;
QString getContentDirectoryPath( int which ) const;
int getNumImportDirectories() const;
DzContentFolder* getImportDirectory( int which ) const;
QString getImportDirectoryPath( int which ) const;
int getNumPoserDirectories() const;
DzContentFolder* getPoserDirectory( int which ) const;
QString getPoserDirectoryPath( int which ) const;
QString findFile( const QString &partialPath, DirectoryTypes dirTypes = NativeAndBuiltInDirs ) const;
DzContentFolder* findBaseDirectory( const QString &path, bool useImportFolders ) const;
#ifndef DSON_IO
void doDirNumCheck() const;
DzContentReplaceMgr* getContentReplaceMgr();
QString getDefaultContentDir() const;
void saveAllMappedDirectories() const;
bool isDirectoryMapped( const QString &directory )const;
// Reload scene assets from their source files on disk.
bool reloadSceneAssetsFromSource( const DzSettings* settings);
DzError reloadAssetsFromSource( DzNode* sceneNode, const DzSettings* settings, const QString& srcFilePath=QString::null);
DzError reloadAssetsFromSource( const DzNodeList& sceneNodes, const DzSettings* settings, const QString& absolutePath );
QString findSourcePathForNode( DzNode* sceneNode );
public:
bool findPoserPaths( QStringList &list ) const;
DzContentDropOptions* getContentDropOptions() const;
signals:
// Emitted whenever the corresponding directory mapping or folder tree changes.
void contentDirectoryListChanged();
void importDirectoryListChanged();
void poserDirectoryListChanged();
void folderTreeChanged();
void autoRefreshChanged( bool onOff );
void startupSceneChanged( const QString &filename );
void onNewSceneChanged( const QString &filename );
void newFileCreated( const QString &filename );
protected:
//////////////////////
// from QObject
virtual void timerEvent( QTimerEvent *e );
bool needUpdate();
#endif
private:
#ifndef DSON_IO
bool loadScript( const QString &path, const QVariantList &args );
bool updateBaseItemForLoad( const QString &fileName );
void contentReplaceInit( const QString &filename );
void doContentReplace();
void setDefaultContentDir();
#endif
// Private per-folder helpers backing the public path accessors above.
QString getRelativePath( const QString &folderPath, const QString &absolutePath ) const;
QString getMappedPath( const QString &folderPath, const QString &path, bool isRelative ) const;
QString getAbsolutePath( const QString &folderPath, const QString &relativePath ) const;
#ifndef DSON_IO
DzContentFolder* findFolderRecurse( const QString &path, const DzContentFolder *folder ) const;
bool refreshRecurse( DzContentFolder *folder, bool reloadIcons );
bool needUpdateRecurse( DzContentFolder *folder );
void findPoserPathsRecurse( QDir &dir, QStringList &list ) const;
void savePoserDirectories() const;
void saveImportDirectories() const;
void saveContentDirectories() const;
void saveLegacyContentDirectories() const;
void saveStartupScene() const;
void saveOnNewScene() const;
DzFileLoadFilter* getFilterForFile( const QString &filename );
QString getProgramDataContentDir() const;
void cleanUpDBPathReferences( const QString& dir );
#endif
// Opaque implementation data (pimpl idiom).
struct Data;
Data *m_data;
};
Q_DECLARE_OPERATORS_FOR_FLAGS( DzContentMgr::DirectoryTypes )
#endif // DAZ_CONTENT_MGR_H
| 32.385496 | 135 | 0.720212 | [
"3d"
] |
cfcadbed22bf44ff3bc152f515be92d11972fa44 | 7,145 | h | C | Direct2DNet/DWriteNet/IDWriteInlineObject.h | SansyHuman/Direct2DNet | 345d981a07fd8cbdf7cf7e6ca9d7b493439dbd17 | [
"MIT"
] | null | null | null | Direct2DNet/DWriteNet/IDWriteInlineObject.h | SansyHuman/Direct2DNet | 345d981a07fd8cbdf7cf7e6ca9d7b493439dbd17 | [
"MIT"
] | null | null | null | Direct2DNet/DWriteNet/IDWriteInlineObject.h | SansyHuman/Direct2DNet | 345d981a07fd8cbdf7cf7e6ca9d7b493439dbd17 | [
"MIT"
] | null | null | null | #pragma once
#include "DWNetHeaders.h"
#include "DWriteSettings.h"
#include "IDirectWriteObject.h"
#include "../Direct2DNet/Exception/DXException.h"
using namespace System::Runtime::InteropServices;
using namespace System::Runtime::CompilerServices;
using namespace System::Collections::Generic;
namespace D2DNet
{
namespace DWriteNet
{
using fnMet = HRESULT(__stdcall *)(::DWRITE_INLINE_OBJECT_METRICS *);
using fnOhMet = HRESULT(__stdcall *)(::DWRITE_OVERHANG_METRICS *);
using fnBC = HRESULT(__stdcall *)(::DWRITE_BREAK_CONDITION *, ::DWRITE_BREAK_CONDITION *);
// Native COM implementation of ::IDWriteInlineObject. Metric and break-
// condition queries are forwarded through plain function pointers
// (presumably wired to the managed wrapper's delegates -- see the GCHandle
// members of the ref class below and the .cpp). Draw is not implemented here.
class IDWriteNativeInlineObject : public ::IDWriteInlineObject
{
private:
// COM reference count; starts at 1 on construction.
UINT m_cRef;
public:
// Callbacks servicing the DirectWrite metric/break-condition queries.
fnMet m_getMetrics;
fnOhMet m_getOverhangMetrics;
fnBC m_getBreakConditions;
IDWriteNativeInlineObject() : m_cRef(1) {}
// Non-copyable, non-movable: lifetime is governed by AddRef/Release.
IDWriteNativeInlineObject(const IDWriteNativeInlineObject &) = delete;
IDWriteNativeInlineObject(IDWriteNativeInlineObject &&) = delete;
IDWriteNativeInlineObject &operator=(const IDWriteNativeInlineObject &) = delete;
// Thread-safe reference counting via the Interlocked* intrinsics.
STDMETHOD_(ULONG, AddRef)(void) override
{
return InterlockedIncrement(reinterpret_cast<LONG volatile *>(&m_cRef));
}
STDMETHOD_(ULONG, Release)(void) override
{
ULONG cRef = static_cast<ULONG>(
InterlockedDecrement(reinterpret_cast<LONG volatile *>(&m_cRef)));
// Self-destruct when the last reference is released.
if(0 == cRef)
{
delete this;
}
return cRef;
}
// Standard COM QueryInterface; only IUnknown and IDWriteInlineObject are
// supported, everything else yields E_NOINTERFACE.
STDMETHOD(QueryInterface)(THIS_ REFIID iid, void **ppvObject) override
{
HRESULT hr = S_OK;
if(__uuidof(::IUnknown) == iid)
{
*ppvObject = static_cast<::IUnknown *>(this);
AddRef();
}
else if(__uuidof(::IDWriteInlineObject) == iid)
{
*ppvObject = static_cast<::IDWriteInlineObject *>(this);
AddRef();
}
else
{
*ppvObject = NULL;
hr = E_NOINTERFACE;
}
return hr;
}
// Rendering is deliberately not handled by this native shim.
STDMETHOD(Draw)(
_In_opt_ void *clientDrawingContext,
_In_ IDWriteTextRenderer *renderer,
FLOAT originX,
FLOAT originY,
BOOL isSideways,
BOOL isRightToLeft,
_In_opt_ IUnknown *clientDrawingEffect
) override
{
return E_NOTIMPL;
}
// The three queries below simply delegate to the installed callbacks.
STDMETHOD(GetMetrics)(
_Out_ ::DWRITE_INLINE_OBJECT_METRICS *metrics
) override
{
return m_getMetrics(metrics);
}
STDMETHOD(GetOverhangMetrics)(
_Out_ ::DWRITE_OVERHANG_METRICS *overhangs
) override
{
return m_getOverhangMetrics(overhangs);
}
STDMETHOD(GetBreakConditions)(
_Out_ ::DWRITE_BREAK_CONDITION *breakConditionBefore,
_Out_ ::DWRITE_BREAK_CONDITION *breakConditionAfter
) override
{
return m_getBreakConditions(breakConditionBefore, breakConditionAfter);
}
};
/// <summary>
/// The IDWriteInlineObject interface wraps an application defined inline graphic,
/// allowing DWrite to query metrics as if it was a glyph inline with the text.
/// </summary>
[System::Runtime::InteropServices::GuidAttribute("8339FDE3-106F-47ab-8373-1C6295EB10B3")]
public ref class IDWriteInlineObject abstract : DWriteNet::IDirectWriteObject
{
protected:
// Managed delegate types mirroring the native fnMet/fnOhMet/fnBC callbacks.
delegate HRESULT FnMet(::DWRITE_INLINE_OBJECT_METRICS *);
delegate HRESULT FnOhMet(::DWRITE_OVERHANG_METRICS *);
delegate HRESULT FnBC(::DWRITE_BREAK_CONDITION *, ::DWRITE_BREAK_CONDITION *);
internal:
// The native inline object this managed wrapper represents.
::IDWriteInlineObject *m_pObject;
// Global map from native pointer to managed wrapper.
// NOTE(review): presumably used to avoid duplicate wrappers for the same
// native object -- confirm against the .cpp.
static Dictionary<System::IntPtr, DWriteNet::IDWriteInlineObject ^> ^objList = gcnew Dictionary<System::IntPtr, DWriteNet::IDWriteInlineObject ^>();
private:
// GCHandles, likely pinning the delegate instances while the native shim
// holds raw function pointers into them (see IDWriteNativeInlineObject).
GCHandle m_getMetrics;
GCHandle m_getOverhangMetrics;
GCHandle m_getBreakConditions;
internal:
// For CreateEllipsisTrimmingSign.
IDWriteInlineObject(::IDWriteInlineObject *pObject);
protected:
/// <summary>
/// The basic constructor of IDWriteInlineObject. You should call this ctor when you
/// implement the inline object.
/// </summary>
IDWriteInlineObject();
public:
~IDWriteInlineObject();
// C++/CLI finalizer; presumably releases the native object -- see the .cpp.
!IDWriteInlineObject();
// Raw pointer to the underlying native COM object.
property void *NativePointer
{
virtual void *get()
{
return m_pObject;
}
}
virtual void HandleCOMInterface(void *obj) sealed;
private:
// Internal bridge methods; likely the targets of the GCHandle delegates
// above -- confirm wiring in the .cpp.
HRESULT GetMetricsInternal(::DWRITE_INLINE_OBJECT_METRICS *metrics);
HRESULT GetOverhangMetricsInternal(::DWRITE_OVERHANG_METRICS *overhangs);
HRESULT GetBreakConditionsInternal(
::DWRITE_BREAK_CONDITION *breakConditionBefore,
::DWRITE_BREAK_CONDITION *breakConditionAfter
);
public:
// Draw
// Abstract queries a concrete inline object must implement.
virtual HRESULT GetMetrics(
[OutAttribute] DWriteNet::DWRITE_INLINE_OBJECT_METRICS %metrics
) abstract;
virtual HRESULT GetOverhangMetrics(
[OutAttribute] DWriteNet::DWRITE_OVERHANG_METRICS %overhangs
) abstract;
virtual HRESULT GetBreakConditions(
[OutAttribute] DWriteNet::DWRITE_BREAK_CONDITION %breakConditionBefore,
[OutAttribute] DWriteNet::DWRITE_BREAK_CONDITION %breakConditionAfter
) abstract;
};
ref class IDWriteFactory;
ref class IDWriteTextFormat;
// Built-in inline object used as an ellipsis trimming sign. Presumably backed
// by IDWriteFactory::CreateEllipsisTrimmingSign (cf. the internal ctor of the
// base class) -- confirm in the .cpp.
ref class EllipsisInlineObject : DWriteNet::IDWriteInlineObject
{
internal:
EllipsisInlineObject(
DWriteNet::IDWriteFactory ^factory,
DWriteNet::IDWriteTextFormat ^format);
public:
// Concrete implementations of the abstract metric/break-condition queries.
virtual HRESULT GetMetrics(
[OutAttribute] DWriteNet::DWRITE_INLINE_OBJECT_METRICS %metrics
) override;
virtual HRESULT GetOverhangMetrics(
[OutAttribute] DWriteNet::DWRITE_OVERHANG_METRICS %overhangs
) override;
virtual HRESULT GetBreakConditions(
[OutAttribute] DWriteNet::DWRITE_BREAK_CONDITION %breakConditionBefore,
[OutAttribute] DWriteNet::DWRITE_BREAK_CONDITION %breakConditionAfter
) override;
};
}
} | 33.862559 | 160 | 0.569769 | [
"object"
] |
cfcbba51dd089edb0bbb4a50714eede94884039a | 4,876 | h | C | src/core/inc/blit.h | haampie/ROCR-Runtime | 0d9576a5cba4ca4bd17bd2a4a744e59151fcdf9c | [
"AMDPLPA"
] | null | null | null | src/core/inc/blit.h | haampie/ROCR-Runtime | 0d9576a5cba4ca4bd17bd2a4a744e59151fcdf9c | [
"AMDPLPA"
] | null | null | null | src/core/inc/blit.h | haampie/ROCR-Runtime | 0d9576a5cba4ca4bd17bd2a4a744e59151fcdf9c | [
"AMDPLPA"
] | null | null | null | ////////////////////////////////////////////////////////////////////////////////
//
// The University of Illinois/NCSA
// Open Source License (NCSA)
//
// Copyright (c) 2014-2015, Advanced Micro Devices, Inc. All rights reserved.
//
// Developed by:
//
// AMD Research and AMD HSA Software Development
//
// Advanced Micro Devices, Inc.
//
// www.amd.com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal with the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimers in
// the documentation and/or other materials provided with the distribution.
// - Neither the names of Advanced Micro Devices, Inc,
// nor the names of its contributors may be used to endorse or promote
// products derived from this Software without specific prior written
// permission.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS WITH THE SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef HSA_RUNTIME_CORE_INC_BLIT_H_
#define HSA_RUNTIME_CORE_INC_BLIT_H_
#include <stdint.h>
#include "core/inc/agent.h"
namespace core {
// Abstract interface for a blit engine servicing copy and fill requests on a
// compute device's control block. Implementations may be SDMA-backed (see
// isSDMA()).
class Blit {
public:
explicit Blit() {}
virtual ~Blit() {}
/// @brief Marks the blit object as invalid and uncouples its link with
/// the underlying compute device's control block. Use of the blit object
/// once it has been released is illegal and any behavior is indeterminate
///
/// @note: The call will block until all commands have executed.
///
/// @param agent Agent passed to Initialize.
///
/// @return hsa_status_t
virtual hsa_status_t Destroy(const core::Agent& agent) = 0;
/// @brief Submit a linear copy command to the underlying compute device's
/// control block. The call is blocking until the command execution is
/// finished.
///
/// @param dst Memory address of the copy destination.
/// @param src Memory address of the copy source.
/// @param size Size of the data to be copied.
virtual hsa_status_t SubmitLinearCopyCommand(void* dst, const void* src,
size_t size) = 0;
/// @brief Submit a linear copy command to the underlying compute device's
/// control block. The call is non blocking. The memory transfer will start
/// after all dependent signals are satisfied. After the transfer is
/// completed, the out signal will be decremented.
///
/// @param dst Memory address of the copy destination.
/// @param src Memory address of the copy source.
/// @param size Size of the data to be copied.
/// @param dep_signals Array of dependent signals.
/// @param out_signal Output signal.
virtual hsa_status_t SubmitLinearCopyCommand(
void* dst, const void* src, size_t size,
std::vector<core::Signal*>& dep_signals, core::Signal& out_signal) = 0;
/// @brief Submit a linear fill command to the underlying compute device's
/// control block. The call is blocking until the command execution is
/// finished.
///
/// @param ptr Memory address of the fill destination.
/// @param value Value to be set.
/// @param num Number of uint32_t elements to be set to the value.
virtual hsa_status_t SubmitLinearFillCommand(void* ptr, uint32_t value,
size_t num) = 0;
/// @brief Enable profiling of the asynchronous copy command. The timestamp
/// of each copy request will be stored in the completion signal structure.
///
/// @param enable True to enable profiling. False to disable profiling.
///
/// @return HSA_STATUS_SUCCESS if the request to enable/disable profiling is
/// successful.
virtual hsa_status_t EnableProfiling(bool enable) = 0;
/// @brief Blit operations use SDMA.
/// @return True when the implementation is SDMA-backed; base default false.
virtual bool isSDMA() const { return false; }
};
} // namespace core
#endif // header guard
| 42.034483 | 80 | 0.681501 | [
"object",
"vector"
] |
cfce2270992d32091a38767e76430dd88a2a5619 | 3,042 | h | C | include/base/CotEntity.h | TeamNut/CotEngine-Release | 5f3781894e4ead2421f6ef1996e7e6eaa3a289ff | [
"MIT"
] | 2 | 2017-10-11T02:16:55.000Z | 2018-04-13T07:26:28.000Z | include/base/CotEntity.h | TeamNut/CotEngine-Release | 5f3781894e4ead2421f6ef1996e7e6eaa3a289ff | [
"MIT"
] | null | null | null | include/base/CotEntity.h | TeamNut/CotEngine-Release | 5f3781894e4ead2421f6ef1996e7e6eaa3a289ff | [
"MIT"
] | null | null | null | #pragma once
#include "CotBroadcastProtocol.h"
#include "CotTime.h"
#include "container/CotVectorMap.hpp"
#include "math/CotMath.h"
namespace Cot
{
class IComponent;
// Scene-graph entity: owns a name, an active flag, a transform (local TRS
// plus a cached world matrix guarded by a dirty flag), a parent/child
// hierarchy, a type-keyed component map, and an optional broadcast protocol
// for string-keyed message dispatch.
class COT_API Entity
{
protected:
string _name;
bool _active;
// Cached world matrix; presumably rebuilt from the local TRS when _dirty is
// set (see SetDirty / GetWorldMatrix) -- confirm in the .cpp.
Mat4 _world;
bool _dirty;
// Local-space transform, presumably relative to _parent.
Vec3 _localPosition;
Vec3 _localRotate;
Vec3 _localScale;
Entity* _parent;
std::vector<Entity*> _children;
BroadCastProtocol* _broadCastProtocol;
// Components keyed by type string; the multimap allows several components
// under the same key.
MultiVectorMap<string, IComponent*> _components;
public:
Entity() = delete;
Entity(const string& name);
virtual ~Entity();
void ComponentStart();
// Creates a T, attaches it to this entity, and registers it under its type
// key. Note the lifecycle order: OnEnable() runs before Awake().
// NOTE(review): that ordering looks unusual -- confirm it is intentional.
template <typename T>
T* AddComponent()
{
T* result = new T();
result->SetOwner(this);
result->OnEnable();
result->Awake();
_components.add(result->GetType(), result);
return result;
}
// Removes and deletes the component found under T's type key (lookup
// semantics defined by MultiVectorMap::find).
template <typename T>
void RemoveComponent()
{
string key = ComponentType<T>::GetType();
T* temp = static_cast<T*>(_components.find(key));
_components.remove(key);
SafeDelete(temp);
}
// Returns the component registered under T's type key (downcast, unchecked).
template <typename T>
T* GetComponent()
{
string key = ComponentType<T>::GetType();
return static_cast<T*>(_components.find(key));
}
// Returns all components registered under T's type key.
template <typename T>
inline std::vector<T*> GetComponents()
{
string key = ComponentType<T>::GetType();
return _components.finds<T*>(key);
}
MultiVectorMap<string, IComponent*>& GetComponentMap() { return _components; }
void CreateBroadCastProtocol();
void AddBroadCastListener(const string& name, const std::function<void()>& function);
// String-keyed message dispatch; exact send-vs-broadcast semantics are
// defined in the .cpp.
void SendMsg(const string& function);
void BroadCastMsg(const string& function);
// -- Hierarchy management --
void AddChild(Entity* child);
std::vector<Entity*>& GetChildren() { return _children; }
void RemoveParent();
void RemoveChild(Entity* child);
void RemoveChildByName(const string& name);
void RemoveAllChild();
void SetName(const string& name);
string GetName() { return _name; }
void SetParent(Entity* parent);
Entity* GetParent() { return _parent; }
// -- World-space transform accessors --
void ResetTransform();
void SetPosition(const Vec3& position);
void SetPositionX(float x);
void SetPositionY(float y);
void SetPositionZ(float z);
Vec3 GetPosition();
void SetRotateAxis(float deg, const Vec3& axis);
Vec3 GetRotate();
void SetScale(const Vec3& scale);
void SetScaleX(float x);
void SetScaleY(float y);
void SetScaleZ(float z);
Vec3 GetScale();
// -- Local-space transform accessors --
void SetLocalPosition(const Vec3& position);
void SetLocalPositionX(float x);
void SetLocalPositionY(float y);
void SetLocalPositionZ(float z);
Vec3 GetLocalPosition();
void SetLocalRotateAxis(float deg, const Vec3& axis);
Vec3 GetLocalRotate();
void SetLocalScale(const Vec3& scale);
void SetLocalScaleX(float x);
void SetLocalScaleY(float y);
void SetLocalScaleZ(float z);
Vec3 GetLocalScale();
Mat4 GetWorldMatrix();
void SetDirty(bool value);
bool IsDirty() { return _dirty; }
void SetActive(bool active);
bool IsActive() { return _active; }
// Per-frame hooks.
virtual void Update(Time& time);
virtual void LateUpdate(Time& time);
};
} | 22.533333 | 87 | 0.702498 | [
"vector"
] |
cfd411887b86ede9109cb747c3fc294f8a4788e8 | 1,480 | h | C | Core/MAGESLAM/Source/Debugging/Introspection.h | syntheticmagus/mageslam | ba79a4e6315689c072c29749de18d70279a4c5e4 | [
"MIT"
] | 70 | 2020-05-07T03:09:09.000Z | 2022-02-11T01:04:54.000Z | Core/MAGESLAM/Source/Debugging/Introspection.h | syntheticmagus/mageslam | ba79a4e6315689c072c29749de18d70279a4c5e4 | [
"MIT"
] | 3 | 2020-06-01T00:34:01.000Z | 2020-10-08T07:43:32.000Z | Core/MAGESLAM/Source/Debugging/Introspection.h | syntheticmagus/mageslam | ba79a4e6315689c072c29749de18d70279a4c5e4 | [
"MIT"
] | 16 | 2020-05-07T03:09:13.000Z | 2022-03-31T15:36:49.000Z | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#pragma once
#include "Introspector.h"
#include <vector>
#include <mutex>
namespace mage
{
class Introspection: public Introspector
{
public:
void AddIntrospector(Introspector& introspector)
{
std::lock_guard<std::mutex> guard{ m_mutex };
m_introspectors.push_back(&introspector);
}
virtual void Introspect(const InitializationData& data) override
{
std::lock_guard<std::mutex> guard{ m_mutex };
for (Introspector* ptr : m_introspectors)
{
ptr->Introspect(data);
}
}
virtual void IntrospectEstimatedPose(const mage::FrameId& frameId, const mage::Matrix& viewMatrix) override
{
std::lock_guard<std::mutex> guard{ m_mutex };
for (Introspector* ptr : m_introspectors)
{
ptr->IntrospectEstimatedPose(frameId,viewMatrix);
}
}
virtual void IntrospectAnalyzedImage(const mage::FrameData& frame, const mage::AnalyzedImage& image) override
{
std::lock_guard<std::mutex> guard{ m_mutex };
for (Introspector* ptr : m_introspectors)
{
ptr->IntrospectAnalyzedImage(frame, image);
}
}
private:
std::mutex m_mutex;
std::vector<Introspector*> m_introspectors;
};
}
| 27.407407 | 117 | 0.586486 | [
"vector"
] |
cfda46667d02aefafae6ebe9a0e36d539e9c3112 | 3,121 | h | C | ref/src/Chapter08/smart sweepers - v2.1/CMineSweeper.h | kugao222/geneticAlgorithmInLua | 9bfd2a443f9492759027791a2b2b41b9467241b2 | [
"MIT"
] | null | null | null | ref/src/Chapter08/smart sweepers - v2.1/CMineSweeper.h | kugao222/geneticAlgorithmInLua | 9bfd2a443f9492759027791a2b2b41b9467241b2 | [
"MIT"
] | null | null | null | ref/src/Chapter08/smart sweepers - v2.1/CMineSweeper.h | kugao222/geneticAlgorithmInLua | 9bfd2a443f9492759027791a2b2b41b9467241b2 | [
"MIT"
] | null | null | null | #ifndef CMINESWEEPER_H
#define CMINESWEEPER_H
//------------------------------------------------------------------------
//
// Name: CMineSweeper.h
//
// Author: Mat Buckland 2002
//
// Desc: Class to create a minesweeper object used in the 'Smart Sweeper'
// example from the book Game AI Programming with Neural Nets and
// Genetic Algorithms.
//
//------------------------------------------------------------------------
#include <vector>
#include <math.h>
#include "CNeuralNet.h"
#include "utils.h"
#include "C2DMatrix.h"
#include "SVector2D.h"
#include "CParams.h"
#include "collision.h"
using namespace std;
// A single minesweeper agent: a neural-net "brain" plus pose, sensor
// segments and fitness bookkeeping used by the genetic algorithm.
class CMinesweeper
{
private:
CNeuralNet m_ItsBrain;
//its position in the world
SVector2D m_vPosition;
//direction sweeper is facing
SVector2D m_vLookAt;
double m_dRotation;
double m_dSpeed;
//to store output from the ANN
double m_lTrack, m_rTrack;
//the sweepers energy level.
double m_dFitness;
//the scale of the sweeper when drawn
double m_dScale;
//these members keep track of the individual penalties
double m_dSpinBonus;
double m_dCollisionBonus;
//to store end vertices of sensor segments
vector<SPoint> m_Sensors;
//sensor segments transformed into world space
vector<SPoint> m_tranSensors;
//this keeps a record of how far down the sensor segment
//a 'hit' has occurred.
vector<double> m_vecdSensors;
//if a collision has been detected this flag is set
bool m_bCollided;
//this function returns a vector of points which make up
//the line segments of the sweepers sensors.
void CreateSensors(vector<SPoint> &sensors,
int NumSensors,
double range);
int CheckForHit(vector<SVector2D> &objects, double size);
//this function checks for any intersections between the
//sweeper's sensors and the objects in its environment
void TestSensors(vector<SPoint> &objects);
public:
CMinesweeper();
//updates the ANN with information from the sweeper's environment
bool Update(vector<SPoint> &objects);
//used to transform the sweepers vertices prior to rendering
void WorldTransform(vector<SPoint> &sweeper, double scale);
void Reset();
void EndOfRunCalculations();
void RenderStats(HDC surface);
void RenderPenalties(HDC surface);
//-------------------accessor functions
SVector2D Position()const{return m_vPosition;}
double Rotation()const{return m_dRotation;}
//NOTE(review): returns float although m_dFitness is a double, so the value
//is narrowed here -- confirm callers do not rely on double precision.
float Fitness()const{return m_dFitness;}
void ZeroEnergy(){m_dFitness = 0;}
double Scale()const{return m_dScale;}
vector<SPoint>& Sensors(){return m_tranSensors;}
vector<double>& SensorReadings(){return m_vecdSensors;}
bool Collided()const{return m_bCollided;}
//the following simply forward to the neural net
void PutWeights(vector<double> &w){m_ItsBrain.PutWeights(w);}
int GetNumberOfWeights(){return m_ItsBrain.GetNumberOfWeights();}
vector<int> CalculateSplitPoints(){return m_ItsBrain.CalculateSplitPoints();}
};
#endif
| 22.948529 | 85 | 0.639859 | [
"object",
"vector",
"transform"
] |
cfdb1bed3c05a34a628cf9008d836aabff39947e | 9,847 | h | C | Filters/Verdict/vtkCellQuality.h | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 3 | 2015-07-28T18:07:50.000Z | 2018-02-28T20:59:58.000Z | Filters/Verdict/vtkCellQuality.h | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 4 | 2018-10-25T09:46:11.000Z | 2019-01-17T16:49:17.000Z | Filters/Verdict/vtkCellQuality.h | satya-arjunan/vtk8 | ee7ced57de6d382a2d12693c01e2fcdac350b25f | [
"BSD-3-Clause"
] | 4 | 2016-09-08T02:11:00.000Z | 2019-08-15T02:38:39.000Z | /*=========================================================================
Program: Visualization Toolkit
Module:    vtkCellQuality.h
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
/**
* @class vtkCellQuality
* @brief Calculate functions of quality of the elements
* of a mesh
*
*
* vtkCellQuality computes one or more functions of (geometric) quality for each
* cell of a mesh. The per-cell quality is added to the mesh's cell data, in an
* array named "CellQuality." Cell types not supported by this filter or undefined
* quality of supported cell types will have an entry of -1.
*
* @warning
* Most quadrilateral quality functions are intended for planar quadrilaterals
* only. The minimal angle is not, strictly speaking, a quality function, but
* it is provided because of its usage by many authors.
*/
#ifndef vtkCellQuality_h
#define vtkCellQuality_h
#include "vtkFiltersVerdictModule.h" // For export macro
#include "vtkDataSetAlgorithm.h"
class vtkCell;
class vtkDataArray;
class vtkIdList;
class vtkPoints;
// Filter that evaluates the selected geometric quality measure for every cell
// of the input and stores it in a cell-data array named "CellQuality";
// unsupported cell types / undefined measures yield the sentinel values
// UnsupportedGeometry / UndefinedQuality (both default to -1).
class VTKFILTERSVERDICT_EXPORT vtkCellQuality : public vtkDataSetAlgorithm
{
// All selectable quality measures. Note that NONE, EDGE_RATIO and NORMAL
// have no SetQualityMeasureTo...() convenience setter below; select them
// via SetQualityMeasure() directly.
enum
{
NONE = 0,
AREA,
ASPECT_BETA,
ASPECT_FROBENIUS,
ASPECT_GAMMA,
ASPECT_RATIO,
COLLAPSE_RATIO,
CONDITION,
DIAGONAL,
DIMENSION,
DISTORTION,
EDGE_RATIO,
JACOBIAN,
MAX_ANGLE,
MAX_ASPECT_FROBENIUS,
MAX_EDGE_RATIO,
MED_ASPECT_FROBENIUS,
MIN_ANGLE,
NORMAL,
ODDY,
RADIUS_RATIO,
RELATIVE_SIZE_SQUARED,
SCALED_JACOBIAN,
SHAPE,
SHAPE_AND_SIZE,
SHEAR,
SHEAR_AND_SIZE,
SKEW,
STRETCH,
TAPER,
VOLUME,
WARPAGE
};
public:
void PrintSelf (ostream&, vtkIndent) override;
vtkTypeMacro(vtkCellQuality, vtkDataSetAlgorithm);
static vtkCellQuality* New ();
//@{
/**
* Set/Get the particular estimator used to function the quality of all
* supported geometries. For qualities that are not defined for certain
* geometries, later program logic ensures that CellQualityNone static
* function will be used so that a predefined value is returned for the
* request.
* There is no default value for this call and valid values include all
* possible qualities supported by this class.
*/
vtkSetMacro(QualityMeasure, int);
vtkGetMacro(QualityMeasure, int);
//@}
// Convenience setters; each simply forwards the matching enum value to
// SetQualityMeasure().
void SetQualityMeasureToArea ()
{
this->SetQualityMeasure(AREA);
}
void SetQualityMeasureToAspectBeta ()
{
this->SetQualityMeasure(ASPECT_BETA);
}
void SetQualityMeasureToAspectFrobenius ()
{
this->SetQualityMeasure(ASPECT_FROBENIUS);
}
void SetQualityMeasureToAspectGamma ()
{
this->SetQualityMeasure(ASPECT_GAMMA);
}
void SetQualityMeasureToAspectRatio ()
{
this->SetQualityMeasure(ASPECT_RATIO);
}
void SetQualityMeasureToCollapseRatio ()
{
this->SetQualityMeasure(COLLAPSE_RATIO);
}
void SetQualityMeasureToCondition ()
{
this->SetQualityMeasure(CONDITION);
}
void SetQualityMeasureToDiagonal ()
{
this->SetQualityMeasure(DIAGONAL);
}
void SetQualityMeasureToDimension ()
{
this->SetQualityMeasure(DIMENSION);
}
void SetQualityMeasureToDistortion ()
{
this->SetQualityMeasure(DISTORTION);
}
void SetQualityMeasureToJacobian ()
{
this->SetQualityMeasure(JACOBIAN);
}
void SetQualityMeasureToMaxAngle ()
{
this->SetQualityMeasure(MAX_ANGLE);
}
void SetQualityMeasureToMaxAspectFrobenius ()
{
this->SetQualityMeasure(MAX_ASPECT_FROBENIUS);
}
void SetQualityMeasureToMaxEdgeRatio ()
{
this->SetQualityMeasure(MAX_EDGE_RATIO);
}
void SetQualityMeasureToMedAspectFrobenius ()
{
this->SetQualityMeasure(MED_ASPECT_FROBENIUS);
}
void SetQualityMeasureToMinAngle ()
{
this->SetQualityMeasure(MIN_ANGLE);
}
void SetQualityMeasureToOddy ()
{
this->SetQualityMeasure(ODDY);
}
void SetQualityMeasureToRadiusRatio ()
{
this->SetQualityMeasure(RADIUS_RATIO);
}
void SetQualityMeasureToRelativeSizeSquared ()
{
this->SetQualityMeasure(RELATIVE_SIZE_SQUARED);
}
void SetQualityMeasureToScaledJacobian ()
{
this->SetQualityMeasure(SCALED_JACOBIAN);
}
void SetQualityMeasureToShapeAndSize ()
{
this->SetQualityMeasure(SHAPE_AND_SIZE);
}
void SetQualityMeasureToShape ()
{
this->SetQualityMeasure(SHAPE);
}
void SetQualityMeasureToShearAndSize ()
{
this->SetQualityMeasure(SHEAR_AND_SIZE);
}
void SetQualityMeasureToShear ()
{
this->SetQualityMeasure(SHEAR);
}
void SetQualityMeasureToSkew ()
{
this->SetQualityMeasure(SKEW);
}
void SetQualityMeasureToStretch ()
{
this->SetQualityMeasure(STRETCH);
}
void SetQualityMeasureToTaper ()
{
this->SetQualityMeasure(TAPER);
}
void SetQualityMeasureToVolume ()
{
this->SetQualityMeasure(VOLUME);
}
void SetQualityMeasureToWarpage ()
{
this->SetQualityMeasure(WARPAGE);
}
//@{
/**
* Set/Get the return value for unsupported geometry. Unsupported geometry
* are geometries that are not supported by this filter currently, future
* implementation might include support for them. The default value for
* UnsupportedGeometry is -1.
*/
vtkSetMacro(UnsupportedGeometry, double);
vtkGetMacro(UnsupportedGeometry, double);
//@}
//@{
/**
* Set/Get the return value for undefined quality. Undefined quality
* are qualities that could be addressed by this filter but is not well
* defined for the particular geometry of cell in question, e.g. a
* volume query for a triangle. Undefined quality will always be undefined.
* The default value for UndefinedQuality is -1.
*/
vtkSetMacro(UndefinedQuality, double);
vtkGetMacro(UndefinedQuality, double);
//@}
// Area helpers for cell types not covered by VERDICT.
double TriangleStripArea (vtkCell*);
double PixelArea (vtkCell*);
double PolygonArea (vtkCell*);
protected:
~vtkCellQuality () override;
vtkCellQuality ();
/**
* Set/Get the particular estimator used to function the quality of triangles.
* The default is NONE and valid values also include
* ASPECT_FROBENIUS
* ASPECT_RATIO
* CONDITION
* DISTORTION
* EDGE_RATIO
* MAX_ANGLE
* MIN_ANGLE
* RADIUS_RATIO
* RELATIVE_SIZE_SQUARED
* SCALED_JACOBIAN
* SHAPE
* SHAPE_AND_SIZE
*/
double ComputeTriangleQuality (vtkCell*);
/**
* Set/Get the particular estimator used to measure the quality of quadrilaterals.
* The default is NONE and valid values also include
* AREA
* ASPECT_RATIO
* CONDITION
* DISTORTION
* EDGE_RATIO
* JACOBIAN
* MAX_ANGLE
* MAX_EDGE_RATIO
* MIN_ANGLE
* ODDY
* RADIUS_RATIO
* RELATIVE_SIZE_SQUARED
* SCALED_JACOBIAN
* SHAPE
* SHAPE_AND_SIZE
* SHEAR
* SHEAR_AND_SIZE
* SKEW
* STRETCH
* TAPER
* WARPAGE
* Scope: Except for EDGE_RATIO, these estimators are intended for planar
* quadrilaterals only; use at your own risk if you really want to assess non-planar
* quadrilateral quality with those.
*/
double ComputeQuadQuality (vtkCell*);
/**
* Set/Get the particular estimator used to measure the quality of tetrahedra.
* The default is NONE and valid values also include
* ASPECT_BETA
* ASPECT_FROBENIUS
* ASPECT_GAMMA
* ASPECT_RATIO
* COLLAPSE_RATIO
* CONDITION
* DISTORTION
* EDGE_RATIO
* JACOBIAN
* RADIUS_RATIO (identical to Verdict's aspect ratio beta)
* RELATIVE_SIZE_SQUARED
* SCALED_JACOBIAN
* SHAPE
* SHAPE_AND_SIZE
* VOLUME
*/
double ComputeTetQuality (vtkCell*);
/**
* Set/Get the particular estimator used to measure the quality of hexahedra.
* The default is NONE and valid values also include
* CONDITION
* DIAGONAL
* DIMENSION
* DISTORTION
* EDGE_RATIO
* JACOBIAN
* MAX_ASPECT_FROBENIUS
* MED_ASPECT_FROBENIUS
* MAX_EDGE_RATIO
* ODDY
* RELATIVE_SIZE_SQUARED
* SCALED_JACOBIAN
* SHAPE
* SHAPE_AND_SIZE
* SHEAR
* SHEAR_AND_SIZE
* SKEW
* STRETCH
* TAPER
* VOLUME
*/
double ComputeHexQuality (vtkCell*);
/**
* Set/Get the particular estimator used to measure the quality of triangle
* strip.
* The default is NONE and valid values also include
* AREA
*/
double ComputeTriangleStripQuality (vtkCell*);
/**
* Set/Get the particular estimator used to measure the quality of pixel.
* The default is NONE and valid values also include
* AREA
*/
double ComputePixelQuality (vtkCell*);
int RequestData
(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override;
//@{
/**
* A function called by some VERDICT triangle quality functions to test for
* inverted triangles.
* VERDICT only accepts plain function pointers which means the follow
* function and member must be static. Unfortunately, this makes the use of
* this part not thread safe.
*/
static int GetCurrentTriangleNormal (double point [3], double normal [3]);
static double CurrentTriNormal [3];
//@}
int QualityMeasure;
// Default return value for unsupported geometry
double UnsupportedGeometry;
// Default return value for qualities that are not well-defined for certain
// types of supported geometries. e.g. volume of a triangle
double UndefinedQuality;
private:
// Scratch objects; presumably reused across cells during RequestData --
// confirm in the .cxx.
vtkIdList* PointIds;
vtkPoints* Points;
vtkCellQuality(const vtkCellQuality&) = delete;
void operator=(const vtkCellQuality&) = delete;
};
#endif // vtkCellQuality_h
| 25.119898 | 86 | 0.702447 | [
"mesh",
"geometry",
"shape"
] |
cfddb7f4b1052cf81da0b64f41041809f114a191 | 367 | h | C | src/life.h | TheCharmingSociopath/JetpackJoyride | 99af36e750b92ba96c6ce8d42c9160b1e5c741c5 | [
"MIT"
] | null | null | null | src/life.h | TheCharmingSociopath/JetpackJoyride | 99af36e750b92ba96c6ce8d42c9160b1e5c741c5 | [
"MIT"
] | null | null | null | src/life.h | TheCharmingSociopath/JetpackJoyride | 99af36e750b92ba96c6ce8d42c9160b1e5c741c5 | [
"MIT"
] | null | null | null | #include "main.h"
#ifndef LIFE_H
#define LIFE_H
#define GLM_ENABLE_EXPERIMENTAL
// A "life" game object with a position, rotation and a drawable VAO.
// NOTE(review): exact gameplay role is not visible from this header --
// confirm against usage.
class Life {
public:
    Life() {}
    Life(float x, float y, color_t color);
    // World-space position of the object.
    glm::vec3 position;
    // Current rotation; units are defined by the .cpp.
    float rotation;
    // Movement/spin direction flag; semantics defined by tick() in the .cpp.
    bool direction = false;
    // Render with VP, presumably the combined view-projection matrix.
    void draw(glm::mat4 VP);
    void set_position(float x, float y);
    // Per-frame update.
    void tick();
private:
    // Raw pointer to the vertex-array object; no destructor is declared
    // here, so ownership/cleanup must be handled elsewhere -- verify.
    VAO *object;
};
#endif // LIFE_H
| 15.956522 | 42 | 0.648501 | [
"object"
] |
cfe8ae4fb7814d9ff56d3481421e0b80e7ee3969 | 3,523 | h | C | src/progress.h | GhostatSpirit/hdrview | 61596f8ba45554db23ae1b214354ab40da065638 | [
"MIT"
] | 94 | 2021-04-23T03:31:15.000Z | 2022-03-29T08:20:26.000Z | src/progress.h | GhostatSpirit/hdrview | 61596f8ba45554db23ae1b214354ab40da065638 | [
"MIT"
] | 64 | 2021-05-05T21:51:15.000Z | 2022-02-08T17:06:52.000Z | src/progress.h | GhostatSpirit/hdrview | 61596f8ba45554db23ae1b214354ab40da065638 | [
"MIT"
] | 3 | 2021-07-06T04:58:27.000Z | 2022-02-08T16:53:48.000Z | //
// Copyright (C) Wojciech Jarosz <wjarosz@gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style license that can
// be found in the LICENSE.txt file.
//
#pragma once
#include <atomic>
#include <cmath>
#include <cstdint>
#include <memory>
/*!
* A fixed-point fractional number stored using an std::atomic
*/
template <typename Fixed, typename BigFixed, int FractionBits>
class AtomicFixed
{
public:
static const Fixed ScalingFactor = (1 << FractionBits);
static Fixed float2fixed(float b) { return (Fixed)std::round(b * ScalingFactor); }
static float fixed2float(Fixed f) { return float(f) / ScalingFactor; }
std::atomic<Fixed> f;
AtomicFixed() = default;
explicit AtomicFixed(float d) : f(float2fixed(d))
{
// empty
}
explicit operator float() const { return fixed2float(f); }
Fixed operator=(float b) { return (f = float2fixed(b)); }
Fixed operator+=(float b) { return (f += float2fixed(b)); }
Fixed operator-=(float b) { return (f -= float2fixed(b)); }
// Disabling to avoid accidental non-atomic operations
// /// This operator is *NOT* atomic
// Fixed operator*=(float b)
// {
// return (f = Fixed(BigFixed(f) * BigFixed(float2fixed(b))) / ScalingFactor);
// }
// /// This operator is *NOT* atomic
// Fixed operator/=(float b)
// {
// return (f = Fixed((BigFixed(f) * ScalingFactor) / float2fixed(b)));
// }
bool operator<(float b) const { return f < float2fixed(b); }
bool operator<=(float b) const { return f <= float2fixed(b); }
bool operator>(float b) const { return f > float2fixed(b); }
bool operator>=(float b) const { return f >= float2fixed(b); }
bool operator==(float b) const { return f == float2fixed(b); }
bool operator!=(float b) const { return f != float2fixed(b); }
};
using AtomicFixed16 = AtomicFixed<std::int16_t, std::int32_t, 8>;
using AtomicFixed32 = AtomicFixed<std::int32_t, std::int64_t, 16>;
/*!
* Helper object to manage the progress display.
* {
* AtomicProgress p1(true);
* p1.set_num_steps(10);
* for (int i = 0; i < 10; ++i, ++p1)
* {
* // do something
* }
* } // end progress p1
*
*/
class AtomicProgress
{
public:
using AtomicPercent32 = AtomicFixed<std::int32_t, std::int64_t, 30>;
explicit AtomicProgress(bool createState = false, float totalPercentage = 1.f);
AtomicProgress(const AtomicProgress &parent, float percentageOfParent = 1.f);
// access to the atomic internal storage
void reset_progress(float p = 0.f);
float progress() const;
void set_done() { reset_progress(1.f); }
void set_busy() { reset_progress(-1.f); }
bool canceled() const;
void cancel();
// access to the discrete stepping
void set_available_percent(float percent);
void set_num_steps(int numSteps);
AtomicProgress &operator+=(int steps);
AtomicProgress &operator++() { return ((*this) += 1); }
private:
int m_num_steps;
float m_percentage_of_parent, m_step_percent;
struct State
{
State() : progress(0.f), canceled(false) {}
AtomicPercent32 progress; ///< Atomic internal state of progress
// theoretically this should be guarded by an std::atomic,
// but on any reasonable architecture a bool will be atomic
bool canceled; ///< Flag set if the calling code wants to cancel the associated task
};
std::shared_ptr<State> m_state;
}; | 29.115702 | 92 | 0.641783 | [
"object"
] |
cfe973332340cec39f9b0356c911d087a559cf00 | 26,066 | c | C | src/qos/sai_qos_queue.c | Dell-Networking/sonic-sai-common | 0ad86ab7f29732d318013e55fe11e7197c7c1246 | [
"Apache-2.0"
] | 1 | 2020-07-30T02:53:02.000Z | 2020-07-30T02:53:02.000Z | src/qos/sai_qos_queue.c | Dell-Networking/sonic-sai-common | 0ad86ab7f29732d318013e55fe11e7197c7c1246 | [
"Apache-2.0"
] | null | null | null | src/qos/sai_qos_queue.c | Dell-Networking/sonic-sai-common | 0ad86ab7f29732d318013e55fe11e7197c7c1246 | [
"Apache-2.0"
] | 4 | 2016-08-12T19:06:03.000Z | 2020-07-30T02:53:04.000Z | /************************************************************************
* LEGALESE: "Copyright (c) 2015, Dell Inc. All rights reserved."
*
* This source code is confidential, proprietary, and contains trade
* secrets that are the sole property of Dell Inc.
* Copy and/or distribution of this source code or disassembly or reverse
* engineering of the resultant object code are strictly forbidden without
* the written consent of Dell Inc.
*
************************************************************************/
/**
* @file sai_qos_queue.c
*
* @brief This file contains function definitions for SAI QOS queue
* initilization and SAI queue functionality API implementation.
*
*************************************************************************/
#include "sai_qos_common.h"
#include "sai_qos_util.h"
#include "sai_qos_api_utils.h"
#include "sai_qos_mem.h"
#include "sai_switch_utils.h"
#include "sai_common_infra.h"
#include "saistatus.h"
#include "std_assert.h"
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
static void sai_qos_queue_node_init (dn_sai_qos_queue_t *p_queue_node,
sai_object_id_t port_id,
sai_queue_type_t queue_type)
{
p_queue_node->port_id = port_id;
p_queue_node->queue_type = queue_type;
p_queue_node->drop_type = SAI_DROP_TYPE_TAIL;
p_queue_node->parent_sched_group_id = SAI_NULL_OBJECT_ID;
p_queue_node->child_offset = SAI_QOS_CHILD_INDEX_INVALID;
return;
}
static bool sai_qos_queue_is_in_use (dn_sai_qos_queue_t *p_queue_node)
{
STD_ASSERT (p_queue_node != NULL);
/* Verify WRED profile is assigned to queue */
if (p_queue_node->wred_id != SAI_NULL_OBJECT_ID)
return true;
/* Verify queue is child of any parent. */
if (p_queue_node->parent_sched_group_id != SAI_NULL_OBJECT_ID)
return true;
/* Verify Scheduler profile is assigned to queue. */
if (p_queue_node->scheduler_id != SAI_NULL_OBJECT_ID)
return true;
return false;
}
static void sai_qos_queue_free_resources (dn_sai_qos_queue_t *p_queue_node,
bool is_queue_set_in_npu,
bool is_queue_set_in_port_list)
{
if (p_queue_node == NULL) {
return;
}
/* Remove Queue from NPU, if it was already applied created. */
if (is_queue_set_in_npu) {
sai_queue_npu_api_get()->queue_remove (p_queue_node);
}
/* Delete Queue node from the PORT's Queue list, if it was already added. */
if (is_queue_set_in_port_list) {
sai_qos_port_queue_list_update (p_queue_node, false);
}
sai_qos_queue_node_free (p_queue_node);
return;
}
static sai_status_t sai_qos_queue_node_insert_to_tree (
dn_sai_qos_queue_t *p_queue_node)
{
rbtree_handle queue_tree = NULL;
t_std_error err_rc = STD_ERR_OK;
STD_ASSERT (p_queue_node != NULL);
queue_tree = sai_qos_access_global_config()->queue_tree;
STD_ASSERT (queue_tree != NULL);
err_rc = std_rbtree_insert (queue_tree, p_queue_node);
if (STD_IS_ERR(err_rc)) {
SAI_QUEUE_LOG_ERR ("Failed to insert Queue node for QID 0x%"PRIx64" "
"into Queue Tree", p_queue_node->key.queue_id);
return SAI_STATUS_FAILURE;
}
return SAI_STATUS_SUCCESS;
}
static void sai_qos_queue_node_remove_from_tree (dn_sai_qos_queue_t *p_queue_node)
{
rbtree_handle queue_tree = NULL;
STD_ASSERT (p_queue_node != NULL);
queue_tree = sai_qos_access_global_config()->queue_tree;
STD_ASSERT (queue_tree != NULL);
std_rbtree_remove (queue_tree, p_queue_node);
return;
}
static void sai_qos_queue_attr_set (dn_sai_qos_queue_t *p_queue_node,
uint_t attr_count,
const sai_attribute_t *p_attr_list,
dn_sai_operations_t op_type)
{
const sai_attribute_t *p_attr = NULL;
uint_t list_index = 0;
STD_ASSERT(p_queue_node != NULL);
STD_ASSERT(p_attr_list != NULL);
SAI_QUEUE_LOG_TRACE ("Set attributes for Queue, attribute count %d "
"op_type %d.", attr_count, op_type);
for (list_index = 0, p_attr = p_attr_list;
list_index < attr_count; ++list_index, ++p_attr) {
switch (p_attr->id)
{
case SAI_QUEUE_ATTR_TYPE:
p_queue_node->queue_type = p_attr->value.s32;
break;
case SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:
p_queue_node->scheduler_id = p_attr->value.oid;
break;
case SAI_QUEUE_ATTR_WRED_PROFILE_ID:
p_queue_node->wred_id = p_attr->value.oid;
break;
case SAI_QUEUE_ATTR_BUFFER_PROFILE_ID:
p_queue_node->buffer_profile_id = p_attr->value.oid;
break;
default:
SAI_QUEUE_LOG_TRACE ("Attribute id: %d - read-only attribute.",
p_attr->id);
break;
}
}
}
static sai_status_t sai_qos_queue_attributes_validate (uint_t attr_count,
const sai_attribute_t *attr_list,
dn_sai_operations_t op_type)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
uint_t max_vendor_attr_count = 0;
const dn_sai_attribute_entry_t *p_vendor_attr = NULL;
SAI_QUEUE_LOG_TRACE ("Parsing attributes for queue, attribute count %d "
"op_type %d.", attr_count, op_type);
if (attr_count == 0)
return SAI_STATUS_INVALID_PARAMETER;
sai_queue_npu_api_get()->attribute_table_get(&p_vendor_attr,
&max_vendor_attr_count);
STD_ASSERT(p_vendor_attr != NULL);
STD_ASSERT(max_vendor_attr_count > 0);
sai_rc = sai_attribute_validate (attr_count, attr_list, p_vendor_attr,
op_type, max_vendor_attr_count);
if(sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Attribute validation failed for %d "
"operation", op_type);
}
return sai_rc;
}
/* Attribute validation happen before this function */
static bool sai_qos_queue_is_duplicate_set (dn_sai_qos_queue_t *p_queue_node,
const sai_attribute_t *p_attr)
{
STD_ASSERT(p_queue_node != NULL);
STD_ASSERT(p_attr != NULL);
SAI_QUEUE_LOG_TRACE ("Verify duplicate set attributes value, ID: %d.",
p_attr->id);
switch (p_attr->id)
{
case SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:
if (p_queue_node->scheduler_id == p_attr->value.oid)
return true;
break;
case SAI_QUEUE_ATTR_WRED_PROFILE_ID:
if (p_queue_node->wred_id == p_attr->value.oid)
return true;
break;
case SAI_QUEUE_ATTR_BUFFER_PROFILE_ID:
if (p_queue_node->buffer_profile_id == p_attr->value.oid)
return true;
break;
default:
SAI_QUEUE_LOG_TRACE ("Attribute id: %d - read-only attribute.",
p_attr->id);
break;
}
return false;
}
static sai_status_t sai_qos_queue_create (sai_object_id_t port_id,
sai_queue_type_t queue_type)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
sai_object_id_t queue_oid = SAI_NULL_OBJECT_ID;
bool is_queue_set_in_npu = false;
bool is_queue_set_in_port_list = false;
SAI_QUEUE_LOG_TRACE ("Queue Creation for port 0x%"PRIx64", queue_type %s.",
port_id, sai_qos_queue_type_to_str (queue_type));
sai_qos_lock ();
do {
p_queue_node = sai_qos_queue_node_alloc ();
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue node memory allocation failed.");
sai_rc = SAI_STATUS_NO_MEMORY;
break;
}
sai_qos_queue_node_init (p_queue_node, port_id, queue_type);
sai_rc = sai_queue_npu_api_get()->queue_create (p_queue_node,
&queue_oid);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Queue creation failed in NPU.");
break;
}
p_queue_node->key.queue_id = queue_oid;
SAI_QUEUE_LOG_TRACE ("Queue Created in NPU.");
is_queue_set_in_npu = true;
/* Add the Queue node to PORT's Queue list */
sai_rc = sai_qos_port_queue_list_update (p_queue_node, true);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Failed to add Queue node to the "
"Queue List in PORT node");
break;
}
is_queue_set_in_port_list = true;
sai_rc = sai_qos_queue_node_insert_to_tree (p_queue_node);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Queue insertion to tree failed.");
break;
}
} while (0);
if (sai_rc == SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_INFO ("Queue Obj Id: 0x%"PRIx64" created.", queue_oid);
} else {
SAI_QUEUE_LOG_ERR ("Failed to create Queue.");
sai_qos_queue_free_resources (p_queue_node, is_queue_set_in_npu,
is_queue_set_in_port_list);
}
sai_qos_unlock ();
return sai_rc;
}
static sai_status_t sai_qos_queue_remove (sai_object_id_t queue_id)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
SAI_QUEUE_LOG_TRACE ("Queue remove QID 0x%"PRIx64".", queue_id);
if (! sai_is_obj_id_queue (queue_id)) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" is not a valid queue obj id.",
queue_id);
return SAI_STATUS_INVALID_OBJECT_TYPE;
}
sai_qos_lock ();
do {
p_queue_node = sai_qos_queue_node_get (queue_id);
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" does not exist in tree.",
queue_id);
sai_rc = SAI_STATUS_INVALID_OBJECT_ID;
break;
}
SAI_QUEUE_LOG_TRACE ("Queue remove. Port ID 0x%"PRIx64", "
"Q ID 0x%"PRIx64", Q Type: %s.", queue_id,
p_queue_node->port_id,
sai_qos_queue_type_to_str(p_queue_node->queue_type));
if ((sai_qos_queue_is_in_use (p_queue_node))) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" can't be deleted, Queue is"
" in use.", queue_id);
sai_rc = SAI_STATUS_OBJECT_IN_USE;
break;
}
sai_rc = sai_queue_npu_api_get()->queue_remove (p_queue_node);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" deletion failed in NPU.",
queue_id);
break;
}
/* Delete from the PORT's queue list */
sai_qos_port_queue_list_update (p_queue_node, false);
sai_qos_queue_node_remove_from_tree (p_queue_node);
sai_qos_queue_node_free (p_queue_node);
} while (0);
sai_qos_unlock ();
if (sai_rc == SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_INFO ("Queue 0x%"PRIx64" removed.", queue_id);
} else {
SAI_QUEUE_LOG_ERR ("Failed to remove Queue 0x%"PRIx64".", queue_id);
}
return sai_rc;
}
static sai_status_t sai_qos_queue_attribute_set (sai_object_id_t queue_id,
const sai_attribute_t *p_attr)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
uint_t attr_count = 1;
STD_ASSERT (p_attr != NULL);
SAI_QUEUE_LOG_TRACE ("Setting Attribute ID %d on Queue 0x%"PRIx64".",
p_attr->id, queue_id);
if (! sai_is_obj_id_queue (queue_id)) {
SAI_QUEUE_LOG_ERR ("%"PRIu64" is not a valid Queue obj id.",
queue_id);
return SAI_STATUS_INVALID_OBJECT_TYPE;
}
sai_qos_lock ();
do {
p_queue_node = sai_qos_queue_node_get (queue_id);
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" does not exist in tree.",
queue_id);
sai_rc = SAI_STATUS_INVALID_OBJECT_ID;
break;
}
sai_rc = sai_qos_queue_attributes_validate (attr_count, p_attr,
SAI_OP_SET);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Input parameters validation failed for "
"Queue attributes set.");
break;
}
if (sai_qos_queue_is_duplicate_set (p_queue_node, p_attr)) {
SAI_QUEUE_LOG_TRACE ("Duplicate set value for Attribute ID %d.",
p_attr->id);
break;
}
switch (p_attr->id)
{
case SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID:
sai_rc = sai_qos_queue_scheduler_set(p_queue_node, p_attr);
break;
case SAI_QUEUE_ATTR_WRED_PROFILE_ID:
sai_rc = sai_qos_wred_set_on_queue(queue_id, p_attr);
break;
case SAI_QUEUE_ATTR_BUFFER_PROFILE_ID:
sai_rc = sai_qos_obj_update_buffer_profile(queue_id,
p_attr->value.oid);
break;
default:
sai_rc = sai_queue_npu_api_get()->queue_attribute_set(p_queue_node,
attr_count,
p_attr);
break;
}
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Failed to set queue Attribute ID: %d "
"in NPU, Error: %d.", p_attr->id, sai_rc);
break;
}
sai_qos_queue_attr_set (p_queue_node, attr_count, p_attr, SAI_OP_SET);
} while (0);
sai_qos_unlock ();
return sai_rc;
}
static sai_status_t sai_qos_queue_attribute_get (sai_object_id_t queue_id,
uint32_t attr_count,
sai_attribute_t *p_attr_list)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
STD_ASSERT (p_attr_list != NULL);
SAI_QUEUE_LOG_TRACE ("Getting Attributes for queue 0x%"PRIx64", "
"attr_count %d.", queue_id, attr_count);
if (! sai_is_obj_id_queue (queue_id)) {
SAI_QUEUE_LOG_ERR ("%"PRIu64" is not a valid Queue obj id.",
queue_id);
return SAI_STATUS_INVALID_OBJECT_TYPE;
}
sai_qos_lock ();
do {
p_queue_node = sai_qos_queue_node_get (queue_id);
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" does not exist in tree.", queue_id);
sai_rc = SAI_STATUS_INVALID_OBJECT_ID;
break;
}
sai_rc = sai_qos_queue_attributes_validate (attr_count, p_attr_list,
SAI_OP_GET);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Input parameters validation failed for "
"Queue attributes get.");
break;
}
sai_rc = sai_queue_npu_api_get()->queue_attribute_get (p_queue_node,
attr_count,
p_attr_list);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Failed to get Queue Attributes from "
"NPU, Error: %d.", sai_rc);
break;
}
} while (0);
sai_qos_unlock ();
return sai_rc;
}
sai_status_t sai_qos_port_queue_all_init (sai_object_id_t port_id)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
uint_t queue = 0;
uint_t max_queues = 0;
SAI_QUEUE_LOG_TRACE ("Port Queue All Init.");
if (sai_qos_queue_is_seperate_ucast_or_mcast_supported (port_id)) {
max_queues = sai_switch_max_uc_queues_per_port_get (port_id);
for (queue = 0; queue < max_queues; queue++) {
sai_rc = sai_qos_queue_create (port_id, SAI_QUEUE_TYPE_UNICAST);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Unicast Queue create failed "
"for port 0x%"PRIx64", Queue %d sai_rc=%d.",
port_id, queue, sai_rc);
return sai_rc;
}
}
SAI_QUEUE_LOG_TRACE ("Port Unicast Queues Init success.");
max_queues = sai_switch_max_mc_queues_per_port_get (port_id);
for (queue = 0; queue < max_queues; queue++) {
sai_rc = sai_qos_queue_create (port_id, SAI_QUEUE_TYPE_MULTICAST);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Multicast Queue create failed "
"for port 0x%"PRIx64", Queue %d sai_rc=%d.",
port_id, queue, sai_rc);
return sai_rc;
}
}
} else {
max_queues = sai_switch_max_queues_per_port_get (port_id);
for (queue = 0; queue < max_queues; queue++) {
sai_rc = sai_qos_queue_create (port_id, SAI_QUEUE_TYPE_ALL);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Queue create failed "
"for Port 0x%"PRIx64", Queue %d sai_rc=%d.",
port_id, queue, sai_rc);
return sai_rc;
}
}
}
SAI_QUEUE_LOG_INFO ("Port 0x%"PRIx64", All Queue Init complete.", port_id);
return sai_rc;
}
sai_status_t sai_qos_port_queue_all_deinit (sai_object_id_t port_id)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
dn_sai_qos_queue_t *p_next_queue_node = NULL;
dn_sai_qos_port_t *p_qos_port_node = NULL;
SAI_QUEUE_LOG_TRACE ("Port 0x%"PRIx64" Queue All De-Init.", port_id);
p_qos_port_node = sai_qos_port_node_get (port_id);
if (NULL == p_qos_port_node) {
SAI_QUEUE_LOG_ERR ("Qos Port 0x%"PRIx64" does not exist in tree.",
port_id);
return SAI_STATUS_INVALID_OBJECT_ID;
}
for (p_queue_node = sai_qos_port_get_first_queue (p_qos_port_node);
p_queue_node != NULL; p_queue_node = p_next_queue_node) {
p_next_queue_node = sai_qos_port_get_next_queue (p_qos_port_node,
p_queue_node);
sai_rc = sai_qos_queue_remove (p_queue_node->key.queue_id);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Queue remove failed "
"for port 0x%"PRIx64", Queue 0x%"PRIx64".",
port_id, p_queue_node->key.queue_id);
return sai_rc;
}
}
SAI_QUEUE_LOG_INFO ("Port 0x%"PRIx64", All Queue De-Init success.", port_id);
return sai_rc;
}
sai_status_t sai_qos_first_free_queue_get (sai_object_id_t port_id,
sai_queue_type_t queue_type,
sai_object_id_t *p_queue_id)
{
dn_sai_qos_port_t *p_qos_port_node = NULL;
dn_sai_qos_queue_t *p_queue_node = NULL;
p_qos_port_node = sai_qos_port_node_get (port_id);
if (NULL == p_qos_port_node) {
SAI_SCHED_GRP_LOG_ERR ("Qos Port 0x%"PRIx64" does not exist in tree.",
port_id);
return SAI_STATUS_INVALID_OBJECT_ID;
}
for (p_queue_node = sai_qos_port_get_first_queue (p_qos_port_node);
p_queue_node != NULL; p_queue_node =
sai_qos_port_get_next_queue (p_qos_port_node, p_queue_node)) {
if (p_queue_node->queue_type != queue_type)
continue;
if (SAI_NULL_OBJECT_ID == p_queue_node->parent_sched_group_id) {
*p_queue_id = p_queue_node->key.queue_id;
return SAI_STATUS_SUCCESS;
}
}
return SAI_STATUS_INSUFFICIENT_RESOURCES;
}
sai_status_t sai_qos_queue_id_list_get (sai_object_id_t port_id,
uint_t queue_id_list_count,
sai_object_id_t *p_queue_id_list)
{
dn_sai_qos_port_t *p_qos_port_node = NULL;
dn_sai_qos_queue_t *p_queue_node = NULL;
size_t queue_count = 0;
if (0 == queue_id_list_count)
return SAI_STATUS_SUCCESS;
STD_ASSERT (p_queue_id_list != NULL);
p_qos_port_node = sai_qos_port_node_get (port_id);
if (NULL == p_qos_port_node) {
SAI_QUEUE_LOG_ERR ("Qos Port 0x%"PRIx64" does not exist in tree.",
port_id);
return SAI_STATUS_INVALID_OBJECT_ID;
}
for (p_queue_node = sai_qos_port_get_first_queue (p_qos_port_node);
((p_queue_node != NULL) && (queue_count < queue_id_list_count)); p_queue_node =
sai_qos_port_get_next_queue (p_qos_port_node, p_queue_node)) {
p_queue_id_list[queue_count] = p_queue_node->key.queue_id;
queue_count++;
}
if (queue_id_list_count != queue_count) {
SAI_QUEUE_LOG_ERR ("Required queues not exits in "
"port 0x%"PRIx64", "
"req count %d, queues present %d.",
port_id, queue_id_list_count, queue_count);
return SAI_STATUS_FAILURE;
}
return SAI_STATUS_SUCCESS;
}
static sai_status_t sai_qos_queue_stats_get (sai_object_id_t queue_id,
const sai_queue_stat_counter_t *counter_ids,
uint32_t number_of_counters,
uint64_t* counters)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
if (counter_ids == NULL) {
SAI_QUEUE_LOG_ERR("Invalid parameter counter_ids is NULL");
STD_ASSERT(0);
return SAI_STATUS_INVALID_PARAMETER;
}
if (counters == NULL) {
SAI_QUEUE_LOG_ERR("Invalid parameter counters is NULL");
STD_ASSERT(0);
return SAI_STATUS_INVALID_PARAMETER;
}
if (number_of_counters == 0) {
SAI_QUEUE_LOG_ERR("Invalid parameter number_of_counters is zero");
STD_ASSERT(0);
return SAI_STATUS_INVALID_PARAMETER;
}
SAI_QUEUE_LOG_TRACE ("Getting stats for queue 0x%"PRIx64"", queue_id);
if (! sai_is_obj_id_queue (queue_id)) {
SAI_QUEUE_LOG_ERR ("0x%"PRIx64" is not a valid Queue obj id.",
queue_id);
return SAI_STATUS_INVALID_OBJECT_TYPE;
}
sai_qos_lock ();
p_queue_node = sai_qos_queue_node_get (queue_id);
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" does not exist in tree.", queue_id);
sai_qos_unlock ();
return SAI_STATUS_INVALID_OBJECT_ID;
}
sai_rc = sai_queue_npu_api_get()->queue_stats_get (p_queue_node, counter_ids,
number_of_counters, counters);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Failed to get Queue stats NPU, Error: %d.", sai_rc);
}
sai_qos_unlock ();
return sai_rc;
}
static sai_status_t sai_qos_queue_stats_clear (sai_object_id_t queue_id,
const sai_queue_stat_counter_t *counter_ids,
uint32_t number_of_counters)
{
sai_status_t sai_rc = SAI_STATUS_SUCCESS;
dn_sai_qos_queue_t *p_queue_node = NULL;
if (counter_ids == NULL) {
SAI_QUEUE_LOG_ERR("Invalid parameter counter_ids is NULL");
STD_ASSERT(0);
return SAI_STATUS_INVALID_PARAMETER;
}
if (number_of_counters == 0) {
SAI_QUEUE_LOG_ERR("Invalid parameter number_of_counters is zero");
STD_ASSERT(0);
return SAI_STATUS_INVALID_PARAMETER;
}
SAI_QUEUE_LOG_TRACE ("Clearing stats for queue 0x%"PRIx64"", queue_id);
if (! sai_is_obj_id_queue (queue_id)) {
SAI_QUEUE_LOG_ERR ("0x%"PRIx64" is not a valid Queue obj id.",
queue_id);
return SAI_STATUS_INVALID_OBJECT_TYPE;
}
sai_qos_lock ();
p_queue_node = sai_qos_queue_node_get (queue_id);
if (NULL == p_queue_node) {
SAI_QUEUE_LOG_ERR ("Queue 0x%"PRIx64" does not exist in tree.", queue_id);
sai_qos_unlock ();
return SAI_STATUS_INVALID_OBJECT_ID;
}
sai_rc = sai_queue_npu_api_get()->queue_stats_clear (p_queue_node, counter_ids,
number_of_counters);
if (sai_rc != SAI_STATUS_SUCCESS) {
SAI_QUEUE_LOG_ERR ("Failed to clear Queue stats NPU, Error: %d.", sai_rc);
}
sai_qos_unlock ();
return sai_rc;
}
static sai_queue_api_t sai_qos_queue_method_table = {
sai_qos_queue_attribute_set,
sai_qos_queue_attribute_get,
sai_qos_queue_stats_get,
sai_qos_queue_stats_clear
};
sai_queue_api_t *sai_qos_queue_api_query (void)
{
return (&sai_qos_queue_method_table);
}
| 31.710462 | 91 | 0.581869 | [
"object"
] |
cfea4c7c67165437e21f9b7022b746ad383b8549 | 4,437 | h | C | src/trunk/libs/seiscomp3/core/interruptible.h | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 94 | 2015-02-04T13:57:34.000Z | 2021-11-01T15:10:06.000Z | src/trunk/libs/seiscomp3/core/interruptible.h | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 233 | 2015-01-28T15:16:46.000Z | 2021-08-23T11:31:37.000Z | src/trunk/libs/seiscomp3/core/interruptible.h | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 95 | 2015-02-13T15:53:30.000Z | 2021-11-02T14:54:54.000Z | /***************************************************************************
* Copyright (C) by GFZ Potsdam *
* *
* You can redistribute and/or modify this program under the *
* terms of the SeisComP Public License. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* SeisComP Public License for more details. *
***************************************************************************/
#ifndef __SEISCOMP_CORE_INTERRUPTIBLE_H__
#define __SEISCOMP_CORE_INTERRUPTIBLE_H__
#include <string>
#include <set>
#include <utility>
#include <seiscomp3/core/baseobject.h>
namespace Seiscomp {
namespace Core {
namespace _private {
class SC_SYSTEM_CORE_API Alarmable {
public:
Alarmable();
virtual ~Alarmable();
protected:
void setAlarm(unsigned int seconds);
void clearAlarm();
virtual void handleAlarm() throw() {};
private:
std::list<std::pair<Alarmable*, time_t> >::iterator _link;
static std::list<std::pair<Alarmable*, time_t> > _alarms;
static bool _signalInit;
static void CheckAlarms();
static void SignalHandler(int);
#if defined(_MSC_VER)
friend VOID CALLBACK timerCompletion(LPVOID, DWORD, DWORD);
#endif
};
class SC_SYSTEM_CORE_API Interruptible {
public:
Interruptible();
virtual ~Interruptible();
static void Interrupt(int sig) throw();
protected:
virtual void handleInterrupt(int) throw() {};
private:
std::list<Interruptible*>::iterator _link;
static std::list<Interruptible*> _registered;
};
class SC_SYSTEM_CORE_API OperationInterrupted : public GeneralException {
public:
OperationInterrupted(): GeneralException("operation interrupted") {}
OperationInterrupted(const std::string& what): GeneralException(what) {}
};
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
/** Classes that implement opterations, which may potentially take long time
and need to be interrupted by the user, should inherit from
InterruptibleObject.
Inherited classes can also use setAlarm(int seconds) and clearAlarm() to
implement timeouts. One alarm per object can be used, eg., setAlarm()
cancels the previous alarm. However, there is no per-process limitation
like in case of alarm() system call. setAlarm() and clearAlarm() themselves
are implemented on top of alarm(), which means that alarm() cannot be used
at the same time.
The inherited class is supposed to override handleInterrupt(int sig) and
handleAlarm() methods, which are called when interrupt is requested or
alarm expires respectively. These methods are normally called from a signal
handler, so they are not allowed to throw any exceptions. Normally they
just set a flag, and exception is thrown after returning from a signal
handler. For this purpose, the exception OperationInterrupted has been
defined. Note: according the POSIX standard, a flag that is set in a
signal handler should be of type 'volatile sig_atomic_t'.
The main program should set up signal handler as follows:
\code
void signalHandler(int signal) {
Seiscomp::Core::InterruptibleObject::Interrupt(signal);
}
int main(int argc, char **argv) {
struct sigaction sa;
sa.sa_handler = signalHandler;
sa.sa_flags = 0;
sigemptyset(&sa.sa_mask);
sigaction(SIGINT, &sa, NULL);
sigaction(SIGTERM, &sa, NULL);
// Optionally, disable SIGHUP, so it is not necessary
// to start the process with nohup.
sa.sa_handler = SIG_IGN;
sigaction(SIGHUP, &sa, NULL);
...
return 0
}
\endcode
*/
DEFINE_SMARTPOINTER(InterruptibleObject);
class SC_SYSTEM_CORE_API InterruptibleObject : public BaseObject, public Interruptible, public Alarmable {
DECLARE_SC_CLASS(InterruptibleObject);
};
} // namespace _private
using _private::OperationInterrupted;
using _private::InterruptibleObjectPtr;
using _private::InterruptibleObject;
} // namespace Core
} // namespace Seiscomp
#endif
| 33.11194 | 106 | 0.642551 | [
"object"
] |
cfead14712b7e69fdc022966cd71950a88f1aada | 28,247 | h | C | include/server.h | kidadult/swoole-src | 3cac6e42255e9e201e778f5164ecfb95cfd52a8a | [
"Apache-2.0"
] | 1 | 2020-08-06T04:01:08.000Z | 2020-08-06T04:01:08.000Z | include/server.h | kavi1990/swoole-src | c87dd9c97c2608d2d08ce88a59293a0eacd6ac47 | [
"Apache-2.0"
] | null | null | null | include/server.h | kavi1990/swoole-src | c87dd9c97c2608d2d08ce88a59293a0eacd6ac47 | [
"Apache-2.0"
] | 1 | 2019-12-18T11:49:03.000Z | 2019-12-18T11:49:03.000Z | /*
+----------------------------------------------------------------------+
| Swoole |
+----------------------------------------------------------------------+
| This source file is subject to version 2.0 of the Apache license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.apache.org/licenses/LICENSE-2.0.html |
| If you did not receive a copy of the Apache2.0 license and are unable|
| to obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Tianfeng Han <mikan.tenny@gmail.com> |
+----------------------------------------------------------------------+
*/
#pragma once
#include "swoole_api.h"
#include "buffer.h"
#include "connection.h"
#include "http.h"
#include <string>
#include <unordered_map>
#include <unordered_set>
#define SW_REACTOR_NUM SW_CPU_NUM
#define SW_WORKER_NUM (SW_CPU_NUM*2)
enum swServer_event_type
{
//data payload
SW_SERVER_EVENT_SEND_DATA,
SW_SERVER_EVENT_SEND_FILE,
SW_SERVER_EVENT_SNED_DGRAM,
//connection event
SW_SERVER_EVENT_CLOSE,
SW_SERVER_EVENT_CONNECT,
//task
SW_SERVER_EVENT_TASK,
SW_SERVER_EVENT_FINISH,
//pipe
SW_SERVER_EVENT_PIPE_MESSAGE,
//proxy
SW_SERVER_EVENT_PROXY_START,
SW_SERVER_EVENT_PROXY_END,
//event operate
SW_SERVER_EVENT_CONFIRM,
SW_SERVER_EVENT_PAUSE_RECV,
SW_SERVER_EVENT_RESUME_RECV,
//buffer event
SW_SERVER_EVENT_BUFFER_FULL,
SW_SERVER_EVENT_BUFFER_EMPTY,
//process message
SW_SERVER_EVENT_INCOMING,
SW_SERVER_EVENT_SHUTDOWN,
};
enum swTask_ipc_mode
{
SW_TASK_IPC_UNIXSOCK = 1,
SW_TASK_IPC_MSGQUEUE = 2,
SW_TASK_IPC_PREEMPTIVE = 3,
SW_TASK_IPC_STREAM = 4,
};
/**
* use swDataHead->from_fd, 1 byte 8 bit
*/
enum swTask_type
{
SW_TASK_TMPFILE = 1, //tmp file
SW_TASK_SERIALIZE = 2, //php serialize
SW_TASK_NONBLOCK = 4, //task
SW_TASK_CALLBACK = 8, //callback
SW_TASK_WAITALL = 16, //for taskWaitAll
SW_TASK_COROUTINE = 32, //coroutine
SW_TASK_PEEK = 64, //peek
SW_TASK_NOREPLY = 128, //don't reply
};
enum swFactory_dispatch_mode
{
SW_DISPATCH_ROUND = 1,
SW_DISPATCH_FDMOD = 2,
SW_DISPATCH_QUEUE = 3,
SW_DISPATCH_IPMOD = 4,
SW_DISPATCH_UIDMOD = 5,
SW_DISPATCH_USERFUNC = 6,
SW_DISPATCH_STREAM = 7,
};
enum swFactory_dispatch_result
{
SW_DISPATCH_RESULT_DISCARD_PACKET = -1,
SW_DISPATCH_RESULT_CLOSE_CONNECTION = -2,
SW_DISPATCH_RESULT_USERFUNC_FALLBACK = -3,
};
typedef struct _swReactorThread
{
pthread_t thread_id;
swReactor reactor;
int notify_pipe;
uint32_t pipe_num;
swSocket *pipe_sockets;
std::unordered_map<int, swString *> *send_buffers;
} swReactorThread;
/**
 * Per-listening-socket configuration and state. Instances form a doubly
 * linked list hanging off swServer::listen_list (see swServer_add_port).
 */
typedef struct _swListenPort
{
    struct _swListenPort *next, *prev;
    /**
     * tcp socket listen backlog
     */
    uint16_t backlog;
    /**
     * open tcp_defer_accept option
     */
    int tcp_defer_accept;
    /**
     * TCP_FASTOPEN
     */
    int tcp_fastopen;
    /**
     * TCP KeepAlive
     */
    int tcp_keepidle;
    int tcp_keepinterval;
    int tcp_keepcount;
    int socket_buffer_size;
    uint32_t buffer_high_watermark;
    uint32_t buffer_low_watermark;
    enum swSocket_type type;
    uint8_t ssl;
    int port;
    swSocket *socket;
    pthread_t thread_id;
    char host[SW_HOST_MAXSIZE];
    /**
     * check data eof
     */
    uint32_t open_eof_check :1;
    /**
     * built-in http protocol
     */
    uint32_t open_http_protocol :1;
    /**
     * built-in http2.0 protocol
     */
    uint32_t open_http2_protocol :1;
    /**
     * built-in websocket protocol
     */
    uint32_t open_websocket_protocol :1;
    /**
     * open websocket close frame
     */
    uint32_t open_websocket_close_frame :1;
    /**
     * one package: length check
     */
    uint32_t open_length_check :1;
    /**
     * for mqtt protocol
     */
    uint32_t open_mqtt_protocol :1;
    /**
     * redis protocol
     */
    uint32_t open_redis_protocol :1;
    /**
     * open tcp nodelay option
     */
    uint32_t open_tcp_nodelay :1;
    /**
     * open tcp nopush option(for sendfile)
     */
    uint32_t open_tcp_nopush :1;
    /**
     * open tcp keepalive
     */
    uint32_t open_tcp_keepalive :1;
    /**
     * enable SSL/TLS encryption on this port
     */
    uint32_t open_ssl_encrypt :1;
    /**
     * Sec-WebSocket-Protocol
     */
    char *websocket_subprotocol;
    uint16_t websocket_subprotocol_length;
    /**
     * set socket option
     */
    int kernel_socket_recv_buffer_size;
    int kernel_socket_send_buffer_size;
#ifdef SW_USE_OPENSSL
    SSL_CTX *ssl_context;
    swSSL_config ssl_config;
    swSSL_option ssl_option;
#endif
    sw_atomic_t connection_num; // live connections on this port
    swProtocol protocol;
    void *ptr;
    // Read handler invoked by the reactor for events on this port's sockets.
    int (*onRead)(swReactor *reactor, struct _swListenPort *port, swEvent *event);
} swListenPort;
/**
 * Doubly linked list node tracking a user-defined worker process
 * (see swServer::user_worker_list).
 */
typedef struct _swUserWorker_node
{
    struct _swUserWorker_node *next, *prev;
    swWorker *worker;
} swUserWorker_node;
/**
 * Notification that a worker process stopped, carrying both its OS pid
 * and its logical worker id.
 */
typedef struct _swWorkerStopMessage
{
    pid_t pid;
    uint16_t worker_id;
} swWorkerStopMessage;
//------------------------------------Packet-------------------------------------------
/**
 * Header for a task payload spilled to a temporary file
 * (see swTaskWorker_large_pack / swTaskWorker_large_unpack below).
 */
typedef struct _swPacket_task
{
    size_t length; // payload size stored in the temp file
    char tmpfile[SW_TASK_TMPDIR_SIZE + sizeof(SW_TASK_TMP_FILE)]; // path of the spill file
} swPacket_task;
/**
 * Minimal response header: payload length plus the id of the worker that
 * produced it.
 */
typedef struct _swPacket_response
{
    int length;
    int worker_id;
} swPacket_response;
/**
 * Packet that carries its payload by reference: an event header plus an
 * embedded swString descriptor pointing at the data.
 */
typedef struct _swPacket_ptr
{
    swDataHead info;
    swString data;
} swPacket_ptr;
//-----------------------------------Factory--------------------------------------------
/**
 * Strategy object that moves packets between reactors and workers.
 * Concrete implementations are installed by swFactory_create() and
 * swFactoryProcess_create(), declared below.
 */
struct swFactory
{
    void *object; // implementation-private state
    void *ptr; //server object
    int (*start)(swFactory *);
    int (*shutdown)(swFactory *);
    int (*dispatch)(swFactory *, swSendData *); // route a packet to a worker
    /**
     * Returns the number of bytes sent
     */
    int (*finish)(swFactory *, swSendData *);
    int (*notify)(swFactory *, swDataHead *); //send a event notify
    int (*end)(swFactory *, int fd); // terminate the session on fd — presumably; confirm
    void (*free)(swFactory *);
};
int swFactory_create(swFactory *factory);
int swFactory_finish(swFactory *factory, swSendData *_send);
int swFactory_check_callback(swFactory *factory);
int swFactoryProcess_create(swFactory *factory, uint32_t worker_num);
//------------------------------------Server-------------------------------------------
/**
 * Hook points usable with swServer_add_hook()/swServer_call_hook();
 * each entry indexes into swServer::hooks (array sized SW_MAX_HOOK_TYPE).
 */
enum swServer_hook_type
{
    SW_SERVER_HOOK_MASTER_START,
    SW_SERVER_HOOK_MASTER_TIMER,
    SW_SERVER_HOOK_REACTOR_START,
    SW_SERVER_HOOK_WORKER_START,
    SW_SERVER_HOOK_TASK_WORKER_START,
    SW_SERVER_HOOK_MASTER_CONNECT,
    SW_SERVER_HOOK_REACTOR_CONNECT,
    SW_SERVER_HOOK_WORKER_CONNECT,
    SW_SERVER_HOOK_REACTOR_RECEIVE,
    SW_SERVER_HOOK_WORKER_RECEIVE,
    SW_SERVER_HOOK_REACTOR_CLOSE,
    SW_SERVER_HOOK_WORKER_CLOSE,
    SW_SERVER_HOOK_MANAGER_START,
    SW_SERVER_HOOK_MANAGER_TIMER,
    SW_SERVER_HOOK_PROCESS_TIMER,
};
/**
 * Server-wide counters; the sw_atomic_* types indicate concurrent updates
 * from multiple processes/threads.
 */
typedef struct _swServerStats
{
    time_t start_time;          // server start timestamp
    sw_atomic_t connection_num; // currently open connections
    sw_atomic_t tasking_num;    // tasks currently in flight
    sw_atomic_long_t accept_count;
    sw_atomic_long_t close_count;
    sw_atomic_long_t request_count;
} swServerStats;
/**
 * Global server state (master/manager pids, lifecycle flags, and the two
 * worker process pools) — presumably shared memory visible to all
 * processes; confirm against the allocator used in swServer_create.
 */
typedef struct _swServerGS
{
    pid_t master_pid;
    pid_t manager_pid;
    uint32_t session_round :24; // rolling counter — presumably used when minting session ids; confirm
    sw_atomic_t start;
    sw_atomic_t shutdown;
    time_t now;
    sw_atomic_t spinlock;
    swProcessPool task_workers;  // pool backing swServer_get_worker() task-worker lookups
    swProcessPool event_workers; // pool backing swServer_get_worker() event-worker lookups
} swServerGS;
/**
 * The central server object: configuration, runtime state, worker/task
 * pools, listening ports, and the user callback table. Created and run
 * via swServer_create()/swServer_start(), declared below.
 */
struct swServer
{
    /**
     * reactor thread/process num
     */
    uint16_t reactor_num;
    /**
     * worker process num
     */
    uint32_t worker_num;
    /**
     * The number of pipe per reactor maintenance
     */
    uint16_t reactor_pipe_num;
    uint8_t factory_mode;
    uint8_t dgram_port_num;
    /**
     * package dispatch mode
     */
    uint8_t dispatch_mode;
    /**
     * No idle work process is available.
     */
    uint8_t scheduler_warning;
    int worker_uid;
    int worker_groupid;
    /**
     * max connection num
     */
    uint32_t max_connection;
    /**
     * worker process max request
     */
    uint32_t max_request;
    uint32_t max_request_grace;
    int udp_socket_ipv4;
    int udp_socket_ipv6;
    uint32_t max_wait_time;
    /*----------------------------Reactor schedule--------------------------------*/
    uint16_t reactor_round_i;
    uint16_t reactor_next_i;
    uint16_t reactor_schedule_count;
    sw_atomic_t worker_round_id; // shared round-robin cursor (see swServer_worker_schedule)
    /**
     * run as a daemon process
     */
    uint32_t daemonize :1;
    /**
     * have dgram socket
     */
    uint32_t have_dgram_sock :1;
    /**
     * have stream socket
     */
    uint32_t have_stream_sock :1;
    /**
     * open cpu affinity setting
     */
    uint32_t open_cpu_affinity :1;
    /**
     * disable notice when use SW_DISPATCH_ROUND and SW_DISPATCH_QUEUE
     */
    uint32_t disable_notify :1;
    /**
     * discard the timeout request
     */
    uint32_t discard_timeout_request :1;
    /**
     * parse cookie header
     */
    uint32_t http_parse_cookie :1;
    /**
     * parse x-www-form-urlencoded data
     */
    uint32_t http_parse_post :1;
    /**
     * parse multipart/form-data files to match $_FILES
     */
    uint32_t http_parse_files :1;
    /**
     * http content compression
     */
    uint32_t http_compression :1;
    /**
     * RFC-7692
     */
    uint32_t websocket_compression :1;
    /**
     * handle static files
     */
    uint32_t enable_static_handler :1;
    /**
     * enable onConnect/onClose event when use dispatch_mode=1/3
     */
    uint32_t enable_unsafe_event :1;
    /**
     * waiting for worker onConnect callback function to return
     */
    uint32_t enable_delay_receive :1;
    /**
     * asynchronous reloading
     */
    uint32_t reload_async :1;
    /**
     * use task object
     */
    uint32_t task_use_object :1;
    /**
     * enable coroutine in task worker
     */
    uint32_t task_enable_coroutine :1;
    /**
     * yield coroutine when the output buffer is full
     */
    uint32_t send_yield :1;
    /**
     * enable coroutine
     */
    uint32_t enable_coroutine :1;
    /**
     * disable multi-threads
     */
    uint32_t single_thread :1;
    /**
     * server status
     */
    uint32_t running :1;
    /**
     * heartbeat check time
     */
    uint16_t heartbeat_idle_time;
    uint16_t heartbeat_check_interval;
    int *cpu_affinity_available;
    int cpu_affinity_available_num;
    swPipeBuffer **pipe_buffers;
    double send_timeout;
    uint16_t listen_port_num;
    time_t reload_time;
    time_t warning_time;
    long timezone;
    swTimer_node *master_timer;
    swTimer_node *heartbeat_timer;
    swTimer_node *enable_accept_timer;
    /* buffer output/input setting*/
    uint32_t buffer_output_size;
    uint32_t buffer_input_size;
    uint32_t ipc_max_size;
    void *ptr2;
    void *private_data_3;
    swFactory factory;
    swListenPort *listen_list; // linked list of swListenPort (see swServer_add_port)
    pthread_t heartbeat_pidt;
    /**
     * task process
     */
    uint32_t task_worker_num;
    uint8_t task_ipc_mode;
    uint32_t task_max_request;
    uint32_t task_max_request_grace;
    swPipe *task_notify;
    swEventData *task_result;
    /**
     * user process
     */
    uint32_t user_worker_num;
    swUserWorker_node *user_worker_list;
    swHashMap *user_worker_map;
    swWorker *user_workers;
    swReactorThread *reactor_threads;
    swWorker *workers;
    swLock lock; // global lock used by swServer_lock()/swServer_unlock()
    swChannel *message_box;
    swServerStats *stats;
    swServerGS *gs;
    std::unordered_set<std::string> *types;
    std::unordered_set<std::string> *locations; // static-handler locations (see swServer_http_static_handler_add_location)
#ifdef HAVE_PTHREAD_BARRIER
    pthread_barrier_t barrier;
#endif
    // Indexed by fd; slots 0/1 double as max/min-fd bookkeeping
    // (see SW_SERVER_MAX_FD_INDEX / SW_SERVER_MIN_FD_INDEX macros below).
    swConnection *connection_list;
    // Fixed-size table indexed by session_id % SW_SESSION_LIST_SIZE
    // (see swServer_get_session).
    swSession *session_list;
    /**
     * temporary directory for HTTP uploaded file.
     */
    char *upload_tmp_dir;
    /**
     * http compression level for gzip/br
     */
    uint8_t http_compression_level;
    /**
     * http static file directory
     */
    char *document_root;
    uint16_t document_root_len;
    /**
     * master process pid
     */
    char *pid_file;
    /**
     * stream
     */
    char *stream_socket_file;
    swSocket *stream_socket;
    swProtocol stream_protocol;
    swSocket *last_stream_socket;
    swLinkedList *buffer_pool;
#ifdef SW_BUFFER_RECV_TIME
    double last_receive_usec;
#endif
    int manager_alarm;
    /**
     * message queue key
     */
    uint64_t message_queue_key;
    // Per-hook-type callback chains (see swServer_add_hook / swServer_call_hook).
    swLinkedList *hooks[SW_MAX_HOOK_TYPE];
    void (*onStart)(swServer *serv);
    void (*onManagerStart)(swServer *serv);
    void (*onManagerStop)(swServer *serv);
    void (*onShutdown)(swServer *serv);
    void (*onPipeMessage)(swServer *, swEventData *);
    void (*onWorkerStart)(swServer *serv, int worker_id);
    void (*onWorkerStop)(swServer *serv, int worker_id);
    void (*onWorkerExit)(swServer *serv, int worker_id);
    void (*onWorkerError)(swServer *serv, int worker_id, pid_t worker_pid, int exit_code, int signo);
    void (*onUserWorkerStart)(swServer *serv, swWorker *worker);
    /**
     * Client
     */
    int (*onReceive)(swServer *, swEventData *);
    int (*onPacket)(swServer *, swEventData *);
    void (*onClose)(swServer *serv, swDataHead *);
    void (*onConnect)(swServer *serv, swDataHead *);
    void (*onBufferFull)(swServer *serv, swDataHead *);
    void (*onBufferEmpty)(swServer *serv, swDataHead *);
    /**
     * Task Worker
     */
    int (*onTask)(swServer *serv, swEventData *data);
    int (*onFinish)(swServer *serv, swEventData *data);
    /**
     * Server method
     */
    int (*send)(swServer *serv, int session_id, void *data, uint32_t length);
    int (*sendfile)(swServer *serv, int session_id, const char *file, uint32_t l_file, off_t offset, size_t length);
    int (*sendwait)(swServer *serv, int session_id, void *data, uint32_t length);
    int (*close)(swServer *serv, int session_id, int reset);
    int (*notify)(swServer *serv, swConnection *conn, int event);
    int (*feedback)(swServer *serv, int session_id, int event);
    /**
     * Chunk control
     */
    int (*merge_chunk)(swServer *serv, int key, const char *data, size_t len);
    size_t (*get_packet)(swServer *serv, swEventData *req, char **data_ptr);
    /**
     * Hook
     */
    int (*dispatch_func)(swServer *, swConnection *, swSendData *);
};
typedef int (*swServer_dispatch_function)(swServer *, swConnection *, swSendData *);
int swServer_master_onAccept(swReactor *reactor, swEvent *event);
void swServer_master_onTimer(swTimer *timer, swTimer_node *tnode);
int swServer_master_send(swServer *serv, swSendData *_send);
int swServer_onFinish(swFactory *factory, swSendData *resp);
int swServer_onFinish2(swFactory *factory, swSendData *resp);
void swServer_init(swServer *serv);
void swServer_signal_init(swServer *serv);
int swServer_start(swServer *serv);
swListenPort* swServer_add_port(swServer *serv, enum swSocket_type type, const char *host, int port);
void swServer_close_port(swServer *serv, enum swBool_type only_stream_port);
int swServer_add_worker(swServer *serv, swWorker *worker);
int swServer_add_systemd_socket(swServer *serv);
int swServer_add_hook(swServer *serv, enum swServer_hook_type type, swCallback func, int push_back);
void swServer_call_hook(swServer *serv, enum swServer_hook_type type, void *arg);
void swServer_clear_timer(swServer *serv);
int swServer_create(swServer *serv);
int swServer_shutdown(swServer *serv);
/**
 * Map a connected fd to the swListenPort it was accepted on: the
 * connection slot records the listen socket's fd, and that listen slot's
 * `object` field stores the port.
 */
static sw_inline swListenPort* swServer_get_port(swServer *serv, int fd)
{
    sw_atomic_t listen_fd = serv->connection_list[fd].server_fd;
    return (swListenPort *) serv->connection_list[listen_fd].object;
}
/**
 * Acquire the global server lock; a no-op in single-thread mode where no
 * other thread can contend.
 */
static sw_inline void swServer_lock(swServer *serv)
{
    if (!serv->single_thread)
    {
        serv->lock.lock(&serv->lock);
    }
}
/**
 * Release the global server lock; a no-op in single-thread mode
 * (mirrors swServer_lock).
 */
static sw_inline void swServer_unlock(swServer *serv)
{
    if (!serv->single_thread)
    {
        serv->lock.unlock(&serv->lock);
    }
}
#define SW_MAX_SESSION_ID 0x1000000
/**
 * Whether an event type carries a datagram payload. (The "SNED" spelling
 * mirrors the enum constant defined elsewhere in the project.)
 */
static sw_inline int swEventData_is_dgram(uint8_t type)
{
    return type == SW_SERVER_EVENT_SNED_DGRAM ? SW_TRUE : SW_FALSE;
}
/**
 * Whether an event type belongs to a stream (connection-oriented)
 * session: data, connect/close, and flow-control notifications.
 */
static sw_inline int swEventData_is_stream(uint8_t type)
{
    if (type == SW_SERVER_EVENT_SEND_DATA
            || type == SW_SERVER_EVENT_CONNECT
            || type == SW_SERVER_EVENT_CLOSE
            || type == SW_SERVER_EVENT_PAUSE_RECV
            || type == SW_SERVER_EVENT_RESUME_RECV
            || type == SW_SERVER_EVENT_BUFFER_FULL
            || type == SW_SERVER_EVENT_BUFFER_EMPTY)
    {
        return SW_TRUE;
    }
    return SW_FALSE;
}
swPipe * swServer_get_pipe_object(swServer *serv, int pipe_fd);
void swServer_store_pipe_fd(swServer *serv, swPipe *p);
void swServer_store_listen_socket(swServer *serv);
int swServer_get_socket(swServer *serv, int port);
int swServer_worker_create(swServer *serv, swWorker *worker);
int swServer_worker_init(swServer *serv, swWorker *worker);
void swServer_worker_start(swServer *serv, swWorker *worker);
swString** swServer_create_worker_buffer(swServer *serv);
int swServer_create_task_worker(swServer *serv);
void swServer_reopen_log_file(swServer *serv);
void swTaskWorker_init(swServer *serv);
int swTaskWorker_onTask(swProcessPool *pool, swEventData *task);
int swTaskWorker_onFinish(swReactor *reactor, swEvent *event);
void swTaskWorker_onStart(swProcessPool *pool, int worker_id);
void swTaskWorker_onStop(swProcessPool *pool, int worker_id);
int swTaskWorker_large_pack(swEventData *task, const void *data, size_t data_len);
int swTaskWorker_finish(swServer *serv, const char *data, size_t data_len, int flags, swEventData *current_task);
#define swTask_type(task) ((task)->info.server_fd)
/**
 * Reload a "large" task payload that was spilled to a temp file by
 * swTaskWorker_large_pack. The swEventData payload is interpreted as a
 * swPacket_task {length, tmpfile}; the file contents are read into the
 * thread-local SwooleTG.buffer_stack (grown if needed) and that buffer is
 * returned. Returns NULL on open/alloc/short-read failure. Unless the
 * SW_TASK_PEEK flag is set, the temp file is unlinked after a successful
 * read (peeking leaves it in place for a later consumer).
 */
static sw_inline swString* swTaskWorker_large_unpack(swEventData *task_result)
{
    swPacket_task _pkg;
    memcpy(&_pkg, task_result->data, sizeof(_pkg));
    int tmp_file_fd = open(_pkg.tmpfile, O_RDONLY);
    if (tmp_file_fd < 0)
    {
        swSysWarn("open(%s) failed", _pkg.tmpfile);
        return NULL;
    }
    // Grow the shared scratch buffer only when it is too small for the payload.
    if (SwooleTG.buffer_stack->size < _pkg.length && swString_extend_align(SwooleTG.buffer_stack, _pkg.length) < 0)
    {
        close(tmp_file_fd);
        return NULL;
    }
    if (swoole_sync_readfile(tmp_file_fd, SwooleTG.buffer_stack->str, _pkg.length) != _pkg.length)
    {
        close(tmp_file_fd);
        return NULL;
    }
    close(tmp_file_fd);
    if (!(swTask_type(task_result) & SW_TASK_PEEK))
    {
        unlink(_pkg.tmpfile);
    }
    SwooleTG.buffer_stack->length = _pkg.length;
    return SwooleTG.buffer_stack;
}
#define SW_SERVER_MAX_FD_INDEX 0 //max connection socket
#define SW_SERVER_MIN_FD_INDEX 1 //min listen socket
// connection_list[0] => the largest fd
#define swServer_set_maxfd(serv,maxfd) (serv->connection_list[SW_SERVER_MAX_FD_INDEX].fd=maxfd)
#define swServer_get_maxfd(serv) (serv->connection_list[SW_SERVER_MAX_FD_INDEX].fd)
// connection_list[1] => the smallest fd
#define swServer_set_minfd(serv,maxfd) (serv->connection_list[SW_SERVER_MIN_FD_INDEX].fd=maxfd)
#define swServer_get_minfd(serv) (serv->connection_list[SW_SERVER_MIN_FD_INDEX].fd)
#define swServer_get_thread(serv, reactor_id) (&(serv->reactor_threads[reactor_id]))
/**
 * Bounds-checked lookup of the connection slot for a raw fd. The
 * unsigned cast rejects negative fds as well as oversized ones.
 * NOTE(review): fd == max_connection passes this check — confirm
 * connection_list has max_connection + 1 slots (or tighten to >=).
 */
static sw_inline swConnection* swServer_connection_get(swServer *serv, int fd)
{
    return ((uint32_t) fd > serv->max_connection) ? NULL : &serv->connection_list[fd];
}
/**
 * Session-table lookup: session ids map onto a fixed-size table by
 * modulo, so distinct ids can share a slot (callers validate liveness).
 */
static sw_inline swSession* swServer_get_session(swServer *serv, uint32_t session_id)
{
    uint32_t slot = session_id % SW_SESSION_LIST_SIZE;
    return serv->session_list + slot;
}
/**
 * Fetch the raw fd currently recorded for a session id (no liveness
 * validation — see swServer_connection_verify for that).
 */
static sw_inline int swServer_get_fd(swServer *serv, uint32_t session_id)
{
    uint32_t slot = session_id % SW_SESSION_LIST_SIZE;
    return serv->session_list[slot].fd;
}
/**
 * Map a global worker id onto the worker struct that owns it. Ids are
 * laid out contiguously: [0, worker_num) are event workers,
 * [worker_num, worker_num + task_worker_num) are task workers, and the
 * remainder up to the total are user workers. Returns NULL for
 * out-of-range ids.
 */
static sw_inline swWorker* swServer_get_worker(swServer *serv, uint16_t worker_id)
{
    uint32_t task_base = serv->worker_num;
    uint32_t user_base = task_base + serv->task_worker_num;
    uint32_t total = user_base + serv->user_worker_num;
    if (worker_id < task_base)
    {
        return &serv->gs->event_workers.workers[worker_id];
    }
    if (worker_id < user_base)
    {
        return &serv->gs->task_workers.workers[worker_id - task_base];
    }
    if (worker_id < total)
    {
        return &serv->user_workers[worker_id - user_base];
    }
    return NULL;
}
/**
 * Pick the worker id that should receive a packet from connection `fd`,
 * according to serv->dispatch_mode. A user dispatch_func (if set) takes
 * precedence; it may return SW_DISPATCH_RESULT_USERFUNC_FALLBACK to defer
 * to the built-in policy. Hash-based modes reduce the key modulo
 * worker_num; the preemptive branch returns an already-reduced id.
 */
static sw_inline int swServer_worker_schedule(swServer *serv, int fd, swSendData *data)
{
    uint32_t key = 0;
    if (serv->dispatch_func)
    {
        int id = serv->dispatch_func(serv, swServer_connection_get(serv, fd), data);
        if (id != SW_DISPATCH_RESULT_USERFUNC_FALLBACK)
        {
            return id;
        }
    }
    //polling mode
    if (serv->dispatch_mode == SW_DISPATCH_ROUND)
    {
        key = sw_atomic_fetch_add(&serv->worker_round_id, 1);
    }
    //Using the FD touch access to hash
    else if (serv->dispatch_mode == SW_DISPATCH_FDMOD)
    {
        key = fd;
    }
    //Using the IP touch access to hash
    else if (serv->dispatch_mode == SW_DISPATCH_IPMOD)
    {
        swConnection *conn = swServer_connection_get(serv, fd);
        //UDP: no connection object, fall back to hashing the fd
        if (conn == NULL)
        {
            key = fd;
        }
        //IPv4
        else if (conn->socket_type == SW_SOCK_TCP)
        {
            key = conn->info.addr.inet_v4.sin_addr.s_addr;
        }
        //IPv6: hash on the last 32 bits of the address
        else
        {
#ifdef HAVE_KQUEUE
            key = *(((uint32_t *) &conn->info.addr.inet_v6.sin6_addr) + 3);
#elif defined(_WIN32)
            key = conn->info.addr.inet_v6.sin6_addr.u.Word[3];
#else
            key = conn->info.addr.inet_v6.sin6_addr.s6_addr32[3];
#endif
        }
    }
    else if (serv->dispatch_mode == SW_DISPATCH_UIDMOD)
    {
        swConnection *conn = swServer_connection_get(serv, fd);
        // Fall back to the fd when no uid has been bound to the connection.
        if (conn == NULL || conn->uid == 0)
        {
            key = fd;
        }
        else
        {
            key = conn->uid;
        }
    }
    //Preemptive distribution
    else
    {
        uint32_t i;
        uint8_t found = 0;
        // Scan at most worker_num+1 candidates starting from the shared
        // round-robin cursor, looking for an idle worker; if none is idle,
        // keep the last candidate and raise scheduler_warning.
        for (i = 0; i < serv->worker_num + 1; i++)
        {
            key = sw_atomic_fetch_add(&serv->worker_round_id, 1) % serv->worker_num;
            if (serv->workers[key].status == SW_WORKER_IDLE)
            {
                found = 1;
                break;
            }
        }
        if (sw_unlikely(found == 0))
        {
            serv->scheduler_warning = 1;
        }
        swTraceLog(SW_TRACE_SERVER, "schedule=%d, round=%d", key, serv->worker_round_id);
        return key;
    }
    return key % serv->worker_num;
}
void swServer_worker_onStart(swServer *serv);
void swServer_worker_onStop(swServer *serv);
int swServer_http_static_handler_hit(swServer *serv, swHttpRequest *request, swConnection *conn);
int swServer_http_static_handler_add_location(swServer *serv, const char *location, size_t length);
int swWorker_onTask(swFactory *factory, swEventData *task);
void swWorker_stop(swWorker *worker);
/**
 * Resolve a session id straight to its connection object
 * (session table -> fd -> connection slot).
 */
static sw_inline swConnection *swWorker_get_connection(swServer *serv, int session_id)
{
    return swServer_connection_get(serv, swServer_get_fd(serv, session_id));
}
/**
 * Validate that a session id still refers to a live connection: the slot
 * must exist and be active, and both the session entry and the connection
 * must still carry this id (guards against fd reuse after close).
 * Returns NULL on any mismatch.
 */
static sw_inline swConnection *swServer_connection_verify_no_ssl(swServer *serv, uint32_t session_id)
{
    swSession *session = swServer_get_session(serv, session_id);
    swConnection *conn = swServer_connection_get(serv, session->fd);
    if (conn == NULL || conn->active == 0
            || session->id != session_id || conn->session_id != session_id)
    {
        return NULL;
    }
    return conn;
}
/**
 * Resolve and validate a session id, additionally requiring that SSL
 * connections have completed their handshake. Returns NULL (and logs a
 * notice) when the connection is SSL but not yet ready.
 */
static sw_inline swConnection *swServer_connection_verify(swServer *serv, int session_id)
{
    swConnection *conn = swServer_connection_verify_no_ssl(serv, session_id);
#ifdef SW_USE_OPENSSL
    if (conn && conn->ssl && !conn->ssl_ready)
    {
        swoole_error_log(SW_LOG_NOTICE, SW_ERROR_SSL_NOT_READY, "SSL not ready");
        return NULL;
    }
#endif
    return conn;
}
/**
 * Attach a newly accepted connection to the reactor for read events and,
 * unless delayed receive is configured, notify the worker process with a
 * CONNECT event. Returns SW_OK on success, SW_ERR on reactor failure.
 */
static sw_inline int swServer_connection_incoming(swServer *serv, swReactor *reactor, swConnection *conn)
{
#ifdef SW_USE_OPENSSL
    // NOTE(review): SSL sockets only register the read event here and skip
    // the onConnect notification — presumably deferred until the TLS
    // handshake completes; confirm in the SSL read path.
    if (conn->socket->ssl)
    {
        return reactor->add(reactor, conn->socket, SW_EVENT_READ);
    }
#endif
    //delay receive, wait resume command.
    if (serv->enable_delay_receive)
    {
        conn->socket->listen_wait = 1;
        return SW_OK;
    }
    if (reactor->add(reactor, conn->socket, SW_EVENT_READ) < 0)
    {
        return SW_ERR;
    }
    //notify worker process
    if (serv->onConnect)
    {
        return serv->notify(serv, conn, SW_SERVER_EVENT_CONNECT);
    }
    else
    {
        return SW_OK;
    }
}
void swServer_connection_each(swServer *serv, void (*callback)(swConnection *conn));
/**
 * Choose the pipe a reactor uses to push data for a session:
 * worker id = reactor_id + (session_id % reactor_pipe_num) * reactor_num,
 * i.e. each worker owns reactor_pipe_num pipes per reactor and the
 * session id selects one of them.
 */
static sw_inline swSocket* swServer_get_send_pipe(swServer *serv, int session_id, int reactor_id)
{
    int slot = session_id % serv->reactor_pipe_num;
    swWorker *owner = swServer_get_worker(serv, reactor_id + slot * serv->reactor_num);
    return owner->pipe_worker;
}
/**
 * Whether onConnect/onClose events can be delivered. Hashing dispatch
 * modes always allow them; for round-robin/queue/stream dispatch the
 * user must opt in via enable_unsafe_event
 * (see the swServer::enable_unsafe_event flag).
 */
static sw_inline uint8_t swServer_support_unsafe_events(swServer *serv)
{
    uint8_t mode = serv->dispatch_mode;
    if (mode == SW_DISPATCH_ROUND || mode == SW_DISPATCH_QUEUE || mode == SW_DISPATCH_STREAM)
    {
        return serv->enable_unsafe_event;
    }
    return 1;
}
// True when dispatch hashes on the connection (fd or client IP), i.e. a
// given connection is always handled by the same worker.
static sw_inline uint8_t swServer_dispatch_mode_is_mod(swServer *serv)
{
    return serv->dispatch_mode == SW_DISPATCH_FDMOD || serv->dispatch_mode == SW_DISPATCH_IPMOD;
}
// Accessor for the process-global server singleton stored in SwooleG.
static sw_inline swServer* sw_server()
{
    return (swServer *) SwooleG.serv;
}
#define swServer_support_send_yield swServer_dispatch_mode_is_mod
//------------------------------------Listen Port-------------------------------------------
void swPort_init(swListenPort *port);
void swPort_free(swListenPort *port);
int swPort_listen(swListenPort *ls);
void swPort_set_protocol(swServer *serv, swListenPort *ls);
int swPort_set_address(swListenPort *ls, int sock);
#ifdef SW_USE_OPENSSL
int swPort_enable_ssl_encrypt(swListenPort *ls);
#endif
void swPort_clear_protocol(swListenPort *ls);
//------------------------------------Worker Process-------------------------------------------
void swWorker_onStart(swServer *serv);
void swWorker_onStop(swServer *serv);
int swWorker_loop(swServer *serv, int worker_pti);
void swWorker_clean_pipe_buffer(swServer *serv);
int swWorker_send2reactor(swServer *serv, swEventData *ev_data, size_t sendn, int session_id);
int swWorker_send2worker(swWorker *dst_worker, const void *buf, int n, int flag);
void swWorker_signal_handler(int signo);
void swWorker_signal_init(void);
int swReactorThread_create(swServer *serv);
int swReactorThread_start(swServer *serv);
void swReactorThread_set_protocol(swServer *serv, swReactor *reactor);
void swReactorThread_join(swServer *serv);
void swReactorThread_free(swServer *serv);
int swReactorThread_close(swReactor *reactor, swSocket *_socket);
int swReactorThread_dispatch(swProtocol *proto, swSocket *_socket, char *data, uint32_t length);
int swReactorThread_send2worker(swServer *serv, swWorker *worker, void *data, size_t len);
int swReactorProcess_create(swServer *serv);
int swReactorProcess_start(swServer *serv);
void swReactorProcess_free(swServer *serv);
int swManager_start(swServer *serv);
pid_t swManager_spawn_user_worker(swServer *serv, swWorker* worker);
pid_t swManager_spawn_task_worker(swServer *serv, swWorker* worker);
int swManager_wait_other_worker(swProcessPool *pool, pid_t pid, int status);
void swManager_kill_user_worker(swServer *serv);
| 27.186718 | 116 | 0.662371 | [
"object"
] |
cfef5587de41f6d18e1ab510c06f923bce3d0cba | 55,527 | h | C | Release/include/cpprest/astreambuf.h | thomasouvre/casablanca | 9cc7758d714bebbc00d66bf9a49bb648e9a57c17 | [
"Apache-2.0"
] | null | null | null | Release/include/cpprest/astreambuf.h | thomasouvre/casablanca | 9cc7758d714bebbc00d66bf9a49bb648e9a57c17 | [
"Apache-2.0"
] | null | null | null | Release/include/cpprest/astreambuf.h | thomasouvre/casablanca | 9cc7758d714bebbc00d66bf9a49bb648e9a57c17 | [
"Apache-2.0"
] | 1 | 2021-11-10T12:33:50.000Z | 2021-11-10T12:33:50.000Z | /***
* ==++==
*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* ==--==
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* astreambuf.h
*
* Asynchronous I/O: stream buffer. This is an extension to the PPL concurrency features and therefore
* lives in the Concurrency namespace.
*
* For the latest on this and related APIs, please see http://casablanca.codeplex.com.
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/
#pragma once
#include <ios>
#include <memory>
#include <cstring>
#include <math.h>
#if (defined(_MSC_VER) && (_MSC_VER >= 1800))
#include <ppltasks.h>
namespace pplx = Concurrency;
#else
#include "pplx/pplxtasks.h"
#endif
#include "cpprest/basic_types.h"
#include "cpprest/asyncrt_utils.h"
#ifndef _CONCRT_H
#ifndef _LWRCASE_CNCRRNCY
#define _LWRCASE_CNCRRNCY
// Note to reader: we're using lower-case namespace names everywhere, but the 'Concurrency' namespace
// is capitalized for historical reasons. The alias let's us pretend that style issue doesn't exist.
namespace Concurrency { }
namespace concurrency = Concurrency;
#endif
#endif
#pragma warning(push)
// Suppress unreferenced formal parameter warning as they are required for documentation.
#pragma warning(disable : 4100)
// Suppress no-side-effect recursion warning, since it is safe and template-binding-dependent.
#pragma warning(disable : 4718)
#ifndef _MS_WINDOWS
// TFS 579628 - 1206: figure out how to avoid having this specialization for Linux (beware of 64-bit Linux)
namespace std {
    // Specialization of char_traits for unsigned char, forwarding to the
    // always-available char traits. Private inheritance hides the char
    // overloads while the `using` declarations re-export the pieces that
    // are safe to share (eof and the integral typedefs).
    template<>
    struct char_traits<unsigned char> : private char_traits<char>
    {
    public:
        typedef unsigned char char_type;
        using char_traits<char>::eof;
        using char_traits<char>::int_type;
        using char_traits<char>::off_type;
        using char_traits<char>::pos_type;
        // Length of a NUL-terminated unsigned char string.
        static size_t length(const unsigned char* str)
        {
            return char_traits<char>::length(reinterpret_cast<const char*>(str));
        }
        static void assign(unsigned char& left, const unsigned char& right) { left = right; }
        // Fill `n` elements starting at `left` with `value`.
        static unsigned char* assign(unsigned char* left, size_t n, unsigned char value)
        {
            return reinterpret_cast<unsigned char*>(char_traits<char>::assign(reinterpret_cast<char*>(left), n, static_cast<char>(value)));
        }
        // Copy `n` elements (memcpy-style semantics of the base traits).
        static unsigned char* copy(unsigned char* left, const unsigned char* right, size_t n)
        {
            return reinterpret_cast<unsigned char*>(char_traits<char>::copy(reinterpret_cast<char*>(left), reinterpret_cast<const char*>(right), n));
        }
        // Copy `n` elements where the ranges may overlap (memmove-style).
        static unsigned char* move(unsigned char* left, const unsigned char* right, size_t n)
        {
            return reinterpret_cast<unsigned char*>(char_traits<char>::move(reinterpret_cast<char*>(left), reinterpret_cast<const char*>(right), n));
        }
    };
}
#endif // _MS_WINDOWS
namespace Concurrency { namespace streams
{
/// <summary>
/// Extending the standard char_traits type with one that adds values and types
/// that are unique to "C++ REST SDK" streams.
/// </summary>
/// <typeparam name="_CharType">
/// The data type of the basic element of the stream.
/// </typeparam>
template<typename _CharType>
struct char_traits : std::char_traits<_CharType>
{
    /// <summary>
    /// Some synchronous functions will return this value if the operation
    /// requires an asynchronous call in a given situation.
    /// </summary>
    /// <returns>An <c>int_type</c> value which implies that an asynchronous call is required.</returns>
    /// <remarks>The sentinel is <c>eof()-1</c>, so it can never collide with <c>eof()</c> itself.</remarks>
    static typename std::char_traits<_CharType>::int_type requires_async() { return std::char_traits<_CharType>::eof()-1; }
};
namespace details {
/// <summary>
/// Stream buffer base class.
/// </summary>
template<typename _CharType>
class basic_streambuf
{
public:
typedef _CharType char_type;
typedef ::concurrency::streams::char_traits<_CharType> traits;
typedef typename traits::int_type int_type;
typedef typename traits::pos_type pos_type;
typedef typename traits::off_type off_type;
/// <summary>
/// Virtual constructor for stream buffers.
/// </summary>
virtual ~basic_streambuf() { }
/// <summary>
/// <c>can_read</c> is used to determine whether a stream buffer will support read operations (get).
/// </summary>
virtual bool can_read() const = 0;
/// <summary>
/// <c>can_write</c> is used to determine whether a stream buffer will support write operations (put).
/// </summary>
virtual bool can_write() const = 0;
/// <summary>
/// <c>can_seek</c> is used to determine whether a stream buffer supports seeking.
/// </summary>
virtual bool can_seek() const = 0;
/// <summary>
/// <c>has_size</c> is used to determine whether a stream buffer supports size().
/// </summary>
virtual bool has_size() const = 0;
/// <summary>
/// <c>is_eof</c> is used to determine whether a read head has reached the end of the buffer.
/// </summary>
virtual bool is_eof() const = 0;
/// <summary>
/// Gets the stream buffer size, if one has been set.
/// </summary>
/// <param name="direction">The direction of buffering (in or out)</param>
/// <returns>The size of the internal buffer (for the given direction).</returns>
/// <remarks>An implementation that does not support buffering will always return 0.</remarks>
virtual size_t buffer_size(std::ios_base::openmode direction = std::ios_base::in) const = 0;
/// <summary>
/// Sets the stream buffer implementation to buffer or not buffer.
/// </summary>
/// <param name="size">The size to use for internal buffering, 0 if no buffering should be done.</param>
/// <param name="direction">The direction of buffering (in or out)</param>
/// <remarks>An implementation that does not support buffering will silently ignore calls to this function and it will not have any effect on what is returned by subsequent calls to <see cref="::buffer_size method" />.</remarks>
virtual void set_buffer_size(size_t size, std::ios_base::openmode direction = std::ios_base::in) = 0;
/// <summary>
/// For any input stream, <c>in_avail</c> returns the number of characters that are immediately available
/// to be consumed without blocking. May be used in conjunction with <cref="::sbumpc method"/> to read data without
/// incurring the overhead of using tasks.
/// </summary>
virtual size_t in_avail() const = 0;
/// <summary>
/// Checks if the stream buffer is open.
/// </summary>
/// <remarks>No separation is made between open for reading and open for writing.</remarks>
virtual bool is_open() const = 0;
/// <summary>
/// Closes the stream buffer, preventing further read or write operations.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode = (std::ios_base::in | std::ios_base::out)) = 0;
/// <summary>
/// Closes the stream buffer with an exception.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
/// <param name="eptr">Pointer to the exception.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode, std::exception_ptr eptr) = 0;
/// <summary>
/// Writes a single character to the stream.
/// </summary>
/// <param name="ch">The character to write</param>
/// <returns>A <c>task</c> that holds the value of the character. This value is EOF if the write operation fails.</returns>
virtual pplx::task<int_type> putc(_CharType ch) = 0;
/// <summary>
/// Writes a number of characters to the stream.
/// </summary>
/// <param name="ptr">A pointer to the block of data to be written.</param>
/// <param name="count">The number of characters to write.</param>
/// <returns>A <c>task</c> that holds the number of characters actually written, either 'count' or 0.</returns>
virtual pplx::task<size_t> putn(const _CharType *ptr, size_t count) = 0;
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>A <c>task</c> that holds the value of the character. This value is EOF if the read fails.</returns>
virtual pplx::task<int_type> bumpc() = 0;
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>The value of the character. <c>-1</c> if the read fails. <c>-2</c> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual int_type sbumpc() = 0;
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>A <c>task</c> that holds the value of the byte. This value is EOF if the read fails.</returns>
virtual pplx::task<int_type> getc() = 0;
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>The value of the character. EOF if the read fails. <see cref="::requires_async method" /> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual int_type sgetc() = 0;
/// <summary>
/// Advances the read position, then returns the next character without advancing again.
/// </summary>
/// <returns>A <c>task</c> that holds the value of the character. This value is EOF if the read fails.</returns>
virtual pplx::task<int_type> nextc() = 0;
/// <summary>
/// Retreats the read position, then returns the current character without advancing.
/// </summary>
/// <returns>A <c>task</c> that holds the value of the character. This value is EOF if the read fails, <c>requires_async</c> if an asynchronous read is required</returns>
virtual pplx::task<int_type> ungetc() = 0;
/// <summary>
/// Reads up to a given number of characters from the stream.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>A <c>task</c> that holds the number of characters read. This value is 0 if the end of the stream is reached.</returns>
virtual pplx::task<size_t> getn(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
/// <summary>
/// Copies up to a given number of characters from the stream, synchronously.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>The number of characters copied. 0 if the end of the stream is reached or an asynchronous read is required.</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual size_t scopy(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
/// <summary>
/// Gets the current read or write position in the stream.
/// </summary>
/// <param name="direction">The I/O direction to seek (see remarks)</param>
/// <returns>The current position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors.
/// For such streams, the direction parameter defines whether to move the read or the write cursor.</remarks>
virtual pos_type getpos(std::ios_base::openmode direction) const = 0;
/// <summary>
/// Gets the size of the stream, if known. Calls to <c>has_size</c> will determine whether
/// the result of <c>size</c> can be relied on.
/// </summary>
virtual utility::size64_t size() const = 0;
/// <summary>
/// Seeks to the given position.
/// </summary>
/// <param name="pos">The offset from the beginning of the stream.</param>
/// <param name="direction">The I/O direction to seek (see remarks).</param>
/// <returns>The position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors. For such streams, the direction parameter defines whether to move the read or the write cursor.</remarks>
virtual pos_type seekpos(pos_type pos, std::ios_base::openmode direction) = 0;
/// <summary>
/// Seeks to a position given by a relative offset.
/// </summary>
/// <param name="offset">The relative position to seek to</param>
/// <param name="way">The starting point (beginning, end, current) for the seek.</param>
/// <param name="mode">The I/O direction to seek (see remarks)</param>
/// <returns>The position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors.
/// For such streams, the mode parameter defines whether to move the read or the write cursor.</remarks>
virtual pos_type seekoff(off_type offset, std::ios_base::seekdir way, std::ios_base::openmode mode) = 0;
/// <summary>
/// For output streams, flush any internally buffered data to the underlying medium.
/// </summary>
/// <returns>A <c>task</c> that returns <c>true</c> if the sync succeeds, <c>false</c> if not.</returns>
virtual pplx::task<void> sync() = 0;
//
// Efficient read and write.
//
// The following routines are intended to be used for more efficient, copy-free, reading and
// writing of data from/to the stream. Rather than having the caller provide a buffer into which
// data is written or from which it is read, the stream buffer provides a pointer directly to the
// internal data blocks that it is using. Since not all stream buffers use internal data structures
// to copy data, the functions may not be supported by all. An application that wishes to use this
// functionality should therefore first try them and check for failure to support. If there is
// such failure, the application should fall back on the copying interfaces (putn / getn)
//
/// <summary>
/// Allocates a contiguous memory block and returns it.
/// </summary>
/// <param name="count">The number of characters to allocate.</param>
/// <returns>A pointer to a block to write to, null if the stream buffer implementation does not support alloc/commit.</returns>
virtual _CharType* alloc(_In_ size_t count) = 0;
/// <summary>
/// Submits a block already allocated by the stream buffer.
/// </summary>
/// <param name="count">The number of characters to be committed.</param>
virtual void commit(_In_ size_t count) = 0;
/// <summary>
/// Gets a pointer to the next already allocated contiguous block of data.
/// </summary>
/// <param name="ptr">A reference to a pointer variable that will hold the address of the block on success.</param>
/// <param name="count">The number of contiguous characters available at the address in 'ptr.'</param>
/// <returns><c>true</c> if the operation succeeded, <c>false</c> otherwise.</returns>
/// <remarks>
/// A return of false does not necessarily indicate that a subsequent read operation would fail, only that
/// there is no block to return immediately or that the stream buffer does not support the operation.
/// The stream buffer may not de-allocate the block until <see cref="::release method" /> is called.
/// If the end of the stream is reached, the function will return <c>true</c>, a null pointer, and a count of zero;
/// a subsequent read will not succeed.
/// </remarks>
virtual bool acquire(_Out_ _CharType*& ptr, _Out_ size_t& count) = 0;
/// <summary>
/// Releases a block of data acquired using <see cref="::acquire method"/>. This frees the stream buffer to de-allocate the
/// memory, if it so desires. Move the read position ahead by the count.
/// </summary>
/// <param name="ptr">A pointer to the block of data to be released.</param>
/// <param name="count">The number of characters that were read.</param>
virtual void release(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
/// <summary>
/// Retrieves the stream buffer exception_ptr if it has been set.
/// </summary>
/// <returns>Pointer to the exception, if it has been set; otherwise, <c>nullptr</c> will be returned</returns>
virtual std::exception_ptr exception() const = 0;
};
/// <summary>
/// Intermediate stream-buffer base class: tracks read/write capability, EOF,
/// a sticky exception pointer, and alloc/commit pairing on behalf of concrete
/// implementations. The public operations wrap the protected, underscore-prefixed
/// primitives (_putc, _getn, _sync, ...) with state and exception checks.
/// </summary>
template<typename _CharType>
class streambuf_state_manager : public basic_streambuf<_CharType>, public std::enable_shared_from_this<streambuf_state_manager<_CharType>>
{
public:
typedef typename details::basic_streambuf<_CharType>::traits traits;
typedef typename details::basic_streambuf<_CharType>::int_type int_type;
typedef typename details::basic_streambuf<_CharType>::pos_type pos_type;
typedef typename details::basic_streambuf<_CharType>::off_type off_type;
/// <summary>
/// <c>can_read</c> is used to determine whether a stream buffer will support read operations (get).
/// </summary>
virtual bool can_read() const
{
return m_stream_can_read;
}
/// <summary>
/// <c>can_write</c> is used to determine whether a stream buffer will support write operations (put).
/// </summary>
virtual bool can_write() const
{
return m_stream_can_write;
}
/// <summary>
/// Checks if the stream buffer is open.
/// </summary>
/// <remarks>No separation is made between open for reading and open for writing.</remarks>
virtual bool is_open() const
{
return can_read() || can_write();
}
/// <summary>
/// Closes the stream buffer, preventing further read or write operations.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode = std::ios_base::in | std::ios_base::out)
{
pplx::task<void> closeOp = pplx::task_from_result();
if (mode & std::ios_base::in && can_read()) {
closeOp = _close_read();
}
// After the flush_internal task completed, "this" object may have been destroyed,
// accessing the members is invalid, use shared_from_this to avoid access violation exception.
auto this_ptr = std::static_pointer_cast<streambuf_state_manager>(this->shared_from_this());
if (mode & std::ios_base::out && can_write()) {
if (closeOp.is_done())
closeOp = closeOp && _close_write().then([this_ptr]{}); // passing down exceptions from closeOp
else
closeOp = closeOp.then([this_ptr] { return this_ptr->_close_write().then([this_ptr]{}); });
}
return closeOp;
}
/// <summary>
/// Closes the stream buffer with an exception.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
/// <param name="eptr">Pointer to the exception.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode, std::exception_ptr eptr)
{
// Only the first recorded exception is kept; later ones do not overwrite it.
if (m_currentException == nullptr)
m_currentException = eptr;
return close(mode);
}
/// <summary>
/// <c>is_eof</c> is used to determine whether a read head has reached the end of the buffer.
/// </summary>
virtual bool is_eof() const
{
return m_stream_read_eof;
}
/// <summary>
/// Writes a single character to the stream.
/// </summary>
/// <param name="ch">The character to write</param>
/// <returns>The value of the character. EOF if the write operation fails</returns>
virtual pplx::task<int_type> putc(_CharType ch)
{
if (!can_write())
return create_exception_checked_value_task<int_type>(traits::eof());
return create_exception_checked_task<int_type>(_putc(ch), [](int_type) {
return false; // no EOF for write
});
}
/// <summary>
/// Writes a number of characters to the stream.
/// </summary>
/// <param name="ptr">A pointer to the block of data to be written.</param>
/// <param name="count">The number of characters to write.</param>
/// <returns>The number of characters actually written, either 'count' or 0.</returns>
virtual pplx::task<size_t> putn(const _CharType *ptr, size_t count)
{
if (!can_write())
return create_exception_checked_value_task<size_t>(0);
if (count == 0)
return pplx::task_from_result<size_t>(0);
return create_exception_checked_task<size_t>(_putn(ptr, count), [](size_t) {
return false; // no EOF for write
});
}
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>The value of the character. EOF if the read fails.</returns>
virtual pplx::task<int_type> bumpc()
{
if (!can_read())
return create_exception_checked_value_task<int_type>(streambuf_state_manager<_CharType>::traits::eof());
return create_exception_checked_task<int_type>(_bumpc(), [](int_type val) {
return val == streambuf_state_manager<_CharType>::traits::eof();
});
}
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>The value of the character. <c>-1</c> if the read fails. <c>-2</c> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual int_type sbumpc()
{
// A previously recorded exception is surfaced on the next synchronous read.
if ( !(m_currentException == nullptr) )
std::rethrow_exception(m_currentException);
if (!can_read())
return traits::eof();
return check_sync_read_eof(_sbumpc());
}
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>The value of the byte. EOF if the read fails.</returns>
virtual pplx::task<int_type> getc()
{
if (!can_read())
return create_exception_checked_value_task<int_type>(traits::eof());
return create_exception_checked_task<int_type>(_getc(), [](int_type val) {
return val == streambuf_state_manager<_CharType>::traits::eof();
});
}
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>The value of the character. EOF if the read fails. <see cref="::requires_async method" /> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual int_type sgetc()
{
if ( !(m_currentException == nullptr) )
std::rethrow_exception(m_currentException);
if (!can_read())
return traits::eof();
return check_sync_read_eof(_sgetc());
}
/// <summary>
/// Advances the read position, then returns the next character without advancing again.
/// </summary>
/// <returns>The value of the character. EOF if the read fails.</returns>
virtual pplx::task<int_type> nextc()
{
if (!can_read())
return create_exception_checked_value_task<int_type>(traits::eof());
return create_exception_checked_task<int_type>(_nextc(), [](int_type val) {
return val == streambuf_state_manager<_CharType>::traits::eof();
});
}
/// <summary>
/// Retreats the read position, then returns the current character without advancing.
/// </summary>
/// <returns>The value of the character. EOF if the read fails. <see cref="::requires_async method" /> if an asynchronous read is required</returns>
virtual pplx::task<int_type> ungetc()
{
if (!can_read())
return create_exception_checked_value_task<int_type>(traits::eof());
return create_exception_checked_task<int_type>(_ungetc(), [](int_type) {
return false;
});
}
/// <summary>
/// Reads up to a given number of characters from the stream.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>The number of characters read. 0 if the end of the stream is reached.</returns>
virtual pplx::task<size_t> getn(_Out_writes_(count) _CharType *ptr, _In_ size_t count)
{
if (!can_read())
return create_exception_checked_value_task<size_t>(0);
if (count == 0)
return pplx::task_from_result<size_t>(0);
return create_exception_checked_task<size_t>(_getn(ptr, count), [](size_t val) {
return val == 0;
});
}
/// <summary>
/// Copies up to a given number of characters from the stream, synchronously.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>The number of characters copied. 0 if the end of the stream is reached or an asynchronous read is required.</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual size_t scopy(_Out_writes_(count) _CharType *ptr, _In_ size_t count)
{
if ( !(m_currentException == nullptr) )
std::rethrow_exception(m_currentException);
if (!can_read())
return 0;
return _scopy(ptr, count);
}
/// <summary>
/// For output streams, flush any internally buffered data to the underlying medium.
/// </summary>
/// <returns><c>true</c> if the flush succeeds, <c>false</c> if not</returns>
virtual pplx::task<void> sync()
{
if (!can_write())
{
if (m_currentException == nullptr)
return pplx::task_from_result();
else
return pplx::task_from_exception<void>(m_currentException);
}
return create_exception_checked_task<bool>(_sync(), [](bool) {
return false;
}).then([](bool){});
}
/// <summary>
/// Retrieves the stream buffer exception_ptr if it has been set.
/// </summary>
/// <returns>Pointer to the exception, if it has been set; otherwise, <c>nullptr</c> will be returned.</returns>
virtual std::exception_ptr exception() const
{
return m_currentException;
}
/// <summary>
/// Allocates a contiguous memory block and returns it.
/// </summary>
/// <param name="count">The number of characters to allocate.</param>
/// <returns>A pointer to a block to write to, null if the stream buffer implementation does not support alloc/commit.</returns>
/// <remarks>This is intended as an advanced API to be used only when it is important to avoid extra copies.</remarks>
_CharType* alloc(size_t count)
{
// alloc/commit must be strictly paired; a second alloc before commit is an error.
if (m_alloced)
throw std::logic_error("The buffer is already allocated, this maybe caused by overlap of stream read or write");
_CharType* alloc_result = _alloc(count);
if (alloc_result)
m_alloced = true;
return alloc_result;
}
/// <summary>
/// Submits a block already allocated by the stream buffer.
/// </summary>
/// <param name="count">The number of characters to be committed.</param>
/// <remarks>This is intended as an advanced API to be used only when it is important to avoid extra copies.</remarks>
void commit(size_t count)
{
if (!m_alloced)
throw std::logic_error("The buffer needs to allocate first");
_commit(count);
m_alloced = false;
}
#pragma region dependencies
public:
virtual bool can_seek() const = 0;
virtual bool has_size() const = 0;
virtual utility::size64_t size() const { return 0; }
virtual size_t buffer_size(std::ios_base::openmode direction = std::ios_base::in) const = 0;
virtual void set_buffer_size(size_t size, std::ios_base::openmode direction = std::ios_base::in) = 0;
virtual size_t in_avail() const = 0;
virtual pos_type getpos(std::ios_base::openmode direction) const = 0;
virtual pos_type seekpos(pos_type pos, std::ios_base::openmode direction) = 0;
virtual pos_type seekoff(off_type offset, std::ios_base::seekdir way, std::ios_base::openmode mode) = 0;
virtual bool acquire(_Out_writes_(count) _CharType*& ptr, _In_ size_t& count) = 0;
virtual void release(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
protected:
// The actual I/O primitives supplied by concrete buffer implementations;
// the public operations above wrap them with state and exception management.
virtual pplx::task<int_type> _putc(_CharType ch) = 0;
virtual pplx::task<size_t> _putn(const _CharType *ptr, size_t count) = 0;
virtual pplx::task<int_type> _bumpc() = 0;
virtual int_type _sbumpc() = 0;
virtual pplx::task<int_type> _getc() = 0;
virtual int_type _sgetc() = 0;
virtual pplx::task<int_type> _nextc() = 0;
virtual pplx::task<int_type> _ungetc() = 0;
virtual pplx::task<size_t> _getn(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
virtual size_t _scopy(_Out_writes_(count) _CharType *ptr, _In_ size_t count) = 0;
virtual pplx::task<bool> _sync() = 0;
virtual _CharType* _alloc(size_t count) = 0;
virtual void _commit(size_t count) = 0;
/// <summary>
/// The real read head close operation, implementation should override it if there is any resource to be released.
/// </summary>
virtual pplx::task<void> _close_read()
{
m_stream_can_read = false;
return pplx::task_from_result();
}
/// <summary>
/// The real write head close operation, implementation should override it if there is any resource to be released.
/// </summary>
virtual pplx::task<void> _close_write()
{
m_stream_can_write = false;
return pplx::task_from_result();
}
#pragma endregion
protected:
streambuf_state_manager(std::ios_base::openmode mode)
{
m_stream_can_read = (mode & std::ios_base::in) != 0;
m_stream_can_write = (mode & std::ios_base::out) != 0;
m_stream_read_eof = false;
m_alloced = false;
}
std::exception_ptr m_currentException;
// The in/out mode for the buffer
bool m_stream_can_read, m_stream_can_write, m_stream_read_eof, m_alloced;
private:
// Wraps a ready value in a task, or the stored exception if one has been recorded.
template<typename _CharType1>
pplx::task<_CharType1> create_exception_checked_value_task(const _CharType1 &val) const
{
if (this->exception() == nullptr)
return pplx::task_from_result<_CharType1>(static_cast<_CharType1>(val));
else
return pplx::task_from_exception<_CharType1>(this->exception());
}
// Set exception and eof states for async read
template<typename _CharType1>
pplx::task<_CharType1> create_exception_checked_task(pplx::task<_CharType1> result, std::function<bool(_CharType1)> eof_test, std::ios_base::openmode mode = std::ios_base::in | std::ios_base::out)
{
auto thisPointer = this->shared_from_this();
auto func1 = [=](pplx::task<_CharType1> t1) -> pplx::task<_CharType1> {
try {
thisPointer->m_stream_read_eof = eof_test(t1.get());
} catch (...) {
// The underlying operation threw: record the exception, close the buffer,
// and surface the stored exception to the caller.
thisPointer->close(mode, std::current_exception()).get();
return pplx::task_from_exception<_CharType1>(thisPointer->exception(), pplx::task_options());
}
if (thisPointer->m_stream_read_eof && !(thisPointer->exception() == nullptr))
return pplx::task_from_exception<_CharType1>(thisPointer->exception(), pplx::task_options());
return t1;
};
if ( result.is_done() )
{
// If the data is already available, we should avoid scheduling a continuation, so we do it inline.
return func1(result);
}
else
{
return result.then(func1);
}
}
// Set eof states for sync read
int_type check_sync_read_eof(int_type ch)
{
m_stream_read_eof = ch == traits::eof();
return ch;
}
};
} // namespace details
// Forward declarations
template<typename _CharType> class basic_istream;
template<typename _CharType> class basic_ostream;
/// <summary>
/// Reference-counted stream buffer.
/// </summary>
/// <typeparam name="_CharType">
/// The data type of the basic element of the <c>streambuf.</c>
/// </typeparam>
/// <typeparam name="_CharType2">
/// The character type of other <c>streambuf</c> instantiations granted friendship, enabling conversion between them.
/// </typeparam>
/// <remarks>
/// The rationale for refcounting is discussed in the accompanying design
/// documentation.
/// </remarks>
template<typename _CharType>
class streambuf : public details::basic_streambuf<_CharType>
{
public:
typedef typename details::basic_streambuf<_CharType>::traits traits;
typedef typename details::basic_streambuf<_CharType>::int_type int_type;
typedef typename details::basic_streambuf<_CharType>::pos_type pos_type;
typedef typename details::basic_streambuf<_CharType>::off_type off_type;
typedef typename details::basic_streambuf<_CharType>::char_type char_type;
template <typename _CharType2> friend class streambuf;
/// <summary>
/// Constructor.
/// </summary>
/// <param name="ptr">A pointer to the concrete stream buffer implementation.</param>
streambuf(_In_ const std::shared_ptr<details::basic_streambuf<_CharType>> &ptr) : m_buffer(ptr) {}
/// <summary>
/// Default constructor.
/// </summary>
/// <remarks>A default-constructed streambuf holds no implementation; forwarding operations will throw via <c>get_base</c>.</remarks>
streambuf() { }
/// <summary>
/// Copy constructor.
/// </summary>
/// <param name="other">The source object.</param>
streambuf(const streambuf &other) : m_buffer(other.m_buffer) { }
/// <summary>
/// Converter Constructor.
/// </summary>
/// <typeparam name="AlterCharType">
/// The data type of the basic element of the source <c>streambuf</c>.
/// </typeparam>
/// <param name="other">The source buffer to be converted.</param>
template <typename AlterCharType>
streambuf(const streambuf<AlterCharType> &other) :
m_buffer(std::static_pointer_cast<details::basic_streambuf<_CharType>>(std::static_pointer_cast<void>(other.m_buffer)))
{
// The underlying implementation object is shared between the two character
// types; the static_assert restricts conversion to layout-compatible integral types.
static_assert(std::is_same<pos_type, typename details::basic_streambuf<AlterCharType>::pos_type>::value
&& std::is_same<off_type, typename details::basic_streambuf<AlterCharType>::off_type>::value
&& std::is_integral<_CharType>::value && std::is_integral<AlterCharType>::value
&& std::is_integral<int_type>::value && std::is_integral<typename details::basic_streambuf<AlterCharType>::int_type>::value
&& sizeof(_CharType) == sizeof(AlterCharType)
&& sizeof(int_type) == sizeof(typename details::basic_streambuf<AlterCharType>::int_type),
"incompatible stream character types");
}
/// <summary>
/// Move constructor.
/// </summary>
/// <param name="other">The source object.</param>
streambuf(streambuf &&other) : m_buffer(std::move(other.m_buffer)) { }
/// <summary>
/// Assignment operator.
/// </summary>
/// <param name="other">The source object.</param>
/// <returns>A reference to the <c>streambuf</c> object that contains the result of the assignment.</returns>
streambuf & operator =(const streambuf &other) { m_buffer = other.m_buffer; return *this; }
/// <summary>
/// Move operator.
/// </summary>
/// <param name="other">The source object.</param>
/// <returns>A reference to the <c>streambuf</c> object that contains the result of the assignment.</returns>
streambuf & operator =(streambuf &&other) { m_buffer = std::move(other.m_buffer); return *this; }
/// <summary>
/// Constructs an input stream head for this stream buffer.
/// </summary>
/// <returns><c>basic_istream</c>.</returns>
concurrency::streams::basic_istream<_CharType> create_istream() const
{
if (!can_read()) throw std::runtime_error("stream buffer not set up for input of data");
return concurrency::streams::basic_istream<_CharType>(*this);
}
/// <summary>
/// Constructs an output stream for this stream buffer.
/// </summary>
/// <returns>basic_ostream</returns>
concurrency::streams::basic_ostream<_CharType> create_ostream() const
{
if (!can_write()) throw std::runtime_error("stream buffer not set up for output of data");
return concurrency::streams::basic_ostream<_CharType>(*this);
}
/// <summary>
/// Checks if the stream buffer has been initialized or not.
/// </summary>
operator bool() const { return (bool)m_buffer; }
/// <summary>
/// Destructor
/// </summary>
virtual ~streambuf() { }
/// <summary>
/// Returns the underlying concrete stream buffer implementation.
/// </summary>
/// <returns>The shared pointer held by this wrapper.</returns>
/// <remarks>Throws <c>std::invalid_argument</c> when this wrapper was default-constructed and holds no buffer.</remarks>
std::shared_ptr<details::basic_streambuf<_CharType>> get_base() const
{
    if (m_buffer == nullptr)
    {
        throw std::invalid_argument("Invalid streambuf object");
    }
    return m_buffer;
}
#pragma region Function forwarding
/// <summary>
/// <c>can_read</c> is used to determine whether a stream buffer will support read operations (get).
/// </summary>
virtual bool can_read() const { return get_base()->can_read(); }
/// <summary>
/// <c>can_write</c> is used to determine whether a stream buffer will support write operations (put).
/// </summary>
virtual bool can_write() const { return get_base()->can_write(); }
/// <summary>
/// <c>can_seek</c> is used to determine whether a stream buffer supports seeking.
/// </summary>
virtual bool can_seek() const { return get_base()->can_seek(); }
/// <summary>
/// <c>has_size</c> is used to determine whether a stream buffer supports size().
/// </summary>
virtual bool has_size() const { return get_base()->has_size(); }
/// <summary>
/// Gets the size of the stream, if known. Calls to <c>has_size</c> will determine whether
/// the result of <c>size</c> can be relied on.
/// </summary>
virtual utility::size64_t size() const { return get_base()->size(); }
/// <summary>
/// Gets the stream buffer size, if one has been set.
/// </summary>
/// <param name="direction">The direction of buffering (in or out)</param>
/// <returns>The size of the internal buffer (for the given direction).</returns>
/// <remarks>An implementation that does not support buffering will always return 0.</remarks>
virtual size_t buffer_size(std::ios_base::openmode direction = std::ios_base::in) const { return get_base()->buffer_size(direction); }
/// <summary>
/// Sets the stream buffer implementation to buffer or not buffer.
/// </summary>
/// <param name="size">The size to use for internal buffering, 0 if no buffering should be done.</param>
/// <param name="direction">The direction of buffering (in or out)</param>
/// <remarks>An implementation that does not support buffering will silently ignore calls to this function and it will not have any effect on what is returned by subsequent calls to <see cref="::buffer_size method" />.</remarks>
virtual void set_buffer_size(size_t size, std::ios_base::openmode direction = std::ios_base::in) { get_base()->set_buffer_size(size,direction); }
/// <summary>
/// For any input stream, <c>in_avail</c> returns the number of characters that are immediately available
/// to be consumed without blocking. May be used in conjunction with <cref="::sbumpc method"/> to read data without
/// incurring the overhead of using tasks.
/// </summary>
virtual size_t in_avail() const { return get_base()->in_avail(); }
/// <summary>
/// Checks if the stream buffer is open.
/// </summary>
/// <remarks>No separation is made between open for reading and open for writing.</remarks>
virtual bool is_open() const { return get_base()->is_open(); }
/// <summary>
/// <c>is_eof</c> is used to determine whether a read head has reached the end of the buffer.
/// </summary>
virtual bool is_eof() const { return get_base()->is_eof(); }
/// <summary>
/// Closes the stream buffer, preventing further read or write operations.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode = (std::ios_base::in | std::ios_base::out))
{
    // The explicit null check is preserved to work around a Dev10 compiler crash.
    auto impl = get_base();
    if (impl)
    {
        return impl->close(mode);
    }
    return pplx::task_from_result();
}
/// <summary>
/// Closes the stream buffer with an exception.
/// </summary>
/// <param name="mode">The I/O mode (in or out) to close for.</param>
/// <param name="eptr">Pointer to the exception.</param>
virtual pplx::task<void> close(std::ios_base::openmode mode, std::exception_ptr eptr)
{
// We preserve the check here to workaround a Dev10 compiler crash
auto buffer = get_base();
return buffer ? buffer->close(mode, eptr) : pplx::task_from_result();
}
/// <summary>
/// Writes a single character to the stream.
/// </summary>
/// <param name="ch">The character to write</param>
/// <returns>The value of the character. EOF if the write operation fails</returns>
virtual pplx::task<int_type> putc(_CharType ch)
{
return get_base()->putc(ch);
}
/// <summary>
/// Allocates a contiguous memory block and returns it.
/// </summary>
/// <param name="count">The number of characters to allocate.</param>
/// <returns>A pointer to a block to write to, null if the stream buffer implementation does not support alloc/commit.</returns>
virtual _CharType* alloc(size_t count)
{
return get_base()->alloc(count);
}
/// <summary>
/// Submits a block already allocated by the stream buffer.
/// </summary>
/// <param name="count">The number of characters to be committed.</param>
virtual void commit(size_t count)
{
get_base()->commit(count);
}
/// <summary>
/// Gets a pointer to the next already allocated contiguous block of data.
/// </summary>
/// <param name="ptr">A reference to a pointer variable that will hold the address of the block on success.</param>
/// <param name="count">The number of contiguous characters available at the address in 'ptr.'</param>
/// <returns><c>true</c> if the operation succeeded, <c>false</c> otherwise.</returns>
/// <remarks>
/// A <c>false</c> return does not necessarily mean a subsequent read would fail; it may only mean
/// that no block is available right now, or that the buffer does not support the operation.
/// The stream buffer may not de-allocate the block until <see cref="::release method" /> is called.
/// At end of stream the function returns <c>true</c> with a null pointer and a zero count;
/// a subsequent read will not succeed.
/// </remarks>
virtual bool acquire(_Out_ _CharType*& ptr, _Out_ size_t& count)
{
    // Give the out-parameters well-defined values before forwarding, so callers
    // observe a null/zero result even if the implementation leaves them untouched.
    count = 0;
    ptr = nullptr;
    auto buffer = get_base();
    return buffer->acquire(ptr, count);
}
/// <summary>
/// Releases a block of data acquired using <see cref="::acquire method"/>. This frees the stream buffer to de-allocate the
/// memory, if it so desires. Move the read position ahead by the count.
/// </summary>
/// <param name="ptr">A pointer to the block of data to be released.</param>
/// <param name="count">The number of characters that were read.</param>
virtual void release(_Out_writes_(count) _CharType *ptr, _In_ size_t count)
{
get_base()->release(ptr, count);
}
/// <summary>
/// Writes a number of characters to the stream.
/// </summary>
/// <param name="ptr">A pointer to the block of data to be written.</param>
/// <param name="count">The number of characters to write.</param>
/// <returns>The number of characters actually written, either 'count' or 0.</returns>
virtual pplx::task<size_t> putn(const _CharType *ptr, size_t count)
{
return get_base()->putn(ptr, count);
}
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>The value of the character. EOF if the read fails.</returns>
virtual pplx::task<int_type> bumpc()
{
return get_base()->bumpc();
}
/// <summary>
/// Reads a single character from the stream and advances the read position.
/// </summary>
/// <returns>The value of the character. <c>-1</c> if the read fails. <c>-2</c> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual typename details::basic_streambuf<_CharType>::int_type sbumpc()
{
return get_base()->sbumpc();
}
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>The value of the byte. EOF if the read fails.</returns>
virtual pplx::task<int_type> getc()
{
return get_base()->getc();
}
/// <summary>
/// Reads a single character from the stream without advancing the read position.
/// </summary>
/// <returns>The value of the character. EOF if the read fails. <see cref="::requires_async method" /> if an asynchronous read is required</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual typename details::basic_streambuf<_CharType>::int_type sgetc()
{
return get_base()->sgetc();
}
/// <summary>
/// Advances the read position, then returns the next character without advancing again.
/// </summary>
/// <returns>The value of the character. EOF if the read fails.</returns>
pplx::task<int_type> nextc()
{
return get_base()->nextc();
}
/// <summary>
/// Retreats the read position, then returns the current character without advancing.
/// </summary>
/// <returns>The value of the character. EOF if the read fails. <see cref="::requires_async method" /> if an asynchronous read is required</returns>
pplx::task<int_type> ungetc()
{
return get_base()->ungetc();
}
/// <summary>
/// Reads up to a given number of characters from the stream.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>The number of characters read. 0 if the end of the stream is reached.</returns>
virtual pplx::task<size_t> getn(_Out_writes_(count) _CharType *ptr, _In_ size_t count)
{
    auto buf = get_base();
    return buf->getn(ptr, count);
}
/// <summary>
/// Synchronously copies up to a given number of characters from the stream.
/// </summary>
/// <param name="ptr">The address of the target memory area.</param>
/// <param name="count">The maximum number of characters to read.</param>
/// <returns>The number of characters copied. 0 if the end of the stream is reached or an asynchronous read is required.</returns>
/// <remarks>This is a synchronous operation, but is guaranteed to never block.</remarks>
virtual size_t scopy(_Out_writes_(count) _CharType *ptr, _In_ size_t count)
{
    auto buf = get_base();
    return buf->scopy(ptr, count);
}
/// <summary>
/// Gets the current read or write position in the stream.
/// </summary>
/// <param name="direction">The I/O direction to seek (see remarks)</param>
/// <returns>The current position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors.
/// For such streams, the direction parameter defines whether to move the read or the write cursor.</remarks>
virtual typename details::basic_streambuf<_CharType>::pos_type getpos(std::ios_base::openmode direction) const
{
    auto buf = get_base();
    return buf->getpos(direction);
}
/// <summary>
/// Seeks to the given absolute position.
/// </summary>
/// <param name="pos">The offset from the beginning of the stream.</param>
/// <param name="direction">The I/O direction to seek (see remarks).</param>
/// <returns>The position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors. For such streams, the direction parameter defines whether to move the read or the write cursor.</remarks>
virtual typename details::basic_streambuf<_CharType>::pos_type seekpos(typename details::basic_streambuf<_CharType>::pos_type pos, std::ios_base::openmode direction)
{
    auto buf = get_base();
    return buf->seekpos(pos, direction);
}
/// <summary>
/// Seeks to a position given by a relative offset.
/// </summary>
/// <param name="offset">The relative position to seek to</param>
/// <param name="way">The starting point (beginning, end, current) for the seek.</param>
/// <param name="mode">The I/O direction to seek (see remarks)</param>
/// <returns>The position. EOF if the operation fails.</returns>
/// <remarks>Some streams may have separate write and read cursors.
/// For such streams, the mode parameter defines whether to move the read or the write cursor.</remarks>
virtual typename details::basic_streambuf<_CharType>::pos_type seekoff(typename details::basic_streambuf<_CharType>::off_type offset, std::ios_base::seekdir way, std::ios_base::openmode mode)
{
    auto buf = get_base();
    return buf->seekoff(offset, way, mode);
}
/// <summary>
/// For output streams, flushes any internally buffered data to the underlying medium.
/// </summary>
/// <returns><c>true</c> if the flush succeeds, <c>false</c> if not</returns>
virtual pplx::task<void> sync()
{
    auto buf = get_base();
    return buf->sync();
}
/// <summary>
/// Retrieves the stream buffer's exception_ptr if one has been set.
/// </summary>
/// <returns>Pointer to the exception, if it has been set; otherwise, <c>nullptr</c> will be returned</returns>
virtual std::exception_ptr exception() const
{
    auto buf = get_base();
    return buf->exception();
}
#pragma endregion
private:
std::shared_ptr<details::basic_streambuf<_CharType>> m_buffer;
};
}}
#pragma warning(pop) // 4100
| 46.080498 | 236 | 0.608137 | [
"object"
] |
cff2596a213cafd437b2f138c76c7ba743c2736d | 3,258 | h | C | model/bgp.h | magicnat/ns3-bgp | 3ee29315bc207d14ffbfc8ac59d2248aa98a5350 | [
"MIT"
] | 3 | 2021-08-17T02:05:38.000Z | 2021-09-09T16:10:40.000Z | model/bgp.h | magicnat/ns3-bgp | 3ee29315bc207d14ffbfc8ac59d2248aa98a5350 | [
"MIT"
] | 1 | 2021-04-15T08:24:13.000Z | 2021-04-15T08:24:13.000Z | model/bgp.h | Nat-Lab/ns3-bgp | 3ee29315bc207d14ffbfc8ac59d2248aa98a5350 | [
"MIT"
] | null | null | null | /**
* @file bgp.h
* @author Nato Morichika <nat@nat.moe>
* @brief BGP module for ns3.
* @version 0.1
* @date 2019-07-15
*
* @copyright Copyright (c) 2019
*
*/
#ifndef BGP_H
#define BGP_H
#include <vector>
#include "bgp-ns3-fsm.h"
#include "bgp-ns3-clock.h"
#include "bgp-ns3-socket-out.h"
#include "bgp-log.h"
#include "bgp-routing.h"
#include "bgp-ns3-socket-in.h"
#include "ns3/application.h"
#include "ns3/ipv4-address.h"
#include "ns3/socket.h"
namespace ns3 {
class Bgp;
class BgpNs3SocketIn;
/**
* @brief Peer configuration class.
*
*/
class Peer : public SimpleRefCount<Peer> {
public:
    Peer();
    uint32_t local_asn; //!< local ASN.
    uint32_t peer_asn; //!< peer ASN.
    Ipv4Address peer_address; //!< peer's address.
    libbgp::BgpFilterRules ingress_rules; //!< ingress router filter rules. See libbgp documents.
    libbgp::BgpFilterRules egress_rules; //!< egress router filter rules. See libbgp documents.
    bool passive; //!< passive peering (don't send OPEN)
    int8_t allow_local_as; //!< Allow N local ASN in AS_PATH
    int32_t weight; //!< weight of this peer
    bool no_nexthop_check; //!< disable nexthop attribute validation
    bool forced_default_nexthop; //!< always use peering IP as nexthop.
    bool ibgp_alter_nexthop; //!< alter IBGP nexthop attribute the same way as EBGP.
    uint8_t ebgp_multihop; //!< eBGP multihop hop limit (TTL) — presumably 0/1 means directly connected only; TODO confirm against libbgp docs.
};
/**
 * @brief Session information class.
 *
 * Bundles the runtime state of one BGP session: the peer configuration,
 * the libbgp FSM driving the session, the underlying ns-3 socket and the
 * socket in/out handlers.
 */
class Session : public SimpleRefCount<Session> {
public:
    Ptr<Peer> peer; //!< configuration of the peer this session belongs to.
    Ptr<BgpNs3Fsm> fsm; //!< libbgp finite state machine for this session.
    Ptr<Socket> socket; //!< underlying ns-3 socket.
    Ptr<BgpLog> logger; //!< logger used by this session.
    Ptr<BgpNs3SocketOut> out_handler; //!< outbound socket handler (see bgp-ns3-socket-out.h).
    Ptr<BgpNs3SocketIn> in_handler; //!< inbound socket handler (see bgp-ns3-socket-in.h).
    bool local_init; //!< true if this side initiated the connection.
    void Drop(); //!< tear down this session.
};
/**
 * @brief The Bgp Application.
 *
 * An ns-3 Application that runs BGP (via libbgp) on the node it is
 * installed on. Peers and routes are configured before the application
 * starts; sessions are created when connections are accepted or
 * initiated.
 */
class Bgp : public Application {
public:
    Bgp();
    static TypeId GetTypeId (void);

    // Application lifecycle (called by ns-3).
    void StartApplication(void);
    void StopApplication(void);

    // Configuration API.
    void AddPeer(const Peer &peer); //!< register a peer (copied into this application).
    void AddRoute(libbgp::Prefix4 route, uint32_t nexthop);
    void AddRoute(uint32_t prefix, uint8_t mask, uint32_t nexthop);
    void AddRoute(const Ipv4Address &prefix, const Ipv4Mask &mask, const Ipv4Address &nexthop);
    void SetLibbgpLogLevel(libbgp::LogLevel log_level);
    void SetBgpId(Ipv4Address bgp_id);
    void SetHoldTimer(Time hold_timer);
    void SetClockInterval(Time interval); //!< interval of the periodic Tick().
private:
    void Tick(); //!< periodic housekeeping, rescheduled every _clock_interval.
    bool ConnectPeer(Ptr<Peer> peer); //!< start an outgoing connection to a peer.
    bool SessionInit(bool local_init, Ptr<Socket> socket); //!< build a Session around a connected socket.

    // ns-3 socket callbacks.
    void HandleConnectIn(Ptr<Socket> socket, const Address &src);
    bool HandleConnectInRequest(Ptr<Socket> socket, const Address &src);
    void HandleConnectOut(Ptr<Socket> socket);
    void HandleConnectOutFailed(Ptr<Socket> socket);
    void HandleClose(Ptr<Socket> socket);
    void HandleStateChange(Ptr<Socket> socket, int old_state, int new_state);

    Time _hold_timer; //!< BGP hold timer.
    Time _clock_interval; //!< Tick() period.
    Time _error_hold; //!< presumably how long to wait after an error before retrying — TODO confirm.
    BgpLog _logger;
    BgpNs3Clock _clock;
    Ipv4Address _bgp_id; //!< BGP identifier advertised in OPEN.
    Ptr<BgpRouting> _routing; //!< routing protocol hook that installs learned routes.
    Ptr<Socket> _listen_socket; //!< listening socket for incoming sessions.
    std::vector<Ptr<Peer>> _peers; //!< configured peers.
    std::vector<Ptr<Session>> _sessions; //!< active sessions.
    libbgp::BgpConfig _template; //!< base libbgp config cloned per session.
    libbgp::BgpRib4 _rib; //!< shared IPv4 RIB.
    libbgp::RouteEventBus _bus; //!< event bus shared by the FSMs.
    libbgp::LogLevel _log_level;
    bool _running; //!< true between StartApplication and StopApplication.
};
}
#endif /* BGP_H */
| 24.496241 | 97 | 0.69644 | [
"vector"
] |
cff38239f738f6430accdf1f7d231d8953c3e8ca | 1,863 | h | C | SDKs/CryCode/3.8.1/CryEngine/CryAction/GameObjects/GameObjectDispatch.h | amrhead/FireNET | 34d439aa0157b0c895b20b2b664fddf4f9b84af1 | [
"BSD-2-Clause"
] | 4 | 2017-12-18T20:10:16.000Z | 2021-02-07T21:21:24.000Z | SDKs/CryCode/3.8.1/CryEngine/CryAction/GameObjects/GameObjectDispatch.h | amrhead/FireNET | 34d439aa0157b0c895b20b2b664fddf4f9b84af1 | [
"BSD-2-Clause"
] | null | null | null | SDKs/CryCode/3.8.1/CryEngine/CryAction/GameObjects/GameObjectDispatch.h | amrhead/FireNET | 34d439aa0157b0c895b20b2b664fddf4f9b84af1 | [
"BSD-2-Clause"
] | 3 | 2019-03-11T21:36:15.000Z | 2021-02-07T21:21:26.000Z | /*************************************************************************
Crytek Source File.
Copyright (C), Crytek Studios, 2001-2005.
-------------------------------------------------------------------------
$Id$
$DateTime$
Description: This file implements dispatching RMI calls in C++ to
relevant game object code
-------------------------------------------------------------------------
History:
- 24 Oct 2005 : Created by Craig Tiller
*************************************************************************/
#ifndef __GAMEOBJECTDISPATCH_H__
#define __GAMEOBJECTDISPATCH_H__
#pragma once
#include "IGameObject.h"
#include <vector>
// Dispatches C++ RMI calls to the relevant game object code: game object
// extensions register their RMI tables here, and the generated network
// message definitions are exposed through the server/client sinks.
class CGameObjectDispatch
{
public:
    CGameObjectDispatch();
    ~CGameObjectDispatch();

    // Register an extension's RMI table (nCount entries in pMessages);
    // must be called before the protocol definitions are handed out.
    void RegisterInterface( SGameObjectExtensionRMI* pMessages, size_t nCount );

    INetMessageSink * GetServerSink() { return &m_serverDef; }
    INetMessageSink * GetClientSink() { return &m_clientDef; }

    void GetMemoryUsage(ICrySizer * s) const;

private:
    // safety check: once we start handing out pointers to m_serverCalls, m_clientCalls
    // we should never modify them again
    bool m_bSafety;
    void LockSafety();

    // all messages we have registered
    std::vector<SGameObjectExtensionRMI*> m_messages;
    // protocol definitions (parallel to m_messages)
    std::vector<SNetMessageDef> m_serverCalls;
    std::vector<SNetMessageDef> m_clientCalls;

    // Message sink exposing either the server or the client call table.
    class CProtocolDef : public INetMessageSink
    {
    public:
        virtual void DefineProtocol( IProtocolBuilder * pBuilder );
        virtual bool HasDef( const SNetMessageDef * pDef );
        bool IsServer(); //!< true when this is the server-side definition.
    };
    CProtocolDef m_serverDef;
    CProtocolDef m_clientDef;

    // Singleton-style back pointer used by the static Trampoline.
    static CGameObjectDispatch * m_pGOD;
    // Static network callback that forwards an incoming RMI to the
    // registered handler identified by userId.
    static TNetMessageCallbackResult Trampoline(
        uint32 userId,
        INetMessageSink* handler,
        TSerialize serialize,
        uint32 curSeq,
        uint32 oldSeq,
        EntityId * entityId, INetChannel* );
};
#endif
| 25.175676 | 84 | 0.637681 | [
"object",
"vector"
] |
cff3d02b682c13d049bcfaeb99d8b0abd5201468 | 32,636 | c | C | python/discodbmodule.c | josephpohlmann/discodb | bdf3bcee4e8e5b254fb15e31fd702fd620aae003 | [
"BSD-3-Clause"
] | null | null | null | python/discodbmodule.c | josephpohlmann/discodb | bdf3bcee4e8e5b254fb15e31fd702fd620aae003 | [
"BSD-3-Clause"
] | null | null | null | python/discodbmodule.c | josephpohlmann/discodb | bdf3bcee4e8e5b254fb15e31fd702fd620aae003 | [
"BSD-3-Clause"
] | null | null | null | #define PY_SSIZE_T_CLEAN
// #if PY_VERSION_HEX < 0x02060000
// #define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
// #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
// #define PyBytes_AsString PyString_AsString
// #define PyBytes_FromFormat PyString_FromFormat
// #endif
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
#include "discodb.h"
#include "discodbmodule.h"
static PyObject *DiscoDBError;
/* discodb Module Methods */
static PyMethodDef discodb_methods[] = {
{NULL} /* Sentinel */
};
/* DiscoDB Object Definition */
static PySequenceMethods DiscoDB_as_sequence = {
NULL, /* sq_length */
NULL, /* sq_concat */
NULL, /* sq_repeat */
NULL, /* sq_item */
NULL, /* sq_slice */
NULL, /* sq_ass_item */
NULL, /* sq_ass_slice */
(objobjproc)DiscoDB_contains, /* sq_contains */
NULL, /* sq_inplace_concat */
NULL, /* sq_inplace_repeat */
};
static PyMappingMethods DiscoDB_as_mapping = {
0, /* mp_length */
(binaryfunc)DiscoDB_getitem, /* mp_subscript */
NULL, /* mp_ass_subscript */
};
static PyMethodDef DiscoDB_methods[] = {
{"keys", (PyCFunction)DiscoDB_keys, METH_NOARGS,
"d.keys() -> an iterator over the keys of d."},
{"values", (PyCFunction)DiscoDB_values, METH_NOARGS,
"d.values() -> an iterator over the values of d."},
{"unique_values", (PyCFunction)DiscoDB_unique_values, METH_NOARGS,
"d.unique_values() -> an iterator over the unique values of d."},
{"query", (PyCFunction)DiscoDB_query, METH_KEYWORDS | METH_VARARGS,
"d.query(q) -> an iterator over the values of d whose keys satisfy q."},
{"dumps", (PyCFunction)DiscoDB_dumps, METH_NOARGS,
"d.dumps() -> a serialization of d."},
{"dump", (PyCFunction)DiscoDB_dump, METH_O,
"d.dump(o) -> write serialization of d to file object o."},
{"loads", (PyCFunction)DiscoDB_loads, METH_CLASS | METH_O,
"D.loads(s) -> a deserialized instance of D from serialization s."},
{"load", (PyCFunction)DiscoDB_load, METH_CLASS | METH_VARARGS,
"D.load(o[, n=0]]) -> a deserialized instance of D from file object o with offset n."},
{NULL} /* Sentinel */
};
static PyMemberDef DiscoDB_members[] = {
{NULL} /* Sentinel */
};
static PyTypeObject DiscoDBType = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"_DiscoDB", /* tp_name */
sizeof(DiscoDB), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)DiscoDB_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
&DiscoDB_as_sequence, /* tp_as_sequence */
&DiscoDB_as_mapping, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT |
Py_TPFLAGS_BASETYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
(getiterfunc)DiscoDB_keys, /* tp_iter */
0, /* tp_iternext */
DiscoDB_methods, /* tp_methods */
DiscoDB_members, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
DiscoDB_new, /* tp_new */
0, /* tp_free */
};
/* General Object Protocol */
/*
 * DiscoDB(arg=None, **kwds) -> new DiscoDB.
 *
 * Accepts no argument (empty db), a mapping (copy constructor via
 * PyMapping_Items) or an iterable of (key, values) items.  Internally
 * builds a DiscoDBConstructor, feeds every item to it, and finalizes it
 * with the keyword arguments (see DiscoDBConstructor_finalize).
 */
static PyObject *
DiscoDB_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    DiscoDBConstructor *cons = NULL;
    DiscoDB *self = NULL;
    PyObject
        *arg = NULL,
        *emptytuple = PyTuple_New(0),
        *item = NULL,
        *items = NULL,
        *iteritems = NULL,
        *none = NULL,
        *typedict = Py_BuildValue("{s:O}", "ddb_type", type);
    if (typedict == NULL || emptytuple == NULL)
        goto Done;
    /* Pass the concrete (possibly subclassed) type through so the
     * constructor allocates an instance of it. */
    cons = (DiscoDBConstructor *)DiscoDBConstructor_new(&DiscoDBConstructorType,
                                                        emptytuple,
                                                        typedict);
    if (cons == NULL)
        goto Done;
    if (!PyArg_ParseTuple(args, "|O", &arg))
        goto Done;
    if (arg == NULL) /* null constructor */
        items = PyTuple_New(0);
    else if (PyMapping_Check(arg)) /* copy constructor */
        items = PyMapping_Items(arg);
    else /* iter constructor */
        Py_INCREF(items = arg);
    iteritems = PyObject_GetIter(items);
    if (iteritems == NULL)
        goto Done;
    while ((item = PyIter_Next(iteritems))) {
        none = DiscoDBConstructor_add(cons, item);
        if (none == NULL)
            goto Done;
        Py_CLEAR(item);
        Py_CLEAR(none);
    }
    self = (DiscoDB *)DiscoDBConstructor_finalize(cons, emptytuple, kwds);
Done:
    /* Single exit point: drop every temporary regardless of where we
     * jumped from (Py_CLEAR is NULL-safe). */
    Py_CLEAR(emptytuple);
    Py_CLEAR(cons);
    Py_CLEAR(item);
    Py_CLEAR(items);
    Py_CLEAR(iteritems);
    Py_CLEAR(none);
    Py_CLEAR(typedict);
    if (PyErr_Occurred()) {
        Py_CLEAR(self);
        return NULL;
    }
    return (PyObject *)self;
}
/* Release the Python buffer reference, the owned C buffer and the
 * underlying ddb handle, then free the object itself. */
static void
DiscoDB_dealloc(DiscoDB *self)
{
    Py_CLEAR(self->obuffer);
    free(self->cbuffer);
    ddb_free(self->discodb);
    Py_TYPE(self)->tp_free((PyObject *)self);
}
/* Mapping Formal / Informal Protocol */
/*
 * `key in d` -- sq_contains slot.
 *
 * Returns 1 if key is present, 0 if not, -1 on error (exception set).
 */
static int
DiscoDB_contains(register DiscoDB *self, register PyObject *key)
{
    struct ddb_cursor *cursor = NULL;
    struct ddb_entry kentry;
    int isfound = 1;
    if (ddb_string_to_entry(key, &kentry))
        goto Done;
    cursor = ddb_getitem(self->discodb, &kentry);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            goto Done;
    if (ddb_notfound(cursor))
        isfound = 0;
Done:
    ddb_cursor_dealloc(cursor);
    if (PyErr_Occurred())
        return -1;
    return isfound;
}
/*
 * d[key] -> iterator over the values stored under key.
 *
 * Raises KeyError when the key is absent; the cursor's ownership is
 * transferred to the returned DiscoDBIter.
 */
static PyObject *
DiscoDB_getitem(register DiscoDB *self, register PyObject *key)
{
    struct ddb_cursor *cursor = NULL;
    struct ddb_entry kentry;
    if (ddb_string_to_entry(key, &kentry))
        goto Done;
    cursor = ddb_getitem(self->discodb, &kentry);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            goto Done;
    if (ddb_notfound(cursor))
        PyErr_Format(PyExc_KeyError, "%s", PyBytes_AsString(key));
Done:
    if (PyErr_Occurred())
        return NULL;
    return DiscoDBIter_new(&DiscoDBIterType, self, cursor);
}
/* d.keys() / iter(d) -> iterator over all keys of the db. */
static PyObject *
DiscoDB_keys(DiscoDB *self)
{
    struct ddb_cursor *cursor = ddb_keys(self->discodb);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            return NULL;
    return DiscoDBIter_new(&DiscoDBIterType, self, cursor);
}
/* d.values() -> iterator over all values of the db (with duplicates). */
static PyObject *
DiscoDB_values(DiscoDB *self)
{
    struct ddb_cursor *cursor = ddb_values(self->discodb);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            return NULL;
    return DiscoDBIter_new(&DiscoDBIterType, self, cursor);
}
/* d.unique_values() -> iterator over the distinct values of the db. */
static PyObject *
DiscoDB_unique_values(DiscoDB *self)
{
    struct ddb_cursor *cursor = ddb_unique_values(self->discodb);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            return NULL;
    return DiscoDBIter_new(&DiscoDBIterType, self, cursor);
}
/*
 * d.query(query[, view]) -> iterator over values whose keys satisfy the query.
 *
 * `query` must provide resolve(d) returning an object in CNF form: a
 * `clauses` attribute where each clause has `literals` and each literal
 * has `negated` and `term` (a bytes object).  The clauses are translated
 * into libdiscodb's ddb_query_clause / ddb_query_term arrays and
 * evaluated, optionally restricted to a DiscoDBView.
 *
 * Fix vs. original: the result of ddb_query_clause_alloc() and
 * ddb_query_term_alloc() was used unchecked, so an allocation failure
 * dereferenced NULL instead of raising MemoryError.
 */
static PyObject *
DiscoDB_query(register DiscoDB *self, PyObject *args, PyObject *kwds)
{
    PyObject
        *query_ = NULL,
        *clause = NULL,
        *clauses = NULL,
        *literal = NULL,
        *literals = NULL,
        *iterclauses = NULL,
        *iterliterals = NULL,
        *negated = NULL,
        *term = NULL,
        *query = NULL;
    DiscoDBView *view = NULL;
    Py_ssize_t i = 0, j = 0;
    struct ddb_query_clause *ddb_clauses = NULL;
    struct ddb_cursor *cursor = NULL;
    static char *kwlist[] = {"query", "view", NULL};
    if (self == NULL)
        goto Done;
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O!", kwlist,
                                     &query_, &DiscoDBViewType, &view))
        goto Done;
    Py_INCREF(query_);
    Py_XINCREF(view);
    query = PyObject_CallMethod(query_, "resolve", "O", self);
    if (query == NULL)
        goto Done;
    clauses = PyObject_GetAttrString(query, "clauses");
    if (clauses == NULL)
        goto Done;
    iterclauses = PyObject_GetIter(clauses);
    if (iterclauses == NULL)
        goto Done;
    if ((i = PyObject_Length(clauses)) < 0)
        goto Done;
    ddb_clauses = ddb_query_clause_alloc(i);
    if (ddb_clauses == NULL) {
        /* i must be 0 so the cleanup below does not walk a NULL array. */
        i = 0;
        goto Done;
    }
    for (i = 0; (clause = PyIter_Next(iterclauses)); i++) {
        literals = PyObject_GetAttrString(clause, "literals");
        if (literals == NULL)
            goto Done;
        iterliterals = PyObject_GetIter(literals);
        if (iterliterals == NULL)
            goto Done;
        if ((j = PyObject_Length(literals)) < 0)
            goto Done;
        ddb_clauses[i].num_terms = j;
        ddb_clauses[i].terms = ddb_query_term_alloc(j);
        if (ddb_clauses[i].terms == NULL)
            goto Done;
        for (j = 0; (literal = PyIter_Next(iterliterals)); j++) {
            negated = PyObject_GetAttrString(literal, "negated");
            if (negated == NULL)
                goto Done;
            term = PyObject_GetAttrString(literal, "term");
            if (term == NULL)
                goto Done;
            ddb_clauses[i].terms[j].nnot = PyObject_IsTrue(negated);
            /* The entry borrows term's buffer; `query` keeps the literal
             * alive until after the ddb_query call below. */
            if (ddb_string_to_entry(term, &ddb_clauses[i].terms[j].key))
                goto Done;
            Py_CLEAR(literal);
            Py_CLEAR(negated);
            Py_CLEAR(term);
        }
        Py_CLEAR(clause);
        Py_CLEAR(literals);
        Py_CLEAR(iterliterals);
    }
    if (view)
        cursor = ddb_query_view(self->discodb, ddb_clauses, i, view->view);
    else
        cursor = ddb_query(self->discodb, ddb_clauses, i);
    if (cursor == NULL)
        if (ddb_has_error(self->discodb))
            goto Done;
Done:
    Py_CLEAR(clause);
    Py_CLEAR(clauses);
    Py_CLEAR(literal);
    Py_CLEAR(literals);
    Py_CLEAR(iterclauses);
    Py_CLEAR(iterliterals);
    Py_CLEAR(negated);
    Py_CLEAR(term);
    Py_CLEAR(query_);
    Py_CLEAR(query);
    Py_CLEAR(view);
    ddb_query_clause_dealloc(ddb_clauses, i);
    if (PyErr_Occurred()) {
        ddb_cursor_dealloc(cursor);
        return NULL;
    }
    return DiscoDBIter_new(&DiscoDBIterType, self, cursor);
}
/* Serialization / Deserialization Informal Protocol */
/*
 * d.dumps() -> bytes serialization of the db.
 *
 * Fix vs. original: ddb_dumps() can fail and return NULL, which was
 * passed to Py_BuildValue unchecked; now raises MemoryError instead.
 * The length is also cast to Py_ssize_t, which is what "y#" expects
 * under PY_SSIZE_T_CLEAN.
 */
static PyObject *
DiscoDB_dumps(DiscoDB *self)
{
    uint64_t length;
    char *cbuffer = ddb_dumps(self->discodb, &length);
    PyObject *string;
    if (cbuffer == NULL)
        return PyErr_NoMemory();
    string = Py_BuildValue("y#", cbuffer, (Py_ssize_t)length);
    free(cbuffer);
    return string;
}
/*
 * d.dump(file) -> None; writes the serialized db to file.fileno().
 *
 * The file object must expose a fileno() method returning a writable
 * OS-level file descriptor.
 */
static PyObject *
DiscoDB_dump(DiscoDB *self, PyObject *file)
{
    PyObject *fileno = NULL;
    int fd;
    fileno = PyObject_CallMethod(file, "fileno", NULL);
    if (fileno == NULL)
        goto Done;
    fd = PyLong_AsLong(fileno);
    if (fd < 0)
        goto Done;
    if (ddb_dump(self->discodb, fd))
        if (ddb_has_error(self->discodb))
            goto Done;
Done:
    Py_CLEAR(fileno);
    if (PyErr_Occurred())
        return NULL;
    Py_RETURN_NONE;
}
/*
 * D.loads(bytes) -> DiscoDB deserialized from an in-memory buffer.
 *
 * The returned object keeps a reference to `bytes` (self->obuffer) and
 * reads from its internal buffer without copying, so the bytes object
 * must stay alive as long as the DiscoDB does.
 */
static PyObject *
DiscoDB_loads(PyTypeObject *type, PyObject *bytes)
{
    DiscoDB *self = (DiscoDB *)type->tp_alloc(type, 0);
    const char *buffer;
    Py_ssize_t n;
    if (self != NULL) {
        if (PyBytes_AsStringAndSize(bytes, (char**)&buffer, &n))
            goto Done;
        Py_INCREF(bytes);
        self->cbuffer = NULL;
        self->obuffer = bytes;
        self->discodb = ddb_alloc();
        if (self->discodb == NULL)
            goto Done;
        if (ddb_loads(self->discodb, buffer, n))
            if (ddb_has_error(self->discodb))
                goto Done;
    }
Done:
    if (PyErr_Occurred()) {
        Py_CLEAR(self);
        return NULL;
    }
    return (PyObject *)self;
}
/*
 * D.load(file[, offset=0]) -> DiscoDB deserialized from a file object.
 *
 * Reads from file.fileno() starting at `offset`; ddb_loado presumably
 * mmaps the descriptor, so the underlying file must stay valid for the
 * lifetime of the DiscoDB.
 */
static PyObject *
DiscoDB_load(PyTypeObject *type, PyObject *args)
{
    DiscoDB *self = (DiscoDB *)type->tp_alloc(type, 0);
    PyObject
        *file = NULL,
        *fileno = NULL;
    long offset = 0;
    int fd;
    if (self != NULL) {
        if (!PyArg_ParseTuple(args, "O|l", &file, &offset))
            goto Done;
        fileno = PyObject_CallMethod(file, "fileno", NULL);
        if (fileno == NULL)
            goto Done;
        fd = PyLong_AsLong(fileno);
        if (fd < 0)
            goto Done;
        self->cbuffer = NULL;
        self->obuffer = NULL;
        self->discodb = ddb_alloc();
        if (self->discodb == NULL)
            goto Done;
        if (ddb_loado(self->discodb, fd, offset))
            if (ddb_has_error(self->discodb))
                goto Done;
    }
Done:
    Py_CLEAR(fileno);
    if (PyErr_Occurred()) {
        Py_CLEAR(self);
        return NULL;
    }
    return (PyObject *)self;
}
/* Module Initialization */
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_discodb",
"Discodb Module",
-1,
discodb_methods,
NULL,
NULL,
NULL,
NULL,
};
/*
 * Module initialization for _discodb: registers the _DiscoDB,
 * DiscoDBConstructor, DiscoDBIter and DiscoDBView types plus the
 * DiscoDBError exception.
 *
 * Fix vs. original: PyModule_Create() failure was not checked before
 * the module pointer was used.
 */
PyMODINIT_FUNC PyInit__discodb(void)
{
    PyObject *module = PyModule_Create(&moduledef);
    if (module == NULL)
        return NULL;
    if (PyType_Ready(&DiscoDBType) < 0)
        return NULL;
    Py_INCREF(&DiscoDBType);
    PyModule_AddObject(module, "_DiscoDB",
                       (PyObject *)&DiscoDBType);
    if (PyType_Ready(&DiscoDBConstructorType) < 0)
        return NULL;
    Py_INCREF(&DiscoDBConstructorType);
    PyModule_AddObject(module, "DiscoDBConstructor",
                       (PyObject *)&DiscoDBConstructorType);
    if (PyType_Ready(&DiscoDBIterType) < 0)
        return NULL;
    Py_INCREF(&DiscoDBIterType);
    PyModule_AddObject(module, "DiscoDBIter",
                       (PyObject *)&DiscoDBIterType);
    if (PyType_Ready(&DiscoDBViewType) < 0)
        return NULL;
    Py_INCREF(&DiscoDBViewType);
    PyModule_AddObject(module, "DiscoDBView",
                       (PyObject *)&DiscoDBViewType);
    DiscoDBError = PyErr_NewException("discodb.DiscoDBError", NULL, NULL);
    Py_INCREF(DiscoDBError);
    PyModule_AddObject(module, "DiscoDBError", DiscoDBError);
    return module;
}
/* DiscoDB Constructor Type */
static PyMethodDef DiscoDBConstructor_methods[] = {
{"add", (PyCFunction)DiscoDBConstructor_add, METH_VARARGS,
"c.add(k, v) -> add (k, v) to the DiscoDB that will be produced."},
{"finalize", (PyCFunction)DiscoDBConstructor_finalize, METH_VARARGS | METH_KEYWORDS,
"c.finalize([flags]) -> a DiscoDB containing the mappings added to c."},
{NULL} /* Sentinel */
};
static PyMemberDef DiscoDBConstructor_members[] = {
{NULL} /* Sentinel */
};
static PyTypeObject DiscoDBConstructorType = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"DiscoDBConstructor", /* tp_name */
sizeof(DiscoDBConstructor), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)DiscoDBConstructor_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT |
Py_TPFLAGS_BASETYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
DiscoDBConstructor_methods, /* tp_methods */
DiscoDBConstructor_members, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
DiscoDBConstructor_new, /* tp_new */
0, /* tp_free */
};
/*
 * DiscoDBConstructor(ddb=None, ddb_type=_DiscoDB) -> new constructor.
 *
 * `ddb`, if given, seeds the constructor with an existing DiscoDB
 * (ddb_cons_ddb); `ddb_type` selects the (sub)type that finalize()
 * will instantiate and must be _DiscoDB or a subclass.
 */
static PyObject *
DiscoDBConstructor_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    DiscoDBConstructor *self = (DiscoDBConstructor *)type->tp_alloc(type, 0);
    DiscoDB *ddb = NULL;
    PyTypeObject *ddb_type = &DiscoDBType;
    static char *kwlist[] = {"ddb", "ddb_type", NULL};
    if (self == NULL)
        goto Done;
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, &ddb, &ddb_type))
        goto Done;
    if (ddb && !PyObject_TypeCheck(ddb, &DiscoDBType)) {
        PyErr_SetString(DiscoDBError, "Not a discodb.");
        goto Done;
    }
    if (!PyType_Check(ddb_type) || !PyType_IsSubtype(ddb_type, &DiscoDBType)) {
        PyErr_SetString(DiscoDBError, "Not a valid type.");
        goto Done;
    }
    Py_INCREF(self->ddb_type = ddb_type);
    if (ddb)
        self->ddb_cons = ddb_cons_ddb(ddb->discodb);
    else
        self->ddb_cons = ddb_cons_alloc();
    if (self->ddb_cons == NULL)
        goto Done;
Done:
    if (PyErr_Occurred()) {
        Py_CLEAR(self);
        return NULL;
    }
    return (PyObject *)self;
}
/* Drop the target-type reference and the libdiscodb constructor state. */
static void
DiscoDBConstructor_dealloc(DiscoDBConstructor *self)
{
    Py_CLEAR(self->ddb_type);
    ddb_cons_dealloc(self->ddb_cons);
    Py_TYPE(self)->tp_free((PyObject *)self);
}
/*
 * c.add((key, values)) -> None.
 *
 * `item` is a (key, values) tuple: key is a bytes object; values is
 * None, a single bytes object, or an iterable of bytes objects.  Adds
 * key -> value for every value, or a bare key when values is empty.
 *
 * Fix vs. original: with PY_SSIZE_T_CLEAN defined, the "y#" format
 * stores a Py_ssize_t, but &kentry.length (a narrower field, see
 * ddb_string_to_entry's UINT32_MAX check) was passed directly --
 * corrupting adjacent memory on 64-bit builds.  Parse into a local
 * Py_ssize_t and range-check instead.
 */
static PyObject *
DiscoDBConstructor_add(DiscoDBConstructor *self, PyObject *item)
{
    PyObject
        *itervalues = NULL,
        *value = NULL,
        *values = NULL,
        *valueseq = NULL;
    uint64_t n;
    Py_ssize_t klen = 0;
    struct ddb_entry kentry, ventry;
    if (!PyArg_ParseTuple(item, "y#O", &kentry.data, &klen, &values))
        goto Done;
    if (klen >= UINT32_MAX) {
        PyErr_SetString(PyExc_ValueError, "String too long");
        goto Done;
    }
    kentry.length = (uint32_t)klen;
    Py_XINCREF(values);
    if (values == NULL)
        values = PyTuple_New(0);
    /* A single bytes value is wrapped in a 1-tuple so it is not treated
     * as an iterable of 1-byte values. */
    if (PyBytes_Check(values))
        valueseq = Py_BuildValue("(O)", values);
    else
        Py_XINCREF(valueseq = values);
    if (valueseq == NULL)
        goto Done;
    itervalues = PyObject_GetIter(valueseq);
    if (itervalues == NULL)
        goto Done;
    for (n = 0; (value = PyIter_Next(itervalues)); n++) {
        if (ddb_string_to_entry(value, &ventry))
            goto Done;
        if (ddb_cons_add(self->ddb_cons, &kentry, &ventry)) {
            PyErr_SetString(DiscoDBError, "Construction failed");
            goto Done;
        }
        Py_CLEAR(value);
    }
    /* No values at all: record the key with an empty value set. */
    if (n == 0)
        if (ddb_cons_add(self->ddb_cons, &kentry, NULL)) {
            PyErr_SetString(DiscoDBError, "Construction failed");
            goto Done;
        }
Done:
    Py_CLEAR(itervalues);
    Py_CLEAR(value);
    Py_CLEAR(values);
    Py_CLEAR(valueseq);
    if (PyErr_Occurred())
        return NULL;
    Py_RETURN_NONE;
}
/*
 * c.finalize(disable_compression=0, unique_items=0) -> DiscoDB.
 *
 * Serializes everything added so far into a buffer (owned by the new
 * DiscoDB via cbuffer) and loads it.  The instance is allocated as
 * self->ddb_type, i.e. the type requested at construction time.
 */
static PyObject *
DiscoDBConstructor_finalize(DiscoDBConstructor *self, PyObject *args, PyObject *kwds)
{
    DiscoDB *discodb = (DiscoDB *)DiscoDBType.tp_alloc(self->ddb_type, 0);
    uint64_t n,
        flags = 0,
        disable_compression = 0,
        unique_items = 0;
    static char *kwlist[] = {"disable_compression",
                             "unique_items", NULL};
    if (discodb == NULL)
        goto Done;
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|II", kwlist,
                                     &disable_compression,
                                     &unique_items))
        goto Done;
    if (disable_compression)
        flags |= DDB_OPT_DISABLE_COMPRESSION;
    if (unique_items)
        flags |= DDB_OPT_UNIQUE_ITEMS;
    discodb->obuffer = NULL;
    discodb->cbuffer = ddb_cons_finalize(self->ddb_cons, &n, flags);
    if (discodb->cbuffer == NULL) {
        PyErr_SetString(DiscoDBError, "Construction finalization failed");
        goto Done;
    }
    discodb->discodb = ddb_alloc();
    if (discodb->discodb == NULL)
        goto Done;
    if (ddb_loads(discodb->discodb, discodb->cbuffer, n))
        if (ddb_has_error(discodb->discodb))
            goto Done;
Done:
    if (PyErr_Occurred()) {
        Py_CLEAR(discodb);
        return NULL;
    }
    return (PyObject *)discodb;
}
/* DiscoDB Iterator Types */
static PyMethodDef DiscoDBIter_methods[] = {
{"count", (PyCFunction)DiscoDBIter_count, METH_NOARGS,
"i.count() -> count the remaining entries in the iterator."},
{"size", (PyCFunction)DiscoDBIter_size, METH_NOARGS,
"i.size() -> the size of the underlying cursor."},
{NULL} /* Sentinel */
};
static PyTypeObject DiscoDBIterType = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"DiscoDB-iterator", /* tp_name */
sizeof(DiscoDBIter), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)DiscoDBIter_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT |
Py_TPFLAGS_BASETYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
PyObject_SelfIter, /* tp_iter */
(iternextfunc)DiscoDBIter_iternext, /* tp_iternext */
DiscoDBIter_methods, /* tp_methods */
};
/* Build an iterator over `cursor`, keeping `owner` alive (the cursor
 * points into the owner's buffer).  Takes ownership of the cursor. */
static PyObject *
DiscoDBIter_new(PyTypeObject *type, DiscoDB *owner, struct ddb_cursor *cursor)
{
    DiscoDBIter *iter = PyObject_New(DiscoDBIter, type);
    if (iter == NULL)
        return NULL;
    Py_INCREF(owner);
    iter->owner = owner;
    iter->cursor = cursor;
    return (PyObject *)iter;
}
/* Release the cursor and the reference pinning the owning DiscoDB. */
static void
DiscoDBIter_dealloc(DiscoDBIter *self)
{
    Py_CLEAR(self->owner);
    ddb_cursor_dealloc(self->cursor);
    PyObject_Del(self);
}
/*
 * i.count() -> number of remaining entries in the iterator.
 *
 * Fix vs. original: PyInt_FromSsize_t is Python 2 API and does not
 * exist in Python 3 (this module uses PyModuleDef/PyInit_); use
 * PyLong_FromSsize_t.
 */
static PyObject *
DiscoDBIter_count(DiscoDBIter *self)
{
    int errcode;
    Py_ssize_t n = ddb_cursor_count(self->cursor, &errcode);
    if (errcode)
        return PyErr_NoMemory();
    return PyLong_FromSsize_t(n);
}
/*
 * i.size() -> size of the underlying result set.
 *
 * Fix vs. original: PyInt_FromSsize_t is Python 2 API; use
 * PyLong_FromSsize_t for Python 3.
 */
static PyObject *
DiscoDBIter_size(DiscoDBIter *self)
{
    return PyLong_FromSsize_t(ddb_resultset_size(self->cursor));
}
/*
 * tp_iternext: return the next entry as a bytes object, NULL with no
 * exception at exhaustion, or NULL with MemoryError on cursor failure.
 */
static PyObject *
DiscoDBIter_iternext(DiscoDBIter *self)
{
    int errcode;
    const struct ddb_entry *next = ddb_next(self->cursor, &errcode);
    if (errcode)
        return PyErr_NoMemory();
    if (next == NULL)
        return NULL;
    return Py_BuildValue("y#", next->data, next->length);
}
/* DiscoDB View Type */
static PySequenceMethods DiscoDBView_sequence = {
(lenfunc)DiscoDBView_len /* sq length */
};
static PyMethodDef DiscoDBView_methods[] = {
{NULL} /* Sentinel */
};
static PyTypeObject DiscoDBViewType = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"DiscoDBView", /* tp_name */
sizeof(DiscoDBView), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)DiscoDBView_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
&DiscoDBView_sequence, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT |
Py_TPFLAGS_BASETYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
DiscoDBView_methods, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
DiscoDBView_new, /* tp_new */
0, /* tp_free */
};
/*
 * DiscoDBView(ddb, data) -> a view restricting queries on `ddb`.
 *
 * `data` is an iterable of bytes values; each is added to a view
 * constructor which is finalized against the given DiscoDB.
 */
static PyObject *
DiscoDBView_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    PyObject *data = NULL,
             *iter = NULL,
             *item = NULL;
    DiscoDB *ddb = NULL;
    struct ddb_view_cons *cons = NULL;
    DiscoDBView *self = (DiscoDBView*)type->tp_alloc(type, 0);
    if (!PyArg_ParseTuple(args, "O!O", &DiscoDBType, &ddb, &data))
        goto Done;
    Py_INCREF(data);
    Py_INCREF(ddb);
    if (!(cons = ddb_view_cons_new()))
        return NULL;
    if (!(iter = PyObject_GetIter(data)))
        goto Done;
    while ((item = PyIter_Next(iter))){
        struct ddb_entry e;
        if (ddb_string_to_entry(item, &e))
            goto Done;
        if (ddb_view_cons_add(cons, &e)){
            PyErr_SetString(PyExc_MemoryError, "Adding value to view failed");
            goto Done;
        }
        Py_CLEAR(item);
    }
    if (!(self->view = ddb_view_cons_finalize(cons, ddb->discodb)))
        PyErr_SetString(PyExc_RuntimeError, "Couldn't finalize the view");
Done:
    /* ddb_view_cons_free / Py_CLEAR are NULL-safe; a single exit path
     * handles every failure above. */
    ddb_view_cons_free(cons);
    Py_CLEAR(data);
    Py_CLEAR(iter);
    Py_CLEAR(ddb);
    if (PyErr_Occurred()) {
        Py_CLEAR(self);
        return NULL;
    }
    return (PyObject*)self;
}
/* Free the underlying libdiscodb view, then the object itself. */
static void DiscoDBView_dealloc(DiscoDBView *self)
{
    ddb_view_free(self->view);
    PyObject_Del(self);
}
/* len(view) -> number of values in the view (sq_length slot). */
static Py_ssize_t DiscoDBView_len(DiscoDBView *self)
{
    return ddb_view_size(self->view);
}
/* ddb helpers */
static struct ddb *
ddb_alloc(void)
{
struct ddb *ddb = ddb_new();
if (!ddb)
PyErr_NoMemory();
return ddb;
}
static struct ddb_cons *
ddb_cons_alloc(void)
{
struct ddb_cons *cons = ddb_cons_new();
if (!cons)
PyErr_NoMemory();
return cons;
}
/* Zero-initialized array of `count` query clauses; MemoryError on failure. */
static struct ddb_query_clause *
ddb_query_clause_alloc(size_t count)
{
    struct ddb_query_clause *array;
    array = (struct ddb_query_clause *)calloc(count, sizeof(struct ddb_query_clause));
    if (array == NULL)
        PyErr_NoMemory();
    return array;
}
/* Zero-initialized array of `count` query terms; MemoryError on failure. */
static struct ddb_query_term *
ddb_query_term_alloc(size_t count)
{
    struct ddb_query_term *array;
    array = (struct ddb_query_term *)calloc(count, sizeof(struct ddb_query_term));
    if (array == NULL)
        PyErr_NoMemory();
    return array;
}
static void
ddb_cons_dealloc(struct ddb_cons *cons)
{
if (cons)
ddb_cons_free(cons);
}
static void
ddb_cursor_dealloc(struct ddb_cursor *cursor)
{
if (cursor)
ddb_free_cursor(cursor);
}
/*
 * Free a clause array and the term array hanging off each clause.
 *
 * Fix vs. original: the loop dereferenced `clauses` before the NULL
 * check, crashing when called with clauses == NULL and num_clauses > 0
 * (e.g. after an allocation failure); it also compared a signed int
 * against the uint32_t count.  free(NULL) is a no-op, so the per-term
 * guard is unnecessary.
 */
static void
ddb_query_clause_dealloc(struct ddb_query_clause *clauses, uint32_t num_clauses)
{
    uint32_t i;
    if (clauses == NULL)
        return;
    for (i = 0; i < num_clauses; i++)
        free(clauses[i].terms);
    free(clauses);
}
/*
 * If the discodb handle carries an error, raise it as DiscoDBError.
 * Returns the non-zero error code, or 0 when there is no error.
 */
static int
ddb_has_error(struct ddb *discodb)
{
    int errcode;
    const char *errstr;
    if ((errcode = ddb_error(discodb, &errstr)))
        PyErr_SetString(DiscoDBError, errstr);
    return errcode;
}
/* Populate a ddb_entry from a Python string without copying the bytes
 * (e->data borrows the string's internal buffer).  Returns 0 on
 * success, 1 with a Python exception set on failure.  The length must
 * fit in the entry's 32-bit length field. */
static int
ddb_string_to_entry(PyObject *str, struct ddb_entry *e)
{
    Py_ssize_t size = 0;
    if (PyString_AsStringAndSize(str, (char**)&e->data, &size))
        return 1;
    if (size >= UINT32_MAX){
        PyErr_SetString(PyExc_ValueError, "String too long");
        return 1;
    }
    e->length = size;
    return 0;
}
| 30.615385 | 112 | 0.495312 | [
"object"
] |
cffc68adc2356721ed10ef6699a109ad4fd55868 | 2,686 | c | C | source/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c | ovimihai/voice-activated-microbit | ebca28833b51477eb886a800d6834dfc5475b05a | [
"MIT"
] | 94 | 2020-10-09T05:03:45.000Z | 2022-03-27T15:01:23.000Z | source/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c | ovimihai/voice-activated-microbit | ebca28833b51477eb886a800d6834dfc5475b05a | [
"MIT"
] | 8 | 2020-11-28T07:29:01.000Z | 2022-02-17T17:03:33.000Z | source/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c | ovimihai/voice-activated-microbit | ebca28833b51477eb886a800d6834dfc5475b05a | [
"MIT"
] | 36 | 2020-11-07T23:50:26.000Z | 2022-03-26T05:13:28.000Z | /* ----------------------------------------------------------------------
* Project: CMSIS DSP Library
* Title: arm_insertion_sort_f32.c
* Description: Floating point insertion sort
*
* $Date: 2019
* $Revision: V1.6.0
*
* Target Processor: Cortex-M and Cortex-A cores
* -------------------------------------------------------------------- */
/*
* Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h"
#include "arm_sorting.h"
/**
@ingroup groupSupport
*/
/**
@addtogroup Sorting
@{
*/
/**
* @private
* @param[in] S points to an instance of the sorting structure.
* @param[in] pSrc points to the block of input data.
* @param[out] pDst points to the block of output data
* @param[in] blockSize number of samples to process.
*
* @par Algorithm
* The insertion sort is a simple sorting algorithm that
* reads all the element of the input array and removes one element
* at a time, finds the location it belongs in the final sorted list,
* and inserts it there.
*
* @par It's an in-place algorithm. In order to obtain an out-of-place
* function, a memcpy of the source vector is performed.
*/
void arm_insertion_sort_f32(
  const arm_sort_instance_f32 * S,
  float32_t *pSrc,
  float32_t* pDst,
  uint32_t blockSize)
{
    float32_t *buf;
    const uint8_t sortDir = S->dir;
    uint32_t idx;

    /* The algorithm sorts in place; for the out-of-place variant the
     * source is first duplicated into the destination buffer. */
    if (pSrc != pDst)
    {
        memcpy(pDst, pSrc, blockSize * sizeof(float32_t));
        buf = pDst;
    }
    else
    {
        buf = pSrc;
    }

    /* Read every element of the input array ... */
    for (idx = 0; idx < blockSize; idx++)
    {
        /* ... and bubble it towards the front, one swap at a time,
         * until it is ordered with respect to its predecessor.  The
         * comparison direction is selected by S->dir. */
        uint32_t k = idx;
        while (k > 0 && sortDir == (buf[k] < buf[k - 1]))
        {
            const float32_t held = buf[k];
            buf[k]     = buf[k - 1];
            buf[k - 1] = held;
            k--;
        }
    }
}
/**
@} end of Sorting group
*/
| 28.574468 | 86 | 0.577811 | [
"vector"
] |
cffc75ceb2b8db3b90bde45a0b7587c5d23f4d49 | 5,517 | h | C | ibtk/src/refine_ops/CartSideDoubleSpecializedConstantRefine.h | MSV-Project/IBAMR | 3cf614c31bb3c94e2620f165ba967cba719c45ea | [
"BSD-3-Clause"
] | 2 | 2017-12-06T06:16:36.000Z | 2021-03-13T12:28:08.000Z | ibtk/src/refine_ops/CartSideDoubleSpecializedConstantRefine.h | MSV-Project/IBAMR | 3cf614c31bb3c94e2620f165ba967cba719c45ea | [
"BSD-3-Clause"
] | null | null | null | ibtk/src/refine_ops/CartSideDoubleSpecializedConstantRefine.h | MSV-Project/IBAMR | 3cf614c31bb3c94e2620f165ba967cba719c45ea | [
"BSD-3-Clause"
] | null | null | null | // Filename: CartSideDoubleSpecializedConstantRefine.h
// Created on 17 Sep 2011 by Boyce Griffith
//
// Copyright (c) 2002-2013, Boyce Griffith
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of New York University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef included_CartSideDoubleSpecializedConstantRefine
#define included_CartSideDoubleSpecializedConstantRefine
/////////////////////////////// INCLUDES /////////////////////////////////////
#include <string>
#include "Box.h"
#include "IntVector.h"
#include "RefineOperator.h"
#include "tbox/Pointer.h"
namespace SAMRAI {
namespace hier {
template <int DIM> class Patch;
template <int DIM> class Variable;
} // namespace hier
} // namespace SAMRAI
/////////////////////////////// CLASS DEFINITION /////////////////////////////
namespace IBTK
{
/*!
* \brief Class CartSideDoubleSpecializedConstantRefine is a concrete
* SAMRAI::xfer::RefineOperator object that prolongs side-centered double
* precision patch data via constant prolongation.
*/
class CartSideDoubleSpecializedConstantRefine
    : public SAMRAI::xfer::RefineOperator<NDIM>
{
public:
    /*!
     * \brief Default constructor.
     */
    CartSideDoubleSpecializedConstantRefine();

    /*!
     * \brief Destructor.
     */
    ~CartSideDoubleSpecializedConstantRefine();

    /*!
     * \name Implementation of SAMRAI::xfer::RefineOperator interface.
     */
    //\{

    /*!
     * Return true if the refining operation matches the variable and name
     * string identifier request; false, otherwise.
     */
    bool
    findRefineOperator(
        const SAMRAI::tbox::Pointer<SAMRAI::hier::Variable<NDIM> >& var,
        const std::string& op_name) const;

    /*!
     * Return name string identifier of the refining operation.
     */
    const std::string&
    getOperatorName() const;

    /*!
     * Return the priority of this operator relative to other refining
     * operators.  The SAMRAI transfer routines guarantee that refining using
     * operators with lower priority will be performed before those with higher
     * priority.
     */
    int
    getOperatorPriority() const;

    /*!
     * Return the stencil width associated with the refining operator.  The
     * SAMRAI transfer routines guarantee that the source patch will contain
     * sufficient ghost cell data surrounding the interior to satisfy the
     * stencil width requirements for each refining operator.
     */
    SAMRAI::hier::IntVector<NDIM>
    getStencilWidth() const;

    /*!
     * Refine the source component on the coarse patch to the destination
     * component on the fine patch.  The refining operation is performed on
     * the intersection of the destination (fine) patch and the fine box.
     * The coarse patch is guaranteed to contain sufficient data for the
     * stencil width of the refining operator.
     *
     * NOTE(review): the original comment described the transfer as going
     * from the fine patch to the coarse patch and being restricted to the
     * "coarse box"; the signature (non-const \a fine destination, const
     * \a coarse source, \a fine_box) shows the opposite, which is the
     * standard coarse-to-fine prolongation contract for a RefineOperator.
     */
    void
    refine(
        SAMRAI::hier::Patch<NDIM>& fine,
        const SAMRAI::hier::Patch<NDIM>& coarse,
        int dst_component,
        int src_component,
        const SAMRAI::hier::Box<NDIM>& fine_box,
        const SAMRAI::hier::IntVector<NDIM>& ratio) const;

    //\}

protected:

private:
    /*!
     * \brief Copy constructor.
     *
     * \note This constructor is not implemented and should not be used.
     *
     * \param from The value to copy to this object.
     */
    CartSideDoubleSpecializedConstantRefine(
        const CartSideDoubleSpecializedConstantRefine& from);

    /*!
     * \brief Assignment operator.
     *
     * \note This operator is not implemented and should not be used.
     *
     * \param that The value to assign to this object.
     *
     * \return A reference to this object.
     */
    CartSideDoubleSpecializedConstantRefine&
    operator=(
        const CartSideDoubleSpecializedConstantRefine& that);

    /*!
     * The operator name string identifier reported by getOperatorName().
     */
    static const std::string s_op_name;
};
}// namespace IBTK
//////////////////////////////////////////////////////////////////////////////
#endif //#ifndef included_CartSideDoubleSpecializedConstantRefine
| 33.035928 | 80 | 0.68153 | [
"object"
] |
cffdab915bbcde4bd8a0f7a7b7c12bf876e41c49 | 27,993 | h | C | arch/cpu/simplelink-cc13xx-cc26xx/lib/coresdk_cc13xx_cc26xx/source/ti/drivers/uart/UARTCC26XX.h | Lkiraa/Contiki-ng | 87b55a9233d5588b454f6f5ec580ee9af1ae88f8 | [
"BSD-3-Clause"
] | 7 | 2018-09-14T13:14:57.000Z | 2022-01-17T15:58:21.000Z | arch/cpu/simplelink-cc13xx-cc26xx/lib/coresdk_cc13xx_cc26xx/source/ti/drivers/uart/UARTCC26XX.h | Lkiraa/Contiki-ng | 87b55a9233d5588b454f6f5ec580ee9af1ae88f8 | [
"BSD-3-Clause"
] | 2 | 2018-11-23T02:45:17.000Z | 2020-09-28T14:14:02.000Z | arch/cpu/simplelink-cc13xx-cc26xx/lib/coresdk_cc13xx_cc26xx/source/ti/drivers/uart/UARTCC26XX.h | Lkiraa/Contiki-ng | 87b55a9233d5588b454f6f5ec580ee9af1ae88f8 | [
"BSD-3-Clause"
] | 8 | 2019-05-29T07:32:25.000Z | 2021-07-06T14:54:58.000Z | /*
* Copyright (c) 2015-2019, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** ============================================================================
* @file UARTCC26XX.h
*
* @brief UART driver implementation for a CC26XX UART controller
*
* # Driver include #
* The UART header file should be included in an application as follows:
* @code
* #include <ti/drivers/UART.h>
* #include <ti/drivers/uart/UARTCC26XX.h>
* @endcode
*
* Refer to @ref UART.h for a complete description of APIs.
*
* # Overview #
* The general UART API should used in application code, i.e. UART_open()
* is used instead of UARTCC26XX_open(). The board file will define the device
* specific config, and casting in the general API will ensure that the correct
* device specific functions are called.
* This is also reflected in the example code in [Use Cases](@ref USE_CASES).
*
* # General Behavior #
* Before using the UART in CC26XX:
* - The UART driver is initialized by calling UART_init().
* - The UART HW is configured and flags system dependencies (e.g. IOs,
* power, etc.) by calling UART_open().
* - The RX and TX can operate independently of each other.
* .
* The following is true for receive operation:
* - RX is enabled by calling UART_read().
* - All received bytes are ignored after UART_open() is called, until
* the first UART_read().
* - If an RX error occur, RX is turned off and all bytes received before the
* error occured are returned.
* - After a successful read, RX remains on. UART_read() must be called
* again before FIFO goes full in order to avoid overflow. It is safe to
* call another UART_read() from the read callback, See
* [Receive Continously] (@ref USE_CASE_CB) use case below.
* - If a read times out (in ::UART_MODE_BLOCKING mode), RX will remain on.
* UART_read() must be called again before FIFO goes full in order to avoid overflow.
* - The UART_read() supports partial return, that can be used if the
* receive size is unknown. See [Use Cases](@ref USE_CASES) below.
* - The RingBuf serves as an extension of the FIFO. If data is received when
* UART_read() is not called, data will be stored in the RingBuf. The
* functionality of the RingBuf has been tested with a size of 32. This size
* can be changed to suit the application.
* .
* The following apply for transmit operation:
* - TX is enabled by calling UART_write().
* - If the UART_write() succeeds, the TX is disabled.
* .
* If UART is no longer needed by application:
* - Release system dependencies for UART by calling UART_close().
* .
* If the UART is configured in ::UART_MODE_CALLBACK mode:
* - The error handling callback is run in a HWI context.
* - The regular callback is run in a SWI context.
*
* # Error handling #
* ## Read errors ##
* If an error occurs during read operation:
* - All bytes received up until an error occurs will be returned, with the
* error signaled in the ::UARTCC26XX_Object.status field. The RX is then turned off
* and all bytes will be ignored until a new read is issued. Note that only
* the read is cancelled when the error occurs. If a write was active
* while the RX error occurred, it will complete.
* - If a RX break error occurs, an extra 0 byte will also be returned by the
* UART_read().
* .
*
* ## Write errors##
* If a timeout occurs during a write, an UART_ERROR will be returned and the
* UART_Object.status will be set to ::UART_TIMED_OUT. All bytes that are not
* transmitted, will be flushed.
* If flow control is not enabled, the ::UARTCC26XX_Object.writeTimeout should
* be kept at default value, BIOS_WAIT_FOREVER. The write call will return after
* all bytes are transmitted.
* If flow control is enabled, the timeout should be set by the application in
* order to recover if the receiver gets stuck.
*
* ## General timeout ##
* A timeout value can only be specified for reads and writes in ::UART_MODE_BLOCKING.
* If a timeout occurs during a read when in ::UART_MODE_BLOCKING, the number of bytes received will be
* returned and the UART_Object.status will be set to ::UART_TIMED_OUT.
* After a read timeout, RX will remain on, but device is allowed to enter standby.
* For more details see [Power Management](@ref POWER_MANAGEMENT) chapter below.
*
* In ::UART_MODE_CALLBACK there is no timeout and the application must call
* UART_readCancel() or UART_writeCancel() to abort the operation.
*
* @note A new read or write will reset the UART_Object.status to UART_OK.
* Caution must be taken when doing parallel reads and writes.
*
* ## Closing driver during an ongoing read/write ##
* It's safe to call UART_close() during an ongoing UART_read() and/or UART_write(),
* this will cancel the ongoing RX/TX immediately.
*
* The RX callback is alwyas called when you call UART_close() if there's an
* ongoing read.
* Note that if UART_close() is called during an ongoing read, the size provided
* in the RX callback function is 0 if < 16 bytes were received before calling UART_close().
* This is because 16 bytes is the RX watermark that triggers the ISR
* to copy bytes from the internal UART FIFO to the software RX buffer.
*
* The TX callback is always called when you call UART_close() if there's an
* ongoing write. The driver does not wait until a byte is transmitted correctly,
* so if UART_close() is called in the middle of sending a byte,
* this byte will be corrupted.
*
* # Power Management @anchor POWER_MANAGEMENT #
* The TI-RTOS power management framework will try to put the device into the most
* power efficient mode whenever possible. Please see the technical reference
* manual for further details on each power mode.
*
* The UARTCC26XX driver is setting a power constraint during operation to keep
* the device out of standby. When the operation has finished, the power
* constraint is released.
* The following statements are valid:
* - After UART_open(): the device is still allowed to enter standby.
* - During UART_read(): the device cannot enter standby.
* - After an RX error (overrun, break, parity, framing): device is allowed to enter standby.
* - After a successful UART_read():
* The device is allowed to enter standby, but RX remains on.
* - _Note_: Device might enter standby while a byte is being
* received if UART_read() is not called again after a successful
* read. This could result in corrupt data being received.
* - _Note_: Application thread should typically either issue another read after
* UART_read() completes successfully, or call
* UART_readCancel() to disable RX and thus assuring that no data
* is received while entering standby.
* - After UART_read() times out in ::UART_MODE_BLOCKING:
* The device is allowed to enter standby, but RX remains on.
* - _Note_: Device might enter standby while a byte is being
* received if UART_read() is not called again after a timeout.
* This could result in corrupt data being received.
* - _Note_: Application thread should typically either issue another read after
* UART_read() times out to continue reception.
* .
* - During UART_write(): the device cannot enter standby.
* - After UART_write() succeeds: the device can enter standby.
* - If UART_writeCancel() is called: the device can enter standby.
* - After write timeout: the device can enter standby.
*
* # Flow Control #
* To enable Flow Control, the RTS and CTS pins must be assigned in the
* ::UARTCC26XX_HWAttrsV2:
* @code
* const UARTCC26XX_HWAttrsV2 uartCC26xxHWAttrs[] = {
* {
* .baseAddr = UART0_BASE,
* .powerMngrId = PERIPH_UART0,
* .intNum = INT_UART0,
* .intPriority = ~0,
* .swiPriority = 0,
* .txPin = Board_UART_TX,
* .rxPin = Board_UART_RX,
* .ctsPin = Board_UART_CTS,
* .rtsPin = Board_UART_RTS
* .ringBufPtr = uartCC26XXRingBuffer[0],
* .ringBufSize = sizeof(uartCC26XXRingBuffer[0]),
* .txIntFifoThr= UARTCC26XX_FIFO_THRESHOLD_1_8,
* .rxIntFifoThr= UARTCC26XX_FIFO_THRESHOLD_4_8
* }
* };
* @endcode
*
* If the RTS and CTS pins are set to ::PIN_UNASSIGNED, the flow control is
* disabled. An example is shown in the ::UARTCC26XX_HWAttrsV2 description.
*
* # Supported Functions #
* | Generic API function | API function | Description |
* |----------------------|--------------------------|------------------------
* | UART_init() | UARTCC26XX_init() | Initialize UART driver |
* | UART_open() | UARTCC26XX_open() | Initialize UART HW and set system dependencies |
* | UART_close() | UARTCC26XX_close() | Disable UART HW and release system dependencies |
* | UART_control() | UARTCC26XX_control() | Configure an already opened UART handle |
* | UART_read() | UARTCC26XX_read() | Start read from UART |
* | UART_readCancel() | UARTCC26XX_readCancel() | Cancel ongoing read from UART |
* | UART_write() | UARTCC26XX_write() | Start write to UART |
* | UART_writeCancel() | UARTCC26XX_writeCancel() | Cancel ongoing write to UART |
*
* @note All calls should go through the generic API
*
* # Not Supported Functionality #
* The CC26XX UART driver currently does not support:
* - ::UART_ECHO_ON
* - ::UART_DATA_TEXT
* - UART_readPolling()
* - UART_writePolling()
*
* # Use Cases @anchor USE_CASES #
* ## Basic Receive #
* Receive 100 bytes over UART in ::UART_MODE_BLOCKING.
* @code
* UART_Handle handle;
* UART_Params params;
* uint8_t rxBuf[100]; // Receive buffer
* uint32_t timeoutUs = 5000; // 5ms timeout, default timeout is no timeout (BIOS_WAIT_FOREVER)
*
* // Init UART and specify non-default parameters
* UART_Params_init(¶ms);
* params.baudRate = 9600;
* params.writeDataMode = UART_DATA_BINARY;
* params.readTimeout = timeoutUs / ClockP_tickPeriod; // Default tick period is 10us
*
* // Open the UART and do the read
* handle = UART_open(Board_UART, ¶ms);
* int rxBytes = UART_read(handle, rxBuf, 100);
* @endcode
*
* ## Receive with Return Partial #
* This use case will read in ::UART_MODE_BLOCKING until the wanted amount of bytes is
* received or until a started reception is inactive for a 32-bit period.
* This UART_read() call can also be used when unknown amount of bytes shall
* be read. Note: The partial return is also possible in ::UART_MODE_CALLBACK mode.
* @code
* UART_Handle handle;
* UART_Params params;
* uint8_t rxBuf[100]; // Receive buffer
*
* // Init UART and specify non-default parameters
* UART_Params_init(¶ms);
* params.baudRate = 9600;
* params.writeDataMode = UART_DATA_BINARY;
*
* // Open the UART and initiate the partial read
* handle = UART_open(Board_UART, ¶ms);
* // Enable RETURN_PARTIAL
* UART_control(handle, UARTCC26XX_CMD_RETURN_PARTIAL_ENABLE, NULL);
* // Begin read
* int rxBytes = UART_read(handle, rxBuf, 100));
* @endcode
*
* ## Basic Transmit #
* This case will configure the UART to send the data in txBuf in
* BLOCKING_MODE.
* @code
* UART_Handle handle;
* UART_Params params;
* uint8_t txBuf[] = "Hello World"; // Transmit buffer
*
* // Init UART and specify non-default parameters
* UART_Params_init(¶ms);
* params.baudRate = 9600;
* params.writeDataMode = UART_DATA_BINARY;
*
* // Open the UART and do the write
* handle = UART_open(Board_UART, ¶ms);
* UART_write(handle, txBuf, sizeof(txBuf));
* @endcode
*
* ## Receive Continously in ::UART_MODE_CALLBACK @anchor USE_CASE_CB #
* This case will configure the UART to receive and transmit continously in
* ::UART_MODE_CALLBACK, 16 bytes at the time and transmit them back via UART TX.
* Note that UART_Params.readTimeout is not in use when using ::UART_MODE_CALLBACK mode.
* @code
* #define MAX_NUM_RX_BYTES 1000 // Maximum RX bytes to receive in one go
* #define MAX_NUM_TX_BYTES 1000 // Maximum TX bytes to send in one go
*
* uint32_t wantedRxBytes; // Number of bytes received so far
* uint8_t rxBuf[MAX_NUM_RX_BYTES]; // Receive buffer
* uint8_t txBuf[MAX_NUM_TX_BYTES]; // Transmit buffer
*
* // Read callback function
* static void readCallback(UART_Handle handle, void *rxBuf, size_t size)
* {
* // Make sure we received all expected bytes
* if (size == wantedRxBytes) {
* // Copy bytes from RX buffer to TX buffer
* for(size_t i = 0; i < size; i++)
* txBuf[i] = ((uint8_t*)rxBuf)[i];
*
* // Echo the bytes received back to transmitter
* UART_write(handle, txBuf, size);
*
* // Start another read, with size the same as it was during first call to
* // UART_read()
* UART_read(handle, rxBuf, wantedRxBytes);
* }
* else {
* // Handle error or call to UART_readCancel()
* }
* }
*
* // Write callback function
* static void writeCallback(UART_Handle handle, void *rxBuf, size_t size)
* {
* // Do nothing
* }
*
* static void taskFxn(uintptr_t a0, uintptr_t a1)
* {
* UART_Handle handle;
* UART_Params params;
*
* // Init UART
* UART_init();
*
* // Specify non-default parameters
* UART_Params_init(¶ms);
* params.baudRate = 9600;
* params.writeMode = UART_MODE_CALLBACK;
* params.writeDataMode = UART_DATA_BINARY;
* params.writeCallback = writeCallback;
* params.readMode = UART_MODE_CALLBACK;
* params.readDataMode = UART_DATA_BINARY;
* params.readCallback = readCallback;
*
* // Open the UART and initiate the first read
* handle = UART_open(Board_UART, ¶ms);
* wantedRxBytes = 16;
* int rxBytes = UART_read(handle, rxBuf, wantedRxBytes);
*
* while(true); // Wait forever
* }
* @endcode
*
* # Baud Rate #
* The CC26xx driver supports baud rates up to 3Mbaud.
* However, when receiving more than 32 bytes back-to-back the baud
* rate is limited to approximately 2Mbaud.
* The throughput is also dependent on the user application.
*
* # Stack requirements #
* There are no additional stack requirements for calling UART_read() within
* its own callback.
*
* # Instrumentation #
* The UART driver interface produces log statements if instrumentation is
* enabled.
*
* Diagnostics Mask | Log details |
* ---------------- | ----------- |
* Diags_USER1 | basic UART operations performed |
* Diags_USER2 | detailed UART operations performed |
*
* ============================================================================
*/
#ifndef ti_drivers_uart_UARTCC26XX__include
#define ti_drivers_uart_UARTCC26XX__include
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include <ti/drivers/UART.h>
#include <ti/drivers/pin/PINCC26XX.h>
#include <ti/drivers/Power.h>
#include <ti/drivers/utils/RingBuf.h>
#include <ti/devices/DeviceFamily.h>
#include DeviceFamily_constructPath(driverlib/uart.h)
#include <ti/drivers/dpl/HwiP.h>
#include <ti/drivers/dpl/SwiP.h>
#include <ti/drivers/dpl/ClockP.h>
#include <ti/drivers/dpl/SemaphoreP.h>
/**
* @addtogroup UART_STATUS
* UARTCC26XX_STATUS_* macros are command codes only defined in the
* UARTCC26XX.h driver implementation and need to:
* @code
* #include <ti/drivers/uart/UARTCC26XX.h>
* @endcode
* @{
*/
/* Add UARTCC26XX_STATUS_* macros here */
/** @}*/
/**
* @addtogroup UART_CMD
* UARTCC26XX_CMD_* macros are command codes only defined in the UARTCC26XX.h
* driver implementation and need to:
* @code
* #include <ti/drivers/uart/UARTCC26XX.h>
* @endcode
* @{
*/
/*!
* @brief Command used by UART_control to enable partial return
*
* Enabling this command allows UART_read to return partial data if data
* reception is inactive for a given 32-bit period. With this command @b arg
* is @a don't @a care and it returns UART_STATUS_SUCCESS.
*/
#define UARTCC26XX_CMD_RETURN_PARTIAL_ENABLE (UART_CMD_RESERVED + 0)
/*!
* @brief Command used by UART_control to disable partial return
*
* Disabling this command returns the UARTCC26XX to the default blocking
* behavior where UART_read blocks until all data bytes were received. With
* this comand @b arg is @a don't @a care and it returns UART_STATUS_SUCCESS.
*/
#define UARTCC26XX_CMD_RETURN_PARTIAL_DISABLE (UART_CMD_RESERVED + 1)
/*!
* @brief Command used by UART_control to flush the RX FIFO
*
* This control command flushes any contents in the RX FIFO. With this command
* @b arg is @a don't @a care and it returns UART_STATUS_SUCCESS.
*/
#define UARTCC26XX_CMD_RX_FIFO_FLUSH (UART_CMD_RESERVED + 2)
/** @}*/
/*! Size of the TX and RX FIFOs is 32 items */
#define UARTCC26XX_FIFO_SIZE 32
/*!
* @brief UART TX/RX interrupt FIFO threshold select
*
* Defined FIFO thresholds for generation of both TX interrupt and RX interrupt.
* The default value (UARTCC26XX_FIFO_THRESHOLD_DEFAULT) is defined for backward compatibility handling.
*/
/* NOTE: enumerator order fixes the implicit values (DEFAULT == 0, then
 * 1, 2, ...); do not reorder or insert entries without auditing the
 * driver code that consumes them. */
typedef enum UARTCC26XX_FifoThreshold {
    UARTCC26XX_FIFO_THRESHOLD_DEFAULT = 0,  /*!< Backward-compatibility value: forces a FIFO
                                                 threshold of 1/8 for the TX interrupt and
                                                 4/8 for the RX interrupt */
    UARTCC26XX_FIFO_THRESHOLD_1_8,          /*!< FIFO threshold of 1/8 full */
    UARTCC26XX_FIFO_THRESHOLD_2_8,          /*!< FIFO threshold of 2/8 full */
    UARTCC26XX_FIFO_THRESHOLD_4_8,          /*!< FIFO threshold of 4/8 full */
    UARTCC26XX_FIFO_THRESHOLD_6_8,          /*!< FIFO threshold of 6/8 full */
    UARTCC26XX_FIFO_THRESHOLD_7_8           /*!< FIFO threshold of 7/8 full */
} UARTCC26XX_FifoThreshold;
/* BACKWARDS COMPATIBILITY */
#define UARTCC26XX_RETURN_PARTIAL_ENABLE UARTCC26XX_CMD_RETURN_PARTIAL_ENABLE
#define UARTCC26XX_RETURN_PARTIAL_DISABLE UARTCC26XX_CMD_RETURN_PARTIAL_DISABLE
/* END BACKWARDS COMPATIBILITY */
/*!
 *  @brief  Optional callback used by the UART driver to notify the
 *          application when a receive error (FIFO overrun, parity
 *          error, etc.) occurs.  Per the driver description above, it
 *          runs in HWI context, so it must be short and non-blocking.
 *
 *  @param  handle  Handle of the UART instance that observed the error
 *
 *  @param  error   The current value of the receive status register
 */
typedef void (*UARTCC26XX_ErrorCallback) (UART_Handle handle, uint32_t error);

/* Function table implementing the generic UART interface for this driver;
 * referenced from the UART_Config entries. */
extern const UART_FxnTable UARTCC26XX_fxnTable;
/*!
* @brief UARTCC26XX Hardware attributes
*
* These fields, with the exception of intPriority, txIntFifoThr and
* rxIntFifoThr, are used by driverlib APIs and therefore must be populated
* by driverlib macro definitions.
* For CC26xxWare these definitions are found in:
* - inc/hw_memmap.h
* - inc/hw_ints.h
*
* intPriority is the UART peripheral's interrupt priority, as defined by the
* underlying OS. It is passed unmodified to the underlying OS's interrupt
* handler creation code, so you need to refer to the OS documentation
* for usage. For example, for SYS/BIOS applications, refer to the
* ti.sysbios.family.arm.m3.Hwi documentation for SYS/BIOS usage of
* interrupt priorities. If the driver uses the ti.dpl interface
* instead of making OS calls directly, then the HwiP port handles the
* interrupt priority in an OS specific way. In the case of the SYS/BIOS
* port, intPriority is passed unmodified to Hwi_create().
*
* A sample structure is shown below:
* @code
* const UARTCC26XX_HWAttrsV2 uartCC26xxHWAttrs[] = {
* {
* .baseAddr = UART0_BASE,
* .powerMngrId = PERIPH_UART0,
* .intNum = INT_UART0,
* .intPriority = ~0,
* .swiPriority = 0,
* .txPin = Board_UART_TX,
* .rxPin = Board_UART_RX,
* .ctsPin = PIN_UNASSIGNED,
* .rtsPin = PIN_UNASSIGNED,
* .ringBufPtr = uartCC26XXRingBuffer[0],
* .ringBufSize = sizeof(uartCC26XXRingBuffer[0]),
* .txIntFifoThr= UARTCC26XX_FIFO_THRESHOLD_1_8,
* .rxIntFifoThr= UARTCC26XX_FIFO_THRESHOLD_4_8
* }
* };
* @endcode
*
* The .ctsPin and .rtsPin must be assigned to enable flow control.
*/
/* NOTE: member order is part of the public contract -- board files
 * initialize this struct; do not reorder fields. */
typedef struct UARTCC26XX_HWAttrsV2 {
    uint32_t     baseAddr;    /*!< UART Peripheral's base address */
    uint32_t     powerMngrId; /*!< UART Peripheral's power manager ID */
    int          intNum;      /*!< UART Peripheral's interrupt vector */
    /*! @brief UART Peripheral's interrupt priority.

        The CC26xx uses three of the priority bits, meaning ~0 has the same
        effect as (7 << 5).

        (7 << 5) will apply the lowest priority.

        (1 << 5) will apply the highest priority.

        Setting the priority to 0 is not supported by this driver.

        HWI's with priority 0 ignore the HWI dispatcher to support
        zero-latency interrupts, thus invalidating the critical sections in
        this driver.
    */
    uint8_t      intPriority;
    /*! @brief UART SWI priority.
        (NOTE(review): original comment said "SPI", a copy-paste slip.)

        The higher the number, the higher the priority.
        The minimum is 0 and the maximum is 15 by default.
        The maximum can be reduced to save RAM by adding or modifying
        Swi.numPriorities in the kernel configuration file.
    */
    uint32_t     swiPriority;
    uint8_t      txPin;       /*!< UART TX pin */
    uint8_t      rxPin;       /*!< UART RX pin */
    uint8_t      ctsPin;      /*!< UART CTS pin (PIN_UNASSIGNED disables flow control) */
    uint8_t      rtsPin;      /*!< UART RTS pin (PIN_UNASSIGNED disables flow control) */
    unsigned char *ringBufPtr; /*!< Pointer to an application ring buffer */
    size_t        ringBufSize; /*!< Size of ringBufPtr */
    UARTCC26XX_FifoThreshold txIntFifoThr; /*!< UART TX interrupt FIFO threshold select */
    UARTCC26XX_FifoThreshold rxIntFifoThr; /*!< UART RX interrupt FIFO threshold select */
    /*! Application error function to be called on receive errors */
    UARTCC26XX_ErrorCallback errorFxn;
} UARTCC26XX_HWAttrsV2;
/*!
* @brief UART status
*
* The UART Status is used to flag the different Receive Errors.
*/
/* Status codes reported in the driver object after a read/write.
 * The RX error values alias the driverlib UART_RXERROR_* bit masks so
 * the hardware status register can be stored directly. */
typedef enum UART_Status {
    UART_TIMED_OUT     = 0x10,                 /*!< Read/write operation timed out */
    UART_PARITY_ERROR  = UART_RXERROR_PARITY,  /*!< UART parity error on receive */
    /* NOTE(review): "BRAKE" is a long-standing misspelling of "break";
     * the identifier is public API and must not be renamed. */
    UART_BRAKE_ERROR   = UART_RXERROR_BREAK,   /*!< UART break error on receive */
    UART_OVERRUN_ERROR = UART_RXERROR_OVERRUN, /*!< UART RX FIFO overrun error */
    UART_FRAMING_ERROR = UART_RXERROR_FRAMING, /*!< UART framing error on receive */
    UART_OK            = 0x0                   /*!< No error */
} UART_Status;
/*!
 * @brief UARTCC26XX Object
 *
 * Driver-internal state for one UART peripheral instance. Allocated by the
 * application (one per UART_config entry) but managed entirely by the driver.
 *
 * The application must not access any member variables of this structure!
 */
typedef struct UARTCC26XX_Object {
/* UART control variables */
bool opened; /*!< Has the obj been opened */
UART_Mode readMode; /*!< Mode for all read calls */
UART_Mode writeMode; /*!< Mode for all write calls */
unsigned int readTimeout; /*!< Timeout for read semaphore in BLOCKING mode*/
unsigned int writeTimeout; /*!< Timeout for write semaphore in BLOCKING mode*/
UART_Callback readCallback; /*!< Pointer to read callback */
UART_Callback writeCallback; /*!< Pointer to write callback */
UART_ReturnMode readReturnMode; /*!< Receive return mode */
UART_DataMode readDataMode; /*!< Type of data being read */
UART_DataMode writeDataMode; /*!< Type of data being written */
/*! @brief Baud rate for CC26xx UART
 *
 * The CC26xx driver supports baud rates up to 3Mbaud.
 * However, when receiving more than 32 bytes back-to-back the baud
 * rate is limited to approx. 2Mbaud.
 * The throughput is also dependent on the user application.
 */
uint32_t baudRate;
UART_LEN dataLength; /*!< Data length for UART */
UART_STOP stopBits; /*!< Stop bits for UART */
UART_PAR parityType; /*!< Parity bit type for UART */
UART_Status status; /*!< Status variable */
/* UART write variables */
const void *writeBuf; /*!< Buffer data pointer */
size_t writeCount; /*!< Number of Chars sent */
size_t writeSize; /*!< Chars remaining in buffer */
bool writeCR; /*!< Write a return character */
/* UART receive variables */
bool readRetPartial; /*!< Return partial RX data if timeout occurs */
void *readBuf; /*!< Buffer data pointer */
size_t readCount; /*!< Number of Chars read */
size_t readSize; /*!< Chars remaining in buffer */
RingBuf_Object ringBuffer; /*!< local circular buffer object */
/* PIN driver state object and handle */
PIN_State pinState; /*!< PIN driver state storage for the UART I/O pins */
PIN_Handle hPin; /*!< Handle to the allocated UART I/O pins */
/*! UART post-notification function pointer */
void *uartPostFxn;
/*! UART post-notification object */
Power_NotifyObj uartPostObj;
/* UART SYS/BIOS objects */
HwiP_Struct hwi; /*!< Hwi object */
SwiP_Struct swi; /*!< Swi object */
SemaphoreP_Struct writeSem; /*!< UART write semaphore*/
SemaphoreP_Struct readSem; /*!< UART read semaphore */
ClockP_Struct txFifoEmptyClk; /*!< UART TX FIFO empty clock */
bool uartRxPowerConstraint; /*!< NOTE(review): presumably true while an RX Power constraint is set — confirm against driver source */
bool uartTxPowerConstraint; /*!< NOTE(review): presumably true while a TX Power constraint is set — confirm against driver source */
} UARTCC26XX_Object, *UARTCC26XX_Handle;
#ifdef __cplusplus
}
#endif
#endif /* ti_drivers_uart_UARTCC26XX__include */
| 43.332817 | 147 | 0.65695 | [
"object",
"vector"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.