hexsha stringlengths 40 40 | size int64 22 2.4M | ext stringclasses 5
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 260 | max_stars_repo_name stringlengths 5 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 260 | max_issues_repo_name stringlengths 5 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 260 | max_forks_repo_name stringlengths 5 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 22 2.4M | avg_line_length float64 5 169k | max_line_length int64 5 786k | alphanum_fraction float64 0.06 0.95 | matches listlengths 1 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d08ac025f61e649957aa15793fa940bfce47e5cd | 122,275 | c | C | generic/tclCmdIL.c | bitkeeper-scm/tcl | 67374d1d4b20652a780eb29ce95d25ce1ea081f9 | [
"TCL"
] | null | null | null | generic/tclCmdIL.c | bitkeeper-scm/tcl | 67374d1d4b20652a780eb29ce95d25ce1ea081f9 | [
"TCL"
] | null | null | null | generic/tclCmdIL.c | bitkeeper-scm/tcl | 67374d1d4b20652a780eb29ce95d25ce1ea081f9 | [
"TCL"
] | null | null | null | /*
* tclCmdIL.c --
*
* This file contains the top-level command routines for most of the Tcl
* built-in commands whose names begin with the letters I through L. It
* contains only commands in the generic core (i.e., those that don't
* depend much upon UNIX facilities).
*
* Copyright (c) 1987-1993 The Regents of the University of California.
* Copyright (c) 1993-1997 Lucent Technologies.
* Copyright (c) 1994-1997 Sun Microsystems, Inc.
* Copyright (c) 1998-1999 by Scriptics Corporation.
* Copyright (c) 2001 by Kevin B. Kenny. All rights reserved.
* Copyright (c) 2005 Donal K. Fellows.
* Copyright (c) 2007 BitMover, Inc.
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
#include "tclInt.h"
#include "tclRegexp.h"
/*
* During execution of the "lsort" command, structures of the following type
* are used to arrange the objects being sorted into a collection of linked
* lists.
*/
typedef struct SortElement {
    union {			/* The value that we sorting by. */
	const char *strValuePtr;	/* Key as a C string. */
	Tcl_WideInt wideValue;		/* Key as a wide integer. */
	double doubleValue;		/* Key as a double. */
	Tcl_Obj *objValuePtr;		/* Key as a Tcl object. */
    } collationKey;
    union {			/* Object being sorted, or its index. */
	Tcl_Obj *objPtr;	/* The element value itself. */
	int index;		/* The element's position in the input. */
    } payload;
    struct SortElement *nextPtr;/* Next element in the list, or NULL for end
				 * of list. */
} SortElement;
/*
* These function pointer types are used with the "lsearch" and "lsort"
* commands to facilitate the "-nocase" option.
*/
typedef int (*SortStrCmpFn_t) (const char *, const char *);
				/* strcmp-style string comparator; a
				 * case-insensitive variant is substituted
				 * for -nocase. */
typedef int (*SortMemCmpFn_t) (const void *, const void *, size_t);
				/* memcmp-style bounded comparator. */
/*
* The "lsort" command needs to pass certain information down to the function
* that compares two list elements, and the comparison function needs to pass
* success or failure information back up to the top-level "lsort" command.
* The following structure is used to pass this information.
*/
typedef struct SortInfo {
    int isIncreasing;		/* Nonzero means sort in increasing order. */
    int sortMode;		/* The sort mode. One of SORTMODE_* values
				 * defined below. */
    Tcl_Obj *compareCmdPtr;	/* The Tcl comparison command when sortMode is
				 * SORTMODE_COMMAND. Pre-initialized to hold
				 * base of command. */
    int *indexv;		/* If the -index option was specified, this
				 * holds the indexes contained in the list
				 * supplied as an argument to that option.
				 * NULL if no indexes supplied, and points to
				 * singleIndex field when only one
				 * supplied. */
    int indexc;			/* Number of indexes in indexv array. */
    int singleIndex;		/* Static space for common index case. */
    int unique;			/* Nonzero to keep only one of each set of
				 * equal elements (presumably the -unique
				 * flag; the lsort body is not in view). */
    int numElements;		/* Count of elements in the sort's output
				 * (NOTE(review): set/used by lsort code not
				 * shown in this chunk). */
    Tcl_Interp *interp;		/* The interpreter in which the sort is being
				 * done. */
    int resultCode;		/* Completion code for the lsort command. If
				 * an error occurs during the sort this is
				 * changed from TCL_OK to TCL_ERROR. */
} SortInfo;
/*
* The "sortMode" field of the SortInfo structure can take on any of the
* following values.
*/
#define SORTMODE_ASCII 0	/* Compare elements as strings. */
#define SORTMODE_INTEGER 1	/* Compare elements as wide integers. */
#define SORTMODE_REAL 2		/* Compare elements as doubles. */
#define SORTMODE_COMMAND 3	/* Compare via a user-supplied command. */
#define SORTMODE_DICTIONARY 4	/* Dictionary-style string comparison. */
#define SORTMODE_ASCII_NC 8	/* String comparison, case-insensitive
				 * ("NC" = nocase). */
/*
 * Magic values for the index field of the SortInfo structure. Note that the
 * index "end-1" will be translated to SORTIDX_END-1, etc.
 */
#define SORTIDX_NONE -1		/* Not indexed; use whole value. */
#define SORTIDX_END -2		/* Indexed from end. */
/*
* Forward declarations for procedures defined in this file:
*/
/* Comparison helper shared by lsort/lsearch dictionary modes. */
static int		DictionaryCompare(const char *left, const char *right);
/* NRE continuation implementing the scan over if/elseif/else clauses. */
static int		IfConditionCallback(ClientData data[],
			    Tcl_Interp *interp, int result);
/* One handler per "info" ensemble subcommand implemented in this file. */
static int		InfoArgsCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoBodyCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoCmdCountCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoCommandsCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoCompleteCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoDefaultCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
/* TIP #348 - New 'info' subcommand 'errorstack' */
static int		InfoErrorStackCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
/* TIP #280 - New 'info' subcommand 'frame' */
static int		InfoFrameCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoFunctionsCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoHostnameCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoLevelCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoLibraryCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoLoadedCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoNameOfExecutableCmd(ClientData dummy,
			    Tcl_Interp *interp, int objc,
			    Tcl_Obj *const objv[]);
static int		InfoPatchLevelCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoProcsCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoScriptCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoSharedlibCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
static int		InfoTclVersionCmd(ClientData dummy, Tcl_Interp *interp,
			    int objc, Tcl_Obj *const objv[]);
/* Merge-sort machinery used by the lsort implementation. */
static SortElement *	MergeLists(SortElement *leftPtr, SortElement *rightPtr,
			    SortInfo *infoPtr);
static int		SortCompare(SortElement *firstPtr, SortElement *second,
			    SortInfo *infoPtr);
static Tcl_Obj *	SelectObjFromSublist(Tcl_Obj *firstPtr,
			    SortInfo *infoPtr);
/*
* Array of values describing how to implement each standard subcommand of the
* "info" command.
*/
static const EnsembleImplMap defaultInfoMap[] = {
    /*
     * Each row: subcommand name, implementation function, bytecode compile
     * function; the remaining three slots are unused here (NULL, NULL, 0).
     */
    {"args",		   InfoArgsCmd,		    TclCompileBasic1ArgCmd, NULL, NULL, 0},
    {"body",		   InfoBodyCmd,		    TclCompileBasic1ArgCmd, NULL, NULL, 0},
    {"cmdcount",	   InfoCmdCountCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"commands",	   InfoCommandsCmd,	    TclCompileInfoCommandsCmd, NULL, NULL, 0},
    {"complete",	   InfoCompleteCmd,	    TclCompileBasic1ArgCmd, NULL, NULL, 0},
    {"coroutine",	   TclInfoCoroutineCmd,     TclCompileInfoCoroutineCmd, NULL, NULL, 0},
    {"default",		   InfoDefaultCmd,	    TclCompileBasic3ArgCmd, NULL, NULL, 0},
    {"errorstack",	   InfoErrorStackCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"exists",		   TclInfoExistsCmd,	    TclCompileInfoExistsCmd, NULL, NULL, 0},
    {"frame",		   InfoFrameCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"functions",	   InfoFunctionsCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"globals",		   TclInfoGlobalsCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"hostname",	   InfoHostnameCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"level",		   InfoLevelCmd,	    TclCompileInfoLevelCmd, NULL, NULL, 0},
    {"library",		   InfoLibraryCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"loaded",		   InfoLoadedCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"locals",		   TclInfoLocalsCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"nameofexecutable",   InfoNameOfExecutableCmd, TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"patchlevel",	   InfoPatchLevelCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"procs",		   InfoProcsCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"script",		   InfoScriptCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {"sharedlibextension", InfoSharedlibCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"tclversion",	   InfoTclVersionCmd,	    TclCompileBasic0ArgCmd, NULL, NULL, 0},
    {"vars",		   TclInfoVarsCmd,	    TclCompileBasic0Or1ArgCmd, NULL, NULL, 0},
    {NULL, NULL, NULL, NULL, NULL, 0}	/* Terminator. */
};
/*
*----------------------------------------------------------------------
*
* Tcl_IfObjCmd --
*
* This procedure is invoked to process the "if" Tcl command. See the
* user documentation for details on what it does.
*
* With the bytecode compiler, this procedure is only called when a
* command name is computed at runtime, and is "if" or the name to which
* "if" was renamed: e.g., "set z if; $z 1 {puts foo}"
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_IfObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    /* Trampoline onto the non-recursive (NRE) implementation below. */
    return Tcl_NRCallObjProc(interp, TclNRIfObjCmd, dummy, objc, objv);
}
/*
 * NRE-aware implementation of [if]: evaluates the first condition via
 * Tcl_NRExprObj and hands the rest of the clause scanning to
 * IfConditionCallback.
 */
int
TclNRIfObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *boolObj;		/* Receives the condition's boolean result;
				 * released by IfConditionCallback. */

    if (objc <= 1) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"wrong # args: no expression after \"%s\" argument",
		TclGetString(objv[0])));
	Tcl_SetErrorCode(interp, "TCL", "WRONGARGS", NULL);
	return TCL_ERROR;
    }

    /*
     * At this point, objv[1] refers to the main expression to test. The
     * arguments after the expression must be "then" (optional) and a script
     * to execute if the expression is true.
     */

    TclNewObj(boolObj);
    Tcl_NRAddCallback(interp, IfConditionCallback, INT2PTR(objc),
	    (ClientData) objv, INT2PTR(1), boolObj);
    return Tcl_NRExprObj(interp, objv[1], boolObj);
}
/*
 * Continuation invoked after each [if]/[elseif] condition is evaluated.
 * Scans forward through then/elseif/else clauses and eventually evaluates
 * the selected body (or returns an error for malformed syntax).
 */
static int
IfConditionCallback(
    ClientData data[],		/* [0] objc, [1] objv, [2] index of the word
				 * whose expression was just evaluated,
				 * [3] Tcl_Obj holding the boolean result. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int result)			/* Completion code of the condition
				 * evaluation. */
{
    Interp *iPtr = (Interp *) interp;
    int objc = PTR2INT(data[0]);
    Tcl_Obj *const *objv = data[1];
    int i = PTR2INT(data[2]);
    Tcl_Obj *boolObj = data[3];
    int value, thenScriptIndex = 0;
    const char *clause;

    /*
     * Release the condition-result holder in every exit path; it was
     * allocated by TclNRIfObjCmd (or by the recursion below).
     */

    if (result != TCL_OK) {
	TclDecrRefCount(boolObj);
	return result;
    }
    if (Tcl_GetBooleanFromObj(interp, boolObj, &value) != TCL_OK) {
	TclDecrRefCount(boolObj);
	return TCL_ERROR;
    }
    TclDecrRefCount(boolObj);

    while (1) {
	/* Advance past the expression word to the optional "then". */
	i++;
	if (i >= objc) {
	    goto missingScript;
	}
	clause = TclGetString(objv[i]);
	if ((i < objc) && (strcmp(clause, "then") == 0)) {
	    /* (The i < objc guard is redundant: i was range-checked just
	     * above.) */
	    i++;
	}
	if (i >= objc) {
	    goto missingScript;
	}
	if (value) {
	    /*
	     * Condition was true: remember which body to run, but keep
	     * scanning so the whole command is syntax-checked before
	     * anything executes.
	     */
	    thenScriptIndex = i;
	    value = 0;
	}

	/*
	 * The expression evaluated to false. Skip the command, then see if
	 * there is an "else" or "elseif" clause.
	 */

	i++;
	if (i >= objc) {
	    if (thenScriptIndex) {
		/*
		 * TIP #280. Make invoking context available to branch.
		 */

		return TclNREvalObjEx(interp, objv[thenScriptIndex], 0,
			iPtr->cmdFramePtr, thenScriptIndex);
	    }
	    return TCL_OK;
	}
	clause = TclGetString(objv[i]);
	if ((clause[0] != 'e') || (strcmp(clause, "elseif") != 0)) {
	    break;
	}
	i++;

	/*
	 * At this point in the loop, objv and objc refer to an expression to
	 * test, either for the main expression or an expression following an
	 * "elseif". The arguments after the expression must be "then"
	 * (optional) and a script to execute if the expression is true.
	 */

	if (i >= objc) {
	    Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		    "wrong # args: no expression after \"%s\" argument",
		    clause));
	    Tcl_SetErrorCode(interp, "TCL", "WRONGARGS", NULL);
	    return TCL_ERROR;
	}
	if (!thenScriptIndex) {
	    /*
	     * No true branch found yet: schedule evaluation of this "elseif"
	     * expression through a fresh NRE callback.
	     */
	    TclNewObj(boolObj);
	    Tcl_NRAddCallback(interp, IfConditionCallback, data[0], data[1],
		    INT2PTR(i), boolObj);
	    return Tcl_NRExprObj(interp, objv[i], boolObj);
	}
    }

    /*
     * Couldn't find a "then" or "elseif" clause to execute. Check now for an
     * "else" clause. We know that there's at least one more argument when we
     * get here.
     */

    if (strcmp(clause, "else") == 0) {
	i++;
	if (i >= objc) {
	    goto missingScript;
	}
    }
    if (i < objc - 1) {
	Tcl_SetObjResult(interp, Tcl_NewStringObj(
		"wrong # args: extra words after \"else\" clause in \"if\" command",
		-1));
	Tcl_SetErrorCode(interp, "TCL", "WRONGARGS", NULL);
	return TCL_ERROR;
    }
    if (thenScriptIndex) {
	/*
	 * TIP #280. Make invoking context available to branch/else.
	 */

	return TclNREvalObjEx(interp, objv[thenScriptIndex], 0,
		iPtr->cmdFramePtr, thenScriptIndex);
    }

    /* No earlier branch fired: run the final (else) script. */
    return TclNREvalObjEx(interp, objv[i], 0, iPtr->cmdFramePtr, i);

  missingScript:
    Tcl_SetObjResult(interp, Tcl_ObjPrintf(
	    "wrong # args: no script following \"%s\" argument",
	    TclGetString(objv[i-1])));
    Tcl_SetErrorCode(interp, "TCL", "WRONGARGS", NULL);
    return TCL_ERROR;
}
/*
*----------------------------------------------------------------------
*
* Tcl_IncrObjCmd --
*
* This procedure is invoked to process the "incr" Tcl command. See the
* user documentation for details on what it does.
*
* With the bytecode compiler, this procedure is only called when a
* command name is computed at runtime, and is "incr" or the name to
* which "incr" was renamed: e.g., "set z incr; $z i -1"
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_IncrObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *incrAmountPtr;	/* Amount to add; defaults to 1. */
    Tcl_Obj *updatedValuePtr;	/* Variable's value after the increment. */

    if (objc < 2 || objc > 3) {
	Tcl_WrongNumArgs(interp, 1, objv, "varName ?increment?");
	return TCL_ERROR;
    }

    /*
     * Hold a reference on the increment while the variable write runs, since
     * the default case creates a fresh object.
     */

    incrAmountPtr = (objc == 3) ? objv[2] : Tcl_NewIntObj(1);
    Tcl_IncrRefCount(incrAmountPtr);
    updatedValuePtr = TclIncrObjVar2(interp, objv[1], NULL,
	    incrAmountPtr, TCL_LEAVE_ERR_MSG);
    Tcl_DecrRefCount(incrAmountPtr);

    if (updatedValuePtr == NULL) {
	return TCL_ERROR;
    }

    /*
     * Report the variable's new value as the command result.
     */

    Tcl_SetObjResult(interp, updatedValuePtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* TclInitInfoCmd --
*
* This function is called to create the "info" Tcl command. See the user
* documentation for details on what it does.
*
* Results:
* Handle for the info command, or NULL on failure.
*
* Side effects:
* none
*
*----------------------------------------------------------------------
*/
Tcl_Command
TclInitInfoCmd(
    Tcl_Interp *interp)		/* Current interpreter. */
{
    /* Build [info] as an ensemble from the static subcommand table above. */
    return TclMakeEnsemble(interp, "info", defaultInfoMap);
}
/*
*----------------------------------------------------------------------
*
* InfoArgsCmd --
*
* Called to implement the "info args" command that returns the argument
* list for a procedure. Handles the following syntax:
*
* info args procName
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoArgsCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;
    const char *procName;
    Proc *procPtr;
    CompiledLocal *localVarPtr;
    Tcl_Obj *argListPtr;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "procname");
	return TCL_ERROR;
    }

    procName = TclGetString(objv[1]);
    procPtr = TclFindProc(iPtr, procName);
    if (procPtr == NULL) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"\"%s\" isn't a procedure", procName));
	Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "PROCEDURE", procName,
		NULL);
	return TCL_ERROR;
    }

    /*
     * Walk the compiled-local chain, keeping only the formal arguments.
     */

    argListPtr = Tcl_NewListObj(0, NULL);
    for (localVarPtr = procPtr->firstLocalPtr; localVarPtr != NULL;
	    localVarPtr = localVarPtr->nextPtr) {
	if (!TclIsVarArgument(localVarPtr)) {
	    continue;
	}
	Tcl_ListObjAppendElement(interp, argListPtr,
		Tcl_NewStringObj(localVarPtr->name, -1));
    }
    Tcl_SetObjResult(interp, argListPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoBodyCmd --
*
* Called to implement the "info body" command that returns the body for
* a procedure. Handles the following syntax:
*
* info body procName
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoBodyCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;
    const char *procName;
    Proc *procPtr;
    Tcl_Obj *bodyObjPtr;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "procname");
	return TCL_ERROR;
    }

    procName = TclGetString(objv[1]);
    procPtr = TclFindProc(iPtr, procName);
    if (procPtr == NULL) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"\"%s\" isn't a procedure", procName));
	Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "PROCEDURE", procName,
		NULL);
	return TCL_ERROR;
    }

    /*
     * Always return a *copy* of the body's string representation, never the
     * body object itself: callers mutating the returned object must not be
     * able to invalidate the procedure's internal (compiled) representation.
     */

    bodyObjPtr = procPtr->bodyPtr;
    if (bodyObjPtr->bytes == NULL) {
	/*
	 * Force generation of the string rep, which may be absent if the
	 * procedure has never been run. [Bug #545644]
	 */
	TclGetString(bodyObjPtr);
    }
    Tcl_SetObjResult(interp,
	    Tcl_NewStringObj(bodyObjPtr->bytes, bodyObjPtr->length));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoCmdCountCmd --
*
* Called to implement the "info cmdcount" command that returns the
* number of commands that have been executed. Handles the following
* syntax:
*
* info cmdcount
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoCmdCountCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }

    /*
     * Report the interpreter's running total of executed commands.
     */

    Tcl_SetObjResult(interp,
	    Tcl_NewIntObj(((Interp *) interp)->cmdCount));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoCommandsCmd --
*
* Called to implement the "info commands" command that returns the list
* of commands in the interpreter that match an optional pattern. The
* pattern, if any, consists of an optional sequence of namespace names
* separated by "::" qualifiers, which is followed by a glob-style
* pattern that restricts which commands are returned. Handles the
* following syntax:
*
* info commands ?pattern?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoCommandsCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *cmdName, *pattern;
    const char *simplePattern;	/* Trailing pattern with namespace
				 * qualifiers stripped. */
    register Tcl_HashEntry *entryPtr;
    Tcl_HashSearch search;
    Namespace *nsPtr;		/* Effective namespace to search. */
    Namespace *globalNsPtr = (Namespace *) Tcl_GetGlobalNamespace(interp);
    Namespace *currNsPtr = (Namespace *) Tcl_GetCurrentNamespace(interp);
    Tcl_Obj *listPtr, *elemObjPtr;
    int specificNsInPattern = 0;/* Init. to avoid compiler warning. */
    Tcl_Command cmd;
    int i;

    /*
     * Get the pattern and find the "effective namespace" in which to list
     * commands.
     */

    if (objc == 1) {
	simplePattern = NULL;
	nsPtr = currNsPtr;
	specificNsInPattern = 0;
    } else if (objc == 2) {
	/*
	 * From the pattern, get the effective namespace and the simple
	 * pattern (no namespace qualifiers or ::'s) at the end. If an error
	 * was found while parsing the pattern, return it. Otherwise, if the
	 * namespace wasn't found, just leave nsPtr NULL: we will return an
	 * empty list since no commands there can be found.
	 */

	Namespace *dummy1NsPtr, *dummy2NsPtr;

	pattern = TclGetString(objv[1]);
	TclGetNamespaceForQualName(interp, pattern, NULL, 0, &nsPtr,
		&dummy1NsPtr, &dummy2NsPtr, &simplePattern);

	if (nsPtr != NULL) {	/* We successfully found the pattern's ns. */
	    specificNsInPattern = (strcmp(simplePattern, pattern) != 0);
	}
    } else {
	Tcl_WrongNumArgs(interp, 1, objv, "?pattern?");
	return TCL_ERROR;
    }

    /*
     * Exit as quickly as possible if we couldn't find the namespace.
     */

    if (nsPtr == NULL) {
	return TCL_OK;
    }

    /*
     * Scan through the effective namespace's command table and create a list
     * with all commands that match the pattern. If a specific namespace was
     * requested in the pattern, qualify the command names with the namespace
     * name. Three strategies follow: (1) exact-name lookup for glob-free
     * patterns, (2) single-table scan when no command path applies, and
     * (3) a path-wide scan with a de-duplicating hash table.
     */

    listPtr = Tcl_NewListObj(0, NULL);

    if (simplePattern != NULL && TclMatchIsTrivial(simplePattern)) {
	/*
	 * Special case for when the pattern doesn't include any of glob's
	 * special characters. This lets us avoid scans of any hash tables.
	 */

	entryPtr = Tcl_FindHashEntry(&nsPtr->cmdTable, simplePattern);
	if (entryPtr != NULL) {
	    if (specificNsInPattern) {
		/* Emit the fully-qualified name of the command. */
		cmd = Tcl_GetHashValue(entryPtr);
		elemObjPtr = Tcl_NewObj();
		Tcl_GetCommandFullName(interp, cmd, elemObjPtr);
	    } else {
		cmdName = Tcl_GetHashKey(&nsPtr->cmdTable, entryPtr);
		elemObjPtr = Tcl_NewStringObj(cmdName, -1);
	    }
	    Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
	    Tcl_SetObjResult(interp, listPtr);
	    return TCL_OK;
	}
	if ((nsPtr != globalNsPtr) && !specificNsInPattern) {
	    /*
	     * Not in the effective namespace: try each namespace on the
	     * command resolution path, then fall back to the global table.
	     */
	    Tcl_HashTable *tablePtr = NULL;	/* Quell warning. */

	    for (i=0 ; i<nsPtr->commandPathLength ; i++) {
		Namespace *pathNsPtr = nsPtr->commandPathArray[i].nsPtr;

		if (pathNsPtr == NULL) {
		    continue;
		}
		tablePtr = &pathNsPtr->cmdTable;
		entryPtr = Tcl_FindHashEntry(tablePtr, simplePattern);
		if (entryPtr != NULL) {
		    break;
		}
	    }
	    if (entryPtr == NULL) {
		tablePtr = &globalNsPtr->cmdTable;
		entryPtr = Tcl_FindHashEntry(tablePtr, simplePattern);
	    }
	    if (entryPtr != NULL) {
		cmdName = Tcl_GetHashKey(tablePtr, entryPtr);
		Tcl_ListObjAppendElement(interp, listPtr,
			Tcl_NewStringObj(cmdName, -1));
		Tcl_SetObjResult(interp, listPtr);
		return TCL_OK;
	    }
	}
    } else if (nsPtr->commandPathLength == 0 || specificNsInPattern) {
	/*
	 * The pattern is non-trivial, but either there is no explicit path or
	 * there is an explicit namespace in the pattern. In both cases, the
	 * old matching scheme is perfect.
	 */

	entryPtr = Tcl_FirstHashEntry(&nsPtr->cmdTable, &search);
	while (entryPtr != NULL) {
	    cmdName = Tcl_GetHashKey(&nsPtr->cmdTable, entryPtr);
	    if ((simplePattern == NULL)
		    || Tcl_StringMatch(cmdName, simplePattern)) {
		if (specificNsInPattern) {
		    cmd = Tcl_GetHashValue(entryPtr);
		    elemObjPtr = Tcl_NewObj();
		    Tcl_GetCommandFullName(interp, cmd, elemObjPtr);
		} else {
		    elemObjPtr = Tcl_NewStringObj(cmdName, -1);
		}
		Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
	    }
	    entryPtr = Tcl_NextHashEntry(&search);
	}

	/*
	 * If the effective namespace isn't the global :: namespace, and a
	 * specific namespace wasn't requested in the pattern, then add in all
	 * global :: commands that match the simple pattern. Of course, we add
	 * in only those commands that aren't hidden by a command in the
	 * effective namespace.
	 */

	if ((nsPtr != globalNsPtr) && !specificNsInPattern) {
	    entryPtr = Tcl_FirstHashEntry(&globalNsPtr->cmdTable, &search);
	    while (entryPtr != NULL) {
		cmdName = Tcl_GetHashKey(&globalNsPtr->cmdTable, entryPtr);
		if ((simplePattern == NULL)
			|| Tcl_StringMatch(cmdName, simplePattern)) {
		    if (Tcl_FindHashEntry(&nsPtr->cmdTable,cmdName) == NULL) {
			Tcl_ListObjAppendElement(interp, listPtr,
				Tcl_NewStringObj(cmdName, -1));
		    }
		}
		entryPtr = Tcl_NextHashEntry(&search);
	    }
	}
    } else {
	/*
	 * The pattern is non-trivial (can match more than one command name),
	 * there is an explicit path, and there is no explicit namespace in
	 * the pattern. This means that we have to traverse the path to
	 * discover all the commands defined.
	 */

	Tcl_HashTable addedCommandsTable;
	int isNew;
	int foundGlobal = (nsPtr == globalNsPtr);

	/*
	 * We keep a hash of the objects already added to the result list.
	 */

	Tcl_InitObjHashTable(&addedCommandsTable);

	entryPtr = Tcl_FirstHashEntry(&nsPtr->cmdTable, &search);
	while (entryPtr != NULL) {
	    cmdName = Tcl_GetHashKey(&nsPtr->cmdTable, entryPtr);
	    if ((simplePattern == NULL)
		    || Tcl_StringMatch(cmdName, simplePattern)) {
		elemObjPtr = Tcl_NewStringObj(cmdName, -1);
		Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
		(void) Tcl_CreateHashEntry(&addedCommandsTable,
			elemObjPtr, &isNew);
	    }
	    entryPtr = Tcl_NextHashEntry(&search);
	}

	/*
	 * Search the path next.
	 */

	for (i=0 ; i<nsPtr->commandPathLength ; i++) {
	    Namespace *pathNsPtr = nsPtr->commandPathArray[i].nsPtr;

	    if (pathNsPtr == NULL) {
		continue;
	    }
	    if (pathNsPtr == globalNsPtr) {
		foundGlobal = 1;
	    }
	    entryPtr = Tcl_FirstHashEntry(&pathNsPtr->cmdTable, &search);
	    while (entryPtr != NULL) {
		cmdName = Tcl_GetHashKey(&pathNsPtr->cmdTable, entryPtr);
		if ((simplePattern == NULL)
			|| Tcl_StringMatch(cmdName, simplePattern)) {
		    elemObjPtr = Tcl_NewStringObj(cmdName, -1);
		    (void) Tcl_CreateHashEntry(&addedCommandsTable,
			    elemObjPtr, &isNew);
		    if (isNew) {
			Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
		    } else {
			/* Duplicate: drop the freshly-made name object. */
			TclDecrRefCount(elemObjPtr);
		    }
		}
		entryPtr = Tcl_NextHashEntry(&search);
	    }
	}

	/*
	 * If the effective namespace isn't the global :: namespace, and a
	 * specific namespace wasn't requested in the pattern, then add in all
	 * global :: commands that match the simple pattern. Of course, we add
	 * in only those commands that aren't hidden by a command in the
	 * effective namespace.
	 */

	if (!foundGlobal) {
	    entryPtr = Tcl_FirstHashEntry(&globalNsPtr->cmdTable, &search);
	    while (entryPtr != NULL) {
		cmdName = Tcl_GetHashKey(&globalNsPtr->cmdTable, entryPtr);
		if ((simplePattern == NULL)
			|| Tcl_StringMatch(cmdName, simplePattern)) {
		    elemObjPtr = Tcl_NewStringObj(cmdName, -1);
		    if (Tcl_FindHashEntry(&addedCommandsTable,
			    (char *) elemObjPtr) == NULL) {
			Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
		    } else {
			TclDecrRefCount(elemObjPtr);
		    }
		}
		entryPtr = Tcl_NextHashEntry(&search);
	    }
	}

	Tcl_DeleteHashTable(&addedCommandsTable);
    }

    Tcl_SetObjResult(interp, listPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoCompleteCmd --
*
* Called to implement the "info complete" command that determines
* whether a string is a complete Tcl command. Handles the following
* syntax:
*
* info complete command
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoCompleteCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    int isComplete;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "command");
	return TCL_ERROR;
    }

    /*
     * Ask the parser whether the script forms a syntactically complete
     * command, and report the answer as a boolean.
     */

    isComplete = TclObjCommandComplete(objv[1]);
    Tcl_SetObjResult(interp, Tcl_NewBooleanObj(isComplete));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoDefaultCmd --
*
* Called to implement the "info default" command that returns the
* default value for a procedure argument. Handles the following syntax:
*
* info default procName arg varName
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoDefaultCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;
    const char *procName, *argName;
    Proc *procPtr;
    CompiledLocal *localPtr;
    Tcl_Obj *valueObjPtr;

    if (objc != 4) {
	Tcl_WrongNumArgs(interp, 1, objv, "procname arg varname");
	return TCL_ERROR;
    }

    procName = TclGetString(objv[1]);
    argName = TclGetString(objv[2]);

    procPtr = TclFindProc(iPtr, procName);
    if (procPtr == NULL) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"\"%s\" isn't a procedure", procName));
	Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "PROCEDURE", procName,
		NULL);
	return TCL_ERROR;
    }

    /*
     * Find the named formal argument; store its default (or an empty value
     * if it has none) into the variable named by objv[3], and return 1/0
     * accordingly.
     */

    for (localPtr = procPtr->firstLocalPtr; localPtr != NULL;
	    localPtr = localPtr->nextPtr) {
	if (TclIsVarArgument(localPtr)
		&& (strcmp(argName, localPtr->name) == 0)) {
	    if (localPtr->defValuePtr != NULL) {
		/* Argument has a default: hand it to the caller's var. */
		valueObjPtr = Tcl_ObjSetVar2(interp, objv[3], NULL,
			localPtr->defValuePtr, TCL_LEAVE_ERR_MSG);
		if (valueObjPtr == NULL) {
		    return TCL_ERROR;
		}
		Tcl_SetObjResult(interp, Tcl_NewIntObj(1));
	    } else {
		/* No default: set the caller's var to the empty value. */
		Tcl_Obj *nullObjPtr = Tcl_NewObj();

		valueObjPtr = Tcl_ObjSetVar2(interp, objv[3], NULL,
			nullObjPtr, TCL_LEAVE_ERR_MSG);
		if (valueObjPtr == NULL) {
		    return TCL_ERROR;
		}
		Tcl_SetObjResult(interp, Tcl_NewIntObj(0));
	    }
	    return TCL_OK;
	}
    }

    Tcl_SetObjResult(interp, Tcl_ObjPrintf(
	    "procedure \"%s\" doesn't have an argument \"%s\"",
	    procName, argName));
    Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "ARGUMENT", argName, NULL);
    return TCL_ERROR;
}
/*
*----------------------------------------------------------------------
*
* InfoErrorStackCmd --
*
* Called to implement the "info errorstack" command that returns information
* about the last error's call stack. Handles the following syntax:
*
* info errorstack ?interp?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoErrorStackCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Interp *targetInterp = interp;	/* Interp whose stack we report;
					 * defaults to ourselves. */

    if (objc < 1 || objc > 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "?interp?");
	return TCL_ERROR;
    }

    /*
     * An optional argument names a slave interpreter to query instead.
     */

    if (objc == 2) {
	targetInterp = Tcl_GetSlave(interp, Tcl_GetString(objv[1]));
	if (targetInterp == NULL) {
	    return TCL_ERROR;
	}
    }

    Tcl_SetObjResult(interp, ((Interp *) targetInterp)->errorStack);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* TclInfoExistsCmd --
*
* Called to implement the "info exists" command that determines whether
* a variable exists. Handles the following syntax:
*
* info exists varName
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
int
TclInfoExistsCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Var *varPtr;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "varName");
	return TCL_ERROR;
    }

    /*
     * A variable "exists" only when it is found and actually holds a value;
     * TclVarTraceExists also fires any read traces that may create it.
     */

    varPtr = TclVarTraceExists(interp, TclGetString(objv[1]));
    Tcl_SetObjResult(interp, Tcl_NewBooleanObj(
	    (varPtr != NULL) && (varPtr->value.objPtr != NULL)));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoFrameCmd --
* TIP #280
*
* Called to implement the "info frame" command that returns the location
* of either the currently executing command, or its caller. Handles the
* following syntax:
*
* info frame ?number?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoFrameCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;
    int level, code = TCL_OK;
    /* cmdFramePtrPtr walks to the tail of the frame chain so the callers'
     * chains can be spliced on below. */
    CmdFrame *framePtr, **cmdFramePtrPtr = &iPtr->cmdFramePtr;
    CoroutineData *corPtr = iPtr->execEnvPtr->corPtr;
    int topLevel = 0;
    if (objc > 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "?number?");
	return TCL_ERROR;
    }
    /*
     * When executing inside (possibly nested) coroutines, temporarily link
     * the frame chain of each caller execution environment onto the end of
     * the current chain, counting frames already in place as we walk. This
     * exposes the whole logical stack to the level arithmetic below; the
     * splicing is undone again after "done".
     */
    while (corPtr) {
	while (*cmdFramePtrPtr) {
	    topLevel++;
	    cmdFramePtrPtr = &((*cmdFramePtrPtr)->nextPtr);
	}
	if (corPtr->caller.cmdFramePtr) {
	    *cmdFramePtrPtr = corPtr->caller.cmdFramePtr;
	}
	corPtr = corPtr->callerEEPtr->corPtr;
    }
    topLevel += (*cmdFramePtrPtr)->level;
    /*
     * If the recorded levels do not match the total depth of the (now
     * extended) chain, renumber every frame: the innermost frame carries the
     * full depth, decreasing towards the outermost caller.
     */
    if (topLevel != iPtr->cmdFramePtr->level) {
	framePtr = iPtr->cmdFramePtr;
	while (framePtr) {
	    framePtr->level = topLevel--;
	    framePtr = framePtr->nextPtr;
	}
	if (topLevel) {
	    Tcl_Panic("Broken frame level calculation");
	}
	topLevel = iPtr->cmdFramePtr->level;
    }
    if (objc == 1) {
	/*
	 * Just "info frame": return the current depth.
	 */
	Tcl_SetObjResult(interp, Tcl_NewIntObj(topLevel));
	goto done;
    }
    /*
     * We've got "info frame level" and must parse the level first.
     */
    if (TclGetIntFromObj(interp, objv[1], &level) != TCL_OK) {
	code = TCL_ERROR;
	goto done;
    }
    if ((level > topLevel) || (level <= - topLevel)) {
    levelError:
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"bad level \"%s\"", TclGetString(objv[1])));
	Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "STACK_FRAME",
		TclGetString(objv[1]), NULL);
	code = TCL_ERROR;
	goto done;
    }
    /*
     * Let us convert to relative so that we know how many levels to go back
     */
    if (level > 0) {
	level -= topLevel;
    }
    /* Walk outwards; ++level reaches 0 exactly at the requested frame. */
    framePtr = iPtr->cmdFramePtr;
    while (++level <= 0) {
	framePtr = framePtr->nextPtr;
	if (!framePtr) {
	    goto levelError;
	}
    }
    Tcl_SetObjResult(interp, TclInfoFrame(interp, framePtr));
  done:
    /*
     * Undo the splicing performed above: walk the coroutine callers again,
     * cut each borrowed chain loose, and restore the relative frame levels
     * within each segment.
     */
    cmdFramePtrPtr = &iPtr->cmdFramePtr;
    corPtr = iPtr->execEnvPtr->corPtr;
    while (corPtr) {
	CmdFrame *endPtr = corPtr->caller.cmdFramePtr;
	if (endPtr) {
	    if (*cmdFramePtrPtr == endPtr) {
		*cmdFramePtrPtr = NULL;
	    } else {
		CmdFrame *runPtr = *cmdFramePtrPtr;
		while (runPtr->nextPtr != endPtr) {
		    runPtr->level -= endPtr->level;
		    runPtr = runPtr->nextPtr;
		}
		runPtr->level = 1;
		runPtr->nextPtr = NULL;
	    }
	    cmdFramePtrPtr = &corPtr->caller.cmdFramePtr;
	}
	corPtr = corPtr->callerEEPtr->corPtr;
    }
    return code;
}
/*
*----------------------------------------------------------------------
*
* TclInfoFrame --
*
* Core of InfoFrameCmd, returns TIP280 dict for a given frame.
*
* Results:
* Returns TIP280 dict.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
Tcl_Obj *
TclInfoFrame(
    Tcl_Interp *interp,		/* Current interpreter. */
    CmdFrame *framePtr)		/* Frame to get info for. */
{
    Interp *iPtr = (Interp *) interp;
    Tcl_Obj *tmpObj;
    Tcl_Obj *lv[20];		/* Keep uptodate when more keys are added to
				 * the dict. */
    int lc = 0;
    /*
     * This array is indexed by the TCL_LOCATION_... values, except
     * for _LAST.
     */
    static const char *const typeString[TCL_LOCATION_LAST] = {
	"eval", "eval", "eval", "precompiled", "source", "proc"
    };
    Proc *procPtr = framePtr->framePtr ? framePtr->framePtr->procPtr : NULL;
    /* Index into lv[] of a value whose extra reference must be dropped once
     * the result list has been built; -1 means no such value. */
    int needsFree = -1;
    /*
     * Pull the information and construct the dictionary to return, as list.
     * Regarding use of the CmdFrame fields see tclInt.h, and its definition.
     */
#define ADD_PAIR(name, value) \
	TclNewLiteralStringObj(tmpObj, name); \
	lv[lc++] = tmpObj; \
	lv[lc++] = (value)
    switch (framePtr->type) {
    case TCL_LOCATION_EVAL:
	/*
	 * Evaluation, dynamic script. Type, line, cmd, the latter through
	 * str.
	 */
	ADD_PAIR("type", Tcl_NewStringObj(typeString[framePtr->type], -1));
	if (framePtr->line) {
	    ADD_PAIR("line", Tcl_NewIntObj(framePtr->line[0]));
	} else {
	    ADD_PAIR("line", Tcl_NewIntObj(1));
	}
	ADD_PAIR("cmd", TclGetSourceFromFrame(framePtr, 0, NULL));
	break;
    case TCL_LOCATION_PREBC:
	/*
	 * Precompiled. Result contains the type as signal, nothing else.
	 */
	ADD_PAIR("type", Tcl_NewStringObj(typeString[framePtr->type], -1));
	break;
    case TCL_LOCATION_BC: {
	/*
	 * Execution of bytecode. Talk to the BC engine to fill out the frame.
	 * Work on a stack-allocated copy so the caller's frame is untouched.
	 */
	CmdFrame *fPtr = TclStackAlloc(interp, sizeof(CmdFrame));
	*fPtr = *framePtr;
	/*
	 * Note:
	 * Type BC => f.data.eval.path is not used.
	 * f.data.tebc.codePtr is used instead.
	 */
	TclGetSrcInfoForPc(fPtr);
	/*
	 * Now filled: cmd.str.(cmd,len), line
	 * Possibly modified: type, path!
	 */
	ADD_PAIR("type", Tcl_NewStringObj(typeString[fPtr->type], -1));
	if (fPtr->line) {
	    ADD_PAIR("line", Tcl_NewIntObj(fPtr->line[0]));
	}
	if (fPtr->type == TCL_LOCATION_SOURCE) {
	    ADD_PAIR("file", fPtr->data.eval.path);
	    /*
	     * Death of reference by TclGetSrcInfoForPc.
	     */
	    Tcl_DecrRefCount(fPtr->data.eval.path);
	}
	ADD_PAIR("cmd", TclGetSourceFromFrame(fPtr, 0, NULL));
	/* If the source object was created for the copy only (the original
	 * frame had none), remember to release it after list construction. */
	if (fPtr->cmdObj && framePtr->cmdObj == NULL) {
	    needsFree = lc - 1;
	}
	TclStackFree(interp, fPtr);
	break;
    }
    case TCL_LOCATION_SOURCE:
	/*
	 * Evaluation of a script file.
	 */
	ADD_PAIR("type", Tcl_NewStringObj(typeString[framePtr->type], -1));
	ADD_PAIR("line", Tcl_NewIntObj(framePtr->line[0]));
	ADD_PAIR("file", framePtr->data.eval.path);
	/*
	 * Refcount framePtr->data.eval.path goes up when lv is converted into
	 * the result list object.
	 */
	ADD_PAIR("cmd", TclGetSourceFromFrame(framePtr, 0, NULL));
	break;
    case TCL_LOCATION_PROC:
	Tcl_Panic("TCL_LOCATION_PROC found in standard frame");
	break;
    }
    /*
     * 'proc'. Common to all frame types. Conditional on having an associated
     * Procedure CallFrame.
     */
    if (procPtr != NULL) {
	Tcl_HashEntry *namePtr = procPtr->cmdPtr->hPtr;
	if (namePtr) {
	    Tcl_Obj *procNameObj;
	    /*
	     * This is a regular command.
	     */
	    TclNewObj(procNameObj);
	    Tcl_GetCommandFullName(interp, (Tcl_Command) procPtr->cmdPtr,
		    procNameObj);
	    ADD_PAIR("proc", procNameObj);
	} else if (procPtr->cmdPtr->clientData) {
	    ExtraFrameInfo *efiPtr = procPtr->cmdPtr->clientData;
	    int i;
	    /*
	     * This is a non-standard command. Luckily, it's told us how to
	     * render extra information about its frame.
	     */
	    for (i=0 ; i<efiPtr->length ; i++) {
		lv[lc++] = Tcl_NewStringObj(efiPtr->fields[i].name, -1);
		if (efiPtr->fields[i].proc) {
		    lv[lc++] =
			efiPtr->fields[i].proc(efiPtr->fields[i].clientData);
		} else {
		    lv[lc++] = efiPtr->fields[i].clientData;
		}
	    }
	}
    }
    /*
     * 'level'. Common to all frame types. Conditional on having an associated
     * _visible_ CallFrame. The frame's level is reported relative to the
     * current variable frame, but only if the frame is actually on the
     * current call-frame chain.
     */
    if ((framePtr->framePtr != NULL) && (iPtr->varFramePtr != NULL)) {
	CallFrame *current = framePtr->framePtr;
	CallFrame *top = iPtr->varFramePtr;
	CallFrame *idx;
	for (idx=top ; idx!=NULL ; idx=idx->callerVarPtr) {
	    if (idx == current) {
		int c = framePtr->framePtr->level;
		int t = iPtr->varFramePtr->level;
		ADD_PAIR("level", Tcl_NewIntObj(t - c));
		break;
	    }
	}
    }
    tmpObj = Tcl_NewListObj(lc, lv);
    /* Drop the extra reference noted in the TCL_LOCATION_BC case above; the
     * list now holds its own reference. */
    if (needsFree >= 0) {
	Tcl_DecrRefCount(lv[needsFree]);
    }
    return tmpObj;
}
/*
*----------------------------------------------------------------------
*
* InfoFunctionsCmd --
*
* Called to implement the "info functions" command that returns the list
* of math functions matching an optional pattern. Handles the following
* syntax:
*
* info functions ?pattern?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoFunctionsCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *script;
    int code;
    if (objc > 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "?pattern?");
	return TCL_ERROR;
    }
    /*
     * The real work is delegated to a script-level lambda: it collects the
     * namespace tails of all commands in ::tcl::mathfunc matching the
     * pattern (fully-qualified and relative lookups, without duplicates).
     * The lambda runs in the caller's current namespace.
     */
    script = Tcl_NewStringObj(
"	    ::apply [::list {{pattern *}} {\n"
"		::set cmds {}\n"
"		::foreach cmd [::info commands ::tcl::mathfunc::$pattern] {\n"
"		    ::lappend cmds [::namespace tail $cmd]\n"
"		}\n"
"		::foreach cmd [::info commands tcl::mathfunc::$pattern] {\n"
"		    ::set cmd [::namespace tail $cmd]\n"
"		    ::if {$cmd ni $cmds} {\n"
"			::lappend cmds $cmd\n"
"		    }\n"
"		}\n"
"		::return $cmds\n"
"	    } [::namespace current]] ", -1);
    if (objc == 2) {
	/* Append the pattern as a properly list-quoted single argument. */
	Tcl_Obj *arg = Tcl_NewListObj(1, &(objv[1]));
	Tcl_AppendObjToObj(script, arg);
	Tcl_DecrRefCount(arg);	/* Free unneeded obj - refCount was 0. */
    }
    Tcl_IncrRefCount(script);
    code = Tcl_EvalObjEx(interp, script, 0);
    Tcl_DecrRefCount(script);
    return code;
}
/*
*----------------------------------------------------------------------
*
* InfoHostnameCmd --
*
* Called to implement the "info hostname" command that returns the host
* name. Handles the following syntax:
*
* info hostname
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoHostnameCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *name;

    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }

    name = Tcl_GetHostName();
    if (name == NULL) {
	/*
	 * The platform layer could not determine a host name.
	 */

	Tcl_SetObjResult(interp, Tcl_NewStringObj(
		"unable to determine name of host", -1));
	Tcl_SetErrorCode(interp, "TCL", "OPERATION", "HOSTNAME", "UNKNOWN",
		NULL);
	return TCL_ERROR;
    }

    Tcl_SetObjResult(interp, Tcl_NewStringObj(name, -1));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoLevelCmd --
*
* Called to implement the "info level" command that returns information
* about the call stack. Handles the following syntax:
*
* info level ?number?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoLevelCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;
    CallFrame *framePtr;
    int level;

    if (objc == 1) {
	/*
	 * No argument: report the current stack depth.
	 */

	Tcl_SetObjResult(interp, Tcl_NewIntObj(iPtr->varFramePtr->level));
	return TCL_OK;
    }
    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "?number?");
	return TCL_ERROR;
    }

    if (TclGetIntFromObj(interp, objv[1], &level) != TCL_OK) {
	return TCL_ERROR;
    }

    /*
     * Zero and negative levels are relative to the current frame; convert
     * them to absolute levels. At the root frame there is nothing relative
     * to refer to.
     */

    if (level <= 0) {
	if (iPtr->varFramePtr == iPtr->rootFramePtr) {
	    goto levelError;
	}
	level += iPtr->varFramePtr->level;
    }

    /*
     * Search outwards from the current frame for the one with the requested
     * absolute level; on success return its argument words as a list.
     */

    for (framePtr = iPtr->varFramePtr; framePtr != iPtr->rootFramePtr;
	    framePtr = framePtr->callerVarPtr) {
	if (framePtr->level == level) {
	    Tcl_SetObjResult(interp,
		    Tcl_NewListObj(framePtr->objc, framePtr->objv));
	    return TCL_OK;
	}
    }

  levelError:
    Tcl_SetObjResult(interp, Tcl_ObjPrintf(
	    "bad level \"%s\"", TclGetString(objv[1])));
    Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "STACK_LEVEL",
	    TclGetString(objv[1]), NULL);
    return TCL_ERROR;
}
/*
*----------------------------------------------------------------------
*
* InfoLibraryCmd --
*
* Called to implement the "info library" command that returns the
* library directory for the Tcl installation. Handles the following
* syntax:
*
* info library
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoLibraryCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *libDirName;

    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }

    /*
     * The library directory is simply whatever the global tcl_library
     * variable says it is.
     */

    libDirName = Tcl_GetVar(interp, "tcl_library", TCL_GLOBAL_ONLY);
    if (libDirName == NULL) {
	Tcl_SetObjResult(interp, Tcl_NewStringObj(
		"no library has been specified for Tcl", -1));
	Tcl_SetErrorCode(interp, "TCL", "LOOKUP", "VARIABLE", "tcl_library",
		NULL);
	return TCL_ERROR;
    }

    Tcl_SetObjResult(interp, Tcl_NewStringObj(libDirName, -1));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoLoadedCmd --
*
* Called to implement the "info loaded" command that returns the
* packages that have been loaded into an interpreter. Handles the
* following syntax:
*
* info loaded ?interp?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoLoadedCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *interpName = NULL;	/* NULL => all interpreters. */

    if (objc != 1 && objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "?interp?");
	return TCL_ERROR;
    }

    /*
     * With an argument, restrict the report to that one interpreter;
     * otherwise report the packages loaded in every interpreter.
     */

    if (objc == 2) {
	interpName = TclGetString(objv[1]);
    }
    return TclGetLoadedPackages(interp, interpName);
}
/*
*----------------------------------------------------------------------
*
* InfoNameOfExecutableCmd --
*
* Called to implement the "info nameofexecutable" command that returns
* the name of the binary file running this application. Handles the
* following syntax:
*
* info nameofexecutable
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoNameOfExecutableCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    if (objc == 1) {
	/*
	 * The platform layer caches the executable name; just hand it over.
	 */

	Tcl_SetObjResult(interp, TclGetObjNameOfExecutable());
	return TCL_OK;
    }
    Tcl_WrongNumArgs(interp, 1, objv, NULL);
    return TCL_ERROR;
}
/*
*----------------------------------------------------------------------
*
* InfoPatchLevelCmd --
*
 *	Called to implement the "info patchlevel" command that returns the
 *	patch level for this Tcl library. Handles the following syntax:
*
* info patchlevel
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoPatchLevelCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *patchlevel;

    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }

    patchlevel = Tcl_GetVar(interp, "tcl_patchLevel",
	    (TCL_GLOBAL_ONLY | TCL_LEAVE_ERR_MSG));
    if (patchlevel == NULL) {
	/*
	 * Tcl_GetVar has already left an error message in the interpreter.
	 */

	return TCL_ERROR;
    }

    Tcl_SetObjResult(interp, Tcl_NewStringObj(patchlevel, -1));
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoProcsCmd --
*
* Called to implement the "info procs" command that returns the list of
* procedures in the interpreter that match an optional pattern. The
* pattern, if any, consists of an optional sequence of namespace names
* separated by "::" qualifiers, which is followed by a glob-style
* pattern that restricts which commands are returned. Handles the
* following syntax:
*
* info procs ?pattern?
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoProcsCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    const char *cmdName, *pattern;
    const char *simplePattern;
    Namespace *nsPtr;
#ifdef INFO_PROCS_SEARCH_GLOBAL_NS
    Namespace *globalNsPtr = (Namespace *) Tcl_GetGlobalNamespace(interp);
#endif
    Namespace *currNsPtr = (Namespace *) Tcl_GetCurrentNamespace(interp);
    Tcl_Obj *listPtr, *elemObjPtr;
    int specificNsInPattern = 0;/* Init. to avoid compiler warning. */
    register Tcl_HashEntry *entryPtr;
    Tcl_HashSearch search;
    Command *cmdPtr, *realCmdPtr;
    /*
     * Get the pattern and find the "effective namespace" in which to list
     * procs.
     */
    if (objc == 1) {
	simplePattern = NULL;
	nsPtr = currNsPtr;
	specificNsInPattern = 0;
    } else if (objc == 2) {
	/*
	 * From the pattern, get the effective namespace and the simple
	 * pattern (no namespace qualifiers or ::'s) at the end. If an error
	 * was found while parsing the pattern, return it. Otherwise, if the
	 * namespace wasn't found, just leave nsPtr NULL: we will return an
	 * empty list since no commands there can be found.
	 */
	Namespace *dummy1NsPtr, *dummy2NsPtr;
	pattern = TclGetString(objv[1]);
	TclGetNamespaceForQualName(interp, pattern, NULL, /*flags*/ 0, &nsPtr,
		&dummy1NsPtr, &dummy2NsPtr, &simplePattern);
	if (nsPtr != NULL) {	/* We successfully found the pattern's ns. */
	    specificNsInPattern = (strcmp(simplePattern, pattern) != 0);
	}
    } else {
	Tcl_WrongNumArgs(interp, 1, objv, "?pattern?");
	return TCL_ERROR;
    }
    if (nsPtr == NULL) {
	return TCL_OK;
    }
    /*
     * Scan through the effective namespace's command table and create a list
     * with all procs that match the pattern. If a specific namespace was
     * requested in the pattern, qualify the command names with the namespace
     * name.
     */
    listPtr = Tcl_NewListObj(0, NULL);
#ifndef INFO_PROCS_SEARCH_GLOBAL_NS
    /*
     * Fast path: a pattern without glob special characters can only match
     * itself, so a single hash lookup replaces the full table scan. A
     * command counts as a proc either directly, or when it is an alias or
     * import whose original command is a proc.
     */
    if (simplePattern != NULL && TclMatchIsTrivial(simplePattern)) {
	entryPtr = Tcl_FindHashEntry(&nsPtr->cmdTable, simplePattern);
	if (entryPtr != NULL) {
	    cmdPtr = Tcl_GetHashValue(entryPtr);
	    if (!TclIsProc(cmdPtr)) {
		realCmdPtr = (Command *)
			TclGetOriginalCommand((Tcl_Command) cmdPtr);
		if (realCmdPtr != NULL && TclIsProc(realCmdPtr)) {
		    goto simpleProcOK;
		}
	    } else {
	    simpleProcOK:
		if (specificNsInPattern) {
		    elemObjPtr = Tcl_NewObj();
		    Tcl_GetCommandFullName(interp, (Tcl_Command) cmdPtr,
			    elemObjPtr);
		} else {
		    elemObjPtr = Tcl_NewStringObj(simplePattern, -1);
		}
		Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
	    }
	}
    } else
#endif /* !INFO_PROCS_SEARCH_GLOBAL_NS */
    {
	/*
	 * General case: walk the whole command table, keeping each entry
	 * that matches the pattern and resolves to a proc.
	 */
	entryPtr = Tcl_FirstHashEntry(&nsPtr->cmdTable, &search);
	while (entryPtr != NULL) {
	    cmdName = Tcl_GetHashKey(&nsPtr->cmdTable, entryPtr);
	    if ((simplePattern == NULL)
		    || Tcl_StringMatch(cmdName, simplePattern)) {
		cmdPtr = Tcl_GetHashValue(entryPtr);
		if (!TclIsProc(cmdPtr)) {
		    realCmdPtr = (Command *)
			    TclGetOriginalCommand((Tcl_Command) cmdPtr);
		    if (realCmdPtr != NULL && TclIsProc(realCmdPtr)) {
			goto procOK;
		    }
		} else {
		procOK:
		    if (specificNsInPattern) {
			elemObjPtr = Tcl_NewObj();
			Tcl_GetCommandFullName(interp, (Tcl_Command) cmdPtr,
				elemObjPtr);
		    } else {
			elemObjPtr = Tcl_NewStringObj(cmdName, -1);
		    }
		    Tcl_ListObjAppendElement(interp, listPtr, elemObjPtr);
		}
	    }
	    entryPtr = Tcl_NextHashEntry(&search);
	}
	/*
	 * If the effective namespace isn't the global :: namespace, and a
	 * specific namespace wasn't requested in the pattern, then add in all
	 * global :: procs that match the simple pattern. Of course, we add in
	 * only those procs that aren't hidden by a proc in the effective
	 * namespace.
	 */
#ifdef INFO_PROCS_SEARCH_GLOBAL_NS
	/*
	 * If "info procs" worked like "info commands", returning the commands
	 * also seen in the global namespace, then you would include this
	 * code. As this could break backwards compatibilty with 8.0-8.2, we
	 * decided not to "fix" it in 8.3, leaving the behavior slightly
	 * different.
	 */
	if ((nsPtr != globalNsPtr) && !specificNsInPattern) {
	    entryPtr = Tcl_FirstHashEntry(&globalNsPtr->cmdTable, &search);
	    while (entryPtr != NULL) {
		cmdName = Tcl_GetHashKey(&globalNsPtr->cmdTable, entryPtr);
		if ((simplePattern == NULL)
			|| Tcl_StringMatch(cmdName, simplePattern)) {
		    if (Tcl_FindHashEntry(&nsPtr->cmdTable,cmdName) == NULL) {
			cmdPtr = Tcl_GetHashValue(entryPtr);
			realCmdPtr = (Command *) TclGetOriginalCommand(
				(Tcl_Command) cmdPtr);
			if (TclIsProc(cmdPtr) || ((realCmdPtr != NULL)
				&& TclIsProc(realCmdPtr))) {
			    Tcl_ListObjAppendElement(interp, listPtr,
				    Tcl_NewStringObj(cmdName, -1));
			}
		    }
		}
		entryPtr = Tcl_NextHashEntry(&search);
	    }
	}
#endif
    }
    Tcl_SetObjResult(interp, listPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoScriptCmd --
*
* Called to implement the "info script" command that returns the script
* file that is currently being evaluated. Handles the following syntax:
*
* info script ?newName?
*
* If newName is specified, it will set that as the internal name.
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message. It may change the internal
* script filename.
*
*----------------------------------------------------------------------
*/
static int
InfoScriptCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Interp *iPtr = (Interp *) interp;

    if ((objc != 1) && (objc != 2)) {
	Tcl_WrongNumArgs(interp, 1, objv, "?filename?");
	return TCL_ERROR;
    }
    if (objc == 2) {
	/*
	 * Install the new script name. Take the reference on the new object
	 * BEFORE dropping the reference on the old one, so that this is safe
	 * even in the corner case where both are the same Tcl_Obj.
	 */

	Tcl_IncrRefCount(objv[1]);
	if (iPtr->scriptFile != NULL) {
	    Tcl_DecrRefCount(iPtr->scriptFile);
	}
	iPtr->scriptFile = objv[1];
    }

    /*
     * Report the current script name; when none has ever been set the
     * result is left as the empty string.
     */

    if (iPtr->scriptFile != NULL) {
	Tcl_SetObjResult(interp, iPtr->scriptFile);
    }
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoSharedlibCmd --
*
* Called to implement the "info sharedlibextension" command that returns
* the file extension used for shared libraries. Handles the following
* syntax:
*
* info sharedlibextension
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoSharedlibCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }
    /*
     * TCL_SHLIB_EXT is defined by the build configuration (e.g. ".so",
     * ".dll"). On platforms where it is not defined the result is simply
     * left empty.
     */
#ifdef TCL_SHLIB_EXT
    Tcl_SetObjResult(interp, Tcl_NewStringObj(TCL_SHLIB_EXT, -1));
#endif
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* InfoTclVersionCmd --
*
* Called to implement the "info tclversion" command that returns the
* version number for this Tcl library. Handles the following syntax:
*
* info tclversion
*
* Results:
* Returns TCL_OK if successful and TCL_ERROR if there is an error.
*
* Side effects:
* Returns a result in the interpreter's result object. If there is an
* error, the result is an error message.
*
*----------------------------------------------------------------------
*/
static int
InfoTclVersionCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *version;

    if (objc != 1) {
	Tcl_WrongNumArgs(interp, 1, objv, NULL);
	return TCL_ERROR;
    }

    /*
     * The version is whatever the global tcl_version variable holds.
     */

    version = Tcl_GetVar2Ex(interp, "tcl_version", NULL,
	    (TCL_GLOBAL_ONLY | TCL_LEAVE_ERR_MSG));
    if (version == NULL) {
	/*
	 * Tcl_GetVar2Ex already left an error message in the interpreter.
	 */

	return TCL_ERROR;
    }

    Tcl_SetObjResult(interp, version);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_JoinObjCmd --
*
* This procedure is invoked to process the "join" Tcl command. See the
* user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_JoinObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* The argument objects. */
{
    Tcl_Obj **elemPtrs, *sepObj, *resultObj;
    int count, idx;

    if (objc < 2 || objc > 3) {
	Tcl_WrongNumArgs(interp, 1, objv, "list ?joinString?");
	return TCL_ERROR;
    }

    /*
     * The first argument must be a valid Tcl list; fetch its elements.
     */

    if (TclListObjGetElements(interp, objv[1], &count, &elemPtrs) != TCL_OK) {
	return TCL_ERROR;
    }

    /*
     * A single space is the default separator. Hold a reference on the
     * separator across the appends below.
     */

    if (objc == 3) {
	sepObj = objv[2];
    } else {
	sepObj = Tcl_NewStringObj(" ", 1);
    }
    Tcl_IncrRefCount(sepObj);

    resultObj = Tcl_NewObj();
    for (idx = 0; idx < count; idx++) {
	if (idx > 0) {
	    Tcl_AppendObjToObj(resultObj, sepObj);
	}
	Tcl_AppendObjToObj(resultObj, elemPtrs[idx]);
    }
    Tcl_DecrRefCount(sepObj);

    Tcl_SetObjResult(interp, resultObj);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LassignObjCmd --
*
* This object-based procedure is invoked to process the "lassign" Tcl
* command. See the user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LassignObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *copyObj;		/* Private copy of the list argument. */
    Tcl_Obj **elems;		/* Element array of that copy. */
    int numElems;		/* Number of elements in the copy. */
    int status = TCL_OK;

    if (objc < 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "list ?varName ...?");
	return TCL_ERROR;
    }

    /*
     * Work on a copy of the list so that writing the variables cannot
     * invalidate the element array while we iterate over it.
     */

    copyObj = TclListObjCopy(interp, objv[1]);
    if (copyObj == NULL) {
	return TCL_ERROR;
    }
    TclListObjGetElements(NULL, copyObj, &numElems, &elems);

    objc -= 2;
    objv += 2;

    /*
     * Pair off variables and list elements while both remain; stop early on
     * any assignment failure.
     */

    for (; status == TCL_OK && objc > 0 && numElems > 0; objc--, numElems--) {
	if (Tcl_ObjSetVar2(interp, *objv++, NULL, *elems++,
		TCL_LEAVE_ERR_MSG) == NULL) {
	    status = TCL_ERROR;
	}
    }

    if (status == TCL_OK && objc > 0) {
	/*
	 * More variables than elements: set the leftovers to the empty
	 * string, sharing one empty object between them.
	 */

	Tcl_Obj *emptyObj;

	TclNewObj(emptyObj);
	Tcl_IncrRefCount(emptyObj);
	for (; status == TCL_OK && objc > 0; objc--) {
	    if (Tcl_ObjSetVar2(interp, *objv++, NULL, emptyObj,
		    TCL_LEAVE_ERR_MSG) == NULL) {
		status = TCL_ERROR;
	    }
	}
	Tcl_DecrRefCount(emptyObj);
    }

    if (status == TCL_OK && numElems > 0) {
	/*
	 * More elements than variables: return the unassigned remainder.
	 */

	Tcl_SetObjResult(interp, Tcl_NewListObj(numElems, elems));
    }

    Tcl_DecrRefCount(copyObj);
    return status;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LindexObjCmd --
*
* This object-based procedure is invoked to process the "lindex" Tcl
* command. See the user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LindexObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *resultPtr;		/* The extracted element. */

    if (objc < 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "list ?index ...?");
	return TCL_ERROR;
    }

    /*
     * With exactly one index argument (objc == 3) it may be either a single
     * index or a list of indices, which TclLindexList disambiguates. In all
     * other cases every remaining argument is a single index, handled by
     * TclLindexFlat (which also covers the no-index case).
     */

    resultPtr = (objc == 3)
	    ? TclLindexList(interp, objv[1], objv[2])
	    : TclLindexFlat(interp, objv[1], objc-2, objv+2);
    if (resultPtr == NULL) {
	return TCL_ERROR;
    }

    /*
     * Hand the element over to the interp result and release the reference
     * returned by the helper.
     */

    Tcl_SetObjResult(interp, resultPtr);
    Tcl_DecrRefCount(resultPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LinsertObjCmd --
*
* This object-based procedure is invoked to process the "linsert" Tcl
* command. See the user documentation for details on what it does.
*
* Results:
* A new Tcl list object formed by inserting zero or more elements into a
* list.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LinsertObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    register int objc,		/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *resultPtr;
    int length, insertAt, status;

    if (objc < 3) {
	Tcl_WrongNumArgs(interp, 1, objv, "list index ?element ...?");
	return TCL_ERROR;
    }

    status = TclListObjLength(interp, objv[1], &length);
    if (status != TCL_OK) {
	return status;
    }

    /*
     * Here "end" denotes the position just after the last element, so that
     * inserting at "end" appends; indices beyond the length are clamped.
     */

    status = TclGetIntForIndexM(interp, objv[2], /*end*/ length, &insertAt);
    if (status != TCL_OK) {
	return status;
    }
    if (insertAt > length) {
	insertAt = length;
    }

    /*
     * Copy on write: an unshared list may be modified in place; a shared
     * one must be duplicated first.
     */

    resultPtr = objv[1];
    if (Tcl_IsShared(resultPtr)) {
	resultPtr = TclListObjCopy(NULL, resultPtr);
    }

    if ((objc == 4) && (insertAt == length)) {
	/*
	 * Cheap path: appending exactly one element at the end.
	 */

	Tcl_ListObjAppendElement(NULL, resultPtr, objv[3]);
    } else {
	Tcl_ListObjReplace(NULL, resultPtr, insertAt, 0, objc-3, &objv[3]);
    }

    Tcl_SetObjResult(interp, resultPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_ListObjCmd --
*
* This procedure is invoked to process the "list" Tcl command. See the
* user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_ListObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    register int objc,		/* Number of arguments. */
    register Tcl_Obj *const objv[])
				/* The argument objects. */
{
    /*
     * With no arguments past the command name, the interpreter's default
     * empty result already is the answer; otherwise wrap every remaining
     * argument into a fresh list object.
     */

    if (objc > 1) {
	Tcl_SetObjResult(interp, Tcl_NewListObj(objc-1, objv+1));
    }
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LlengthObjCmd --
*
* This object-based procedure is invoked to process the "llength" Tcl
* command. See the user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LlengthObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    register Tcl_Obj *const objv[])
				/* Argument objects. */
{
    int numElems, status;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "list");
	return TCL_ERROR;
    }

    /*
     * Convert (if needed) the argument to a list and report its element
     * count as an integer object; any conversion error message is already
     * in the interpreter.
     */

    status = TclListObjLength(interp, objv[1], &numElems);
    if (status == TCL_OK) {
	Tcl_SetObjResult(interp, Tcl_NewIntObj(numElems));
    }
    return status;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LrangeObjCmd --
*
* This procedure is invoked to process the "lrange" Tcl command. See the
* user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LrangeObjCmd(
    ClientData notUsed,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    register Tcl_Obj *const objv[])
				/* Argument objects. */
{
    Tcl_Obj **elemPtrs;		/* Element array of the list argument. */
    int listLen, first, last, result;

    if (objc != 4) {
	Tcl_WrongNumArgs(interp, 1, objv, "list first last");
	return TCL_ERROR;
    }

    result = TclListObjLength(interp, objv[1], &listLen);
    if (result != TCL_OK) {
	return result;
    }

    /*
     * Parse both indices relative to the final element (so "end" names the
     * last element) and clamp them into [0, listLen-1].
     */

    result = TclGetIntForIndexM(interp, objv[2], /*endValue*/ listLen - 1,
	    &first);
    if (result != TCL_OK) {
	return result;
    }
    if (first < 0) {
	first = 0;
    }

    result = TclGetIntForIndexM(interp, objv[3], /*endValue*/ listLen - 1,
	    &last);
    if (result != TCL_OK) {
	return result;
    }
    if (last >= listLen) {
	last = listLen - 1;
    }

    if (first > last) {
	/*
	 * Returning an empty list is easy: the interpreter result is
	 * already empty.
	 */

	return TCL_OK;
    }

    result = TclListObjGetElements(interp, objv[1], &listLen, &elemPtrs);
    if (result != TCL_OK) {
	return result;
    }

    /*
     * If either the list object itself or its List internal rep is shared
     * with someone else, we must not trim in place; build a fresh list
     * holding just the requested slice instead.
     */

    if (Tcl_IsShared(objv[1]) ||
	    ((ListRepPtr(objv[1])->refCount > 1))) {
	Tcl_SetObjResult(interp, Tcl_NewListObj(last - first + 1,
		&elemPtrs[first]));
    } else {
	/*
	 * In-place is possible. Trim the tail first so that the indices of
	 * the head section stay valid for the second replace.
	 */

	if (last < (listLen - 1)) {
	    Tcl_ListObjReplace(interp, objv[1], last + 1, listLen - 1 - last,
		    0, NULL);
	}

	/*
	 * This one is not conditioned on (first > 0) in order to preserve the
	 * string-canonizing effect of [lrange 0 end].
	 */

	Tcl_ListObjReplace(interp, objv[1], 0, first, 0, NULL);
	Tcl_SetObjResult(interp, objv[1]);
    }
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LrepeatObjCmd --
*
* This procedure is invoked to process the "lrepeat" Tcl command. See
* the user documentation for details on what it does.
*
* Results:
* A standard Tcl object result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LrepeatObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    register int objc,		/* Number of arguments. */
    register Tcl_Obj *const objv[])
				/* The argument objects. */
{
    int elementCount, i, totalElems;
    Tcl_Obj *listPtr, **dataArray = NULL;

    /*
     * Check arguments for legality:
     *		lrepeat count ?value ...?
     */

    if (objc < 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "count ?value ...?");
	return TCL_ERROR;
    }
    if (TCL_OK != TclGetIntFromObj(interp, objv[1], &elementCount)) {
	return TCL_ERROR;
    }
    if (elementCount < 0) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"bad count \"%d\": must be integer >= 0", elementCount));
	Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LREPEAT", "NEGARG",
		NULL);
	return TCL_ERROR;
    }

    /*
     * Skip forward to the interesting arguments now we've finished parsing.
     */

    objc -= 2;
    objv += 2;

    /*
     * Final sanity check. Do not exceed limits on max list length. The
     * division form of the test avoids overflowing objc*elementCount.
     */

    if (elementCount && objc > LIST_MAX/elementCount) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"max length of a Tcl list (%d elements) exceeded", LIST_MAX));
	Tcl_SetErrorCode(interp, "TCL", "MEMORY", NULL);
	return TCL_ERROR;
    }

    totalElems = objc * elementCount;

    /*
     * Get an empty list object that is allocated large enough to hold each
     * init value elementCount times.
     */

    listPtr = Tcl_NewListObj(totalElems, NULL);
    if (totalElems) {
	List *listRepPtr = ListRepPtr(listPtr);

	/*
	 * Claim the preallocated slots directly in the list internal rep;
	 * they are filled in below.
	 */

	listRepPtr->elemCount = elementCount*objc;
	dataArray = &listRepPtr->elements;
    }

    /*
     * Set the elements. Note that we handle the common degenerate case of a
     * single value being repeated separately to permit the compiler as much
     * room as possible to optimize a loop that might be run a very large
     * number of times.
     */

    CLANG_ASSERT(dataArray || totalElems == 0 );
    if (objc == 1) {
	register Tcl_Obj *tmpPtr = objv[0];

	/*
	 * One reference per occupied slot, added in a single bulk increment
	 * rather than elementCount separate Tcl_IncrRefCount calls.
	 */

	tmpPtr->refCount += elementCount;
	for (i=0 ; i<elementCount ; i++) {
	    dataArray[i] = tmpPtr;
	}
    } else {
	int j, k = 0;

	for (i=0 ; i<elementCount ; i++) {
	    for (j=0 ; j<objc ; j++) {
		Tcl_IncrRefCount(objv[j]);
		dataArray[k++] = objv[j];
	    }
	}
    }
    Tcl_SetObjResult(interp, listPtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LreplaceObjCmd --
*
* This object-based procedure is invoked to process the "lreplace" Tcl
* command. See the user documentation for details on what it does.
*
* Results:
* A new Tcl list object formed by replacing zero or more elements of a
* list.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LreplaceObjCmd(
    ClientData dummy,		/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument objects. */
{
    Tcl_Obj *resultList;	/* List being edited (original or copy). */
    int firstIdx, lastIdx, numElems, delCount, status;

    if (objc < 4) {
	Tcl_WrongNumArgs(interp, 1, objv,
		"list first last ?element ...?");
	return TCL_ERROR;
    }

    status = TclListObjLength(interp, objv[1], &numElems);
    if (status != TCL_OK) {
	return status;
    }

    /*
     * Parse both range indices relative to the last element, so "end" names
     * the final element and using it includes that element in the deletion.
     */

    status = TclGetIntForIndexM(interp, objv[2], /*end*/ numElems-1,
	    &firstIdx);
    if (status != TCL_OK) {
	return status;
    }
    status = TclGetIntForIndexM(interp, objv[3], /*end*/ numElems-1,
	    &lastIdx);
    if (status != TCL_OK) {
	return status;
    }
    if (firstIdx < 0) {
	firstIdx = 0;
    }

    /*
     * A start index past the end of a non-empty list is an error. The
     * "end-*" forms never land here because TclGetIntForIndexM already
     * constrained them against numElems-1.
     */

    if ((firstIdx >= numElems) && (numElems > 0)) {
	Tcl_SetObjResult(interp, Tcl_ObjPrintf(
		"list doesn't contain element %s", TclGetString(objv[2])));
	Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LREPLACE", "BADIDX",
		NULL);
	return TCL_ERROR;
    }
    if (lastIdx >= numElems) {
	lastIdx = numElems - 1;
    }
    delCount = (firstIdx <= lastIdx) ? (lastIdx - firstIdx + 1) : 0;

    /*
     * Copy-on-write: never modify a shared list object in place.
     */

    resultList = objv[1];
    if (Tcl_IsShared(resultList)) {
	resultList = TclListObjCopy(NULL, resultList);
    }

    /*
     * Perform the replace unconditionally, even when it deletes and inserts
     * nothing: the call guarantees the result ends up as a list in canonical
     * form. Resist any temptation to optimize this case away.
     */

    Tcl_ListObjReplace(NULL, resultList, firstIdx, delCount, objc-4, objv+4);

    Tcl_SetObjResult(interp, resultList);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LreverseObjCmd --
*
* This procedure is invoked to process the "lreverse" Tcl command. See
* the user documentation for details on what it does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LreverseObjCmd(
    ClientData clientData,	/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument values. */
{
    Tcl_Obj **elemv;		/* Live element array of the list argument. */
    int elemc, i, j;

    if (objc != 2) {
	Tcl_WrongNumArgs(interp, 1, objv, "list");
	return TCL_ERROR;
    }
    if (TclListObjGetElements(interp, objv[1], &elemc, &elemv) != TCL_OK) {
	return TCL_ERROR;
    }

    /*
     * If the list is empty, just return it. [Bug 1876793]
     */

    if (!elemc) {
	Tcl_SetObjResult(interp, objv[1]);
	return TCL_OK;
    }

    /*
     * If the list object is shared, or its List internal rep is shared with
     * another Tcl_Obj, reversing in place would corrupt the other holder's
     * view, so build a new list in reverse order instead.
     */

    if (Tcl_IsShared(objv[1])
	    || (ListRepPtr(objv[1])->refCount > 1)) {	/* Bug 1675044 */
	Tcl_Obj *resultObj, **dataArray;
	List *listRepPtr;

	resultObj = Tcl_NewListObj(elemc, NULL);
	listRepPtr = ListRepPtr(resultObj);
	listRepPtr->elemCount = elemc;
	dataArray = &listRepPtr->elements;

	/*
	 * Fill the preallocated slots back-to-front; each element gains one
	 * reference from the new list.
	 */

	for (i=0,j=elemc-1 ; i<elemc ; i++,j--) {
	    dataArray[j] = elemv[i];
	    Tcl_IncrRefCount(elemv[i]);
	}

	Tcl_SetObjResult(interp, resultObj);
    } else {
	/*
	 * Not shared, so swap "in place". This relies on
	 * TclListObjGetElements above returning a pointer to the live array
	 * of Tcl_Obj values.
	 */

	for (i=0,j=elemc-1 ; i<j ; i++,j--) {
	    Tcl_Obj *tmp = elemv[i];

	    elemv[i] = elemv[j];
	    elemv[j] = tmp;
	}

	/* The cached string rep no longer matches the reordered elements. */
	TclInvalidateStringRep(objv[1]);
	Tcl_SetObjResult(interp, objv[1]);
    }
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LsearchObjCmd --
*
* This procedure is invoked to process the "lsearch" Tcl command. See
* the user documentation for details on what it does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LsearchObjCmd(
    ClientData clientData,	/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument values. */
{
    const char *bytes, *patternBytes;
    int i, match, index, result, listc, length, elemLen, bisect;
    int dataType, isIncreasing, lower, upper, offset;
    Tcl_WideInt patWide, objWide;
    int allMatches, inlineReturn, negatedMatch, returnSubindices, noCase;
    double patDouble, objDouble;
    SortInfo sortInfo;
    Tcl_Obj *patObj, **listv, *listPtr, *startPtr, *itemPtr;
    SortStrCmpFn_t strCmpFn = strcmp;
    Tcl_RegExp regexp = NULL;
    static const char *const options[] = {
	"-all",	    "-ascii",   "-bisect", "-decreasing", "-dictionary",
	"-exact",   "-glob",    "-increasing", "-index",
	"-inline",  "-integer", "-nocase", "-not",
	"-real",    "-regexp",  "-sorted", "-start",
	"-subindices", NULL
    };
    enum options {
	LSEARCH_ALL, LSEARCH_ASCII, LSEARCH_BISECT, LSEARCH_DECREASING,
	LSEARCH_DICTIONARY, LSEARCH_EXACT, LSEARCH_GLOB, LSEARCH_INCREASING,
	LSEARCH_INDEX, LSEARCH_INLINE, LSEARCH_INTEGER, LSEARCH_NOCASE,
	LSEARCH_NOT, LSEARCH_REAL, LSEARCH_REGEXP, LSEARCH_SORTED,
	LSEARCH_START, LSEARCH_SUBINDICES
    };
    enum datatypes {
	ASCII, DICTIONARY, INTEGER, REAL
    };
    enum modes {
	EXACT, GLOB, REGEXP, SORTED
    };
    enum modes mode;

    /*
     * Defaults: glob matching over ASCII data, increasing order, first
     * matching index as the result, searching from the start of the list.
     */

    mode = GLOB;
    dataType = ASCII;
    isIncreasing = 1;
    allMatches = 0;
    inlineReturn = 0;
    returnSubindices = 0;
    negatedMatch = 0;
    bisect = 0;
    listPtr = NULL;
    startPtr = NULL;
    offset = 0;
    noCase = 0;
    sortInfo.compareCmdPtr = NULL;
    sortInfo.isIncreasing = 1;
    sortInfo.sortMode = 0;
    sortInfo.interp = interp;
    sortInfo.resultCode = TCL_OK;
    sortInfo.indexv = NULL;
    sortInfo.indexc = 0;

    if (objc < 3) {
	Tcl_WrongNumArgs(interp, 1, objv, "?-option value ...? list pattern");
	return TCL_ERROR;
    }

    /*
     * Parse every option preceding the final two (list, pattern) arguments.
     */

    for (i = 1; i < objc-2; i++) {
	if (Tcl_GetIndexFromObj(interp, objv[i], options, "option", 0, &index)
		!= TCL_OK) {
	    if (startPtr != NULL) {
		Tcl_DecrRefCount(startPtr);
	    }
	    result = TCL_ERROR;
	    goto done;
	}
	switch ((enum options) index) {
	case LSEARCH_ALL:		/* -all */
	    allMatches = 1;
	    break;
	case LSEARCH_ASCII:		/* -ascii */
	    dataType = ASCII;
	    break;
	case LSEARCH_BISECT:		/* -bisect */
	    mode = SORTED;
	    bisect = 1;
	    break;
	case LSEARCH_DECREASING:	/* -decreasing */
	    isIncreasing = 0;
	    sortInfo.isIncreasing = 0;
	    break;
	case LSEARCH_DICTIONARY:	/* -dictionary */
	    dataType = DICTIONARY;
	    break;
	case LSEARCH_EXACT:		/* -exact */
	    mode = EXACT;
	    break;
	case LSEARCH_GLOB:		/* -glob */
	    mode = GLOB;
	    break;
	case LSEARCH_INCREASING:	/* -increasing */
	    isIncreasing = 1;
	    sortInfo.isIncreasing = 1;
	    break;
	case LSEARCH_INLINE:		/* -inline */
	    inlineReturn = 1;
	    break;
	case LSEARCH_INTEGER:		/* -integer */
	    dataType = INTEGER;
	    break;
	case LSEARCH_NOCASE:		/* -nocase */
	    strCmpFn = TclUtfCasecmp;
	    noCase = 1;
	    break;
	case LSEARCH_NOT:		/* -not */
	    negatedMatch = 1;
	    break;
	case LSEARCH_REAL:		/* -real */
	    dataType = REAL;
	    break;
	case LSEARCH_REGEXP:		/* -regexp */
	    mode = REGEXP;
	    break;
	case LSEARCH_SORTED:		/* -sorted */
	    mode = SORTED;
	    break;
	case LSEARCH_SUBINDICES:	/* -subindices */
	    returnSubindices = 1;
	    break;
	case LSEARCH_START:		/* -start */
	    /*
	     * If there was a previous -start option, release its saved index
	     * because it will either be replaced or there will be an error.
	     */

	    if (startPtr != NULL) {
		Tcl_DecrRefCount(startPtr);
	    }
	    if (i > objc-4) {
		Tcl_SetObjResult(interp, Tcl_NewStringObj(
			"missing starting index", -1));
		Tcl_SetErrorCode(interp, "TCL", "ARGUMENT", "MISSING", NULL);
		result = TCL_ERROR;
		goto done;
	    }
	    i++;
	    if (objv[i] == objv[objc - 2]) {
		/*
		 * Take copy to prevent shimmering problems. Note that it does
		 * not matter if the index obj is also a component of the list
		 * being searched. We only need to copy where the list and the
		 * index are one-and-the-same.
		 */

		startPtr = Tcl_DuplicateObj(objv[i]);
	    } else {
		startPtr = objv[i];
		Tcl_IncrRefCount(startPtr);
	    }
	    break;
	case LSEARCH_INDEX: {		/* -index */
	    Tcl_Obj **indices;
	    int j;

	    /*
	     * Free any index vector saved by an earlier -index option; it is
	     * about to be replaced (or we are about to error out).
	     */

	    if (sortInfo.indexc > 1) {
		TclStackFree(interp, sortInfo.indexv);
	    }
	    if (i > objc-4) {
		if (startPtr != NULL) {
		    Tcl_DecrRefCount(startPtr);
		}
		Tcl_SetObjResult(interp, Tcl_NewStringObj(
			"\"-index\" option must be followed by list index",
			-1));
		Tcl_SetErrorCode(interp, "TCL", "ARGUMENT", "MISSING", NULL);
		return TCL_ERROR;
	    }

	    /*
	     * Store the extracted indices for processing by sublist
	     * extraction. Note that we don't do this using objects because
	     * that has shimmering problems.
	     */

	    i++;
	    if (TclListObjGetElements(interp, objv[i],
		    &sortInfo.indexc, &indices) != TCL_OK) {
		if (startPtr != NULL) {
		    Tcl_DecrRefCount(startPtr);
		}
		return TCL_ERROR;
	    }
	    switch (sortInfo.indexc) {
	    case 0:
		sortInfo.indexv = NULL;
		break;
	    case 1:
		sortInfo.indexv = &sortInfo.singleIndex;
		break;
	    default:
		sortInfo.indexv =
			TclStackAlloc(interp, sizeof(int) * sortInfo.indexc);
	    }

	    /*
	     * Fill the array by parsing each index. We don't know whether
	     * their scale is sensible yet, but we at least perform the
	     * syntactic check here.
	     */

	    for (j=0 ; j<sortInfo.indexc ; j++) {
		if (TclGetIntForIndexM(interp, indices[j], SORTIDX_END,
			&sortInfo.indexv[j]) != TCL_OK) {
		    Tcl_AppendObjToErrorInfo(interp, Tcl_ObjPrintf(
			    "\n (-index option item number %d)", j));
		    result = TCL_ERROR;
		    goto done;
		}
	    }
	    break;
	}
	}
    }

    /*
     * Subindices only make sense if asked for with -index option set. Here
     * sortInfo.indexv is NULL (indexc==0), so nothing needs freeing beyond
     * the saved start index.
     */

    if (returnSubindices && sortInfo.indexc==0) {
	if (startPtr != NULL) {
	    Tcl_DecrRefCount(startPtr);
	}
	Tcl_SetObjResult(interp, Tcl_NewStringObj(
		"-subindices cannot be used without -index option", -1));
	Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LSEARCH",
		"BAD_OPTION_MIX", NULL);
	return TCL_ERROR;
    }

    if (bisect && (allMatches || negatedMatch)) {
	/*
	 * Release the saved -start index here, and any -index vector via the
	 * "done" label; returning directly from this path used to leak both.
	 */

	if (startPtr != NULL) {
	    Tcl_DecrRefCount(startPtr);
	}
	Tcl_SetObjResult(interp, Tcl_NewStringObj(
		"-bisect is not compatible with -all or -not", -1));
	Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LSEARCH",
		"BAD_OPTION_MIX", NULL);
	result = TCL_ERROR;
	goto done;
    }

    if (mode == REGEXP) {
	/*
	 * We can shimmer regexp/list if listv[i] == pattern, so get the
	 * regexp rep before the list rep. First time round, omit the interp
	 * and hope that the compilation will succeed. If it fails, we'll
	 * recompile in "expensive" mode with a place to put error messages.
	 */

	regexp = Tcl_GetRegExpFromObj(NULL, objv[objc - 1],
		TCL_REG_ADVANCED | TCL_REG_NOSUB |
		(noCase ? TCL_REG_NOCASE : 0));
	if (regexp == NULL) {
	    /*
	     * Failed to compile the RE. Try again without the TCL_REG_NOSUB
	     * flag in case the RE had sub-expressions in it [Bug 1366683]. If
	     * this fails, an error message will be left in the interpreter.
	     */

	    regexp = Tcl_GetRegExpFromObj(interp, objv[objc - 1],
		    TCL_REG_ADVANCED | (noCase ? TCL_REG_NOCASE : 0));
	}

	if (regexp == NULL) {
	    if (startPtr != NULL) {
		Tcl_DecrRefCount(startPtr);
	    }
	    result = TCL_ERROR;
	    goto done;
	}
    }

    /*
     * Make sure the list argument is a list object and get its length and a
     * pointer to its array of element pointers.
     */

    result = TclListObjGetElements(interp, objv[objc - 2], &listc, &listv);
    if (result != TCL_OK) {
	if (startPtr != NULL) {
	    Tcl_DecrRefCount(startPtr);
	}
	goto done;
    }

    /*
     * Get the user-specified start offset.
     */

    if (startPtr) {
	result = TclGetIntForIndexM(interp, startPtr, listc-1, &offset);
	Tcl_DecrRefCount(startPtr);
	if (result != TCL_OK) {
	    goto done;
	}
	if (offset < 0) {
	    offset = 0;
	}

	/*
	 * If the search started past the end of the list, we just return a
	 * "did not match anything at all" result straight away. [Bug 1374778]
	 */

	if (offset > listc-1) {
	    if (sortInfo.indexc > 1) {
		TclStackFree(interp, sortInfo.indexv);
	    }
	    if (allMatches || inlineReturn) {
		Tcl_ResetResult(interp);
	    } else {
		Tcl_SetObjResult(interp, Tcl_NewIntObj(-1));
	    }
	    return TCL_OK;
	}
    }

    /*
     * Decode the pattern once, in whatever representation the comparison
     * loop below will need. patObj stays an alias of the caller-owned
     * argument; no reference is added here.
     */

    patObj = objv[objc - 1];
    patternBytes = NULL;
    if (mode == EXACT || mode == SORTED) {
	switch ((enum datatypes) dataType) {
	case ASCII:
	case DICTIONARY:
	    patternBytes = TclGetStringFromObj(patObj, &length);
	    break;
	case INTEGER:
	    result = TclGetWideIntFromObj(interp, patObj, &patWide);
	    if (result != TCL_OK) {
		goto done;
	    }

	    /*
	     * List representation might have been shimmered; restore it. [Bug
	     * 1844789]
	     */

	    TclListObjGetElements(NULL, objv[objc - 2], &listc, &listv);
	    break;
	case REAL:
	    result = Tcl_GetDoubleFromObj(interp, patObj, &patDouble);
	    if (result != TCL_OK) {
		goto done;
	    }

	    /*
	     * List representation might have been shimmered; restore it. [Bug
	     * 1844789]
	     */

	    TclListObjGetElements(NULL, objv[objc - 2], &listc, &listv);
	    break;
	}
    } else {
	patternBytes = TclGetStringFromObj(patObj, &length);
    }

    /*
     * Set default index value to -1, indicating failure; if we find the item
     * in the course of our search, index will be set to the correct value.
     */

    index = -1;
    match = 0;

    if (mode == SORTED && !allMatches && !negatedMatch) {
	/*
	 * If the data is sorted, we can do a more intelligent search. Note
	 * that there is no point in being smart when -all was specified; in
	 * that case, we have to look at all items anyway, and there is no
	 * sense in doing this when the match sense is inverted.
	 */

	lower = offset - 1;
	upper = listc;
	while (lower + 1 != upper && sortInfo.resultCode == TCL_OK) {
	    i = (lower + upper)/2;
	    if (sortInfo.indexc != 0) {
		itemPtr = SelectObjFromSublist(listv[i], &sortInfo);
		if (sortInfo.resultCode != TCL_OK) {
		    result = sortInfo.resultCode;
		    goto done;
		}
	    } else {
		itemPtr = listv[i];
	    }
	    switch ((enum datatypes) dataType) {
	    case ASCII:
		bytes = TclGetString(itemPtr);
		match = strCmpFn(patternBytes, bytes);
		break;
	    case DICTIONARY:
		bytes = TclGetString(itemPtr);
		match = DictionaryCompare(patternBytes, bytes);
		break;
	    case INTEGER:
		result = TclGetWideIntFromObj(interp, itemPtr, &objWide);
		if (result != TCL_OK) {
		    goto done;
		}
		if (patWide == objWide) {
		    match = 0;
		} else if (patWide < objWide) {
		    match = -1;
		} else {
		    match = 1;
		}
		break;
	    case REAL:
		result = Tcl_GetDoubleFromObj(interp, itemPtr, &objDouble);
		if (result != TCL_OK) {
		    goto done;
		}
		if (patDouble == objDouble) {
		    match = 0;
		} else if (patDouble < objDouble) {
		    match = -1;
		} else {
		    match = 1;
		}
		break;
	    }
	    if (match == 0) {
		/*
		 * Normally, binary search is written to stop when it finds a
		 * match. If there are duplicates of an element in the list,
		 * our first match might not be the first occurrence.
		 * Consider: 0 0 0 1 1 1 2 2 2
		 *
		 * To maintain consistency with standard lsearch semantics, we
		 * must find the leftmost occurrence of the pattern in the
		 * list. Thus we don't just stop searching here. This
		 * variation means that a search always makes log n
		 * comparisons (normal binary search might "get lucky" with an
		 * early comparison).
		 *
		 * In bisect mode though, we want the last of equals.
		 */

		index = i;
		if (bisect) {
		    lower = i;
		} else {
		    upper = i;
		}
	    } else if (match > 0) {
		if (isIncreasing) {
		    lower = i;
		} else {
		    upper = i;
		}
	    } else {
		if (isIncreasing) {
		    upper = i;
		} else {
		    lower = i;
		}
	    }
	}
	if (bisect && index < 0) {
	    index = lower;
	}
    } else {
	/*
	 * We need to do a linear search, because (at least one) of:
	 *   - our matcher can only tell equal vs. not equal
	 *   - our matching sense is negated
	 *   - we're building a list of all matched items
	 */

	if (allMatches) {
	    listPtr = Tcl_NewListObj(0, NULL);
	}
	for (i = offset; i < listc; i++) {
	    match = 0;
	    if (sortInfo.indexc != 0) {
		itemPtr = SelectObjFromSublist(listv[i], &sortInfo);
		if (sortInfo.resultCode != TCL_OK) {
		    if (listPtr != NULL) {
			Tcl_DecrRefCount(listPtr);
		    }
		    result = sortInfo.resultCode;
		    goto done;
		}
	    } else {
		itemPtr = listv[i];
	    }

	    switch (mode) {
	    case SORTED:
	    case EXACT:
		switch ((enum datatypes) dataType) {
		case ASCII:
		    bytes = TclGetStringFromObj(itemPtr, &elemLen);
		    if (length == elemLen) {
			/*
			 * This split allows for more optimal compilation of
			 * memcmp/strcasecmp.
			 */

			if (noCase) {
			    match = (TclUtfCasecmp(bytes, patternBytes) == 0);
			} else {
			    match = (memcmp(bytes, patternBytes,
				    (size_t) length) == 0);
			}
		    }
		    break;

		case DICTIONARY:
		    bytes = TclGetString(itemPtr);
		    match = (DictionaryCompare(bytes, patternBytes) == 0);
		    break;

		case INTEGER:
		    result = TclGetWideIntFromObj(interp, itemPtr, &objWide);
		    if (result != TCL_OK) {
			if (listPtr != NULL) {
			    Tcl_DecrRefCount(listPtr);
			}
			goto done;
		    }
		    match = (objWide == patWide);
		    break;

		case REAL:
		    result = Tcl_GetDoubleFromObj(interp,itemPtr, &objDouble);
		    if (result != TCL_OK) {
			if (listPtr) {
			    Tcl_DecrRefCount(listPtr);
			}
			goto done;
		    }
		    match = (objDouble == patDouble);
		    break;
		}
		break;

	    case GLOB:
		match = Tcl_StringCaseMatch(TclGetString(itemPtr),
			patternBytes, noCase);
		break;

	    case REGEXP:
		match = Tcl_RegExpExecObj(interp, regexp, itemPtr, 0, 0, 0);
		if (match < 0) {
		    /*
		     * Do not decrement patObj's refcount here: it is an
		     * alias of the caller-owned objv[objc-1] and this
		     * function never added a reference to it. Only the
		     * accumulated result list (if any) is ours to release.
		     */

		    if (listPtr != NULL) {
			Tcl_DecrRefCount(listPtr);
		    }
		    result = TCL_ERROR;
		    goto done;
		}
		break;
	    }

	    /*
	     * Invert match condition for -not.
	     */

	    if (negatedMatch) {
		match = !match;
	    }
	    if (!match) {
		continue;
	    }
	    if (!allMatches) {
		index = i;
		break;
	    } else if (inlineReturn) {
		/*
		 * Note that these appends are not expected to fail.
		 */

		if (returnSubindices && (sortInfo.indexc != 0)) {
		    itemPtr = SelectObjFromSublist(listv[i], &sortInfo);
		} else {
		    itemPtr = listv[i];
		}
		Tcl_ListObjAppendElement(interp, listPtr, itemPtr);
	    } else if (returnSubindices) {
		int j;

		itemPtr = Tcl_NewIntObj(i);
		for (j=0 ; j<sortInfo.indexc ; j++) {
		    Tcl_ListObjAppendElement(interp, itemPtr,
			    Tcl_NewIntObj(sortInfo.indexv[j]));
		}
		Tcl_ListObjAppendElement(interp, listPtr, itemPtr);
	    } else {
		Tcl_ListObjAppendElement(interp, listPtr, Tcl_NewIntObj(i));
	    }
	}
    }

    /*
     * Return everything or a single value.
     */

    if (allMatches) {
	Tcl_SetObjResult(interp, listPtr);
    } else if (!inlineReturn) {
	if (returnSubindices) {
	    int j;

	    itemPtr = Tcl_NewIntObj(index);
	    for (j=0 ; j<sortInfo.indexc ; j++) {
		Tcl_ListObjAppendElement(interp, itemPtr,
			Tcl_NewIntObj(sortInfo.indexv[j]));
	    }
	    Tcl_SetObjResult(interp, itemPtr);
	} else {
	    Tcl_SetObjResult(interp, Tcl_NewIntObj(index));
	}
    } else if (index < 0) {
	/*
	 * Is this superfluous? The result should be a blank object by
	 * default...
	 */

	Tcl_SetObjResult(interp, Tcl_NewObj());
    } else {
	Tcl_SetObjResult(interp, listv[index]);
    }
    result = TCL_OK;

    /*
     * Cleanup the index list array.
     */

  done:
    if (sortInfo.indexc > 1) {
	TclStackFree(interp, sortInfo.indexv);
    }
    return result;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LsetObjCmd --
*
* This procedure is invoked to process the "lset" Tcl command. See the
* user documentation for details on what it does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LsetObjCmd(
    ClientData clientData,	/* Not used. */
    Tcl_Interp *interp,		/* Current interpreter. */
    int objc,			/* Number of arguments. */
    Tcl_Obj *const objv[])	/* Argument values. */
{
    Tcl_Obj *varValuePtr;	/* Current value of the list variable. */
    Tcl_Obj *newValuePtr;	/* Updated value to be stored back. */

    if (objc < 3) {
	Tcl_WrongNumArgs(interp, 1, objv,
		"listVar ?index? ?index ...? value");
	return TCL_ERROR;
    }

    /*
     * Fetch the variable's current value; fail if it cannot be read.
     */

    varValuePtr = Tcl_ObjGetVar2(interp, objv[1], NULL, TCL_LEAVE_ERR_MSG);
    if (varValuePtr == NULL) {
	return TCL_ERROR;
    }

    /*
     * Compute the substituted list value. With a single index argument
     * (objc==4) that argument may itself be a list of indices, which
     * TclLsetList handles; otherwise each argument before the final value
     * is one index and TclLsetFlat applies. Either helper hands back the
     * value itself or an unshared copy of it, or NULL on failure.
     */

    if (objc == 4) {
	newValuePtr = TclLsetList(interp, varValuePtr, objv[2], objv[3]);
    } else {
	newValuePtr = TclLsetFlat(interp, varValuePtr, objc-3, objv+2,
		objv[objc-1]);
    }
    if (newValuePtr == NULL) {
	return TCL_ERROR;
    }

    /*
     * Store the value back through the variable so that traces fire, then
     * drop our reference to it.
     */

    varValuePtr = Tcl_ObjSetVar2(interp, objv[1], NULL, newValuePtr,
	    TCL_LEAVE_ERR_MSG);
    Tcl_DecrRefCount(newValuePtr);
    if (varValuePtr == NULL) {
	return TCL_ERROR;
    }

    /*
     * The interpreter result is the variable's new value.
     */

    Tcl_SetObjResult(interp, varValuePtr);
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* Tcl_LsortObjCmd --
*
* This procedure is invoked to process the "lsort" Tcl command. See the
* user documentation for details on what it does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*----------------------------------------------------------------------
*/
int
Tcl_LsortObjCmd(
ClientData clientData, /* Not used. */
Tcl_Interp *interp, /* Current interpreter. */
int objc, /* Number of arguments. */
Tcl_Obj *const objv[]) /* Argument values. */
{
int i, j, index, indices, length, nocase = 0, indexc;
int sortMode = SORTMODE_ASCII;
int group, groupSize, groupOffset, idx, allocatedIndexVector = 0;
Tcl_Obj *resultPtr, *cmdPtr, **listObjPtrs, *listObj, *indexPtr;
SortElement *elementArray, *elementPtr;
SortInfo sortInfo; /* Information about this sort that needs to
* be passed to the comparison function. */
# define NUM_LISTS 30
SortElement *subList[NUM_LISTS+1];
/* This array holds pointers to temporary
* lists built during the merge sort. Element
* i of the array holds a list of length
* 2**i. */
static const char *const switches[] = {
"-ascii", "-command", "-decreasing", "-dictionary", "-increasing",
"-index", "-indices", "-integer", "-nocase", "-real", "-stride",
"-unique", NULL
};
enum Lsort_Switches {
LSORT_ASCII, LSORT_COMMAND, LSORT_DECREASING, LSORT_DICTIONARY,
LSORT_INCREASING, LSORT_INDEX, LSORT_INDICES, LSORT_INTEGER,
LSORT_NOCASE, LSORT_REAL, LSORT_STRIDE, LSORT_UNIQUE
};
if (objc < 2) {
Tcl_WrongNumArgs(interp, 1, objv, "?-option value ...? list");
return TCL_ERROR;
}
/*
* Parse arguments to set up the mode for the sort.
*/
sortInfo.isIncreasing = 1;
sortInfo.sortMode = SORTMODE_ASCII;
sortInfo.indexv = NULL;
sortInfo.indexc = 0;
sortInfo.unique = 0;
sortInfo.interp = interp;
sortInfo.resultCode = TCL_OK;
cmdPtr = NULL;
indices = 0;
group = 0;
groupSize = 1;
groupOffset = 0;
indexPtr = NULL;
for (i = 1; i < objc-1; i++) {
if (Tcl_GetIndexFromObj(interp, objv[i], switches, "option", 0,
&index) != TCL_OK) {
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
switch ((enum Lsort_Switches) index) {
case LSORT_ASCII:
sortInfo.sortMode = SORTMODE_ASCII;
break;
case LSORT_COMMAND:
if (i == objc-2) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"\"-command\" option must be followed "
"by comparison command", -1));
Tcl_SetErrorCode(interp, "TCL", "ARGUMENT", "MISSING", NULL);
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
sortInfo.sortMode = SORTMODE_COMMAND;
cmdPtr = objv[i+1];
i++;
break;
case LSORT_DECREASING:
sortInfo.isIncreasing = 0;
break;
case LSORT_DICTIONARY:
sortInfo.sortMode = SORTMODE_DICTIONARY;
break;
case LSORT_INCREASING:
sortInfo.isIncreasing = 1;
break;
case LSORT_INDEX: {
int indexc, dummy;
Tcl_Obj **indexv;
if (i == objc-2) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"\"-index\" option must be followed by list index",
-1));
Tcl_SetErrorCode(interp, "TCL", "ARGUMENT", "MISSING", NULL);
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
if (TclListObjGetElements(interp, objv[i+1], &indexc,
&indexv) != TCL_OK) {
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
/*
* Check each of the indices for syntactic correctness. Note that
* we do not store the converted values here because we do not
* know if this is the only -index option yet and so we can't
* allocate any space; that happens after the scan through all the
* options is done.
*/
for (j=0 ; j<indexc ; j++) {
if (TclGetIntForIndexM(interp, indexv[j], SORTIDX_END,
&dummy) != TCL_OK) {
Tcl_AppendObjToErrorInfo(interp, Tcl_ObjPrintf(
"\n (-index option item number %d)", j));
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
}
indexPtr = objv[i+1];
i++;
break;
}
case LSORT_INTEGER:
sortInfo.sortMode = SORTMODE_INTEGER;
break;
case LSORT_NOCASE:
nocase = 1;
break;
case LSORT_REAL:
sortInfo.sortMode = SORTMODE_REAL;
break;
case LSORT_UNIQUE:
sortInfo.unique = 1;
break;
case LSORT_INDICES:
indices = 1;
break;
case LSORT_STRIDE:
if (i == objc-2) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"\"-stride\" option must be "
"followed by stride length", -1));
Tcl_SetErrorCode(interp, "TCL", "ARGUMENT", "MISSING", NULL);
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
if (Tcl_GetIntFromObj(interp, objv[i+1], &groupSize) != TCL_OK) {
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
if (groupSize < 2) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"stride length must be at least 2", -1));
Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LSORT",
"BADSTRIDE", NULL);
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
group = 1;
i++;
break;
}
}
if (nocase && (sortInfo.sortMode == SORTMODE_ASCII)) {
sortInfo.sortMode = SORTMODE_ASCII_NC;
}
/*
* Now extract the -index list for real, if present. No failures are
* expected here; the values are all of the right type or convertible to
* it.
*/
if (indexPtr) {
Tcl_Obj **indexv;
TclListObjGetElements(interp, indexPtr, &sortInfo.indexc, &indexv);
switch (sortInfo.indexc) {
case 0:
sortInfo.indexv = NULL;
break;
case 1:
sortInfo.indexv = &sortInfo.singleIndex;
break;
default:
sortInfo.indexv =
TclStackAlloc(interp, sizeof(int) * sortInfo.indexc);
allocatedIndexVector = 1; /* Cannot use indexc field, as it
* might be decreased by 1 later. */
}
for (j=0 ; j<sortInfo.indexc ; j++) {
TclGetIntForIndexM(interp, indexv[j], SORTIDX_END,
&sortInfo.indexv[j]);
}
}
listObj = objv[objc-1];
if (sortInfo.sortMode == SORTMODE_COMMAND) {
Tcl_Obj *newCommandPtr, *newObjPtr;
/*
* When sorting using a command, we are reentrant and therefore might
* have the representation of the list being sorted shimmered out from
* underneath our feet. Take a copy (cheap) to prevent this. [Bug
* 1675116]
*/
listObj = TclListObjCopy(interp, listObj);
if (listObj == NULL) {
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
/*
* The existing command is a list. We want to flatten it, append two
* dummy arguments on the end, and replace these arguments later.
*/
newCommandPtr = Tcl_DuplicateObj(cmdPtr);
TclNewObj(newObjPtr);
Tcl_IncrRefCount(newCommandPtr);
if (Tcl_ListObjAppendElement(interp, newCommandPtr, newObjPtr)
!= TCL_OK) {
TclDecrRefCount(newCommandPtr);
TclDecrRefCount(listObj);
Tcl_IncrRefCount(newObjPtr);
TclDecrRefCount(newObjPtr);
sortInfo.resultCode = TCL_ERROR;
goto done2;
}
Tcl_ListObjAppendElement(interp, newCommandPtr, Tcl_NewObj());
sortInfo.compareCmdPtr = newCommandPtr;
}
sortInfo.resultCode = TclListObjGetElements(interp, listObj,
&length, &listObjPtrs);
if (sortInfo.resultCode != TCL_OK || length <= 0) {
goto done;
}
/*
* Check for sanity when grouping elements of the overall list together
* because of the -stride option. [TIP #326]
*/
if (group) {
if (length % groupSize) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"list size must be a multiple of the stride length",
-1));
Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LSORT", "BADSTRIDE",
NULL);
sortInfo.resultCode = TCL_ERROR;
goto done;
}
length = length / groupSize;
if (sortInfo.indexc > 0) {
/*
* Use the first value in the list supplied to -index as the
* offset of the element within each group by which to sort.
*/
groupOffset = sortInfo.indexv[0];
if (groupOffset <= SORTIDX_END) {
groupOffset = (groupOffset - SORTIDX_END) + groupSize - 1;
}
if (groupOffset < 0 || groupOffset >= groupSize) {
Tcl_SetObjResult(interp, Tcl_NewStringObj(
"when used with \"-stride\", the leading \"-index\""
" value must be within the group", -1));
Tcl_SetErrorCode(interp, "TCL", "OPERATION", "LSORT",
"BADINDEX", NULL);
sortInfo.resultCode = TCL_ERROR;
goto done;
}
if (sortInfo.indexc == 1) {
sortInfo.indexc = 0;
sortInfo.indexv = NULL;
} else {
sortInfo.indexc--;
/*
* Do not shrink the actual memory block used; that doesn't
* work with TclStackAlloc-allocated memory. [Bug 2918962]
*/
for (i = 0; i < sortInfo.indexc; i++) {
sortInfo.indexv[i] = sortInfo.indexv[i+1];
}
}
}
}
sortInfo.numElements = length;
indexc = sortInfo.indexc;
sortMode = sortInfo.sortMode;
if ((sortMode == SORTMODE_ASCII_NC)
|| (sortMode == SORTMODE_DICTIONARY)) {
/*
* For this function's purpose all string-based modes are equivalent
*/
sortMode = SORTMODE_ASCII;
}
/*
* Initialize the sublists. After the following loop, subList[i] will
* contain a sorted sublist of length 2**i. Use one extra subList at the
* end, always at NULL, to indicate the end of the lists.
*/
for (j=0 ; j<=NUM_LISTS ; j++) {
subList[j] = NULL;
}
/*
* The following loop creates a SortElement for each list element and
* begins sorting it into the sublists as it appears.
*/
elementArray = TclStackAlloc(interp, length * sizeof(SortElement));
for (i=0; i < length; i++){
idx = groupSize * i + groupOffset;
if (indexc) {
/*
* If this is an indexed sort, retrieve the corresponding element
*/
indexPtr = SelectObjFromSublist(listObjPtrs[idx], &sortInfo);
if (sortInfo.resultCode != TCL_OK) {
goto done1;
}
} else {
indexPtr = listObjPtrs[idx];
}
/*
* Determine the "value" of this object for sorting purposes
*/
if (sortMode == SORTMODE_ASCII) {
elementArray[i].collationKey.strValuePtr = TclGetString(indexPtr);
} else if (sortMode == SORTMODE_INTEGER) {
Tcl_WideInt a;
if (TclGetWideIntFromObj(sortInfo.interp, indexPtr, &a) != TCL_OK) {
sortInfo.resultCode = TCL_ERROR;
goto done1;
}
elementArray[i].collationKey.wideValue = a;
} else if (sortMode == SORTMODE_REAL) {
double a;
if (Tcl_GetDoubleFromObj(sortInfo.interp, indexPtr,
&a) != TCL_OK) {
sortInfo.resultCode = TCL_ERROR;
goto done1;
}
elementArray[i].collationKey.doubleValue = a;
} else {
elementArray[i].collationKey.objValuePtr = indexPtr;
}
/*
* Determine the representation of this element in the result: either
* the objPtr itself, or its index in the original list.
*/
if (indices || group) {
elementArray[i].payload.index = idx;
} else {
elementArray[i].payload.objPtr = listObjPtrs[idx];
}
/*
* Merge this element in the pre-existing sublists (and merge together
* sublists when we have two of the same size).
*/
elementArray[i].nextPtr = NULL;
elementPtr = &elementArray[i];
for (j=0 ; subList[j] ; j++) {
elementPtr = MergeLists(subList[j], elementPtr, &sortInfo);
subList[j] = NULL;
}
if (j >= NUM_LISTS) {
j = NUM_LISTS-1;
}
subList[j] = elementPtr;
}
/*
* Merge all sublists
*/
elementPtr = subList[0];
for (j=1 ; j<NUM_LISTS ; j++) {
elementPtr = MergeLists(subList[j], elementPtr, &sortInfo);
}
/*
* Now store the sorted elements in the result list.
*/
if (sortInfo.resultCode == TCL_OK) {
List *listRepPtr;
Tcl_Obj **newArray, *objPtr;
resultPtr = Tcl_NewListObj(sortInfo.numElements * groupSize, NULL);
listRepPtr = ListRepPtr(resultPtr);
newArray = &listRepPtr->elements;
if (group) {
for (i=0; elementPtr!=NULL ; elementPtr=elementPtr->nextPtr) {
idx = elementPtr->payload.index;
for (j = 0; j < groupSize; j++) {
if (indices) {
objPtr = Tcl_NewIntObj(idx + j - groupOffset);
newArray[i++] = objPtr;
Tcl_IncrRefCount(objPtr);
} else {
objPtr = listObjPtrs[idx + j - groupOffset];
newArray[i++] = objPtr;
Tcl_IncrRefCount(objPtr);
}
}
}
} else if (indices) {
for (i=0; elementPtr != NULL ; elementPtr = elementPtr->nextPtr) {
objPtr = Tcl_NewIntObj(elementPtr->payload.index);
newArray[i++] = objPtr;
Tcl_IncrRefCount(objPtr);
}
} else {
for (i=0; elementPtr != NULL ; elementPtr = elementPtr->nextPtr) {
objPtr = elementPtr->payload.objPtr;
newArray[i++] = objPtr;
Tcl_IncrRefCount(objPtr);
}
}
listRepPtr->elemCount = i;
Tcl_SetObjResult(interp, resultPtr);
}
done1:
TclStackFree(interp, elementArray);
done:
if (sortMode == SORTMODE_COMMAND) {
TclDecrRefCount(sortInfo.compareCmdPtr);
TclDecrRefCount(listObj);
sortInfo.compareCmdPtr = NULL;
}
done2:
if (allocatedIndexVector) {
TclStackFree(interp, sortInfo.indexv);
}
return sortInfo.resultCode;
}
/*
*----------------------------------------------------------------------
*
* MergeLists -
*
* This procedure combines two sorted lists of SortElement structures
* into a single sorted list.
*
* Results:
* The unified list of SortElement structures.
*
* Side effects:
* If infoPtr->unique is set then infoPtr->numElements may be updated.
* Possibly others, if a user-defined comparison command does something
* weird.
*
* Note:
* If infoPtr->unique is set, the merge assumes that there are no
* "repeated" elements in each of the left and right lists. In that case,
* if any element of the left list is equivalent to one in the right list
* it is omitted from the merged list.
*
* This simplified mechanism works because of the special way our
* MergeSort creates the sublists to be merged and will fail to eliminate
* all repeats in the general case where they are already present in
* either the left or right list. A general code would need to skip
* adjacent initial repeats in the left and right lists before comparing
* their initial elements, at each step.
*
*----------------------------------------------------------------------
*/
static SortElement *
MergeLists(
    SortElement *leftPtr,	/* First list to be merged; may be NULL. */
    SortElement *rightPtr,	/* Second list to be merged; may be NULL. */
    SortInfo *infoPtr)		/* Information needed by the comparison
				 * operator. */
{
    SortElement *headPtr, *tailPtr;
    int cmp;

    /* Merging with an empty list is trivial: the result is the other list. */
    if (leftPtr == NULL) {
	return rightPtr;
    }
    if (rightPtr == NULL) {
	return leftPtr;
    }

    /*
     * Choose the head of the merged list. When -unique is in force and the
     * two heads compare equal, the left head is discarded and numElements is
     * decremented so the caller knows how many elements survived the merge.
     */
    cmp = SortCompare(leftPtr, rightPtr, infoPtr);
    if (cmp > 0 || (cmp == 0 && infoPtr->unique)) {
	if (cmp == 0) {
	    infoPtr->numElements--;
	    leftPtr = leftPtr->nextPtr;
	}
	tailPtr = rightPtr;
	rightPtr = rightPtr->nextPtr;
    } else {
	tailPtr = leftPtr;
	leftPtr = leftPtr->nextPtr;
    }
    headPtr = tailPtr;

    /*
     * Two copies of the merge loop are kept so that the cmp==0 test for
     * duplicate elimination is only paid when -unique was requested.
     */
    if (!infoPtr->unique) {
	while ((leftPtr != NULL) && (rightPtr != NULL)) {
	    cmp = SortCompare(leftPtr, rightPtr, infoPtr);
	    if (cmp > 0) {
		tailPtr->nextPtr = rightPtr;
		tailPtr = rightPtr;
		rightPtr = rightPtr->nextPtr;
	    } else {
		tailPtr->nextPtr = leftPtr;
		tailPtr = leftPtr;
		leftPtr = leftPtr->nextPtr;
	    }
	}
    } else {
	while ((leftPtr != NULL) && (rightPtr != NULL)) {
	    cmp = SortCompare(leftPtr, rightPtr, infoPtr);
	    if (cmp >= 0) {
		/* Equal elements: keep the right one, drop the left one. */
		if (cmp == 0) {
		    infoPtr->numElements--;
		    leftPtr = leftPtr->nextPtr;
		}
		tailPtr->nextPtr = rightPtr;
		tailPtr = rightPtr;
		rightPtr = rightPtr->nextPtr;
	    } else {
		tailPtr->nextPtr = leftPtr;
		tailPtr = leftPtr;
		leftPtr = leftPtr->nextPtr;
	    }
	}
    }

    /* Append whichever input list still has elements remaining. */
    if (leftPtr != NULL) {
	tailPtr->nextPtr = leftPtr;
    } else {
	tailPtr->nextPtr = rightPtr;
    }
    return headPtr;
}
/*
*----------------------------------------------------------------------
*
* SortCompare --
*
* This procedure is invoked by MergeLists to determine the proper
* ordering between two elements.
*
* Results:
* A negative results means the the first element comes before the
* second, and a positive results means that the second element should
* come first. A result of zero means the two elements are equal and it
* doesn't matter which comes first.
*
* Side effects:
* None, unless a user-defined comparison command does something weird.
*
*----------------------------------------------------------------------
*/
static int
SortCompare(
    SortElement *elemPtr1, SortElement *elemPtr2,
				/* Values to be compared. */
    SortInfo *infoPtr)		/* Information passed from the top-level
				 * "lsort" command. */
{
    int order = 0;

    /*
     * Dispatch on the sort mode; each arm leaves the raw comparison result
     * in "order". The default arm is SORTMODE_COMMAND, which evaluates the
     * user-supplied comparison script.
     */
    switch (infoPtr->sortMode) {
    case SORTMODE_ASCII:
	order = strcmp(elemPtr1->collationKey.strValuePtr,
		elemPtr2->collationKey.strValuePtr);
	break;
    case SORTMODE_ASCII_NC:
	order = TclUtfCasecmp(elemPtr1->collationKey.strValuePtr,
		elemPtr2->collationKey.strValuePtr);
	break;
    case SORTMODE_DICTIONARY:
	order = DictionaryCompare(elemPtr1->collationKey.strValuePtr,
		elemPtr2->collationKey.strValuePtr);
	break;
    case SORTMODE_INTEGER: {
	Tcl_WideInt w1 = elemPtr1->collationKey.wideValue;
	Tcl_WideInt w2 = elemPtr2->collationKey.wideValue;

	/* Branch-free three-way comparison yielding -1, 0 or +1. */
	order = (w1 >= w2) - (w1 <= w2);
	break;
    }
    case SORTMODE_REAL: {
	double d1 = elemPtr1->collationKey.doubleValue;
	double d2 = elemPtr2->collationKey.doubleValue;

	order = (d1 >= d2) - (d1 <= d2);
	break;
    }
    default: {
	/* SORTMODE_COMMAND: run the user-supplied comparison command. */
	Tcl_Obj **objv, *paramObjv[2];
	int objc;

	if (infoPtr->resultCode != TCL_OK) {
	    /*
	     * Once an error has occurred, skip any future comparisons so as
	     * to preserve the error message in sortInterp->result.
	     */
	    return 0;
	}

	paramObjv[0] = elemPtr1->collationKey.objValuePtr;
	paramObjv[1] = elemPtr2->collationKey.objValuePtr;

	/*
	 * The command list was created with two dummy trailing slots;
	 * overwrite them with the two values being compared and evaluate.
	 */
	TclListObjLength(infoPtr->interp, infoPtr->compareCmdPtr, &objc);
	Tcl_ListObjReplace(infoPtr->interp, infoPtr->compareCmdPtr, objc - 2,
		2, 2, paramObjv);
	TclListObjGetElements(infoPtr->interp, infoPtr->compareCmdPtr,
		&objc, &objv);
	infoPtr->resultCode = Tcl_EvalObjv(infoPtr->interp, objc, objv, 0);
	if (infoPtr->resultCode != TCL_OK) {
	    Tcl_AddErrorInfo(infoPtr->interp, "\n (-compare command)");
	    return 0;
	}

	/* The command must have produced an integer result. */
	if (TclGetIntFromObj(infoPtr->interp,
		Tcl_GetObjResult(infoPtr->interp), &order) != TCL_OK) {
	    Tcl_SetObjResult(infoPtr->interp, Tcl_NewStringObj(
		    "-compare command returned non-integer result", -1));
	    Tcl_SetErrorCode(infoPtr->interp, "TCL", "OPERATION", "LSORT",
		    "COMPARISONFAILED", NULL);
	    infoPtr->resultCode = TCL_ERROR;
	    return 0;
	}
	break;
    }
    }

    /* A decreasing sort simply inverts the comparison result. */
    return infoPtr->isIncreasing ? order : -order;
}
/*
*----------------------------------------------------------------------
*
* DictionaryCompare
*
* This function compares two strings as if they were being used in an
* index or card catalog. The case of alphabetic characters is ignored,
* except to break ties. Thus "B" comes before "b" but after "a". Also,
* integers embedded in the strings compare in numerical order. In other
 * words, "x10y" comes after "x9y", not before it as it would when
* using strcmp().
*
* Results:
* A negative result means that the first element comes before the
* second, and a positive result means that the second element should
* come first. A result of zero means the two elements are equal and it
* doesn't matter which comes first.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
DictionaryCompare(
    const char *left, const char *right)	/* The strings to compare. */
{
    Tcl_UniChar uniLeft, uniRight, uniLeftLower, uniRightLower;
    int diff, zeros;
    int secondaryDiff = 0;	/* Tie-breaker: records the first
				 * case/leading-zero difference seen. */

    while (1) {
	if (isdigit(UCHAR(*right))		/* INTL: digit */
		&& isdigit(UCHAR(*left))) {	/* INTL: digit */
	    /*
	     * There are decimal numbers embedded in the two strings. Compare
	     * them as numbers, rather than strings. If one number has more
	     * leading zeros than the other, the number with more leading
	     * zeros sorts later, but only as a secondary choice.
	     */
	    zeros = 0;
	    while ((*right == '0') && isdigit(UCHAR(right[1]))) {
		right++;
		zeros--;
	    }
	    while ((*left == '0') && isdigit(UCHAR(left[1]))) {
		left++;
		zeros++;
	    }
	    if (secondaryDiff == 0) {
		secondaryDiff = zeros;
	    }
	    /*
	     * The code below compares the numbers in the two strings without
	     * ever converting them to integers. It does this by first
	     * comparing the lengths of the numbers and then comparing the
	     * digit values.
	     */
	    diff = 0;
	    while (1) {
		if (diff == 0) {
		    /* First differing digit decides equal-length numbers. */
		    diff = UCHAR(*left) - UCHAR(*right);
		}
		right++;
		left++;
		if (!isdigit(UCHAR(*right))) {		/* INTL: digit */
		    if (isdigit(UCHAR(*left))) {	/* INTL: digit */
			/* Left number is longer, hence larger. */
			return 1;
		    } else {
			/*
			 * The two numbers have the same length. See if their
			 * values are different.
			 */
			if (diff != 0) {
			    return diff;
			}
			break;
		    }
		} else if (!isdigit(UCHAR(*left))) {	/* INTL: digit */
		    /* Right number is longer, hence larger. */
		    return -1;
		}
	    }
	    continue;
	}
	/*
	 * Convert character to Unicode for comparison purposes. If either
	 * string is at the terminating null, do a byte-wise comparison and
	 * bail out immediately.
	 */
	if ((*left != '\0') && (*right != '\0')) {
	    left += Tcl_UtfToUniChar(left, &uniLeft);
	    right += Tcl_UtfToUniChar(right, &uniRight);
	    /*
	     * Convert both chars to lower for the comparison, because
	     * dictionary sorts are case-insensitive. Convert to lower, not
	     * upper, so chars between Z and a will sort before A (where most
	     * other interesting punctuations occur).
	     */
	    uniLeftLower = Tcl_UniCharToLower(uniLeft);
	    uniRightLower = Tcl_UniCharToLower(uniRight);
	} else {
	    diff = UCHAR(*left) - UCHAR(*right);
	    break;
	}
	diff = uniLeftLower - uniRightLower;
	if (diff) {
	    return diff;
	}
	/* Remember the first pure-case difference as a tie-breaker. */
	if (secondaryDiff == 0) {
	    if (Tcl_UniCharIsUpper(uniLeft) && Tcl_UniCharIsLower(uniRight)) {
		secondaryDiff = -1;
	    } else if (Tcl_UniCharIsUpper(uniRight)
		    && Tcl_UniCharIsLower(uniLeft)) {
		secondaryDiff = 1;
	    }
	}
    }
    if (diff == 0) {
	/* Strings equal ignoring case/zeros: fall back to the tie-breaker. */
	diff = secondaryDiff;
    }
    return diff;
}
/*
*----------------------------------------------------------------------
*
* SelectObjFromSublist --
*
* This procedure is invoked from lsearch and SortCompare. It is used for
* implementing the -index option, for the lsort and lsearch commands.
*
* Results:
* Returns NULL if a failure occurs, and sets the result in the infoPtr.
* Otherwise returns the Tcl_Obj* to the item.
*
* Side effects:
* None.
*
* Note:
* No reference counting is done, as the result is only used internally
* and never passed directly to user code.
*
*----------------------------------------------------------------------
*/
static Tcl_Obj *
SelectObjFromSublist(
    Tcl_Obj *objPtr,		/* Obj to select sublist from. */
    SortInfo *infoPtr)		/* Information passed from the top-level
				 * "lsearch" or "lsort" command. */
{
    int i;

    /*
     * Quick check for case when no "-index" option is there.
     */
    if (infoPtr->indexc == 0) {
	return objPtr;
    }

    /*
     * Iterate over the indices, traversing through the nested sublists as we
     * go. On any failure, set infoPtr->resultCode to TCL_ERROR and return
     * NULL with the error message left in the interpreter.
     */
    for (i=0 ; i<infoPtr->indexc ; i++) {
	int listLen, index;
	Tcl_Obj *currentObj;

	if (TclListObjLength(infoPtr->interp, objPtr, &listLen) != TCL_OK) {
	    infoPtr->resultCode = TCL_ERROR;
	    return NULL;
	}
	index = infoPtr->indexv[i];

	/*
	 * Adjust for end-based indexing.
	 */
	if (index < SORTIDX_NONE) {
	    index += listLen + 1;
	}

	/*
	 * Fix: restore "&currentObj" here; the address-of expression had
	 * been corrupted by an HTML-entity mangling into a mojibake token.
	 */
	if (Tcl_ListObjIndex(infoPtr->interp, objPtr, index,
		&currentObj) != TCL_OK) {
	    infoPtr->resultCode = TCL_ERROR;
	    return NULL;
	}
	if (currentObj == NULL) {
	    /* In-range index that selects nothing: report which element. */
	    Tcl_SetObjResult(infoPtr->interp, Tcl_ObjPrintf(
		    "element %d missing from sublist \"%s\"",
		    index, TclGetString(objPtr)));
	    Tcl_SetErrorCode(infoPtr->interp, "TCL", "OPERATION", "LSORT",
		    "INDEXFAILED", NULL);
	    infoPtr->resultCode = TCL_ERROR;
	    return NULL;
	}
	objPtr = currentObj;
    }
    return objPtr;
}
/*
* Local Variables:
* mode: c
* c-basic-offset: 4
* fill-column: 78
* tab-width: 8
* End:
*/
| 26.992274 | 91 | 0.614647 | [
"render",
"object"
] |
d08d95c7f19b01dbd97c0d5a29581f7c03e95d87 | 10,860 | h | C | 06-Shading/shaders.h | Oranged9922/NPGR019 | 4d2d3a0e657765b8c9382d1dc4b5bef2564070bd | [
"Zlib"
] | 1 | 2021-03-22T18:46:44.000Z | 2021-03-22T18:46:44.000Z | 06-Shading/shaders.h | Oranged9922/NPGR019 | 4d2d3a0e657765b8c9382d1dc4b5bef2564070bd | [
"Zlib"
] | 3 | 2021-10-04T11:22:52.000Z | 2022-03-27T23:18:54.000Z | 06-Shading/shaders.h | Oranged9922/NPGR019 | 4d2d3a0e657765b8c9382d1dc4b5bef2564070bd | [
"Zlib"
] | 2 | 2021-03-27T10:49:58.000Z | 2021-06-01T08:13:17.000Z | /*
* Source code for the NPGR019 lab practices. Copyright Martin Kahoun 2021.
* Licensed under the zlib license, see LICENSE.txt in the root directory.
*/
#pragma once
#include <ShaderCompiler.h>
// Shader programs
namespace ShaderProgram
{
  enum
  {
    // Indices into the shaderProgram[] array of GL program handles.
    // NumShaderPrograms is a count sentinel, not an actual program.
    Default, Instancing, PointRendering, Tonemapping, NumShaderPrograms
  };
}
// Shader programs handle
extern GLuint shaderProgram[ShaderProgram::NumShaderPrograms];
// Helper function for creating and compiling the shaders
bool compileShaders();
// ============================================================================
// Vertex shader types
namespace VertexShader
{
  enum
  {
    // One entry per vertex shader in vsSource[] below, in the same order.
    // NumVertexShaders is a count sentinel, not an actual shader.
    Default, Instancing, Point, ScreenQuad, NumVertexShaders
  };
}
// Vertex shader sources
static const char* vsSource[] = {
// NOTE: entry order must match the VertexShader enum above (presumably used
// as the index when compiling — verify against compileShaders()).
// ----------------------------------------------------------------------------
// Default vertex shader
// ----------------------------------------------------------------------------
R"(
#version 330 core
// The following is not not needed since GLSL version #430
#extension GL_ARB_explicit_uniform_location : require
// Uniform blocks, i.e., constants
layout (std140) uniform TransformBlock
{
// Transposed worldToView matrix - stored compactly as an array of 3 x vec4
mat3x4 worldToView;
mat4x4 projection;
};
// Model to world transformation separately
layout (location = 0) uniform mat4x3 modelToWorld;
// Vertex attribute block, i.e., input
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec3 tangent;
layout (location = 3) in vec2 texCoord;
// Vertex output
out VertexData
{
vec2 texCoord;
vec3 tangent;
vec3 bitangent;
vec3 normal;
vec4 worldPos;
} vOut;
void main()
{
// Pass texture coordinates to the fragment shader
vOut.texCoord = texCoord.st;
// Construct the normal transformation matrix
mat3 normalTransform = transpose(inverse(mat3(modelToWorld)));
// Create the tangent space matrix and pass it to the fragment shader
vOut.normal = normalize(normalTransform * normal);
vOut.tangent = normalize(mat3(modelToWorld) * tangent);
vOut.bitangent = cross(vOut.tangent, vOut.normal);
// Transform vertex position
vOut.worldPos = vec4(modelToWorld * vec4(position.xyz, 1.0f), 1.0f);
// We must multiply from the left because of transposed worldToView
vec4 viewPos = vec4(vOut.worldPos * worldToView, 1.0f);
gl_Position = projection * viewPos;
}
)",
// ----------------------------------------------------------------------------
// Instancing vertex shader using instancing buffer via uniform block objects
// ----------------------------------------------------------------------------
R"(
#version 330 core
// The following is not not needed since GLSL version #430
#extension GL_ARB_explicit_uniform_location : require
// Uniform blocks, i.e., constants
layout (std140) uniform TransformBlock
{
// Transposed worldToView matrix - stored compactly as an array of 3 x vec4
mat3x4 worldToView;
mat4x4 projection;
};
// Vertex attribute block, i.e., input
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec3 tangent;
layout (location = 3) in vec2 texCoord;
// Must match the structure on the CPU side
struct InstanceData
{
// Transposed worldToView matrix - stored compactly as an array of 3 x vec4
mat3x4 modelToWorld;
};
// Uniform buffer used for instances
layout (std140) uniform InstanceBuffer
{
// We are limited to 4096 vec4 registers in total, hence the maximum number of instances
// being 1024 meaning we could fit another vec4 worth of data
InstanceData instanceBuffer[1024];
};
// Vertex output
out VertexData
{
vec2 texCoord;
vec3 tangent;
vec3 bitangent;
vec3 normal;
vec4 worldPos;
} vOut;
void main()
{
// Pass texture coordinates to the fragment shader
vOut.texCoord = texCoord.st;
// Retrieve the model to world matrix from the instance buffer
mat3x4 modelToWorld = instanceBuffer[gl_InstanceID].modelToWorld;
// Construct the normal transformation matrix
mat3 normalTransform = transpose(inverse(mat3(modelToWorld)));
// Create the tangent space matrix and pass it to the fragment shader
// Note: we must multiply from the left because of transposed modelToWorld
vOut.normal = normalize(normal * normalTransform);
vOut.tangent = normalize(tangent * mat3(modelToWorld));
vOut.bitangent = cross(vOut.tangent, vOut.normal);
// Transform vertex position, note we multiply from the left because of transposed modelToWorld
vOut.worldPos = vec4(vec4(position.xyz, 1.0f) * modelToWorld, 1.0f);
vec4 viewPos = vec4(vOut.worldPos * worldToView, 1.0f);
gl_Position = projection * viewPos;
}
)",
// ----------------------------------------------------------------------------
// Vertex shader for point rendering
// ----------------------------------------------------------------------------
R"(
#version 330 core
// Uniform blocks, i.e., constants
layout (std140) uniform TransformBlock
{
// Transposed worldToView matrix - stored compactly as an array of 3 x vec4
mat3x4 worldToView;
mat4x4 projection;
};
uniform vec3 position;
void main()
{
// We must multiply from the left because of transposed worldToView
vec4 viewPos = vec4(vec4(position, 1.0f) * worldToView, 1.0f);
gl_Position = projection * viewPos;
}
)",
// ----------------------------------------------------------------------------
// Fullscreen quad vertex shader
// ----------------------------------------------------------------------------
R"(
#version 330 core
// Fullscreen quad
vec3 position[6] = vec3[6](vec3(-1.0f, -1.0f, 0.0f),
vec3( 1.0f, -1.0f, 0.0f),
vec3( 1.0f,  1.0f, 0.0f),
vec3( 1.0f,  1.0f, 0.0f),
vec3(-1.0f,  1.0f, 0.0f),
vec3(-1.0f, -1.0f, 0.0f));
// Quad UV coordinates
out vec2 UV;
void main()
{
UV = position[gl_VertexID].xy * 0.5f + 0.5f;
gl_Position = vec4(position[gl_VertexID].xyz, 1.0f);
}
)",
// Trailing empty entry — presumably a terminator/padding slot; verify usage.
""};
// ============================================================================
// Fragment shader types
namespace FragmentShader
{
  enum
  {
    // One entry per fragment shader in fsSource[] below, in the same order.
    // NumFragmentShaders is a count sentinel, not an actual shader.
    Default, SingleColor, Tonemapping, NumFragmentShaders
  };
}
// Fragment shader sources
static const char* fsSource[] = {
// NOTE: entry order must match the FragmentShader enum above (presumably used
// as the index when compiling — verify against compileShaders()).
// ----------------------------------------------------------------------------
// Default fragment shader source
// ----------------------------------------------------------------------------
R"(
#version 330 core
// The following is not not needed since GLSL version #420
#extension GL_ARB_shading_language_420pack : require
// Texture sampler
layout (binding = 0) uniform sampler2D Diffuse;
layout (binding = 1) uniform sampler2D Normal;
layout (binding = 2) uniform sampler2D Specular;
layout (binding = 3) uniform sampler2D Occlusion;
// Light position/direction
uniform vec3 lightPosWS;
// View position in world space coordinates
uniform vec4 viewPosWS;
// Fragment shader inputs
in VertexData
{
vec2 texCoord;
vec3 tangent;
vec3 bitangent;
vec3 normal;
vec4 worldPos;
} vIn;
// Fragment shader outputs
layout (location = 0) out vec4 color;
void main()
{
// Normally you'd pass this as another uniform
vec3 lightColor = vec3(100.0f, 100.0f, 100.0f);
// Sample textures
vec3 albedo = texture(Diffuse, vIn.texCoord.st).rgb;
vec3 noSample = texture(Normal, vIn.texCoord.st).rgb;
float specSample = texture(Specular, vIn.texCoord.st).r;
float occlusion = texture(Occlusion, vIn.texCoord.st).r;
// Calculate world-space normal
mat3 STN = {vIn.tangent, vIn.bitangent, vIn.normal};
vec3 normal = STN * (noSample * 2.0f - 1.0f);
// Calculate the lighting direction and distance
vec3 lightDir = lightPosWS.xyz - vIn.worldPos.xyz;
float lengthSq = dot(lightDir, lightDir);
float length = sqrt(lengthSq);
lightDir /= length;
// Calculate the view and reflection/halfway direction
vec3 viewDir = normalize(viewPosWS.xyz - vIn.worldPos.xyz);
// Cheaper approximation of reflected direction = reflect(-lightDir, normal)
vec3 halfDir = normalize(viewDir + lightDir);
// Calculate diffuse and specular coefficients
float NdotL = max(0.0f, dot(normal, lightDir));
float NdotH = max(0.0f, dot(normal, halfDir));
// Calculate horizon fading factor
float horizon = clamp(1.0f + dot(vIn.normal, lightDir), 0.0f, 1.0f);
horizon *= horizon;
horizon *= horizon;
horizon *= horizon;
horizon *= horizon;
// Calculate the Phong model terms: ambient, diffuse, specular
vec3 ambient = vec3(0.1f, 0.1f, 0.1f) * occlusion;
vec3 diffuse = horizon * NdotL * lightColor / lengthSq;
vec3 specular = horizon * specSample * lightColor * pow(NdotH, 64.0f) / lengthSq; // Defines shininess
// Spotlight cone
vec3 spotDir = normalize(lightPosWS.xyz);
float theta = dot(lightDir, spotDir);
float outer = 0.7f;
float inner = 0.5f;
float epsilon = outer - inner;
float attenuation = clamp((theta - outer) / epsilon, 0.0f, 1.0f);
diffuse *= attenuation;
specular *= attenuation;
// Calculate the final color
vec3 finalColor = albedo * (ambient + diffuse) + specular;
color = vec4(finalColor, 1.0f);
}
)",
// ----------------------------------------------------------------------------
// Single color pixel shader
// ----------------------------------------------------------------------------
R"(
#version 330 core
// Input color
uniform vec3 color;
// Output color
out vec4 oColor;
void main()
{
oColor = vec4(color.rgb, 1.0f);
}
)",
// ----------------------------------------------------------------------------
// Tonemapping fragment shader source
// ----------------------------------------------------------------------------
R"(
#version 330 core
// The following is not not needed since GLSL version #430
#extension GL_ARB_explicit_uniform_location : require
// The following is not not needed since GLSL version #420
#extension GL_ARB_shading_language_420pack : require
// Our HDR buffer texture
layout (binding = 0) uniform sampler2DMS HDR;
// Number of used MSAA samples
layout (location = 0) uniform float MSAA_LEVEL;
// Quad UV coordinates
in vec2 UV;
// Output
out vec4 color;
vec3 ApplyTonemapping(vec3 hdr)
{
// Reinhard global operator
vec3 result = hdr / (hdr + vec3(1.0f));
return result;
}
void main()
{
// Query the size of the texture and calculate texel coordinates
ivec2 texSize = textureSize(HDR);
ivec2 texel = ivec2(UV * texSize);
// Accumulate color for all MSAA samples
vec3 finalColor = vec3(0.0f);
for (int i = 0; i < int(MSAA_LEVEL); ++i)
{
// Fetch a single sample from a single texel (no interpolation)
vec3 s = texelFetch(HDR, texel, i).rgb;
finalColor += ApplyTonemapping(s);
}
color = vec4(finalColor.rgb / MSAA_LEVEL, 1.0f);
}
)",
// Trailing empty entry — presumably a terminator/padding slot; verify usage.
""};
| 28.062016 | 104 | 0.62744 | [
"model",
"transform"
] |
d08fb3466e1e6eb7ba2ec4a4c4457d49c7c821f5 | 2,109 | h | C | chromeos/components/multidevice/logging/logging.h | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | chromeos/components/multidevice/logging/logging.h | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | chromeos/components/multidevice/logging/logging.h | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMEOS_COMPONENTS_MULTIDEVICE_LOGGING_LOGGING_H_
#define CHROMEOS_COMPONENTS_MULTIDEVICE_LOGGING_LOGGING_H_
#include <sstream>
#include "base/logging.h"
#include "base/macros.h"
namespace chromeos {
namespace multidevice {
// Use the PA_LOG() macro for all logging related to Proximity Auth, so the
// system is aware of all logs related to this feature. We display these logs in
// the debug WebUI (chrome://proximity-auth).
//
// PA_LOG() has the same interface as the standard LOG() macro and also creates
// a normal log message of the same severity.
// Examples:
// PA_LOG(INFO) << "Waiting for " << x << " pending requests.";
// PA_LOG(ERROR) << "Request failed: " << error_string;
#define PA_LOG(severity) \
chromeos::multidevice::ScopedLogMessage(__FILE__, __LINE__, \
logging::LOG_##severity) \
.stream()
// Disables all logging while in scope. Intended to be called only from test
// code, to clean up test output.
class ScopedDisableLoggingForTesting {
 public:
  // Suppresses logging for this object's lifetime (see the comment above:
  // intended to be used only from test code to clean up test output).
  ScopedDisableLoggingForTesting();
  // Re-enables logging when the object goes out of scope.
  ~ScopedDisableLoggingForTesting();
};
// An intermediate object used by the PA_LOG macro, wrapping a
// logging::LogMessage instance. When this object is destroyed, the message will
// be logged with the standard logging system and also added to Proximity Auth
// specific log buffer. You should use the PA_LOG() macro instead of this class
// directly.
class ScopedLogMessage {
 public:
  // Records the source location and severity for one log message; used via
  // the PA_LOG() macro rather than constructed directly.
  ScopedLogMessage(const char* file, int line, logging::LogSeverity severity);
  // Per the class comment above: on destruction the buffered message is
  // emitted to the standard logging system and the Proximity Auth log buffer.
  ~ScopedLogMessage();
  // Stream that PA_LOG() writes the message text into.
  std::ostream& stream() { return stream_; }
 private:
  const char* file_;               // source file passed to the constructor
  int line_;                       // source line passed to the constructor
  logging::LogSeverity severity_;  // severity passed to the constructor
  std::ostringstream stream_;      // accumulates the message text
  DISALLOW_COPY_AND_ASSIGN(ScopedLogMessage);
};
} // namespace multidevice
} // namespace chromeos
#endif // CHROMEOS_COMPONENTS_MULTIDEVICE_LOGGING_LOGGING_H_
| 32.446154 | 80 | 0.720721 | [
"object"
] |
d09620853fa2305ccb55edb1e810e5d94a38c0bf | 4,152 | h | C | ompi/contrib/vt/vt/tools/vtdyn/vt_dyn.h | bringhurst/ompi | 7da12594dc72085162265188b505aca0d0cfe811 | [
"BSD-3-Clause-Open-MPI"
] | 1 | 2016-05-01T09:37:07.000Z | 2016-05-01T09:37:07.000Z | ompi/contrib/vt/vt/tools/vtdyn/vt_dyn.h | bringhurst/ompi | 7da12594dc72085162265188b505aca0d0cfe811 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | ompi/contrib/vt/vt/tools/vtdyn/vt_dyn.h | bringhurst/ompi | 7da12594dc72085162265188b505aca0d0cfe811 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | /**
* VampirTrace
* http://www.tu-dresden.de/zih/vampirtrace
*
* Copyright (c) 2005-2011, ZIH, TU Dresden, Federal Republic of Germany
*
* Copyright (c) 1998-2005, Forschungszentrum Juelich, Juelich Supercomputing
* Centre, Federal Republic of Germany
*
* See the file COPYING in the package base directory for details
**/
#ifndef _VT_DYN_H_
#define _VT_DYN_H_
#include "config.h"
#include "vt_inttypes.h"
#include "rfg_filter.h"
#include "BPatch.h"
#include "BPatch_addressSpace.h"
#include "BPatch_function.h"
#include "BPatch_image.h"
#include <iostream>
#include <set>
#include <string>
#include <vector>
#define STRBUFSIZE 1024
//
// mutation modes
//
// How the mutatee is obtained and what is done with the instrumented result.
typedef enum
{
  // either create/attach to a process, instrument, and execute
  //
  MODE_CREATE,   // launch the mutatee executable ourselves
  MODE_ATTACH,   // attach to an already-running mutatee (by PID)
  // or open, instrument, and rewrite binary
  MODE_REWRITE   // write the instrumented binary back out to a file
} MutationT;
//
// structure that contains the mutator parameters
// (i.e. command line options)
//
// Aggregates the mutator's command line options.  The default-constructed
// state is: create a new process, verbosity level 1, all flags off.
struct ParamsS
{
  ParamsS()
    : mode(MODE_CREATE), mutatee_pid(-1), verbose_level(1),
      ignore_no_dbg(false), show_usage(false), show_version(false) {}
  MutationT mode; // mutation mode
  std::string mutatee; // mutatee executable name
  int mutatee_pid; // mutatee PID (-1 by default, i.e. none given)
  std::vector<std::string> mutatee_args; // mutatee arguments
  std::vector<std::string> shlibs; // shared libs. to be instrumented
  std::string filtfile; // pathname of filter file
  std::string outfile; // file name of binary to rewrite
  uint32_t verbose_level; // verbose level
  bool ignore_no_dbg; // flag: ignore funcs. without debug?
  bool show_usage; // flag: show usage text?
  bool show_version; // flag: show VampirTrace version?
};
//
// MutatorC class
//
// Drives the whole instrumentation run: sets up Dyninst (BPatch), creates /
// attaches to / opens the mutatee, inserts calls to the VampirTrace
// entry/exit hook functions, and finally resumes execution or rewrites the
// binary (see initialize()/finalize() below).
class MutatorC
{
public:
  // constructor
  MutatorC();
  // destructor
  ~MutatorC();
  // run the mutator
  bool run();
private:
  //
  // structure that contains context information about functions to
  // be instrumented
  //
  struct InstFuncS
  {
    // default: no function object, address/line number zero
    InstFuncS() : func(0), addr(0), lno(0) {}
    InstFuncS(BPatch_function * _func, unsigned long _addr,
              std::string _name, std::string _file, uint32_t _lno )
      : func(_func), addr(_addr), name(_name), file(_file), lno(_lno) {}
    BPatch_function * func; // BPatch function object
    unsigned long addr; // function address
    std::string name; // function name
    std::string file; // source file name of function definition
    uint32_t lno; // line number of function definition
  };
  // create/attach to a process or open binary for rewriting
  bool initialize();
  // continue execution of mutatee or rewrite binary
  bool finalize( bool & error );
  // get functions to be instrumented
  bool getFunctions( std::vector<InstFuncS> & instFuncs );
  // instrument a function
  bool instrumentFunction( const InstFuncS & instFunc );
  // read input filter file
  bool readFilter();
  // check whether module is excluded from instrumentation
  inline bool constraintModule( const std::string & name ) const;
  // check whether function is excluded from instrumentation
  inline bool constraintFunction( const std::string & name ) const;
  // check whether mutatee uses MPI
  inline bool isMPI() const;
  // find certain function in mutatee
  inline bool findFunction( const std::string & name,
    BPatch_function *& func ) const;
  // entire Dyninst library object
  BPatch m_bpatch;
  // mutatee's process or binary edit object
  BPatch_addressSpace * m_appAddrSpace;
  // mutatee's image object
  BPatch_image * m_appImage;
  // instrumentation functions to be inserted at entry/exit points
  //
  BPatch_function * m_vtStartFunc;
  BPatch_function * m_vtEndFunc;
  // RFG filter object to include/exclude functions from instrumenting
  RFG_Filter * m_filter;
};
#endif // _VT_DYN_H_
| 26.44586 | 80 | 0.648121 | [
"object",
"vector"
] |
d098e22135bd4d1e69087899ba5e43cc5b3412ae | 112,464 | c | C | CMVS-PMVS/program/thirdParty/gsl-1.13/linalg/test.c | mattjr/structured | 0cb4635af7602f2a243a9b739e5ed757424ab2a7 | [
"Apache-2.0"
] | 14 | 2015-01-11T02:53:04.000Z | 2021-11-25T17:31:22.000Z | CMVS-PMVS/program/thirdParty/gsl-1.13/linalg/test.c | skair39/structured | 0cb4635af7602f2a243a9b739e5ed757424ab2a7 | [
"Apache-2.0"
] | null | null | null | CMVS-PMVS/program/thirdParty/gsl-1.13/linalg/test.c | skair39/structured | 0cb4635af7602f2a243a9b739e5ed757424ab2a7 | [
"Apache-2.0"
] | 14 | 2015-07-21T04:47:52.000Z | 2020-03-12T12:31:25.000Z | /* linalg/test.c
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000, 2004, 2005, 2006, 2007 Gerard Jungman, Brian Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* Author: G. Jungman
*/
#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_test.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_ieee_utils.h>
#include <gsl/gsl_permute_vector.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_linalg.h>
#define TEST_SVD_4X4 1
int check (double x, double actual, double eps);
gsl_matrix * create_hilbert_matrix(unsigned long size);
gsl_matrix * create_general_matrix(unsigned long size1, unsigned long size2);
gsl_matrix * create_vandermonde_matrix(unsigned long size);
gsl_matrix * create_moler_matrix(unsigned long size);
gsl_matrix * create_row_matrix(unsigned long size1, unsigned long size2);
gsl_matrix * create_2x2_matrix(double a11, double a12, double a21, double a22);
gsl_matrix * create_diagonal_matrix(double a[], unsigned long size);
int test_matmult(void);
int test_matmult_mod(void);
int test_LU_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_LU_solve(void);
int test_LUc_solve_dim(const gsl_matrix_complex * m, const double * actual, double eps);
int test_LUc_solve(void);
int test_QR_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_QR_solve(void);
int test_QR_QRsolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_QR_QRsolve(void);
int test_QR_lssolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_QR_lssolve(void);
int test_QR_decomp_dim(const gsl_matrix * m, double eps);
int test_QR_decomp(void);
int test_QRPT_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_QRPT_solve(void);
int test_QRPT_QRsolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_QRPT_QRsolve(void);
int test_QRPT_decomp_dim(const gsl_matrix * m, double eps);
int test_QRPT_decomp(void);
int test_QR_update_dim(const gsl_matrix * m, double eps);
int test_QR_update(void);
int test_QRPT_update_dim(const gsl_matrix * m, double eps);
int test_QRPT_update(void);
int test_LQ_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_LQ_solve(void);
int test_LQ_LQsolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_LQ_LQsolve(void);
int test_LQ_lssolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_LQ_lssolve(void);
int test_LQ_decomp_dim(const gsl_matrix * m, double eps);
int test_LQ_decomp(void);
int test_PTLQ_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_PTLQ_solve(void);
int test_PTLQ_LQsolve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_PTLQ_LQsolve(void);
int test_PTLQ_decomp_dim(const gsl_matrix * m, double eps);
int test_PTLQ_decomp(void);
int test_LQ_update_dim(const gsl_matrix * m, double eps);
int test_LQ_update(void);
int test_SV_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_SV_solve(void);
int test_SV_decomp_dim(const gsl_matrix * m, double eps);
int test_SV_decomp(void);
int test_SV_decomp_mod_dim(const gsl_matrix * m, double eps);
int test_SV_decomp_mod(void);
int test_SV_decomp_jacobi_dim(const gsl_matrix * m, double eps);
int test_SV_decomp_jacobi(void);
int test_cholesky_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_cholesky_solve(void);
int test_cholesky_decomp_dim(const gsl_matrix * m, double eps);
int test_cholesky_decomp(void);
int test_cholesky_invert_dim(const gsl_matrix * m, double eps);
int test_cholesky_invert(void);
int test_HH_solve_dim(const gsl_matrix * m, const double * actual, double eps);
int test_HH_solve(void);
int test_TDS_solve_dim(unsigned long dim, double d, double od, const double * actual, double eps);
int test_TDS_solve(void);
int test_TDN_solve_dim(unsigned long dim, double d, double a, double b, const double * actual, double eps);
int test_TDN_solve(void);
int test_TDS_cyc_solve_one(const unsigned long dim, const double * d, const double * od, const double * r,
const double * actual, double eps);
int test_TDS_cyc_solve(void);
int test_TDN_cyc_solve_dim(unsigned long dim, double d, double a, double b, const double * actual, double eps);
int test_TDN_cyc_solve(void);
int test_bidiag_decomp_dim(const gsl_matrix * m, double eps);
int test_bidiag_decomp(void);
/* Compare a computed value x against the expected value actual.
   Returns 0 when they agree to within relative tolerance eps
   (absolute tolerance when actual is exactly zero), nonzero
   otherwise. */
int
check (double x, double actual, double eps)
{
  if (x == actual)
    return 0;

  /* A relative error is undefined for an expected value of zero,
     so fall back to an absolute comparison. */
  if (actual == 0)
    return fabs (x) > eps;

  return fabs (x - actual) / fabs (actual) > eps;
}
/* Allocate an n-element vector whose elements are deliberately
   non-contiguous: successive calls cycle through a fixed list of
   prime strides so the tests exercise strided access.  The
   returned vector owns its underlying block. */
gsl_vector *
vector_alloc (size_t n)
{
  static const size_t strides[5] = {3, 5, 7, 11, 13};
  static size_t next = 0;

  const size_t stride = strides[next];
  next = (next + 1) % 5;

  {
    gsl_block * block = gsl_block_alloc (n * stride);
    gsl_vector * v = gsl_vector_alloc_from_block (block, 0, n, stride);
    v->owner = 1;  /* freeing the vector frees the block too */
    return v;
  }
}
/* Release a vector obtained from vector_alloc(); since the vector
   owns its block (owner == 1), the element storage is freed too. */
void
vector_free (gsl_vector * v)
{
  gsl_vector_free (v);
}
/* Build the size-by-size Hilbert matrix, H[i][j] = 1/(i+j+1).
   The caller owns the returned matrix. */
gsl_matrix *
create_hilbert_matrix(unsigned long size)
{
  gsl_matrix * h = gsl_matrix_alloc(size, size);
  unsigned long row, col;

  for(row = 0; row < size; row++) {
    for(col = 0; col < size; col++) {
      gsl_matrix_set(h, row, col, 1.0 / (row + col + 1.0));
    }
  }
  return h;
}
/* Build a rectangular size1-by-size2 matrix with Hilbert-like
   entries A[i][j] = 1/(i+j+1).  The caller owns the result. */
gsl_matrix *
create_general_matrix(unsigned long size1, unsigned long size2)
{
  gsl_matrix * a = gsl_matrix_alloc(size1, size2);
  unsigned long row, col;

  for(row = 0; row < size1; row++) {
    for(col = 0; col < size2; col++) {
      gsl_matrix_set(a, row, col, 1.0 / (row + col + 1.0));
    }
  }
  return a;
}
/* Build a size1-by-size2 Hilbert-like matrix and then clear its
   entire first row, producing a rank-deficient matrix for testing
   solvers on singular systems.  The caller owns the result. */
gsl_matrix *
create_singular_matrix(unsigned long size1, unsigned long size2)
{
  gsl_matrix * a = gsl_matrix_alloc(size1, size2);
  unsigned long row, col;

  for(row = 0; row < size1; row++) {
    for(col = 0; col < size2; col++) {
      gsl_matrix_set(a, row, col, 1.0 / (row + col + 1.0));
    }
  }

  /* make the matrix singular by zeroing every entry of row 0 */
  for(col = 0; col < size2; col++) {
    gsl_matrix_set(a, 0, col, 0.0);
  }
  return a;
}
/* Build the size-by-size Vandermonde matrix whose row i holds the
   decreasing powers of (i+1): V[i][j] = (i+1)^(size-j-1).  The
   caller owns the result. */
gsl_matrix *
create_vandermonde_matrix(unsigned long size)
{
  gsl_matrix * v = gsl_matrix_alloc(size, size);
  unsigned long row, col;

  for(row = 0; row < size; row++) {
    for(col = 0; col < size; col++) {
      gsl_matrix_set(v, row, col, pow(row + 1.0, size - col - 1.0));
    }
  }
  return v;
}
/* Build the size-by-size Moler matrix, M[i][j] = min(i+1,j+1) - 2,
   a classic ill-conditioned symmetric test matrix.  The caller
   owns the result. */
gsl_matrix *
create_moler_matrix(unsigned long size)
{
  gsl_matrix * m = gsl_matrix_alloc(size, size);
  unsigned long row, col;

  for(row = 0; row < size; row++) {
    for(col = 0; col < size; col++) {
      gsl_matrix_set(m, row, col, GSL_MIN(row + 1, col + 1) - 2.0);
    }
  }
  return m;
}
/* Build a size-by-size complex test matrix whose (i,j) entry is
   1/(i+j+1) + i * 1/(i^2+j^2+1/2).  The caller owns the result. */
gsl_matrix_complex *
create_complex_matrix(unsigned long size)
{
  gsl_matrix_complex * m = gsl_matrix_complex_alloc(size, size);
  unsigned long row, col;

  for(row = 0; row < size; row++) {
    for(col = 0; col < size; col++) {
      gsl_complex z =
        gsl_complex_rect(1.0 / (row + col + 1.0),
                         1 / (row * row + col * col + 0.5));
      gsl_matrix_complex_set(m, row, col, z);
    }
  }
  return m;
}
/* Build a size1-by-size2 matrix that is zero everywhere except its
   first column, which holds 1/(i+1).  The caller owns the result. */
gsl_matrix *
create_row_matrix(unsigned long size1, unsigned long size2)
{
  gsl_matrix * m = gsl_matrix_calloc(size1, size2);
  unsigned long row;

  for(row = 0; row < size1; row++) {
    gsl_matrix_set(m, row, 0, 1.0 / (row + 1.0));
  }
  return m;
}
/* Build a 2x2 matrix from its four entries, given in row-major
   order.  The caller owns the result. */
gsl_matrix *
create_2x2_matrix(double a11, double a12, double a21, double a22)
{
  const double entry[2][2] = { { a11, a12 }, { a21, a22 } };
  gsl_matrix * m = gsl_matrix_alloc(2, 2);
  unsigned long row, col;

  for(row = 0; row < 2; row++) {
    for(col = 0; col < 2; col++) {
      gsl_matrix_set(m, row, col, entry[row][col]);
    }
  }
  return m;
}
/* Build a size-by-size diagonal matrix whose diagonal entries are
   taken from the array a; all off-diagonal entries are zero.  The
   caller owns the result. */
gsl_matrix *
create_diagonal_matrix(double a[], unsigned long size)
{
  gsl_matrix * m = gsl_matrix_calloc(size, size);
  unsigned long k;

  for(k = 0; k < size; k++) {
    gsl_matrix_set(m, k, k, a[k]);
  }
  return m;
}
gsl_matrix * m11;
gsl_matrix * m51;
gsl_matrix * m35;
gsl_matrix * m53;
gsl_matrix * m97;
gsl_matrix * s35;
gsl_matrix * s53;
gsl_matrix * hilb2;
gsl_matrix * hilb3;
gsl_matrix * hilb4;
gsl_matrix * hilb12;
gsl_matrix * row3;
gsl_matrix * row5;
gsl_matrix * row12;
gsl_matrix * A22;
gsl_matrix * A33;
gsl_matrix * A44;
gsl_matrix * A55;
gsl_matrix_complex * c7;
gsl_matrix * inf5; double inf5_data[] = {1.0, 0.0, -3.0, 0.0, -5.0};
gsl_matrix * nan5;
gsl_matrix * dblmin3, * dblmin5;
double m53_lssolution[] = {52.5992295702070, -337.7263113752073,
351.8823436427604};
double hilb2_solution[] = {-8.0, 18.0} ;
double hilb3_solution[] = {27.0, -192.0, 210.0};
double hilb4_solution[] = {-64.0, 900.0, -2520.0, 1820.0};
double hilb12_solution[] = {-1728.0, 245388.0, -8528520.0,
127026900.0, -1009008000.0, 4768571808.0,
-14202796608.0, 27336497760.0, -33921201600.0,
26189163000.0, -11437874448.0, 2157916488.0 };
double c7_solution[] = { 2.40717272023734e+01, -9.84612797621247e+00,
-2.69338853034031e+02, 8.75455232472528e+01,
2.96661356736296e+03, -1.02624473923993e+03,
-1.82073812124749e+04, 5.67384473042410e+03,
5.57693879019068e+04, -1.61540963210502e+04,
-7.88941207561151e+04, 1.95053812987858e+04,
3.95548551241728e+04, -7.76593696255317e+03 };
gsl_matrix * vander2;
gsl_matrix * vander3;
gsl_matrix * vander4;
gsl_matrix * vander12;
double vander2_solution[] = {1.0, 0.0};
double vander3_solution[] = {0.0, 1.0, 0.0};
double vander4_solution[] = {0.0, 0.0, 1.0, 0.0};
double vander12_solution[] = {0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0};
gsl_matrix * moler10;
/* matmult now obsolete */
#ifdef MATMULT
/* Check gsl_linalg_matmult() on a fixed 2x2 * 2x3 product; every
   entry of the result is compared against the hand-computed value
   and the number of mismatches is returned (0 = success).  Note:
   this code is compiled only when MATMULT is defined (matmult is
   obsolete, see the comment above). */
int
test_matmult(void)
{
  int s = 0;
  /* A = [10 5; 1 20], B = [10 5 2; 1 3 2] */
  gsl_matrix * A = gsl_matrix_calloc(2, 2);
  gsl_matrix * B = gsl_matrix_calloc(2, 3);
  gsl_matrix * C = gsl_matrix_calloc(2, 3);
  gsl_matrix_set(A, 0, 0, 10.0);
  gsl_matrix_set(A, 0, 1, 5.0);
  gsl_matrix_set(A, 1, 0, 1.0);
  gsl_matrix_set(A, 1, 1, 20.0);
  gsl_matrix_set(B, 0, 0, 10.0);
  gsl_matrix_set(B, 0, 1, 5.0);
  gsl_matrix_set(B, 0, 2, 2.0);
  gsl_matrix_set(B, 1, 0, 1.0);
  gsl_matrix_set(B, 1, 1, 3.0);
  gsl_matrix_set(B, 1, 2, 2.0);
  /* C = A B; expected [105 65 30; 30 65 42] */
  gsl_linalg_matmult(A, B, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 105.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 65.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 30.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 30.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 65.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 42.0) > GSL_DBL_EPSILON );
  gsl_matrix_free(A);
  gsl_matrix_free(B);
  gsl_matrix_free(C);
  return s;
}
/* Check gsl_linalg_matmult_mod() for every combination of plain and
   transposed operands, on square (A,B: 3x3) and rectangular
   (D,E: 2x3) inputs.  Each result entry is compared against the
   hand-computed value; the number of mismatches is returned
   (0 = success).  Compiled only when MATMULT is defined. */
int
test_matmult_mod(void)
{
  int s = 0;
  gsl_matrix * A = gsl_matrix_calloc(3, 3);
  gsl_matrix * B = gsl_matrix_calloc(3, 3);
  gsl_matrix * C = gsl_matrix_calloc(3, 3);
  gsl_matrix * D = gsl_matrix_calloc(2, 3);
  gsl_matrix * E = gsl_matrix_calloc(2, 3);
  gsl_matrix * F = gsl_matrix_calloc(2, 2);
  /* A = [10 5 1; 1 20 5; 1 3 7] */
  gsl_matrix_set(A, 0, 0, 10.0);
  gsl_matrix_set(A, 0, 1, 5.0);
  gsl_matrix_set(A, 0, 2, 1.0);
  gsl_matrix_set(A, 1, 0, 1.0);
  gsl_matrix_set(A, 1, 1, 20.0);
  gsl_matrix_set(A, 1, 2, 5.0);
  gsl_matrix_set(A, 2, 0, 1.0);
  gsl_matrix_set(A, 2, 1, 3.0);
  gsl_matrix_set(A, 2, 2, 7.0);
  /* B = [10 5 2; 1 3 2; 1 3 2] */
  gsl_matrix_set(B, 0, 0, 10.0);
  gsl_matrix_set(B, 0, 1, 5.0);
  gsl_matrix_set(B, 0, 2, 2.0);
  gsl_matrix_set(B, 1, 0, 1.0);
  gsl_matrix_set(B, 1, 1, 3.0);
  gsl_matrix_set(B, 1, 2, 2.0);
  gsl_matrix_set(B, 2, 0, 1.0);
  gsl_matrix_set(B, 2, 1, 3.0);
  gsl_matrix_set(B, 2, 2, 2.0);
  /* D = first two rows of A */
  gsl_matrix_set(D, 0, 0, 10.0);
  gsl_matrix_set(D, 0, 1, 5.0);
  gsl_matrix_set(D, 0, 2, 1.0);
  gsl_matrix_set(D, 1, 0, 1.0);
  gsl_matrix_set(D, 1, 1, 20.0);
  gsl_matrix_set(D, 1, 2, 5.0);
  /* E = first two rows of B */
  gsl_matrix_set(E, 0, 0, 10.0);
  gsl_matrix_set(E, 0, 1, 5.0);
  gsl_matrix_set(E, 0, 2, 2.0);
  gsl_matrix_set(E, 1, 0, 1.0);
  gsl_matrix_set(E, 1, 1, 3.0);
  gsl_matrix_set(E, 1, 2, 2.0);
  /* C = A B */
  gsl_linalg_matmult_mod(A, GSL_LINALG_MOD_NONE, B, GSL_LINALG_MOD_NONE, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 106.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 68.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 32.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 35.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 80.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 52.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 0) - 20.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 1) - 35.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 2) - 22.0) > GSL_DBL_EPSILON );
  /* C = A^T B */
  gsl_linalg_matmult_mod(A, GSL_LINALG_MOD_TRANSPOSE, B, GSL_LINALG_MOD_NONE, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 102.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 56.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 24.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 73.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 94.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 56.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 0) - 22.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 1) - 41.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 2) - 26.0) > GSL_DBL_EPSILON );
  /* C = A B^T */
  gsl_linalg_matmult_mod(A, GSL_LINALG_MOD_NONE, B, GSL_LINALG_MOD_TRANSPOSE, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 127.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 27.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 27.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 120.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 71.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 71.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 0) - 39.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 1) - 24.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 2) - 24.0) > GSL_DBL_EPSILON );
  /* C = A^T B^T */
  gsl_linalg_matmult_mod(A, GSL_LINALG_MOD_TRANSPOSE, B, GSL_LINALG_MOD_TRANSPOSE, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 107.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 15.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 15.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 156.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 71.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 71.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 0) - 49.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 1) - 30.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 2) - 30.0) > GSL_DBL_EPSILON );
  /* now try for non-symmetric matrices */
  /* C = D^T E (3x3 result from 2x3 operands) */
  gsl_linalg_matmult_mod(D, GSL_LINALG_MOD_TRANSPOSE, E, GSL_LINALG_MOD_NONE, C);
  s += ( fabs(gsl_matrix_get(C, 0, 0) - 101.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 1) - 53.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 0, 2) - 22.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 0) - 70.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 1) - 85.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 1, 2) - 50.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 0) - 15.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 1) - 20.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(C, 2, 2) - 12.0) > GSL_DBL_EPSILON );
  /* F = D E^T (2x2 result) */
  gsl_linalg_matmult_mod(D, GSL_LINALG_MOD_NONE, E, GSL_LINALG_MOD_TRANSPOSE, F);
  s += ( fabs(gsl_matrix_get(F, 0, 0) - 127.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(F, 0, 1) - 27.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(F, 1, 0) - 120.0) > GSL_DBL_EPSILON );
  s += ( fabs(gsl_matrix_get(F, 1, 1) - 71.0) > GSL_DBL_EPSILON );
  gsl_matrix_free(A);
  gsl_matrix_free(B);
  gsl_matrix_free(C);
  gsl_matrix_free(D);
  gsl_matrix_free(E);
  gsl_matrix_free(F);
  return s;
}
#endif
/* Solve m x = b (with b = (1, 2, ..., dim)) by LU decomposition and
   compare the solution against the expected values in actual, to
   relative accuracy eps.  The solution is checked twice: directly,
   and again after one step of iterative refinement.  Mismatches are
   printed and counted; the failure count is returned (0 = success). */
int
test_LU_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  int s = 0;
  int signum;
  unsigned long i, dim = m->size1;
  gsl_permutation * perm = gsl_permutation_alloc(dim);
  gsl_vector * rhs = gsl_vector_alloc(dim);
  gsl_matrix * lu  = gsl_matrix_alloc(dim,dim);
  gsl_vector * x = gsl_vector_alloc(dim);
  gsl_vector * residual = gsl_vector_alloc(dim);
  /* decompose a copy so that m itself stays intact for LU_refine */
  gsl_matrix_memcpy(lu,m);
  for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
  s += gsl_linalg_LU_decomp(lu, perm, &signum);
  s += gsl_linalg_LU_solve(lu, perm, rhs, x);
  /* check the direct solution */
  for(i=0; i<dim; i++) {
    int foo = check(gsl_vector_get(x, i),actual[i],eps);
    if(foo) {
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  /* one refinement step must still satisfy the same tolerance */
  s += gsl_linalg_LU_refine(m, lu, perm, rhs, x, residual);
  for(i=0; i<dim; i++) {
    int foo = check(gsl_vector_get(x, i),actual[i],eps);
    if(foo) {
      printf("%3lu[%lu]: %22.18g %22.18g (improved)\n", dim, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  gsl_vector_free(residual);
  gsl_vector_free(x);
  gsl_matrix_free(lu);
  gsl_vector_free(rhs);
  gsl_permutation_free(perm);
  return s;
}
/* Drive test_LU_solve_dim() over the standard Hilbert and
   Vandermonde test systems.  Returns the total failure count. */
int test_LU_solve(void)
{
  int failures = 0;
  int status;

  status = test_LU_solve_dim(hilb2, hilb2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve hilbert(2)");
  failures += status;

  status = test_LU_solve_dim(hilb3, hilb3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve hilbert(3)");
  failures += status;

  status = test_LU_solve_dim(hilb4, hilb4_solution, 2048.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve hilbert(4)");
  failures += status;

  status = test_LU_solve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(status, "  LU_solve hilbert(12)");
  failures += status;

  status = test_LU_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve vander(2)");
  failures += status;

  status = test_LU_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve vander(3)");
  failures += status;

  status = test_LU_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  LU_solve vander(4)");
  failures += status;

  status = test_LU_solve_dim(vander12, vander12_solution, 0.05);
  gsl_test(status, "  LU_solve vander(12)");
  failures += status;

  return failures;
}
/* Complex analogue of test_LU_solve_dim(): solve m x = b with
   b_k = (2k+1) + (2k+2)i via complex LU decomposition and compare
   against actual, which interleaves expected real and imaginary
   parts (actual[2k], actual[2k+1]).  Checked directly and after one
   refinement step; returns the failure count (0 = success). */
int
test_LUc_solve_dim(const gsl_matrix_complex * m, const double * actual, double eps)
{
  int s = 0;
  int signum;
  unsigned long i, dim = m->size1;
  gsl_permutation * perm = gsl_permutation_alloc(dim);
  gsl_vector_complex * rhs = gsl_vector_complex_alloc(dim);
  gsl_matrix_complex * lu = gsl_matrix_complex_alloc(dim,dim);
  gsl_vector_complex * x = gsl_vector_complex_alloc(dim);
  gsl_vector_complex * residual = gsl_vector_complex_alloc(dim);
  /* decompose a copy so that m itself stays intact for LU_refine */
  gsl_matrix_complex_memcpy(lu,m);
  for(i=0; i<dim; i++)
    {
      gsl_complex z = gsl_complex_rect (2.0*i+1.0, 2.0*i+2.0);
      gsl_vector_complex_set(rhs, i, z);
    }
  s += gsl_linalg_complex_LU_decomp(lu, perm, &signum);
  s += gsl_linalg_complex_LU_solve(lu, perm, rhs, x);
  /* real and imaginary parts are checked independently */
  for(i=0; i<dim; i++) {
    gsl_complex z = gsl_vector_complex_get(x, i);
    int foo_r = check(GSL_REAL(z),actual[2*i],eps);
    int foo_i = check(GSL_IMAG(z),actual[2*i+1],eps);
    if(foo_r || foo_i) {
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, GSL_REAL(z), actual[2*i]);
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, GSL_IMAG(z), actual[2*i+1]);
    }
    s += foo_r + foo_i;
  }
  /* one refinement step must still satisfy the same tolerance */
  s += gsl_linalg_complex_LU_refine(m, lu, perm, rhs, x, residual);
  for(i=0; i<dim; i++) {
    gsl_complex z = gsl_vector_complex_get(x, i);
    int foo_r = check(GSL_REAL(z),actual[2*i],eps);
    int foo_i = check(GSL_IMAG(z),actual[2*i+1],eps);
    if(foo_r || foo_i) {
      printf("%3lu[%lu]: %22.18g %22.18g (improved)\n", dim, i, GSL_REAL(z), actual[2*i]);
      printf("%3lu[%lu]: %22.18g %22.18g (improved)\n", dim, i, GSL_IMAG(z), actual[2*i+1]);
    }
    s += foo_r + foo_i;
  }
  gsl_vector_complex_free(residual);
  gsl_vector_complex_free(x);
  gsl_matrix_complex_free(lu);
  gsl_vector_complex_free(rhs);
  gsl_permutation_free(perm);
  return s;
}
/* Drive test_LUc_solve_dim() on the 7x7 complex test matrix.
   Returns the total failure count. */
int test_LUc_solve(void)
{
  int failures = 0;
  int status;

  status = test_LUc_solve_dim(c7, c7_solution, 1024.0 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  complex_LU_solve complex(7)");
  failures += status;

  return failures;
}
/* Solve the square system m x = b (with b = (1, 2, ..., dim)) by
   QR decomposition and compare the solution against the expected
   values in actual, to relative accuracy eps.  Mismatches are
   printed and counted; the failure count is returned (0 = success). */
int
test_QR_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  int s = 0;
  unsigned long i, dim = m->size1;
  gsl_vector * rhs = gsl_vector_alloc(dim);
  gsl_matrix * qr = gsl_matrix_alloc(dim,dim);
  gsl_vector * d = gsl_vector_alloc(dim);
  gsl_vector * x = gsl_vector_alloc(dim);
  /* decompose a copy; QR_decomp overwrites its matrix argument */
  gsl_matrix_memcpy(qr,m);
  for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
  s += gsl_linalg_QR_decomp(qr, d);
  s += gsl_linalg_QR_solve(qr, d, rhs, x);
  for(i=0; i<dim; i++) {
    int foo = check(gsl_vector_get(x, i), actual[i], eps);
    if(foo) {
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  gsl_vector_free(x);
  gsl_vector_free(d);
  gsl_matrix_free(qr);
  gsl_vector_free(rhs);
  return s;
}
/* Drive test_QR_solve_dim() over the standard Hilbert and
   Vandermonde test systems.  Returns the total failure count. */
int test_QR_solve(void)
{
  int failures = 0;
  int status;

  status = test_QR_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve hilbert(2)");
  failures += status;

  status = test_QR_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve hilbert(3)");
  failures += status;

  status = test_QR_solve_dim(hilb4, hilb4_solution, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve hilbert(4)");
  failures += status;

  status = test_QR_solve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(status, "  QR_solve hilbert(12)");
  failures += status;

  status = test_QR_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve vander(2)");
  failures += status;

  status = test_QR_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve vander(3)");
  failures += status;

  status = test_QR_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_solve vander(4)");
  failures += status;

  status = test_QR_solve_dim(vander12, vander12_solution, 0.05);
  gsl_test(status, "  QR_solve vander(12)");
  failures += status;

  return failures;
}
/* Like test_QR_solve_dim(), but unpacks the decomposition into
   explicit Q and R factors first and solves with
   gsl_linalg_QR_QRsolve().  Returns the failure count (0 = success). */
int
test_QR_QRsolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  int s = 0;
  unsigned long i, dim = m->size1;
  gsl_vector * rhs = gsl_vector_alloc(dim);
  gsl_matrix * qr = gsl_matrix_alloc(dim,dim);
  gsl_matrix * q = gsl_matrix_alloc(dim,dim);
  gsl_matrix * r = gsl_matrix_alloc(dim,dim);
  gsl_vector * d = gsl_vector_alloc(dim);
  gsl_vector * x = gsl_vector_alloc(dim);
  /* decompose a copy; QR_decomp overwrites its matrix argument */
  gsl_matrix_memcpy(qr,m);
  for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
  s += gsl_linalg_QR_decomp(qr, d);
  /* expand the compact (qr, d) form into explicit Q and R */
  s += gsl_linalg_QR_unpack(qr, d, q, r);
  s += gsl_linalg_QR_QRsolve(q, r, rhs, x);
  for(i=0; i<dim; i++) {
    int foo = check(gsl_vector_get(x, i), actual[i], eps);
    if(foo) {
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  gsl_vector_free(x);
  gsl_vector_free(d);
  gsl_matrix_free(qr);
  gsl_matrix_free(q);
  gsl_matrix_free(r);
  gsl_vector_free(rhs);
  return s;
}
/* Drive test_QR_QRsolve_dim() over the standard Hilbert and
   Vandermonde test systems.  Returns the total failure count. */
int test_QR_QRsolve(void)
{
  int failures = 0;
  int status;

  status = test_QR_QRsolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve hilbert(2)");
  failures += status;

  status = test_QR_QRsolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve hilbert(3)");
  failures += status;

  status = test_QR_QRsolve_dim(hilb4, hilb4_solution, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve hilbert(4)");
  failures += status;

  status = test_QR_QRsolve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(status, "  QR_QRsolve hilbert(12)");
  failures += status;

  status = test_QR_QRsolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve vander(2)");
  failures += status;

  status = test_QR_QRsolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve vander(3)");
  failures += status;

  status = test_QR_QRsolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_QRsolve vander(4)");
  failures += status;

  status = test_QR_QRsolve_dim(vander12, vander12_solution, 0.05);
  gsl_test(status, "  QR_QRsolve vander(12)");
  failures += status;

  return failures;
}
/* Least-squares solve of the (possibly rectangular) M x N system
   m x = b, with b = (1, 2, ..., M), via QR decomposition.  The
   solution is compared against actual to accuracy eps, and the
   residual returned by QR_lssolve is compared against an
   independently computed b - m x (to the looser tolerance
   sqrt(eps)).  Returns the failure count (0 = success). */
int
test_QR_lssolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  int s = 0;
  unsigned long i, M = m->size1, N = m->size2;
  gsl_vector * rhs = gsl_vector_alloc(M);
  gsl_matrix * qr = gsl_matrix_alloc(M,N);
  gsl_vector * d = gsl_vector_alloc(N);
  gsl_vector * x = gsl_vector_alloc(N);
  gsl_vector * r = gsl_vector_alloc(M);
  gsl_vector * res = gsl_vector_alloc(M);
  /* decompose a copy; QR_decomp overwrites its matrix argument */
  gsl_matrix_memcpy(qr,m);
  for(i=0; i<M; i++) gsl_vector_set(rhs, i, i+1.0);
  s += gsl_linalg_QR_decomp(qr, d);
  s += gsl_linalg_QR_lssolve(qr, d, rhs, x, res);
  for(i=0; i<N; i++) {
    int foo = check(gsl_vector_get(x, i), actual[i], eps);
    if(foo) {
      printf("(%3lu,%3lu)[%lu]: %22.18g %22.18g\n", M, N, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  /* compute residual r = b - m x */
  if (M == N) {
    /* a square system is solved exactly, so the residual is zero */
    gsl_vector_set_zero(r);
  } else {
    gsl_vector_memcpy(r, rhs);
    gsl_blas_dgemv(CblasNoTrans, -1.0, m, x, 1.0, r);
  };
  /* NOTE(review): r and res have M components but only the first N
     are compared here -- looks like this loop may have been meant
     to run to M; confirm against upstream before changing. */
  for(i=0; i<N; i++) {
    int foo = check(gsl_vector_get(res, i), gsl_vector_get(r,i), sqrt(eps));
    if(foo) {
      printf("(%3lu,%3lu)[%lu]: %22.18g %22.18g\n", M, N, i, gsl_vector_get(res, i), gsl_vector_get(r,i));
    }
    s += foo;
  }
  gsl_vector_free(r);
  gsl_vector_free(res);
  gsl_vector_free(x);
  gsl_vector_free(d);
  gsl_matrix_free(qr);
  gsl_vector_free(rhs);
  return s;
}
/* Drive test_QR_lssolve_dim() over an overdetermined 5x3 system and
   the standard square test systems.  Returns the total failure
   count. */
int test_QR_lssolve(void)
{
  int failures = 0;
  int status;

  status = test_QR_lssolve_dim(m53, m53_lssolution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve m(5,3)");
  failures += status;

  status = test_QR_lssolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve hilbert(2)");
  failures += status;

  status = test_QR_lssolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve hilbert(3)");
  failures += status;

  status = test_QR_lssolve_dim(hilb4, hilb4_solution, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve hilbert(4)");
  failures += status;

  status = test_QR_lssolve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(status, "  QR_lssolve hilbert(12)");
  failures += status;

  status = test_QR_lssolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve vander(2)");
  failures += status;

  status = test_QR_lssolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve vander(3)");
  failures += status;

  status = test_QR_lssolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(status, "  QR_lssolve vander(4)");
  failures += status;

  status = test_QR_lssolve_dim(vander12, vander12_solution, 0.05);
  gsl_test(status, "  QR_lssolve vander(12)");
  failures += status;

  return failures;
}
/* Verify the QR decomposition of an M x N matrix m by unpacking Q
   and R and checking that their product reproduces m entry by entry
   to accuracy eps.  Mismatches are printed and counted; the failure
   count is returned (0 = success). */
int
test_QR_decomp_dim(const gsl_matrix * m, double eps)
{
  int s = 0;
  unsigned long i,j, M = m->size1, N = m->size2;
  gsl_matrix * qr = gsl_matrix_alloc(M,N);
  gsl_matrix * a = gsl_matrix_alloc(M,N);
  gsl_matrix * q = gsl_matrix_alloc(M,M);
  gsl_matrix * r = gsl_matrix_alloc(M,N);
  gsl_vector * d = gsl_vector_alloc(GSL_MIN(M,N));
  /* decompose a copy; QR_decomp overwrites its matrix argument */
  gsl_matrix_memcpy(qr,m);
  s += gsl_linalg_QR_decomp(qr, d);
  s += gsl_linalg_QR_unpack(qr, d, q, r);
  /* compute a = q r */
  gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, q, r, 0.0, a);
  /* Q R must reproduce the original matrix */
  for(i=0; i<M; i++) {
    for(j=0; j<N; j++) {
      double aij = gsl_matrix_get(a, i, j);
      double mij = gsl_matrix_get(m, i, j);
      int foo = check(aij, mij, eps);
      if(foo) {
        printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
      }
      s += foo;
    }
  }
  gsl_vector_free(d);
  gsl_matrix_free(qr);
  gsl_matrix_free(a);
  gsl_matrix_free(q);
  gsl_matrix_free(r);
  return s;
}
int test_QR_decomp(void)
{
int f;
int s = 0;
f = test_QR_decomp_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp m(3,5)");
s += f;
f = test_QR_decomp_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp m(5,3)");
s += f;
f = test_QR_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp hilbert(2)");
s += f;
f = test_QR_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp hilbert(3)");
s += f;
f = test_QR_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp hilbert(4)");
s += f;
f = test_QR_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp hilbert(12)");
s += f;
f = test_QR_decomp_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp vander(2)");
s += f;
f = test_QR_decomp_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp vander(3)");
s += f;
f = test_QR_decomp_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_decomp vander(4)");
s += f;
f = test_QR_decomp_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " QR_decomp vander(12)");
s += f;
return s;
}
int
test_QRPT_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_QRPT_solve on the square matrix m: decompose
   with column pivoting, solve m x = rhs for rhs = (1,2,...,dim),
   and compare x against the known solution `actual`.  Returns the
   number of components that fail the tolerance check. */
int s = 0;
int signum;
unsigned long i, dim = m->size1;
gsl_permutation * perm = gsl_permutation_alloc(dim);
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * qr = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
gsl_vector * x = gsl_vector_alloc(dim);
/* norm is workspace required by the pivoted decomposition. */
gsl_vector * norm = gsl_vector_alloc(dim);
gsl_matrix_memcpy(qr,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_QRPT_decomp(qr, d, perm, &signum, norm);
s += gsl_linalg_QRPT_solve(qr, d, perm, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(norm);
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(qr);
gsl_vector_free(rhs);
gsl_permutation_free(perm);
return s;
}
int test_QRPT_solve(void)
{
int f;
int s = 0;
f = test_QRPT_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve hilbert(2)");
s += f;
f = test_QRPT_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve hilbert(3)");
s += f;
f = test_QRPT_solve_dim(hilb4, hilb4_solution, 2 * 2048.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve hilbert(4)");
s += f;
f = test_QRPT_solve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " QRPT_solve hilbert(12)");
s += f;
f = test_QRPT_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve vander(2)");
s += f;
f = test_QRPT_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve vander(3)");
s += f;
f = test_QRPT_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_solve vander(4)");
s += f;
f = test_QRPT_solve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " QRPT_solve vander(12)");
s += f;
return s;
}
int
test_QRPT_QRsolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_QRPT_QRsolve: decompose m with pivoting via
   decomp2 (which also unpacks explicit Q and R), solve m x = rhs
   for rhs = (1,2,...,dim), and compare against `actual`.  Returns
   the number of components that fail the tolerance check. */
int s = 0;
int signum;
unsigned long i, dim = m->size1;
gsl_permutation * perm = gsl_permutation_alloc(dim);
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * qr = gsl_matrix_alloc(dim,dim);
gsl_matrix * q = gsl_matrix_alloc(dim,dim);
gsl_matrix * r = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
gsl_vector * x = gsl_vector_alloc(dim);
/* norm is workspace required by the pivoted decomposition. */
gsl_vector * norm = gsl_vector_alloc(dim);
gsl_matrix_memcpy(qr,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_QRPT_decomp2(qr, q, r, d, perm, &signum, norm);
s += gsl_linalg_QRPT_QRsolve(q, r, perm, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(norm);
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(qr);
gsl_matrix_free(q);
gsl_matrix_free(r);
gsl_vector_free(rhs);
gsl_permutation_free(perm);
return s;
}
int test_QRPT_QRsolve(void)
{
int f;
int s = 0;
f = test_QRPT_QRsolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve hilbert(2)");
s += f;
f = test_QRPT_QRsolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve hilbert(3)");
s += f;
f = test_QRPT_QRsolve_dim(hilb4, hilb4_solution, 2 * 2048.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve hilbert(4)");
s += f;
f = test_QRPT_QRsolve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " QRPT_QRsolve hilbert(12)");
s += f;
f = test_QRPT_QRsolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve vander(2)");
s += f;
f = test_QRPT_QRsolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve vander(3)");
s += f;
f = test_QRPT_QRsolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_QRsolve vander(4)");
s += f;
f = test_QRPT_QRsolve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " QRPT_QRsolve vander(12)");
s += f;
return s;
}
int
test_QRPT_decomp_dim(const gsl_matrix * m, double eps)
{
/* Verify the pivoted QR decomposition m P = Q R: factor a copy,
   unpack Q and R, form Q R, undo the column permutation, and
   compare the reconstruction against m element by element.
   Returns the number of failed comparisons. */
int s = 0, signum;
unsigned long i,j, M = m->size1, N = m->size2;
gsl_matrix * qr = gsl_matrix_alloc(M,N);
gsl_matrix * a = gsl_matrix_alloc(M,N);
gsl_matrix * q = gsl_matrix_alloc(M,M);
gsl_matrix * r = gsl_matrix_alloc(M,N);
gsl_vector * d = gsl_vector_alloc(GSL_MIN(M,N));
gsl_vector * norm = gsl_vector_alloc(N);
gsl_permutation * perm = gsl_permutation_alloc(N);
gsl_matrix_memcpy(qr,m);
s += gsl_linalg_QRPT_decomp(qr, d, perm, &signum, norm);
s += gsl_linalg_QR_unpack(qr, d, q, r);
/* compute a = q r */
gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, q, r, 0.0, a);
/* Compute QR P^T by permuting the elements of the rows of QR */
for (i = 0; i < M; i++) {
gsl_vector_view row = gsl_matrix_row (a, i);
gsl_permute_vector_inverse (perm, &row.vector);
}
/* After undoing the pivoting, a should equal m. */
for(i=0; i<M; i++) {
for(j=0; j<N; j++) {
double aij = gsl_matrix_get(a, i, j);
double mij = gsl_matrix_get(m, i, j);
int foo = check(aij, mij, eps);
if(foo) {
printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
}
s += foo;
}
}
gsl_permutation_free (perm);
gsl_vector_free(norm);
gsl_vector_free(d);
gsl_matrix_free(qr);
gsl_matrix_free(a);
gsl_matrix_free(q);
gsl_matrix_free(r);
return s;
}
int test_QRPT_decomp(void)
{
int f;
int s = 0;
f = test_QRPT_decomp_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp m(3,5)");
s += f;
f = test_QRPT_decomp_dim(m53, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp m(5,3)");
s += f;
f = test_QRPT_decomp_dim(s35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp s(3,5)");
s += f;
f = test_QRPT_decomp_dim(s53, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp s(5,3)");
s += f;
f = test_QRPT_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp hilbert(2)");
s += f;
f = test_QRPT_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp hilbert(3)");
s += f;
f = test_QRPT_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp hilbert(4)");
s += f;
f = test_QRPT_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp hilbert(12)");
s += f;
f = test_QRPT_decomp_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp vander(2)");
s += f;
f = test_QRPT_decomp_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp vander(3)");
s += f;
f = test_QRPT_decomp_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_decomp vander(4)");
s += f;
f = test_QRPT_decomp_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " QRPT_decomp vander(12)");
s += f;
return s;
}
int
test_QR_update_dim(const gsl_matrix * m, double eps)
{
  /* Verify the rank-1 QR update: decompose m, apply
     gsl_linalg_QR_update for A' = A + u v^T, and compare the
     reconstructed Q*R against the directly updated matrix.
     Returns the number of failed element comparisons.

     Cleanup: the original version also allocated (and immediately
     freed) q1, r1, solution1 and solution2 without ever using them;
     those dead allocations have been removed. */
  int s = 0;
  unsigned long i, j, k, M = m->size1, N = m->size2;
  gsl_matrix * qr1 = gsl_matrix_alloc(M, N);
  gsl_matrix * qr2 = gsl_matrix_alloc(M, N);
  gsl_matrix * q2 = gsl_matrix_alloc(M, M);
  gsl_matrix * r2 = gsl_matrix_alloc(M, N);
  gsl_vector * d = gsl_vector_alloc(GSL_MIN(M, N));
  gsl_vector * u = gsl_vector_alloc(M);
  gsl_vector * v = gsl_vector_alloc(N);
  gsl_vector * w = gsl_vector_alloc(M);

  gsl_matrix_memcpy(qr1, m);
  gsl_matrix_memcpy(qr2, m);

  /* Deterministic pseudo-random update vectors u and v. */
  for (i = 0; i < M; i++) gsl_vector_set(u, i, sin(i + 1.0));
  for (i = 0; i < N; i++) gsl_vector_set(v, i, cos(i + 2.0) + sin(i * i + 3.0));

  /* qr1 <- m + u v^T, computed directly for reference. */
  for (i = 0; i < M; i++)
    {
      double ui = gsl_vector_get(u, i);
      for (j = 0; j < N; j++)
        {
          double vj = gsl_vector_get(v, j);
          double qij = gsl_matrix_get(qr1, i, j);
          gsl_matrix_set(qr1, i, j, qij + ui * vj);
        }
    }

  s += gsl_linalg_QR_decomp(qr2, d);
  s += gsl_linalg_QR_unpack(qr2, d, q2, r2);

  /* w = Q^T u, the form required by gsl_linalg_QR_update. */
  for (j = 0; j < M; j++)
    {
      double sum = 0;
      for (i = 0; i < M; i++)
        sum += gsl_matrix_get (q2, i, j) * gsl_vector_get (u, i);
      gsl_vector_set (w, j, sum);
    }

  s += gsl_linalg_QR_update(q2, r2, w, v);

  /* qr2 <- q2 * r2; R is upper triangular, so the inner sum only
     needs k <= min(j, M-1). */
  for (i = 0; i < M; i++)
    {
      for (j = 0; j < N; j++)
        {
          double sum = 0;
          for (k = 0; k <= GSL_MIN(j, M - 1); k++)
            {
              double qik = gsl_matrix_get(q2, i, k);
              double rkj = gsl_matrix_get(r2, k, j);
              sum += qik * rkj ;
            }
          gsl_matrix_set (qr2, i, j, sum);
        }
    }

  /* The updated factorization must reproduce the direct update. */
  for (i = 0; i < M; i++)
    {
      for (j = 0; j < N; j++)
        {
          double s1 = gsl_matrix_get(qr1, i, j);
          double s2 = gsl_matrix_get(qr2, i, j);
          int foo = check(s1, s2, eps);
          if (foo)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i, j, s1, s2);
            }
          s += foo;
        }
    }

  gsl_vector_free(d);
  gsl_vector_free(u);
  gsl_vector_free(v);
  gsl_vector_free(w);
  gsl_matrix_free(qr1);
  gsl_matrix_free(qr2);
  gsl_matrix_free(q2);
  gsl_matrix_free(r2);
  return s;
}
int test_QR_update(void)
{
int f;
int s = 0;
f = test_QR_update_dim(m35, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update m(3,5)");
s += f;
f = test_QR_update_dim(m53, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update m(5,3)");
s += f;
f = test_QR_update_dim(hilb2, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update hilbert(2)");
s += f;
f = test_QR_update_dim(hilb3, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update hilbert(3)");
s += f;
f = test_QR_update_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update hilbert(4)");
s += f;
f = test_QR_update_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update hilbert(12)");
s += f;
f = test_QR_update_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update vander(2)");
s += f;
f = test_QR_update_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update vander(3)");
s += f;
f = test_QR_update_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QR_update vander(4)");
s += f;
f = test_QR_update_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " QR_update vander(12)");
s += f;
return s;
}
int
test_QRPT_update_dim(const gsl_matrix * m, double eps)
{
  /* Verify the rank-1 update of a pivoted QR factorization:
     decompose m with column pivoting, apply gsl_linalg_QRPT_update
     for A' = A + u v^T, reconstruct Q R P^T, and compare against the
     directly updated matrix.  Returns the failure count.

     Cleanup: the original version allocated (and freed) q1 and r1
     without ever using them; those dead allocations are removed. */
  int s = 0, signum;
  unsigned long i, j, k, M = m->size1, N = m->size2;
  gsl_matrix * qr1 = gsl_matrix_alloc(M, N);
  gsl_matrix * qr2 = gsl_matrix_alloc(M, N);
  gsl_matrix * q2 = gsl_matrix_alloc(M, M);
  gsl_matrix * r2 = gsl_matrix_alloc(M, N);
  gsl_vector * d = gsl_vector_alloc(GSL_MIN(M, N));
  gsl_vector * u = gsl_vector_alloc(M);
  gsl_vector * v = gsl_vector_alloc(N);
  gsl_vector * w = gsl_vector_alloc(M);
  gsl_vector * norm = gsl_vector_alloc(N);
  gsl_permutation * perm = gsl_permutation_alloc(N);

  gsl_matrix_memcpy(qr1, m);
  gsl_matrix_memcpy(qr2, m);

  /* Deterministic pseudo-random update vectors u and v. */
  for (i = 0; i < M; i++) gsl_vector_set(u, i, sin(i + 1.0));
  for (i = 0; i < N; i++) gsl_vector_set(v, i, cos(i + 2.0) + sin(i * i + 3.0));

  /* qr1 <- m + u v^T, computed directly for reference. */
  for (i = 0; i < M; i++)
    {
      double ui = gsl_vector_get(u, i);
      for (j = 0; j < N; j++)
        {
          double vj = gsl_vector_get(v, j);
          double qij = gsl_matrix_get(qr1, i, j);
          gsl_matrix_set(qr1, i, j, qij + ui * vj);
        }
    }

  s += gsl_linalg_QRPT_decomp(qr2, d, perm, &signum, norm);
  s += gsl_linalg_QR_unpack(qr2, d, q2, r2);

  /* w = Q^T u, the form required by gsl_linalg_QRPT_update. */
  for (j = 0; j < M; j++)
    {
      double sum = 0;
      for (i = 0; i < M; i++)
        sum += gsl_matrix_get (q2, i, j) * gsl_vector_get (u, i);
      gsl_vector_set (w, j, sum);
    }

  s += gsl_linalg_QRPT_update(q2, r2, perm, w, v);

  /* Now compute qr2 = q2 * r2 * p^T.  First multiply q2 * r2;
     R is upper triangular so the inner sum stops at min(j, M-1). */
  for (i = 0; i < M; i++)
    {
      for (j = 0; j < N; j++)
        {
          double sum = 0;
          for (k = 0; k <= GSL_MIN(j, M - 1); k++)
            {
              double qik = gsl_matrix_get(q2, i, k);
              double rkj = gsl_matrix_get(r2, k, j);
              sum += qik * rkj ;
            }
          gsl_matrix_set (qr2, i, j, sum);
        }
    }

  /* Then apply the inverse permutation to each row to undo pivoting. */
  for (i = 0; i < M; i++)
    {
      gsl_vector_view r_i = gsl_matrix_row(qr2, i);
      gsl_permute_vector_inverse(perm, &r_i.vector);
    }

  /* The updated factorization must reproduce the direct update. */
  for (i = 0; i < M; i++)
    {
      for (j = 0; j < N; j++)
        {
          double s1 = gsl_matrix_get(qr1, i, j);
          double s2 = gsl_matrix_get(qr2, i, j);
          int foo = check(s1, s2, eps);
          if (foo)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i, j, s1, s2);
            }
          s += foo;
        }
    }

  gsl_permutation_free (perm);
  gsl_vector_free(norm);
  gsl_vector_free(d);
  gsl_vector_free(u);
  gsl_vector_free(v);
  gsl_vector_free(w);
  gsl_matrix_free(qr1);
  gsl_matrix_free(qr2);
  gsl_matrix_free(q2);
  gsl_matrix_free(r2);
  return s;
}
int test_QRPT_update(void)
{
int f;
int s = 0;
f = test_QRPT_update_dim(m35, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update m(3,5)");
s += f;
f = test_QRPT_update_dim(m53, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update m(5,3)");
s += f;
f = test_QRPT_update_dim(hilb2, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update hilbert(2)");
s += f;
f = test_QRPT_update_dim(hilb3, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update hilbert(3)");
s += f;
f = test_QRPT_update_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update hilbert(4)");
s += f;
f = test_QRPT_update_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update hilbert(12)");
s += f;
f = test_QRPT_update_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update vander(2)");
s += f;
f = test_QRPT_update_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update vander(3)");
s += f;
f = test_QRPT_update_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " QRPT_update vander(4)");
s += f;
f = test_QRPT_update_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " QRPT_update vander(12)");
s += f;
return s;
}
int
test_LQ_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_LQ_solve_T on the square matrix m: decompose
   the TRANSPOSE of m (LQ works on m^T where QR works on m), solve
   for rhs = (1,2,...,dim), and compare against `actual`.  Returns
   the number of components that fail the tolerance check. */
int s = 0;
unsigned long i, dim = m->size1;
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * lq = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
gsl_vector * x = gsl_vector_alloc(dim);
/* LQ decomposition of m^T is the transpose of the QR of m. */
gsl_matrix_transpose_memcpy(lq,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_LQ_decomp(lq, d);
s += gsl_linalg_LQ_solve_T(lq, d, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(lq);
gsl_vector_free(rhs);
return s;
}
int test_LQ_solve(void)
{
int f;
int s = 0;
f = test_LQ_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve hilbert(2)");
s += f;
f = test_LQ_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve hilbert(3)");
s += f;
f = test_LQ_solve_dim(hilb4, hilb4_solution, 4 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve hilbert(4)");
s += f;
f = test_LQ_solve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " LQ_solve hilbert(12)");
s += f;
f = test_LQ_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve vander(2)");
s += f;
f = test_LQ_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve vander(3)");
s += f;
f = test_LQ_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_solve vander(4)");
s += f;
f = test_LQ_solve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " LQ_solve vander(12)");
s += f;
return s;
}
int
test_LQ_LQsolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_LQ_LQsolve: LQ-decompose the transpose of m,
   unpack explicit Q and L, solve for rhs = (1,2,...,dim), and
   compare against the known solution `actual`.  Returns the number
   of components that fail the tolerance check. */
int s = 0;
unsigned long i, dim = m->size1;
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * lq = gsl_matrix_alloc(dim,dim);
gsl_matrix * q = gsl_matrix_alloc(dim,dim);
gsl_matrix * l = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
gsl_vector * x = gsl_vector_alloc(dim);
/* LQ decomposition of m^T is the transpose of the QR of m. */
gsl_matrix_transpose_memcpy(lq,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_LQ_decomp(lq, d);
s += gsl_linalg_LQ_unpack(lq, d, q, l);
s += gsl_linalg_LQ_LQsolve(q, l, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(lq);
gsl_matrix_free(q);
gsl_matrix_free(l);
gsl_vector_free(rhs);
return s;
}
int test_LQ_LQsolve(void)
{
int f;
int s = 0;
f = test_LQ_LQsolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve hilbert(2)");
s += f;
f = test_LQ_LQsolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve hilbert(3)");
s += f;
f = test_LQ_LQsolve_dim(hilb4, hilb4_solution, 4 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve hilbert(4)");
s += f;
f = test_LQ_LQsolve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " LQ_LQsolve hilbert(12)");
s += f;
f = test_LQ_LQsolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve vander(2)");
s += f;
f = test_LQ_LQsolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve vander(3)");
s += f;
f = test_LQ_LQsolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_LQsolve vander(4)");
s += f;
f = test_LQ_LQsolve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " LQ_LQsolve vander(12)");
s += f;
return s;
}
int
test_LQ_lssolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  /* Verify gsl_linalg_LQ_lssolve_T: least-squares solve of the
     (possibly rectangular M x N) system m x = rhs via the LQ
     decomposition of m^T, checking both the solution x against
     `actual` and the returned residual against a directly computed
     r = b - m x.  Returns the number of failed comparisons.

     Fix: the residual-comparison loop previously ran i < N, but the
     residual vectors have length M; for M > N (e.g. the m(5,3) case)
     the trailing residual components were never checked.  It now
     runs i < M.  A stray `;` after the else block was also removed. */
  int s = 0;
  unsigned long i, M = m->size1, N = m->size2;
  gsl_vector * rhs = gsl_vector_alloc(M);
  gsl_matrix * lq = gsl_matrix_alloc(N, M);
  gsl_vector * d = gsl_vector_alloc(N);
  gsl_vector * x = gsl_vector_alloc(N);
  gsl_vector * r = gsl_vector_alloc(M);
  gsl_vector * res = gsl_vector_alloc(M);

  /* LQ works on the transpose of m. */
  gsl_matrix_transpose_memcpy(lq, m);

  for (i = 0; i < M; i++) gsl_vector_set(rhs, i, i + 1.0);

  s += gsl_linalg_LQ_decomp(lq, d);
  s += gsl_linalg_LQ_lssolve_T(lq, d, rhs, x, res);

  for (i = 0; i < N; i++)
    {
      int foo = check(gsl_vector_get(x, i), actual[i], eps);
      if (foo)
        {
          printf("(%3lu,%3lu)[%lu]: %22.18g %22.18g\n", M, N, i, gsl_vector_get(x, i), actual[i]);
        }
      s += foo;
    }

  /* compute residual r = b - m x */
  if (M == N)
    {
      gsl_vector_set_zero(r);
    }
  else
    {
      gsl_vector_memcpy(r, rhs);
      gsl_blas_dgemv(CblasNoTrans, -1.0, m, x, 1.0, r);
    }

  /* Compare all M components of the returned residual. */
  for (i = 0; i < M; i++)
    {
      int foo = check(gsl_vector_get(res, i), gsl_vector_get(r, i), sqrt(eps));
      if (foo)
        {
          printf("(%3lu,%3lu)[%lu]: %22.18g %22.18g\n", M, N, i, gsl_vector_get(res, i), gsl_vector_get(r, i));
        }
      s += foo;
    }

  gsl_vector_free(r);
  gsl_vector_free(res);
  gsl_vector_free(x);
  gsl_vector_free(d);
  gsl_matrix_free(lq);
  gsl_vector_free(rhs);
  return s;
}
int test_LQ_lssolve(void)
{
int f;
int s = 0;
f = test_LQ_lssolve_dim(m53, m53_lssolution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve m(5,3)");
s += f;
f = test_LQ_lssolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve hilbert(2)");
s += f;
f = test_LQ_lssolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve hilbert(3)");
s += f;
f = test_LQ_lssolve_dim(hilb4, hilb4_solution, 4 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve hilbert(4)");
s += f;
f = test_LQ_lssolve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " LQ_lssolve hilbert(12)");
s += f;
f = test_LQ_lssolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve vander(2)");
s += f;
f = test_LQ_lssolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve vander(3)");
s += f;
f = test_LQ_lssolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_lssolve vander(4)");
s += f;
f = test_LQ_lssolve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " LQ_lssolve vander(12)");
s += f;
return s;
}
int
test_LQ_decomp_dim(const gsl_matrix * m, double eps)
{
/* Verify the LQ decomposition of m: factor a copy, unpack into
   explicit L and Q, re-multiply, and compare L*Q against the
   original matrix element by element.  Returns the number of
   failed comparisons. */
int s = 0;
unsigned long i,j, M = m->size1, N = m->size2;
gsl_matrix * lq = gsl_matrix_alloc(M,N);
gsl_matrix * a = gsl_matrix_alloc(M,N);
gsl_matrix * q = gsl_matrix_alloc(N,N);
gsl_matrix * l = gsl_matrix_alloc(M,N);
gsl_vector * d = gsl_vector_alloc(GSL_MIN(M,N));
gsl_matrix_memcpy(lq,m);
s += gsl_linalg_LQ_decomp(lq, d);
s += gsl_linalg_LQ_unpack(lq, d, q, l);
/* compute a = l q (the LQ reconstruction of m) */
gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, l, q, 0.0, a);
for(i=0; i<M; i++) {
for(j=0; j<N; j++) {
double aij = gsl_matrix_get(a, i, j);
double mij = gsl_matrix_get(m, i, j);
int foo = check(aij, mij, eps);
if(foo) {
printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
}
s += foo;
}
}
gsl_vector_free(d);
gsl_matrix_free(lq);
gsl_matrix_free(a);
gsl_matrix_free(q);
gsl_matrix_free(l);
return s;
}
int test_LQ_decomp(void)
{
int f;
int s = 0;
f = test_LQ_decomp_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp m(3,5)");
s += f;
f = test_LQ_decomp_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp m(5,3)");
s += f;
f = test_LQ_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp hilbert(2)");
s += f;
f = test_LQ_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp hilbert(3)");
s += f;
f = test_LQ_decomp_dim(hilb4, 4 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp hilbert(4)");
s += f;
f = test_LQ_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp hilbert(12)");
s += f;
f = test_LQ_decomp_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp vander(2)");
s += f;
f = test_LQ_decomp_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp vander(3)");
s += f;
f = test_LQ_decomp_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_decomp vander(4)");
s += f;
f = test_LQ_decomp_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " LQ_decomp vander(12)");
s += f;
return s;
}
int
test_PTLQ_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_PTLQ_solve_T on the square matrix m: decompose
   the transpose of m with row pivoting (P^T L Q), solve for
   rhs = (1,2,...,dim), and compare against the known solution
   `actual`.  Returns the number of failed comparisons. */
int s = 0;
int signum;
unsigned long i, dim = m->size1;
gsl_permutation * perm = gsl_permutation_alloc(dim);
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * lq = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
gsl_vector * x = gsl_vector_alloc(dim);
/* norm is workspace required by the pivoted decomposition. */
gsl_vector * norm = gsl_vector_alloc(dim);
gsl_matrix_transpose_memcpy(lq,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_PTLQ_decomp(lq, d, perm, &signum, norm);
s += gsl_linalg_PTLQ_solve_T(lq, d, perm, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(norm);
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(lq);
gsl_vector_free(rhs);
gsl_permutation_free(perm);
return s;
}
int test_PTLQ_solve(void)
{
int f;
int s = 0;
f = test_PTLQ_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve hilbert(2)");
s += f;
f = test_PTLQ_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve hilbert(3)");
s += f;
f = test_PTLQ_solve_dim(hilb4, hilb4_solution, 2 * 2048.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve hilbert(4)");
s += f;
f = test_PTLQ_solve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " PTLQ_solve hilbert(12)");
s += f;
f = test_PTLQ_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve vander(2)");
s += f;
f = test_PTLQ_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve vander(3)");
s += f;
f = test_PTLQ_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_solve vander(4)");
s += f;
f = test_PTLQ_solve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " PTLQ_solve vander(12)");
s += f;
return s;
}
int
test_PTLQ_LQsolve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  /* Verify gsl_linalg_PTLQ_LQsolve_T: decompose the transpose of m
     with pivoting via decomp2 (which also unpacks explicit q and l),
     solve for rhs = (1,2,...,dim), and compare against the known
     solution `actual`.  Returns the number of failed comparisons.

     Fix: the original leaked the q and l matrices (allocated but
     never freed); they are now released with the other resources. */
  int s = 0;
  int signum;
  unsigned long i, dim = m->size1;
  gsl_permutation * perm = gsl_permutation_alloc(dim);
  gsl_vector * rhs = gsl_vector_alloc(dim);
  gsl_matrix * lq = gsl_matrix_alloc(dim, dim);
  gsl_matrix * q = gsl_matrix_alloc(dim, dim);
  gsl_matrix * l = gsl_matrix_alloc(dim, dim);
  gsl_vector * d = gsl_vector_alloc(dim);
  gsl_vector * x = gsl_vector_alloc(dim);
  gsl_vector * norm = gsl_vector_alloc(dim);

  gsl_matrix_transpose_memcpy(lq, m);

  for (i = 0; i < dim; i++) gsl_vector_set(rhs, i, i + 1.0);

  s += gsl_linalg_PTLQ_decomp2(lq, q, l, d, perm, &signum, norm);
  s += gsl_linalg_PTLQ_LQsolve_T(q, l, perm, rhs, x);

  for (i = 0; i < dim; i++)
    {
      int foo = check(gsl_vector_get(x, i), actual[i], eps);
      if (foo)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
        }
      s += foo;
    }

  gsl_vector_free(norm);
  gsl_vector_free(x);
  gsl_vector_free(d);
  gsl_matrix_free(lq);
  gsl_matrix_free(q);   /* leaked in the original version */
  gsl_matrix_free(l);   /* leaked in the original version */
  gsl_vector_free(rhs);
  gsl_permutation_free(perm);
  return s;
}
int test_PTLQ_LQsolve(void)
{
int f;
int s = 0;
f = test_PTLQ_LQsolve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve hilbert(2)");
s += f;
f = test_PTLQ_LQsolve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve hilbert(3)");
s += f;
f = test_PTLQ_LQsolve_dim(hilb4, hilb4_solution, 2 * 2048.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve hilbert(4)");
s += f;
f = test_PTLQ_LQsolve_dim(hilb12, hilb12_solution, 0.5);
gsl_test(f, " PTLQ_LQsolve hilbert(12)");
s += f;
f = test_PTLQ_LQsolve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve vander(2)");
s += f;
f = test_PTLQ_LQsolve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve vander(3)");
s += f;
f = test_PTLQ_LQsolve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_LQsolve vander(4)");
s += f;
f = test_PTLQ_LQsolve_dim(vander12, vander12_solution, 0.05);
gsl_test(f, " PTLQ_LQsolve vander(12)");
s += f;
return s;
}
int
test_PTLQ_decomp_dim(const gsl_matrix * m, double eps)
{
/* Verify the pivoted LQ decomposition of m^T (an N x M matrix):
   factor, unpack L and Q, form L*Q, undo the permutation, and
   compare the reconstruction (transposed back) against m element
   by element.  Returns the number of failed comparisons. */
int s = 0, signum;
unsigned long i,j, M = m->size1, N = m->size2;
gsl_matrix * lq = gsl_matrix_alloc(N,M);
gsl_matrix * a = gsl_matrix_alloc(N,M);
gsl_matrix * q = gsl_matrix_alloc(M,M);
gsl_matrix * l = gsl_matrix_alloc(N,M);
gsl_vector * d = gsl_vector_alloc(GSL_MIN(M,N));
gsl_vector * norm = gsl_vector_alloc(N);
gsl_permutation * perm = gsl_permutation_alloc(N);
gsl_matrix_transpose_memcpy(lq,m);
s += gsl_linalg_PTLQ_decomp(lq, d, perm, &signum, norm);
s += gsl_linalg_LQ_unpack(lq, d, q, l);
/* compute a = l q */
gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, l, q, 0.0, a);
/* Compute P LQ by permuting the rows of LQ */
for (i = 0; i < M; i++) {
gsl_vector_view col = gsl_matrix_column (a, i);
gsl_permute_vector_inverse (perm, &col.vector);
}
/* a holds the reconstruction of m^T, so compare a[j][i] to m[i][j]. */
for(i=0; i<M; i++) {
for(j=0; j<N; j++) {
double aij = gsl_matrix_get(a, j, i);
double mij = gsl_matrix_get(m, i, j);
int foo = check(aij, mij, eps);
if(foo) {
printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
}
s += foo;
}
}
gsl_permutation_free (perm);
gsl_vector_free(norm);
gsl_vector_free(d);
gsl_matrix_free(lq);
gsl_matrix_free(a);
gsl_matrix_free(q);
gsl_matrix_free(l);
return s;
}
int test_PTLQ_decomp(void)
{
int f;
int s = 0;
f = test_PTLQ_decomp_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp m(3,5)");
s += f;
f = test_PTLQ_decomp_dim(m53, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp m(5,3)");
s += f;
f = test_PTLQ_decomp_dim(s35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp s(3,5)");
s += f;
f = test_PTLQ_decomp_dim(s53, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp s(5,3)");
s += f;
f = test_PTLQ_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp hilbert(2)");
s += f;
f = test_PTLQ_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp hilbert(3)");
s += f;
f = test_PTLQ_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp hilbert(4)");
s += f;
f = test_PTLQ_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp hilbert(12)");
s += f;
f = test_PTLQ_decomp_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp vander(2)");
s += f;
f = test_PTLQ_decomp_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp vander(3)");
s += f;
f = test_PTLQ_decomp_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " PTLQ_decomp vander(4)");
s += f;
f = test_PTLQ_decomp_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " PTLQ_decomp vander(12)");
s += f;
return s;
}
int
test_LQ_update_dim(const gsl_matrix * m, double eps)
{
  /* Verify the rank-1 LQ update: decompose m^T, apply
     gsl_linalg_LQ_update for A' = A + v u^T (on the transposed
     system), re-multiply L*Q, and compare against the directly
     updated matrix.  Returns the number of failed comparisons.

     Cleanup: the original version allocated (and freed) q1 and l1
     without ever using them; those dead allocations are removed. */
  int s = 0;
  unsigned long i, j, M = m->size1, N = m->size2;
  gsl_matrix * lq1 = gsl_matrix_alloc(N, M);
  gsl_matrix * lq2 = gsl_matrix_alloc(N, M);
  gsl_matrix * q2 = gsl_matrix_alloc(M, M);
  gsl_matrix * l2 = gsl_matrix_alloc(N, M);
  gsl_vector * d2 = gsl_vector_alloc(GSL_MIN(M, N));
  gsl_vector * u = gsl_vector_alloc(M);
  gsl_vector * v = gsl_vector_alloc(N);
  gsl_vector * w = gsl_vector_alloc(M);

  gsl_matrix_transpose_memcpy(lq1, m);
  gsl_matrix_transpose_memcpy(lq2, m);

  /* Deterministic pseudo-random update vectors u and v. */
  for (i = 0; i < M; i++) gsl_vector_set(u, i, sin(i + 1.0));
  for (i = 0; i < N; i++) gsl_vector_set(v, i, cos(i + 2.0) + sin(i * i + 3.0));

  /* lq1 is updated */
  gsl_blas_dger(1.0, v, u, lq1);

  /* lq2 is first decomposed, updated later */
  s += gsl_linalg_LQ_decomp(lq2, d2);
  s += gsl_linalg_LQ_unpack(lq2, d2, q2, l2);

  /* compute w = Q^T u */
  gsl_blas_dgemv(CblasNoTrans, 1.0, q2, u, 0.0, w);

  /* now lq2 is updated */
  s += gsl_linalg_LQ_update(q2, l2, v, w);

  /* multiply q2*l2 back into lq2 */
  gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, l2, q2, 0.0, lq2);

  /* check lq1==lq2 */
  for (i = 0; i < N; i++)
    {
      for (j = 0; j < M; j++)
        {
          double s1 = gsl_matrix_get(lq1, i, j);
          double s2 = gsl_matrix_get(lq2, i, j);
          int foo = check(s1, s2, eps);
#if 0
          if (foo)
            {
              printf("LQ:(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i, j, s1, s2);
            }
#endif
          s += foo;
        }
    }

  gsl_vector_free(d2);
  gsl_vector_free(u);
  gsl_vector_free(v);
  gsl_vector_free(w);
  gsl_matrix_free(lq1);
  gsl_matrix_free(lq2);
  gsl_matrix_free(q2);
  gsl_matrix_free(l2);
  return s;
}
int test_LQ_update(void)
{
int f;
int s = 0;
f = test_LQ_update_dim(m35, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update m(3,5)");
s += f;
f = test_LQ_update_dim(m53, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update m(5,3)");
s += f;
f = test_LQ_update_dim(hilb2, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update hilbert(2)");
s += f;
f = test_LQ_update_dim(hilb3, 2 * 512.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update hilbert(3)");
s += f;
f = test_LQ_update_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update hilbert(4)");
s += f;
f = test_LQ_update_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update hilbert(12)");
s += f;
f = test_LQ_update_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update vander(2)");
s += f;
f = test_LQ_update_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update vander(3)");
s += f;
f = test_LQ_update_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " LQ_update vander(4)");
s += f;
f = test_LQ_update_dim(vander12, 0.0005); /* FIXME: bad accuracy */
gsl_test(f, " LQ_update vander(12)");
s += f;
return s;
}
int
test_SV_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
/* Verify gsl_linalg_SV_solve on the square matrix m: compute the
   singular value decomposition, solve m x = rhs for
   rhs = (1,2,...,dim), and compare x against the known solution
   `actual`.  Returns the number of failed comparisons. */
int s = 0;
unsigned long i, dim = m->size1;
gsl_vector * rhs = gsl_vector_alloc(dim);
gsl_matrix * u = gsl_matrix_alloc(dim,dim);
gsl_matrix * q = gsl_matrix_alloc(dim,dim);
gsl_vector * d = gsl_vector_alloc(dim);
/* x doubles as workspace for SV_decomp, then holds the solution. */
gsl_vector * x = gsl_vector_calloc(dim);
gsl_matrix_memcpy(u,m);
for(i=0; i<dim; i++) gsl_vector_set(rhs, i, i+1.0);
s += gsl_linalg_SV_decomp(u, q, d, x);
s += gsl_linalg_SV_solve(u, q, d, rhs, x);
for(i=0; i<dim; i++) {
int foo = check(gsl_vector_get(x, i), actual[i], eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
gsl_vector_free(x);
gsl_vector_free(d);
gsl_matrix_free(u);
gsl_matrix_free(q);
gsl_vector_free(rhs);
return s;
}
/* Driver for the SVD solver tests over Hilbert and Vandermonde
 * systems with known solutions.  Returns the accumulated failures. */
int test_SV_solve(void)
{
  int status = 0;
  int r;

  r = test_SV_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve hilbert(2)");
  status += r;

  r = test_SV_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve hilbert(3)");
  status += r;

  r = test_SV_solve_dim(hilb4, hilb4_solution, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve hilbert(4)");
  status += r;

  r = test_SV_solve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(r, " SV_solve hilbert(12)");
  status += r;

  r = test_SV_solve_dim(vander2, vander2_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve vander(2)");
  status += r;

  r = test_SV_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve vander(3)");
  status += r;

  r = test_SV_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " SV_solve vander(4)");
  status += r;

  r = test_SV_solve_dim(vander12, vander12_solution, 0.05);
  gsl_test(r, " SV_solve vander(12)");
  status += r;

  return status;
}
/* Compute the SVD of m with gsl_linalg_SV_decomp() and verify that
 * (a) the singular values are non-negative and in non-increasing
 *     order (NaN entries are skipped), and
 * (b) the product U (S Q^T) reproduces m to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_SV_decomp_dim(const gsl_matrix * m, double eps)
{
  const unsigned long nrow = m->size1;
  const unsigned long ncol = m->size2;
  unsigned long r, c;
  int errs = 0;
  double prev;
  gsl_matrix * usv = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * prod = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * qmat = gsl_matrix_alloc(ncol, ncol);
  gsl_matrix * sqt = gsl_matrix_alloc(ncol, ncol);
  gsl_vector * sing = gsl_vector_alloc(ncol);
  gsl_vector * work = gsl_vector_alloc(ncol);

  gsl_matrix_memcpy(usv, m);
  errs += gsl_linalg_SV_decomp(usv, qmat, sing, work);

  /* each singular value must be >= 0 and no larger than its predecessor */
  prev = 0.0;
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      if (gsl_isnan(sv))
        {
          continue; /* skip NaNs */
        }
      if (sv < 0)
        {
          errs++;
          printf("singular value %lu = %22.18g < 0\n", r, sv);
        }
      if (r > 0 && sv > prev)
        {
          errs++;
          printf("singular value %lu = %22.18g vs previous %22.18g\n", r, sv, prev);
        }
      prev = sv;
    }

  /* form sqt = S Q^T (scale row i of Q^T by singular value i) */
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      for (c = 0; c < ncol; c++)
        {
          gsl_matrix_set(sqt, r, c, gsl_matrix_get(qmat, c, r) * sv);
        }
    }

  /* prod = U * (S Q^T) should reconstruct the original matrix */
  gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, usv, sqt, 0.0, prod);

  for (r = 0; r < nrow; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          const double got = gsl_matrix_get(prod, r, c);
          const double want = gsl_matrix_get(m, r, c);
          const int bad = check(got, want, eps);
          if (bad)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, got, want);
            }
          errs += bad;
        }
    }

  gsl_vector_free(work);
  gsl_vector_free(sing);
  gsl_matrix_free(usv);
  gsl_matrix_free(prod);
  gsl_matrix_free(qmat);
  gsl_matrix_free(sqt);
  return errs;
}
/* Driver for the Golub-Reinsch SVD tests: first the standard named test
   matrices (with per-matrix tolerances), then exhaustive sweeps over
   small matrices with integer entries.  Returns the failure count. */
int test_SV_decomp(void)
{
int f;
int s = 0;
f = test_SV_decomp_dim(m11, 2 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp m(1,1)");
s += f;
f = test_SV_decomp_dim(m51, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp m(5,1)");
s += f;
/* M<N not implemented yet */
#if 0
f = test_SV_decomp_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp m(3,5)");
s += f;
#endif
f = test_SV_decomp_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp m(5,3)");
s += f;
f = test_SV_decomp_dim(moler10, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp moler(10)");
s += f;
f = test_SV_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp hilbert(2)");
s += f;
f = test_SV_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp hilbert(3)");
s += f;
f = test_SV_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp hilbert(4)");
s += f;
f = test_SV_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp hilbert(12)");
s += f;
f = test_SV_decomp_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp vander(2)");
s += f;
f = test_SV_decomp_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp vander(3)");
s += f;
f = test_SV_decomp_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp vander(4)");
s += f;
f = test_SV_decomp_dim(vander12, 1e-4);
gsl_test(f, " SV_decomp vander(12)");
s += f;
f = test_SV_decomp_dim(row3, 10 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp row3");
s += f;
f = test_SV_decomp_dim(row5, 128 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp row5");
s += f;
f = test_SV_decomp_dim(row12, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp row12");
s += f;
f = test_SV_decomp_dim(inf5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp inf5");
s += f;
f = test_SV_decomp_dim(nan5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp nan5");
s += f;
f = test_SV_decomp_dim(dblmin3, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp dblmin3");
s += f;
f = test_SV_decomp_dim(dblmin5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp dblmin5");
s += f;
/* exhaustive sweep over all 2x2 matrices with integer entries in [-2, 2] */
{
double i1, i2, i3, i4;
double lower = -2, upper = 2;
for (i1 = lower; i1 <= upper; i1++)
{
for (i2 = lower; i2 <= upper; i2++)
{
for (i3 = lower; i3 <= upper; i3++)
{
for (i4 = lower; i4 <= upper; i4++)
{
gsl_matrix_set (A22, 0,0, i1);
gsl_matrix_set (A22, 0,1, i2);
gsl_matrix_set (A22, 1,0, i3);
gsl_matrix_set (A22, 1,1, i4);
f = test_SV_decomp_dim(A22, 16 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp (2x2) A=[%g, %g; %g, %g]", i1,i2,i3,i4);
s += f;
}
}
}
}
}
/* exhaustive sweep over all 3x3 matrices with entries in {0, 1},
   enumerated by treating A33->data as a 9-digit binary odometer */
{
int i;
double carry = 0, lower = 0, upper = 1;
double *a = A33->data;
for (i=0; i<9; i++) {
a[i] = lower;
}
while (carry == 0.0) {
f = test_SV_decomp_dim(A33, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp (3x3) A=[ %g, %g, %g; %g, %g, %g; %g, %g, %g]",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
/* increment */
/* add 1 to the last digit and propagate the carry toward a[0];
   carry remains 1.0 once a[0] wraps, which ends the while loop */
carry=1.0;
for (i=9; carry > 0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#ifdef TEST_SVD_4X4
/* same odometer sweep over all 4x4 matrices with entries in {0, 1};
   compiled only on demand since it runs 2^16 decompositions */
{
int i;
double carry = 0, lower = 0, upper = 1;
double *a = A44->data;
for (i=0; i<16; i++) {
a[i] = lower;
}
while (carry == 0.0) {
f = test_SV_decomp_dim(A44, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp (4x4) A=[ %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g]",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9],
a[10], a[11], a[12], a[13], a[14], a[15]);
/* increment */
carry=1.0;
for (i=16; carry > 0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#endif
return s;
}
/* Compute the SVD of m with the modified Golub-Reinsch algorithm
 * (gsl_linalg_SV_decomp_mod) and verify that (a) the singular values
 * are non-negative and in non-increasing order (NaNs skipped), and
 * (b) U (S Q^T) reproduces m to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_SV_decomp_mod_dim(const gsl_matrix * m, double eps)
{
  const unsigned long nrow = m->size1;
  const unsigned long ncol = m->size2;
  unsigned long r, c;
  int errs = 0;
  double prev;
  gsl_matrix * usv = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * prod = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * qmat = gsl_matrix_alloc(ncol, ncol);
  gsl_matrix * xwork = gsl_matrix_alloc(ncol, ncol);
  gsl_matrix * sqt = gsl_matrix_alloc(ncol, ncol);
  gsl_vector * sing = gsl_vector_alloc(ncol);
  gsl_vector * work = gsl_vector_alloc(ncol);

  gsl_matrix_memcpy(usv, m);
  errs += gsl_linalg_SV_decomp_mod(usv, xwork, qmat, sing, work);

  /* each singular value must be >= 0 and no larger than its predecessor */
  prev = 0.0;
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      if (gsl_isnan(sv))
        {
          continue; /* skip NaNs */
        }
      if (sv < 0)
        {
          errs++;
          printf("singular value %lu = %22.18g < 0\n", r, sv);
        }
      if (r > 0 && sv > prev)
        {
          errs++;
          printf("singular value %lu = %22.18g vs previous %22.18g\n", r, sv, prev);
        }
      prev = sv;
    }

  /* form sqt = S Q^T (scale row i of Q^T by singular value i) */
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      for (c = 0; c < ncol; c++)
        {
          gsl_matrix_set(sqt, r, c, gsl_matrix_get(qmat, c, r) * sv);
        }
    }

  /* prod = U * (S Q^T) should reconstruct the original matrix */
  gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, usv, sqt, 0.0, prod);

  for (r = 0; r < nrow; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          const double got = gsl_matrix_get(prod, r, c);
          const double want = gsl_matrix_get(m, r, c);
          const int bad = check(got, want, eps);
          if (bad)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, got, want);
            }
          errs += bad;
        }
    }

  gsl_vector_free(work);
  gsl_vector_free(sing);
  gsl_matrix_free(usv);
  gsl_matrix_free(prod);
  gsl_matrix_free(qmat);
  gsl_matrix_free(sqt);
  gsl_matrix_free(xwork);
  return errs;
}
/* Driver for the modified-SVD tests: the standard named matrices, then
   exhaustive sweeps over small integer matrices.  Mirrors
   test_SV_decomp() but exercises gsl_linalg_SV_decomp_mod(). */
int test_SV_decomp_mod(void)
{
int f;
int s = 0;
f = test_SV_decomp_mod_dim(m11, 2 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod m(1,1)");
s += f;
f = test_SV_decomp_mod_dim(m51, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod m(5,1)");
s += f;
/* M<N not implemented yet */
#if 0
f = test_SV_decomp_mod_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod m(3,5)");
s += f;
#endif
f = test_SV_decomp_mod_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod m(5,3)");
s += f;
f = test_SV_decomp_mod_dim(moler10, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod moler(10)");
s += f;
f = test_SV_decomp_mod_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod hilbert(2)");
s += f;
f = test_SV_decomp_mod_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod hilbert(3)");
s += f;
f = test_SV_decomp_mod_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod hilbert(4)");
s += f;
f = test_SV_decomp_mod_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod hilbert(12)");
s += f;
f = test_SV_decomp_mod_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod vander(2)");
s += f;
f = test_SV_decomp_mod_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod vander(3)");
s += f;
f = test_SV_decomp_mod_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod vander(4)");
s += f;
f = test_SV_decomp_mod_dim(vander12, 1e-4);
gsl_test(f, " SV_decomp_mod vander(12)");
s += f;
f = test_SV_decomp_mod_dim(row3, 10 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod row3");
s += f;
f = test_SV_decomp_mod_dim(row5, 128 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod row5");
s += f;
f = test_SV_decomp_mod_dim(row12, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod row12");
s += f;
f = test_SV_decomp_mod_dim(inf5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod inf5");
s += f;
f = test_SV_decomp_mod_dim(nan5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod nan5");
s += f;
/* exhaustive sweep over all 2x2 matrices with integer entries in [-2, 2] */
{
double i1, i2, i3, i4;
double lower = -2, upper = 2;
for (i1 = lower; i1 <= upper; i1++)
{
for (i2 = lower; i2 <= upper; i2++)
{
for (i3 = lower; i3 <= upper; i3++)
{
for (i4 = lower; i4 <= upper; i4++)
{
gsl_matrix_set (A22, 0,0, i1);
gsl_matrix_set (A22, 0,1, i2);
gsl_matrix_set (A22, 1,0, i3);
gsl_matrix_set (A22, 1,1, i4);
f = test_SV_decomp_mod_dim(A22, 16 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod (2x2) A=[%g, %g; %g, %g]", i1,i2,i3,i4);
s += f;
}
}
}
}
}
/* exhaustive sweep over all 3x3 matrices with entries in {0, 1},
   enumerated via a 9-digit binary odometer on A33->data */
{
int i;
double carry = 0, lower = 0, upper = 1;
double *a = A33->data;
for (i=0; i<9; i++) {
a[i] = lower;
}
while (carry == 0.0) {
f = test_SV_decomp_mod_dim(A33, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod (3x3) A=[ %g, %g, %g; %g, %g, %g; %g, %g, %g]",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
/* increment */
/* propagate the carry toward a[0]; a non-zero carry after a[0]
   wraps terminates the while loop */
carry=1.0;
for (i=9; carry > 0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#ifdef TEST_SVD_4X4
/* same odometer sweep over all 2^16 4x4 {0,1}-matrices; opt-in only */
{
int i;
double carry = 0, lower = 0, upper = 1;
double *a = A44->data;
for (i=0; i<16; i++) {
a[i] = lower;
}
while (carry == 0.0) {
f = test_SV_decomp_mod_dim(A44, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_mod (4x4) A=[ %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g]",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9],
a[10], a[11], a[12], a[13], a[14], a[15]);
/* increment */
carry=1.0;
for (i=16; carry>0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#endif
return s;
}
/* Compute the SVD of m by one-sided Jacobi rotations
 * (gsl_linalg_SV_decomp_jacobi) and verify that (a) the singular
 * values are non-negative and in non-increasing order (NaNs skipped),
 * and (b) U (S Q^T) reproduces m to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_SV_decomp_jacobi_dim(const gsl_matrix * m, double eps)
{
  const unsigned long nrow = m->size1;
  const unsigned long ncol = m->size2;
  unsigned long r, c;
  int errs = 0;
  double prev;
  gsl_matrix * usv = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * prod = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * qmat = gsl_matrix_alloc(ncol, ncol);
  gsl_matrix * sqt = gsl_matrix_alloc(ncol, ncol);
  gsl_vector * sing = gsl_vector_alloc(ncol);

  gsl_matrix_memcpy(usv, m);
  errs += gsl_linalg_SV_decomp_jacobi(usv, qmat, sing);
  if (errs)
    printf("call returned status = %d\n", errs);

  /* each singular value must be >= 0 and no larger than its predecessor */
  prev = 0.0;
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      if (gsl_isnan(sv))
        {
          continue; /* skip NaNs */
        }
      if (sv < 0)
        {
          errs++;
          printf("singular value %lu = %22.18g < 0\n", r, sv);
        }
      if (r > 0 && sv > prev)
        {
          errs++;
          printf("singular value %lu = %22.18g vs previous %22.18g\n", r, sv, prev);
        }
      prev = sv;
    }

  /* form sqt = S Q^T (scale row i of Q^T by singular value i) */
  for (r = 0; r < ncol; r++)
    {
      const double sv = gsl_vector_get(sing, r);
      for (c = 0; c < ncol; c++)
        {
          gsl_matrix_set(sqt, r, c, gsl_matrix_get(qmat, c, r) * sv);
        }
    }

  /* prod = U * (S Q^T) should reconstruct the original matrix */
  gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, usv, sqt, 0.0, prod);

  for (r = 0; r < nrow; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          const double got = gsl_matrix_get(prod, r, c);
          const double want = gsl_matrix_get(m, r, c);
          const int bad = check(got, want, eps);
          if (bad)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, got, want);
            }
          errs += bad;
        }
    }

  gsl_vector_free(sing);
  gsl_matrix_free(usv);
  gsl_matrix_free(prod);
  gsl_matrix_free(qmat);
  gsl_matrix_free(sqt);
  return errs;
}
/* Driver for the Jacobi SVD tests: the standard named matrices, then
   exhaustive sweeps over small integer matrices (2x2, 3x3, optionally
   4x4, and a sampled 5x5 sweep).  The inf5/nan5 cases are opt-in via
   TEST_JACOBI_INF since the Jacobi algorithm may not terminate on
   non-finite input. */
int test_SV_decomp_jacobi(void)
{
int f;
int s = 0;
f = test_SV_decomp_jacobi_dim(m11, 2 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi m(1,1)");
s += f;
f = test_SV_decomp_jacobi_dim(m51, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi m(5,1)");
s += f;
/* M<N not implemented yet */
#if 0
f = test_SV_decomp_jacobi_dim(m35, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi m(3,5)");
s += f;
#endif
f = test_SV_decomp_jacobi_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi m(5,3)");
s += f;
f = test_SV_decomp_jacobi_dim(moler10, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi moler(10)");
s += f;
f = test_SV_decomp_jacobi_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi hilbert(2)");
s += f;
f = test_SV_decomp_jacobi_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi hilbert(3)");
s += f;
f = test_SV_decomp_jacobi_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi hilbert(4)");
s += f;
f = test_SV_decomp_jacobi_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi hilbert(12)");
s += f;
f = test_SV_decomp_jacobi_dim(vander2, 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi vander(2)");
s += f;
f = test_SV_decomp_jacobi_dim(vander3, 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi vander(3)");
s += f;
f = test_SV_decomp_jacobi_dim(vander4, 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi vander(4)");
s += f;
f = test_SV_decomp_jacobi_dim(vander12, 1e-4);
gsl_test(f, " SV_decomp_jacobi vander(12)");
s += f;
f = test_SV_decomp_jacobi_dim(row3, 10 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi row3");
s += f;
f = test_SV_decomp_jacobi_dim(row5, 128 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi row5");
s += f;
f = test_SV_decomp_jacobi_dim(row12, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi row12");
s += f;
#ifdef TEST_JACOBI_INF
f = test_SV_decomp_jacobi_dim(inf5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi inf5");
s += f;
f = test_SV_decomp_jacobi_dim(nan5, 1024 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi nan5");
s += f;
#endif
/* exhaustive sweep over all 2x2 matrices with integer entries in [-2, 2] */
{
double i1, i2, i3, i4;
double lower = -2, upper = 2;
for (i1 = lower; i1 <= upper; i1++)
{
for (i2 = lower; i2 <= upper; i2++)
{
for (i3 = lower; i3 <= upper; i3++)
{
for (i4 = lower; i4 <= upper; i4++)
{
gsl_matrix_set (A22, 0,0, i1);
gsl_matrix_set (A22, 0,1, i2);
gsl_matrix_set (A22, 1,0, i3);
gsl_matrix_set (A22, 1,1, i4);
f = test_SV_decomp_jacobi_dim(A22, 16 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi (2x2) A=[%g, %g; %g, %g]", i1,i2,i3,i4);
s += f;
}
}
}
}
}
/* exhaustive sweep over all 3x3 matrices with entries in {0, 1},
   enumerated via a 9-digit binary odometer on A33->data */
{
int i;
double carry = 0, lower = 0, upper = 1;
double *a = A33->data;
for (i=0; i<9; i++) {
a[i] = lower;
}
while (carry == 0.0) {
f = test_SV_decomp_jacobi_dim(A33, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi (3x3) A=[ %g, %g, %g; %g, %g, %g; %g, %g, %g]",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]);
/* increment */
/* propagate the carry toward a[0]; a non-zero carry after a[0]
   wraps terminates the while loop */
carry=1.0;
for (i=9; carry > 0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#ifdef TEST_SVD_4X4
/* odometer sweep over all 2^16 4x4 {0,1}-matrices; opt-in only.
   k counts the cases and is echoed in the test message. */
{
int i;
unsigned long k = 0;
double carry = 0, lower = 0, upper = 1;
double *a = A44->data;
for (i=0; i<16; i++) {
a[i] = lower;
}
while (carry == 0.0) {
k++;
f = test_SV_decomp_jacobi_dim(A44, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi (4x4) A=[ %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g; %g, %g, %g, %g] %lu",
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9],
a[10], a[11], a[12], a[13], a[14], a[15], k);
/* increment */
carry=1.0;
for (i=16; carry > 0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
#endif
/* sampled sweep over 5x5 {0,1}-matrices: enumerate all 2^25 cases with
   the odometer but only decompose every 1001st one, to keep runtime
   bounded while still covering a spread of patterns */
{
int i;
unsigned long k = 0;
double carry = 0, lower = 0, upper = 1;
double *a = A55->data;
for (i=0; i<25; i++) {
a[i] = lower;
}
while (carry == 0.0) {
k++;
if (k % 1001 == 0)
{
f = test_SV_decomp_jacobi_dim(A55, 64 * GSL_DBL_EPSILON);
gsl_test(f, " SV_decomp_jacobi (5x5) case=%lu",k);
}
/* increment */
carry=1.0;
for (i=25; carry >0.0 && i>0 && i--;)
{
double v=a[i]+carry;
carry = (v>upper) ? 1.0 : 0.0;
a[i] = (v>upper) ? lower : v;
}
}
}
return s;
}
/* Solve the symmetric positive-definite system m x = b with the
 * Cholesky solver, where b = (1, 2, ..., dim), and compare the
 * solution with the known answer `actual` to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_cholesky_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  const unsigned long dim = m->size1;
  unsigned long k;
  int errs = 0;
  gsl_vector * b = gsl_vector_alloc(dim);
  gsl_matrix * chol = gsl_matrix_alloc(dim, dim);
  gsl_vector * x = gsl_vector_calloc(dim);

  gsl_matrix_memcpy(chol, m);
  for (k = 0; k < dim; k++)
    gsl_vector_set(b, k, k + 1.0);

  errs += gsl_linalg_cholesky_decomp(chol);
  errs += gsl_linalg_cholesky_solve(chol, b, x);

  for (k = 0; k < dim; k++)
    {
      const double got = gsl_vector_get(x, k);
      const int bad = check(got, actual[k], eps);
      if (bad)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, got, actual[k]);
        }
      errs += bad;
    }

  gsl_vector_free(x);
  gsl_matrix_free(chol);
  gsl_vector_free(b);
  return errs;
}
/* Driver for the Cholesky solver tests over Hilbert systems. */
int test_cholesky_solve(void)
{
  int status = 0;
  int r;

  r = test_cholesky_solve_dim(hilb2, hilb2_solution, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_solve hilbert(2)");
  status += r;

  r = test_cholesky_solve_dim(hilb3, hilb3_solution, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_solve hilbert(3)");
  status += r;

  r = test_cholesky_solve_dim(hilb4, hilb4_solution, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_solve hilbert(4)");
  status += r;

  r = test_cholesky_solve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(r, " cholesky_solve hilbert(12)");
  status += r;

  return status;
}
/* Cholesky-factor m in place, split the packed result into L and L^T,
 * and check that L L^T reproduces m to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_cholesky_decomp_dim(const gsl_matrix * m, double eps)
{
  const unsigned long nrow = m->size1;
  const unsigned long ncol = m->size2;
  unsigned long r, c;
  int errs = 0;
  gsl_matrix * chol = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * prod = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * lo = gsl_matrix_alloc(nrow, ncol);
  gsl_matrix * up = gsl_matrix_alloc(ncol, ncol);

  gsl_matrix_memcpy(chol, m);
  errs += gsl_linalg_cholesky_decomp(chol);

  /* split the packed factorization: lo <- L, up <- L^T */
  for (r = 0; r < ncol; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          const double v = gsl_matrix_get(chol, r, c);
          gsl_matrix_set(lo, r, c, r >= c ? v : 0);
          gsl_matrix_set(up, r, c, r <= c ? v : 0);
        }
    }

  /* prod = L L^T should reconstruct the original matrix */
  gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, lo, up, 0.0, prod);

  for (r = 0; r < nrow; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          const double got = gsl_matrix_get(prod, r, c);
          const double want = gsl_matrix_get(m, r, c);
          const int bad = check(got, want, eps);
          if (bad)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, got, want);
            }
          errs += bad;
        }
    }

  gsl_matrix_free(chol);
  gsl_matrix_free(prod);
  gsl_matrix_free(lo);
  gsl_matrix_free(up);
  return errs;
}
/* Driver for the Cholesky factorization tests over Hilbert matrices. */
int test_cholesky_decomp(void)
{
  int status = 0;
  int r;

  r = test_cholesky_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp hilbert(2)");
  status += r;

  r = test_cholesky_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp hilbert(3)");
  status += r;

  r = test_cholesky_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp hilbert(4)");
  status += r;

  r = test_cholesky_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp hilbert(12)");
  status += r;

  return status;
}
/* Invert the SPD matrix m via its Cholesky factorization, then check
 * that m * m^{-1} is the identity to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_cholesky_invert_dim(const gsl_matrix * m, double eps)
{
  const unsigned long n = m->size1;
  unsigned long r, c;
  int errs = 0;
  gsl_matrix * minv = gsl_matrix_alloc(n, n);
  gsl_matrix * prod = gsl_matrix_alloc(n, n);

  gsl_matrix_memcpy(minv, m);
  errs += gsl_linalg_cholesky_decomp(minv);
  errs += gsl_linalg_cholesky_invert(minv);

  /* prod = m * minv, with m treated as symmetric (upper triangle used) */
  gsl_blas_dsymm(CblasLeft, CblasUpper, 1.0, m, minv, 0.0, prod);

  /* prod should be the identity matrix */
  for (r = 0; r < n; ++r)
    {
      for (c = 0; c < n; ++c)
        {
          const double got = gsl_matrix_get(prod, r, c);
          const double want = (r == c) ? 1.0 : 0.0;
          const int bad = check(got, want, eps);
          if (bad)
            printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", n, n, r, c, got, want);
          errs += bad;
        }
    }

  gsl_matrix_free(minv);
  gsl_matrix_free(prod);
  return errs;
}
/* Driver for the Cholesky inversion tests over Hilbert matrices. */
int
test_cholesky_invert(void)
{
  int status = 0;
  int r;

  r = test_cholesky_invert_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_invert hilbert(2)");
  status += r;

  r = test_cholesky_invert_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_invert hilbert(3)");
  status += r;

  r = test_cholesky_invert_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_invert hilbert(4)");
  status += r;

  return status;
}
/* Factor m as L D L^T with gsl_linalg_cholesky_decomp_unit() (L has a
   unit diagonal, D is returned in dv), then rebuild L * D * L^T and
   compare it against m to tolerance eps.  Returns the failure count
   (plus any nonzero GSL status codes). */
int
test_cholesky_decomp_unit_dim(const gsl_matrix * m, double eps)
{
int s = 0;
const unsigned long M = m->size1;
const unsigned long N = m->size2;
unsigned long i,j;
gsl_matrix * v = gsl_matrix_alloc(M,N);
gsl_matrix * a = gsl_matrix_alloc(M,N);
gsl_matrix * l = gsl_matrix_alloc(M,N);
gsl_matrix * lt = gsl_matrix_alloc(N,N);
gsl_matrix * dm = gsl_matrix_alloc(M,N);
gsl_vector * dv = gsl_vector_alloc(M);
gsl_matrix_memcpy(v,m);
s += gsl_linalg_cholesky_decomp_unit(v, dv);
/* debug dump of the factorization, normally disabled */
/*
for(i = 0; i < M; i++)
{
for(j = 0; j < N; j++)
{
printf("v[%lu,%lu]: %22.18e\n", i,j, gsl_matrix_get(v, i, j));
}
}
for(i = 0; i < M; i++)
{
printf("d[%lu]: %22.18e\n", i, gsl_vector_get(dv, i));
}
*/
/* put L and transpose(L) into separate matrices */
for(i = 0; i < N ; i++)
{
for(j = 0; j < N; j++)
{
const double vij = gsl_matrix_get(v, i, j);
gsl_matrix_set (l, i, j, i>=j ? vij : 0);
gsl_matrix_set (lt, i, j, i<=j ? vij : 0);
}
}
/* put D into its own matrix */
gsl_matrix_set_zero(dm);
for(i = 0; i < M; ++i) gsl_matrix_set(dm, i, i, gsl_vector_get(dv, i));
/* compute a = L * D * transpose(L); uses v for temp space */
gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, dm, lt, 0.0, v);
gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, l, v, 0.0, a);
/* the reconstruction should match the original matrix element-wise */
for(i = 0; i < M; i++)
{
for(j = 0; j < N; j++)
{
const double aij = gsl_matrix_get(a, i, j);
const double mij = gsl_matrix_get(m, i, j);
int foo = check(aij, mij, eps);
if(foo)
{
printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
}
s += foo;
}
}
gsl_vector_free(dv);
gsl_matrix_free(dm);
gsl_matrix_free(lt);
gsl_matrix_free(l);
gsl_matrix_free(v);
gsl_matrix_free(a);
return s;
}
/* Driver for the unit-diagonal (L D L^T) Cholesky tests. */
int test_cholesky_decomp_unit(void)
{
  int status = 0;
  int r;

  r = test_cholesky_decomp_unit_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp_unit hilbert(2)");
  status += r;

  r = test_cholesky_decomp_unit_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp_unit hilbert(3)");
  status += r;

  r = test_cholesky_decomp_unit_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp_unit hilbert(4)");
  status += r;

  r = test_cholesky_decomp_unit_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " cholesky_decomp_unit hilbert(12)");
  status += r;

  return status;
}
/* Solve the complex Hermitian positive-definite system m x = b, where
 * b = (1, 2, ..., dim) (purely real), and compare both the real and
 * imaginary parts of each component with `actual` to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_choleskyc_solve_dim(const gsl_matrix_complex * m, const gsl_vector_complex * actual, double eps)
{
  const unsigned long dim = m->size1;
  unsigned long k;
  int errs = 0;
  gsl_complex z;
  gsl_vector_complex * b = gsl_vector_complex_alloc(dim);
  gsl_matrix_complex * chol = gsl_matrix_complex_alloc(dim, dim);
  gsl_vector_complex * x = gsl_vector_complex_calloc(dim);

  GSL_SET_IMAG(&z, 0.0);
  gsl_matrix_complex_memcpy(chol, m);
  for (k = 0; k < dim; k++)
    {
      GSL_SET_REAL(&z, k + 1.0);
      gsl_vector_complex_set(b, k, z);
    }

  errs += gsl_linalg_complex_cholesky_decomp(chol);
  errs += gsl_linalg_complex_cholesky_solve(chol, b, x);

  for (k = 0; k < dim; k++)
    {
      gsl_complex got = gsl_vector_complex_get(x, k);
      gsl_complex want = gsl_vector_complex_get(actual, k);
      int bad_re = check(GSL_REAL(got), GSL_REAL(want), eps);
      int bad_im = check(GSL_IMAG(got), GSL_IMAG(want), eps);
      if (bad_re || bad_im)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, GSL_REAL(got), GSL_REAL(want));
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, GSL_IMAG(got), GSL_IMAG(want));
        }
      errs += bad_re + bad_im;
    }

  gsl_vector_complex_free(x);
  gsl_matrix_complex_free(chol);
  gsl_vector_complex_free(b);
  return errs;
} /* test_choleskyc_solve_dim() */
/* Driver for the complex Cholesky solver test: a single 7x7 Hermitian
   positive-definite system with a precomputed solution.  The data
   arrays interleave (real, imag) pairs, row-major. */
int
test_choleskyc_solve(void)
{
/* 7x7 Hermitian positive-definite matrix, (re,im) pairs */
double data7[] = { 66,0, 0,64, 126,63, 124,-62, 61,-61, 60,60, 0,-59,
0,-64, 65,0, 62,-124, -61,-122, -60,-60, 59,-59, -58,0,
126,-63, 62,124, 308,0, 180,-240, 59,-177, 174,58, -57,-114,
124,62, -61,122, 180,240, 299,0, 174,-58, 57,171, 56,-112,
61,61, -60,60, 59,177, 174,58, 119,0, 0,112, 55,-55,
60,-60, 59,59, 174,-58, 57,-171, 0,-112, 116,0, -54,-54,
0,59, -58,0, -57,114, 56,112, 55,55, -54,54, 60,0 };
/* known solution of data7 * x = (1, 2, ..., 7), (re,im) pairs */
double data7_sol[] = { -0.524944196428570,0.209123883928571,
1.052873883928572,0.712444196428571,
0.117568824404762,0.443191964285714,
0.412862723214286,-0.356696428571429,
0.815931919642858,-0.265820312500000,
0.777929687500000,0.119484747023810,
1.058733258928571,-0.132087053571429 };
gsl_matrix_complex_view cp7 = gsl_matrix_complex_view_array(data7, 7, 7);
gsl_vector_complex_view cp7_sol = gsl_vector_complex_view_array(data7_sol, 7);
int f;
int s = 0;
f = test_choleskyc_solve_dim(&cp7.matrix, &cp7_sol.vector, 1024.0 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " complex_cholesky_solve complex(7)");
s += f;
return s;
} /* test_choleskyc_solve() */
/* Cholesky-factor the complex Hermitian matrix m, extract L and its
 * conjugate transpose L^H from the packed result, and verify that
 * L L^H reproduces m (real and imaginary parts separately) to eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_choleskyc_decomp_dim(const gsl_matrix_complex * m, double eps)
{
  const unsigned long nrow = m->size1;
  const unsigned long ncol = m->size2;
  unsigned long r, c;
  int errs = 0;
  gsl_matrix_complex * chol = gsl_matrix_complex_alloc(nrow, ncol);
  gsl_matrix_complex * prod = gsl_matrix_complex_alloc(nrow, ncol);
  gsl_matrix_complex * lo = gsl_matrix_complex_alloc(nrow, ncol);
  gsl_matrix_complex * hi = gsl_matrix_complex_alloc(ncol, ncol);

  gsl_matrix_complex_memcpy(chol, m);
  gsl_matrix_complex_set_zero(lo);
  gsl_matrix_complex_set_zero(hi);

  errs += gsl_linalg_complex_cholesky_decomp(chol);

  /* copy the lower triangle into lo, its conjugate transpose into hi */
  for (r = 0; r < ncol; r++)
    {
      for (c = 0; c <= r; c++)
        {
          gsl_complex v = gsl_matrix_complex_get(chol, r, c);
          gsl_matrix_complex_set(lo, r, c, v);
          gsl_matrix_complex_set(hi, c, r, gsl_complex_conjugate(v));
        }
    }

  /* prod = L L^H should reconstruct the original matrix */
  gsl_blas_zgemm(CblasNoTrans,
                 CblasNoTrans,
                 GSL_COMPLEX_ONE,
                 lo,
                 hi,
                 GSL_COMPLEX_ZERO,
                 prod);

  for (r = 0; r < nrow; r++)
    {
      for (c = 0; c < ncol; c++)
        {
          gsl_complex got = gsl_matrix_complex_get(prod, r, c);
          gsl_complex want = gsl_matrix_complex_get(m, r, c);
          int bad_re = check(GSL_REAL(got), GSL_REAL(want), eps);
          int bad_im = check(GSL_IMAG(got), GSL_IMAG(want), eps);
          if (bad_re || bad_im)
            {
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, GSL_REAL(got), GSL_REAL(want));
              printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", nrow, ncol, r, c, GSL_IMAG(got), GSL_IMAG(want));
            }
          errs += bad_re + bad_im;
        }
    }

  gsl_matrix_complex_free(chol);
  gsl_matrix_complex_free(prod);
  gsl_matrix_complex_free(lo);
  gsl_matrix_complex_free(hi);
  return errs;
}
/* Driver for the complex Cholesky factorization test: one 3x3
   Hermitian positive-definite matrix, stored as (re,im) pairs. */
int
test_choleskyc_decomp(void)
{
int f;
int s = 0;
/* 3x3 Hermitian positive-definite matrix, (re,im) pairs, row-major */
double dat3[] = { 59.75,0, 49.25,172.25, 66.75,-162.75,
49.25,-172.25, 555.5,0, -429,-333.5,
66.75,162.75, -429,333.5, 536.5,0 };
gsl_matrix_complex_view p3 = gsl_matrix_complex_view_array(dat3, 3, 3);
f = test_choleskyc_decomp_dim(&p3.matrix, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " complex_cholesky_decomp complex(3)");
s += f;
return s;
}
/* Solve m x = b in place with the Householder solver
 * (gsl_linalg_HH_svx), where b = (1, 2, ..., dim), and compare the
 * solution with the known answer `actual` to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes).
 *
 * Fix: the original version allocated (and freed) a gsl_permutation
 * `perm` and a gsl_vector `d` that were never used by the test —
 * those dead allocations have been removed. */
int
test_HH_solve_dim(const gsl_matrix * m, const double * actual, double eps)
{
  int s = 0;
  unsigned long i, dim = m->size1;
  gsl_matrix * hh = gsl_matrix_alloc(dim,dim);
  gsl_vector * x = gsl_vector_alloc(dim);
  gsl_matrix_memcpy(hh,m);
  /* HH_svx solves in place: x holds b on entry and the solution on exit */
  for(i=0; i<dim; i++) gsl_vector_set(x, i, i+1.0);
  s += gsl_linalg_HH_svx(hh, x);
  for(i=0; i<dim; i++) {
    int foo = check(gsl_vector_get(x, i),actual[i],eps);
    if( foo) {
      printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
    }
    s += foo;
  }
  gsl_vector_free(x);
  gsl_matrix_free(hh);
  return s;
}
/* Driver for the Householder solver tests over Hilbert and
 * Vandermonde systems with known solutions. */
int test_HH_solve(void)
{
  int status = 0;
  int r;

  r = test_HH_solve_dim(hilb2, hilb2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve hilbert(2)");
  status += r;

  r = test_HH_solve_dim(hilb3, hilb3_solution, 128.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve hilbert(3)");
  status += r;

  r = test_HH_solve_dim(hilb4, hilb4_solution, 2.0 * 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve hilbert(4)");
  status += r;

  r = test_HH_solve_dim(hilb12, hilb12_solution, 0.5);
  gsl_test(r, " HH_solve hilbert(12)");
  status += r;

  r = test_HH_solve_dim(vander2, vander2_solution, 8.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve vander(2)");
  status += r;

  r = test_HH_solve_dim(vander3, vander3_solution, 64.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve vander(3)");
  status += r;

  r = test_HH_solve_dim(vander4, vander4_solution, 1024.0 * GSL_DBL_EPSILON);
  gsl_test(r, " HH_solve vander(4)");
  status += r;

  r = test_HH_solve_dim(vander12, vander12_solution, 0.05);
  gsl_test(r, " HH_solve vander(12)");
  status += r;

  return status;
}
/* Solve a symmetric tridiagonal system with constant diagonal d and
 * constant off-diagonal od, rhs = (1, 2, ..., dim), and compare the
 * solution with the known answer `actual` to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_TDS_solve_dim(unsigned long dim, double d, double od, const double * actual, double eps)
{
  unsigned long k;
  int errs = 0;
  gsl_vector * offdiag = vector_alloc(dim-1);
  gsl_vector * diag = vector_alloc(dim);
  gsl_vector * rhs = vector_alloc(dim);
  gsl_vector * x = vector_alloc(dim);

  for (k = 0; k < dim; k++)
    {
      gsl_vector_set(diag, k, d);
      gsl_vector_set(rhs, k, k + 1.0);
    }
  for (k = 0; k + 1 < dim; k++)
    {
      gsl_vector_set(offdiag, k, od);
    }

  errs += gsl_linalg_solve_symm_tridiag(diag, offdiag, rhs, x);

  for (k = 0; k < dim; k++)
    {
      const double got = gsl_vector_get(x, k);
      const int bad = check(got, actual[k], eps);
      if (bad)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, got, actual[k]);
        }
      errs += bad;
    }

  vector_free(x);
  vector_free(rhs);
  vector_free(diag);
  vector_free(offdiag);
  return errs;
}
/* Driver for the symmetric tridiagonal solver: small systems with
 * hand-computed exact solutions. */
int test_TDS_solve(void)
{
  int status = 0;
  int r;

  {
    double expected[] = {0.0, 2.0};
    r = test_TDS_solve_dim(2, 1.0, 0.5, expected, 8.0 * GSL_DBL_EPSILON);
    gsl_test(r, " solve_TDS dim=2 A");
    status += r;
  }

  {
    double expected[] = {3.0/8.0, 15.0/8.0};
    r = test_TDS_solve_dim(2, 1.0, 1.0/3.0, expected, 8.0 * GSL_DBL_EPSILON);
    gsl_test(r, " solve_TDS dim=2 B");
    status += r;
  }

  {
    double expected[] = {5.0/8.0, 9.0/8.0, 2.0, 15.0/8.0, 35.0/8.0};
    r = test_TDS_solve_dim(5, 1.0, 1.0/3.0, expected, 8.0 * GSL_DBL_EPSILON);
    gsl_test(r, " solve_TDS dim=5");
    status += r;
  }

  return status;
}
/* Solve one cyclic symmetric tridiagonal system with diagonal d[],
 * off-diagonal od[] (whose last entry couples the first and last
 * unknowns) and right-hand side r[], then compare the solution with
 * the known answer `actual` to tolerance eps.
 * Returns the failure count (plus any nonzero GSL status codes). */
int
test_TDS_cyc_solve_one(const unsigned long dim,
const double * d, const double * od,
const double * r, const double * actual, double eps)
{
  unsigned long k;
  int errs = 0;
  gsl_vector * offdiag = vector_alloc(dim);
  gsl_vector * diag = vector_alloc(dim);
  gsl_vector * rhs = vector_alloc(dim);
  gsl_vector * x = vector_alloc(dim);

  for (k = 0; k < dim; k++)
    {
      gsl_vector_set(diag, k, d[k]);
      gsl_vector_set(rhs, k, r[k]);
      gsl_vector_set(offdiag, k, od[k]);
    }

  errs += gsl_linalg_solve_symm_cyc_tridiag(diag, offdiag, rhs, x);

  for (k = 0; k < dim; k++)
    {
      const double got = gsl_vector_get(x, k);
      const int bad = check(got, actual[k], eps);
      if (bad)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, got, actual[k]);
        }
      errs += bad;
    }

  vector_free(x);
  vector_free(rhs);
  vector_free(diag);
  vector_free(offdiag);
  return errs;
}
/* Driver for the cyclic symmetric tridiagonal solver.  The dim=1 and
   dim=2 cases are degenerate for the cyclic formulation and are only
   compiled when SUPPORT_UNDERSIZE_CYC is defined. */
int test_TDS_cyc_solve(void)
{
int f;
int s = 0;
#ifdef SUPPORT_UNDERSIZE_CYC
{
unsigned long dim = 1;
double diag[] = { 2 };
double offdiag[] = { 3 };
double rhs[] = { 7 };
double actual[] = { 3.5 };
f = test_TDS_cyc_solve_one(dim, diag, offdiag, rhs, actual, 28.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDS_cyc dim=%lu A", dim);
s += f;
}
{
unsigned long dim = 2;
double diag[] = { 1, 2 };
double offdiag[] = { 3, 4 };
double rhs[] = { 7, -7 };
double actual[] = { -5, 4 };
f = test_TDS_cyc_solve_one(dim, diag, offdiag, rhs, actual, 28.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDS_cyc dim=%lu A", dim);
s += f;
}
#endif
{
unsigned long dim = 3;
double diag[] = { 1, 1, 1 };
double offdiag[] = { 3, 3, 3 };
double rhs[] = { 7, -7, 7 };
double actual[] = { -2, 5, -2 };
f = test_TDS_cyc_solve_one(dim, diag, offdiag, rhs, actual, 28.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDS_cyc dim=%lu A", dim);
s += f;
}
{
unsigned long dim = 5;
double diag[] = { 4, 2, 1, 2, 4 };
double offdiag[] = { 1, 1, 1, 1, 1 };
double rhs[] = { 30, -24, 3, 21, -30 };
double actual[] = { 12, 3, -42, 42, -21 };
/* f = test_TDS_cyc_solve_one(dim, diag, offdiag, rhs, actual, 7.0 * GSL_DBL_EPSILON);
FIXME: bad accuracy */
/* tolerance relaxed from the commented-out 7 eps because the solver
   loses accuracy on this case */
f = test_TDS_cyc_solve_one(dim, diag, offdiag, rhs, actual, 40.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDS_cyc dim=%lu B", dim);
s += f;
}
return s;
}
/* Solve one general (nonsymmetric) tridiagonal system with constant bands:
 * diagonal d, super-diagonal a, sub-diagonal b, and rhs = (1, 2, ..., dim).
 * The solution is compared element-wise against `actual` with tolerance
 * `eps`; the number of failures is returned and mismatches are printed. */
int
test_TDN_solve_dim(unsigned long dim, double d, double a, double b, const double * actual, double eps)
{
int s = 0;
unsigned long i;
/* The off-diagonal bands of a non-cyclic system have dim-1 entries. */
gsl_vector * abovediag = vector_alloc(dim-1);
gsl_vector * belowdiag = vector_alloc(dim-1);
gsl_vector * diag = vector_alloc(dim);
gsl_vector * rhs = vector_alloc(dim);
gsl_vector * x = vector_alloc(dim);
for(i=0; i<dim; i++) {
gsl_vector_set(diag, i, d);
gsl_vector_set(rhs, i, i + 1.0);
}
for(i=0; i<dim-1; i++) {
gsl_vector_set(abovediag, i, a);
gsl_vector_set(belowdiag, i, b);
}
/* A nonzero solver status counts as a failure too. */
s += gsl_linalg_solve_tridiag(diag, abovediag, belowdiag, rhs, x);
for(i=0; i<dim; i++) {
double si = gsl_vector_get(x, i);
double ai = actual[i];
int foo = check(si, ai, eps);
if(foo) {
printf("%3lu[%lu]: %22.18g %22.18g\n", dim, i, gsl_vector_get(x, i), actual[i]);
}
s += foo;
}
vector_free(x);
vector_free(rhs);
vector_free(diag);
vector_free(abovediag);
vector_free(belowdiag);
return s;
}
/* Driver for the general (nonsymmetric) tridiagonal solver tests.
 * Fix: the first two systems are 3x3 but their gsl_test() labels said
 * "dim=2", contradicting the dimension actually passed to
 * test_TDN_solve_dim(); the labels now report dim=3. */
int test_TDN_solve(void)
{
int f;
int s = 0;
double actual[16];
actual[0] = -7.0/3.0;
actual[1] = 5.0/3.0;
actual[2] = 4.0/3.0;
f = test_TDN_solve_dim(3, 1.0, 2.0, 1.0, actual, 2.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDN dim=3 A");
s += f;
actual[0] = 0.75;
actual[1] = 0.75;
actual[2] = 2.625;
f = test_TDN_solve_dim(3, 1.0, 1.0/3.0, 1.0/2.0, actual, 2.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDN dim=3 B");
s += f;
actual[0] = 99.0/140.0;
actual[1] = 41.0/35.0;
actual[2] = 19.0/10.0;
actual[3] = 72.0/35.0;
actual[4] = 139.0/35.0;
f = test_TDN_solve_dim(5, 1.0, 1.0/4.0, 1.0/2.0, actual, 35.0/8.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDN dim=5");
s += f;
return s;
}
/* Build a *cyclic* nonsymmetric tridiagonal system with constant diagonal d,
 * constant super-diagonal a and constant sub-diagonal b, and right hand side
 * (1, 2, ..., dim).  Solve it with gsl_linalg_solve_cyc_tridiag() and
 * compare the solution with `actual` to within `eps`.  Returns the number
 * of failures; each mismatching element is printed. */
int
test_TDN_cyc_solve_dim(unsigned long dim, double d, double a, double b, const double * actual, double eps)
{
  int failures = 0;
  unsigned long k;
  /* In the cyclic case all bands have dim entries (the corners wrap). */
  gsl_vector * upper = vector_alloc(dim);
  gsl_vector * lower = vector_alloc(dim);
  gsl_vector * middle = vector_alloc(dim);
  gsl_vector * b_vec = vector_alloc(dim);
  gsl_vector * solution = vector_alloc(dim);

  for (k = 0; k < dim; k++)
    {
      gsl_vector_set(middle, k, d);
      gsl_vector_set(b_vec, k, k + 1.0);
      gsl_vector_set(upper, k, a);
      gsl_vector_set(lower, k, b);
    }

  /* A nonzero solver status counts as a failure too. */
  failures += gsl_linalg_solve_cyc_tridiag(middle, upper, lower, b_vec, solution);

  for (k = 0; k < dim; k++)
    {
      double got = gsl_vector_get(solution, k);
      double want = actual[k];
      int bad = check(got, want, eps);
      if (bad)
        {
          printf("%3lu[%lu]: %22.18g %22.18g\n", dim, k, got, want);
        }
      failures += bad;
    }

  vector_free(solution);
  vector_free(b_vec);
  vector_free(middle);
  vector_free(upper);
  vector_free(lower);
  return failures;
}
/* Driver for the cyclic nonsymmetric tridiagonal solver tests.
 * Fix: the first system is 3x3 but its gsl_test() label said "dim=2",
 * contradicting the dimension actually passed to
 * test_TDN_cyc_solve_dim(); the label now reports dim=3. */
int test_TDN_cyc_solve(void)
{
int f;
int s = 0;
double actual[16];
actual[0] = 3.0/2.0;
actual[1] = -1.0/2.0;
actual[2] = 1.0/2.0;
f = test_TDN_cyc_solve_dim(3, 1.0, 2.0, 1.0, actual, 32.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDN_cyc dim=3 A");
s += f;
actual[0] = -5.0/22.0;
actual[1] = -3.0/22.0;
actual[2] = 29.0/22.0;
actual[3] = -9.0/22.0;
actual[4] = 43.0/22.0;
f = test_TDN_cyc_solve_dim(5, 3.0, 2.0, 1.0, actual, 66.0 * GSL_DBL_EPSILON);
gsl_test(f, " solve_TDN_cyc dim=5");
s += f;
return s;
}
/* Verify the bidiagonalization of matrix m: decompose a copy of m with
 * gsl_linalg_bidiag_decomp(), unpack U, V and the bidiagonal band, then
 * reconstruct U B V^T by brute force and compare it element-wise against
 * the original m with tolerance eps.  Returns the failure count; the
 * caller retains ownership of m. */
int
test_bidiag_decomp_dim(const gsl_matrix * m, double eps)
{
int s = 0;
unsigned long i,j,k,r, M = m->size1, N = m->size2;
gsl_matrix * A = gsl_matrix_alloc(M,N);
gsl_matrix * a = gsl_matrix_alloc(M,N);
gsl_matrix * b = gsl_matrix_alloc(N,N);
gsl_matrix * u = gsl_matrix_alloc(M,N);
gsl_matrix * v = gsl_matrix_alloc(N,N);
gsl_vector * tau1 = gsl_vector_alloc(N);
gsl_vector * tau2 = gsl_vector_alloc(N-1);
gsl_vector * d = gsl_vector_alloc(N);
gsl_vector * sd = gsl_vector_alloc(N-1);
/* Decompose a scratch copy; A is overwritten with Householder data. */
gsl_matrix_memcpy(A,m);
s += gsl_linalg_bidiag_decomp(A, tau1, tau2);
s += gsl_linalg_bidiag_unpack(A, tau1, u, tau2, v, d, sd);
/* Assemble the explicit upper-bidiagonal matrix B from d and sd. */
gsl_matrix_set_zero(b);
for (i = 0; i < N; i++) gsl_matrix_set(b, i,i, gsl_vector_get(d,i));
for (i = 0; i < N-1; i++) gsl_matrix_set(b, i,i+1, gsl_vector_get(sd,i));
/* Compute A = U B V^T */
for (i = 0; i < M ; i++)
{
for (j = 0; j < N; j++)
{
double sum = 0;
for (k = 0; k < N; k++)
{
for (r = 0; r < N; r++)
{
sum += gsl_matrix_get(u, i, k) * gsl_matrix_get (b, k, r)
* gsl_matrix_get(v, j, r);
}
}
gsl_matrix_set (a, i, j, sum);
}
}
/* Element-wise comparison of the reconstruction against the input. */
for(i=0; i<M; i++) {
for(j=0; j<N; j++) {
double aij = gsl_matrix_get(a, i, j);
double mij = gsl_matrix_get(m, i, j);
int foo = check(aij, mij, eps);
if(foo) {
printf("(%3lu,%3lu)[%lu,%lu]: %22.18g %22.18g\n", M, N, i,j, aij, mij);
}
s += foo;
}
}
gsl_matrix_free(A);
gsl_matrix_free(a);
gsl_matrix_free(u);
gsl_matrix_free(v);
gsl_matrix_free(b);
gsl_vector_free(tau1);
gsl_vector_free(tau2);
gsl_vector_free(d);
gsl_vector_free(sd);
return s;
}
int test_bidiag_decomp(void)
{
int f;
int s = 0;
f = test_bidiag_decomp_dim(m53, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp m(5,3)");
s += f;
f = test_bidiag_decomp_dim(m97, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp m(9,7)");
s += f;
f = test_bidiag_decomp_dim(hilb2, 2 * 8.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp hilbert(2)");
s += f;
f = test_bidiag_decomp_dim(hilb3, 2 * 64.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp hilbert(3)");
s += f;
f = test_bidiag_decomp_dim(hilb4, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp hilbert(4)");
s += f;
f = test_bidiag_decomp_dim(hilb12, 2 * 1024.0 * GSL_DBL_EPSILON);
gsl_test(f, " bidiag_decomp hilbert(12)");
s += f;
return s;
}
/* GSL error handler installed by main(): intentionally silent so that the
 * tests can exercise failure paths without aborting the process.  Set
 * `verbose` to a nonzero value to echo every trapped error while
 * debugging. */
void
my_error_handler (const char *reason, const char *file, int line, int err)
{
  const int verbose = 0;

  if (verbose)
    {
      printf ("(caught [%s:%d: %s (%d)])\n", file, line, reason, err);
    }
}
/* Test driver: configures the IEEE environment, installs a silent error
 * handler, builds the shared fixture matrices, runs every linear-algebra
 * test suite through gsl_test(), frees the fixtures and exits with the
 * status from gsl_test_summary(). */
int main(void)
{
gsl_ieee_env_setup ();
/* Replace the default (aborting) handler so tests may probe error paths. */
gsl_set_error_handler (&my_error_handler);
/* General, singular, Hilbert, Vandermonde, Moler, complex and row-pattern
 * fixture matrices shared by the individual test suites. */
m11 = create_general_matrix(1,1);
m51 = create_general_matrix(5,1);
m35 = create_general_matrix(3,5);
m53 = create_general_matrix(5,3);
m97 = create_general_matrix(9,7);
s35 = create_singular_matrix(3,5);
s53 = create_singular_matrix(5,3);
hilb2 = create_hilbert_matrix(2);
hilb3 = create_hilbert_matrix(3);
hilb4 = create_hilbert_matrix(4);
hilb12 = create_hilbert_matrix(12);
vander2 = create_vandermonde_matrix(2);
vander3 = create_vandermonde_matrix(3);
vander4 = create_vandermonde_matrix(4);
vander12 = create_vandermonde_matrix(12);
moler10 = create_moler_matrix(10);
c7 = create_complex_matrix(7);
row3 = create_row_matrix(3,3);
row5 = create_row_matrix(5,5);
row12 = create_row_matrix(12,12);
A22 = create_2x2_matrix (0.0, 0.0, 0.0, 0.0);
A33 = gsl_matrix_alloc(3,3);
A44 = gsl_matrix_alloc(4,4);
A55 = gsl_matrix_alloc(5,5);
/* Special-value fixtures: diagonal matrices with an Inf (resp. NaN)
 * entry, and matrices scaled down to GSL_DBL_MIN. */
inf5 = create_diagonal_matrix (inf5_data, 5);
gsl_matrix_set(inf5, 3, 3, GSL_POSINF);
nan5 = create_diagonal_matrix (inf5_data, 5);
gsl_matrix_set(nan5, 3, 3, GSL_NAN);
dblmin3 = create_general_matrix (3, 3);
gsl_matrix_scale(dblmin3, GSL_DBL_MIN);
dblmin5 = create_general_matrix (5, 5);
gsl_matrix_scale(dblmin5, GSL_DBL_MIN);
/* Matmult now obsolete */
#ifdef MATMULT
gsl_test(test_matmult(), "Matrix Multiply");
gsl_test(test_matmult_mod(), "Matrix Multiply with Modification");
#endif
/* Run every suite; gsl_test() records the result under the given label. */
gsl_test(test_bidiag_decomp(), "Bidiagonal Decomposition");
gsl_test(test_LU_solve(), "LU Decomposition and Solve");
gsl_test(test_LUc_solve(), "Complex LU Decomposition and Solve");
gsl_test(test_QR_decomp(), "QR Decomposition");
gsl_test(test_QR_solve(), "QR Solve");
gsl_test(test_LQ_solve(), "LQ Solve");
gsl_test(test_PTLQ_solve(), "PTLQ Solve");
gsl_test(test_LQ_decomp(), "LQ Decomposition");
gsl_test(test_LQ_LQsolve(), "LQ LQ Solve");
gsl_test(test_LQ_lssolve(), "LQ LS Solve");
gsl_test(test_LQ_update(), "LQ Rank-1 Update");
gsl_test(test_QRPT_decomp(), "PTLQ Decomposition");
gsl_test(test_PTLQ_solve(), "PTLQ Solve");
gsl_test(test_QR_QRsolve(), "QR QR Solve");
gsl_test(test_QR_lssolve(), "QR LS Solve");
gsl_test(test_QR_update(), "QR Rank-1 Update");
gsl_test(test_QRPT_decomp(), "QRPT Decomposition");
gsl_test(test_QRPT_solve(), "QRPT Solve");
gsl_test(test_QRPT_QRsolve(), "QRPT QR Solve");
gsl_test(test_QRPT_update(), "QRPT Rank-1 Update");
gsl_test(test_SV_decomp(), "Singular Value Decomposition");
gsl_test(test_SV_decomp_jacobi(), "Singular Value Decomposition (Jacobi)");
gsl_test(test_SV_decomp_mod(), "Singular Value Decomposition (Mod)");
gsl_test(test_SV_solve(), "SVD Solve");
gsl_test(test_cholesky_decomp(), "Cholesky Decomposition");
gsl_test(test_cholesky_decomp_unit(), "Cholesky Decomposition [unit triangular]");
gsl_test(test_cholesky_solve(), "Cholesky Solve");
gsl_test(test_cholesky_invert(), "Cholesky Inverse");
gsl_test(test_choleskyc_decomp(), "Complex Cholesky Decomposition");
gsl_test(test_choleskyc_solve(), "Complex Cholesky Solve");
gsl_test(test_HH_solve(), "Householder solve");
gsl_test(test_TDS_solve(), "Tridiagonal symmetric solve");
gsl_test(test_TDS_cyc_solve(), "Tridiagonal symmetric cyclic solve");
gsl_test(test_TDN_solve(), "Tridiagonal nonsymmetric solve");
gsl_test(test_TDN_cyc_solve(), "Tridiagonal nonsymmetric cyclic solve");
/* Release every fixture allocated above. */
gsl_matrix_free(m11);
gsl_matrix_free(m35);
gsl_matrix_free(m51);
gsl_matrix_free(m53);
gsl_matrix_free(m97);
gsl_matrix_free(s35);
gsl_matrix_free(s53);
gsl_matrix_free(hilb2);
gsl_matrix_free(hilb3);
gsl_matrix_free(hilb4);
gsl_matrix_free(hilb12);
gsl_matrix_free(vander2);
gsl_matrix_free(vander3);
gsl_matrix_free(vander4);
gsl_matrix_free(vander12);
gsl_matrix_free(moler10);
gsl_matrix_complex_free(c7);
gsl_matrix_free(row3);
gsl_matrix_free(row5);
gsl_matrix_free(row12);
gsl_matrix_free(A22);
gsl_matrix_free(A33);
gsl_matrix_free(A44);
gsl_matrix_free(A55);
gsl_matrix_free (inf5);
gsl_matrix_free (nan5);
gsl_matrix_free (dblmin3);
gsl_matrix_free (dblmin5);
exit (gsl_test_summary());
}
| 26.745303 | 117 | 0.608123 | [
"vector"
] |
d0a19873ab89a6a2b920cf59d1366d7c7944043f | 4,358 | c | C | vbuf.c | eeeeeta/inebriated-c-edition | 1ceed378a912c0773b866cfe4b387d7191cd6cc0 | [
"Unlicense"
] | null | null | null | vbuf.c | eeeeeta/inebriated-c-edition | 1ceed378a912c0773b866cfe4b387d7191cd6cc0 | [
"Unlicense"
] | null | null | null | vbuf.c | eeeeeta/inebriated-c-edition | 1ceed378a912c0773b866cfe4b387d7191cd6cc0 | [
"Unlicense"
] | null | null | null | /*
* variable data structures, for better memory management (tm)
*/
#include <stdlib.h>
#include <wchar.h>
#include <string.h>
#include <stdbool.h>
#include "markov.h"
#include "vbuf.h"
/**
 * Allocates and initialises a database holding two dynamic pointer arrays.
 * Fix: the original dereferenced a NULL pointer when malloc() failed and
 * ignored DPA_init() failures; all failure paths now return NULL without
 * leaking.
 * Returns a pointer to the new database, or NULL on allocation failure.
 */
extern struct database *db_init(void) {
    struct database *db = malloc(sizeof(struct database));
    if (db == NULL) return NULL;
    db->objs = DPA_init();
    db->sses = DPA_init();
    if (db->objs == NULL || db->sses == NULL) {
        /* Partial failure: release whichever array was created. */
        if (db->objs != NULL) DPA_free(db->objs);
        if (db->sses != NULL) DPA_free(db->sses);
        free(db);
        return NULL;
    }
    return db;
}
/**
* Initialises a dynamic pointer array object.
* Returns a pointer to said object, or NULL if there was an error.
*/
/* Allocate a dynamic pointer array with DPA_START_SIZE zeroed slots.
 * Fix: the original leaked the DPA header when the calloc() of the key
 * array failed; it is now freed on that path.
 * Returns the new DPA, or NULL on allocation failure. */
extern DPA *DPA_init(void) {
    DPA *dpa = malloc(sizeof(DPA));
    if (dpa == NULL) return NULL;
    dpa->keys = calloc(DPA_START_SIZE, sizeof(void *));
    if (dpa->keys == NULL) {
        /* Don't leak the container when the backing array can't be made. */
        free(dpa);
        return NULL;
    }
    dpa->used = 0;
    dpa->size = DPA_START_SIZE;
    return dpa;
}
/**
* Removes obj from dpa, by swapping the last object of dpa->keys into the slot where
* obj was, and setting that object to NULL whilst decrementing dpa->used.
*
* Does NOT make any attempt to free() obj, do this yourself. Returns false if object
* does not exist.
*/
/* Remove obj from dpa by swapping the last slot into obj's position.
 * NOTE(review): the return type is `bool *` but the function returns the
 * plain values true/false (an int-to-pointer conversion); it is most
 * likely meant to be `bool`. Changing it requires updating the prototype
 * in vbuf.h and all callers — TODO confirm and fix together. */
extern bool *DPA_rem(DPA *dpa, void *obj) {
int i = 0, j = -1;
/* Scan all used slots, remembering the (last) index that matches obj. */
for (void *tbr = dpa->keys[i]; i < dpa->used; tbr = dpa->keys[++i])
if (tbr == obj) j = i;
if (j == -1) return false;
/* After the loop i == dpa->used, so keys[i - 1] is the last used slot. */
dpa->keys[j] = dpa->keys[i - 1];
dpa->keys[i - 1] = NULL;
dpa->used--;
return true;
}
/**
* Stores obj in dpa. Returns a pointer to obj if successful, or NULL if there was an error.
*/
/**
 * Stores obj in dpa, growing the backing array by DPA_REFILL_SIZE slots
 * when fewer than two free slots remain.  Returns obj on success, or NULL
 * if growing failed (dpa itself is left untouched in that case).
 */
extern void *DPA_store(DPA *dpa, void *obj) {
    if ((dpa->size - dpa->used) < 2) {
        /* Grow the key array before appending. */
        void **grown = realloc(dpa->keys, sizeof(void *) * (dpa->size + DPA_REFILL_SIZE));
        if (grown == NULL) return NULL;
        dpa->keys = grown;
        dpa->size += DPA_REFILL_SIZE;
    }
    dpa->keys[dpa->used] = obj;
    dpa->used++;
    return obj;
}
/* Release a DPA and its key array.  The stored objects themselves are not
 * freed — callers own them.  dpa must be non-NULL (both fields are read). */
extern void DPA_free(DPA *dpa) {
free(dpa->keys);
free(dpa);
}
/**
* Initialises a variable string object. Returns pointer on success, NULL on failure.
*/
/* Allocate a variable string with VARSTR_START_SIZE zeroed wide characters.
 * Fix: the original leaked the varstr header when the calloc() of the
 * character buffer failed; it is now freed on that path.
 * Returns the new varstr, or NULL on allocation failure. */
extern struct varstr *varstr_init(void) {
    struct varstr *vs = malloc(sizeof(struct varstr));
    if (vs == NULL) return NULL;
    vs->str = calloc(VARSTR_START_SIZE, sizeof(wchar_t));
    if (vs->str == NULL) {
        /* Don't leak the header when the buffer can't be allocated. */
        free(vs);
        return NULL;
    }
    vs->used = 0;
    vs->size = VARSTR_START_SIZE;
    return vs;
}
/**
* (internal function) Refill a varstr if space left is less than/equal to iu.
*/
/* (internal) Grow vs->str by iu + VARSTR_REFILL_SIZE wide chars when fewer
 * than iu+1 slots remain, zero-filling the newly added tail.  Returns vs,
 * or NULL if realloc() failed (vs itself stays valid in that case).
 * NOTE(review): callers assign the result back over their vs pointer, so a
 * NULL return loses the only reference to vs — a leak on OOM; confirm. */
static struct varstr *varstr_refill_if_needed(struct varstr *vs, int iu) {
if ((vs->size - vs->used) <= iu) {
wchar_t *ptr = realloc(vs->str, sizeof(wchar_t) * (vs->size + iu + VARSTR_REFILL_SIZE));
if (ptr == NULL) return NULL;
/* Zero the freshly grown region so the buffer stays NUL-terminated. */
wmemset(ptr + vs->size, L'\0', iu + VARSTR_REFILL_SIZE);
vs->str = ptr;
vs->size += iu;
vs->size += VARSTR_REFILL_SIZE;
}
return vs;
}
/**
* Appends a to b using strcat(), allocating more space if needed. Returns pointer to varstr object on success, NULL on failure.
*/
/* Append the wide string str to vs, growing the buffer as needed.
 * Returns vs on success, NULL on allocation failure.
 * NOTE(review): `used` is advanced by wcslen(str)+1, i.e. past the NUL
 * terminator, on every call; a subsequent varstr_pushc() would therefore
 * write after the terminator, leaving an embedded NUL.  Looks intentional
 * only if cat and pushc are never mixed — TODO confirm with callers. */
extern struct varstr *varstr_cat(struct varstr *vs, wchar_t *str) {
vs = varstr_refill_if_needed(vs, (wcslen(str) + 1));
if (vs == NULL) return NULL;
vs->used += (wcslen(str) + 1);
wcscat(vs->str, str);
return vs;
}
/**
* Appends a to b using strncat(), allocating more space if needed. Returns pointer to varstr object on success, NULL on failure.
*/
/* Append at most count wide chars of str to vs, growing the buffer as
 * needed.  Returns vs on success, NULL on allocation failure.
 * NOTE(review): `used` is advanced by count+1 even when wcsncat() copies
 * fewer than count characters (short str), so `used` can overcount the
 * actual content length — TODO confirm this is harmless for callers. */
extern struct varstr *varstr_ncat(struct varstr *vs, wchar_t *str, size_t count) {
vs = varstr_refill_if_needed(vs, count + 1);
if (vs == NULL) return NULL;
vs->used += (count + 1);
wcsncat(vs->str, str, count);
return vs;
}
/**
* Append a single wchar_t a to b, allocating more space if needed. Returns pointer to varstr object on success, NULL on failure.
*/
/* Append a single wide character to vs, growing the buffer as needed
 * (the grown region is zero-filled, so the result stays NUL-terminated).
 * Returns vs on success, NULL on allocation failure. */
extern struct varstr *varstr_pushc(struct varstr *vs, wchar_t c) {
vs = varstr_refill_if_needed(vs, 2);
if (vs == NULL) return NULL;
(vs->str)[(vs->used)++] = c;
return vs;
}
/**
* Free unused memory in a variable string & convert it to just a regular string.
* Returns pointer to regular string, NULL on failure.
*
* Remember to free() the string!
*/
/* Shrink a variable string into a plain heap-allocated wide string and
 * destroy the varstr.  The caller owns (and must free()) the result.
 * On malloc() failure returns NULL and leaves vs untouched, so the caller
 * still owns vs on that path. */
extern wchar_t *varstr_pack(struct varstr *vs) {
/* used may overcount (see varstr_cat); +1 guarantees room for the NUL. */
wchar_t *ptr = malloc(sizeof(wchar_t) * (vs->used + 1));
if (ptr == NULL) return NULL;
wcscpy(ptr, (const wchar_t *) vs->str);
free(vs->str);
free(vs);
return ptr;
};
/* Release a varstr and its character buffer without packing it.
 * vs must be non-NULL (vs->str is read unconditionally). */
extern void varstr_free(struct varstr *vs) {
free(vs->str);
free(vs);
};
| 30.690141 | 129 | 0.627123 | [
"object"
] |
d0a7a663f933a13c59a249b436f9546c9108c8d4 | 5,297 | h | C | code/SDK/include/Maya_17/maya/MQuaternion.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | 1 | 2021-09-14T14:28:56.000Z | 2021-09-14T14:28:56.000Z | code/SDK/include/Maya_17/maya/MQuaternion.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | null | null | null | code/SDK/include/Maya_17/maya/MQuaternion.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | 3 | 2020-10-12T18:04:42.000Z | 2020-10-12T18:04:59.000Z | #ifndef _MQuaternion
#define _MQuaternion
//-
// ===========================================================================
// Copyright 2018 Autodesk, Inc. All rights reserved.
//
// Use of this software is subject to the terms of the Autodesk license
// agreement provided at the time of installation or download, or which
// otherwise accompanies this software in either electronic or hard copy form.
// ===========================================================================
//+
//
// CLASS: MQuaternion
//
// ****************************************************************************
#if defined __cplusplus
// ****************************************************************************
// INCLUDED HEADER FILES
#include <maya/MStatus.h>
#include <maya/MTypes.h>
#include <maya/MTransformationMatrix.h>
#define kQuaternionEpsilon 1.0e-10
OPENMAYA_MAJOR_NAMESPACE_OPEN
// ****************************************************************************
// CLASS DECLARATION (MQuaternion)
//! \ingroup OpenMaya
//! \brief Quaternion math.
/*!
This class provides methods for working with Quaternions.
Quaternions can be used to specify orientations and rotations of 3-D
objects relative to a starting reference, similar to the way that cartesian
vectors can be used to specify positions and translations of 3-D objects
relative to an origin. Quaternions represent orientations as a single
rotation, just as rectangular co-ordinates represent position as a single
vector.
*/
class OPENMAYA_EXPORT MQuaternion
{
public:
//! --- Construction / destruction ---
MQuaternion();
MQuaternion(const MQuaternion &src);
MQuaternion(double xx, double yy, double zz, double ww);
MQuaternion(const double q[4]);
MQuaternion(const MVector &a, const MVector &b);
MQuaternion(const MVector &a, const MVector &b,
double angleFactor);
MQuaternion(double angle, const MVector &axisb);
~MQuaternion();
//! --- Assignment and conversion to/from matrix and Euler-angle form ---
MQuaternion & operator=(const MQuaternion& src);
MQuaternion & operator=(const MMatrix &matrix);
MQuaternion & operator=(const MEulerRotation &matrix);
MMatrix asMatrix() const;
MEulerRotation asEulerRotation() const;
//! --- Axis/angle representation ---
MQuaternion & setAxisAngle(const MVector &axis, double theta);
bool getAxisAngle(MVector &axis, double &theta) const;
MQuaternion & setToXAxis(double theta);
MQuaternion & setToYAxis(double theta);
MQuaternion & setToZAxis(double theta);
//! Copy the four components (x, y, z, w) into dest.
MStatus get(double dest[4]) const;
//! --- Component access, arithmetic and comparison operators ---
double operator[](unsigned int i) const;
MQuaternion operator+(const MQuaternion &other) const;
MQuaternion operator*(const MQuaternion &other) const;
MQuaternion & operator*=(const MQuaternion &rhs);
MQuaternion operator-(const MQuaternion &other) const;
MQuaternion operator-() const;
MQuaternion & negateIt();
bool operator==(const MQuaternion &other) const;
bool operator!=(const MQuaternion &other) const;
bool isEquivalent(const MQuaternion &other,
double tolerance = kQuaternionEpsilon) const;
//! --- Scaling, normalization, conjugation, inversion, log/exp ---
MQuaternion & scaleIt(double scale);
MQuaternion normal() const;
MQuaternion & normalizeIt();
MQuaternion conjugate() const;
MQuaternion & conjugateIt();
MQuaternion inverse() const;
MQuaternion & invertIt();
MQuaternion log() const;
MQuaternion exp() const;
//! The multiplicative identity.
static const MQuaternion identity;
BEGIN_NO_SCRIPT_SUPPORT:
//! NO SCRIPT SUPPORT
double & operator[](unsigned int i);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion operator*(double scale,
const MQuaternion &other);
//! Spherical linear interpolation between p and q at parameter t.
friend OPENMAYA_EXPORT MQuaternion slerp(const MQuaternion &p,
const MQuaternion &q,
double t);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion slerp(const MQuaternion &p,
const MQuaternion &q,
double t,
short spin);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion dslerp(const MQuaternion &p,
const MQuaternion &q,
double t,
int dir);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion squad(const MQuaternion &p,
const MQuaternion &a,
const MQuaternion &b,
const MQuaternion &q,
double t);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion squad(const MQuaternion &p,
const MQuaternion &a,
const MQuaternion &b,
const MQuaternion &q,
double t, short spin);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT MQuaternion squadPt(const MQuaternion &q0,
const MQuaternion &q1,
const MQuaternion &q2);
//! NO SCRIPT SUPPORT
friend OPENMAYA_EXPORT std::ostream &operator<<(std::ostream &os,
const MQuaternion &q);
//! NO SCRIPT SUPPORT
operator MMatrix() const;
END_NO_SCRIPT_SUPPORT:
//! The quaternion's imaginary x component.
double x;
//! The quaternion's imaginary y component.
double y;
//! The quaternion's imaginary z component.
double z;
//! The quaternion's real component.
double w;
protected:
// No protected members
private:
// No private members
};
OPENMAYA_NAMESPACE_CLOSE
#endif /* __cplusplus */
#endif /* _MQuaternion */
| 32.697531 | 79 | 0.638663 | [
"vector"
] |
d0aa1033203d4d8e91de367ad908576d54dd3b79 | 3,389 | h | C | TFastReso_AZYHYDRO.h | amazeliauskas/FastReso | a694de368e79e3d21afa5d57b47209d5c9c55ed9 | [
"MIT"
] | 2 | 2020-07-11T02:48:59.000Z | 2020-09-22T14:25:58.000Z | TFastReso_AZYHYDRO.h | amazeliauskas/FastReso | a694de368e79e3d21afa5d57b47209d5c9c55ed9 | [
"MIT"
] | null | null | null | TFastReso_AZYHYDRO.h | amazeliauskas/FastReso | a694de368e79e3d21afa5d57b47209d5c9c55ed9 | [
"MIT"
] | null | null | null | /*
* Copyright (c) 2018-2021 Aleksas Mazeliauskas, Stefan Floerchinger,
* Eduardo Grossi, and Derek Teaney
* All rights reserved.
*
* FastReso is distributed under MIT license;
* see the LICENSE file that should be present in the root
* of the source distribution, or alternately available at:
* https://github.com/amazeliauskas/FastReso/
*/
#ifndef FASTRESO_TFastReso_AZYHYDRO_h
#define FASTRESO_TFastReso_AZYHYDRO_h
#include "TFastReso.h"
#include <fstream>
#include <memory>
#include <map>
#include <vector>
#include <string>
#include <gsl/gsl_integration.h>
// The class storing the properties of particles and their irreducible distrubtion
// function components f_i. Printing out routines are also defined there.
#include "TParticle_AZYHYDRO.h"
// List of parameters for GSL integration procedure.
#include "qag_params.h"
// List of parameters controlling the momentum discretization of irreducible components
// See also TParticle_AZYHYDRO class definition
#include "grid_params.h"
//! TFastReso_AZYHYDRO class is the top wrapper class of the fast freeze-out implementation.
class TFastReso_AZYHYDRO: public TFastReso {
private:
//! Data vector of TParticle_AZYHYDRO class of all read-in particles
//! See TParticle_AZYHYDRO class definition.
std::vector <std::unique_ptr<TParticle_AZYHYDRO>> fParticleData;
//! Map between the particle name and its position in fParticleData vector;
std::map <int, int> fIndexTable;
public:
//! reads the inputfile of resonances and creates TParticle_AZYHYDRO entry in fParticleData vector.
//! This routine should be edited by the user to match their input file format.
void read_particles_data(std::string inputname, std::string comps="Feq Fshear Fbulk Ftemp Fvel", bool verbose=false) override;
//! The function call to initialized the particle distribution functions in fParticleData. Input parameters
//! are the freeze-out temperature, chemical potential and (optionally), speed of sound (only needed for bulk perturbations).
void do_thermal(double Tfo, double MuB=0, double MuI3=0, double MuS=0, double MuC=0, double Cs2=0.14) override;
//! Single-particle overload: initialises only the particle at index pi.
void do_thermal(int pi, double Tfo, double QMu=0, double Cs2 = 0.14 );
//! Read in the input file of resonance decays and perfom the decays on the fly. It is important that decays
//! are mass ordered to included all feeddown contributions to lower mass resonances.
//! The table format is the same as decays.data in THERMINATOR 2 [arXiv:1102.0273]
//! Father Child1 Child2 (optional Child3) BranchingRatio ClebschGordanCoeff
//! Note that ClebschGordanCoeff term is 0 or 1 identifying if the branching ration needs
//! to be reweighted by actual Clebsch Gordon coefficients.
void do_decays(std::string inputname, bool verbose=false) override;
public:
//! routine to get the pointer to TParticle_AZYHYDRO class for a particle particle name"
//! NOTE(review): std::map::operator[] default-inserts index 0 for an unknown
//! pdgid, silently returning the first particle — confirm callers only pass
//! known PDG codes (same caveat for getParticleIndexByPDG below).
TParticle_AZYHYDRO * getParticleByPDG(int pdgid){return fParticleData[fIndexTable[pdgid]].get();};
//! Negative indices count from the end of fParticleData: -1 is the last entry.
TParticle_AZYHYDRO * getParticle(int i){return (i<0) ? fParticleData[fParticleData.size()+i].get() : fParticleData[i].get();};
int getParticleIndexByPDG(int pdgid){return fIndexTable[pdgid];};
//! The printout of the calculated components is done by printing procedures in TParticle_AZYHYDRO class
};
#endif
| 47.732394 | 131 | 0.75391 | [
"vector"
] |
d0b145fda4e30f0385c9fc732550db52ce6959cc | 3,478 | h | C | include/Graphs/include/Kosaraju.h | nicholas-gs/DataStructsAndAlgos | 2d999db6758619782fbb9cf38360746dbb653024 | [
"MIT"
] | null | null | null | include/Graphs/include/Kosaraju.h | nicholas-gs/DataStructsAndAlgos | 2d999db6758619782fbb9cf38360746dbb653024 | [
"MIT"
] | null | null | null | include/Graphs/include/Kosaraju.h | nicholas-gs/DataStructsAndAlgos | 2d999db6758619782fbb9cf38360746dbb653024 | [
"MIT"
] | null | null | null | //
// Created by Nicholas on 29 Dec 2020.
//
#pragma once
#include <cstddef>
#include <stdexcept>
#include <vector>
#include <stack>
#include <memory>
#include "SimpleGraph_Unweighted.h"
#include "DepthFirstOrder.h"
namespace wtl {
/**
* Kosaraju's algorithm to find strongly connected components of a directed graph.
*/
/**
 * Kosaraju's algorithm to find the strongly connected components (SCCs)
 * of a directed graph. The decomposition is computed eagerly in the
 * constructor; queries afterwards are O(1) (or O(V) for set()).
 *
 * Fixes: removed the tautological `v < 0` / `ccID < 0` comparisons on
 * unsigned std::size_t (always false, -Wtype-limits noise) and the
 * redundant manual zeroing of the visited array (make_unique on an array
 * already value-initializes it).
 */
class Kosaraju {
private:
    using Graph = SimpleGraph_Unweighted<true>;

    /// Number of vertices
    const std::size_t m_Size;

    /// Number of strongly connected components found
    std::size_t m_Count = 0;

    /// m_Id[v] is the id of the SCC that vertex v belongs to.
    std::unique_ptr<std::size_t[]> m_Id;

    /// std::size_t is unsigned, so only the upper bound can be violated.
    [[nodiscard]] inline bool outOfBounds(std::size_t v) const noexcept {
        return v >= m_Size;
    }

    /// DFS that stamps every reachable unvisited vertex with the current
    /// component id m_Count.
    void dfs(const Graph& graph, std::size_t v, bool visited[]) {
        visited[v] = true;
        m_Id[v] = m_Count;
        for (std::size_t w : graph.adjacent(v)) {
            if (!visited[w]) {
                dfs(graph, w, visited);
            }
        }
    }

    /// Kosaraju's algorithm: DFS the original graph, visiting vertices in
    /// the reverse postorder of the reversed graph; each DFS tree found
    /// this way is exactly one strongly connected component.
    void scc(const Graph& graph) {
        // make_unique<bool[]> value-initializes, so every flag starts false.
        std::unique_ptr<bool[]> visited = std::make_unique<bool[]>(m_Size);
        Graph reversedGraph = graph.reverse();
        DepthFirstOrder<DFS_Order::REVERSE_POSTORDER> depthFirstOrder(reversedGraph);
        for (auto& v : depthFirstOrder.getOrdering()) {
            if (!visited[v]) {
                dfs(graph, v, visited.get());
                ++m_Count;
            }
        }
    }

public:
    /**
     * Constructor: computes the SCC decomposition of the graph eagerly.
     * @param graph directed graph to decompose
     */
    Kosaraju(const Graph& graph)
            : m_Size(graph.vertex()), m_Id(std::make_unique<std::size_t[]>(m_Size)) {
        scc(graph);
    }

    /**
     * Get the number of strongly connected components.
     * @return component count
     */
    [[nodiscard]] std::size_t getCount() const noexcept {
        return m_Count;
    }

    /**
     * Check whether two vertices belong to the same strongly connected
     * component (i.e. are mutually reachable).
     * @throws std::invalid_argument if either vertex is out of range
     */
    [[nodiscard]] bool stronglyConnected(std::size_t v, std::size_t w) const {
        if (outOfBounds(v) || outOfBounds(w)) {
            throw std::invalid_argument("Invalid vertex");
        }
        return m_Id[v] == m_Id[w];
    }

    /**
     * Get the SCC id that a vertex belongs to.
     * @throws std::invalid_argument if the vertex is out of range
     */
    [[nodiscard]] std::size_t belongs(std::size_t v) const {
        if (outOfBounds(v)) {
            throw std::invalid_argument("Invalid vertex");
        }
        return m_Id[v];
    }

    /**
     * Get all vertices in the specified SCC.
     * @param ccID ID of the component, in [0, getCount())
     * @throws std::invalid_argument if the id does not exist
     */
    [[nodiscard]] std::vector<std::size_t> set(std::size_t ccID) const {
        if (ccID >= m_Count) {
            throw std::invalid_argument("CC ID does not exist");
        }
        std::vector<std::size_t> result;
        for (std::size_t i = 0; i < m_Size; i++) {
            if (m_Id[i] == ccID) {
                result.push_back(i);
            }
        }
        return result;
    }

    ~Kosaraju() = default;
};
}
| 26.549618 | 89 | 0.480736 | [
"vector"
] |
d0bd2738c66bcffe2b51756c80ac1776b3c57813 | 47,989 | c | C | dlls/ddrawex/surface.c | miqlas/wine-haiku | 60661fb86ba25ff7d5819416c97a2deebef1893b | [
"MIT"
] | 3 | 2015-08-06T13:40:28.000Z | 2017-11-09T15:50:17.000Z | dlls/ddrawex/surface.c | devyn/wine | 76bb12558beaece005419feb98f024a1eb1f74e8 | [
"MIT"
] | null | null | null | dlls/ddrawex/surface.c | devyn/wine | 76bb12558beaece005419feb98f024a1eb1f74e8 | [
"MIT"
] | 1 | 2019-01-31T13:35:30.000Z | 2019-01-31T13:35:30.000Z | /*
* Copyright 2008 Stefan Dösinger for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include "wine/debug.h"
#define COBJMACROS
#include "winbase.h"
#include "wingdi.h"
#include "ddraw.h"
#include "d3d.h"
#include "ddrawex_private.h"
WINE_DEFAULT_DEBUG_CHANNEL(ddrawex);
/******************************************************************************
* Helper functions for COM management
******************************************************************************/
/* Recover the wrapper object from an IDirectDrawSurface3 interface pointer
 * by subtracting the vtable field's offset; NULL maps to NULL so thunks
 * can pass optional interfaces through unchanged. */
static IDirectDrawSurfaceImpl *impl_from_dds3(IDirectDrawSurface3 *iface)
{
if(!iface) return NULL;
return (IDirectDrawSurfaceImpl *)((char*)iface - FIELD_OFFSET(IDirectDrawSurfaceImpl, IDirectDrawSurface3_Vtbl));
}
/* Inverse of impl_from_dds3(): produce the IDirectDrawSurface3 interface
 * pointer for a wrapper object (NULL-safe). */
static IDirectDrawSurface3 *dds3_from_impl(IDirectDrawSurfaceImpl *This)
{
if(!This) return NULL;
return (IDirectDrawSurface3 *) &This->IDirectDrawSurface3_Vtbl;
}
/* Recover the wrapper object from an IDirectDrawSurface4 interface pointer
 * (NULL-safe, same offset trick as impl_from_dds3). */
static IDirectDrawSurfaceImpl *impl_from_dds4(IDirectDrawSurface4 *iface)
{
if(!iface) return NULL;
return (IDirectDrawSurfaceImpl *)((char*)iface - FIELD_OFFSET(IDirectDrawSurfaceImpl, IDirectDrawSurface4_Vtbl));
}
/* Inverse of impl_from_dds4(): produce the IDirectDrawSurface4 interface
 * pointer for a wrapper object (NULL-safe). */
static IDirectDrawSurface4 *dds4_from_impl(IDirectDrawSurfaceImpl *This)
{
if(!This) return NULL;
return (IDirectDrawSurface4 *) &This->IDirectDrawSurface4_Vtbl;
}
/* IDirectDrawSurface4::QueryInterface implementation for the ddrawex
 * wrapper: hands out the v4 interface for IUnknown/IDirectDrawSurface4,
 * the v3 interface for the v1-v3 IIDs, and E_NOINTERFACE otherwise.
 * Several interfaces (gamma control, D3D devices/textures) are known but
 * not implemented yet; those branches only log a FIXME. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_QueryInterface(IDirectDrawSurface4 *iface,
REFIID riid,
void **obj)
{
IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
/* According to COM docs, if the QueryInterface fails, obj should be set to NULL */
*obj = NULL;
if(!riid)
return DDERR_INVALIDPARAMS;
TRACE("(%p)->(%s,%p)\n",This,debugstr_guid(riid),obj);
if (IsEqualGUID(riid, &IID_IUnknown)
|| IsEqualGUID(riid, &IID_IDirectDrawSurface4) )
{
/* The v4 interface doubles as the object's IUnknown. */
*obj = dds4_from_impl(This);
IDirectDrawSurface4_AddRef((IDirectDrawSurface4 *) *obj);
TRACE("(%p) returning IDirectDrawSurface4 interface at %p\n", This, *obj);
return S_OK;
}
else if( IsEqualGUID(riid, &IID_IDirectDrawSurface3)
|| IsEqualGUID(riid, &IID_IDirectDrawSurface2)
|| IsEqualGUID(riid, &IID_IDirectDrawSurface) )
{
/* Versions 1-3 all share the IDirectDrawSurface3 vtable. */
*obj = dds3_from_impl(This);
IDirectDrawSurface3_AddRef((IDirectDrawSurface3 *) *obj);
TRACE("(%p) returning IDirectDrawSurface3 interface at %p\n", This, *obj);
return S_OK;
}
else if( IsEqualGUID(riid, &IID_IDirectDrawGammaControl) )
{
FIXME("Implement IDirectDrawGammaControl in ddrawex\n");
}
else if( IsEqualGUID(riid, &IID_IDirect3DHALDevice)||
IsEqualGUID(riid, &IID_IDirect3DRGBDevice) )
{
/* Most likely not supported */
FIXME("Test IDirect3DDevice in ddrawex\n");
}
else if (IsEqualGUID( &IID_IDirect3DTexture, riid ) ||
IsEqualGUID( &IID_IDirect3DTexture2, riid ))
{
FIXME("Implement IDirect3dTexture in ddrawex\n");
}
else
{
WARN("No interface\n");
}
return E_NOINTERFACE;
}
/* IDirectDrawSurface3::QueryInterface: thin thunk that forwards to the
 * v4 implementation on the same object. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_QueryInterface(IDirectDrawSurface3 *iface,
REFIID riid,
void **obj)
{
IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
TRACE("(%p)->(%s,%p): Thunking to IDirectDrawSurface4\n",This,debugstr_guid(riid),obj);
return IDirectDrawSurface4_QueryInterface(dds4_from_impl(This), riid, obj);
}
/* IDirectDrawSurface4::AddRef: thread-safe increment of the wrapper's
 * reference count; returns the new count. */
static ULONG WINAPI
IDirectDrawSurface4Impl_AddRef(IDirectDrawSurface4 *iface)
{
IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
ULONG ref = InterlockedIncrement(&This->ref);
TRACE("(%p) : incrementing refcount from %u.\n", This, ref - 1);
return ref;
}
/* IDirectDrawSurface3::AddRef: thunk to the shared v4 refcount — both
 * interfaces share one reference count on the wrapper. */
static ULONG WINAPI
IDirectDrawSurface3Impl_AddRef(IDirectDrawSurface3 *iface)
{
IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
TRACE("(%p): Thunking to IDirectDrawSurface4\n", This);
return IDirectDrawSurface4_AddRef(dds4_from_impl(This));
}
/* IDirectDrawSurface4::Release: drop one reference; when the count hits
 * zero, detach the ddrawex private data from the wrapped ddraw surface,
 * release the wrapped surface and free the wrapper itself. */
static ULONG WINAPI
IDirectDrawSurface4Impl_Release(IDirectDrawSurface4 *iface)
{
IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
ULONG ref = InterlockedDecrement(&This->ref);
TRACE("(%p) : decrementing refcount to %u.\n", This, ref);
if(ref == 0)
{
TRACE("Destroying object\n");
IDirectDrawSurface4_FreePrivateData(This->parent, &IID_DDrawexPriv);
IDirectDrawSurface4_Release(This->parent);
HeapFree(GetProcessHeap(), 0, This);
}
return ref;
}
static ULONG WINAPI
IDirectDrawSurface3Impl_Release(IDirectDrawSurface3 *iface)
{
IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
TRACE("(%p): Thunking to IDirectDrawSurface4\n", This);
return IDirectDrawSurface4_Release(dds4_from_impl(This));
}
/* IDirectDrawSurface4Impl_AddAttachedSurface: forward to the wrapped surfaces.
 * NOTE(review): attach is dereferenced unconditionally — a NULL Attach_iface
 * would crash here, unlike Blt/Flip below which guard for NULL. Presumably
 * callers never pass NULL; confirm against the DirectDraw contract. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_AddAttachedSurface(IDirectDrawSurface4 *iface,
                                           IDirectDrawSurface4 *Attach_iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *attach = impl_from_dds4(Attach_iface);
    TRACE("(%p)->(%p)\n", This, attach);
    return IDirectDrawSurface4_AddAttachedSurface(This->parent, attach->parent);
}
/* IDirectDrawSurface3Impl_AddAttachedSurface: thunk both surfaces to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_AddAttachedSurface(IDirectDrawSurface3 *iface,
                                           IDirectDrawSurface3 *Attach_iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *attach = impl_from_dds3(Attach_iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, attach);
    return IDirectDrawSurface4_AddAttachedSurface(dds4_from_impl(This), dds4_from_impl(attach));
}
/* IDirectDrawSurface4Impl_AddOverlayDirtyRect: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_AddOverlayDirtyRect(IDirectDrawSurface4 *iface,
                                            RECT *Rect)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Rect);
    return IDirectDrawSurface4_AddOverlayDirtyRect(This->parent, Rect);
}
/* IDirectDrawSurface3Impl_AddOverlayDirtyRect: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_AddOverlayDirtyRect(IDirectDrawSurface3 *iface,
                                            RECT *Rect)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Rect);
    return IDirectDrawSurface4_AddOverlayDirtyRect(dds4_from_impl(This), Rect);
}
/* IDirectDrawSurface4Impl_Blt: forward to the wrapped surfaces. The source
 * surface may legitimately be NULL (e.g. color fills), hence the guard. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Blt(IDirectDrawSurface4 *iface,
                            RECT *DestRect,
                            IDirectDrawSurface4 *SrcSurface,
                            RECT *SrcRect,
                            DWORD Flags,
                            DDBLTFX *DDBltFx)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Src = impl_from_dds4(SrcSurface);
    TRACE("(%p)->(%p,%p,%p,0x%08x,%p)\n", This, DestRect, Src, SrcRect, Flags, DDBltFx);
    return IDirectDrawSurface4_Blt(This->parent, DestRect, Src ? Src->parent : NULL,
                                   SrcRect, Flags, DDBltFx);
}
/* IDirectDrawSurface3Impl_Blt: thunk to v4.
 * NOTE(review): unlike the v4 method above there is no NULL guard on Src;
 * whether dds4_from_impl(NULL-derived Src) yields NULL depends on how
 * impl_from_dds3/dds4_from_impl are defined — confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Blt(IDirectDrawSurface3 *iface,
                            RECT *DestRect,
                            IDirectDrawSurface3 *SrcSurface,
                            RECT *SrcRect,
                            DWORD Flags,
                            DDBLTFX *DDBltFx)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Src = impl_from_dds3(SrcSurface);
    TRACE("(%p)->(%p,%p,%p,0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, DestRect, Src, SrcRect, Flags, DDBltFx);
    return IDirectDrawSurface4_Blt(dds4_from_impl(This), DestRect, dds4_from_impl(Src),
                                   SrcRect, Flags, DDBltFx);
}
/* IDirectDrawSurface4Impl_BltBatch: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_BltBatch(IDirectDrawSurface4 *iface,
                                 DDBLTBATCH *Batch,
                                 DWORD Count,
                                 DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p,%u,0x%08x)\n", This, Batch, Count, Flags);
    return IDirectDrawSurface4_BltBatch(This->parent, Batch, Count, Flags);
}
/* IDirectDrawSurface3Impl_BltBatch: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_BltBatch(IDirectDrawSurface3 *iface,
                                 DDBLTBATCH *Batch,
                                 DWORD Count,
                                 DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p,%u,0x%08x): Thunking to IDirectDrawSurface4\n", This, Batch, Count, Flags);
    return IDirectDrawSurface4_BltBatch(dds4_from_impl(This), Batch, Count, Flags);
}
/* IDirectDrawSurface4Impl_BltFast: forward to the wrapped surfaces, with a
 * NULL guard on the optional source surface. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_BltFast(IDirectDrawSurface4 *iface,
                                DWORD dstx,
                                DWORD dsty,
                                IDirectDrawSurface4 *Source,
                                RECT *rsrc,
                                DWORD trans)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Src = impl_from_dds4(Source);
    TRACE("(%p)->(%u,%u,%p,%p,0x%08x)\n", This, dstx, dsty, Src, rsrc, trans);
    return IDirectDrawSurface4_BltFast(This->parent, dstx, dsty, Src ? Src->parent : NULL,
                                       rsrc, trans);
}
/* IDirectDrawSurface3Impl_BltFast: thunk to v4.
 * NOTE(review): no NULL guard on Src here, unlike the v4 method — confirm
 * that a NULL Source is impossible or handled by dds4_from_impl. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_BltFast(IDirectDrawSurface3 *iface,
                                DWORD dstx,
                                DWORD dsty,
                                IDirectDrawSurface3 *Source,
                                RECT *rsrc,
                                DWORD trans)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Src = impl_from_dds3(Source);
    TRACE("(%p)->(%u,%u,%p,%p,0x%08x): Thunking to IDirectDrawSurface4\n", This, dstx, dsty, Src, rsrc, trans);
    return IDirectDrawSurface4_BltFast(dds4_from_impl(This), dstx, dsty, dds4_from_impl(Src),
                                       rsrc, trans);
}
/* IDirectDrawSurface4Impl_DeleteAttachedSurface: forward to the wrapped
 * surfaces, guarding the optional attachment pointer. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_DeleteAttachedSurface(IDirectDrawSurface4 *iface,
                                              DWORD Flags,
                                              IDirectDrawSurface4 *Attach)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Att = impl_from_dds4(Attach);
    TRACE("(%p)->(0x%08x,%p)\n", This, Flags, Att);
    return IDirectDrawSurface4_DeleteAttachedSurface(This->parent, Flags,
                                                     Att ? Att->parent : NULL);
}
/* IDirectDrawSurface3Impl_DeleteAttachedSurface: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_DeleteAttachedSurface(IDirectDrawSurface3 *iface,
                                              DWORD Flags,
                                              IDirectDrawSurface3 *Attach)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Att = impl_from_dds3(Attach);
    TRACE("(%p)->(0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, Flags, Att);
    return IDirectDrawSurface4_DeleteAttachedSurface(dds4_from_impl(This), Flags,
                                                     dds4_from_impl(Att));
}
/* Context passed through IDirectDrawSurface4 surface enumerations so the
 * wrapper callback can hand the application's own callback and context back
 * to it after translating inner surfaces to their ddrawex wrappers. */
struct enumsurfaces_wrap
{
    LPDDENUMSURFACESCALLBACK2 orig_cb;  /* the application's callback */
    void *orig_ctx;                     /* the application's context pointer */
};
/* enumsurfaces_wrap_cb
 *
 * Enumeration callback handed to the wrapped (inner) surface. Translates
 * the inner surface into its ddrawex wrapper, moves the enumeration's
 * reference from the inner surface to the wrapper and then invokes the
 * application's original callback.
 *
 * Bug fix: the application's callback must receive the application's own
 * context (ctx->orig_ctx), not our struct enumsurfaces_wrap wrapper (vctx),
 * which the application would misinterpret.
 */
static HRESULT WINAPI
enumsurfaces_wrap_cb(IDirectDrawSurface4 *surf, DDSURFACEDESC2 *desc, void *vctx)
{
    struct enumsurfaces_wrap *ctx = vctx;
    IDirectDrawSurface4 *outer = dds_get_outer(surf);
    TRACE("Returning outer surface %p for inner surface %p\n", outer, surf);
    /* Transfer the reference held by the enumeration to the outer surface */
    IDirectDrawSurface4_AddRef(outer);
    IDirectDrawSurface4_Release(surf);
    return ctx->orig_cb(outer, desc, ctx->orig_ctx);
}
/* IDirectDrawSurface4Impl_EnumAttachedSurfaces
 *
 * Enumerates the surfaces attached to the wrapped surface. The
 * application's callback and context are stashed in a stack-local
 * enumsurfaces_wrap so enumsurfaces_wrap_cb can translate each inner
 * surface to its ddrawex wrapper before calling back; the stack lifetime is
 * fine because the enumeration completes before this function returns.
 */
static HRESULT WINAPI
IDirectDrawSurface4Impl_EnumAttachedSurfaces(IDirectDrawSurface4 *iface,
                                             void *context,
                                             LPDDENUMSURFACESCALLBACK2 cb)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    struct enumsurfaces_wrap wrap_ctx;
    TRACE("(%p)->(%p,%p)\n", This, context, cb);
    wrap_ctx.orig_cb = cb;
    wrap_ctx.orig_ctx = context;
    return IDirectDrawSurface4_EnumAttachedSurfaces(This->parent, &wrap_ctx, enumsurfaces_wrap_cb);
}
/* Context used when thunking an IDirectDrawSurface3 enumeration through the
 * IDirectDrawSurface4 enumeration: remembers the application's v1-style
 * callback and context so enumsurfaces_thunk_cb can convert back. */
struct enumsurfaces_thunk
{
    LPDDENUMSURFACESCALLBACK orig_cb;  /* the application's callback */
    void *orig_ctx;                    /* the application's context pointer */
};
/* enumsurfaces_thunk_cb
 *
 * Converts an IDirectDrawSurface4 enumeration callback into the
 * IDirectDrawSurface3-style callback the application registered: swaps the
 * enumeration's v4 reference for a v3 reference on the same wrapper object
 * and converts the DDSURFACEDESC2 back to a DDSURFACEDESC.
 *
 * Consistency fix: release the surface with the IDirectDrawSurface4 macro —
 * surf is an IDirectDrawSurface4 *; the previous IDirectDrawSurface3_Release
 * only compiled because the C COM macros are structurally identical, and
 * would break a C++ build.
 */
static HRESULT WINAPI
enumsurfaces_thunk_cb(IDirectDrawSurface4 *surf, DDSURFACEDESC2 *desc2, void *vctx)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(surf);
    struct enumsurfaces_thunk *ctx = vctx;
    DDSURFACEDESC desc;
    TRACE("Thunking back to IDirectDrawSurface3\n");
    IDirectDrawSurface3_AddRef(dds3_from_impl(This));
    IDirectDrawSurface4_Release(surf);
    /* desc is fully written by the conversion helper — presumably including
     * dwSize; confirm against DDSD2_to_DDSD */
    DDSD2_to_DDSD(desc2, &desc);
    return ctx->orig_cb((IDirectDrawSurface *) dds3_from_impl(This), &desc, ctx->orig_ctx);
}
/* IDirectDrawSurface3Impl_EnumAttachedSurfaces: run the v4 enumeration and
 * convert each result back to a v3 surface via enumsurfaces_thunk_cb. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_EnumAttachedSurfaces(IDirectDrawSurface3 *iface,
                                             void *context,
                                             LPDDENUMSURFACESCALLBACK cb)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    struct enumsurfaces_thunk ctx;
    TRACE("(%p)->(%p,%p): Thunking to IDirectDraw4\n", This, context, cb);
    ctx.orig_cb = cb;
    ctx.orig_ctx = context;
    return IDirectDrawSurface4_EnumAttachedSurfaces(dds4_from_impl(This), &ctx, enumsurfaces_thunk_cb);
}
/* IDirectDrawSurface4Impl_EnumOverlayZOrders: like EnumAttachedSurfaces,
 * wrapping each enumerated inner surface in its ddrawex wrapper. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_EnumOverlayZOrders(IDirectDrawSurface4 *iface,
                                           DWORD Flags,
                                           void *context,
                                           LPDDENUMSURFACESCALLBACK2 cb)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    struct enumsurfaces_wrap ctx;
    TRACE("(%p)->(0x%08x,%p,%p)\n", This, Flags, context, cb);
    ctx.orig_cb = cb;
    ctx.orig_ctx = context;
    return IDirectDrawSurface4_EnumOverlayZOrders(This->parent, Flags, &ctx, enumsurfaces_wrap_cb);
}
/* IDirectDrawSurface3Impl_EnumOverlayZOrders: thunk via the v4 enumeration
 * and convert results back to v3 surfaces. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_EnumOverlayZOrders(IDirectDrawSurface3 *iface,
                                           DWORD Flags,
                                           void *context,
                                           LPDDENUMSURFACESCALLBACK cb)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    struct enumsurfaces_thunk ctx;
    TRACE("(%p)->(0x%08x,%p,%p): Thunking to IDirectDraw4\n", This, Flags, context, cb);
    ctx.orig_cb = cb;
    ctx.orig_ctx = context;
    return IDirectDrawSurface4_EnumOverlayZOrders(dds4_from_impl(This), Flags, &ctx, enumsurfaces_thunk_cb);
}
/* IDirectDrawSurface4Impl_Flip: forward to the wrapped surfaces; the
 * destination override is optional, hence the NULL guard. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Flip(IDirectDrawSurface4 *iface,
                             IDirectDrawSurface4 *DestOverride,
                             DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Dest = impl_from_dds4(DestOverride);
    TRACE("(%p)->(%p,0x%08x)\n", This, Dest, Flags);
    return IDirectDrawSurface4_Flip(This->parent, Dest ? Dest->parent : NULL, Flags);
}
/* IDirectDrawSurface3Impl_Flip: thunk to v4.
 * NOTE(review): no NULL guard on Dest here although DestOverride is an
 * optional parameter — confirm dds4_from_impl handles the NULL-derived
 * pointer, or that callers always pass a surface. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Flip(IDirectDrawSurface3 *iface,
                             IDirectDrawSurface3 *DestOverride,
                             DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Dest = impl_from_dds3(DestOverride);
    TRACE("(%p)->(%p,0x%08x): Thunking to IDirectDrawSurface4\n", This, Dest, Flags);
    return IDirectDrawSurface4_Flip(dds4_from_impl(This), dds4_from_impl(Dest), Flags);
}
/* IDirectDrawSurface4Impl_GetAttachedSurface
 *
 * Asks the wrapped (inner) surface for the attached surface and translates
 * the result into its ddrawex wrapper before returning it, transferring the
 * reference from the inner surface to the wrapper.
 *
 * Bug fix: the query must go to the wrapped surface (This->parent). The
 * previous code called IDirectDrawSurface4_GetAttachedSurface on
 * dds4_from_impl(This) — i.e. on this very wrapper interface — which
 * re-enters this function and recurses until the stack overflows.
 */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetAttachedSurface(IDirectDrawSurface4 *iface,
                                           DDSCAPS2 *Caps,
                                           IDirectDrawSurface4 **Surface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurface4 *inner = NULL;
    HRESULT hr;
    TRACE("(%p)->(%p,%p)\n", This, Caps, Surface);
    hr = IDirectDrawSurface4_GetAttachedSurface(This->parent, Caps, &inner);
    if(SUCCEEDED(hr))
    {
        /* Hand out the wrapper and move the reference onto it */
        *Surface = dds_get_outer(inner);
        IDirectDrawSurface4_AddRef(*Surface);
        IDirectDrawSurface4_Release(inner);
    }
    else
    {
        *Surface = NULL;
    }
    return hr;
}
/* IDirectDrawSurface3Impl_GetAttachedSurface: widen the DDSCAPS to DDSCAPS2,
 * get the attached surface via the v4 interface, then convert the returned
 * v4 wrapper into its v3 interface.
 * NOTE(review): the QueryInterface result is ignored — presumably it cannot
 * fail for the object's own IDirectDrawSurface3 interface; confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetAttachedSurface(IDirectDrawSurface3 *iface,
                                           DDSCAPS *Caps,
                                           IDirectDrawSurface3 **Surface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurface4 *surf4;
    DDSCAPS2 caps2;
    HRESULT hr;
    TRACE("(%p)->(%p,%p): Thunking to IDirectDrawSurface4\n", This, Caps, Surface);
    /* Only dwCaps exists in the old structure; the extended fields stay 0 */
    memset(&caps2, 0, sizeof(caps2));
    caps2.dwCaps = Caps->dwCaps;
    hr = IDirectDrawSurface4_GetAttachedSurface(dds4_from_impl(This), &caps2, &surf4);
    if(SUCCEEDED(hr))
    {
        /* QI adds the reference handed to the caller; drop the v4 one */
        IDirectDrawSurface4_QueryInterface(surf4, &IID_IDirectDrawSurface3, (void **) Surface);
        IDirectDrawSurface4_Release(surf4);
    }
    else
    {
        *Surface = NULL;
    }
    return hr;
}
/* IDirectDrawSurface4Impl_GetBltStatus: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetBltStatus(IDirectDrawSurface4 *iface,
                                     DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(0x%08x)\n", This, Flags);
    return IDirectDrawSurface4_GetBltStatus(This->parent, Flags);
}
/* IDirectDrawSurface3Impl_GetBltStatus: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetBltStatus(IDirectDrawSurface3 *iface,
                                     DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(0x%08x): Thunking to IDirectDrawSurface4\n", This, Flags);
    return IDirectDrawSurface4_GetBltStatus(dds4_from_impl(This), Flags);
}
/* IDirectDrawSurface4Impl_GetCaps: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetCaps(IDirectDrawSurface4 *iface,
                                DDSCAPS2 *Caps)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Caps);
    return IDirectDrawSurface4_GetCaps(This->parent, Caps);
}
/* IDirectDrawSurface3Impl_GetCaps: get the DDSCAPS2 from the v4 interface
 * and narrow it to the old DDSCAPS (only dwCaps survives). */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetCaps(IDirectDrawSurface3 *iface,
                                DDSCAPS *Caps)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    DDSCAPS2 caps2;
    HRESULT hr;
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Caps);
    memset(&caps2, 0, sizeof(caps2));
    memset(Caps, 0, sizeof(*Caps));
    hr = IDirectDrawSurface4_GetCaps(dds4_from_impl(This), &caps2);
    Caps->dwCaps = caps2.dwCaps;
    return hr;
}
/* IDirectDrawSurface4Impl_GetClipper: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetClipper(IDirectDrawSurface4 *iface,
                                   IDirectDrawClipper **Clipper)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Clipper);
    return IDirectDrawSurface4_GetClipper(This->parent, Clipper);
}
/* IDirectDrawSurface3Impl_GetClipper: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetClipper(IDirectDrawSurface3 *iface,
                                   IDirectDrawClipper **Clipper)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Clipper);
    return IDirectDrawSurface4_GetClipper(dds4_from_impl(This), Clipper);
}
/* IDirectDrawSurface4Impl_GetColorKey: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetColorKey(IDirectDrawSurface4 *iface,
                                    DWORD Flags,
                                    DDCOLORKEY *CKey)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(0x%08x,%p)\n", This, Flags, CKey);
    return IDirectDrawSurface4_GetColorKey(This->parent, Flags, CKey);
}
/* IDirectDrawSurface3Impl_GetColorKey: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetColorKey(IDirectDrawSurface3 *iface,
                                    DWORD Flags,
                                    DDCOLORKEY *CKey)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, Flags, CKey);
    return IDirectDrawSurface4_GetColorKey(dds4_from_impl(This), Flags, CKey);
}
/* IDirectDrawSurface4Impl_GetDC: surfaces created with a permanent DC
 * (DDSCAPS_OWNDC emulation) hand out the stored DC instead of asking the
 * wrapped surface for a fresh one. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetDC(IDirectDrawSurface4 *iface,
                              HDC *hdc)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, hdc);
    if(This->permanent_dc)
    {
        TRACE("Returning stored dc %p\n", This->hdc);
        *hdc = This->hdc;
        return DD_OK;
    }
    else
    {
        return IDirectDrawSurface4_GetDC(This->parent, hdc);
    }
}
/* IDirectDrawSurface3Impl_GetDC: thunk to v4 (which applies the
 * permanent-DC handling above). */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetDC(IDirectDrawSurface3 *iface,
                              HDC *hdc)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, hdc);
    return IDirectDrawSurface4_GetDC(dds4_from_impl(This), hdc);
}
/* IDirectDrawSurface4Impl_GetFlipStatus: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetFlipStatus(IDirectDrawSurface4 *iface,
                                      DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(0x%08x)\n", This, Flags);
    return IDirectDrawSurface4_GetFlipStatus(This->parent, Flags);
}
/* IDirectDrawSurface3Impl_GetFlipStatus: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetFlipStatus(IDirectDrawSurface3 *iface,
                                      DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(0x%08x): Thunking to IDirectDrawSurface4\n", This, Flags);
    return IDirectDrawSurface4_GetFlipStatus(dds4_from_impl(This), Flags);
}
/* IDirectDrawSurface4Impl_GetOverlayPosition: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetOverlayPosition(IDirectDrawSurface4 *iface,
                                           LONG *X,
                                           LONG *Y)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p,%p)\n", This, X, Y);
    return IDirectDrawSurface4_GetOverlayPosition(This->parent, X, Y);
}
/* IDirectDrawSurface3Impl_GetOverlayPosition: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetOverlayPosition(IDirectDrawSurface3 *iface,
                                           LONG *X,
                                           LONG *Y)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p,%p): Thunking to IDirectDrawSurface4\n", This, X, Y);
    return IDirectDrawSurface4_GetOverlayPosition(dds4_from_impl(This), X, Y);
}
/* IDirectDrawSurface4Impl_GetPalette: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetPalette(IDirectDrawSurface4 *iface,
                                   IDirectDrawPalette **Pal)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Pal);
    return IDirectDrawSurface4_GetPalette(This->parent, Pal);
}
/* IDirectDrawSurface3Impl_GetPalette: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetPalette(IDirectDrawSurface3 *iface,
                                   IDirectDrawPalette **Pal)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Pal);
    return IDirectDrawSurface4_GetPalette(dds4_from_impl(This), Pal);
}
/* IDirectDrawSurface4Impl_GetPixelFormat: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetPixelFormat(IDirectDrawSurface4 *iface,
                                       DDPIXELFORMAT *PixelFormat)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, PixelFormat);
    return IDirectDrawSurface4_GetPixelFormat(This->parent, PixelFormat);
}
/* IDirectDrawSurface3Impl_GetPixelFormat: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetPixelFormat(IDirectDrawSurface3 *iface,
                                       DDPIXELFORMAT *PixelFormat)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, PixelFormat);
    return IDirectDrawSurface4_GetPixelFormat(dds4_from_impl(This), PixelFormat);
}
/* IDirectDrawSurface4Impl_GetSurfaceDesc: forward to the wrapped surface.
 * For permanent-DC surfaces the reported caps are adjusted: the surface is
 * presented as a video memory surface without DDSCAPS_OWNDC, hiding the
 * wrapper's DC emulation from the application. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetSurfaceDesc(IDirectDrawSurface4 *iface,
                                       DDSURFACEDESC2 *DDSD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    HRESULT hr;
    TRACE("(%p)->(%p)\n", This, DDSD);
    hr = IDirectDrawSurface4_GetSurfaceDesc(This->parent, DDSD);
    if(SUCCEEDED(hr) && This->permanent_dc)
    {
        DDSD->ddsCaps.dwCaps |= DDSCAPS_VIDEOMEMORY;
        DDSD->ddsCaps.dwCaps &= ~DDSCAPS_OWNDC;
    }
    return hr;
}
/* IDirectDrawSurface3Impl_GetSurfaceDesc: fetch the DDSURFACEDESC2 via v4
 * and convert it down to the old DDSURFACEDESC layout. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetSurfaceDesc(IDirectDrawSurface3 *iface,
                                       DDSURFACEDESC *DDSD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    DDSURFACEDESC2 ddsd2;
    HRESULT hr;
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, DDSD);
    memset(&ddsd2, 0, sizeof(ddsd2));
    ddsd2.dwSize = sizeof(ddsd2);
    hr = IDirectDrawSurface4_GetSurfaceDesc(dds4_from_impl(This), &ddsd2);
    DDSD2_to_DDSD(&ddsd2, DDSD);
    return hr;
}
/* IDirectDrawSurface4Impl_Initialize
 *
 * Initializes the wrapped surface. The IDirectDraw the application passes
 * in is the ddrawex wrapper; the inner surface must be initialized with the
 * corresponding inner (ddraw.dll) object, so unwrap it first:
 * outer IDirectDraw -> outer IDirectDraw4 -> inner IDirectDraw4 -> inner
 * IDirectDraw.
 *
 * Bug fix: the final QueryInterface on the inner IDirectDraw4 asked for
 * IID_IDirectDraw4 while storing the result in an IDirectDraw * and passing
 * it where IDirectDrawSurface4_Initialize expects an IDirectDraw *. Query
 * IID_IDirectDraw instead so the interface pointer matches its type.
 */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Initialize(IDirectDrawSurface4 *iface,
                                   IDirectDraw *DD,
                                   DDSURFACEDESC2 *DDSD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDraw4 *outer_DD4;
    IDirectDraw4 *inner_DD4;
    IDirectDraw *inner_DD;
    HRESULT hr;
    TRACE("(%p)->(%p,%p)\n", This, DD, DDSD);
    IDirectDraw_QueryInterface(DD, &IID_IDirectDraw4, (void **) &outer_DD4);
    inner_DD4 = dd_get_inner(outer_DD4);
    IDirectDraw4_Release(outer_DD4);
    IDirectDraw4_QueryInterface(inner_DD4, &IID_IDirectDraw, (void **) &inner_DD);
    hr = IDirectDrawSurface4_Initialize(This->parent, inner_DD, DDSD);
    IDirectDraw_Release(inner_DD);
    return hr;
}
/* IDirectDrawSurface3Impl_Initialize: widen the DDSURFACEDESC to a
 * DDSURFACEDESC2 and thunk to the v4 Initialize.
 * NOTE(review): ddsd2 is not zeroed here — presumably DDSD_to_DDSD2 fully
 * initializes the output structure; confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Initialize(IDirectDrawSurface3 *iface,
                                   IDirectDraw *DD,
                                   DDSURFACEDESC *DDSD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    DDSURFACEDESC2 ddsd2;
    TRACE("(%p)->(%p,%p): Thunking to IDirectDrawSurface4\n", This, DD, DDSD);
    DDSD_to_DDSD2(DDSD, &ddsd2);
    return IDirectDrawSurface4_Initialize(dds4_from_impl(This), DD, &ddsd2);
}
/* IDirectDrawSurface4Impl_IsLost: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_IsLost(IDirectDrawSurface4 *iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)\n", This);
    return IDirectDrawSurface4_IsLost(This->parent);
}
/* IDirectDrawSurface3Impl_IsLost: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_IsLost(IDirectDrawSurface3 *iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p): Thunking to IDirectDrawSurface4\n", This);
    return IDirectDrawSurface4_IsLost(dds4_from_impl(This));
}
/* IDirectDrawSurface4Impl_Lock: forward to the wrapped surface. Like
 * GetSurfaceDesc, the caps in the returned description are adjusted for
 * permanent-DC surfaces to hide the wrapper's DC emulation. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Lock(IDirectDrawSurface4 *iface,
                             RECT *Rect,
                             DDSURFACEDESC2 *DDSD,
                             DWORD Flags,
                             HANDLE h)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    HRESULT hr;
    TRACE("(%p)->(%p,%p,0x%08x,%p)\n", This, Rect, DDSD, Flags, h);
    hr = IDirectDrawSurface4_Lock(This->parent, Rect, DDSD, Flags, h);
    if(SUCCEEDED(hr) && This->permanent_dc)
    {
        DDSD->ddsCaps.dwCaps |= DDSCAPS_VIDEOMEMORY;
        DDSD->ddsCaps.dwCaps &= ~DDSCAPS_OWNDC;
    }
    return hr;
}
/* IDirectDrawSurface3Impl_Lock: lock via v4 with a DDSURFACEDESC2, then
 * convert the returned description back to the old DDSURFACEDESC. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Lock(IDirectDrawSurface3 *iface,
                             RECT *Rect,
                             DDSURFACEDESC *DDSD,
                             DWORD Flags,
                             HANDLE h)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    DDSURFACEDESC2 ddsd2;
    HRESULT hr;
    TRACE("(%p)->(%p,%p,0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, Rect, DDSD, Flags, h);
    memset(&ddsd2, 0, sizeof(ddsd2));
    ddsd2.dwSize = sizeof(ddsd2);
    hr = IDirectDrawSurface4_Lock(dds4_from_impl(This), Rect, &ddsd2, Flags, h);
    DDSD2_to_DDSD(&ddsd2, DDSD);
    return hr;
}
/* IDirectDrawSurface4Impl_ReleaseDC: permanent-DC surfaces keep their DC
 * for the surface's lifetime, so the release is a no-op for them. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_ReleaseDC(IDirectDrawSurface4 *iface,
                                  HDC hdc)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, hdc);
    if(This->permanent_dc)
    {
        TRACE("Surface has a permanent DC, not doing anything\n");
        return DD_OK;
    }
    else
    {
        return IDirectDrawSurface4_ReleaseDC(This->parent, hdc);
    }
}
/* IDirectDrawSurface3Impl_ReleaseDC: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_ReleaseDC(IDirectDrawSurface3 *iface,
                                  HDC hdc)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, hdc);
    return IDirectDrawSurface4_ReleaseDC(dds4_from_impl(This), hdc);
}
/* IDirectDrawSurface4Impl_Restore: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Restore(IDirectDrawSurface4 *iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)\n", This);
    return IDirectDrawSurface4_Restore(This->parent);
}
/* IDirectDrawSurface3Impl_Restore: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Restore(IDirectDrawSurface3 *iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p): Thunking to IDirectDrawSurface4\n", This);
    return IDirectDrawSurface4_Restore(dds4_from_impl(This));
}
/* IDirectDrawSurface4Impl_SetClipper: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetClipper(IDirectDrawSurface4 *iface,
                                   IDirectDrawClipper *Clipper)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Clipper);
    return IDirectDrawSurface4_SetClipper(This->parent, Clipper);
}
/* IDirectDrawSurface3Impl_SetClipper: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_SetClipper(IDirectDrawSurface3 *iface,
                                   IDirectDrawClipper *Clipper)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Clipper);
    return IDirectDrawSurface4_SetClipper(dds4_from_impl(This), Clipper);
}
/* IDirectDrawSurface4Impl_SetColorKey: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetColorKey(IDirectDrawSurface4 *iface,
                                    DWORD Flags,
                                    DDCOLORKEY *CKey)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(0x%08x,%p)\n", This, Flags, CKey);
    return IDirectDrawSurface4_SetColorKey(This->parent, Flags, CKey);
}
/* IDirectDrawSurface3Impl_SetColorKey: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_SetColorKey(IDirectDrawSurface3 *iface,
                                    DWORD Flags,
                                    DDCOLORKEY *CKey)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, Flags, CKey);
    return IDirectDrawSurface4_SetColorKey(dds4_from_impl(This), Flags, CKey);
}
/* IDirectDrawSurface4Impl_SetOverlayPosition: pass straight through.
 * Fix: X and Y are signed LONGs (overlay positions can be negative), so
 * trace them with %d rather than %u. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetOverlayPosition(IDirectDrawSurface4 *iface,
                                           LONG X,
                                           LONG Y)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%d,%d)\n", This, X, Y);
    return IDirectDrawSurface4_SetOverlayPosition(This->parent, X, Y);
}
/* IDirectDrawSurface3Impl_SetOverlayPosition: thunk to v4; same signed
 * trace-format fix as above. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_SetOverlayPosition(IDirectDrawSurface3 *iface,
                                           LONG X,
                                           LONG Y)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%d,%d): Thunking to IDirectDrawSurface4\n", This, X, Y);
    return IDirectDrawSurface4_SetOverlayPosition(dds4_from_impl(This), X, Y);
}
/* IDirectDrawSurface4Impl_SetPalette: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetPalette(IDirectDrawSurface4 *iface,
                                   IDirectDrawPalette *Pal)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, Pal);
    return IDirectDrawSurface4_SetPalette(This->parent, Pal);
}
/* IDirectDrawSurface3Impl_SetPalette: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_SetPalette(IDirectDrawSurface3 *iface,
                                   IDirectDrawPalette *Pal)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, Pal);
    return IDirectDrawSurface4_SetPalette(dds4_from_impl(This), Pal);
}
/* IDirectDrawSurface4Impl_Unlock: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_Unlock(IDirectDrawSurface4 *iface,
                               RECT *pRect)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, pRect);
    return IDirectDrawSurface4_Unlock(This->parent, pRect);
}
/* IDirectDrawSurface3Impl_Unlock: the old Unlock takes an arbitrary data
 * pointer, the v4 version takes a RECT; the data pointer cannot be
 * translated, so NULL (unlock the whole surface) is passed on purpose. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_Unlock(IDirectDrawSurface3 *iface,
                               void *data)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%p): Thunking to IDirectDrawSurface4\n", This, data);
    return IDirectDrawSurface4_Unlock(dds4_from_impl(This), NULL);
}
/* IDirectDrawSurface4Impl_UpdateOverlay: forward to the wrapped surfaces,
 * guarding the optional destination surface. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_UpdateOverlay(IDirectDrawSurface4 *iface,
                                      LPRECT SrcRect,
                                      IDirectDrawSurface4 *DstSurface,
                                      LPRECT DstRect,
                                      DWORD Flags,
                                      LPDDOVERLAYFX FX)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Dst = impl_from_dds4(DstSurface);
    TRACE("(%p)->(%p,%p,%p,0x%08x,%p)\n", This, SrcRect, Dst, DstRect, Flags, FX);
    return IDirectDrawSurface4_UpdateOverlay(This->parent, SrcRect, Dst ? Dst->parent : NULL,
                                             DstRect, Flags, FX);
}
/* IDirectDrawSurface3Impl_UpdateOverlay: thunk to v4.
 * NOTE(review): no NULL guard on Dst, unlike the v4 method — confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_UpdateOverlay(IDirectDrawSurface3 *iface,
                                      LPRECT SrcRect,
                                      IDirectDrawSurface3 *DstSurface,
                                      LPRECT DstRect,
                                      DWORD Flags,
                                      LPDDOVERLAYFX FX)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Dst = impl_from_dds3(DstSurface);
    TRACE("(%p)->(%p,%p,%p,0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, SrcRect, Dst, DstRect, Flags, FX);
    return IDirectDrawSurface4_UpdateOverlay(dds4_from_impl(This), SrcRect, dds4_from_impl(Dst),
                                             DstRect, Flags, FX);
}
/* IDirectDrawSurface4Impl_UpdateOverlayDisplay: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_UpdateOverlayDisplay(IDirectDrawSurface4 *iface,
                                             DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(0x%08x)\n", This, Flags);
    return IDirectDrawSurface4_UpdateOverlayDisplay(This->parent, Flags);
}
/* IDirectDrawSurface3Impl_UpdateOverlayDisplay: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_UpdateOverlayDisplay(IDirectDrawSurface3 *iface,
                                             DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(0x%08x): Thunking to IDirectDrawSurface4\n", This, Flags);
    return IDirectDrawSurface4_UpdateOverlayDisplay(dds4_from_impl(This), Flags);
}
/* IDirectDrawSurface4Impl_UpdateOverlayZOrder: forward to the wrapped
 * surfaces, guarding the optional reference surface. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_UpdateOverlayZOrder(IDirectDrawSurface4 *iface,
                                            DWORD Flags,
                                            IDirectDrawSurface4 *DDSRef)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    IDirectDrawSurfaceImpl *Ref = impl_from_dds4(DDSRef);
    TRACE("(%p)->(0x%08x,%p)\n", This, Flags, Ref);
    return IDirectDrawSurface4_UpdateOverlayZOrder(This->parent, Flags, Ref ? Ref->parent : NULL);
}
/* IDirectDrawSurface3Impl_UpdateOverlayZOrder: thunk to v4.
 * NOTE(review): no NULL guard on Ref, unlike the v4 method — confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_UpdateOverlayZOrder(IDirectDrawSurface3 *iface,
                                            DWORD Flags,
                                            IDirectDrawSurface3 *DDSRef)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    IDirectDrawSurfaceImpl *Ref = impl_from_dds3(DDSRef);
    TRACE("(%p)->(0x%08x,%p): Thunking to IDirectDrawSurface4\n", This, Flags, Ref);
    return IDirectDrawSurface4_UpdateOverlayZOrder(dds4_from_impl(This), Flags, dds4_from_impl(Ref));
}
/* IDirectDrawSurface4Impl_GetDDInterface: deliberately unimplemented — the
 * out pointer is cleared and E_FAIL returned (see comment below). */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetDDInterface(IDirectDrawSurface4 *iface,
                                       void **DD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    FIXME("(%p)->(%p)\n", This, DD);
    /* This has to be implemented in ddrawex, DDraw's interface can't be used because it is pretty
     * hard to tell which version of the DD interface is returned
     */
    *DD = NULL;
    return E_FAIL;
}
/* IDirectDrawSurface3Impl_GetDDInterface: unimplemented for the same
 * reason; not thunked to v4 since that would gain nothing. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_GetDDInterface(IDirectDrawSurface3 *iface,
                                       void **DD)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    FIXME("(%p)->(%p)\n", This, DD);
    /* A thunk it pretty pointless because of the same reason relaying to ddraw.dll works badly
     */
    *DD = NULL;
    return E_FAIL;
}
/* IDirectDrawSurface4Impl_PageLock: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_PageLock(IDirectDrawSurface4 *iface,
                                 DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%x)\n", iface, Flags);
    return IDirectDrawSurface4_PageLock(This->parent, Flags);
}
/* IDirectDrawSurface3Impl_PageLock: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_PageLock(IDirectDrawSurface3 *iface,
                                 DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%x): Thunking to IDirectDrawSurface4\n", iface, Flags);
    return IDirectDrawSurface4_PageLock(dds4_from_impl(This), Flags);
}
/* IDirectDrawSurface4Impl_PageUnlock: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_PageUnlock(IDirectDrawSurface4 *iface,
                                   DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%x)\n", iface, Flags);
    return IDirectDrawSurface4_PageUnlock(This->parent, Flags);
}
/* IDirectDrawSurface3Impl_PageUnlock: thunk to v4. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_PageUnlock(IDirectDrawSurface3 *iface,
                                   DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    TRACE("(%p)->(%x): Thunking to IDirectDrawSurface4\n", iface, Flags);
    return IDirectDrawSurface4_PageUnlock(dds4_from_impl(This), Flags);
}
/* IDirectDrawSurface4Impl_SetSurfaceDesc: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetSurfaceDesc(IDirectDrawSurface4 *iface,
                                       DDSURFACEDESC2 *DDSD,
                                       DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p,0x%08x)\n", This, DDSD, Flags);
    return IDirectDrawSurface4_SetSurfaceDesc(This->parent, DDSD, Flags);
}
/* IDirectDrawSurface3Impl_SetSurfaceDesc: widen the description to
 * DDSURFACEDESC2 and thunk to v4.
 * NOTE(review): ddsd is not zeroed before conversion — presumably
 * DDSD_to_DDSD2 fully initializes it; confirm. */
static HRESULT WINAPI
IDirectDrawSurface3Impl_SetSurfaceDesc(IDirectDrawSurface3 *iface,
                                       DDSURFACEDESC *DDSD,
                                       DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds3(iface);
    DDSURFACEDESC2 ddsd;
    TRACE("(%p)->(%p,0x%08x): Thunking to IDirectDrawSurface4\n", This, DDSD, Flags);
    DDSD_to_DDSD2(DDSD, &ddsd);
    return IDirectDrawSurface4_SetSurfaceDesc(dds4_from_impl(This), &ddsd, Flags);
}
/* IDirectDrawSurface4Impl_SetPrivateData: pass straight through. ddrawex
 * stores its own back pointer under IID_DDrawexPriv in the same private
 * data store, so an application using that GUID would clash — flagged. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_SetPrivateData(IDirectDrawSurface4 *iface,
                                       REFGUID tag,
                                       void *Data,
                                       DWORD Size,
                                       DWORD Flags)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%s,%p,%u,0x%08x)\n", iface, debugstr_guid(tag), Data, Size, Flags);
    /* To completely avoid this we'd have to clone the private data API in ddrawex */
    if(IsEqualGUID(&IID_DDrawexPriv, tag)) {
        FIXME("Application uses ddrawex's private guid\n");
    }
    return IDirectDrawSurface4_SetPrivateData(This->parent, tag, Data, Size, Flags);
}
/* IDirectDrawSurface4Impl_GetPrivateData: pass straight through, with the
 * same private-GUID clash warning. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetPrivateData(IDirectDrawSurface4 *iface,
                                       REFGUID tag,
                                       void *Data,
                                       DWORD *Size)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%s,%p,%p)\n", iface, debugstr_guid(tag), Data, Size);
    /* To completely avoid this we'd have to clone the private data API in ddrawex */
    if(IsEqualGUID(&IID_DDrawexPriv, tag)) {
        FIXME("Application uses ddrawex's private guid\n");
    }
    return IDirectDrawSurface4_GetPrivateData(This->parent, tag, Data, Size);
}
/* IDirectDrawSurface4Impl_FreePrivateData: pass straight through, with the
 * same private-GUID clash warning. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_FreePrivateData(IDirectDrawSurface4 *iface,
                                        REFGUID tag)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%s)\n", iface, debugstr_guid(tag));
    /* To completely avoid this we'd have to clone the private data API in ddrawex */
    if(IsEqualGUID(&IID_DDrawexPriv, tag)) {
        FIXME("Application uses ddrawex's private guid\n");
    }
    return IDirectDrawSurface4_FreePrivateData(This->parent, tag);
}
/* IDirectDrawSurface4Impl_GetUniquenessValue: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_GetUniquenessValue(IDirectDrawSurface4 *iface,
                                           LPDWORD pValue)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)->(%p)\n", This, pValue);
    return IDirectDrawSurface4_GetUniquenessValue(This->parent, pValue);
}
/* IDirectDrawSurface4Impl_ChangeUniquenessValue: pass straight through. */
static HRESULT WINAPI
IDirectDrawSurface4Impl_ChangeUniquenessValue(IDirectDrawSurface4 *iface)
{
    IDirectDrawSurfaceImpl *This = impl_from_dds4(iface);
    TRACE("(%p)\n", This);
    return IDirectDrawSurface4_ChangeUniquenessValue(This->parent);
}
/* Vtable for the wrapper's IDirectDrawSurface3 interface. The entry order
 * is part of the COM binary interface and must not be changed. */
const IDirectDrawSurface3Vtbl IDirectDrawSurface3_Vtbl =
{
    /* IUnknown */
    IDirectDrawSurface3Impl_QueryInterface,
    IDirectDrawSurface3Impl_AddRef,
    IDirectDrawSurface3Impl_Release,
    /* IDirectDrawSurface */
    IDirectDrawSurface3Impl_AddAttachedSurface,
    IDirectDrawSurface3Impl_AddOverlayDirtyRect,
    IDirectDrawSurface3Impl_Blt,
    IDirectDrawSurface3Impl_BltBatch,
    IDirectDrawSurface3Impl_BltFast,
    IDirectDrawSurface3Impl_DeleteAttachedSurface,
    IDirectDrawSurface3Impl_EnumAttachedSurfaces,
    IDirectDrawSurface3Impl_EnumOverlayZOrders,
    IDirectDrawSurface3Impl_Flip,
    IDirectDrawSurface3Impl_GetAttachedSurface,
    IDirectDrawSurface3Impl_GetBltStatus,
    IDirectDrawSurface3Impl_GetCaps,
    IDirectDrawSurface3Impl_GetClipper,
    IDirectDrawSurface3Impl_GetColorKey,
    IDirectDrawSurface3Impl_GetDC,
    IDirectDrawSurface3Impl_GetFlipStatus,
    IDirectDrawSurface3Impl_GetOverlayPosition,
    IDirectDrawSurface3Impl_GetPalette,
    IDirectDrawSurface3Impl_GetPixelFormat,
    IDirectDrawSurface3Impl_GetSurfaceDesc,
    IDirectDrawSurface3Impl_Initialize,
    IDirectDrawSurface3Impl_IsLost,
    IDirectDrawSurface3Impl_Lock,
    IDirectDrawSurface3Impl_ReleaseDC,
    IDirectDrawSurface3Impl_Restore,
    IDirectDrawSurface3Impl_SetClipper,
    IDirectDrawSurface3Impl_SetColorKey,
    IDirectDrawSurface3Impl_SetOverlayPosition,
    IDirectDrawSurface3Impl_SetPalette,
    IDirectDrawSurface3Impl_Unlock,
    IDirectDrawSurface3Impl_UpdateOverlay,
    IDirectDrawSurface3Impl_UpdateOverlayDisplay,
    IDirectDrawSurface3Impl_UpdateOverlayZOrder,
    /* IDirectDrawSurface 2 */
    IDirectDrawSurface3Impl_GetDDInterface,
    IDirectDrawSurface3Impl_PageLock,
    IDirectDrawSurface3Impl_PageUnlock,
    /* IDirectDrawSurface 3 */
    IDirectDrawSurface3Impl_SetSurfaceDesc
};
/* Vtable for the ddrawex IDirectDrawSurface4 wrapper.
 * Slot order is ABI: IUnknown first, then the methods in the order each
 * interface revision introduced them.  Do not reorder entries. */
const IDirectDrawSurface4Vtbl IDirectDrawSurface4_Vtbl =
{
    /*** IUnknown ***/
    IDirectDrawSurface4Impl_QueryInterface,
    IDirectDrawSurface4Impl_AddRef,
    IDirectDrawSurface4Impl_Release,
    /*** IDirectDrawSurface ***/
    IDirectDrawSurface4Impl_AddAttachedSurface,
    IDirectDrawSurface4Impl_AddOverlayDirtyRect,
    IDirectDrawSurface4Impl_Blt,
    IDirectDrawSurface4Impl_BltBatch,
    IDirectDrawSurface4Impl_BltFast,
    IDirectDrawSurface4Impl_DeleteAttachedSurface,
    IDirectDrawSurface4Impl_EnumAttachedSurfaces,
    IDirectDrawSurface4Impl_EnumOverlayZOrders,
    IDirectDrawSurface4Impl_Flip,
    IDirectDrawSurface4Impl_GetAttachedSurface,
    IDirectDrawSurface4Impl_GetBltStatus,
    IDirectDrawSurface4Impl_GetCaps,
    IDirectDrawSurface4Impl_GetClipper,
    IDirectDrawSurface4Impl_GetColorKey,
    IDirectDrawSurface4Impl_GetDC,
    IDirectDrawSurface4Impl_GetFlipStatus,
    IDirectDrawSurface4Impl_GetOverlayPosition,
    IDirectDrawSurface4Impl_GetPalette,
    IDirectDrawSurface4Impl_GetPixelFormat,
    IDirectDrawSurface4Impl_GetSurfaceDesc,
    IDirectDrawSurface4Impl_Initialize,
    IDirectDrawSurface4Impl_IsLost,
    IDirectDrawSurface4Impl_Lock,
    IDirectDrawSurface4Impl_ReleaseDC,
    IDirectDrawSurface4Impl_Restore,
    IDirectDrawSurface4Impl_SetClipper,
    IDirectDrawSurface4Impl_SetColorKey,
    IDirectDrawSurface4Impl_SetOverlayPosition,
    IDirectDrawSurface4Impl_SetPalette,
    IDirectDrawSurface4Impl_Unlock,
    IDirectDrawSurface4Impl_UpdateOverlay,
    IDirectDrawSurface4Impl_UpdateOverlayDisplay,
    IDirectDrawSurface4Impl_UpdateOverlayZOrder,
    /*** IDirectDrawSurface2 ***/
    IDirectDrawSurface4Impl_GetDDInterface,
    IDirectDrawSurface4Impl_PageLock,
    IDirectDrawSurface4Impl_PageUnlock,
    /*** IDirectDrawSurface3 ***/
    IDirectDrawSurface4Impl_SetSurfaceDesc,
    /*** IDirectDrawSurface4 ***/
    IDirectDrawSurface4Impl_SetPrivateData,
    IDirectDrawSurface4Impl_GetPrivateData,
    IDirectDrawSurface4Impl_FreePrivateData,
    IDirectDrawSurface4Impl_GetUniquenessValue,
    IDirectDrawSurface4Impl_ChangeUniquenessValue,
};
/* dds_get_outer
 *
 * Given a surface from ddraw.dll, retrieves the pointer to the ddrawex.dll
 * wrapper around it.
 *
 * Parameters:
 *  inner: ddraw.dll surface to retrieve the outer surface from
 *
 * Returns:
 *  The surface wrapper. If there is none yet, a new one is created
 */
IDirectDrawSurface4 *dds_get_outer(IDirectDrawSurface4 *inner)
{
    IDirectDrawSurface4 *outer = NULL;
    DWORD size = sizeof(outer);
    HRESULT hr;
    if(!inner) return NULL;

    /* The wrapper pointer is stashed in the inner surface's private data
     * under IID_DDrawexPriv, so repeated lookups find the same wrapper. */
    hr = IDirectDrawSurface4_GetPrivateData(inner,
                                            &IID_DDrawexPriv,
                                            &outer,
                                            &size);
    if(FAILED(hr) || outer == NULL)
    {
        IDirectDrawSurfaceImpl *impl;

        TRACE("Creating new ddrawex surface wrapper for surface %p\n", inner);
        impl = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*impl));
        if(!impl)
        {
            /* Bug fix: the allocation result was dereferenced unchecked. */
            ERR("Out of memory allocating a surface wrapper\n");
            return NULL;
        }
        impl->ref = 1;
        impl->IDirectDrawSurface3_Vtbl = &IDirectDrawSurface3_Vtbl;
        impl->IDirectDrawSurface4_Vtbl = &IDirectDrawSurface4_Vtbl;
        IDirectDrawSurface4_AddRef(inner); /* wrapper keeps the inner surface alive */
        impl->parent = inner;

        outer = dds4_from_impl(impl);

        hr = IDirectDrawSurface4_SetPrivateData(inner,
                                                &IID_DDrawexPriv,
                                                &outer,
                                                sizeof(outer),
                                                0 /* Flags */);
        if(FAILED(hr))
        {
            /* Non-fatal: the wrapper still works, but the next lookup
             * will create a second wrapper for the same inner surface. */
            ERR("IDirectDrawSurface4_SetPrivateData failed\n");
        }
    }
    return outer;
}
/* Unwrap a ddrawex surface: return the ddraw.dll surface it delegates to,
 * or NULL if there is no wrapper object. */
IDirectDrawSurface4 *dds_get_inner(IDirectDrawSurface4 *outer)
{
    IDirectDrawSurfaceImpl *wrapper = impl_from_dds4(outer);
    return wrapper ? wrapper->parent : NULL;
}
/* Mark the surface as using a permanent DC and prime This->hdc by doing a
 * GetDC/ReleaseDC round trip on the wrapped surface.
 * NOTE(review): permanent_dc is set before GetDC succeeds, so it stays TRUE
 * even on failure — preserved as-is; confirm callers rely on the HRESULT. */
HRESULT prepare_permanent_dc(IDirectDrawSurface4 *iface)
{
    IDirectDrawSurfaceImpl *surface = impl_from_dds4(iface);
    HRESULT hr;

    surface->permanent_dc = TRUE;

    hr = IDirectDrawSurface4_GetDC(surface->parent, &surface->hdc);
    if (SUCCEEDED(hr))
        hr = IDirectDrawSurface4_ReleaseDC(surface->parent, surface->hdc);
    return hr;
}
| 36.66081 | 121 | 0.655359 | [
"object"
] |
d0bfcf5da27b15c200a2dc89ab281d93df2a7daa | 3,534 | c | C | src/lib/hlp/help.c | prepare/spice3f5 | a0d8c69d43927b7ced9cb619e3faa3d56332566a | [
"BSD-4-Clause-UC"
] | 4 | 2018-02-21T17:31:40.000Z | 2022-03-03T01:43:32.000Z | src/lib/hlp/help.c | prepare/spice3f5 | a0d8c69d43927b7ced9cb619e3faa3d56332566a | [
"BSD-4-Clause-UC"
] | null | null | null | src/lib/hlp/help.c | prepare/spice3f5 | a0d8c69d43927b7ced9cb619e3faa3d56332566a | [
"BSD-4-Clause-UC"
] | 2 | 2019-07-20T00:47:29.000Z | 2020-01-06T19:18:21.000Z | /**********
Copyright 1990 Regents of the University of California. All rights reserved.
Author: 1986 Wayne A. Christopher, U. C. Berkeley CAD Group
**********/
/*
* The main entry point for the help system.
*/
#include "spice.h"
#include "cpstd.h"
#include "hlpdefs.h"
#include "suffix.h"
char *hlp_directory;
extern char *hlp_filelist[];
int hlp_ftablesize = 0;
void
hlp_main(path, wl)
char *path;
wordlist *wl;
{
topic *top;
fplace *place;
hlp_directory = path;
if (wl) {
while (wl) {
if (!(place = findglobalsubject(wl->wl_word))) {
fprintf(stderr, "Error: No such topic: %s\n",
wl->wl_word);
wl = wl->wl_next;
continue;
}
if (!(top = hlp_read(place))) {
fprintf(stderr, "Error: can't read topic\n");
wl = wl->wl_next;
continue;
}
hlp_provide(top);
wl = wl->wl_next;
}
} else {
if (!(place = findglobalsubject("main"))) {
fprintf(stderr, "Error: no main topic\n");
return;
}
if (!(top = hlp_read(place))) {
fprintf(stderr, "Error: can't read topic\n");
return;
}
hlp_provide(top);
}
#ifndef HAS_X11
hlp_free();
#endif
return;
}
fplace *
findglobalsubject(subject)
char *subject;
{
fplace *place;
char **dict;
long fpos;
place = 0;
for (dict = hlp_filelist; *dict && **dict; dict++) {
fpos = findsubject(*dict, subject);
if (fpos != -1) {
place = (fplace *) tmalloc(sizeof(fplace));
place->fpos = fpos;
place->filename = copy(*dict);
place->fp = hlp_fopen(*dict);
break;
}
}
return(place);
}
/* see if file is on filelist */
bool
hlp_approvedfile(filename)
char *filename;
{
char **s;
for (s = hlp_filelist; *s && **s; s++) {
if (cieq(*s, filename)) return(true);
}
return(false);
}
/* keep file pointers on top level files so we don't always have to do
fopen's */
/* Open (or return a cached stream for) the help file <hlp_directory>/<filename>.txt.
 * Streams for top-level files are cached in a static table so repeated
 * lookups avoid fopen.  Returns NULL (after perror) if the file cannot
 * be opened. */
FILE *hlp_fopen(filename)
char *filename;
{
    static struct {
        char filename[BSIZE_SP];
        FILE *fp;
    } hlp_ftable[32];
    int i;
    FILE *fp;
    char buf[BSIZE_SP];

    for (i = 0; i < hlp_ftablesize; i++) {
        if (cieq(filename, hlp_ftable[i].filename))
            return (hlp_ftable[i].fp);
    }

    /* not already in table: build the pathname and open the file */
    strcpy(buf, hlp_directory);
    strcat(buf, DIR_PATHSEP);
    strcat(buf, filename);
    strcat(buf, ".txt");
    hlp_pathfix(buf);
    if (!(fp = fopen(buf, "r"))) {
        perror(buf);
        return (NULL);
    }
    /* Bug fix: only cache while there is room.  Previously the 33rd
       distinct file wrote past the end of the fixed 32-entry table. */
    if (hlp_ftablesize < (int) (sizeof(hlp_ftable) / sizeof(hlp_ftable[0]))) {
        strcpy(hlp_ftable[hlp_ftablesize].filename, filename);
        hlp_ftable[hlp_ftablesize].fp = fp;
        hlp_ftablesize++;
    }
    return (fp);
}
/* ARGSUSED */
void
hlp_pathfix(buf)
char *buf;
{
char *s, *t, *u, bufx[1025];
char *dir_pathsep;
extern char *cp_tildexpand( );
dir_pathsep = DIR_PATHSEP;
if (!buf)
return;
s = cp_tildexpand(buf);
if (sizeof(DIR_PATHSEP) == 2) {
if (*dir_pathsep != '/') {
for (t = s; *t; t++) {
if (*t == '/')
*t = *dir_pathsep;
}
} else
strcpy(buf, s);
} else {
/* For vms; this probably doesn't work, but neither did the old code */
for (s = bufx, t = buf; *t; t++) {
if (*t == '/')
for (u = DIR_PATHSEP; *u; u++) {
*s++ = *u;
}
else
*s++ = *t;
}
*s = 0;
strcpy(buf, s);
}
if (s)
tfree(s);
return;
}
| 20.079545 | 77 | 0.531692 | [
"cad"
] |
fd9d3f74a9a4989bde6d40ba037ebc8a50ac96c8 | 1,724 | h | C | vs/include/alibabacloud/vs/model/DescribeVsPullStreamInfoConfigResult.h | iamzken/aliyun-openapi-cpp-sdk | 3c991c9ca949b6003c8f498ce7a672ea88162bf1 | [
"Apache-2.0"
] | 89 | 2018-02-02T03:54:39.000Z | 2021-12-13T01:32:55.000Z | vs/include/alibabacloud/vs/model/DescribeVsPullStreamInfoConfigResult.h | iamzken/aliyun-openapi-cpp-sdk | 3c991c9ca949b6003c8f498ce7a672ea88162bf1 | [
"Apache-2.0"
] | 89 | 2018-03-14T07:44:54.000Z | 2021-11-26T07:43:25.000Z | vs/include/alibabacloud/vs/model/DescribeVsPullStreamInfoConfigResult.h | aliyun/aliyun-openapi-cpp-sdk | 0cf5861ece17dfb0bb251f13bf3fbdb39c0c6e36 | [
"Apache-2.0"
] | 69 | 2018-01-22T09:45:52.000Z | 2022-03-28T07:58:38.000Z | /*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ALIBABACLOUD_VS_MODEL_DESCRIBEVSPULLSTREAMINFOCONFIGRESULT_H_
#define ALIBABACLOUD_VS_MODEL_DESCRIBEVSPULLSTREAMINFOCONFIGRESULT_H_
#include <string>
#include <vector>
#include <utility>
#include <alibabacloud/core/ServiceResult.h>
#include <alibabacloud/vs/VsExport.h>
namespace AlibabaCloud
{
namespace Vs
{
namespace Model
{
class ALIBABACLOUD_VS_EXPORT DescribeVsPullStreamInfoConfigResult : public ServiceResult
{
public:
struct LiveAppRecord
{
std::string sourceUrl;
std::string streamName;
std::string endTime;
std::string domainName;
std::string startTime;
std::string appName;
};
DescribeVsPullStreamInfoConfigResult();
explicit DescribeVsPullStreamInfoConfigResult(const std::string &payload);
~DescribeVsPullStreamInfoConfigResult();
std::vector<LiveAppRecord> getLiveAppRecordList()const;
protected:
void parse(const std::string &payload);
private:
std::vector<LiveAppRecord> liveAppRecordList_;
};
}
}
}
#endif // !ALIBABACLOUD_VS_MODEL_DESCRIBEVSPULLSTREAMINFOCONFIGRESULT_H_ | 28.733333 | 91 | 0.7529 | [
"vector",
"model"
] |
fd9d41dba7a3afe382f8f45b3d014bf1d2828acf | 746 | h | C | Engine/include/Arclight/Graphics/Rect.h | ArclightEngine/ArclightEngine | f39eb0f22842eb94967982388f73ba942ebfd355 | [
"MIT"
] | 2 | 2021-10-05T03:27:03.000Z | 2021-12-14T02:56:25.000Z | Engine/include/Arclight/Graphics/Rect.h | ArclightEngine/ArclightEngine | f39eb0f22842eb94967982388f73ba942ebfd355 | [
"MIT"
] | 7 | 2021-09-30T01:22:25.000Z | 2022-01-07T01:33:07.000Z | Engine/include/Arclight/Graphics/Rect.h | ArclightEngine/ArclightEngine | f39eb0f22842eb94967982388f73ba942ebfd355 | [
"MIT"
] | null | null | null | #pragma once
#include <Arclight/Vector.h>
namespace Arclight {
template<typename T>
struct Rect {
union{
Vector2<T> origin;
struct {
T left; // x1
T top; // y1
};
};
union {
Vector2<T> end;
struct {
T right; // x2
T bottom; // y2
};
};
Rect() = default;
Rect(const Vector2<T>& origin, const Vector2<T>& end) : origin(origin), end(end) {}
Rect(const Vector2<T>& size)
: origin(0, 0), end(size) {}
inline T Width() { return right - left; }
inline T Height() { return bottom - top; }
// Returns whether a point intersects
inline bool Intersect(Vector2<T> point){
return (point.x >= left && point.x < right && point.y >= top && point.y < bottom);
}
};
using Rectf = Rect<float>;
} // namespace Arclight | 18.65 | 84 | 0.613941 | [
"vector"
] |
fdb404861c9863cf8239afb07acace5d93d46962 | 14,170 | h | C | stage5/02-installqtlibs/qt5.15/include/QtQuickParticles/5.15.1/QtQuickParticles/private/qquickimageparticle_p.h | damir1996iz/tabletos | bfc2f650e3291ce63f083bcf36e81392341fbc40 | [
"BSD-3-Clause"
] | null | null | null | stage5/02-installqtlibs/qt5.15/include/QtQuickParticles/5.15.1/QtQuickParticles/private/qquickimageparticle_p.h | damir1996iz/tabletos | bfc2f650e3291ce63f083bcf36e81392341fbc40 | [
"BSD-3-Clause"
] | null | null | null | stage5/02-installqtlibs/qt5.15/include/QtQuickParticles/5.15.1/QtQuickParticles/private/qquickimageparticle_p.h | damir1996iz/tabletos | bfc2f650e3291ce63f083bcf36e81392341fbc40 | [
"BSD-3-Clause"
] | null | null | null | /****************************************************************************
**
** Copyright (C) 2019 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the QtQuick module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 3 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL3 included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 3 requirements
** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 2.0 or (at your option) the GNU General
** Public license version 3 or any later version approved by the KDE Free
** Qt Foundation. The licenses are as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-2.0.html and
** https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
#ifndef QQUICKIMAGEPARTICLE_P_H
#define QQUICKIMAGEPARTICLE_P_H
//
// W A R N I N G
// -------------
//
// This file is not part of the Qt API. It exists purely as an
// implementation detail. This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//
#include "qquickparticlepainter_p.h"
#include "qquickdirection_p.h"
#include <private/qquickpixmapcache_p.h>
#include <QQmlListProperty>
#include <QtGui/qcolor.h>
#include <QtQuick/qsgmaterial.h>
QT_BEGIN_NAMESPACE
class ImageMaterialData;
class QSGGeometryNode;
class QSGMaterial;
class QQuickSprite;
class QQuickStochasticEngine;
class QRhi;
struct SimpleVertex {
float x;
float y;
float t;
float lifeSpan;
float size;
float endSize;
float vx;
float vy;
float ax;
float ay;
};
struct ColoredVertex {
float x;
float y;
float t;
float lifeSpan;
float size;
float endSize;
float vx;
float vy;
float ax;
float ay;
Color4ub color;
};
struct DeformableVertex {
float x;
float y;
float tx;
float ty;
float t;
float lifeSpan;
float size;
float endSize;
float vx;
float vy;
float ax;
float ay;
Color4ub color;
float xx;
float xy;
float yx;
float yy;
float rotation;
float rotationVelocity;
float autoRotate;//Assumed that GPUs prefer floats to bools
};
struct SpriteVertex {
float x;
float y;
float tx;
float ty;
float t;
float lifeSpan;
float size;
float endSize;
float vx;
float vy;
float ax;
float ay;
Color4ub color;
float xx;
float xy;
float yx;
float yy;
float rotation;
float rotationVelocity;
float autoRotate;//Assumed that GPUs prefer floats to bools
float animW;
float animH;
float animProgress;
float animX1;
float animY1;
float animX2;
float animY2;
};
template <typename Vertex>
struct Vertices {
Vertex v1;
Vertex v2;
Vertex v3;
Vertex v4;
};
class ImageMaterial : public QSGMaterial
{
public:
virtual ImageMaterialData *state() = 0;
};
class QQuickImageParticle : public QQuickParticlePainter
{
Q_OBJECT
Q_PROPERTY(QUrl source READ image WRITE setImage NOTIFY imageChanged)
Q_PROPERTY(QQmlListProperty<QQuickSprite> sprites READ sprites)
Q_PROPERTY(Status status READ status NOTIFY statusChanged)
//### Is it worth having progress like Image has?
//Q_PROPERTY(qreal progress READ progress NOTIFY progressChanged)
Q_PROPERTY(QUrl colorTable READ colortable WRITE setColortable NOTIFY colortableChanged)
Q_PROPERTY(QUrl sizeTable READ sizetable WRITE setSizetable NOTIFY sizetableChanged)
Q_PROPERTY(QUrl opacityTable READ opacitytable WRITE setOpacitytable NOTIFY opacitytableChanged)
//###Now just colorize - add a flag for 'solid' color particles(where the img is just a mask?)?
Q_PROPERTY(QColor color READ color WRITE setColor NOTIFY colorChanged RESET resetColor)
//Stacks (added) with individual colorVariations
Q_PROPERTY(qreal colorVariation READ colorVariation WRITE setColorVariation NOTIFY colorVariationChanged RESET resetColor)
Q_PROPERTY(qreal redVariation READ redVariation WRITE setRedVariation NOTIFY redVariationChanged RESET resetColor)
Q_PROPERTY(qreal greenVariation READ greenVariation WRITE setGreenVariation NOTIFY greenVariationChanged RESET resetColor)
Q_PROPERTY(qreal blueVariation READ blueVariation WRITE setBlueVariation NOTIFY blueVariationChanged RESET resetColor)
//Stacks (multiplies) with the Alpha in the color, mostly here so you can use svg color names (which have full alpha)
Q_PROPERTY(qreal alpha READ alpha WRITE setAlpha NOTIFY alphaChanged RESET resetColor)
Q_PROPERTY(qreal alphaVariation READ alphaVariation WRITE setAlphaVariation NOTIFY alphaVariationChanged RESET resetColor)
Q_PROPERTY(qreal rotation READ rotation WRITE setRotation NOTIFY rotationChanged RESET resetRotation)
Q_PROPERTY(qreal rotationVariation READ rotationVariation WRITE setRotationVariation NOTIFY rotationVariationChanged RESET resetRotation)
Q_PROPERTY(qreal rotationVelocity READ rotationVelocity WRITE setRotationVelocity NOTIFY rotationVelocityChanged RESET resetRotation)
Q_PROPERTY(qreal rotationVelocityVariation READ rotationVelocityVariation WRITE setRotationVelocityVariation NOTIFY rotationVelocityVariationChanged RESET resetRotation)
//If true, then will face the direction of motion. Stacks with rotation, e.g. setting rotation
//to 180 will lead to facing away from the direction of motion
Q_PROPERTY(bool autoRotation READ autoRotation WRITE setAutoRotation NOTIFY autoRotationChanged RESET resetRotation)
//xVector is the vector from the top-left point to the top-right point, and is multiplied by current size
Q_PROPERTY(QQuickDirection* xVector READ xVector WRITE setXVector NOTIFY xVectorChanged RESET resetDeformation)
//yVector is the same, but top-left to bottom-left. The particle is always a parallelogram.
Q_PROPERTY(QQuickDirection* yVector READ yVector WRITE setYVector NOTIFY yVectorChanged RESET resetDeformation)
Q_PROPERTY(bool spritesInterpolate READ spritesInterpolate WRITE setSpritesInterpolate NOTIFY spritesInterpolateChanged)
Q_PROPERTY(EntryEffect entryEffect READ entryEffect WRITE setEntryEffect NOTIFY entryEffectChanged)
QML_NAMED_ELEMENT(ImageParticle)
public:
explicit QQuickImageParticle(QQuickItem *parent = 0);
virtual ~QQuickImageParticle();
enum Status { Null, Ready, Loading, Error };
Q_ENUM(Status)
QQmlListProperty<QQuickSprite> sprites();
QQuickStochasticEngine* spriteEngine() {return m_spriteEngine;}
enum EntryEffect {
None = 0,
Fade = 1,
Scale = 2
};
Q_ENUM(EntryEffect)
enum PerformanceLevel{//TODO: Expose?
Unknown = 0,
Simple,
Colored,
Deformable,
Tabled,
Sprites
};
QUrl image() const { return m_image ? m_image->source : QUrl(); }
void setImage(const QUrl &image);
QUrl colortable() const { return m_colorTable ? m_colorTable->source : QUrl(); }
void setColortable(const QUrl &table);
QUrl sizetable() const { return m_sizeTable ? m_sizeTable->source : QUrl(); }
void setSizetable (const QUrl &table);
QUrl opacitytable() const { return m_opacityTable ? m_opacityTable->source : QUrl(); }
void setOpacitytable(const QUrl &table);
QColor color() const { return m_color; }
void setColor(const QColor &color);
qreal colorVariation() const { return m_color_variation; }
void setColorVariation(qreal var);
qreal alphaVariation() const { return m_alphaVariation; }
qreal alpha() const { return m_alpha; }
qreal redVariation() const { return m_redVariation; }
qreal greenVariation() const { return m_greenVariation; }
qreal blueVariation() const { return m_blueVariation; }
qreal rotation() const { return m_rotation; }
qreal rotationVariation() const { return m_rotationVariation; }
qreal rotationVelocity() const { return m_rotationVelocity; }
qreal rotationVelocityVariation() const { return m_rotationVelocityVariation; }
bool autoRotation() const { return m_autoRotation; }
QQuickDirection* xVector() const { return m_xVector; }
QQuickDirection* yVector() const { return m_yVector; }
bool spritesInterpolate() const { return m_spritesInterpolate; }
bool bypassOptimizations() const { return m_bypassOptimizations; }
EntryEffect entryEffect() const { return m_entryEffect; }
Status status() const { return m_status; }
void resetColor();
void resetRotation();
void resetDeformation();
Q_SIGNALS:
void imageChanged();
void colortableChanged();
void sizetableChanged();
void opacitytableChanged();
void colorChanged();
void colorVariationChanged();
void alphaVariationChanged(qreal arg);
void alphaChanged(qreal arg);
void redVariationChanged(qreal arg);
void greenVariationChanged(qreal arg);
void blueVariationChanged(qreal arg);
void rotationChanged(qreal arg);
void rotationVariationChanged(qreal arg);
void rotationVelocityChanged(qreal arg);
void rotationVelocityVariationChanged(qreal arg);
void autoRotationChanged(bool arg);
void xVectorChanged(QQuickDirection* arg);
void yVectorChanged(QQuickDirection* arg);
void spritesInterpolateChanged(bool arg);
void bypassOptimizationsChanged(bool arg);
void entryEffectChanged(EntryEffect arg);
void statusChanged(Status arg);
public Q_SLOTS:
void reloadColor(const Color4ub &c, QQuickParticleData* d);
void setAlphaVariation(qreal arg);
void setAlpha(qreal arg);
void setRedVariation(qreal arg);
void setGreenVariation(qreal arg);
void setBlueVariation(qreal arg);
void setRotation(qreal arg);
void setRotationVariation(qreal arg);
void setRotationVelocity(qreal arg);
void setRotationVelocityVariation(qreal arg);
void setAutoRotation(bool arg);
void setXVector(QQuickDirection* arg);
void setYVector(QQuickDirection* arg);
void setSpritesInterpolate(bool arg);
void setBypassOptimizations(bool arg);
void setEntryEffect(EntryEffect arg);
protected:
void reset() override;
void initialize(int gIdx, int pIdx) override;
void commit(int gIdx, int pIdx) override;
QSGNode *updatePaintNode(QSGNode *, UpdatePaintNodeData *) override;
void prepareNextFrame(QSGNode**);
void buildParticleNodes(QSGNode**);
void sceneGraphInvalidated() override;
private Q_SLOTS:
void createEngine(); //### method invoked by sprite list changing (in engine.h) - pretty nasty
void spriteAdvance(int spriteIndex);
void spritesUpdate(qreal time = 0 );
void mainThreadFetchImageData();
void finishBuildParticleNodes(QSGNode **n);
private:
struct ImageData {
QUrl source;
QQuickPixmap pix;
};
QScopedPointer<ImageData> m_image;
QScopedPointer<ImageData> m_colorTable;
QScopedPointer<ImageData> m_sizeTable;
QScopedPointer<ImageData> m_opacityTable;
bool loadingSomething();
QColor m_color;
qreal m_color_variation;
QSGNode *m_outgoingNode;
QHash<int, QSGGeometryNode *> m_nodes;
QHash<int, int> m_idxStarts;//TODO: Proper resizing will lead to needing a spriteEngine per particle - do this after sprite engine gains transparent sharing?
QList<QPair<int, int> > m_startsIdx;//Same data, optimized for alternate retrieval
int m_lastIdxStart;
QSGMaterial *m_material;
// derived values...
qreal m_alphaVariation;
qreal m_alpha;
qreal m_redVariation;
qreal m_greenVariation;
qreal m_blueVariation;
qreal m_rotation;
qreal m_rotationVariation;
qreal m_rotationVelocity;
qreal m_rotationVelocityVariation;
bool m_autoRotation;
QQuickDirection* m_xVector;
QQuickDirection* m_yVector;
QList<QQuickSprite*> m_sprites;
QQuickSpriteEngine* m_spriteEngine;
bool m_spritesInterpolate;
bool m_explicitColor;
bool m_explicitRotation;
bool m_explicitDeformation;
bool m_explicitAnimation;
QHash<int, QVector<QQuickParticleData*> > m_shadowData;
void clearShadows();
QQuickParticleData* getShadowDatum(QQuickParticleData* datum);
bool m_bypassOptimizations;
PerformanceLevel perfLevel;
PerformanceLevel m_lastLevel;
bool m_debugMode;
template<class Vertex>
void initTexCoords(Vertex* v, int count){
Vertex* end = v + count;
while (v < end){
v[0].tx = 0;
v[0].ty = 0;
v[1].tx = 1;
v[1].ty = 0;
v[2].tx = 0;
v[2].ty = 1;
v[3].tx = 1;
v[3].ty = 1;
v += 4;
}
}
ImageMaterialData *getState(QSGMaterial *m) {
return static_cast<ImageMaterial *>(m)->state();
}
EntryEffect m_entryEffect;
Status m_status;
int m_startedImageLoading;
QRhi *m_rhi;
bool m_apiChecked;
};
QT_END_NAMESPACE
#endif // QQUICKIMAGEPARTICLE_P_H
| 30.277778 | 173 | 0.718137 | [
"vector",
"solid"
] |
fdb59ba71aae153b09a64e30de24e2960e66d382 | 9,126 | c | C | lib/ccv_memory.c | xiaoye77/ccv | 655cb2c4a95694a69b81eab5ccb823dbcaa805e4 | [
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null | lib/ccv_memory.c | xiaoye77/ccv | 655cb2c4a95694a69b81eab5ccb823dbcaa805e4 | [
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null | lib/ccv_memory.c | xiaoye77/ccv | 655cb2c4a95694a69b81eab5ccb823dbcaa805e4 | [
"CC0-1.0",
"CC-BY-4.0"
] | 1 | 2019-05-18T15:50:49.000Z | 2019-05-18T15:50:49.000Z | #include "ccv.h"
#include "ccv_internal.h"
#include "3rdparty/siphash/siphash24.h"
static __thread ccv_cache_t ccv_cache;
/**
* For new typed cache object:
* ccv_dense_matrix_t: type 0
* ccv_array_t: type 1
**/
/* option to enable/disable cache */
static __thread int ccv_cache_opt = 0;
ccv_dense_matrix_t* ccv_dense_matrix_new(int rows, int cols, int type, void* data, uint64_t sig)
{
ccv_dense_matrix_t* mat;
if (ccv_cache_opt && sig != 0 && !data && !(type & CCV_NO_DATA_ALLOC))
{
uint8_t type;
mat = (ccv_dense_matrix_t*)ccv_cache_out(&ccv_cache, sig, &type);
if (mat)
{
assert(type == 0);
mat->type |= CCV_GARBAGE; // set the flag so the upper level function knows this is from recycle-bin
mat->refcount = 1;
return mat;
}
}
if (type & CCV_NO_DATA_ALLOC)
{
mat = (ccv_dense_matrix_t*)ccmalloc(sizeof(ccv_dense_matrix_t));
mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE | CCV_NO_DATA_ALLOC) & ~CCV_GARBAGE;
mat->data.u8 = data;
} else {
const size_t hdr_size = (sizeof(ccv_dense_matrix_t) + 15) & -16;
mat = (ccv_dense_matrix_t*)(data ? data : ccmalloc(ccv_compute_dense_matrix_size(rows, cols, type)));
mat->type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE) & ~CCV_GARBAGE;
mat->type |= data ? CCV_UNMANAGED : CCV_REUSABLE; // it still could be reusable because the signature could be derived one.
mat->data.u8 = (unsigned char*)mat + hdr_size;
}
mat->sig = sig;
#if CCV_NNC_TENSOR_TFB
mat->reserved0 = 0;
mat->resides = CCV_TENSOR_CPU_MEMORY;
mat->format = CCV_TENSOR_FORMAT_NHWC;
mat->datatype = CCV_GET_DATA_TYPE(type);
mat->channels = CCV_GET_CHANNEL(type);
mat->reserved1 = 0;
#endif
mat->rows = rows;
mat->cols = cols;
mat->step = CCV_GET_STEP(cols, type);
mat->refcount = 1;
return mat;
}
ccv_dense_matrix_t* ccv_dense_matrix_renew(ccv_dense_matrix_t* x, int rows, int cols, int types, int prefer_type, uint64_t sig)
{
if (x != 0)
{
assert(x->rows == rows && x->cols == cols && (CCV_GET_DATA_TYPE(x->type) & types) && (CCV_GET_CHANNEL(x->type) == CCV_GET_CHANNEL(types)));
prefer_type = CCV_GET_DATA_TYPE(x->type) | CCV_GET_CHANNEL(x->type);
}
if (sig != 0)
sig = ccv_cache_generate_signature((const char*)&prefer_type, sizeof(int), sig, CCV_EOF_SIGN);
if (x == 0)
{
x = ccv_dense_matrix_new(rows, cols, prefer_type, 0, sig);
} else {
x->sig = sig;
}
return x;
}
void ccv_make_matrix_mutable(ccv_matrix_t* mat)
{
int type = *(int*)mat;
if (type & CCV_MATRIX_DENSE)
{
ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
dmt->sig = 0;
dmt->type &= ~CCV_REUSABLE;
}
}
void ccv_make_matrix_immutable(ccv_matrix_t* mat)
{
int type = *(int*)mat;
if (type & CCV_MATRIX_DENSE)
{
ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
assert(dmt->sig == 0); // you cannot make matrix with derived signature immutable (it is immutable already)
/* immutable matrix made this way is not reusable (collected), because its signature
* only depends on the content, not the operation to generate it */
dmt->type &= ~CCV_REUSABLE;
dmt->sig = ccv_cache_generate_signature((char*)dmt->data.u8, dmt->rows * dmt->step, (uint64_t)dmt->type, CCV_EOF_SIGN);
}
}
ccv_dense_matrix_t ccv_dense_matrix(int rows, int cols, int type, void* data, uint64_t sig)
{
ccv_dense_matrix_t mat;
mat.reserved0 = 0;
mat.sig = sig;
mat.type = (CCV_GET_CHANNEL(type) | CCV_GET_DATA_TYPE(type) | CCV_MATRIX_DENSE | CCV_NO_DATA_ALLOC | CCV_UNMANAGED) & ~CCV_GARBAGE;
mat.rows = rows;
mat.cols = cols;
mat.step = CCV_GET_STEP(cols, type);
mat.refcount = 1;
#if CCV_NNC_TENSOR_TFB
mat.reserved0 = 0;
mat.resides = CCV_TENSOR_CPU_MEMORY;
mat.format = CCV_TENSOR_FORMAT_NHWC | CCV_GET_DATA_TYPE(type);
mat.channels = CCV_GET_CHANNEL(type);
mat.reserved1 = 0;
#endif
mat.data.u8 = (unsigned char*)data;
return mat;
}
ccv_sparse_matrix_t* ccv_sparse_matrix_new(int rows, int cols, int type, int major, uint64_t sig)
{
ccv_sparse_matrix_t* mat;
mat = (ccv_sparse_matrix_t*)ccmalloc(sizeof(ccv_sparse_matrix_t));
mat->rows = rows;
mat->cols = cols;
mat->type = type | CCV_MATRIX_SPARSE | ((type & CCV_DENSE_VECTOR) ? CCV_DENSE_VECTOR : CCV_SPARSE_VECTOR);
mat->major = major;
mat->prime_index = 1; // See ccv_util.c to know why this is 1 and why size is 2.
mat->size = 2;
mat->rnum = 0;
mat->refcount = 1;
mat->index = (ccv_sparse_matrix_index_t*)cccalloc(sizeof(ccv_sparse_matrix_index_t), mat->size);
mat->vector = (ccv_sparse_matrix_vector_t*)ccmalloc(sizeof(ccv_sparse_matrix_vector_t) * mat->size);
return mat;
}
void ccv_matrix_free_immediately(ccv_matrix_t* mat)
{
int type = *(int*)mat;
assert(!(type & CCV_UNMANAGED));
if (type & CCV_MATRIX_DENSE)
{
ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
dmt->refcount = 0;
ccfree(dmt);
} else if (type & CCV_MATRIX_SPARSE) {
ccv_sparse_matrix_t* smt = (ccv_sparse_matrix_t*)mat;
int i;
for (i = 0; i < smt->size; i++)
if (smt->index[i].ifbit)
ccfree(smt->vector[i].data.u8);
ccfree(smt->vector);
ccfree(smt);
} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
ccv_compressed_sparse_matrix_t* csm = (ccv_compressed_sparse_matrix_t*)mat;
csm->refcount = 0;
ccfree(csm);
}
}
void ccv_matrix_free(ccv_matrix_t* mat)
{
int type = *(int*)mat;
assert(!(type & CCV_UNMANAGED));
if (type & CCV_MATRIX_DENSE)
{
ccv_dense_matrix_t* dmt = (ccv_dense_matrix_t*)mat;
dmt->refcount = 0;
if (!ccv_cache_opt || // e don't enable cache
!(dmt->type & CCV_REUSABLE) || // or this is not a reusable piece
dmt->sig == 0 || // or this doesn't have valid signature
(dmt->type & CCV_NO_DATA_ALLOC)) // or this matrix is allocated as header-only, therefore we cannot cache it
ccfree(dmt);
else {
assert(CCV_GET_DATA_TYPE(dmt->type) == CCV_8U ||
CCV_GET_DATA_TYPE(dmt->type) == CCV_32S ||
CCV_GET_DATA_TYPE(dmt->type) == CCV_32F ||
CCV_GET_DATA_TYPE(dmt->type) == CCV_64S ||
CCV_GET_DATA_TYPE(dmt->type) == CCV_64F);
size_t size = ccv_compute_dense_matrix_size(dmt->rows, dmt->cols, dmt->type);
ccv_cache_put(&ccv_cache, dmt->sig, dmt, size, 0 /* type 0 */);
}
} else if (type & CCV_MATRIX_SPARSE) {
ccv_sparse_matrix_t* smt = (ccv_sparse_matrix_t*)mat;
int i;
for (i = 0; i < smt->size; i++)
{
if (smt->index[i].ifbit > 1)
ccfree(smt->vector[i].index); // It is a union of index / data, can just free them.
}
ccfree(smt->index);
ccfree(smt->vector);
ccfree(smt);
} else if ((type & CCV_MATRIX_CSR) || (type & CCV_MATRIX_CSC)) {
ccv_compressed_sparse_matrix_t* csm = (ccv_compressed_sparse_matrix_t*)mat;
csm->refcount = 0;
ccfree(csm);
}
}
ccv_array_t* ccv_array_new(int rsize, int rnum, uint64_t sig)
{
ccv_array_t* array;
if (ccv_cache_opt && sig != 0)
{
uint8_t type;
array = (ccv_array_t*)ccv_cache_out(&ccv_cache, sig, &type);
if (array)
{
assert(type == 1);
array->type |= CCV_GARBAGE;
array->refcount = 1;
return array;
}
}
array = (ccv_array_t*)ccmalloc(sizeof(ccv_array_t));
array->sig = sig;
array->type = CCV_REUSABLE & ~CCV_GARBAGE;
array->rnum = 0;
array->rsize = rsize;
array->size = ccv_max(rnum, 2 /* allocate memory for at least 2 items */);
array->data = ccmalloc((size_t)array->size * (size_t)rsize);
return array;
}
void ccv_make_array_mutable(ccv_array_t* array)
{
array->sig = 0;
array->type &= ~CCV_REUSABLE;
}
/* Seal the array: compute its content signature so that ccv_array_free can
 * later park it in the application cache for reuse via ccv_array_new. */
void ccv_make_array_immutable(ccv_array_t* array)
{
	assert(array->sig == 0);
	/* Fix: set CCV_REUSABLE instead of clearing it. With the flag cleared,
	 * the cache branch in ccv_array_free (which requires both CCV_REUSABLE
	 * and a nonzero sig) could never be taken, defeating the purpose of
	 * sealing; compare the inverse operation ccv_make_array_mutable, which
	 * clears both the signature and the flag. */
	array->type |= CCV_REUSABLE;
	/* TODO: trim the array */
	/* NOTE(review): the signature covers the full capacity (size * rsize),
	 * not just the rnum live elements -- confirm this is intended. */
	array->sig = ccv_cache_generate_signature(array->data, array->size * array->rsize, (uint64_t)array->rsize, CCV_EOF_SIGN);
}
/* Free the array unconditionally, bypassing the cache. This is also the
 * type-1 free callback registered with the cache in ccv_enable_cache. */
void ccv_array_free_immediately(ccv_array_t* array)
{
	array->refcount = 0;
	ccfree(array->data);
	ccfree(array);
}
/* Release a managed array: a reusable array with a valid signature is
 * parked in the application cache (when caching is enabled) instead of
 * being freed outright. */
void ccv_array_free(ccv_array_t* array)
{
	if (!ccv_cache_opt || !(array->type & CCV_REUSABLE) || array->sig == 0)
	{
		array->refcount = 0;
		ccfree(array->data);
		ccfree(array);
	} else {
		/* the cached cost accounts for header plus payload */
		size_t size = sizeof(ccv_array_t) + array->size * array->rsize;
		ccv_cache_put(&ccv_cache, array->sig, array, size, 1 /* type 1 */);
	}
}
/* Evict everything currently parked in the application-wide cache. */
void ccv_drain_cache(void)
{
	if (ccv_cache.rnum > 0)
		ccv_cache_cleanup(&ccv_cache);
}
/* Turn caching off and tear the cache down entirely. */
void ccv_disable_cache(void)
{
	ccv_cache_opt = 0;
	ccv_cache_close(&ccv_cache);
}
/* Turn caching on with the given byte budget. Two entry types are
 * registered: type 0 (dense matrices) and type 1 (arrays), each with its
 * matching unconditional free callback. */
void ccv_enable_cache(size_t size)
{
	ccv_cache_opt = 1;
	ccv_cache_init(&ccv_cache, size, 2, ccv_matrix_free_immediately, ccv_array_free_immediately);
}
/* Convenience wrapper: enable caching with the compiled-in default size. */
void ccv_enable_default_cache(void)
{
	ccv_enable_cache(CCV_DEFAULT_CACHE_SIZE);
}
/* 16-byte SipHash key. The literal has exactly 16 characters, so the
 * terminating NUL is intentionally dropped (legal in C, not in C++). */
static uint8_t key_siphash[16] = "libccvky4siphash";
/* Derive a 64-bit signature from 'msg' plus an optional chain of parent
 * signatures passed as varargs; the chain is terminated by a 0 value
 * (callers pass CCV_EOF_SIGN). Each parent signature is folded in by
 * re-hashing the (previous output, parent) pair. */
uint64_t ccv_cache_generate_signature(const char* msg, int len, uint64_t sig_start, ...)
{
	uint64_t sig_out, sig_in[2]; // 1 is in, 0 is out
	siphash((uint8_t*)&sig_out, (const uint8_t*)msg, len, key_siphash);
	va_list arguments;
	va_start(arguments, sig_start);
	sig_in[0] = sig_out;
	sig_in[1] = sig_start;
	while (sig_in[1] != 0)
	{
		siphash((uint8_t*)&sig_out, (const uint8_t*)sig_in, sizeof(uint64_t) * 2, key_siphash);
		sig_in[0] = sig_out;
		sig_in[1] = va_arg(arguments, uint64_t);
	}
	va_end(arguments);
	return sig_out;
}
| 29.921311 | 141 | 0.699869 | [
"object",
"vector"
] |
fdb73639e80934433b42d014979b8607384b457c | 3,156 | h | C | Half Robot/World.h | marcus1337/HalfRobot | 1f1b66b3e8a999a103b027f72d94492f465f96be | [
"MIT"
] | null | null | null | Half Robot/World.h | marcus1337/HalfRobot | 1f1b66b3e8a999a103b027f72d94492f465f96be | [
"MIT"
] | null | null | null | Half Robot/World.h | marcus1337/HalfRobot | 1f1b66b3e8a999a103b027f72d94492f465f96be | [
"MIT"
] | null | null | null | #pragma once
#include <fstream>
#include <string>
#include <sstream>
#include <ctype.h>
#include <iostream>
#include "Window.h"
#include <vector>
#include "Kartbit.h"
#include <set>
#include <algorithm>
#include "WorldMap.h"
#include <Windows.h>
class World {
public:
	/**
	 * Loads map piece number 'num' from "<world dir>/<num>.plan" into the
	 * given Kartbit. The file holds comma-separated integer tile values,
	 * one row per line, written sequentially into the piece's buffer.
	 */
	void loadMapNum(int num, Kartbit& kartbit) {
		using namespace std;
		char* kartpiece = kartbit.getKarta();
		ifstream infile(w_path + to_string(num) + ".plan");
		string line;
		int counter = 0;
		while (getline(infile, line))
		{
			stringstream ss(line);
			int a;
			while (ss >> a)
			{
				kartpiece[counter] = (char)a;
				if (ss.peek() == ',')
					ss.ignore();
				counter++;
			}
		}
		infile.close();
	}
	/**
	 * Reads "<world dir>/specs.plan": the first line holds the world size
	 * ("width height"); each following line places a previously loaded
	 * piece ("name posX posY") into the world map.
	 */
	void readInfo() {
		using namespace std;
		ifstream infile(w_path + "specs" + ".plan");
		string line;
		getline(infile, line);
		stringstream ss(line);
		int width; int height;
		ss >> width;
		ss >> height;
		worldMap.initArray(width, height);
		while (getline(infile, line))
		{
			stringstream ss(line);
			int name;
			int posX; int posY;
			ss >> name;
			ss >> posX;
			ss >> posY;
			// piece names in the file are 1-based, kartan is 0-based
			worldMap.placePiece(posX, posY, kartan[name - 1].getKarta());
		}
		infile.close();
	}
	/**
	 * Computes the largest centered 4:3 viewport that fits the window and
	 * stores it in kartWidth / kartHeight / midX.
	 */
	void initDimensions(Window& window) {
		int w = window.getWidth();
		int h = window.getHeight();
		if ((float)h / w >= 0.75f) {
			// window is at least as tall as 4:3 -> width limits the viewport
			h = (int)((3.0f / 4.0f) * w);
		}
		else {
			// window is wider than 4:3 -> height limits the viewport
			w = (int)((4.0f / 3.0f) * h);
		}
		kartHeight = h;
		kartWidth = w;
		midX = (window.getWidth() - w) / 2;
	}
	/**
	 * Loads world 'w_id': computes the viewport, loads the ten map pieces
	 * (1.plan .. 10.plan), applies specs.plan and initializes the world map.
	 */
	void init(Window& window, int w_id) {
		using namespace std;
		w_path = pathfull() + to_string(w_id) + "\\";
		// The aspect-ratio computation used to be duplicated inline here;
		// it is exactly initDimensions(), so call it instead.
		initDimensions(window);
		SDL_Renderer* renderer = window.getRenderer();
		for (int i = 0; i < 10; i++) {
			Kartbit piece;
			piece.setName(i);
			loadMapNum(i + 1, piece); // std::ref wrapper was redundant
			kartan.push_back(piece);
		}
		readInfo();
		worldMap.initWorld(renderer);
	}
	static int getKartW();
	static int getKartH();
	static int getKartMidX();
	// Returns a copy of the loaded pieces.
	std::vector<Kartbit> getWorldPieces() {
		return kartan;
	}
	void clearWorld() {
		kartan.clear();
	}
	// Plain reference return; the previous std::ref() wrapper only produced
	// a reference_wrapper that converted right back to WorldMap&.
	WorldMap& getWorld() {
		return worldMap;
	}
private:
	static int midX;
	static int kartHeight;
	static int kartWidth;
	std::vector<Kartbit> kartan;
	WorldMap worldMap;
	std::string w_path; // e.g. "<exe dir>\\bin\\save\\world<w_id>\\"
	// Directory of the running executable plus the world save prefix.
	static std::string pathfull() {
		using namespace std;
		char buffer[MAX_PATH];
		GetModuleFileName(NULL, buffer, MAX_PATH);
		string::size_type pos = string(buffer).find_last_of("\\/");
		string test = string(buffer).substr(0, pos);
		test += "\\bin\\save\\world";
		return test;
	}
};
/*for (int b = 0; b < 15; b++) {
for (int j = 0; j < 20; j++) {
cout << to_string(karta[j + b * 20]) << ",";
}
cout << endl;
}*/ | 16.268041 | 74 | 0.579214 | [
"vector"
] |
fdb8425f7e6da2809043bcb3e9b3777e5c801458 | 5,445 | h | C | DataMgr/ForeignStorage/ParquetArrayEncoder.h | yma11/omniscidb | 2f975266f5c9f4daeadffd154735dca30c7b633b | [
"Apache-2.0"
] | null | null | null | DataMgr/ForeignStorage/ParquetArrayEncoder.h | yma11/omniscidb | 2f975266f5c9f4daeadffd154735dca30c7b633b | [
"Apache-2.0"
] | null | null | null | DataMgr/ForeignStorage/ParquetArrayEncoder.h | yma11/omniscidb | 2f975266f5c9f4daeadffd154735dca30c7b633b | [
"Apache-2.0"
] | 1 | 2021-10-07T11:30:01.000Z | 2021-10-07T11:30:01.000Z | /*
* Copyright 2020 OmniSci, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <parquet/types.h>
#include "ParquetEncoder.h"
namespace foreign_storage {
// Base encoder for Parquet LIST (array) columns. appendData() reassembles
// arrays from Parquet's Dremel definition/repetition levels, buffering the
// element bytes in memory; subclasses decide how a finished array is
// appended to the destination buffer via processLastArray().
class ParquetArrayEncoder : public ParquetEncoder {
 public:
  ParquetArrayEncoder(Data_Namespace::AbstractBuffer* data_buffer,
                      std::shared_ptr<ParquetScalarEncoder> scalar_encoder,
                      const ColumnDescriptor* column_desciptor)
      : ParquetEncoder(data_buffer)
      , omnisci_data_type_byte_size_(
            column_desciptor->columnType.get_elem_type().get_size())
      , scalar_encoder_(scalar_encoder)
      , has_assembly_started_(false)
      , is_null_array_(false)
      , is_empty_array_(false)
      , num_elements_in_array_(0) {}
  // Consumes one batch of levels/values; rep_level == 0 marks the start of
  // a new array, def_level distinguishes value / null item / empty / null
  // array (see the *_def_level constants below).
  void appendData(const int16_t* def_levels,
                  const int16_t* rep_levels,
                  const int64_t values_read,
                  const int64_t levels_read,
                  int8_t* values) override {
    CHECK(levels_read > 0);
    // encode all values in the temporary in-memory `encode_buffer_`, doing
    // this encoding as a batch rather than element-wise exposes opportunities
    // for performance optimization for certain scalar types
    encodeAllValues(values, values_read);
    for (int64_t i = 0, j = 0; i < levels_read; ++i) {
      if (isNewArray(rep_levels[i])) {
        processLastArray();
        resetLastArrayMetadata();
      }
      processArrayItem(def_levels[i], j);
    }
  }
  // Flushes the trailing (still open) array and hands all buffered bytes
  // to the destination buffer. Must be called once per row group.
  void finalizeRowGroup() {
    processLastArray();
    resetLastArrayMetadata();
    appendArraysToBuffer();
    has_assembly_started_ = false;
  }
  std::shared_ptr<ChunkMetadata> getRowGroupMetadata(
      const parquet::RowGroupMetaData* group_metadata,
      const int parquet_column_index,
      const SQLTypeInfo& column_type) override {
    // delegate to the element encoder; byte count cannot be known for
    // variable length data, so it is zeroed out
    auto metadata = scalar_encoder_->getRowGroupMetadata(
        group_metadata, parquet_column_index, column_type);
    metadata->numBytes = 0;  // number of bytes is not known
    return metadata;
  }

 protected:
  // Hook: subclass consumes the just-finished array (null/empty/element
  // data can be queried via the accessors below).
  virtual void processLastArray() = 0;
  virtual void appendArraysToBuffer() {
    buffer_->append(data_buffer_bytes_.data(), data_buffer_bytes_.size());
    data_buffer_bytes_.clear();
  }
  bool isLastArrayNull() const { return is_null_array_; }
  bool isLastArrayEmpty() const { return is_empty_array_; }
  size_t sizeOfLastArray() const { return num_elements_in_array_; }
  // Grows the staging byte buffer by N elements and returns a pointer to
  // the newly added region.
  int8_t* resizeArrayDataBytes(const size_t additional_num_elements) {
    auto current_data_byte_size = data_buffer_bytes_.size();
    data_buffer_bytes_.resize(current_data_byte_size +
                              additional_num_elements * omnisci_data_type_byte_size_);
    return data_buffer_bytes_.data() + current_data_byte_size;
  }
  size_t omnisci_data_type_byte_size_;
  std::shared_ptr<ParquetScalarEncoder> scalar_encoder_;
  std::vector<int8_t> data_buffer_bytes_;
  // constants used during Dremel encoding assembly
  const static int16_t non_null_def_level = 3;
  const static int16_t item_null_def_level = 2;
  const static int16_t empty_list_def_level = 1;
  const static int16_t list_null_def_level = 0;

 private:
  void resetLastArrayMetadata() {
    is_empty_array_ = false;
    is_null_array_ = false;
    num_elements_in_array_ = 0;
  }
  // rep_level 0 starts a new array, except for the very first record.
  bool isNewArray(const int16_t rep_level) const {
    return rep_level == 0 && has_assembly_started_;
  }
  void processArrayItem(const int16_t def_level, int64_t& encoded_index) {
    has_assembly_started_ = true;
    if (def_level == non_null_def_level) {
      // push back a scalar element to in-memory data buffer
      appendArrayItem(encoded_index++);
    } else if (def_level == item_null_def_level) {
      // push back a scalar null to in-memory data buffer
      appendNullArrayItem();
    } else if (def_level == list_null_def_level) {
      markArrayAsNull();
    } else if (def_level == empty_list_def_level) {
      markArrayAsEmpty();
    } else {
      UNREACHABLE();
    }
  }
  // Batch-encode the raw parquet values into encode_buffer_.
  void encodeAllValues(const int8_t* values, const int64_t values_read) {
    encode_buffer_.resize(values_read * omnisci_data_type_byte_size_);
    scalar_encoder_->encodeAndCopyContiguous(values, encode_buffer_.data(), values_read);
  }
  void markArrayAsNull() { is_null_array_ = true; }
  void markArrayAsEmpty() { is_empty_array_ = true; }
  void appendArrayItem(const int64_t encoded_index) {
    auto omnisci_data_ptr = resizeArrayDataBytes(1);
    scalar_encoder_->copy(
        encode_buffer_.data() + (encoded_index)*omnisci_data_type_byte_size_,
        omnisci_data_ptr);
    num_elements_in_array_++;
  }
  void appendNullArrayItem() {
    scalar_encoder_->setNull(resizeArrayDataBytes(1));
    num_elements_in_array_++;
  }
  std::vector<int8_t> encode_buffer_;
  bool has_assembly_started_;
  bool is_null_array_;
  bool is_empty_array_;
  size_t num_elements_in_array_;
};
} // namespace foreign_storage
| 33.404908 | 89 | 0.715152 | [
"vector"
] |
fdba83b77993413c0be401ef08b8860a7b302c3a | 5,878 | h | C | src/mca/odls/base/odls_private.h | naughtont3/prrte | e1d55e4e12db8bc14b8b2e680a88a2a2e75df789 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | src/mca/odls/base/odls_private.h | naughtont3/prrte | e1d55e4e12db8bc14b8b2e680a88a2a2e75df789 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | src/mca/odls/base/odls_private.h | naughtont3/prrte | e1d55e4e12db8bc14b8b2e680a88a2a2e75df789 | [
"BSD-3-Clause-Open-MPI"
] | null | null | null | /*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2020 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2011 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2016-2019 Intel, Inc. All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* Copyright (c) 2017-2019 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2021 Nanook Consulting. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/** @file:
*/
#ifndef MCA_ODLS_PRIVATE_H
#define MCA_ODLS_PRIVATE_H
/*
* includes
*/
#include "prte_config.h"
#include "types.h"
#include "src/class/prte_list.h"
#include "src/class/prte_pointer_array.h"
#include "src/class/prte_bitmap.h"
#include "src/mca/iof/base/iof_base_setup.h"
#include "src/mca/rml/rml_types.h"
#include "src/pmix/pmix-internal.h"
#include "src/runtime/prte_globals.h"
#include "src/threads/threads.h"
#include "src/mca/odls/odls_types.h"
BEGIN_C_DECLS
/*
* General ODLS types
*/
/* Framework-global state for the ODLS (local launch) subsystem. */
typedef struct {
    /** Verbose/debug output stream */
    int output;
    /** Time to allow process to forcibly die */
    int timeout_before_sigkill;
    /* list of ranks to be displayed on separate xterms */
    prte_list_t xterm_ranks;
    /* the xterm cmd to be used */
    char **xtermcmd;
    /* thread pool */
    int max_threads;                // upper bound on progress threads
    int num_threads;                // progress threads currently running
    int cutoff;                     // local proc count above which threads are used
    prte_event_base_t **ev_bases;   // event base array for progress threads
    char** ev_threads;              // event progress thread names
    int next_base;                  // counter to load-level thread use
    bool signal_direct_children_only; // restrict signals to our own children
    prte_lock_t lock;               // protects the fields above
} prte_odls_globals_t;
PRTE_EXPORT extern prte_odls_globals_t prte_odls_globals;
/*
* Default functions that are common to most environments - can
* be overridden by specific environments if they need something
* different (e.g., bproc)
*/
PRTE_EXPORT int
prte_odls_base_default_get_add_procs_data(pmix_data_buffer_t *data,
pmix_nspace_t job);
PRTE_EXPORT int
prte_odls_base_default_construct_child_list(pmix_data_buffer_t *data,
pmix_nspace_t *job);
PRTE_EXPORT void prte_odls_base_spawn_proc(int fd, short sd, void *cbdata);
/* define a function that will fork a local proc */
typedef int (*prte_odls_base_fork_local_proc_fn_t)(void *cd);
/* define an object for fork/exec the local proc */
/* Work item describing one local process to fork/exec; scheduled as an
 * event so the actual fork happens in an event-library callback
 * (see prte_odls_base_spawn_proc). */
typedef struct {
    prte_object_t super;
    prte_event_t ev;        // event used to schedule this spawn
    char *cmd;              // executable to run
    char *wdir;             // working directory for the child
    char **argv;
    char **env;
    prte_job_t *jdata;      // job this proc belongs to
    prte_app_context_t *app;
    prte_proc_t *child;
    bool index_argv;
    prte_iof_base_io_conf_t opts;   // stdio forwarding configuration
    prte_odls_base_fork_local_proc_fn_t fork_local; // environment-specific fork fn
} prte_odls_spawn_caddy_t;
PRTE_CLASS_DECLARATION(prte_odls_spawn_caddy_t);
/* define an object for starting local launch */
/* State object handed to prte_odls_base_default_launch_local(). */
typedef struct {
    prte_object_t object;
    prte_event_t *ev;   // write event that triggers the launch
    pmix_nspace_t job;  // job whose local procs are to be started
    prte_odls_base_fork_local_proc_fn_t fork_local;
    int retries;        // launch attempts made so far
} prte_odls_launch_local_t;
PRTE_EXPORT PRTE_CLASS_DECLARATION(prte_odls_launch_local_t);
/* Schedule a local launch of job 'j' using fork function 'f': allocates a
 * caddy and fires it as a one-shot write event at system priority.
 * NOTE(review): this assumes the class constructor allocates ll->ev --
 * confirm against the PRTE_NEW/class definition. */
#define PRTE_ACTIVATE_LOCAL_LAUNCH(j, f)                                \
    do {                                                                \
        prte_odls_launch_local_t *ll;                                   \
        ll = PRTE_NEW(prte_odls_launch_local_t);                        \
        PMIX_LOAD_NSPACE(ll->job, (j));                                 \
        ll->fork_local = (f);                                           \
        prte_event_set(prte_event_base, ll->ev, -1, PRTE_EV_WRITE,      \
                       prte_odls_base_default_launch_local, ll);        \
        prte_event_set_priority(ll->ev, PRTE_SYS_PRI);                  \
        prte_event_active(ll->ev, PRTE_EV_WRITE, 1);                    \
    } while(0);
PRTE_EXPORT void prte_odls_base_default_launch_local(int fd, short sd, void *cbdata);
PRTE_EXPORT void prte_odls_base_default_wait_local_proc(int fd, short sd, void *cbdata);
/* define a function type to signal a local proc */
typedef int (*prte_odls_base_signal_local_fn_t)(pid_t pid, int signum);
PRTE_EXPORT int
prte_odls_base_default_signal_local_procs(const pmix_proc_t *proc, int32_t signal,
prte_odls_base_signal_local_fn_t signal_local);
/* define a function type for killing a local proc */
typedef int (*prte_odls_base_kill_local_fn_t)(pid_t pid, int signum);
/* define a function type to detect that a child died */
typedef bool (*prte_odls_base_child_died_fn_t)(prte_proc_t *child);
PRTE_EXPORT int
prte_odls_base_default_kill_local_procs(prte_pointer_array_t *procs,
prte_odls_base_kill_local_fn_t kill_local);
PRTE_EXPORT int prte_odls_base_default_restart_proc(prte_proc_t *child,
prte_odls_base_fork_local_proc_fn_t fork_local);
/*
* Preload binary/files functions
*/
PRTE_EXPORT int prte_odls_base_preload_files_app_context(prte_app_context_t* context);
END_C_DECLS
#endif
| 35.624242 | 102 | 0.657366 | [
"object"
] |
fdc0ba0e0730422122d47de5a094ac7c290fc507 | 833 | h | C | 0064-Minimum-Path-Sum/cpp_0064/Solution1.h | ooooo-youwillsee/leetcode | 07b273f133c8cf755ea40b3ae9df242ce044823c | [
"MIT"
] | 12 | 2020-03-18T14:36:23.000Z | 2021-12-19T02:24:33.000Z | 0064-Minimum-Path-Sum/cpp_0064/Solution1.h | ooooo-youwillsee/leetcode | 07b273f133c8cf755ea40b3ae9df242ce044823c | [
"MIT"
] | null | null | null | 0064-Minimum-Path-Sum/cpp_0064/Solution1.h | ooooo-youwillsee/leetcode | 07b273f133c8cf755ea40b3ae9df242ce044823c | [
"MIT"
] | null | null | null | //
// Created by ooooo on 2020/2/13.
//
#ifndef CPP_0064__SOLUTION1_H_
#define CPP_0064__SOLUTION1_H_
#include <iostream>
#include <vector>
using namespace std;
/**
* dp[i][j] = cur_num + min(dp[i-1][j] + dp[i][j-1])
*/
class Solution {
public:
    /**
     * Minimum path sum from the top-left to the bottom-right cell of a
     * grid, moving only right or down.
     *
     * Rolling 1-D DP: dp[j] holds the best cost to reach column j of the
     * current row, so memory is O(n) instead of the original O(m*n).
     *
     * @param grid m x n grid of step costs
     * @return minimal accumulated cost, or 0 for an empty grid
     */
    int minPathSum(std::vector<std::vector<int>> &grid) {
        // Also guard a non-empty grid whose rows are empty; the original
        // indexed grid[0][0] out of bounds in that case.
        if (grid.empty() || grid[0].empty()) return 0;
        int m = (int)grid.size(), n = (int)grid[0].size();
        std::vector<int> dp(n);
        // first row: each cell is reachable only from the left
        dp[0] = grid[0][0];
        for (int j = 1; j < n; ++j) {
            dp[j] = dp[j - 1] + grid[0][j];
        }
        for (int i = 1; i < m; ++i) {
            dp[0] += grid[i][0]; // first column: reachable only from above
            for (int j = 1; j < n; ++j) {
                // old dp[j] = cost from above, dp[j-1] = cost from the left
                dp[j] = grid[i][j] + std::min(dp[j], dp[j - 1]);
            }
        }
        return dp[n - 1];
    }
};
#endif //CPP_0064__SOLUTION1_H_
| 22.513514 | 64 | 0.495798 | [
"vector"
] |
fdc6b7285fd3ee051a48ae54607034d9af83049a | 6,133 | c | C | src/os/posix/src/os-impl-mutex.c | dsburns/osal | 80fdd67a6c28a962164697ece12570142331ea44 | [
"Apache-2.0"
] | 2 | 2020-06-17T20:30:28.000Z | 2020-12-28T18:42:46.000Z | src/os/posix/src/os-impl-mutex.c | dsburns/osal | 80fdd67a6c28a962164697ece12570142331ea44 | [
"Apache-2.0"
] | 7 | 2020-07-23T17:14:46.000Z | 2020-09-17T00:25:08.000Z | src/os/posix/src/os-impl-mutex.c | dsburns/osal | 80fdd67a6c28a962164697ece12570142331ea44 | [
"Apache-2.0"
] | 2 | 2021-01-21T16:18:16.000Z | 2021-02-10T23:05:51.000Z | /*
* NASA Docket No. GSC-18,370-1, and identified as "Operating System Abstraction Layer"
*
* Copyright (c) 2019 United States Government as represented by
* the Administrator of the National Aeronautics and Space Administration.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* \file os-impl-mutex.c
* \ingroup posix
* \author joseph.p.hickey@nasa.gov
*
*/
/****************************************************************************************
INCLUDE FILES
***************************************************************************************/
#include "os-posix.h"
#include "os-shared-mutex.h"
#include "os-impl-mutex.h"
/* Tables where the OS object information is stored */
OS_impl_mutex_internal_record_t OS_impl_mutex_table [OS_MAX_MUTEXES];
/****************************************************************************************
MUTEX API
***************************************************************************************/
/*----------------------------------------------------------------
*
* Function: OS_Posix_MutexAPI_Impl_Init
*
* Purpose: Local helper routine, not part of OSAL API.
*
*-----------------------------------------------------------------*/
int32 OS_Posix_MutexAPI_Impl_Init(void)
{
    /* Zero the whole mutex table so every entry starts out unused. */
    memset(OS_impl_mutex_table, 0, sizeof(OS_impl_mutex_table));
    return OS_SUCCESS;
} /* end OS_Posix_MutexAPI_Impl_Init */
/*----------------------------------------------------------------
*
* Function: OS_MutSemCreate_Impl
*
* Purpose: Implemented per internal OSAL API
* See prototype for argument/return detail
*
*-----------------------------------------------------------------*/
/*
** Creates the mutex in OS_impl_mutex_table[sem_id] as a recursive,
** priority-inheriting pthread mutex. Returns OS_SUCCESS, or
** OS_SEM_FAILURE if any pthread call fails.
*/
int32 OS_MutSemCreate_Impl (uint32 sem_id, uint32 options)
{
    int return_code;
    pthread_mutexattr_t mutex_attr;

    /*
    ** initialize the attribute with default values
    */
    return_code = pthread_mutexattr_init(&mutex_attr);
    if ( return_code != 0 )
    {
       OS_DEBUG("Error: Mutex could not be created. pthread_mutexattr_init failed ID = %u: %s\n",
             (unsigned int)sem_id,strerror(return_code));
       return OS_SEM_FAILURE;
    }

    /*
    ** Allow the mutex to use priority inheritance
    */
    return_code = pthread_mutexattr_setprotocol(&mutex_attr,PTHREAD_PRIO_INHERIT);
    if ( return_code != 0 )
    {
       OS_DEBUG("Error: Mutex could not be created. pthread_mutexattr_setprotocol failed ID = %u: %s\n",
             (unsigned int)sem_id,strerror(return_code));
       pthread_mutexattr_destroy(&mutex_attr); /* fix: do not leak the attr */
       return OS_SEM_FAILURE;
    }

    /*
    ** Set the mutex type to RECURSIVE so a thread can do nested locks
    */
    return_code = pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    if ( return_code != 0 )
    {
       OS_DEBUG("Error: Mutex could not be created. pthread_mutexattr_settype failed ID = %u: %s\n",
             (unsigned int)sem_id,strerror(return_code));
       pthread_mutexattr_destroy(&mutex_attr); /* fix: do not leak the attr */
       return OS_SEM_FAILURE;
    }

    /*
    ** create the mutex
    ** upon successful initialization, the state of the mutex becomes initialized and unlocked
    */
    return_code = pthread_mutex_init(&OS_impl_mutex_table[sem_id].id,&mutex_attr);
    if ( return_code != 0 )
    {
       OS_DEBUG("Error: Mutex could not be created. ID = %u: %s\n",
             (unsigned int)sem_id,strerror(return_code));
       pthread_mutexattr_destroy(&mutex_attr);
       return OS_SEM_FAILURE;
    }

    /*
    ** Fix: the attribute object was previously never destroyed (resource
    ** leak). Per POSIX, the mutex keeps its own copy of the attributes, so
    ** destroying the attr here does not affect the created mutex.
    */
    pthread_mutexattr_destroy(&mutex_attr);
    return OS_SUCCESS;
} /* end OS_MutSemCreate_Impl */
/*----------------------------------------------------------------
*
* Function: OS_MutSemDelete_Impl
*
* Purpose: Implemented per internal OSAL API
* See prototype for argument/return detail
*
*-----------------------------------------------------------------*/
int32 OS_MutSemDelete_Impl (uint32 sem_id)
{
    /* Destroy the underlying pthread mutex; zero is the only success code. */
    return (pthread_mutex_destroy(&(OS_impl_mutex_table[sem_id].id)) == 0)
        ? OS_SUCCESS : OS_SEM_FAILURE;
} /* end OS_MutSemDelete_Impl */
/*----------------------------------------------------------------
*
* Function: OS_MutSemGive_Impl
*
* Purpose: Implemented per internal OSAL API
* See prototype for argument/return detail
*
*-----------------------------------------------------------------*/
int32 OS_MutSemGive_Impl ( uint32 sem_id )
{
    /* Release (unlock) the mutex; any nonzero result is an error. */
    int rc = pthread_mutex_unlock(&(OS_impl_mutex_table[sem_id].id));
    return (rc == 0) ? OS_SUCCESS : OS_SEM_FAILURE;
} /* end OS_MutSemGive_Impl */
/*----------------------------------------------------------------
*
* Function: OS_MutSemTake_Impl
*
* Purpose: Implemented per internal OSAL API
* See prototype for argument/return detail
*
*-----------------------------------------------------------------*/
int32 OS_MutSemTake_Impl ( uint32 sem_id )
{
    /* Acquire (lock) the mutex; any nonzero result is an error. */
    int rc = pthread_mutex_lock(&(OS_impl_mutex_table[sem_id].id));
    return (rc == 0) ? OS_SUCCESS : OS_SEM_FAILURE;
} /* end OS_MutSemTake_Impl */
/*----------------------------------------------------------------
*
* Function: OS_MutSemGetInfo_Impl
*
* Purpose: Implemented per internal OSAL API
* See prototype for argument/return detail
*
*-----------------------------------------------------------------*/
int32 OS_MutSemGetInfo_Impl (uint32 sem_id, OS_mut_sem_prop_t *mut_prop)
{
    /* No implementation-specific properties are reported on POSIX; the
     * shared layer fills in the generic fields. Always succeeds. */
    return OS_SUCCESS;
} /* end OS_MutSemGetInfo_Impl */
| 29.204762 | 104 | 0.539214 | [
"object"
] |
fdccf7c61cfd076dcfe1e9d2b4702161787d2780 | 2,254 | c | C | matdump.c | curiousTauseef/matrixlab | 553170f93d445f30d7c29731a34a1db75bb3dde2 | [
"MIT"
] | null | null | null | matdump.c | curiousTauseef/matrixlab | 553170f93d445f30d7c29731a34a1db75bb3dde2 | [
"MIT"
] | null | null | null | matdump.c | curiousTauseef/matrixlab | 553170f93d445f30d7c29731a34a1db75bb3dde2 | [
"MIT"
] | 1 | 2019-12-02T17:08:28.000Z | 2019-12-02T17:08:28.000Z | #include "matrix.h"
/** \brief Dumps a matrix in the stdout
*
* \param[in] A Input matrix
*
*/
void mat_dump(MATRIX A)
{
    /* Delegate to the generic writer with the default numeric format and
     * the standard output stream. */
    mat_fdumpf(A, "%.16g ", stdout);
}
/** \brief Dumps a matrix using a given format specifier in the stdout
*
* \param[in] A Input matrix
* \param[in] s Format specifier
*
*/
void mat_dumpf(MATRIX A, const char *s)
{
    /* stdout specialization of the generic file writer */
    mat_fdumpf(A, s, stdout);
}
/** \brief Dumps a matrix in an opened file
*
* \param[in] A Input matrix
* \param[in] fp Pointer to an opened file
*
*/
void mat_fdump(MATRIX A, MAT_FILEPOINTER fp)
{
    /* Delegate to the generic writer with the default numeric format. */
    mat_fdumpf(A, "%.16g ", fp);
}
/** \brief Dumps a matrix using a given format specifier in an opened file
*
* \param[in] A Input matrix
* \param[in] s Format specifier
* \param[in] fp Pointer to an opened file
*
*/
void mat_fdumpf(MATRIX A, const char *s, MAT_FILEPOINTER fp)
{
    int row, col, nrows, ncols;
    /* gen_error reports a missing input matrix */
    if (A == NULL) gen_error(GEN_NOT_FOUND);
    ncols = MatCol(A);
    nrows = MatRow(A);
    /* one text line per matrix row, each entry printed with format 's' */
    for (row = 0; row < nrows; ++row)
    {
        for (col = 0; col < ncols; ++col)
            fprintf(fp, s, A[row][col]);
        fprintf(fp, "\n");
    }
    fflush(fp);
}
/** \brief Dumps an integer vector in the stdout
*
* \param[in] A Input vector
*
*/
void int_vec_dump(INT_VECTOR A)
{
    /* Delegate to the generic writer with the default integer format and
     * the standard output stream. */
    int_vec_fdumpf(A, "%d ", stdout);
}
/** \brief Dumps an integer vector using a given format specifier in the stdout
*
* \param[in] A Input vector
* \param[in] s Format specifier
*
*/
void int_vec_dumpf(INT_VECTOR A, const char *s)
{
    /* stdout specialization of the generic file writer */
    int_vec_fdumpf(A, s, stdout);
}
/** \brief Dumps an integer vector in an opened file
*
* \param[in] A Input vector
* \param[in] fp Pointer to an opened file
*
*/
void int_vec_fdump(INT_VECTOR A, MAT_FILEPOINTER fp)
{
    /* Delegate to the generic writer with the default integer format. */
    int_vec_fdumpf(A, "%d ", fp);
}
/** \brief Dumps an integer vector using a given format specifier in an opened file
*
* \param[in] A Input vector
* \param[in] s Format specifier
* \param[in] fp Pointer to an opened file
*
*/
void int_vec_fdumpf(INT_VECTOR A, const char *s, MAT_FILEPOINTER fp)
{
    int idx, len;
    /* gen_error reports a missing input vector */
    if (A == NULL) gen_error(GEN_NOT_FOUND);
    len = Int_VecLen(A);
    /* all entries on one line, each printed with format 's' */
    for (idx = 0; idx < len; ++idx)
        fprintf(fp, s, A[idx]);
    fflush(fp);
}
| 18.325203 | 83 | 0.604703 | [
"vector"
] |
fdcd995d2cc1c393311198226fb8a4a2d7500bcf | 242 | h | C | include/Engine/Render/Postprocess/PostProcess.h | SamCZ/NovaEngine | 7f667988467ab611018f650295825b63c97e8d43 | [
"MIT"
] | 2 | 2021-04-18T06:40:19.000Z | 2021-06-29T23:35:31.000Z | include/Engine/Render/Postprocess/PostProcess.h | SamCZ/NovaEngine | 7f667988467ab611018f650295825b63c97e8d43 | [
"MIT"
] | null | null | null | include/Engine/Render/Postprocess/PostProcess.h | SamCZ/NovaEngine | 7f667988467ab611018f650295825b63c97e8d43 | [
"MIT"
] | null | null | null | #ifndef POST_PROCESS_H
#define POST_PROCESS_H
namespace NovaEngine {
/* Post-processing pass (declaration only; behavior is defined in the
 * corresponding .cpp, so the notes below are name-based and should be
 * confirmed against the implementation). */
class PostProcess {
private:
public:
	PostProcess();
	// presumably one-time resource setup -- confirm in the .cpp
	void init();
	// presumably executes the post-process pass for the current frame
	void render();
	// NOTE(review): GameObject is not declared in this header; a forward
	// declaration or include appears to be relied on from elsewhere.
	bool onGameObjectRequested(GameObject* obj);
};
}
#endif // !POST_PROCESS_H | 12.736842 | 46 | 0.719008 | [
"render"
] |
fdcf23424bbb1b5b258e78e9b312682967aad9a8 | 22,838 | h | C | vislib/include/vislib/sys/sysfunctions.h | azuki-monster/megamol | f5d75ae5630f9a71a7fbf81624bfd4f6b253c655 | [
"BSD-3-Clause"
] | 2 | 2020-10-16T10:15:37.000Z | 2021-01-21T13:06:00.000Z | vislib/include/vislib/sys/sysfunctions.h | azuki-monster/megamol | f5d75ae5630f9a71a7fbf81624bfd4f6b253c655 | [
"BSD-3-Clause"
] | null | null | null | vislib/include/vislib/sys/sysfunctions.h | azuki-monster/megamol | f5d75ae5630f9a71a7fbf81624bfd4f6b253c655 | [
"BSD-3-Clause"
] | 1 | 2021-01-28T01:19:54.000Z | 2021-01-28T01:19:54.000Z | /*
* sysfunctions.h
*
* Copyright (C) 2006-2011 by Universitaet Stuttgart (VIS). Alle Rechte vorbehalten.
*/
#ifndef VISLIB_SYSFUNCTIONS_H_INCLUDED
#define VISLIB_SYSFUNCTIONS_H_INCLUDED
#if (defined(_MSC_VER) && (_MSC_VER > 1000))
#pragma once
#endif /* (defined(_MSC_VER) && (_MSC_VER > 1000)) */
#if defined(_WIN32) && defined(_MANAGED)
#pragma managed(push, off)
#endif /* defined(_WIN32) && defined(_MANAGED) */
#ifdef _WIN32
#include <shlwapi.h>
#else /* _WIN32 */
#include <sys/types.h>
#include <stddef.h>
#endif /* _WIN32 */
#include <stdio.h>
#include <stdarg.h>
#include "vislib/CharTraits.h"
#include "vislib/sys/File.h"
#include "vislib/sys/sysfunctions.h"
#include "vislib/RawStorage.h"
#include "vislib/String.h"
#include "vislib/sys/SystemException.h"
#include "vislib/types.h"
namespace vislib {
namespace sys {
/**
* Possible text file formats
*/
enum TextFileFormat {
TEXTFF_UNSPECIFIC,
TEXTFF_ASCII, // locale dependent
TEXTFF_UNICODE, // OS dependent
TEXTFF_UTF8,
TEXTFF_UTF16,
TEXTFF_UTF16_BE,
TEXTFF_UTF32,
TEXTFF_UTF32_BE,
TEXTFF_UTF7,
TEXTFF_UTF1,
TEXTFF_UTF_EBCDIC,
TEXTFF_SCSU,
TEXTFF_BOCU1,
TEXTFF_GB18030
};
/**
* Possible options for writing BOM
*/
enum TextFileFormatBOM {
TEXTFF_BOM_UNSPECIFIC, // write BOM when writing suitable format
TEXTFF_BOM_YES, // always write BOM when possible
TEXTFF_BOM_NO // never write BOM
};
/**
* powered by miniport.h:
* Returns the containing struct for the address of a given field.
*
* @param address The address of the field.
* @param type The type of the containing struct.
* @param field The name of the field of address.
*
* @return The address of the containing struct.
*/
#ifndef CONTAINING_RECORD
#ifdef _WIN32
#define CONTAINING_RECORD(address, type, field) \
((type *)((PCHAR)(address) - (ULONG_PTR)(&((type *)0)->field)))
#else /* _WIN32 */
#define CONTAINING_RECORD(address, type, field) \
((type *)((PCHAR)(address) - ((ULONG_PTR)(&((type *)4)->field) - 4)))
#endif /* _WIN32 */
#endif /* CONTAINING_RECORD */
#define CONTAINING_STRUCT(address, type, field) \
CONTAINING_RECORD(address, type, field)
/** Default maximal line size to be read by the read line functions. */
const unsigned int defMaxLineSize = 1024;
/**
* Answer whether the file name 'filename' matches with the glob pattern
* 'pattern'. Be aware that this function does not try to match directory
* names, but only file names (handled as simple strings without any
* additional information).
*
* Pattern syntax:
* ? matches any one character
* * matches any number (even zero) characters
* [...] matches one character from the given list of characters. Note
* that this character list does not support ranges!
* \? matches the ? character
* \* matches the * character
* \[ matches the [ character
* \] matches the ] character
* \\ matches the \ character
*
* @param filename The file name string to test. This name should not
* contain any directory information.
* @param pattern The glob pattern to test. This pattern should not
* contain any directory information.
*
* @return 'true' if the pattern matches the file name, 'false' otherwise.
*/
template <class T>
bool FilenameGlobMatch(const T* filename, const T* pattern) {
SIZE_T fnl = vislib::CharTraits<T>::SafeStringLength(filename);
SIZE_T fnp = 0;
SIZE_T pl = vislib::CharTraits<T>::SafeStringLength(pattern);
SIZE_T pp = 0;
while ((fnp < fnl) && (pp < pl)) {
switch (pattern[pp]) {
case '?':
pp++;
fnp++;
break;
case '*':
pp++;
if (pp == pl) {
// pattern always matches rest of the filename
return true;
}
// this is super slow and lazy, but works
for (SIZE_T skipSize = 0; skipSize < (fnl - fnp);
skipSize++) {
if (FilenameGlobMatch(filename + fnp + skipSize,
pattern + pp)) {
return true;
}
}
return false;
case '[': {
SIZE_T pps = pp;
while ((pp < pl) && (pattern[pp] != ']')) pp++;
if (pp == pl) return false;
vislib::String<vislib::CharTraits<T> > matchGroup(
pattern + pps + 1, static_cast<typename vislib::String<
vislib::CharTraits<T> >::Size>(pp - (1 + pps)));
if (!matchGroup.Contains(filename[fnp])) return false;
fnp++;
pp++;
} break;
case '\\': pp++; // fall through
default:
if (filename[fnp] != pattern[pp]) return false;
fnp++;
pp++;
break;
}
}
// pattern matches only if pattern and filename are consumed at same speed.
return (fnp == fnl) && (pp == pl);
}
/**
* Load a resource from the specified module.
*
* @param out A RawStorage that will receive the resource data.
* @param hModule A handle to the module whose executable file contains
* the resource. If hModule is NULL, the resource is
* loaded from the module that was used to create the
* current process.
* @param resourceID The name of the resource. Alternately, rather than
* a pointer, this parameter can be MAKEINTRESOURCE(ID),
* where ID is the integer identifier of the resource.
* @param resourceType The resource type.
*
* @return A RawStorage containing the raw resource data. This is the same
* object passed in as out.
*
* @throws SystemException If the resource lookup or loading the resource
* failed.
* @throws UnsupportedOperationException On Linux.
*/
RawStorage& LoadResource(RawStorage& out,
#ifdef _WIN32
HMODULE hModule,
#else /* _WIN32 */
void *hModule,
#endif /* _WIN32 */
const char *resourceID, const char *resourceType);
/**
* Load a resource from the specified module.
*
* @param out A RawStorage that will receive the resource data.
* @param hModule A handle to the module whose executable file contains
* the resource. If hModule is NULL, the resource is
* loaded from the module that was used to create the
* current process.
* @param resourceID The name of the resource. Alternately, rather than
* a pointer, this parameter can be MAKEINTRESOURCE(ID),
* where ID is the integer identifier of the resource.
* @param resourceType The resource type.
*
* @return A RawStorage containing the raw resource data. This is the same
* object passed in as out.
*
* @throws SystemException If the resource lookup or loading the resource
* failed.
* @throws UnsupportedOperationException On Linux.
*/
RawStorage& LoadResource(RawStorage& out,
#ifdef _WIN32
HMODULE hModule,
#else /* _WIN32 */
void *hModule,
#endif /* _WIN32 */
const wchar_t *resourceID, const wchar_t *resourceType);
/**
* Reads ansi characters from the file until the end of file, a line break
* is reached, or size characters are read. The returned string does not
* contain the line break if one had been read.
* Remarks: The methode does not perform any buffering, so you might want
* to use a buffered file.
*
* @param input The input file.
* @param size The maximum number of character to read.
*
* @return The string holding the line read.
*
* @throws IOException If the file cannot be read.
* @throws std::bad_alloc If there is not enough memory to store the line.
*/
StringA ReadLineFromFileA(File& input, unsigned int size = defMaxLineSize);
/**
* Reads unicode characters from the file until the end of file, a line
* break is reached, or size characters are read. The returned string does
* not contain the line break if one had been read.
* Remarks: The methode does not perform any buffering, so you might want
* to use a buffered file. wchar_t characters are read, so keep in mind
* that files will not be compatible between windows and linux because of
* different values of sizeof(wchar_t).
*
* @param input The input file.
* @param size The maximum number of character to read.
*
* @return The string holding the line read.
*
* @throws IOException If the file cannot be read.
* @throws std::bad_alloc If there is not enough memory to store the line.
*/
StringW ReadLineFromFileW(File& input, unsigned int size = defMaxLineSize);
#if defined(UNICODE) || defined(_UNICODE)
#define ReadLineFromFile ReadLineFromFileW
#else /* defined(UNICODE) || defined(_UNICODE) */
#define ReadLineFromFile ReadLineFromFileA
#endif /* defined(UNICODE) || defined(_UNICODE) */
/**
* Read the content of the file 'filename' into 'outSrc'. 'outSrc' is
* being erased by this operation.
*
* @param outStr The string to receive the content
* @param filename The name of the file being read
* @param format The format of the text to read
* @param forceFormat If true 'format' is used even if a BOM is found
*
* @return true, if the file could be read, false, if the file was not
* found or could not be opened.
*
* @throws IOException If reading from the file failed.
*/
template<class tp1, class tp2>
bool ReadTextFile(String<tp1>& outStr, const tp2 *filename,
TextFileFormat format = TEXTFF_UNSPECIFIC,
bool forceFormat = false) {
File file;
bool retval = false;
if (file.Open(filename, File::READ_ONLY, File::SHARE_READ,
File::OPEN_ONLY)) {
retval = ReadTextFile(outStr, file, format, forceFormat);
file.Close();
} else {
// works because the last error still contains the correct value
throw SystemException(__FILE__, __LINE__);
}
return retval;
}
/**
* Read the content of the file 'filename' into 'outSrc'. 'outSrc' is
* being erased by this operation.
*
* @param outStr The string to receive the content
* @param filename The name of the file being read
* @param format The format of the text to read
* @param forceFormat If true 'format' is used even if a BOM is found
*
* @return true, if the file could be read, false, if the file was not
* found or could not be opened.
*
* @throws IOException If reading from the file failed.
*/
template<class tp1, class tp2>
bool ReadTextFile(String<tp1>& outStr, const String<tp2>& filename,
TextFileFormat format = TEXTFF_UNSPECIFIC,
bool forceFormat = false) {
return ReadTextFile(outStr, filename.PeekBuffer(), format,
forceFormat);
}
/**
* Read the content of the file 'file' into 'outSrc'. 'outSrc' is being
* erased by this operation. 'file' will be read from the current position,
* will be read until EoF, and will not be closed after operation.
*
* @param outStr The string to receive the content
* @param file The file object being read
* @param format The format of the text to read
* @param forceFormat If true 'format' is used even if a BOM is found
*
* @return true, if the file could be read, false, if the file was not
* found or could not be opened.
*
* @throws IOException If reading from the file failed.
*/
bool ReadTextFile(StringA& outStr, File& file,
TextFileFormat format = TEXTFF_UNSPECIFIC, bool forceFormat = false);
/**
* Read the content of the file 'file' into 'outSrc'. 'outSrc' is being
* erased by this operation. 'file' will be read from the current position,
* will be read until EoF, and will not be closed after operation.
*
* @param outStr The string to receive the content
* @param file The file object being read
* @param format The format of the text to read
* @param forceFormat If true 'format' is used even if a BOM is found
*
* @return true, if the file could be read, false, if the file was not
* found or could not be opened.
*
* @throws IOException If reading from the file failed.
*/
bool ReadTextFile(StringW& outStr, File& file,
TextFileFormat format = TEXTFF_UNSPECIFIC, bool forceFormat = false);
/**
* Answer the number of milliseconds since midnight of the current day.
*
* @return milliseconds since midnight.
*/
unsigned int GetTicksOfDay(void);
#ifdef _WIN32
/**
* Answer the version of a Windows system DLL.
*
* @param outVersion Receives the version of the specified module. The
* 'cbSize' must have been set to the actual size of the
* 'outVersion' structure before calling the function.
* @param moduleName The name of the system DLL to retrieve the version of.
*
* @return The return value of the DllGetVersion of 'moduleName', which is
* NOERROR in case of success or an appropriate error code
* otherwise.
*
* @throws SystemException If the specified module could not be opened or if
* it has no DllGetVersion function.
*/
HRESULT GetDLLVersion(DLLVERSIONINFO& outVersion, const char *moduleName);
/**
* Answer the version of a Windows system DLL.
*
* @param outVersion Receives the version of the specified module. The
* 'cbSize' must have been set to the actual size of the
* 'outVersion' structure before calling the function.
* @param moduleName The name of the system DLL to retrieve the version of.
*
* @return The return value of the DllGetVersion of 'moduleName', which is
* NOERROR in case of success or an appropriate error code
* otherwise.
*
* @throws SystemException If the specified module could not be opened or if
* it has no DllGetVersion function.
*/
HRESULT GetDLLVersion(DLLVERSIONINFO& outVersion,
const wchar_t * moduleName);
#endif /* _WIN32 */
/**
* Remove Windows kernel namespace prefixes "Global" and "Local" from
* 'name'. The function is case-insensitive.
*
* @param name A string that potentially begins with a Windows kernel
* namespace prefix.
*
* @return The name without kernel namespace prefix.
*/
vislib::StringA RemoveKernelNamespace(const char *name);
/**
* Remove Windows kernel namespace prefixes "Global" and "Local" from
* 'name'. The function is case-insensitive.
*
* @param name A string that potentially begins with a Windows kernel
* namespace prefix.
*
* @return The name without kernel namespace prefix.
*/
vislib::StringW RemoveKernelNamespace(const wchar_t *name);
/**
* Release the COM pointer 'ptr' and set it NULL if not yet NULL.
*
* @param ptr A pointer to a COM object (or any other object implementing
* reference counting via a Release() method.
*/
template<class T> void SafeRelease(T*& ptr) {
if (ptr != NULL) {
ptr->Release();
ptr = NULL;
}
}
/**
* Take a Windows IPC resource name and construct a POSIX name for Linux
* it. This involves removing a possible kernel namespace and prepending
* a slash ('/').
*
* @param name A string that potentially begins with a Windows kernel
* namespace prefix.
*
* @return The name in POSIX-compatible format without kernel namespace.
*/
vislib::StringA TranslateWinIpc2PosixName(const char *name);
/**
* Take a Windows IPC resource name and construct a POSIX name for Linux
* it. This involves removing a possible kernel namespace and prepending
* a slash ('/').
*
* @param name A string that potentially begins with a Windows kernel
* namespace prefix.
*
* @return The name in POSIX-compatible format without kernel namespace.
*/
vislib::StringW TranslateWinIpc2PosixName(const wchar_t *name);
#ifndef _WIN32
/**
* Convert a IPC resource name 'name', which might start with a Windows
* kernel namespace prefix, to a Linux System V IPC unique key.
*
* @param name The name of the resource.
*
* @return The System V unique key for the name.
*
* @throws SystemException If the key could not be created.
*/
key_t TranslateIpcName(const char *name);
#endif /* !_WIN32 */
/**
* Writes a formatted string to a file. 'format' uses 'printf' syntax.
* The template parameter should be automatically chosen to be 'char' or
* 'wchar_t'.
*
* @param out The file to which the formatted text line is written to.
* @param format The text line format string, similar to 'printf'.
*
* @return 'true' on success, 'false' it not all the data has been
* written.
*
* @throws SystemException If there was an IO error.
*/
template<class T>
bool WriteFormattedLineToFile(File &out,
const T *format, ...) {
vislib::String<vislib::CharTraits<T> > tmp;
va_list argptr;
va_start(argptr, format);
tmp.FormatVa(format, argptr);
va_end(argptr);
SIZE_T len = tmp.Length() * sizeof(T);
return out.Write(tmp.PeekBuffer(), len) == len;
}
/**
* Writes a string to a file. The template parameter should be
* automatically chosen to be 'char' or 'wchar_t'.
*
* @param out The file to which the text line is written to.
* @param text The text line to be written.
*
* @return 'true' on success, 'false' it not all the data has been
* written.
*
* @throws SystemException If there was an IO error.
*/
template<class T>
bool WriteLineToFile(File &out, const T *text) {
SIZE_T len = vislib::CharTraits<T>::SafeStringLength(text)
* sizeof(T);
return out.Write(text, len) == len;
}
/**
* Writes a text to a file. If the file exists and force is 'true' the
* existing file is overwritten.
*
* @param filename The path to the file to be written.
* @param text The text to be written.
* @param force Flag whether or not to overwrite an existing file.
* @param format The text file format to produce
*
* @return true if the data was written successfully, false otherwise.
*
* @throws SystemException in case of an error.
*/
template<class tp1, class tp2>
bool WriteTextFile(const String<tp1>& filename, const String<tp2>& text,
bool force = false, TextFileFormat format = TEXTFF_UNSPECIFIC,
TextFileFormatBOM bom = TEXTFF_BOM_UNSPECIFIC) {
return WriteTextFile(filename.PeekBuffer(), text, force, format, bom);
}
/**
* Writes a text to a file. If the file exists and force is 'true' the
* existing file is overwritten.
*
* @param filename The path to the file to be written.
* @param text The text to be written.
* @param force Flag whether or not to overwrite an existing file.
* @param format The text file format to produce
*
* @return true if the data was written successfully, false otherwise.
*
* @throws SystemException in case of an error.
*/
template<class tp1, class tp2>
bool WriteTextFile(const tp1 *filename, const String<tp2>& text,
bool force = false, TextFileFormat format = TEXTFF_UNSPECIFIC,
TextFileFormatBOM bom = TEXTFF_BOM_UNSPECIFIC) {
bool retval = false;
File file;
if (file.Open(filename, File::WRITE_ONLY, File::SHARE_EXCLUSIVE,
force ? File::CREATE_OVERWRITE : File::CREATE_ONLY)) {
retval = WriteTextFile(file, text, format, bom);
file.Close();
} else {
// works because the last error still contains the correct value
throw SystemException(__FILE__, __LINE__);
}
return retval;
}
/**
* Writes a text to a file.
*
* @param file The file stream to be written to
* @param text The text to be written
* @param format The text file format to produce
*
* @return true if the data was written successfully, false otherwise.
*
* @throws SystemException in case of an error.
*/
bool WriteTextFile(File& file, const StringA& text,
TextFileFormat format = TEXTFF_ASCII,
TextFileFormatBOM bom = TEXTFF_BOM_UNSPECIFIC);
/**
* Writes a text to a file.
*
* @param file The file stream to be written to
* @param text The text to be written
* @param format The text file format to produce
*
* @return true if the data was written successfully, false otherwise.
*
* @throws SystemException in case of an error.
*/
bool WriteTextFile(File& file, const StringW& text,
TextFileFormat format = TEXTFF_UNICODE,
TextFileFormatBOM bom = TEXTFF_BOM_UNSPECIFIC);
} /* end namespace sys */
} /* end namespace vislib */
#if defined(_WIN32) && defined(_MANAGED)
#pragma managed(pop)
#endif /* defined(_WIN32) && defined(_MANAGED) */
#endif /* VISLIB_SYSFUNCTIONS_H_INCLUDED */
| 37.74876 | 84 | 0.60338 | [
"object"
] |
fdd71b7b3978af83700e515a02733332d8e75792 | 5,231 | h | C | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/jitfailure-ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6/ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6.h | wathen/PhD | 35524f40028541a4d611d8c78574e4cf9ddc3278 | [
"MIT"
] | 3 | 2020-10-25T13:30:20.000Z | 2021-08-10T21:27:30.000Z | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/jitfailure-ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6/ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6.h | wathen/PhD | 35524f40028541a4d611d8c78574e4cf9ddc3278 | [
"MIT"
] | null | null | null | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/jitfailure-ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6/ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6.h | wathen/PhD | 35524f40028541a4d611d8c78574e4cf9ddc3278 | [
"MIT"
] | 3 | 2019-10-28T16:12:13.000Z | 2020-01-13T13:59:44.000Z | // This code conforms with the UFC specification version 2016.2.0
// and was automatically generated by FFC version 2016.2.0.
//
// This code was generated with the following parameters:
//
// convert_exceptions_to_warnings: False
// cpp_optimize: True
// cpp_optimize_flags: '-O2'
// epsilon: 1e-14
// error_control: False
// form_postfix: False
// format: 'ufc'
// max_signature_length: 0
// no-evaluate_basis_derivatives: True
// optimize: False
// precision: 15
// quadrature_degree: -1
// quadrature_rule: 'auto'
// representation: 'auto'
// split: False
#ifndef __FFC_FORM_189DEA85BA972A56EB98B5F7E4CA7E38C2E919B6_H
#define __FFC_FORM_189DEA85BA972A56EB98B5F7E4CA7E38C2E919B6_H
#include <ffc_element_a50ac52382c0402b672a5cd62d4dc0b8a749f43c.h>
#include <ffc_element_c852b35925b0dd6ec6c17a58a30a28504881fde6.h>
#include <ffc_element_cf42ec224b0e5062340b89a867f73365523bda27.h>
#include <stdexcept>
#include <ufc.h>
class ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_cell_integral_main_otherwise: public ufc::cell_integral
{
public:
ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_cell_integral_main_otherwise();
~ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_cell_integral_main_otherwise() override;
const std::vector<bool> & enabled_coefficients() const final override;
void tabulate_tensor(double * A,
const double * const * w,
const double * coordinate_dofs,
int cell_orientation) const final override;
};
extern "C" ufc::cell_integral * create_ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_cell_integral_main_otherwise();
class ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_form_main: public ufc::form
{
public:
ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_form_main();
~ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_form_main() override;
const char * signature() const final override;
std::size_t rank() const final override;
std::size_t num_coefficients() const final override;
std::size_t original_coefficient_position(std::size_t i) const final override;
ufc::finite_element * create_coordinate_finite_element() const final override;
ufc::dofmap * create_coordinate_dofmap() const final override;
ufc::coordinate_mapping * create_coordinate_mapping() const final override;
ufc::finite_element * create_finite_element(std::size_t i) const final override;
ufc::dofmap * create_dofmap(std::size_t i) const final override;
std::size_t max_cell_subdomain_id() const final override;
std::size_t max_exterior_facet_subdomain_id() const final override;
std::size_t max_interior_facet_subdomain_id() const final override;
std::size_t max_vertex_subdomain_id() const final override;
std::size_t max_custom_subdomain_id() const final override;
std::size_t max_cutcell_subdomain_id() const final override;
std::size_t max_interface_subdomain_id() const final override;
std::size_t max_overlap_subdomain_id() const final override;
bool has_cell_integrals() const final override;
bool has_exterior_facet_integrals() const final override;
bool has_interior_facet_integrals() const final override;
bool has_vertex_integrals() const final override;
bool has_custom_integrals() const final override;
bool has_cutcell_integrals() const final override;
bool has_interface_integrals() const final override;
bool has_overlap_integrals() const final override;
ufc::cell_integral * create_cell_integral(std::size_t i) const final override;
ufc::exterior_facet_integral * create_exterior_facet_integral(std::size_t i) const final override;
ufc::interior_facet_integral * create_interior_facet_integral(std::size_t i) const final override;
ufc::vertex_integral * create_vertex_integral(std::size_t i) const final override;
ufc::custom_integral * create_custom_integral(std::size_t i) const final override;
ufc::cutcell_integral * create_cutcell_integral(std::size_t i) const final override;
ufc::interface_integral * create_interface_integral(std::size_t i) const final override;
ufc::overlap_integral * create_overlap_integral(std::size_t i) const final override;
ufc::cell_integral * create_default_cell_integral() const final override;
ufc::exterior_facet_integral * create_default_exterior_facet_integral() const final override;
ufc::interior_facet_integral * create_default_interior_facet_integral() const final override;
ufc::vertex_integral * create_default_vertex_integral() const final override;
ufc::custom_integral * create_default_custom_integral() const final override;
ufc::cutcell_integral * create_default_cutcell_integral() const final override;
ufc::interface_integral * create_default_interface_integral() const final override;
ufc::overlap_integral * create_default_overlap_integral() const final override;
};
extern "C" ufc::form * create_ffc_form_189dea85ba972a56eb98b5f7e4ca7e38c2e919b6_form_main();
#endif
| 36.075862 | 120 | 0.759319 | [
"vector"
] |
fde00560f78e7244e5233134e28a154aed553e55 | 1,014 | h | C | Haze/src/Haze/Scene/Scene.h | HafisCZ/Haze | 1d51ad29b1d991db1b7ec431afba7780df2e0684 | [
"Apache-2.0"
] | 2 | 2019-04-09T23:49:03.000Z | 2020-01-04T06:45:29.000Z | Haze/src/Haze/Scene/Scene.h | HafisCZ/Haze | 1d51ad29b1d991db1b7ec431afba7780df2e0684 | [
"Apache-2.0"
] | null | null | null | Haze/src/Haze/Scene/Scene.h | HafisCZ/Haze | 1d51ad29b1d991db1b7ec431afba7780df2e0684 | [
"Apache-2.0"
] | null | null | null | #pragma once
#include "Haze/Core.h"
#include "Haze/Objects/Model.h"
#include "Haze/Scene/Light.h"
#include "Haze/ImGui/Presets.h"
#include "Haze/Utils/Utils.h"
namespace Haze
{
class Object
{
public:
Object() {}
Object(Model* model) : Model(model) {}
~Object() { Repository::Unload<Haze::Model>(Model); }
Model* Model;
ModelMatrix Matrix;
inline std::pair<Mesh*, float> IntersectsRay(glm::vec3 rayOrigin, glm::vec3 rayDir) {
std::pair<Mesh*, float> ir = { nullptr, 0.0f };
for (auto m : Model->Meshes) {
auto mi = Math::Intersects_OBB_RAY(m->AABB_MIN, m->AABB_MAX, Matrix.GetScale(), Matrix.Matrix(), rayOrigin, rayDir);
if (mi.first && (ir.first == nullptr || mi.second < ir.second)) {
ir = { m, mi.second };
}
}
return ir;
}
};
class Scene
{
public:
std::vector<Object*> Objects;
std::vector<PointLight> Point;
AmbientLight Ambient;
VectorLight Vector;
TextureCube* Skybox = nullptr;
Object* Item = nullptr;
};
} | 19.5 | 121 | 0.629191 | [
"mesh",
"object",
"vector",
"model"
] |
fdec104609991023899cdd8492231068a03a081f | 798 | h | C | Studio/src/Data/MeshWorker.h | ajensen1234/ShapeWorks | fb308c8c38ad0e00c7f62aa7221e00e72140d909 | [
"MIT"
] | 40 | 2019-07-26T18:02:13.000Z | 2022-03-28T07:24:23.000Z | Studio/src/Data/MeshWorker.h | ajensen1234/ShapeWorks | fb308c8c38ad0e00c7f62aa7221e00e72140d909 | [
"MIT"
] | 1,359 | 2019-06-20T17:17:53.000Z | 2022-03-31T05:42:29.000Z | Studio/src/Data/MeshWorker.h | ajensen1234/ShapeWorks | fb308c8c38ad0e00c7f62aa7221e00e72140d909 | [
"MIT"
] | 13 | 2019-12-06T01:31:48.000Z | 2022-02-24T04:34:23.000Z | #pragma once
/**
* @file MeshWorker.h
* @brief Worker class for parallel mesh reconstruction
*
* The MeshWorker implements each thread's mesh construction management
*/
#include <QObject>
#include <QWaitCondition>
#include <QMetaType>
#include <QRunnable>
#include <Data/MeshWorkQueue.h>
#include <Data/MeshCache.h>
#include <Data/MeshGenerator.h>
Q_DECLARE_METATYPE(vtkSmartPointer<vtkPolyData>);
namespace shapeworks {
class MeshWorker : public QObject, public QRunnable {
Q_OBJECT
public:
MeshWorker(MeshWorkQueue* queue, std::shared_ptr<MeshGenerator> generator);
~MeshWorker();
void run();
Q_SIGNALS:
void result_ready(const MeshWorkItem& item, MeshHandle mesh);
void finished();
private:
std::shared_ptr<MeshGenerator> mesh_generator_;
MeshWorkQueue* queue_;
};
} | 19.95 | 77 | 0.760652 | [
"mesh"
] |
fded49a779d8d742727dd37f3180c98dd523d801 | 1,691 | c | C | lib/wizards/neophyte/city/monst/hring.c | vlehtola/questmud | 8bc3099b5ad00a9e0261faeb6637c76b521b6dbe | [
"MIT"
] | null | null | null | lib/wizards/neophyte/city/monst/hring.c | vlehtola/questmud | 8bc3099b5ad00a9e0261faeb6637c76b521b6dbe | [
"MIT"
] | null | null | null | lib/wizards/neophyte/city/monst/hring.c | vlehtola/questmud | 8bc3099b5ad00a9e0261faeb6637c76b521b6dbe | [
"MIT"
] | null | null | null | inherit "obj/monster";
object tunic, staff;
reset(arg) {
string chat_str;
::reset(arg);
if(arg) {return; }
set_name("hring");
set_alias("priest");
set_level(30);
set_gender(1);
set_al(50);
set_hp(50000);
set_max_hp(50000);
set_log();
set_race("high elf");
set_short("Hring the high priest");
set_long("He is wearing a white tunic which glitters in light. Looking at his\n"+
"face you get feeling of great wisdom that this priest posses.\n");
set_extra(1);
set_skill("cast divine", 90);
set_skill("cast heal", 90);
set_skill("cast major", 90);
set_skill("channel", 90);
set_skill("chanting", 50);
set_skill("mastery of medicine", 50);
set_skill("tumble", 30);
staff = clone_object("/wizards/neophyte/city/eq/staff");
move_object(staff, this_object());
init_command("wield staff");
if (!chat_str) {
chat_str = allocate(1);
chat_str[0] =
"Hring says 'We have a quite problem with the warlocks'\n";
}
load_chat(3, chat_str);
}
extra() {
string spell_words, target_name;
target_name = "hring";
spell_words = "chl hea mjr";
if (!query_spell() && random(100) > 50 && query_hp() < query_max_hp() / 2
&&
!this_object()->query_stunned()) {
spell = clone_object("obj/spell");
spell->start_spell(spell_words + " at " + target_name);
}
}
| 32.519231 | 92 | 0.512123 | [
"object"
] |
fdf4b437680eb93d3185ad67abffbc4b6223ee36 | 1,672 | h | C | Cpp/View/ViewRobot.h | pzr1988/Klampt | d5a334e73f1f24ba4c606e03f49915b353799a57 | [
"BSD-3-Clause"
] | 1 | 2020-07-03T20:38:20.000Z | 2020-07-03T20:38:20.000Z | Cpp/View/ViewRobot.h | tcrapse/Klampt | d5a334e73f1f24ba4c606e03f49915b353799a57 | [
"BSD-3-Clause"
] | null | null | null | Cpp/View/ViewRobot.h | tcrapse/Klampt | d5a334e73f1f24ba4c606e03f49915b353799a57 | [
"BSD-3-Clause"
] | null | null | null | #ifndef VIEW_ROBOT_H
#define VIEW_ROBOT_H
#include <KrisLibrary/GLdraw/GLColor.h>
#include <KrisLibrary/GLdraw/GLDisplayList.h>
#include <KrisLibrary/GLdraw/GeometryAppearance.h>
#include <Klampt/Modeling/Robot.h>
/** @ingroup View
* @brief Draws the robot (potentially color-coded)
*/
struct ViewRobot
{
ViewRobot(Robot* robot=NULL);
~ViewRobot();
///Draws the whole robot
void Draw(Robot* robot);
///Draws the whole robot
void Draw();
///Draws opaque / transparent parts of the robot
void DrawOpaque(bool opaque);
///draws link i's geometry in its local frame
void DrawLink_Local(int i,bool keepAppearance=true);
///draws link i's geometry in the world frame
void DrawLink_World(int i,bool keepAppearance=true);
void DrawCenterOfMass(Real radius = 0.05);
void DrawLinkCenterOfMass(int i,Real radius = 0.05);
void DrawLinkFrames(Real size = 0.1);
void DrawLinkSkeleton();
void DrawTorques(const Vector& t);
void SetColors(const GLDraw::GLColor& c);
void SetColor(int i,const GLDraw::GLColor& c);
void SetGrey();
void SetTorqueColors(const Vector& t);
///gets the currently active appearance
GLDraw::GeometryAppearance& Appearance(int link);
///pushes a new active appearance
void PushAppearance();
///pops the last active appearance
void PopAppearance();
///restores the base appearance
void RestoreAppearance();
///easy way to save/restore appearance
vector<GLDraw::GeometryAppearance> GetAppearance();
///easy way to save/restore appearance
void SetAppearance(const vector<GLDraw::GeometryAppearance>& );
Robot* robot;
vector< vector<GLDraw::GeometryAppearance> > appearanceStack;
};
#endif
| 31.54717 | 65 | 0.740431 | [
"geometry",
"vector"
] |
a9048e380d584396d6f41ca3cf0aeb5e231bd0cb | 5,596 | h | C | pyrserpp/Prototype.h | salemileandro/pparserpy | 78becdefe17b63dc9058ab338181dc7bae5da6f5 | [
"MIT"
] | null | null | null | pyrserpp/Prototype.h | salemileandro/pparserpy | 78becdefe17b63dc9058ab338181dc7bae5da6f5 | [
"MIT"
] | null | null | null | pyrserpp/Prototype.h | salemileandro/pparserpy | 78becdefe17b63dc9058ab338181dc7bae5da6f5 | [
"MIT"
] | null | null | null | #ifndef PROTOTYPE_H
#define PROTOTYPE_H
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cassert>
class Prototype
{
public:
Prototype(std::string input_file, char comment = '#')
{
/* ***************************
*** READING THE INPUT FILE ***
******************************/
if( !this->IsFileExists(input_file) )
{
this->GenerateInputFile(input_file);
}
this->input_file_ = input_file;
std::ifstream read(this->input_file_);
assert(read.is_open());
std::string tmp;
std::string line;
this->input_text_.clear();
while( !read.eof() )
{
std::getline(read, line);
tmp = "";
for(unsigned i=0; i<line.size(); i++)
{
if(line[i] == comment)
break;
else
tmp += line[i];
}
if( tmp.size() > 1)
this->input_text_.push_back(tmp);
}
read.close();
//LOOK_FOR_VARIABLE_CALL
//UNIT_CONVERSION_CALL
}
~Prototype(){};
//DEFINITION_OF_GET_FUNCTIONS
void GenerateInputFile(std::string filename)
{
std::ofstream write(filename.c_str());
assert(write.is_open());
//GENERATE_INPUT_FILE
write.close();
}
protected:
private:
inline bool IsFileExists (const std::string& filename)
{
std::ifstream f(filename.c_str());
return f.good();
}
std::vector<std::string> Split(std::string str, char separator = ' ')
{
std::vector<std::string> words;
std::string temp;
temp = "";
for(unsigned i=0; i<str.size(); i++)
{
if(str[i] != separator)
temp += str[i];
else
{
if( temp != "" )
{
words.push_back(temp);
temp = "";
}
}
}
if( temp != "" )
{
words.push_back(temp);
temp = "";
}
return words;
}
/* *******************************************
*** LookForVariable FUNCTIONS (Multi-Type) ***
**********************************************/
void LookForVariable(bool &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
if (words[1] == "0" || words[1] == "false" || words[1] == "False" )
variable = false;
if (words[1] == "1" || words[1] == "true" || words[1] == "True" )
variable = true;
break;
}
}
return;
}
void LookForVariable(int &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable = std::stoi(words[1]);
break;
}
}
return;
}
void LookForVariable(double &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable = std::stod(words[1]);
break;
}
}
return;
}
void LookForVariable(std::string &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable = words[1];
break;
}
}
return;
}
void LookForVariable(std::vector<double> &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable.clear();
if(words[1].find(':') != std::string::npos && words.size() == 2)
{
auto bound = this->Split(words[1], ':');
assert(bound.size() == 3);
double x_start = std::stod(bound[0]);
double x_end = std::stod(bound[1]);
int N = std::stoi(bound[2]);
double dx = (x_end - x_start) / (N - 1);
variable.resize(N);
variable[0] = x_start;
for(unsigned i=1; i<N; i++)
{
variable[i] = variable[i-1] + dx;
}
}
else
{
for(unsigned i=1; i<words.size(); i++)
{
variable.push_back(std::stod(words[i]));
}
}
}
}
return;
}
void LookForVariable(std::vector<int> &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable.clear();
for(unsigned i=1; i<words.size(); i++)
{
variable.push_back(std::stoi(words[i]));
}
}
}
return;
}
void LookForVariable(std::vector<bool> &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable.clear();
for(unsigned i=1; i<words.size(); i++)
{
if(words[i] == "true" || words[i] == "TRUE" || words[i] == "1")
variable.push_back(true);
else if(words[i] == "false" || words[i] == "FALSE" || words[i] == "0")
variable.push_back(false);
else
variable.push_back(false);
}
}
}
return;
}
void LookForVariable(std::vector<std::string> &variable, std::string variable_name)
{
for(unsigned i=0; i<this->input_text_.size(); i++)
{
auto words = this->Split(input_text_[i]);
if (variable_name == words[0])
{
variable.clear();
for(unsigned i=1; i<words.size(); i++)
{
variable.push_back(words[i]);
}
}
}
return;
}
std::string input_file_;
std::vector<std::string> input_text_;
//USER_DEFINED_VARIABLES
};
#endif // PROTOTYPE_H
| 21.037594 | 85 | 0.547355 | [
"vector"
] |
a906573a8af0eaeffab61e8cac53928ec2133d68 | 3,634 | h | C | third-party/paxos/src/deptran/rcc/server.h | shenweihai1/rolis-eurosys2022 | 59b3fd58144496a9b13415e30b41617b34924323 | [
"MIT"
] | 1 | 2022-03-08T00:36:10.000Z | 2022-03-08T00:36:10.000Z | third-party/paxos/src/deptran/rcc/server.h | shenweihai1/rolis-eurosys2022 | 59b3fd58144496a9b13415e30b41617b34924323 | [
"MIT"
] | null | null | null | third-party/paxos/src/deptran/rcc/server.h | shenweihai1/rolis-eurosys2022 | 59b3fd58144496a9b13415e30b41617b34924323 | [
"MIT"
] | null | null | null |
#pragma once
#include "../__dep__.h"
#include "../scheduler.h"
#include "graph.h"
#include "dep_graph.h"
namespace janus {
class SimpleCommand;
class RccGraph;
class RccCommo;
class TxnInfo;
class RccServer : public TxLogServer, public RccGraph {
public:
static map<txnid_t, int32_t> __debug_xxx_s;
static std::recursive_mutex __debug_mutex_s;
static void __DebugCheckParentSetSize(txnid_t tid, int32_t sz);
AvgStat traversing_stat_{};
public:
set<shared_ptr<RccTx>> fridge_ = {};
std::recursive_mutex mtx_{};
std::time_t last_upgrade_time_{0};
map<parid_t, int32_t> epoch_replies_{};
bool in_upgrade_epoch_{false};
const int EPOCH_DURATION = 5;
RccServer();
virtual ~RccServer();
using RccGraph::Aggregate; // C++ has strange hiding rules
virtual map<txnid_t, shared_ptr<RccTx>> Aggregate(RccGraph &graph);
// override graph operations
unordered_map<txnid_t, shared_ptr<RccTx>> &vertex_index() override {
verify(!managing_memory_);
return reinterpret_cast<
std::unordered_map<txnid_t, shared_ptr<RccTx>> &>(dtxns_);
};
shared_ptr<RccTx> CreateV(txnid_t txn_id) override {
auto sp_tx = CreateTx(txn_id);
return dynamic_pointer_cast<RccTx>(sp_tx);
}
shared_ptr<RccTx> CreateV(RccTx &rhs) override {
auto dtxn = dynamic_pointer_cast<RccTx>(CreateTx(rhs.id()));
if (rhs.epoch_ > 0) {
dtxn->epoch_ = rhs.epoch_;
}
dtxn->partition_ = rhs.partition_;
dtxn->status_ = rhs.status_;
verify(dtxn->id() == rhs.tid_);
return dtxn;
}
shared_ptr<Tx> GetOrCreateTx(txnid_t tid, bool ro = false) override;
virtual void SetPartitionId(parid_t par_id) override {
TxLogServer::partition_id_ = par_id;
RccGraph::partition_id_ = par_id;
}
int OnDispatch(const vector<SimpleCommand> &cmd,
TxnOutput *output,
shared_ptr<RccGraph> graph);
int OnCommit(cmdid_t cmd_id,
rank_t rank,
const RccGraph &graph,
TxnOutput *output,
const function<void()> &callback);
virtual int OnInquire(epoch_t epoch,
txnid_t cmd_id,
shared_ptr<RccGraph> graph);
virtual bool HandleConflicts(Tx &dtxn,
innid_t inn_id,
vector<string> &conflicts) override {
verify(0);
};
void DestroyExecutor(txnid_t tid) override;
void InquireAboutIfNeeded(RccTx &dtxn);
void InquiredGraph(RccTx &dtxn, shared_ptr<RccGraph> graph);
void InquireAck(cmdid_t cmd_id, RccGraph &graph);
bool AllAncCmt(RccTx* v);
void WaitUntilAllPredecessorsAtLeastCommitting(RccTx* v);
void WaitUntilAllPredSccExecuted(const RccScc &);
bool FullyDispatched(const RccScc &scc, rank_t r=RANK_UNDEFINED);
bool IsExecuted(const RccScc &scc, rank_t r=RANK_UNDEFINED);
void Decide(const RccScc &);
bool HasICycle(const RccScc &scc);
bool HasAbortedAncestor(const RccScc &scc);
bool AllAncFns(const RccScc &);
void Execute(const RccScc &);
void Execute(RccTx &);
void Abort(const RccScc &);
virtual int OnCommit(txnid_t txn_id,
rank_t rank,
bool need_validation,
shared_ptr<RccGraph> sp_graph,
TxnOutput *output);
/**
*
* @return validation result
*/
int OnInquireValidation(txid_t tx_id);
void OnNotifyGlobalValidation(txid_t tx_id, int validation_result);
void __DebugExamineFridge();
RccTx &__DebugFindAnOngoingAncestor(RccTx &vertex);
void __DebugExamineGraphVerify(RccTx &v);
RccCommo *commo();
};
} // namespace janus
| 30.283333 | 70 | 0.673913 | [
"vector"
] |
a915f0398035f2232f86a75c2981c2f2b4c90bad | 1,035 | h | C | util/include/ygz/EurocReader.h | Gang96/ygz_TX2 | bfe0c0749cef4c883b0308410687494568576b93 | [
"MIT"
] | 244 | 2017-07-21T08:46:54.000Z | 2022-03-21T14:13:44.000Z | util/include/ygz/EurocReader.h | jyakaranda/StructSLAM | 7eb205489d7bde30ee74b08e72d01deaa42741fa | [
"MIT"
] | 9 | 2017-08-30T12:34:09.000Z | 2021-07-08T08:48:30.000Z | util/include/ygz/EurocReader.h | jyakaranda/StructSLAM | 7eb205489d7bde30ee74b08e72d01deaa42741fa | [
"MIT"
] | 124 | 2017-07-21T12:16:18.000Z | 2022-02-11T14:53:57.000Z | #ifndef YGZ_EUROC_READER_H
#define YGZ_EUROC_READER_H
#include "ygz/Settings.h"
#include "ygz/NumTypes.h"
#include "ygz/IMUData.h"
// 一些用于EuRoC数据集的IO函数
namespace ygz {
// Load the stereo image data
// 输入:左眼图像目录,右眼图像目录,时间戳文件
// 输出:排序后左眼图像文件路径、右眼图像文件路径、时间戳
bool LoadImages(const string &strPathLeft, const string &strPathRight, const string &strPathTimes,
vector<string> &vstrImageLeft, vector<string> &vstrImageRight, vector<double> &vTimeStamps);
// Load the IMU data
bool LoadImus(const string &strImuPath, VecIMU &vImus);
/**
* Load the ground truth trajectory
* @param [in] trajPath the path to trajectory, in euroc will be xxx/state_groundtruth_estimate0/data.csv
* @param [out] the loaded trajectory
* @return true if succeed
*/
typedef map<double, SE3d, std::less<double>, Eigen::aligned_allocator<SE3d>> TrajectoryType;
bool LoadGroundTruthTraj(const string &trajPath,
TrajectoryType &trajectory);
}
#endif
| 29.571429 | 112 | 0.689855 | [
"vector"
] |
a917c85bba3d6f0bc906b01d5135d9aecb50f39f | 6,023 | h | C | core/pubnub_ccore.h | budgetpreneur/c-core | 943b39284837ea5db65eb2445fdc27a60753b183 | [
"MIT"
] | 47 | 2015-07-09T14:14:32.000Z | 2022-03-03T21:47:16.000Z | core/pubnub_ccore.h | parasyte/c-core | 943b39284837ea5db65eb2445fdc27a60753b183 | [
"MIT"
] | 52 | 2015-11-03T16:59:24.000Z | 2022-03-09T15:52:37.000Z | core/pubnub_ccore.h | parasyte/c-core | 943b39284837ea5db65eb2445fdc27a60753b183 | [
"MIT"
] | 69 | 2015-08-19T11:32:27.000Z | 2022-03-31T16:20:13.000Z | /* -*- c-file-style:"stroustrup"; indent-tabs-mode: nil -*- */
#if !defined INC_PUBNUB_CCORE
#define INC_PUBNUB_CCORE
#include "pubnub_api_types.h"
#include <stdbool.h>
typedef struct pubnub_char_mem_block pubnub_chamebl_t;
/** @file pubnub_ccore.h
This has the functions for formating and parsing the
requests and responses for transactions other than
the publish and subscribe - which have a separate module;
*/
struct pbcc_context;
/** Parses the string received as a response for a time operation
(transaction). This checks if the response is valid, and, if it
is, enables getting it as the gotten message (like for
`subscribe`).
@param p The Pubnub C core context to parse the response "in"
@return 0: OK, -1: error (invalid response)
*/
enum pubnub_res pbcc_parse_time_response(struct pbcc_context* p);
/** Parses the string received as a response for a history v2
operation (transaction). This checks if the response is valid,
and, if it is, enables getting the gotten message, as a JSON
array, and the timestamps for the first and last of them.
@param p The Pubnub C core context to parse the response "in"
@return 0: OK, -1: error (invalid response)
*/
enum pubnub_res pbcc_parse_history_response(struct pbcc_context* p);
/** Parses the string received as a response for a presence query
operation (transaction). Presence query is done on several
user requests: "where-now", "here-now", etc.
This checks if the response is valid (a JSON object), and, if it
is, enables getting it, as a whole, in one pubnub_get().
@param p The Pubnub C core context to parse the response "in"
@return 0: OK, -1: error (invalid response)
*/
enum pubnub_res pbcc_parse_presence_response(struct pbcc_context* p);
/** Parses the string received as a response for a channel-registry
operation (transaction). It is done on several user requests
(add/remove channel (from/to) channel group, list (channels
in a) channel group, remove channel group).
This checks if the response is valid (a JSON object), and, if it
is, enables getting it, as a whole, in one pubnub_get().
@param p The Pubnub C core context to parse the response "in"
@return The result of the parsing, expressed as the "Pubnub
result" enum
*/
enum pubnub_res pbcc_parse_channel_registry_response(struct pbcc_context* p);
/** Prepares the Leave operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_leave_prep(struct pbcc_context* p,
const char* channel,
const char* channel_group);
/** Prepares the Time operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_time_prep(struct pbcc_context* p);
/** Prepares the History v2 operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_history_prep(struct pbcc_context* p,
const char* channel,
unsigned count,
bool include_token,
enum pubnub_tribool string_token,
enum pubnub_tribool reverse,
enum pubnub_tribool include_meta,
char const* start,
char const* end);
/** Prepares the Heartbeat operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_heartbeat_prep(struct pbcc_context* p,
const char* channel,
const char* channel_group);
/** Prepares the Here-now operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_here_now_prep(struct pbcc_context* p,
const char* channel,
const char* channel_group,
enum pubnub_tribool disable_uuids,
enum pubnub_tribool state);
/** Prepares the Where-now operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_where_now_prep(struct pbcc_context* p, const char* uuid);
/** Prepares the Set state operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_set_state_prep(struct pbcc_context* p,
char const* channel,
char const* channel_group,
const char* uuid,
char const* state);
/** Prepares the Get state operation (transaction), mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_state_get_prep(struct pbcc_context* p,
char const* channel,
char const* channel_group,
const char* uuid);
/** Preparse the Remove channel group operation (transaction) , mostly by
formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_remove_channel_group_prep(struct pbcc_context* p,
char const* channel_group);
/** Preparse an operation (transaction) against the channel registry,
mostly by formatting the URI of the HTTP request.
*/
enum pubnub_res pbcc_channel_registry_prep(struct pbcc_context* p,
char const* channel_group,
char const* param,
char const* channel);
#endif /* !defined INC_PUBNUB_CCORE */
| 40.972789 | 78 | 0.604848 | [
"object"
] |
a91aef75555a253d0a8d5a965c275fa73b16b0eb | 378 | h | C | Meduza/Include/Platform/General/FileSystem/FileSystem.h | NWagter/Meduza | d1df99061381fa1c7665d09e275ddc0060a6ac8d | [
"MIT"
] | 6 | 2020-10-17T10:50:13.000Z | 2022-02-25T20:14:23.000Z | Meduza/Include/Platform/General/FileSystem/FileSystem.h | NWagter/Meduza | d1df99061381fa1c7665d09e275ddc0060a6ac8d | [
"MIT"
] | null | null | null | Meduza/Include/Platform/General/FileSystem/FileSystem.h | NWagter/Meduza | d1df99061381fa1c7665d09e275ddc0060a6ac8d | [
"MIT"
] | 1 | 2020-05-06T12:02:47.000Z | 2020-05-06T12:02:47.000Z | namespace Me
{
namespace Files
{
struct BrowseData
{
std::string m_path;
std::vector<std::string> m_folders;
std::vector<std::pair<std::string, std::string>> m_files;
};
class FileSystem
{
public:
static std::string ReadFile(std::string);
static std::string GetFileName(std::string);
static std::string GetFileExtention(std::string);
};
}
}
| 17.181818 | 60 | 0.666667 | [
"vector"
] |
a920f3570a96ced8bd3373ca1dd71ac0cb171f6c | 1,274 | h | C | Code/include/igl/matlab/validate_arg.h | FabianRepository/SinusProject | 48d68902ccd83f08c4d208ba8e0739a8a1252338 | [
"BSD-3-Clause"
] | null | null | null | Code/include/igl/matlab/validate_arg.h | FabianRepository/SinusProject | 48d68902ccd83f08c4d208ba8e0739a8a1252338 | [
"BSD-3-Clause"
] | null | null | null | Code/include/igl/matlab/validate_arg.h | FabianRepository/SinusProject | 48d68902ccd83f08c4d208ba8e0739a8a1252338 | [
"BSD-3-Clause"
] | null | null | null | // This file is part of libigl, a simple c++ geometry processing library.
//
// Copyright (C) 2015 Alec Jacobson <alecjacobson@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
#ifndef IGL_VALIDATE_ARG_H
#define IGL_VALIDATE_ARG_H
#include "../igl_inline.h"
#include <mex.h>
namespace igl
{
namespace matlab
{
// Throw an error if arg i+1 is not a scalar
//
// Inputs:
// i index of current arguement
// nrhs total number of arguments
// prhs pointer to arguements array
// name name of current argument
IGL_INLINE void validate_arg_scalar(
const int i, const int nrhs, const mxArray * prhs[], const char * name);
IGL_INLINE void validate_arg_logical(
const int i, const int nrhs, const mxArray * prhs[], const char * name);
IGL_INLINE void validate_arg_char(
const int i, const int nrhs, const mxArray * prhs[], const char * name);
IGL_INLINE void validate_arg_double(
const int i, const int nrhs, const mxArray * prhs[], const char * name);
}
}
#ifndef IGL_STATIC_LIBRARY
# include "validate_arg.cpp"
#endif
#endif
| 34.432432 | 79 | 0.688383 | [
"geometry"
] |
a9232833ad881d4080e96a599a1321d5462ff1ce | 2,692 | h | C | Src/util_circbuff2.h | ziggurat29/BlackBoardVGA001 | d6f6c5b3f1ef86b8a20f52d81641848204914633 | [
"BSD-2-Clause"
] | null | null | null | Src/util_circbuff2.h | ziggurat29/BlackBoardVGA001 | d6f6c5b3f1ef86b8a20f52d81641848204914633 | [
"BSD-2-Clause"
] | null | null | null | Src/util_circbuff2.h | ziggurat29/BlackBoardVGA001 | d6f6c5b3f1ef86b8a20f52d81641848204914633 | [
"BSD-2-Clause"
] | null | null | null | //========================================================================
//utilities for defining circular buffers
//impl 2; the use of 'stuct' keeps data members together, and generates
//slightly smaller code.
//This is implemented as macros, so as to be useful in C (where we don't
//have templates as in C++).
//This implementation takes a somewhat more object-oriented approach, so
//that other implementations may be more generalized and factored (by being
//albe to have a 'this' pointer to queue).
//These implementations perform no locking, so you will need to do that
//in your own code at the appropriate time.
#ifndef __UTIL_CIRCBUFF2_H
#define __UTIL_CIRCBUFF2_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
//==============================================================
//simple circular buffer
//This queue is realized with a fixed-size buffer known at compile-time.
//This defines the instance data for the circular buffer.
//'prefix' is a name prefix, so you can have several if you like.
//'type' is the data type; e.g. uint8_t
//'size' is the count of types in the queue. NOTE: MUST be power of 2!
/*
For example, the following:
CIRCBUF(MyQueue,uint8_t,128)
XXX
*/
/*
the implementation; a macro to make it template-esque for C.
*/
//the base type consists of indices, size, and optional debug members
typedef struct circbuff_t circbuff_t;
struct circbuff_t
{
volatile unsigned int _nIdxRead;
volatile unsigned int _nLength;
const unsigned int _nSize;
const unsigned int _nTypeSize;
#ifdef DEBUG
volatile unsigned int _nMaxLength;
#endif
};
//the derived type consists of the base type, with the buffer following
#define CIRCBUFTYPE(instance,type,size) \
typedef struct instance##_circbuff_t instance##_circbuff_t; \
struct instance##_circbuff_t \
{ \
volatile circbuff_t _base; \
volatile uint8_t _abyBuffer[size*sizeof(type)]; \
};
//the instance data is initialized with some critical size params
#define CIRCBUFINST(instance,type,size) \
instance##_circbuff_t instance = \
{ \
{ 0, 0, size, sizeof(type) } \
};
//you can declare the type and instance in one go, which is probably the usual case
#define CIRCBUF(instance,type,size) \
CIRCBUFTYPE(instance,type,size) \
CIRCBUFINST(instance,type,size)
void circbuff_init ( void* pvCirc );
unsigned int circbuff_capacity ( void* pvCirc );
unsigned int circbuff_count ( void* pvCirc );
int circbuff_empty ( void* pvCirc );
int circbuff_full ( void* pvCirc );
int circbuff_enqueue ( void* pvCirc, const void* val );
void circbuff_dequeue ( void* pvCirc, void* val );
#ifdef DEBUG
unsigned int circbuff_max ( void* pvCirc );
#endif
#ifdef __cplusplus
}
#endif
#endif
| 25.158879 | 83 | 0.711367 | [
"object"
] |
a92414e11bde55c53f95fcbff4156d9d587c6ab1 | 2,331 | h | C | aws-cpp-sdk-logs/include/aws/logs/model/MalformedQueryException.h | lintonv/aws-sdk-cpp | 15e19c265ffce19d2046b18aa1b7307fc5377e58 | [
"Apache-2.0"
] | 10 | 2021-07-01T14:03:34.000Z | 2022-03-22T09:40:20.000Z | aws-cpp-sdk-logs/include/aws/logs/model/MalformedQueryException.h | lintonv/aws-sdk-cpp | 15e19c265ffce19d2046b18aa1b7307fc5377e58 | [
"Apache-2.0"
] | 3 | 2021-04-21T07:20:21.000Z | 2021-06-15T10:06:27.000Z | aws-cpp-sdk-logs/include/aws/logs/model/MalformedQueryException.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-11-09T11:58:03.000Z | 2021-11-09T11:58:03.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/logs/CloudWatchLogs_EXPORTS.h>
#include <aws/logs/model/QueryCompileError.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace CloudWatchLogs
{
namespace Model
{
/**
* <p>The query string is not valid. Details about this error are displayed in a
* <code>QueryCompileError</code> object. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html">QueryCompileError</a>.</p>
* <p>For more information about valid query syntax, see <a
* href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html">CloudWatch
* Logs Insights Query Syntax</a>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/MalformedQueryException">AWS
* API Reference</a></p>
*/
class AWS_CLOUDWATCHLOGS_API MalformedQueryException
{
public:
MalformedQueryException();
MalformedQueryException(Aws::Utils::Json::JsonView jsonValue);
MalformedQueryException& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
inline const QueryCompileError& GetQueryCompileError() const{ return m_queryCompileError; }
inline bool QueryCompileErrorHasBeenSet() const { return m_queryCompileErrorHasBeenSet; }
inline void SetQueryCompileError(const QueryCompileError& value) { m_queryCompileErrorHasBeenSet = true; m_queryCompileError = value; }
inline void SetQueryCompileError(QueryCompileError&& value) { m_queryCompileErrorHasBeenSet = true; m_queryCompileError = std::move(value); }
inline MalformedQueryException& WithQueryCompileError(const QueryCompileError& value) { SetQueryCompileError(value); return *this;}
inline MalformedQueryException& WithQueryCompileError(QueryCompileError&& value) { SetQueryCompileError(std::move(value)); return *this;}
private:
QueryCompileError m_queryCompileError;
bool m_queryCompileErrorHasBeenSet;
};
} // namespace Model
} // namespace CloudWatchLogs
} // namespace Aws
| 32.375 | 145 | 0.746032 | [
"object",
"model"
] |
a92599131b0cf819095fc3512252cd98daf2e565 | 4,865 | h | C | ohm/LineKeysQuery.h | data61/ohm | 1bd81bb3e8a5a400d8af91e39464c640c47d56af | [
"Zlib"
] | 45 | 2020-06-09T23:26:47.000Z | 2022-03-16T12:16:33.000Z | ohm/LineKeysQuery.h | data61/ohm | 1bd81bb3e8a5a400d8af91e39464c640c47d56af | [
"Zlib"
] | 1 | 2022-01-10T05:50:36.000Z | 2022-01-24T02:50:01.000Z | ohm/LineKeysQuery.h | data61/ohm | 1bd81bb3e8a5a400d8af91e39464c640c47d56af | [
"Zlib"
] | 5 | 2021-02-25T15:08:46.000Z | 2022-03-30T13:08:03.000Z | // Copyright (c) 2017
// Commonwealth Scientific and Industrial Research Organisation (CSIRO)
// ABN 41 687 119 230
//
// Author: Kazys Stepanas
#ifndef OHM_LINEKEYSQUERY_H
#define OHM_LINEKEYSQUERY_H
#include "OhmConfig.h"
#include "Query.h"
#include "QueryFlag.h"
#include <glm/fwd.hpp>
namespace ohm
{
struct LineKeysQueryDetail;
/// This query calculates the voxels intersecting a batch of rays.
///
/// The results are similar to those of @c OccupancyMap::calculateSegmentKeys() (identical when using CPU),
/// but supports batched and GPU based calculation. The GPU calculation is generally only marginally faster
/// than CPU, but GPU supports @c executeAsync(), which is not supported for CPU.
///
/// In practice, this query isn't very useful as the GPU performance gains are minimal.
///
/// General usage is:
/// - Initialise the query object setting the map and the GPU flag if required.
/// - Call @c setRays() to define the ray start/end point pairs.
/// - Call @c execute() or @c executeAsync() followed by @c wait() (GPU only).
/// - Process results (see below).
///
/// The @c numberOfResults() will match the number of rays (@c pointCount given to @c setRays()) and for
/// each of these results, there is an entry in @c resultIndices() and @c resultCounts(). The indices indicate the
/// offsets into @c intersectedVoxels() for each ray in the same order they appear in @c setRays(). The
/// counts identify how many voxels are present for the current line counting from the associated index.
/// There should always be at least one voxel per ray for the start/end voxel. More generally, the first voxel
/// is the ray start voxel and the last voxel is the ray end voxel.
class ohm_API LineKeysQuery : public Query
{
protected:
/// Constructor used for inherited objects. This supports deriving @p LineKeysQueryDetail into
/// more specialised forms.
/// @param detail pimple style data structure. When null, a @c LineKeysQueryDetail is allocated by
/// this method.
explicit LineKeysQuery(LineKeysQueryDetail *detail);
public:
/// Construct a new query using the given parameters.
/// @param map The map to operate on. Only the voxel resolution and region sizes are used.
/// @param query_flags Flags controlling the query behaviour. See @c QueryFlag and @c LineKeysQuery::Flag.
explicit LineKeysQuery(ohm::OccupancyMap &map, unsigned query_flags = 0u);
/// Construct a new query using the given parameters.
/// @param query_flags Flags controlling the query behaviour. See @c QueryFlag and @c LineKeysQuery::Flag.
explicit LineKeysQuery(unsigned query_flags = 0);
/// Destructor.
~LineKeysQuery() override;
/// Set the ray point pairs to operate on. The @p rays elements must be in start/end point pairs.
/// @param rays Ray start/end point pairs.
/// @param point_count Number of elements in @p rays. Expected to be an even number; i.e., twice the number of rays.
void setRays(const glm::dvec3 *rays, size_t point_count);
/// Get the array of ray points set in the last call to @c setRays().
/// @return The last ray set assigned in @c setRays(). The number of elements is @c rayPointCount().
const glm::dvec3 *rays() const;
/// Return the number of point elements in @p rays(). The number of rays is half this as ray points are in
/// start/end point pairs.
/// @return The number of elements in @p rays().
size_t rayPointCount() const;
/// Get the array of result index offsets into @c intersectsVoxels().
///
/// This identifies the offsets for each ray into @c intersectedVoxels() where the results for that ray begin.
/// The corresponding number of voxels for each ray are accessible via @c resultCounts(). The number of elements
/// is set by the number of rays, accessible via @c numberOfResults().
///
/// Only value once execution completes.
///
/// @return Index offsets into @p intersectedVoxels() for each ray.
const size_t *resultIndices() const;
/// Get the array of result voxel counts in @c intersectedVoxels() for each ray.
///
/// This identifies the number of voxels for each ray in @c intersectedVoxels(). The corresponding offset for
/// each ray into @c intersectedVoxels() is available via @c resultIndices(). The number of elements
/// is set by the number of rays, accessible via @c numberOfResults().
///
/// Only value once execution completes.
///
/// @return Number of voxels intersected for each ray.
const size_t *resultCounts() const;
protected:
bool onExecute() override;
bool onExecuteAsync() override;
void onReset(bool hard_reset) override;
/// Access internal details.
/// @return Internal details.
LineKeysQueryDetail *imp();
/// Access internal details.
/// @return Internal details.
const LineKeysQueryDetail *imp() const;
};
} // namespace ohm
#endif // OHM_LINEKEYSQUERY_H
| 42.675439 | 118 | 0.72518 | [
"object"
] |
a92624136021621a8267fa2978fbb95522f8be03 | 454 | h | C | core/BytesOk.h | DronMDF/doors | 4e2069159c5edf1d55933480307c127b853255e0 | [
"MIT"
] | null | null | null | core/BytesOk.h | DronMDF/doors | 4e2069159c5edf1d55933480307c127b853255e0 | [
"MIT"
] | 205 | 2019-02-22T15:37:26.000Z | 2019-07-21T21:20:00.000Z | core/BytesOk.h | DronMDF/doors | 4e2069159c5edf1d55933480307c127b853255e0 | [
"MIT"
] | null | null | null | // Copyright (c) 2019 Andrey Valyaev <dron.valyaev@gmail.com>
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
#pragma once
#include <cstddef>
#include <cstdint>
#include <vector>
class BytesOk final {
public:
explicit BytesOk(const std::vector<uint8_t> &bytes);
BytesOk(const void *ptr, size_t size);
uint32_t opt() const;
private:
const std::vector<uint8_t> bytes;
};
| 22.7 | 64 | 0.73348 | [
"vector"
] |
a9270a6e510ea73fa91eb0c55ac20f50182546ec | 19,787 | h | C | torch/custom_class.h | Quentin-Anthony/pytorch | a71713e3a9c5c02f4f8f77dc3427e4aa49e5e71d | [
"Intel"
] | null | null | null | torch/custom_class.h | Quentin-Anthony/pytorch | a71713e3a9c5c02f4f8f77dc3427e4aa49e5e71d | [
"Intel"
] | null | null | null | torch/custom_class.h | Quentin-Anthony/pytorch | a71713e3a9c5c02f4f8f77dc3427e4aa49e5e71d | [
"Intel"
] | null | null | null | #pragma once
#include <ATen/core/builtin_function.h>
#include <ATen/core/function_schema.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/op_registration/infer_schema.h>
#include <ATen/core/stack.h>
#include <c10/util/C++17.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/TypeList.h>
#include <c10/util/TypeTraits.h>
#include <torch/custom_class_detail.h>
#include <torch/library.h>
#include <iostream>
#include <sstream>
namespace torch {
/// This function is used in conjunction with `class_::def()` to register
/// a constructor for a given C++ class type. For example,
/// `torch::init<int, std::string>()` would register a two-argument constructor
/// taking an `int` and a `std::string` as argument.
template <class... Types>
detail::types<void, Types...> init() {
return detail::types<void, Types...>{};
}
template <typename Func, typename... ParameterTypeList>
struct InitLambda {
Func f;
};
template <typename Func>
decltype(auto) init(Func&& f) {
using InitTraits = c10::guts::infer_function_traits_t<std::decay_t<Func>>;
using ParameterTypeList = typename InitTraits::parameter_types;
InitLambda<Func, ParameterTypeList> init{std::forward<Func>(f)};
return init;
}
/// Entry point for custom C++ class registration. To register a C++ class
/// in PyTorch, instantiate `torch::class_` with the desired class as the
/// template parameter. Typically, this instantiation should be done in
/// the initialization of a global variable, so that the class will be
/// made available on dynamic library loading without any additional API
/// calls needed. For example, to register a class named Foo, you might
/// create a global variable like so:
///
/// static auto register_foo = torch::class_<Foo>("myclasses", "Foo")
/// .def("myMethod", &Foo::myMethod)
/// .def("lambdaMethod", [](const c10::intrusive_ptr<Foo>& self) {
/// // Do something with `self`
/// });
///
/// In addition to registering the class, this registration also chains
/// `def()` calls to register methods. `myMethod()` is registered with
/// a pointer to the Foo class's `myMethod()` method. `lambdaMethod()`
/// is registered with a C++ lambda expression.
template <class CurClass>
class class_ : public ::torch::detail::class_base {
static_assert(
std::is_base_of<CustomClassHolder, CurClass>::value,
"torch::class_<T> requires T to inherit from CustomClassHolder");
public:
/// This constructor actually registers the class type.
/// String argument `namespaceName` is an identifier for the
/// namespace you would like this class to appear in.
/// String argument `className` is the name you would like to
/// see this class exposed as in Python and TorchScript. For example, if
/// you pass `foo` as the namespace name and `Bar` as the className, the
/// class will appear as `torch.classes.foo.Bar` in Python and TorchScript
explicit class_(
const std::string& namespaceName,
const std::string& className,
std::string doc_string = "")
: class_base(
namespaceName,
className,
std::move(doc_string),
typeid(c10::intrusive_ptr<CurClass>),
typeid(c10::tagged_capsule<CurClass>)) {}
/// def() can be used in conjunction with `torch::init()` to register
/// a constructor for a given C++ class type. For example, passing
/// `torch::init<int, std::string>()` would register a two-argument
/// constructor taking an `int` and a `std::string` as argument.
template <typename... Types>
class_& def(
torch::detail::types<void, Types...>,
std::string doc_string = "",
std::initializer_list<arg> default_args =
{}) { // Used in combination with
// torch::init<...>()
auto func = [](c10::tagged_capsule<CurClass> self, Types... args) {
auto classObj = c10::make_intrusive<CurClass>(args...);
auto object = self.ivalue.toObject();
object->setSlot(0, c10::IValue::make_capsule(std::move(classObj)));
};
defineMethod(
"__init__",
std::move(func),
std::move(doc_string),
std::move(default_args));
return *this;
}
// Used in combination with torch::init([]lambda(){......})
template <typename Func, typename... ParameterTypes>
class_& def(
InitLambda<Func, c10::guts::typelist::typelist<ParameterTypes...>> init,
std::string doc_string = "",
std::initializer_list<arg> default_args = {}) {
auto init_lambda_wrapper = [func = std::move(init.f)](
c10::tagged_capsule<CurClass> self,
ParameterTypes... arg) {
c10::intrusive_ptr<CurClass> classObj =
at::guts::invoke(func, std::forward<ParameterTypes>(arg)...);
auto object = self.ivalue.toObject();
object->setSlot(0, c10::IValue::make_capsule(classObj));
};
defineMethod(
"__init__",
std::move(init_lambda_wrapper),
std::move(doc_string),
std::move(default_args));
return *this;
}
/// This is the normal method registration API. `name` is the name that
/// the method will be made accessible by in Python and TorchScript.
/// `f` is a callable object that defines the method. Typically `f`
/// will either be a pointer to a method on `CurClass`, or a lambda
/// expression that takes a `c10::intrusive_ptr<CurClass>` as the first
/// argument (emulating a `this` argument in a C++ method.)
///
/// Examples:
///
/// // Exposes method `foo` on C++ class `Foo` as `call_foo()` in
/// // Python and TorchScript
/// .def("call_foo", &Foo::foo)
///
/// // Exposes the given lambda expression as method `call_lambda()`
/// // in Python and TorchScript.
/// .def("call_lambda", [](const c10::intrusive_ptr<Foo>& self) {
/// // do something
/// })
template <typename Func>
class_& def(
std::string name,
Func f,
std::string doc_string = "",
std::initializer_list<arg> default_args = {}) {
auto wrapped_f = detail::wrap_func<CurClass, Func>(std::move(f));
defineMethod(
std::move(name),
std::move(wrapped_f),
std::move(doc_string),
std::move(default_args));
return *this;
}
/// Method registration API for static methods.
template <typename Func>
class_& def_static(std::string name, Func func, std::string doc_string = "") {
auto qualMethodName = qualClassName + "." + name;
auto schema =
c10::inferFunctionSchemaSingleReturn<Func>(std::move(name), "");
auto wrapped_func =
[func = std::move(func)](jit::Stack& stack) mutable -> void {
using RetType =
typename c10::guts::infer_function_traits_t<Func>::return_type;
detail::BoxedProxy<RetType, Func>()(stack, func);
};
auto method = std::make_unique<jit::BuiltinOpFunction>(
std::move(qualMethodName),
std::move(schema),
std::move(wrapped_func),
std::move(doc_string));
classTypePtr->addStaticMethod(method.get());
registerCustomClassMethod(std::move(method));
return *this;
}
/// Property registration API for properties with both getter and setter
/// functions.
template <typename GetterFunc, typename SetterFunc>
class_& def_property(
const std::string& name,
GetterFunc getter_func,
SetterFunc setter_func,
std::string doc_string = "") {
torch::jit::Function* getter;
torch::jit::Function* setter;
auto wrapped_getter =
detail::wrap_func<CurClass, GetterFunc>(std::move(getter_func));
getter = defineMethod(name + "_getter", wrapped_getter, doc_string);
auto wrapped_setter =
detail::wrap_func<CurClass, SetterFunc>(std::move(setter_func));
setter = defineMethod(name + "_setter", wrapped_setter, doc_string);
classTypePtr->addProperty(name, getter, setter);
return *this;
}
/// Property registration API for properties with only getter function.
template <typename GetterFunc>
class_& def_property(
const std::string& name,
GetterFunc getter_func,
std::string doc_string = "") {
torch::jit::Function* getter;
auto wrapped_getter =
detail::wrap_func<CurClass, GetterFunc>(std::move(getter_func));
getter = defineMethod(name + "_getter", wrapped_getter, doc_string);
classTypePtr->addProperty(name, getter, nullptr);
return *this;
}
/// Property registration API for properties with read-write access.
template <typename T>
class_& def_readwrite(const std::string& name, T CurClass::*field) {
auto getter_func = [field =
field](const c10::intrusive_ptr<CurClass>& self) {
return self.get()->*field;
};
auto setter_func = [field = field](
const c10::intrusive_ptr<CurClass>& self, T value) {
self.get()->*field = value;
};
return def_property(name, getter_func, setter_func);
}
/// Property registration API for properties with read-only access.
template <typename T>
class_& def_readonly(const std::string& name, T CurClass::*field) {
auto getter_func =
[field = std::move(field)](const c10::intrusive_ptr<CurClass>& self) {
return self.get()->*field;
};
return def_property(name, getter_func);
}
  /// This is an unsafe method registration API added for adding custom JIT
  /// backend support via custom C++ classes. It is not for general purpose use.
  ///
  /// Unlike def(), the caller supplies an already-boxed callable operating on
  /// the interpreter stack together with an explicit FunctionSchema; no schema
  /// inference or argument wrapping is performed, and the provided schema is
  /// trusted as-is (hence "unsafe").
  class_& _def_unboxed(
      std::string name,
      std::function<void(jit::Stack&)> func,
      c10::FunctionSchema schema,
      std::string doc_string = "") {
    auto method = std::make_unique<jit::BuiltinOpFunction>(
        qualClassName + "." + name,
        std::move(schema),
        std::move(func),
        std::move(doc_string));
    // Raw pointer registered on the type; ownership moves to the global
    // custom-class method registry, which keeps the Function alive.
    classTypePtr->addMethod(method.get());
    registerCustomClassMethod(std::move(method));
    return *this;
  }
  /// def_pickle() is used to define exactly what state gets serialized
  /// or deserialized for a given instance of a custom C++ class in
  /// Python or TorchScript. This protocol is equivalent to the Pickle
  /// concept of `__getstate__` and `__setstate__` from Python
  /// (https://docs.python.org/2/library/pickle.html#object.__getstate__)
  ///
  /// Currently, both the `get_state` and `set_state` callables must be
  /// C++ lambda expressions. They should have the following signatures,
  /// where `CurClass` is the class you're registering and `T1` is some object
  /// that encapsulates the state of the object.
  ///
  ///     __getstate__(intrusive_ptr<CurClass>) -> T1
  ///     __setstate__(T2) -> intrusive_ptr<CurClass>
  ///
  /// `T1` must be an object that is convertible to IValue by the same rules
  /// for custom op/method registration.
  ///
  /// For the common case, T1 == T2. T1 can also be a subtype of T2. An
  /// example where it makes sense for T1 and T2 to differ is if __setstate__
  /// handles legacy formats in a backwards compatible way.
  ///
  /// Example:
  ///
  ///     .def_pickle(
  ///         // __getstate__
  ///         [](const c10::intrusive_ptr<MyStackClass<std::string>>& self) {
  ///           return self->stack_;
  ///         },
  ///         [](std::vector<std::string> state) { // __setstate__
  ///            return c10::make_intrusive<MyStackClass<std::string>>(
  ///               std::vector<std::string>{"i", "was", "deserialized"});
  ///         })
  template <typename GetStateFn, typename SetStateFn>
  class_& def_pickle(GetStateFn&& get_state, SetStateFn&& set_state) {
    // Stateless lambdas only: captured state could not be serialized along
    // with the class, so it is rejected at compile time.
    static_assert(
        c10::guts::is_stateless_lambda<std::decay_t<GetStateFn>>::value &&
            c10::guts::is_stateless_lambda<std::decay_t<SetStateFn>>::value,
        "def_pickle() currently only supports lambdas as "
        "__getstate__ and __setstate__ arguments.");
    def("__getstate__", std::forward<GetStateFn>(get_state));
    // __setstate__ needs to be registered with some custom handling:
    // We need to wrap the invocation of the user-provided function
    // such that we take the return value (i.e. c10::intrusive_ptr<CurrClass>)
    // and assign it to the `capsule` attribute.
    using SetStateTraits =
        c10::guts::infer_function_traits_t<std::decay_t<SetStateFn>>;
    using SetStateArg = typename c10::guts::typelist::head_t<
        typename SetStateTraits::parameter_types>;
    auto setstate_wrapper = [set_state = std::move(set_state)](
                                c10::tagged_capsule<CurClass> self,
                                SetStateArg&& arg) {
      c10::intrusive_ptr<CurClass> classObj =
          at::guts::invoke(set_state, std::forward<SetStateArg>(arg));
      auto object = self.ivalue.toObject();
      // Slot 0 holds the capsule wrapping the C++ object instance.
      object->setSlot(0, c10::IValue::make_capsule(classObj));
    };
    defineMethod(
        "__setstate__",
        detail::wrap_func<CurClass, decltype(setstate_wrapper)>(
            std::move(setstate_wrapper)));
    // type validation: the checks below verify the registered schemas agree
    // with the protocol documented above, failing fast at registration time.
    auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema();
    auto format_getstate_schema = [&getstate_schema]() {
      std::stringstream ss;
      ss << getstate_schema;
      return ss.str();
    };
    // __getstate__ must take exactly (self) ...
    TORCH_CHECK(
        getstate_schema.arguments().size() == 1,
        "__getstate__ should take exactly one argument: self. Got: ",
        format_getstate_schema());
    // ... where self is this custom class type ...
    auto first_arg_type = getstate_schema.arguments().at(0).type();
    TORCH_CHECK(
        *first_arg_type == *classTypePtr,
        "self argument of __getstate__ must be the custom class type. Got ",
        first_arg_type->repr_str());
    // ... and return exactly one serializable value.
    TORCH_CHECK(
        getstate_schema.returns().size() == 1,
        "__getstate__ should return exactly one value for serialization. Got: ",
        format_getstate_schema());
    auto ser_type = getstate_schema.returns().at(0).type();
    // __setstate__'s state argument (index 1; index 0 is self) must accept
    // whatever __getstate__ produces.
    auto setstate_schema = classTypePtr->getMethod("__setstate__").getSchema();
    auto arg_type = setstate_schema.arguments().at(1).type();
    TORCH_CHECK(
        ser_type->isSubtypeOf(*arg_type),
        "__getstate__'s return type should be a subtype of "
        "input argument of __setstate__. Got ",
        ser_type->repr_str(),
        " but expected ",
        arg_type->repr_str());
    return *this;
  }
 private:
  /// Shared implementation behind def()/def_property()/def_pickle():
  /// infers (or patches) the schema for `func`, wraps it for boxed
  /// execution, registers it on the class type, and returns the
  /// registered Function (non-owning pointer).
  template <typename Func>
  torch::jit::Function* defineMethod(
      std::string name,
      Func func,
      std::string doc_string = "",
      std::initializer_list<arg> default_args = {}) {
    // Build the fully qualified name before `name` is moved into the
    // schema-inference call.
    auto qualMethodName = qualClassName + "." + name;
    auto schema =
        c10::inferFunctionSchemaSingleReturn<Func>(std::move(name), "");
    // If default values are provided for function arguments, there must be
    // none (no default values) or default values for all function
    // arguments, except for self. This is because argument names are not
    // extracted by inferFunctionSchemaSingleReturn, and so there must be a
    // torch::arg instance in default_args even for arguments that do not
    // have an actual default value provided.
    TORCH_CHECK(
        default_args.size() == 0 ||
            default_args.size() == schema.arguments().size() - 1,
        "Default values must be specified for none or all arguments");
    // If there are default args, copy the argument names and default values to
    // the function schema.
    if (default_args.size() > 0) {
      schema = withNewArguments(schema, default_args);
    }
    // Adapt `func` to the boxed calling convention (args/result on the
    // interpreter stack).
    auto wrapped_func =
        [func = std::move(func)](jit::Stack& stack) mutable -> void {
          // TODO: we need to figure out how to profile calls to custom
          // functions like this! Currently can't do it because the profiler
          // stuff is in libtorch and not ATen
          using RetType =
              typename c10::guts::infer_function_traits_t<Func>::return_type;
          detail::BoxedProxy<RetType, Func>()(stack, func);
        };
    auto method = std::make_unique<jit::BuiltinOpFunction>(
        qualMethodName,
        std::move(schema),
        std::move(wrapped_func),
        std::move(doc_string));
    // Register the method here to keep the Method alive.
    // ClassTypes do not hold ownership of their methods (normally
    // those are held by the CompilationUnit), so we need a proxy for
    // that behavior here.
    auto method_val = method.get();
    classTypePtr->addMethod(method_val);
    registerCustomClassMethod(std::move(method));
    return method_val;
  }
};
/// make_custom_class() is a convenient way to create an instance of a
/// registered custom class and wrap it in an IValue, for example when you want
/// to pass the object to TorchScript. Its syntax is equivalent to APIs like
/// `std::make_shared<>` or `c10::make_intrusive<>`.
///
/// For example, if you have a custom C++ class that can be constructed from an
/// `int` and `std::string`, you might use this API like so:
///
///     IValue custom_class_iv = torch::make_custom_class<MyClass>(3,
///     "foobarbaz");
template <typename CurClass, typename... CtorArgs>
c10::IValue make_custom_class(CtorArgs&&... args) {
  // Perfect-forward the constructor arguments into an intrusive_ptr and wrap
  // the resulting instance directly in an IValue.
  return c10::IValue(
      c10::make_intrusive<CurClass>(std::forward<CtorArgs>(args)...));
}
// Alternative API for creating a torchbind class over torch::class_. This API is preferred for Edge
// use cases to prevent binary size regressions. It must be used in conjunction with the
// TORCH_SELECTIVE_CLASS macro, e.g.:
// selective_class_<foo>("foo_namespace", TORCH_SELECTIVE_CLASS("foo"))
template <class CurClass>
inline class_<CurClass> selective_class_(
    const std::string& namespace_name,
    detail::SelectiveStr<true> className) {
  // The selective string converts to a C string; materialize it and forward
  // to the regular torchbind registration path.
  const std::string class_name{className.operator const char*()};
  return torch::class_<CurClass>(namespace_name, class_name);
}
template <class CurClass>
inline detail::ClassNotSelected selective_class_(
    const std::string&,
    detail::SelectiveStr<false>) {
  // Class was not selected for this build: return the inert placeholder so
  // chained registration calls compile away to no-ops.
  return detail::ClassNotSelected{};
}
// jit namespace for backward-compatibility
// We previously defined everything in torch::jit but moved it out to
// better reflect that these features are not limited only to TorchScript
namespace jit {
// Aliases kept so that older code written against torch::jit::class_ (and
// the related helpers) keeps compiling unchanged.
using ::torch::class_;
using ::torch::getCustomClass;
using ::torch::init;
using ::torch::isCustomClass;
} // namespace jit
/// Defines a torchbind class named `className` inside this library's
/// namespace. Only valid inside a TORCH_LIBRARY (DEF) or
/// TORCH_LIBRARY_FRAGMENT block; calling it from a TORCH_LIBRARY_IMPL
/// block is a registration-time error.
template <class CurClass>
inline class_<CurClass> Library::class_(const std::string& className) {
  TORCH_CHECK(
      kind_ == DEF || kind_ == FRAGMENT,
      "class_(\"",
      className,
      "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. "
      "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. "
      "(Error occurred at ",
      file_,
      ":",
      line_,
      ")");
  // DEF/FRAGMENT libraries always carry a namespace; fail loudly if not.
  TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_);
  return torch::class_<CurClass>(*ns_, className);
}
// NOTE(review): declaration only (defined elsewhere); presumably returns the
// names of all custom classes registered so far -- confirm at the definition.
const std::unordered_set<std::string> getAllCustomClassesNames();
/// Selective-build overload: registers the class only when it was selected
/// (SelectiveStr<true>). Same placement restriction as the string overload:
/// only valid in TORCH_LIBRARY / TORCH_LIBRARY_FRAGMENT blocks.
template <class CurClass>
inline class_<CurClass> Library::class_(detail::SelectiveStr<true> className) {
  // The selective string converts to a C string; materialize it for the
  // error message and the registration call below.
  auto class_name = std::string(className.operator const char*());
  TORCH_CHECK(
      kind_ == DEF || kind_ == FRAGMENT,
      "class_(\"",
      class_name,
      "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. "
      "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. "
      "(Error occurred at ",
      file_,
      ":",
      line_,
      ")");
  // DEF/FRAGMENT libraries always carry a namespace; fail loudly if not.
  TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_);
  return torch::class_<CurClass>(*ns_, class_name);
}
template <class CurClass>
inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false>) {
  // Unselected class: hand back the no-op registration stub.
  return detail::ClassNotSelected{};
}
} // namespace torch
| 38.57115 | 118 | 0.661495 | [
"object",
"vector"
] |
a929275f9777777a6b5f9a1180fc2bd5751c57d8 | 7,282 | c | C | engine/SE/Plyfile/plyremoveface.c | Akalu/phy-engine | 6dfc7467675b7de6607cf05cd06923479811ad93 | [
"MIT"
] | 1 | 2021-07-19T04:37:24.000Z | 2021-07-19T04:37:24.000Z | engine/SE/Plyfile/plyremoveface.c | akalu/phy-engine | 6dfc7467675b7de6607cf05cd06923479811ad93 | [
"MIT"
] | null | null | null | engine/SE/Plyfile/plyremoveface.c | akalu/phy-engine | 6dfc7467675b7de6607cf05cd06923479811ad93 | [
"MIT"
] | null | null | null |
#include <stdio.h>
#include <math.h>
#include <strings.h>
#include <ply.h>
/* Boolean constants used throughout this tool. */
#define FALSE 0
#define TRUE 1
/* Coordinate indices for Point/Vector arrays. */
#define X 0
#define Y 1
#define Z 2
/* Scratch globals used by the linear-algebra macros below (LAmag by
   NORMALIZE3; the others appear unused in this file). */
double LAmag,LAsum; int LAi,LAj,LAk;
/* 3-vector helper macros.  NOTE(review): macro arguments are expanded
   without parentheses and the statement macros are not do/while(0)-wrapped,
   so pass only simple lvalue expressions and beware of use inside unbraced
   if/else bodies. */
#define VEC3_ZERO(a) { a[0]=a[1]=a[2]=0; }
#define VEC3_NEG(a,b) { a[0]= -b[0]; a[1]= -b[1];a[2]= -b[2];}
#define VEC3_V_OP_V(a,b,op,c) { a[0] = b[0] op c[0]; \
                                a[1] = b[1] op c[1]; \
                                a[2] = b[2] op c[2]; \
                              }
#define VEC3_ASN_OP(a,op,b) {a[0] op b[0]; a[1] op b[1]; a[2] op b[2];}
#define DOTPROD3(a, b) (a[0]*b[0] + a[1]*b[1] + a[2]*b[2])
#define CROSSPROD3(a,b,c) {a[0]=b[1]*c[2]-b[2]*c[1]; \
                           a[1]=b[2]*c[0]-b[0]*c[2]; \
                           a[2]=b[0]*c[1]-b[1]*c[0];}
#define NORMALIZE3(a) {LAmag=1./sqrt(a[0]*a[0]+a[1]*a[1]+a[2]*a[2]);\
                       a[0] *= LAmag; a[1] *= LAmag; a[2] *= LAmag;}
#define ZERO3_TOL(a, tol) { a[0] = ((a[0]<tol)&&(a[0]>-tol))?0.0:a[0];\
                            a[1] = ((a[1]<tol)&&(a[1]>-tol))?0.0:a[1];\
                            a[2] = ((a[2]<tol)&&(a[2]>-tol))?0.0:a[2];\
                          }
#define SQ_DIST3(a, b) ((a[0]-b[0])*(a[0]-b[0]) + \
                        (a[1]-b[1])*(a[1]-b[1]) + \
                        (a[2]-b[2])*(a[2]-b[2]))
#define FMAX(x,y) ((x)>(y) ? (x) : (y))
#define FMIN(x,y) ((x)<(y) ? (x) : (y))
#define FP_EQ_EPS( a, b, c ) ((((a) - (b)) <= (c)) && (((a) - (b)) >= -(c)))
/* Geometric value types (fixed-size float triples). */
typedef float Point[3];
typedef float Vector[3];
/* A vertex as read from the PLY file.  Only a sequential id and the
   deletion flag are managed here; every other vertex property rides
   along opaquely in other_props. */
typedef struct Vertex {
  int id;
  unsigned char deleted;
  void *other_props;
} Vertex;
/* A polygonal face: a counted list of vertex indices plus pass-through
   properties and a deletion flag. */
typedef struct Face {
  int id;
  unsigned char nverts;
  int *verts;
  void *other_props;
  unsigned char deleted;
} Face;
PlyProperty vert_props[1];
/* Description of the one face property we interpret ourselves: the vertex
   index list, with its length stored in Face.nverts. */
PlyProperty face_props[] = {
  {"vertex_indices", PLY_INT, PLY_INT, offsetof(Face,verts),
   1, PLY_UCHAR, PLY_UCHAR, offsetof(Face,nverts)},
};
/* In-memory model state shared by the routines below. */
static int nverts,nfaces;
static Vertex **vlist;
static Face **flist;
static PlyOtherElems *other_elements = NULL;
static PlyOtherProp *vert_other,*face_other,*edge_other;
static int nelems;
static char **element_list;
static int num_comments;
static char **comments;
static int num_obj_info;
static char **obj_info;
static int file_type;
/* TRUE once a vertex_indices property has been seen on the face element. */
int has_fverts;
/* Command-line face-id strings (-f flag) and how many of them there are. */
char **deleted_faces;
int num_deleted_faces;
/*
 * Entry point: parse command-line options, read a PLY model from stdin,
 * delete the requested faces (and any vertices they orphan), then write
 * the result to stdout.
 *
 * Fixed: the old definition used an implicit int return type and fell off
 * the end without returning, yielding an indeterminate exit status.
 */
int main(int argc, char *argv[])
{
  get_options(argc, argv);
  read_file();
  delete_faces();
  write_file();
  return 0;
}
/*
 * Parse command-line flags.  Supported:
 *   -f id1 id2 ...   indices of faces to delete; arguments are consumed
 *                    until the first one that does not parse as an integer
 * Any unrecognized flag prints usage and exits.  The id strings themselves
 * are validated later, in delete_faces().
 */
get_options(int argc, char *argv[])
{
  char *s;
  char *progname;
  int dummy;
  progname = argv[0];
  while (--argc > 0 && (*++argv)[0]=='-')
  {
    /* A single dash argument may pack several one-letter flags. */
    for (s = argv[0]+1; *s; s++)
      switch (*s)
      {
        case 'f':
          /* Remember where the id list starts and count how many of the
             following arguments look like integers.
             NOTE(review): argv is advanced here while still inside the
             flag-character loop, which assumes 'f' is the last letter of
             its argument group -- confirm before packing flags together. */
          ++argv;
          deleted_faces = argv;
          for (num_deleted_faces = 0;
               ((*argv) && (sscanf((*argv), "%d\n", &dummy) == 1));
               num_deleted_faces++)
            ++argv;
          --argv;
          argc -= num_deleted_faces;
          break;
        default:
          usage (progname);
          exit (-1);
          break;
      }
  }
}
/*
 * Print a usage summary to stderr.
 *
 * Fixed: the help text previously advertised a nonexistent "-a" flag
 * (stale copy-paste from a sibling ply tool); the only flag this program
 * actually handles (see get_options) is -f.
 */
void usage(char *progname)
{
  fprintf(stderr, "usage: %s [flags] <in.ply >out.ply\n", progname);
  fprintf(stderr, " -- optional flags -- \n");
  fprintf(stderr, " -f <id> ... : remove the faces with the given indices\n");
}
delete_faces()
{
int i, j;
int id;
Face *face;
for (i=0; i<nfaces; i++)
flist[i]->deleted = FALSE;
for (i=0; i<num_deleted_faces; i++)
{
if ((sscanf(deleted_faces[i], "%d", &id) != 1) ||
(id < 0) || (id >= nfaces))
{
fprintf(stderr, "Invalid face id\n");
exit(-1);
}
flist[id]->deleted = TRUE;
}
for (i=0; i<nverts; i++)
vlist[i]->deleted = TRUE;
for (i=0; i<nfaces; i++)
{
face = flist[i];
if (face->deleted == TRUE)
continue;
for (j=0; j<face->nverts; j++)
vlist[face->verts[j]]->deleted = FALSE;
}
}
/*
 * Read a PLY model from stdin into the global vlist/flist arrays.
 * Elements and properties we do not interpret are preserved through the
 * ply library's "other" mechanisms so write_file() can emit them intact.
 */
read_file()
{
  int i,j,k;
  PlyFile *ply;
  int nprops;
  int num_elems;
  PlyProperty **plist;
  char *elem_name;
  float version;
  /* Parse the header; element_list receives the element names. */
  ply = ply_read (stdin, &nelems, &element_list);
  ply_get_info (ply, &version, &file_type);
  for (i = 0; i < nelems; i++) {
    elem_name = element_list[i];
    plist = ply_get_element_description (ply, elem_name, &num_elems, &nprops);
    if (equal_strings ("vertex", elem_name)) {
      vlist = (Vertex **) malloc (sizeof (Vertex *) * num_elems);
      nverts = num_elems;
      /* Carry along every vertex property; we interpret none ourselves. */
      vert_other = ply_get_other_properties (ply, elem_name,
                                             offsetof(Vertex,other_props));
      for (j = 0; j < num_elems; j++) {
        vlist[j] = (Vertex *) malloc (sizeof (Vertex));
        ply_get_element (ply, (void *) vlist[j]);
        vlist[j]->id = j;
      }
    }
    else if (equal_strings ("face", elem_name)) {
      flist = (Face **) malloc (sizeof (Face *) * num_elems);
      nfaces = num_elems;
      /* Faces must provide a vertex_indices property; everything else is
         carried along opaquely. */
      has_fverts = FALSE;
      for (j=0; j<nprops; j++)
      {
        if (equal_strings("vertex_indices", plist[j]->name))
        {
          ply_get_property (ply, elem_name, &face_props[0]);
          has_fverts = TRUE;
        }
      }
      face_other = ply_get_other_properties (ply, elem_name,
                                             offsetof(Face,other_props));
      if (!has_fverts)
      {
        fprintf(stderr, "Faces must have vertex indices\n");
        exit(-1);
      }
      for (j = 0; j < num_elems; j++) {
        flist[j] = (Face *) malloc (sizeof (Face));
        ply_get_element (ply, (void *) flist[j]);
        flist[j]->id = j;
      }
    }
    else
      /* Unknown element: stash it wholesale for pass-through output. */
      other_elements = ply_get_other_element (ply, elem_name, num_elems);
  }
  /* Preserve comments and obj_info lines as well. */
  comments = ply_get_comments (ply, &num_comments);
  obj_info = ply_get_obj_info (ply, &num_obj_info);
  ply_close (ply);
}
/*
 * Write the surviving vertices and faces to stdout as a PLY file in the
 * same format (ascii/binary) as the input, renumbering vertices so face
 * indices stay valid after deletions, and passing through all preserved
 * comments, obj_info lines, and unknown elements/properties.
 */
write_file()
{
  int i,j,k;
  PlyFile *ply;
  int num_elems;
  char *elem_name;
  int new_nverts, new_nfaces;
  Face *face;
  Vertex *vert;
  static char *known_elements[] = {"vertex", "face"};
  ply = ply_write (stdout, 2, known_elements, file_type);
  /* Assign compact new ids to the surviving vertices. */
  for (i=0, new_nverts=0; i<nverts; i++)
  {
    vert = vlist[i];
    if (vert->deleted == TRUE)
      continue;
    vert->id = new_nverts++;
  }
  /* Remap each surviving face's corner indices to the new vertex ids and
     count the survivors. */
  for (i=0, new_nfaces=0; i<nfaces; i++)
  {
    face = flist[i];
    if (face->deleted == TRUE)
      continue;
    for (j=0; j<face->nverts; j++)
      face->verts[j] = vlist[face->verts[j]]->id;
    new_nfaces++;
  }
  /* Describe the output header: element counts, interpreted properties,
     and all pass-through properties/elements. */
  ply_element_count (ply, "vertex", new_nverts);
  ply_describe_other_properties (ply, vert_other, offsetof(Vertex,other_props));
  ply_element_count (ply, "face", new_nfaces);
  ply_describe_property (ply, "face", &face_props[0]);
  ply_describe_other_properties (ply, face_other, offsetof(Face,other_props));
  ply_describe_other_elements (ply, other_elements);
  for (i = 0; i < num_comments; i++)
    ply_put_comment (ply, comments[i]);
  for (i = 0; i < num_obj_info; i++)
    ply_put_obj_info (ply, obj_info[i]);
  ply_header_complete (ply);
  /* Emit only the elements that survived the deletion pass. */
  ply_put_element_setup (ply, "vertex");
  for (i = 0; i < nverts; i++)
    if (vlist[i]->deleted == FALSE)
      ply_put_element (ply, (void *) vlist[i]);
  ply_put_element_setup (ply, "face");
  for (i = 0; i < nfaces; i++)
    if (flist[i]->deleted == FALSE)
      ply_put_element (ply, (void *) flist[i]);
  ply_put_other_elements (ply);
  ply_close (ply);
}
| 20.805714 | 80 | 0.54312 | [
"vector"
] |
a9332c0c22ee648437f65c1887ff339e9bd782a5 | 14,048 | h | C | OpenNN/opennn/conjugate_gradient.h | wagnrd/Pong | 0c50b22e0805b2ae19ff07d25859a6358217b377 | [
"MIT"
] | 4 | 2019-03-06T16:53:27.000Z | 2021-07-28T03:05:08.000Z | OpenNN/opennn/conjugate_gradient.h | wagnrd/Pong | 0c50b22e0805b2ae19ff07d25859a6358217b377 | [
"MIT"
] | 1 | 2021-02-11T17:10:29.000Z | 2021-02-11T20:54:03.000Z | OpenNN/opennn/conjugate_gradient.h | wagnrd/Pong | 0c50b22e0805b2ae19ff07d25859a6358217b377 | [
"MIT"
] | null | null | null | /****************************************************************************************************************/
/* */
/* OpenNN: Open Neural Networks Library */
/* www.opennn.net */
/* */
/* C O N J U G A T E G R A D I E N T C L A S S H E A D E R */
/* */
/* Artificial Intelligence Techniques SL */
/* artelnics@artelnics.com */
/* */
/****************************************************************************************************************/
#ifndef __CONJUGATEGRADIENT_H__
#define __CONJUGATEGRADIENT_H__
// System includes
#include <string>
#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <functional>
#include <climits>
#include <cmath>
#include <ctime>
// OpenNN includes
#include "loss_index.h"
#include "optimization_algorithm.h"
#include "learning_rate_algorithm.h"
// TinyXml includes
#include "tinyxml2.h"
namespace OpenNN
{
///
/// This concrete class represents a conjugate gradient training algorithm for a loss index of a neural network.
///
/// Declaration-only header: all methods are defined in the corresponding
/// .cpp translation unit.
class ConjugateGradient : public OptimizationAlgorithm
{
public:
    // ENUMERATIONS
    /// Enumeration of the available training operators for obtaining the training direction
    /// (PR and FR; conventionally the Polak-Ribiere and Fletcher-Reeves update formulas).
    enum TrainingDirectionMethod{PR, FR};
    // DEFAULT CONSTRUCTOR
    explicit ConjugateGradient();
    // GENERAL CONSTRUCTOR
    explicit ConjugateGradient(LossIndex*);
    // XML CONSTRUCTOR
    explicit ConjugateGradient(const tinyxml2::XMLDocument&);
    // DESTRUCTOR
    virtual ~ConjugateGradient();
    // STRUCTURES
    ///
    /// This structure contains the conjugate gradient results.
    ///
    struct ConjugateGradientResults : public OptimizationAlgorithm::OptimizationAlgorithmResults
    {
        /// Default constructor.
        ConjugateGradientResults()
        {
            conjugate_gradient_pointer = nullptr;
        }
        /// Conjugate gradient constructor.
        ConjugateGradientResults(ConjugateGradient* new_conjugate_gradient_pointer)
        {
            conjugate_gradient_pointer = new_conjugate_gradient_pointer;
        }
        /// Destructor.
        virtual ~ConjugateGradientResults()
        {
        }
        /// Pointer to the conjugate gradient object for which the training results are to be stored.
        ConjugateGradient* conjugate_gradient_pointer;
        // TRAINING HISTORY
        /// History of the neural network parameters over the training iterations.
        Vector< Vector<double> > parameters_history;
        /// History of the parameters norm over the training iterations.
        Vector<double> parameters_norm_history;
        /// History of the loss function value over the training iterations.
        Vector<double> loss_history;
        /// History of the selection loss over the training iterations.
        Vector<double> selection_error_history;
        /// History of the loss function gradient over the training iterations.
        Vector< Vector<double> > gradient_history;
        /// History of the gradient norm over the training iterations.
        Vector<double> gradient_norm_history;
        /// History of the conjugate gradient training direction over the training iterations.
        Vector< Vector<double> > training_direction_history;
        /// History of the training rate over the training iterations.
        Vector<double> training_rate_history;
        /// History of the elapsed time over the training iterations.
        Vector<double> elapsed_time_history;
        // FINAL VALUES
        /// Final neural network parameters vector.
        Vector<double> final_parameters;
        /// Final neural network parameters norm.
        double final_parameters_norm;
        /// Final loss function evaluation.
        double final_loss;
        /// Final selection error.
        double final_selection_error;
        /// Final loss function gradient.
        Vector<double> final_gradient;
        /// Final gradient norm.
        double final_gradient_norm;
        /// Final conjugate gradient training direction.
        Vector<double> final_training_direction;
        /// Final conjugate gradient training rate.
        double final_training_rate;
        /// Elapsed time of the training process.
        double elapsed_time;
        /// Maximum number of training iterations.
        size_t iterations_number;
        void resize_training_history(const size_t&);
        string object_to_string() const;
        Matrix<string> write_final_results(const int& precision = 3) const;
    };
    // METHODS
    // Get methods
    const LearningRateAlgorithm& get_learning_rate_algorithm() const;
    LearningRateAlgorithm* get_learning_rate_algorithm_pointer();
    // Training operators
    const TrainingDirectionMethod& get_training_direction_method() const;
    string write_training_direction_method() const;
    // Training parameters
    const double& get_warning_parameters_norm() const;
    const double& get_warning_gradient_norm() const;
    const double& get_warning_training_rate() const;
    const double& get_error_parameters_norm() const;
    const double& get_error_gradient_norm() const;
    const double& get_error_training_rate() const;
    // Stopping criteria
    const double& get_minimum_parameters_increment_norm() const;
    const double& get_minimum_loss_increase() const;
    const double& get_loss_goal() const;
    const size_t& get_maximum_selection_error_increases() const;
    const double& get_gradient_norm_goal() const;
    const size_t& get_maximum_epochs_number() const;
    const double& get_maximum_time() const;
    const bool& get_return_minimum_selection_error_neural_network() const;
    const bool& get_apply_early_stopping() const;
    // Reserve training history
    const bool& get_reserve_parameters_history() const;
    const bool& get_reserve_parameters_norm_history() const;
    const bool& get_reserve_loss_history() const;
    const bool& get_reserve_selection_error_history() const;
    const bool& get_reserve_gradient_history() const;
    const bool& get_reserve_gradient_norm_history() const;
    const bool& get_reserve_training_direction_history() const;
    const bool& get_reserve_training_rate_history() const;
    const bool& get_reserve_elapsed_time_history() const;
    // Set methods
    void set_default();
    void set_loss_index_pointer(LossIndex*);
    // Training operators
    void set_training_direction_method(const TrainingDirectionMethod&);
    void set_training_direction_method(const string&);
    // Training parameters
    void set_warning_parameters_norm(const double&);
    void set_warning_gradient_norm(const double&);
    void set_warning_training_rate(const double&);
    void set_error_parameters_norm(const double&);
    void set_error_gradient_norm(const double&);
    void set_error_training_rate(const double&);
    // Stopping criteria
    void set_minimum_parameters_increment_norm(const double&);
    void set_loss_goal(const double&);
    void set_minimum_loss_decrease(const double&);
    void set_maximum_selection_error_increases(const size_t&);
    void set_gradient_norm_goal(const double&);
    void set_maximum_epochs_number(const size_t&);
    void set_maximum_time(const double&);
    void set_return_minimum_selection_error_neural_network(const bool&);
    void set_apply_early_stopping(const bool&);
    // Reserve training history
    void set_reserve_parameters_history(const bool&);
    void set_reserve_parameters_norm_history(const bool&);
    void set_reserve_loss_history(const bool&);
    void set_reserve_selection_error_history(const bool&);
    void set_reserve_gradient_history(const bool&);
    void set_reserve_gradient_norm_history(const bool&);
    void set_reserve_training_direction_history(const bool&);
    void set_reserve_training_rate_history(const bool&);
    void set_reserve_elapsed_time_history(const bool&);
    void set_reserve_all_training_history(const bool&);
    // Utilities
    void set_display_period(const size_t&);
    void set_save_period(const size_t&);
    // Training direction methods
    double calculate_PR_parameter(const Vector<double>&, const Vector<double>&) const;
    double calculate_FR_parameter(const Vector<double>&, const Vector<double>&) const;
    Vector<double> calculate_PR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const;
    Vector<double> calculate_FR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const;
    Vector<double> calculate_gradient_descent_training_direction(const Vector<double>&) const;
    Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const;
    // Training methods
    ConjugateGradientResults* perform_training();
    void perform_training_void();
    string write_optimization_algorithm_type() const;
    // Serialization methods
    Matrix<string> to_string_matrix() const;
    tinyxml2::XMLDocument* to_XML() const;
    void from_XML(const tinyxml2::XMLDocument&);
    void write_XML(tinyxml2::XMLPrinter&) const;
    //void read_XML( );
private:
    /// Applied method for calculating the conjugate gradient direction.
    TrainingDirectionMethod training_direction_method;
    /// Training rate algorithm object for one-dimensional minimization.
    LearningRateAlgorithm learning_rate_algorithm;
    /// Value for the parameters norm at which a warning message is written to the screen.
    double warning_parameters_norm;
    /// Value for the gradient norm at which a warning message is written to the screen.
    double warning_gradient_norm;
    /// Training rate value at which a warning message is written to the screen.
    double warning_training_rate;
    /// Value for the parameters norm at which the training process is assumed to fail.
    double error_parameters_norm;
    /// Value for the gradient norm at which the training process is assumed to fail.
    double error_gradient_norm;
    /// Training rate at which the line minimization algorithm is assumed to be unable to bracket a minimum.
    double error_training_rate;
    // STOPPING CRITERIA
    /// Norm of the parameters increment vector at which training stops.
    double minimum_parameters_increment_norm;
    /// Minimum loss improvement between two successive iterations. It is used as a stopping criterion.
    double minimum_loss_decrease;
    /// Goal value for the loss. It is used as a stopping criterion.
    double loss_goal;
    /// Goal value for the norm of the error function gradient. It is used as a stopping criterion.
    double gradient_norm_goal;
    /// Maximum number of iterations at which the selection loss increases.
    /// This is an early stopping method for improving selection.
    /// NOTE(review): member is named "_decreases" while its accessors are
    /// get/set_maximum_selection_error_increases -- confirm intended name.
    size_t maximum_selection_error_decreases;
    /// Maximum number of iterations to perform training. It is used as a stopping criterion.
    size_t maximum_epochs_number;
    /// Maximum training time. It is used as a stopping criterion.
    double maximum_time;
    /// True if the final model will be the neural network with the minimum selection error, false otherwise.
    bool return_minimum_selection_error_neural_network;
    /// True if the selection loss decrease stopping criteria has to be taken in account, false otherwise.
    bool apply_early_stopping;
    // TRAINING HISTORY
    /// True if the parameters history matrix is to be reserved, false otherwise.
    bool reserve_parameters_history;
    /// True if the parameters norm history vector is to be reserved, false otherwise.
    bool reserve_parameters_norm_history;
    /// True if the loss history vector is to be reserved, false otherwise.
    bool reserve_loss_history;
    /// True if the gradient history matrix is to be reserved, false otherwise.
    bool reserve_gradient_history;
    /// True if the gradient norm history vector is to be reserved, false otherwise.
    bool reserve_gradient_norm_history;
    /// True if the training direction history matrix is to be reserved, false otherwise.
    bool reserve_training_direction_history;
    /// True if the training rate history vector is to be reserved, false otherwise.
    bool reserve_training_rate_history;
    /// True if the elapsed time history vector is to be reserved, false otherwise.
    bool reserve_elapsed_time_history;
    /// True if the selection loss history vector is to be reserved, false otherwise.
    bool reserve_selection_error_history;
};
}
#endif
// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2018 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
| 29.825902 | 125 | 0.687856 | [
"object",
"vector",
"model"
] |
a94c6d2bf53c8ecfa768d0922ffb3d44a0e1a057 | 5,123 | h | C | Samples/FacialAnimation/include/FacialAnimation.h | rjdgtn/OgreCPPBuilder | 1d3fa5874e54da9c19f4fe9fd128fcda19285b5e | [
"MIT"
] | 108 | 2015-01-23T01:43:56.000Z | 2021-12-23T07:00:48.000Z | Samples/FacialAnimation/include/FacialAnimation.h | venscn/ogre | 069a43c4c4fcb5264c995fca65a28acd3154b230 | [
"MIT"
] | 2 | 2016-03-05T14:40:20.000Z | 2017-02-20T11:33:51.000Z | Samples/FacialAnimation/include/FacialAnimation.h | venscn/ogre | 069a43c4c4fcb5264c995fca65a28acd3154b230 | [
"MIT"
] | 92 | 2015-01-13T08:57:11.000Z | 2021-09-19T05:20:55.000Z | #ifndef __FacialAnimation_H__
#define __FacialAnimation_H__
#include "SdkSample.h"
using namespace Ogre;
using namespace OgreBites;
class _OgreSampleClassExport Sample_FacialAnimation : public SdkSample
{
public:
Sample_FacialAnimation():
mSpeakAnimState(0), mManualAnimState(0), mManualKeyFrame(0), mPlayAnimation(false)
{
mInfo["Title"] = "Facial Animation";
mInfo["Description"] = "A demonstration of the facial animation feature, using pose animation.";
mInfo["Thumbnail"] = "thumb_facial.png";
mInfo["Category"] = "Animation";
mInfo["Help"] = "Use the checkbox to enable/disable manual animation. "
"When manual animation is enabled, use the sliders to adjust each pose's influence.";
}
bool frameRenderingQueued(const FrameEvent& evt)
{
if(mPlayAnimation) mSpeakAnimState->addTime(evt.timeSinceLastFrame);
return SdkSample::frameRenderingQueued(evt); // don't forget the parent class updates!
}
protected:
void setupContent(void)
{
// setup some basic lighting for our scene
mSceneMgr->setAmbientLight(ColourValue(0.5, 0.5, 0.5));
mSceneMgr->createLight()->setPosition(40, 60, 50);
mSceneMgr->createLight()->setPosition(-120, -80, -50);
// pre-load the mesh so that we can tweak it with a manual animation
mHeadMesh = MeshManager::getSingleton().load("facial.mesh", ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);
// create a manual animation, create a pose track for it, and create a keyframe in that track
mManualKeyFrame = mHeadMesh->createAnimation("Manual", 0)->createVertexTrack(4, VAT_POSE)->createVertexPoseKeyFrame(0);
// create pose references for the first 15 poses
for (unsigned int i = 0; i < 15; i++) mManualKeyFrame->addPoseReference(i, 0);
// create a head entity from the mesh and attach it to a node with a vertical offset to center it
Entity* head = mSceneMgr->createEntity("Head", "facial.mesh");
mSceneMgr->getRootSceneNode()->createChildSceneNode(Vector3(0, -30, 0))->attachObject(head);
// get the animation states
mSpeakAnimState = head->getAnimationState("Speak");
mManualAnimState = head->getAnimationState("Manual");
// make the camera orbit around the head, and show the cursor
mCameraMan->setStyle(CS_ORBIT);
mCameraMan->setYawPitchDist(Radian(0), Radian(0), 130);
mTrayMgr->showCursor();
mPlayAnimation = true; // by default, the speaking animation is enabled
setupControls();
}
void setupControls()
{
// make logo and frame stats a little more compact to make room for controls
mTrayMgr->showLogo(TL_BOTTOMLEFT);
mTrayMgr->toggleAdvancedFrameStats();
// create group labels for the different sliders
mExpressions.push_back(mTrayMgr->createLabel(TL_NONE, "ExpressionLabel", "Expressions"));
mMouthShapes.push_back(mTrayMgr->createLabel(TL_NONE, "MouthShapeLabel", "Mouth Shapes"));
// create sliders to adjust pose influence
for (unsigned int i = 0; i < mManualKeyFrame->getPoseReferences().size(); i++)
{
String sliderName = "Pose" + StringConverter::toString(i);
String poseName = mHeadMesh->getPose(i)->getName();
if (poseName.find("Expression") != std::string::npos)
mExpressions.push_back(mTrayMgr->createLongSlider(TL_NONE, sliderName, poseName.substr(11), 200, 80, 44, 0, 1, 11));
else mMouthShapes.push_back(mTrayMgr->createLongSlider(TL_NONE, sliderName, poseName.substr(0, 1), 160, 80, 44, 0, 1, 11));
}
// checkbox to switch between automatic animation and manual animation.
mTrayMgr->createCheckBox(TL_TOP, "Manual", "Manual Animation")->setChecked(!mPlayAnimation);
}
void cleanupContent()
{
mExpressions.clear();
mMouthShapes.clear();
mSpeakAnimState = 0;
mManualAnimState = 0;
mManualKeyFrame = 0;
mPlayAnimation = false;
MeshManager::getSingleton().unload(mHeadMesh->getHandle());
}
void checkBoxToggled(OgreBites::CheckBox * box)
{
mPlayAnimation = !box->isChecked();
// toggle animation states
mSpeakAnimState->setEnabled(mPlayAnimation);
mManualAnimState->setEnabled(!mPlayAnimation);
// toggle expression controls
for (unsigned int i = 0; i < mExpressions.size(); i++)
{
mTrayMgr->moveWidgetToTray(mExpressions[i], mPlayAnimation ? TL_NONE : TL_TOPLEFT);
if (mPlayAnimation) mExpressions[i]->hide();
else mExpressions[i]->show();
}
// toggle mouth shape controls
for (unsigned int i = 0; i < mMouthShapes.size(); i++)
{
mTrayMgr->moveWidgetToTray(mMouthShapes[i], mPlayAnimation ? TL_NONE : TL_TOPRIGHT);
if (mPlayAnimation) mMouthShapes[i]->hide();
else mMouthShapes[i]->show();
}
}
void sliderMoved(OgreBites::Slider * slider)
{
// update the pose reference controlled by this slider
mManualKeyFrame->updatePoseReference(StringConverter::parseInt(slider->getName().substr(4)), slider->getValue());
// dirty animation state since we're fudging this manually
mManualAnimState->getParent()->_notifyDirty();
}
MeshPtr mHeadMesh;
AnimationState* mSpeakAnimState;
AnimationState* mManualAnimState;
VertexPoseKeyFrame* mManualKeyFrame;
bool mPlayAnimation;
WidgetList mExpressions;
WidgetList mMouthShapes;
};
#endif
| 35.331034 | 126 | 0.734335 | [
"mesh",
"shape"
] |
a94d897f8d658c8e7fed1c514e6443f16cc546cd | 533,049 | c | C | train.c | tunesh/people_count_cython | 67bb853810c4bb0110e4a62914ea65e0f7dd7a14 | [
"MIT"
] | null | null | null | train.c | tunesh/people_count_cython | 67bb853810c4bb0110e4a62914ea65e0f7dd7a14 | [
"MIT"
] | null | null | null | train.c | tunesh/people_count_cython | 67bb853810c4bb0110e4a62914ea65e0f7dd7a14 | [
"MIT"
] | null | null | null | /* Generated by Cython 0.26.1 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_26_1"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000)
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#ifdef __cplusplus
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough) || (defined(__GNUC__) && defined(__attribute__))
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__train
#define __PYX_HAVE_API__train
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"train.py",
};
/*--- Type declarations ---*/
struct __pyx_obj_5train___pyx_scope_struct__data_generator;
/* "train.py":165
* return model
*
* def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): # <<<<<<<<<<<<<<
* '''data generator for fit_generator'''
* n = len(annotation_lines)
*/
struct __pyx_obj_5train___pyx_scope_struct__data_generator {
PyObject_HEAD
PyObject *__pyx_v_anchors;
PyObject *__pyx_v_annotation_lines;
PyObject *__pyx_v_b;
PyObject *__pyx_v_batch_size;
PyObject *__pyx_v_box;
PyObject *__pyx_v_box_data;
PyObject *__pyx_v_i;
PyObject *__pyx_v_image;
PyObject *__pyx_v_image_data;
PyObject *__pyx_v_input_shape;
PyObject *__pyx_v_n;
PyObject *__pyx_v_num_classes;
PyObject *__pyx_v_y_true;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectLookupSpecial.proto */
#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) {
    /* Look up a "special" method the way the interpreter does: on the type
     * (bypassing the instance dict) via _PyType_Lookup, then running the
     * descriptor protocol on the result. Returns a new reference, or NULL
     * with AttributeError set when the attribute is absent. */
    PyObject *res;
    PyTypeObject *tp = Py_TYPE(obj);
#if PY_MAJOR_VERSION < 3
    /* Py2 old-style instances: type-level lookup does not apply, use the
     * plain attribute fetch instead. */
    if (unlikely(PyInstance_Check(obj)))
        return __Pyx_PyObject_GetAttrStr(obj, attr_name);
#endif
    res = _PyType_Lookup(tp, attr_name);  /* borrowed reference (or NULL) */
    if (likely(res)) {
        descrgetfunc f = Py_TYPE(res)->tp_descr_get;
        if (!f) {
            /* Not a descriptor: promote the borrowed ref to a new one. */
            Py_INCREF(res);
        } else {
            /* Descriptor: bind to the instance (e.g. function -> bound
             * method); f returns a new reference or NULL on error. */
            res = f(res, obj, (PyObject *)tp);
        }
    } else {
        /* Missing attribute: raise AttributeError carrying the name. */
        PyErr_SetObject(PyExc_AttributeError, attr_name);
    }
    return res;
}
#else
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* PyIntFromDouble.proto */
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value);
#else
#define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value)
#endif
/* FetchCommonType.proto */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
/* CythonFunction.proto */
#define __Pyx_CyFunction_USED 1
#include <structmember.h>
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f)\
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f)\
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f)\
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
PyCFunctionObject func;
#if PY_VERSION_HEX < 0x030500A0
PyObject *func_weakreflist;
#endif
PyObject *func_dict;
PyObject *func_name;
PyObject *func_qualname;
PyObject *func_doc;
PyObject *func_globals;
PyObject *func_code;
PyObject *func_closure;
PyObject *func_classobj;
void *defaults;
int defaults_pyobjects;
int flags;
PyObject *defaults_tuple;
PyObject *defaults_kwdict;
PyObject *(*defaults_getter)(PyObject *);
PyObject *func_annotations;
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_NewEx(ml, flags, qualname, self, module, globals, code)\
__Pyx_CyFunction_New(__pyx_CyFunctionType, ml, flags, qualname, self, module, globals, code)
static PyObject *__Pyx_CyFunction_New(PyTypeObject *, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __pyx_CyFunction_init(void);
/* SliceObject.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* PyObjectSetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o,n,NULL)
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
    /* Set (or, with value == NULL, delete) an attribute via the type's
     * setattr slot(s), mirroring __Pyx_PyObject_GetAttrStr; falls back to
     * the generic PyObject_SetAttr. Returns 0 on success, -1 on error. */
    PyTypeObject* type = Py_TYPE(obj);
    setattrofunc setattro = type->tp_setattro;
    if (likely(setattro))
        return setattro(obj, attr_name, value);
#if PY_MAJOR_VERSION < 3
    /* Python 2 types may only implement the char*-based tp_setattr slot. */
    if (likely(type->tp_setattr))
        return type->tp_setattr(obj, PyString_AS_STRING(attr_name), value);
#endif
    return PyObject_SetAttr(obj, attr_name, value);
}
#else
#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
#endif
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    /* Fast append for list comprehensions: when the list still has spare
     * capacity, store the item directly into ob_item instead of paying for
     * a PyList_Append call. Only compiled under the enclosing
     * CYTHON_USE_PYLIST_INTERNALS guard. Returns 0 on success, -1 on error. */
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);  /* the list takes its own reference to x */
        PyList_SET_ITEM(list, len, x);
        /* NOTE(review): assigns through Py_SIZE, i.e. relies on it being an
         * lvalue macro — newer CPython headers require Py_SET_SIZE instead;
         * presumably this generated code targets older headers. */
        Py_SIZE(list) = len+1;
        return 0;
    }
    /* No spare capacity: let PyList_Append handle the resize. */
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    /* dict lookup with d[key] semantics: raises KeyError on a missing key,
     * but propagates errors raised during hashing/comparison unchanged.
     * Returns a new reference, or NULL with an exception set. */
    PyObject *value;
    value = PyDict_GetItemWithError(d, key);  /* borrowed ref; NULL = missing OR error */
    if (unlikely(!value)) {
        if (!PyErr_Occurred()) {
            /* No pending error means the key is simply absent: raise
             * KeyError with the key packed in a 1-tuple so that e.g.
             * tuple keys are reported correctly (not unpacked as args). */
            PyObject* args = PyTuple_Pack(1, key);
            if (likely(args))
                PyErr_SetObject(PyExc_KeyError, args);
            Py_XDECREF(args);
        }
        return NULL;
    }
    Py_INCREF(value);  /* promote the borrowed reference to a new one */
    return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_FloorDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_FloorDivideObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceFloorDivide(op1, op2) : PyNumber_FloorDivide(op1, op2))
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\
PyObject_RichCompare(op1, op2, Py_EQ)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
    /* Extend list L with the items of iterable v.
     * Returns 0 on success, -1 with an exception set on failure. */
#if CYTHON_COMPILING_IN_CPYTHON
    /* CPython: call the internal extend helper, which returns None on
     * success and NULL on error. */
    PyObject* result = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!result))
        return -1;
    Py_DECREF(result);
    return 0;
#else
    /* Portable fallback: assigning into an empty slice at the very end of
     * the list appends every item of v. */
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    /* Append with a direct-store fast path, used for general list appends.
     * Only compiled under the enclosing CYTHON_USE_PYLIST_INTERNALS guard.
     * Returns 0 on success, -1 on error. */
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    /* Take the fast path only when there is spare capacity AND the list is
     * more than half full; otherwise fall through to PyList_Append,
     * presumably so it can reclaim excess over-allocation — TODO confirm.
     * The bitwise '&' (not '&&') fuses both tests into one branch. */
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);  /* the list takes its own reference to x */
        PyList_SET_ITEM(list, len, x);
        /* NOTE(review): lvalue use of Py_SIZE — newer CPython headers
         * require Py_SET_SIZE; this generated code targets older headers. */
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* PyObjectCallMethod1.proto */
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg);
/* append.proto */
static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x);
/* IncludeStringH.proto */
#include <string.h>
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* CLineInTraceback.proto */
static int __Pyx_CLineForTraceback(int c_line);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* Print.proto */
static int __Pyx_Print(PyObject*, PyObject *, int);
#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3
static PyObject* __pyx_print = 0;
static PyObject* __pyx_print_kwargs = 0;
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* PrintOne.proto */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* CoroutineBase.proto */
typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyObject *);
typedef struct {
PyObject_HEAD
__pyx_coroutine_body_t body;
PyObject *closure;
PyObject *exc_type;
PyObject *exc_value;
PyObject *exc_traceback;
PyObject *gi_weakreflist;
PyObject *classobj;
PyObject *yieldfrom;
PyObject *gi_name;
PyObject *gi_qualname;
PyObject *gi_modulename;
int resume_label;
char is_running;
} __pyx_CoroutineObject;
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name);
static int __Pyx_Coroutine_clear(PyObject *self);
#if 1 || PY_VERSION_HEX < 0x030300B0
static int __Pyx_PyGen_FetchStopIterationValue(PyObject **pvalue);
#else
#define __Pyx_PyGen_FetchStopIterationValue(pvalue) PyGen_FetchStopIterationValue(pvalue)
#endif
/* PatchModuleWithCoroutine.proto */
static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code);
/* PatchGeneratorABC.proto */
static int __Pyx_patch_abc(void);
/* Generator.proto */
#define __Pyx_Generator_USED
static PyTypeObject *__pyx_GeneratorType = 0;
#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
#define __Pyx_Generator_New(body, closure, name, qualname, module_name)\
__Pyx__Coroutine_New(__pyx_GeneratorType, body, closure, name, qualname, module_name)
static PyObject *__Pyx_Generator_Next(PyObject *self);
static int __pyx_Generator_init(void);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'train' */
static PyTypeObject *__pyx_ptype_5train___pyx_scope_struct__data_generator = 0;
#define __Pyx_MODULE_NAME "train"
int __pyx_module_is_main_train = 0;
/* Implementation of 'train' */
static PyObject *__pyx_builtin_open;
static PyObject *__pyx_builtin_range;
static const char __pyx_k_K[] = "K";
static const char __pyx_k_b[] = "b";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_f[] = "f";
static const char __pyx_k_h[] = "h";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_l[] = "l";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_w[] = "w";
static const char __pyx_k_x[] = "x";
static const char __pyx_k__7[] = ",";
static const char __pyx_k_lr[] = "lr";
static const char __pyx_k_np[] = "np";
static const char __pyx_k__14[] = "*";
static const char __pyx_k_box[] = "box";
static const char __pyx_k_end[] = "end";
static const char __pyx_k_num[] = "num";
static const char __pyx_k_Adam[] = "Adam";
static const char __pyx_k_args[] = "args";
static const char __pyx_k_exit[] = "__exit__";
static const char __pyx_k_file[] = "file";
static const char __pyx_k_loss[] = "loss";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_open[] = "open";
static const char __pyx_k_seed[] = "seed";
static const char __pyx_k_send[] = "send";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_Input[] = "Input";
static const char __pyx_k_Model[] = "Model";
static const char __pyx_k_array[] = "array";
static const char __pyx_k_close[] = "close";
static const char __pyx_k_enter[] = "__enter__";
static const char __pyx_k_image[] = "image";
static const char __pyx_k_input[] = "input";
static const char __pyx_k_lines[] = "lines";
static const char __pyx_k_model[] = "model";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_print[] = "print";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_split[] = "split";
static const char __pyx_k_strip[] = "strip";
static const char __pyx_k_throw[] = "throw";
static const char __pyx_k_train[] = "train";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_Lambda[] = "Lambda";
static const char __pyx_k_append[] = "append";
static const char __pyx_k_epochs[] = "epochs";
static const char __pyx_k_factor[] = "factor";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_layers[] = "layers";
static const char __pyx_k_main_2[] = "_main";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_output[] = "output";
static const char __pyx_k_period[] = "period";
static const char __pyx_k_random[] = "random";
static const char __pyx_k_y_pred[] = "y_pred";
static const char __pyx_k_y_true[] = "y_true";
static const char __pyx_k_anchors[] = "anchors";
static const char __pyx_k_by_name[] = "by_name";
static const char __pyx_k_compile[] = "compile";
static const char __pyx_k_log_dir[] = "log_dir";
static const char __pyx_k_logging[] = "logging";
static const char __pyx_k_monitor[] = "monitor";
static const char __pyx_k_num_val[] = "num_val";
static const char __pyx_k_reshape[] = "reshape";
static const char __pyx_k_shuffle[] = "shuffle";
static const char __pyx_k_verbose[] = "verbose";
static const char __pyx_k_box_data[] = "box_data";
static const char __pyx_k_logs_000[] = "logs/000/";
static const char __pyx_k_patience[] = "patience";
static const char __pyx_k_readline[] = "readline";
static const char __pyx_k_train_py[] = "train.py";
static const char __pyx_k_val_loss[] = "val_loss";
static const char __pyx_k_arguments[] = "arguments";
static const char __pyx_k_callbacks[] = "callbacks";
static const char __pyx_k_min_delta[] = "min_delta";
static const char __pyx_k_num_train[] = "num_train";
static const char __pyx_k_optimizer[] = "optimizer";
static const char __pyx_k_readlines[] = "readlines";
static const char __pyx_k_reduce_lr[] = "reduce_lr";
static const char __pyx_k_trainable[] = "trainable";
static const char __pyx_k_val_split[] = "val_split";
static const char __pyx_k_yolo_body[] = "yolo_body";
static const char __pyx_k_yolo_loss[] = "yolo_loss";
static const char __pyx_k_batch_size[] = "batch_size";
static const char __pyx_k_checkpoint[] = "checkpoint";
static const char __pyx_k_image_data[] = "image_data";
static const char __pyx_k_model_body[] = "model_body";
static const char __pyx_k_model_loss[] = "model_loss";
static const char __pyx_k_TensorBoard[] = "TensorBoard";
static const char __pyx_k_class_names[] = "class_names";
static const char __pyx_k_freeze_body[] = "freeze_body";
static const char __pyx_k_get_anchors[] = "get_anchors";
static const char __pyx_k_get_classes[] = "get_classes";
static const char __pyx_k_image_input[] = "image_input";
static const char __pyx_k_input_shape[] = "input_shape";
static const char __pyx_k_num_anchors[] = "num_anchors";
static const char __pyx_k_num_classes[] = "num_classes";
static const char __pyx_k_yolo3_model[] = "yolo3.model";
static const char __pyx_k_yolo3_utils[] = "yolo3.utils";
static const char __pyx_k_Load_weights[] = "Load weights {}.";
static const char __pyx_k_anchors_path[] = "anchors_path";
static const char __pyx_k_classes_path[] = "classes_path";
static const char __pyx_k_create_model[] = "create_model";
static const char __pyx_k_keras_layers[] = "keras.layers";
static const char __pyx_k_keras_models[] = "keras.models";
static const char __pyx_k_load_weights[] = "load_weights";
static const char __pyx_k_output_shape[] = "output_shape";
static const char __pyx_k_save_weights[] = "save_weights";
static const char __pyx_k_weights_path[] = "weights_path";
static const char __pyx_k_EarlyStopping[] = "EarlyStopping";
static const char __pyx_k_clear_session[] = "clear_session";
static const char __pyx_k_fit_generator[] = "fit_generator";
static const char __pyx_k_ignore_thresh[] = "ignore_thresh";
static const char __pyx_k_initial_epoch[] = "initial_epoch";
static const char __pyx_k_keras_backend[] = "keras.backend";
static const char __pyx_k_skip_mismatch[] = "skip_mismatch";
static const char __pyx_k_data_generator[] = "data_generator";
static const char __pyx_k_early_stopping[] = "early_stopping";
static const char __pyx_k_save_best_only[] = "save_best_only";
static const char __pyx_k_tiny_yolo_body[] = "tiny_yolo_body";
static const char __pyx_k_ModelCheckpoint[] = "ModelCheckpoint";
static const char __pyx_k_annotation_path[] = "annotation_path";
static const char __pyx_k_get_random_data[] = "get_random_data";
static const char __pyx_k_is_tiny_version[] = "is_tiny_version";
static const char __pyx_k_keras_callbacks[] = "keras.callbacks";
static const char __pyx_k_load_pretrained[] = "load_pretrained";
static const char __pyx_k_steps_per_epoch[] = "steps_per_epoch";
static const char __pyx_k_validation_data[] = "validation_data";
static const char __pyx_k_annotation_lines[] = "annotation_lines";
static const char __pyx_k_keras_optimizers[] = "keras.optimizers";
static const char __pyx_k_validation_steps[] = "validation_steps";
static const char __pyx_k_ReduceLROnPlateau[] = "ReduceLROnPlateau";
static const char __pyx_k_create_tiny_model[] = "create_tiny_model";
static const char __pyx_k_save_weights_only[] = "save_weights_only";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_main_locals_lambda[] = "_main.<locals>.<lambda>";
static const char __pyx_k_model_data_train_txt[] = "model_data/train.txt";
static const char __pyx_k_preprocess_true_boxes[] = "preprocess_true_boxes";
static const char __pyx_k_data_generator_wrapper[] = "data_generator_wrapper";
static const char __pyx_k_trained_weights_final_h5[] = "trained_weights_final.h5";
static const char __pyx_k_model_data_head_class_txt[] = "model_data/head_class.txt";
static const char __pyx_k_Unfreeze_all_of_the_layers[] = "Unfreeze all of the layers.";
static const char __pyx_k_model_data_yolo_weights_h5[] = "model_data/yolo_weights.h5";
static const char __pyx_k_trained_weights_stage_1_h5[] = "trained_weights_stage_1.h5";
static const char __pyx_k_Retrain_the_YOLO_model_for_your[] = "\nRetrain the YOLO model for your own dataset.\n";
static const char __pyx_k_Train_on_samples_val_on_samples[] = "Train on {} samples, val on {} samples, with batch size {}.";
static const char __pyx_k_model_data_tiny_yolo_weights_h5[] = "model_data/tiny_yolo_weights.h5";
static const char __pyx_k_Create_Tiny_YOLOv3_model_with_an[] = "Create Tiny YOLOv3 model with {} anchors and {} classes.";
static const char __pyx_k_Create_YOLOv3_model_with_anchors[] = "Create YOLOv3 model with {} anchors and {} classes.";
static const char __pyx_k_Freeze_the_first_layers_of_total[] = "Freeze the first {} layers of total {} layers.";
static const char __pyx_k_ep_epoch_03d_loss_loss_3f_val_lo[] = "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5";
static const char __pyx_k_model_data_tiny_yolo_anchors_txt[] = "model_data/tiny_yolo_anchors.txt";
static PyObject *__pyx_n_s_Adam;
static PyObject *__pyx_kp_s_Create_Tiny_YOLOv3_model_with_an;
static PyObject *__pyx_kp_s_Create_YOLOv3_model_with_anchors;
static PyObject *__pyx_n_s_EarlyStopping;
static PyObject *__pyx_kp_s_Freeze_the_first_layers_of_total;
static PyObject *__pyx_n_s_Input;
static PyObject *__pyx_n_s_K;
static PyObject *__pyx_n_s_Lambda;
static PyObject *__pyx_kp_s_Load_weights;
static PyObject *__pyx_n_s_Model;
static PyObject *__pyx_n_s_ModelCheckpoint;
static PyObject *__pyx_n_s_ReduceLROnPlateau;
static PyObject *__pyx_n_s_TensorBoard;
static PyObject *__pyx_kp_s_Train_on_samples_val_on_samples;
static PyObject *__pyx_kp_s_Unfreeze_all_of_the_layers;
static PyObject *__pyx_n_s__14;
static PyObject *__pyx_kp_s__7;
static PyObject *__pyx_n_s_anchors;
static PyObject *__pyx_n_s_anchors_path;
static PyObject *__pyx_n_s_annotation_lines;
static PyObject *__pyx_n_s_annotation_path;
static PyObject *__pyx_n_s_append;
static PyObject *__pyx_n_s_args;
static PyObject *__pyx_n_s_arguments;
static PyObject *__pyx_n_s_array;
static PyObject *__pyx_n_s_b;
static PyObject *__pyx_n_s_batch_size;
static PyObject *__pyx_n_s_box;
static PyObject *__pyx_n_s_box_data;
static PyObject *__pyx_n_s_by_name;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_s_callbacks;
static PyObject *__pyx_n_s_checkpoint;
static PyObject *__pyx_n_s_class_names;
static PyObject *__pyx_n_s_classes_path;
static PyObject *__pyx_n_s_clear_session;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_close;
static PyObject *__pyx_n_s_compile;
static PyObject *__pyx_n_s_create_model;
static PyObject *__pyx_n_s_create_tiny_model;
static PyObject *__pyx_n_s_data_generator;
static PyObject *__pyx_n_s_data_generator_wrapper;
static PyObject *__pyx_n_s_early_stopping;
static PyObject *__pyx_n_s_end;
static PyObject *__pyx_n_s_enter;
static PyObject *__pyx_kp_s_ep_epoch_03d_loss_loss_3f_val_lo;
static PyObject *__pyx_n_s_epochs;
static PyObject *__pyx_n_s_exit;
static PyObject *__pyx_n_s_f;
static PyObject *__pyx_n_s_factor;
static PyObject *__pyx_n_s_file;
static PyObject *__pyx_n_s_fit_generator;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_freeze_body;
static PyObject *__pyx_n_s_get_anchors;
static PyObject *__pyx_n_s_get_classes;
static PyObject *__pyx_n_s_get_random_data;
static PyObject *__pyx_n_s_h;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_ignore_thresh;
static PyObject *__pyx_n_s_image;
static PyObject *__pyx_n_s_image_data;
static PyObject *__pyx_n_s_image_input;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_initial_epoch;
static PyObject *__pyx_n_s_input;
static PyObject *__pyx_n_s_input_shape;
static PyObject *__pyx_n_s_is_tiny_version;
static PyObject *__pyx_n_s_keras_backend;
static PyObject *__pyx_n_s_keras_callbacks;
static PyObject *__pyx_n_s_keras_layers;
static PyObject *__pyx_n_s_keras_models;
static PyObject *__pyx_n_s_keras_optimizers;
static PyObject *__pyx_n_s_l;
static PyObject *__pyx_n_s_layers;
static PyObject *__pyx_n_s_lines;
static PyObject *__pyx_n_s_load_pretrained;
static PyObject *__pyx_n_s_load_weights;
static PyObject *__pyx_n_s_log_dir;
static PyObject *__pyx_n_s_logging;
static PyObject *__pyx_kp_s_logs_000;
static PyObject *__pyx_n_s_loss;
static PyObject *__pyx_n_s_lr;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_main_2;
static PyObject *__pyx_n_s_main_locals_lambda;
static PyObject *__pyx_n_s_min_delta;
static PyObject *__pyx_n_s_model;
/*
 * Cython-generated module state for "train.py" — do not edit by hand;
 * re-run Cython on train.py instead.  Everything below is created once
 * at module init and cached for the lifetime of the module.
 */
/* Interned Python string objects: __pyx_n_s_* are identifier names,
 * __pyx_kp_s_* are general (non-identifier) string literals. */
static PyObject *__pyx_n_s_model_body;
static PyObject *__pyx_kp_s_model_data_head_class_txt;
static PyObject *__pyx_kp_s_model_data_tiny_yolo_anchors_txt;
static PyObject *__pyx_kp_s_model_data_tiny_yolo_weights_h5;
static PyObject *__pyx_kp_s_model_data_train_txt;
static PyObject *__pyx_kp_s_model_data_yolo_weights_h5;
static PyObject *__pyx_n_s_model_loss;
static PyObject *__pyx_n_s_monitor;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_num;
static PyObject *__pyx_n_s_num_anchors;
static PyObject *__pyx_n_s_num_classes;
static PyObject *__pyx_n_s_num_train;
static PyObject *__pyx_n_s_num_val;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_open;
static PyObject *__pyx_n_s_optimizer;
static PyObject *__pyx_n_s_output;
static PyObject *__pyx_n_s_output_shape;
static PyObject *__pyx_n_s_patience;
static PyObject *__pyx_n_s_period;
static PyObject *__pyx_n_s_preprocess_true_boxes;
static PyObject *__pyx_n_s_print;
static PyObject *__pyx_n_s_random;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_readline;
static PyObject *__pyx_n_s_readlines;
static PyObject *__pyx_n_s_reduce_lr;
static PyObject *__pyx_n_s_reshape;
static PyObject *__pyx_n_s_save_best_only;
static PyObject *__pyx_n_s_save_weights;
static PyObject *__pyx_n_s_save_weights_only;
static PyObject *__pyx_n_s_seed;
static PyObject *__pyx_n_s_send;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_shuffle;
static PyObject *__pyx_n_s_skip_mismatch;
static PyObject *__pyx_n_s_split;
static PyObject *__pyx_n_s_steps_per_epoch;
static PyObject *__pyx_n_s_strip;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_throw;
static PyObject *__pyx_n_s_tiny_yolo_body;
static PyObject *__pyx_n_s_train;
static PyObject *__pyx_kp_s_train_py;
static PyObject *__pyx_n_s_trainable;
static PyObject *__pyx_kp_s_trained_weights_final_h5;
static PyObject *__pyx_kp_s_trained_weights_stage_1_h5;
static PyObject *__pyx_n_s_val_loss;
static PyObject *__pyx_n_s_val_split;
static PyObject *__pyx_n_s_validation_data;
static PyObject *__pyx_n_s_validation_steps;
static PyObject *__pyx_n_s_verbose;
static PyObject *__pyx_n_s_w;
static PyObject *__pyx_n_s_weights_path;
static PyObject *__pyx_n_s_x;
static PyObject *__pyx_n_s_y_pred;
static PyObject *__pyx_n_s_y_true;
static PyObject *__pyx_n_s_yolo3_model;
static PyObject *__pyx_n_s_yolo3_utils;
static PyObject *__pyx_n_s_yolo_body;
static PyObject *__pyx_n_s_yolo_loss;
static PyObject *__pyx_n_s_zeros;
/* Prototypes of the "impl" functions that hold the translated bodies of
 * the Python functions in train.py.  Each is invoked by a matching
 * argument-unpacking __pyx_pw_* wrapper defined later in this file. */
static PyObject *__pyx_lambda_funcdef_lambda(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred); /* proto */
static PyObject *__pyx_lambda_funcdef_lambda1(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred); /* proto */
static PyObject *__pyx_pf_5train__main(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_5train_2get_classes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_classes_path); /* proto */
static PyObject *__pyx_pf_5train_4get_anchors(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_anchors_path); /* proto */
static PyObject *__pyx_pf_5train_6create_model(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes, PyObject *__pyx_v_load_pretrained, PyObject *__pyx_v_freeze_body, PyObject *__pyx_v_weights_path); /* proto */
static PyObject *__pyx_pf_5train_8create_tiny_model(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes, PyObject *__pyx_v_load_pretrained, PyObject *__pyx_v_freeze_body, PyObject *__pyx_v_weights_path); /* proto */
static PyObject *__pyx_pf_5train_10data_generator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_annotation_lines, PyObject *__pyx_v_batch_size, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes); /* proto */
static PyObject *__pyx_pf_5train_13data_generator_wrapper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_annotation_lines, PyObject *__pyx_v_batch_size, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes); /* proto */
/* tp_new slot for the closure struct that carries data_generator's state
 * across generator resumptions. */
static PyObject *__pyx_tp_new_5train___pyx_scope_struct__data_generator(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
/* Cached Python float/int constant objects for the numeric literals that
 * appear in train.py (e.g. lr=1e-3, val_split=0.1, seed 10101). */
static PyObject *__pyx_float_0_1;
static PyObject *__pyx_float_0_5;
static PyObject *__pyx_float_0_7;
static PyObject *__pyx_float_1eneg_3;
static PyObject *__pyx_float_1eneg_4;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_2;
static PyObject *__pyx_int_3;
static PyObject *__pyx_int_5;
static PyObject *__pyx_int_8;
static PyObject *__pyx_int_10;
static PyObject *__pyx_int_16;
static PyObject *__pyx_int_20;
static PyObject *__pyx_int_32;
static PyObject *__pyx_int_50;
static PyObject *__pyx_int_100;
static PyObject *__pyx_int_185;
static PyObject *__pyx_int_416;
static PyObject *__pyx_int_10101;
static PyObject *__pyx_int_neg_1;
/* Cached constant tuples (e.g. the (416,416) input shape, argument
 * tuples) and code objects for the generated functions. */
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_codeobj__16;
static PyObject *__pyx_codeobj__18;
static PyObject *__pyx_codeobj__20;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__25;
static PyObject *__pyx_codeobj__28;
static PyObject *__pyx_codeobj__30;
/* "train.py":16
 *
 *
 * def _main(): # <<<<<<<<<<<<<<
 * annotation_path = 'model_data/train.txt'
 * log_dir = 'logs/000/'
 */
/* Python wrapper */
/*
 * CPython entry point for train._main() (METH_NOARGS, registered through
 * __pyx_mdef_5train_1_main).  Pure generated boilerplate: it performs the
 * Cython refnanny bookkeeping and forwards to the impl function
 * __pyx_pf_5train__main, which contains the translated Python body.
 */
static PyObject *__pyx_pw_5train_1_main(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyMethodDef __pyx_mdef_5train_1_main = {"_main", (PyCFunction)__pyx_pw_5train_1_main, METH_NOARGS, 0};
static PyObject *__pyx_pw_5train_1_main(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("_main (wrapper)", 0);
__pyx_r = __pyx_pf_5train__main(__pyx_self);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":55
* model.compile(optimizer=Adam(lr=1e-3), loss={
* # use custom yolo_loss Lambda layer.
* 'yolo_loss': lambda y_true, y_pred: y_pred}) # <<<<<<<<<<<<<<
*
* batch_size = 32
*/
/* Python wrapper */
/*
 * Argument-unpacking wrapper for the lambda at train.py:55
 * ({'yolo_loss': lambda y_true, y_pred: y_pred}).  Accepts exactly two
 * arguments, positional and/or keyword (y_true, y_pred); any other call
 * shape raises TypeError via __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseOptionalKeywords.  On success it delegates to the impl
 * function __pyx_lambda_funcdef_lambda.
 */
static PyObject *__pyx_pw_5train_5_main_lambda(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5train_5_main_lambda = {"lambda", (PyCFunction)__pyx_pw_5train_5_main_lambda, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5train_5_main_lambda(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED PyObject *__pyx_v_y_true = 0;
PyObject *__pyx_v_y_pred = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lambda (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_y_true,&__pyx_n_s_y_pred,0};
PyObject* values[2] = {0,0};
/* Slow path: keyword (or mixed positional/keyword) call. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Collect whatever was passed positionally first (fallthrough fills
 * values[1] then values[0]). */
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill any remaining slots from keywords, by parameter name. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_true)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_pred)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lambda", 1, 2, 2, 1); __PYX_ERR(0, 55, __pyx_L3_error)
}
}
/* Any keyword left unconsumed is unexpected -> TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lambda") < 0)) __PYX_ERR(0, 55, __pyx_L3_error)
}
/* Fast path: exactly two positional arguments, no keywords. */
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_y_true = values[0];
__pyx_v_y_pred = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("lambda", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 55, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("train._main.lambda", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_lambda_funcdef_lambda(__pyx_self, __pyx_v_y_true, __pyx_v_y_pred);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Impl of the lambda at train.py:55 (lambda y_true, y_pred: y_pred):
 * ignores y_true and returns y_pred with a new (incref'd) reference, as
 * required by the CPython calling convention.
 */
static PyObject *__pyx_lambda_funcdef_lambda(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lambda", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_y_pred);
__pyx_r = __pyx_v_y_pred;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":73
* for i in range(len(model.layers)):
* model.layers[i].trainable = True
* model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change # <<<<<<<<<<<<<<
* print('Unfreeze all of the layers.')
*
*/
/* Python wrapper */
/*
 * Argument-unpacking wrapper for the second identical lambda, at
 * train.py:73 (the recompile after unfreezing: lambda y_true, y_pred:
 * y_pred).  Same structure as the train.py:55 wrapper above: exactly two
 * arguments (y_true, y_pred), positional and/or keyword, else TypeError;
 * then delegates to __pyx_lambda_funcdef_lambda1.
 */
static PyObject *__pyx_pw_5train_5_main_1lambda1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5train_5_main_1lambda1 = {"lambda1", (PyCFunction)__pyx_pw_5train_5_main_1lambda1, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5train_5_main_1lambda1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED PyObject *__pyx_v_y_true = 0;
PyObject *__pyx_v_y_pred = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lambda1 (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_y_true,&__pyx_n_s_y_pred,0};
PyObject* values[2] = {0,0};
/* Slow path: keyword (or mixed positional/keyword) call. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill remaining slots from keywords, by parameter name. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_true)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y_pred)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lambda1", 1, 2, 2, 1); __PYX_ERR(0, 73, __pyx_L3_error)
}
}
/* Any keyword left unconsumed is unexpected -> TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lambda1") < 0)) __PYX_ERR(0, 73, __pyx_L3_error)
}
/* Fast path: exactly two positional arguments, no keywords. */
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_y_true = values[0];
__pyx_v_y_pred = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("lambda1", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 73, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("train._main.lambda1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_lambda_funcdef_lambda1(__pyx_self, __pyx_v_y_true, __pyx_v_y_pred);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Impl of the lambda at train.py:73 (lambda y_true, y_pred: y_pred):
 * ignores y_true and returns y_pred with a new (incref'd) reference.
 * Identical in behavior to __pyx_lambda_funcdef_lambda above; Cython
 * emits a separate function per lambda occurrence.
 */
static PyObject *__pyx_lambda_funcdef_lambda1(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_y_true, PyObject *__pyx_v_y_pred) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lambda1", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_y_pred);
__pyx_r = __pyx_v_y_pred;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":16
*
*
* def _main(): # <<<<<<<<<<<<<<
* annotation_path = 'model_data/train.txt'
* log_dir = 'logs/000/'
*/
static PyObject *__pyx_pf_5train__main(CYTHON_UNUSED PyObject *__pyx_self) {
PyObject *__pyx_v_annotation_path = NULL;
PyObject *__pyx_v_log_dir = NULL;
PyObject *__pyx_v_classes_path = NULL;
PyObject *__pyx_v_anchors_path = NULL;
PyObject *__pyx_v_class_names = NULL;
Py_ssize_t __pyx_v_num_classes;
PyObject *__pyx_v_anchors = NULL;
PyObject *__pyx_v_input_shape = NULL;
PyObject *__pyx_v_is_tiny_version = NULL;
PyObject *__pyx_v_model = NULL;
PyObject *__pyx_v_logging = NULL;
PyObject *__pyx_v_checkpoint = NULL;
PyObject *__pyx_v_reduce_lr = NULL;
PyObject *__pyx_v_early_stopping = NULL;
double __pyx_v_val_split;
PyObject *__pyx_v_f = NULL;
PyObject *__pyx_v_lines = NULL;
PyObject *__pyx_v_num_val = NULL;
PyObject *__pyx_v_num_train = NULL;
PyObject *__pyx_v_batch_size = NULL;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
int __pyx_t_12;
PyObject *__pyx_t_13 = NULL;
int __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
long __pyx_t_17;
PyObject *__pyx_t_18 = NULL;
PyObject *__pyx_t_19 = NULL;
Py_ssize_t __pyx_t_20;
__Pyx_RefNannySetupContext("_main", 0);
/* "train.py":17
*
* def _main():
* annotation_path = 'model_data/train.txt' # <<<<<<<<<<<<<<
* log_dir = 'logs/000/'
* classes_path = 'model_data/head_class.txt'
*/
__Pyx_INCREF(__pyx_kp_s_model_data_train_txt);
__pyx_v_annotation_path = __pyx_kp_s_model_data_train_txt;
/* "train.py":18
* def _main():
* annotation_path = 'model_data/train.txt'
* log_dir = 'logs/000/' # <<<<<<<<<<<<<<
* classes_path = 'model_data/head_class.txt'
* anchors_path = 'model_data/tiny_yolo_anchors.txt'
*/
__Pyx_INCREF(__pyx_kp_s_logs_000);
__pyx_v_log_dir = __pyx_kp_s_logs_000;
/* "train.py":19
* annotation_path = 'model_data/train.txt'
* log_dir = 'logs/000/'
* classes_path = 'model_data/head_class.txt' # <<<<<<<<<<<<<<
* anchors_path = 'model_data/tiny_yolo_anchors.txt'
* class_names = get_classes(classes_path)
*/
__Pyx_INCREF(__pyx_kp_s_model_data_head_class_txt);
__pyx_v_classes_path = __pyx_kp_s_model_data_head_class_txt;
/* "train.py":20
* log_dir = 'logs/000/'
* classes_path = 'model_data/head_class.txt'
* anchors_path = 'model_data/tiny_yolo_anchors.txt' # <<<<<<<<<<<<<<
* class_names = get_classes(classes_path)
* num_classes = len(class_names)
*/
__Pyx_INCREF(__pyx_kp_s_model_data_tiny_yolo_anchors_txt);
__pyx_v_anchors_path = __pyx_kp_s_model_data_tiny_yolo_anchors_txt;
/* "train.py":21
* classes_path = 'model_data/head_class.txt'
* anchors_path = 'model_data/tiny_yolo_anchors.txt'
* class_names = get_classes(classes_path) # <<<<<<<<<<<<<<
* num_classes = len(class_names)
* anchors = get_anchors(anchors_path)
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_get_classes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (!__pyx_t_3) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_classes_path); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_classes_path};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_classes_path};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
{
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL;
__Pyx_INCREF(__pyx_v_classes_path);
__Pyx_GIVEREF(__pyx_v_classes_path);
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v_classes_path);
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_class_names = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":22
* anchors_path = 'model_data/tiny_yolo_anchors.txt'
* class_names = get_classes(classes_path)
* num_classes = len(class_names) # <<<<<<<<<<<<<<
* anchors = get_anchors(anchors_path)
*
*/
__pyx_t_5 = PyObject_Length(__pyx_v_class_names); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 22, __pyx_L1_error)
__pyx_v_num_classes = __pyx_t_5;
/* "train.py":23
* class_names = get_classes(classes_path)
* num_classes = len(class_names)
* anchors = get_anchors(anchors_path) # <<<<<<<<<<<<<<
*
* input_shape = (416,416) # multiple of 32, hw
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_get_anchors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_anchors_path); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_anchors_path};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_anchors_path};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = NULL;
__Pyx_INCREF(__pyx_v_anchors_path);
__Pyx_GIVEREF(__pyx_v_anchors_path);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_v_anchors_path);
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_anchors = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":25
* anchors = get_anchors(anchors_path)
*
* input_shape = (416,416) # multiple of 32, hw # <<<<<<<<<<<<<<
*
* is_tiny_version = len(anchors)==6 # default setting
*/
__Pyx_INCREF(__pyx_tuple_);
__pyx_v_input_shape = __pyx_tuple_;
/* "train.py":27
* input_shape = (416,416) # multiple of 32, hw
*
* is_tiny_version = len(anchors)==6 # default setting # <<<<<<<<<<<<<<
* if is_tiny_version:
* model = create_tiny_model(input_shape, anchors, num_classes,
*/
__pyx_t_5 = PyObject_Length(__pyx_v_anchors); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 27, __pyx_L1_error)
__pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_t_5 == 6)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_is_tiny_version = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":28
*
* is_tiny_version = len(anchors)==6 # default setting
* if is_tiny_version: # <<<<<<<<<<<<<<
* model = create_tiny_model(input_shape, anchors, num_classes,
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
*/
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_is_tiny_version); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 28, __pyx_L1_error)
if (__pyx_t_6) {
/* "train.py":29
* is_tiny_version = len(anchors)==6 # default setting
* if is_tiny_version:
* model = create_tiny_model(input_shape, anchors, num_classes, # <<<<<<<<<<<<<<
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
* else:
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_create_tiny_model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":30
* if is_tiny_version:
* model = create_tiny_model(input_shape, anchors, num_classes,
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5') # <<<<<<<<<<<<<<
* else:
* model = create_model(input_shape, anchors, num_classes,
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_freeze_body, __pyx_int_2) < 0) __PYX_ERR(0, 30, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_weights_path, __pyx_kp_s_model_data_tiny_yolo_weights_h5) < 0) __PYX_ERR(0, 30, __pyx_L1_error)
/* "train.py":29
* is_tiny_version = len(anchors)==6 # default setting
* if is_tiny_version:
* model = create_tiny_model(input_shape, anchors, num_classes, # <<<<<<<<<<<<<<
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
* else:
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_model = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":28
*
* is_tiny_version = len(anchors)==6 # default setting
* if is_tiny_version: # <<<<<<<<<<<<<<
* model = create_tiny_model(input_shape, anchors, num_classes,
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
*/
goto __pyx_L3;
}
/* "train.py":32
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
* else:
* model = create_model(input_shape, anchors, num_classes, # <<<<<<<<<<<<<<
* freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
*
*/
/*else*/ {
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_create_model); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":33
* else:
* model = create_model(input_shape, anchors, num_classes,
* freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze # <<<<<<<<<<<<<<
*
* logging = TensorBoard(log_dir=log_dir)
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_freeze_body, __pyx_int_2) < 0) __PYX_ERR(0, 33, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_weights_path, __pyx_kp_s_model_data_yolo_weights_h5) < 0) __PYX_ERR(0, 33, __pyx_L1_error)
/* "train.py":32
* freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
* else:
* model = create_model(input_shape, anchors, num_classes, # <<<<<<<<<<<<<<
* freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_model = __pyx_t_1;
__pyx_t_1 = 0;
}
__pyx_L3:;
/* "train.py":35
* freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
*
* logging = TensorBoard(log_dir=log_dir) # <<<<<<<<<<<<<<
* checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_TensorBoard); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_log_dir, __pyx_v_log_dir) < 0) __PYX_ERR(0, 35, __pyx_L1_error)
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_logging = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":36
*
* logging = TensorBoard(log_dir=log_dir)
* checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', # <<<<<<<<<<<<<<
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
* reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
*/
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_ModelCheckpoint); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyNumber_Add(__pyx_v_log_dir, __pyx_kp_s_ep_epoch_03d_loss_loss_3f_val_lo); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":37
* logging = TensorBoard(log_dir=log_dir)
* checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3) # <<<<<<<<<<<<<<
* reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
* early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_monitor, __pyx_n_s_val_loss) < 0) __PYX_ERR(0, 37, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_save_weights_only, Py_True) < 0) __PYX_ERR(0, 37, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_save_best_only, Py_True) < 0) __PYX_ERR(0, 37, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_period, __pyx_int_3) < 0) __PYX_ERR(0, 37, __pyx_L1_error)
/* "train.py":36
*
* logging = TensorBoard(log_dir=log_dir)
* checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', # <<<<<<<<<<<<<<
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
* reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_checkpoint = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":38
* checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
* reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) # <<<<<<<<<<<<<<
* early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
*
*/
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_ReduceLROnPlateau); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_monitor, __pyx_n_s_val_loss) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_factor, __pyx_float_0_1) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_patience, __pyx_int_3) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_verbose, __pyx_int_1) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_reduce_lr = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":39
* monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
* reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
* early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1) # <<<<<<<<<<<<<<
*
* val_split = 0.1
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_EarlyStopping); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_monitor, __pyx_n_s_val_loss) < 0) __PYX_ERR(0, 39, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_min_delta, __pyx_int_0) < 0) __PYX_ERR(0, 39, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_patience, __pyx_int_10) < 0) __PYX_ERR(0, 39, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_verbose, __pyx_int_1) < 0) __PYX_ERR(0, 39, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_early_stopping = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":41
* early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
*
* val_split = 0.1 # <<<<<<<<<<<<<<
* with open(annotation_path) as f:
* lines = f.readlines()
*/
__pyx_v_val_split = 0.1;
/* "train.py":42
*
* val_split = 0.1
* with open(annotation_path) as f: # <<<<<<<<<<<<<<
* lines = f.readlines()
* np.random.seed(10101)
*/
/*with:*/ {
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_annotation_path);
__Pyx_GIVEREF(__pyx_v_annotation_path);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_annotation_path);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_exit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_1 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_enter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
if (__pyx_t_3) {
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L4_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 42, __pyx_L4_error)
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __pyx_t_4;
__pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/*try:*/ {
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
/*try:*/ {
__pyx_v_f = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":43
* val_split = 0.1
* with open(annotation_path) as f:
* lines = f.readlines() # <<<<<<<<<<<<<<
* np.random.seed(10101)
* np.random.shuffle(lines)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_f, __pyx_n_s_readlines); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L8_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L8_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L8_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_lines = __pyx_t_1;
__pyx_t_1 = 0;
/* "train.py":42
*
* val_split = 0.1
* with open(annotation_path) as f: # <<<<<<<<<<<<<<
* lines = f.readlines()
* np.random.seed(10101)
*/
}
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
goto __pyx_L13_try_end;
__pyx_L8_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/*except:*/ {
__Pyx_AddTraceback("train._main", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_4) < 0) __PYX_ERR(0, 42, __pyx_L10_except_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyTuple_Pack(3, __pyx_t_1, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 42, __pyx_L10_except_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 42, __pyx_L10_except_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_11);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
if (__pyx_t_6 < 0) __PYX_ERR(0, 42, __pyx_L10_except_error)
__pyx_t_12 = ((!(__pyx_t_6 != 0)) != 0);
if (__pyx_t_12) {
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ErrRestoreWithState(__pyx_t_1, __pyx_t_2, __pyx_t_4);
__pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_4 = 0;
__PYX_ERR(0, 42, __pyx_L10_except_error)
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L9_exception_handled;
}
__pyx_L10_except_error:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10);
goto __pyx_L1_error;
__pyx_L9_exception_handled:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10);
__pyx_L13_try_end:;
}
}
/*finally:*/ {
/*normal exit:*/{
if (__pyx_t_7) {
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple__2, NULL);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
}
goto __pyx_L7;
}
__pyx_L7:;
}
goto __pyx_L17;
__pyx_L4_error:;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
goto __pyx_L1_error;
__pyx_L17:;
}
/* "train.py":44
* with open(annotation_path) as f:
* lines = f.readlines()
* np.random.seed(10101) # <<<<<<<<<<<<<<
* np.random.shuffle(lines)
* np.random.seed(None)
*/
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_seed); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "train.py":45
* lines = f.readlines()
* np.random.seed(10101)
* np.random.shuffle(lines) # <<<<<<<<<<<<<<
* np.random.seed(None)
* num_val = int(len(lines)*val_split)
*/
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_shuffle); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 45, __pyx_L1_error) }
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_1) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_lines); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_lines};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_lines};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = NULL;
__Pyx_INCREF(__pyx_v_lines);
__Pyx_GIVEREF(__pyx_v_lines);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_v_lines);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "train.py":46
* np.random.seed(10101)
* np.random.shuffle(lines)
* np.random.seed(None) # <<<<<<<<<<<<<<
* num_val = int(len(lines)*val_split)
* num_train = len(lines) - num_val
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_seed); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "train.py":47
* np.random.shuffle(lines)
* np.random.seed(None)
* num_val = int(len(lines)*val_split) # <<<<<<<<<<<<<<
* num_train = len(lines) - num_val
*
*/
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 47, __pyx_L1_error) }
__pyx_t_5 = PyObject_Length(__pyx_v_lines); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 47, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyInt_FromDouble((__pyx_t_5 * __pyx_v_val_split)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_v_num_val = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":48
* np.random.seed(None)
* num_val = int(len(lines)*val_split)
* num_train = len(lines) - num_val # <<<<<<<<<<<<<<
*
* # Train with frozen layers first, to get a stable loss.
*/
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 48, __pyx_L1_error) }
__pyx_t_5 = PyObject_Length(__pyx_v_lines); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 48, __pyx_L1_error)
__pyx_t_4 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyNumber_Subtract(__pyx_t_4, __pyx_v_num_val); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_num_train = __pyx_t_2;
__pyx_t_2 = 0;
/* "train.py":53
* # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
* if True:
* model.compile(optimizer=Adam(lr=1e-3), loss={ # <<<<<<<<<<<<<<
* # use custom yolo_loss Lambda layer.
* 'yolo_loss': lambda y_true, y_pred: y_pred})
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_compile); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_Adam); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_lr, __pyx_float_1eneg_3) < 0) __PYX_ERR(0, 53, __pyx_L1_error)
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_optimizer, __pyx_t_13) < 0) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":55
* model.compile(optimizer=Adam(lr=1e-3), loss={
* # use custom yolo_loss Lambda layer.
* 'yolo_loss': lambda y_true, y_pred: y_pred}) # <<<<<<<<<<<<<<
*
* batch_size = 32
*/
__pyx_t_13 = PyDict_New(); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_5_main_lambda, 0, __pyx_n_s_main_locals_lambda, NULL, __pyx_n_s_train, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_yolo_loss, __pyx_t_1) < 0) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_loss, __pyx_t_13) < 0) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":53
* # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
* if True:
* model.compile(optimizer=Adam(lr=1e-3), loss={ # <<<<<<<<<<<<<<
* # use custom yolo_loss Lambda layer.
* 'yolo_loss': lambda y_true, y_pred: y_pred})
*/
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":57
* 'yolo_loss': lambda y_true, y_pred: y_pred})
*
* batch_size = 32 # <<<<<<<<<<<<<<
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
*/
__Pyx_INCREF(__pyx_int_32);
__pyx_v_batch_size = __pyx_int_32;
/* "train.py":58
*
* batch_size = 32
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) # <<<<<<<<<<<<<<
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size),
*/
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Train_on_samples_val_on_samples, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_num_train, __pyx_v_num_val, __pyx_v_batch_size};
__pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_14, 3+__pyx_t_14); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_13);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_num_train, __pyx_v_num_val, __pyx_v_batch_size};
__pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_14, 3+__pyx_t_14); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_13);
} else
#endif
{
__pyx_t_1 = PyTuple_New(3+__pyx_t_14); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_INCREF(__pyx_v_num_train);
__Pyx_GIVEREF(__pyx_v_num_train);
PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_14, __pyx_v_num_train);
__Pyx_INCREF(__pyx_v_num_val);
__Pyx_GIVEREF(__pyx_v_num_val);
PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_14, __pyx_v_num_val);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_1, 2+__pyx_t_14, __pyx_v_batch_size);
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__Pyx_PrintOne(0, __pyx_t_13) < 0) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":59
* batch_size = 32
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
*/
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_fit_generator); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_data_generator_wrapper); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 59, __pyx_L1_error) }
__pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_lines, 0, 0, NULL, &__pyx_v_num_train, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_15 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_15 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_15)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_15);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[6] = {__pyx_t_15, __pyx_t_2, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_3};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[6] = {__pyx_t_15, __pyx_t_2, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_3};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_16 = PyTuple_New(5+__pyx_t_14); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
if (__pyx_t_15) {
__Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_16, 0, __pyx_t_15); __pyx_t_15 = NULL;
}
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_16, 0+__pyx_t_14, __pyx_t_2);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_16, 1+__pyx_t_14, __pyx_v_batch_size);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_16, 2+__pyx_t_14, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_16, 3+__pyx_t_14, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_16, 4+__pyx_t_14, __pyx_t_3);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_16, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
__pyx_t_4 = 0;
/* "train.py":60
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size), # <<<<<<<<<<<<<<
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
* validation_steps=max(1, num_val//batch_size),
*/
__pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_16 = PyNumber_FloorDivide(__pyx_v_num_train, __pyx_v_batch_size); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_17 = 1;
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_15 = PyObject_RichCompare(__pyx_t_16, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_15); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_15); if (unlikely(__pyx_t_12 < 0)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
if (__pyx_t_12) {
__Pyx_INCREF(__pyx_t_16);
__pyx_t_3 = __pyx_t_16;
} else {
__pyx_t_15 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_15);
__pyx_t_3 = __pyx_t_15;
__pyx_t_15 = 0;
}
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_steps_per_epoch, __pyx_t_3) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":61
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* validation_steps=max(1, num_val//batch_size),
* epochs=50,
*/
__pyx_t_16 = __Pyx_GetModuleGlobalName(__pyx_n_s_data_generator_wrapper); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 61, __pyx_L1_error) }
__pyx_t_15 = __Pyx_PyObject_GetSlice(__pyx_v_lines, 0, 0, &__pyx_v_num_train, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_15);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_18 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_18 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_18)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_18);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[6] = {__pyx_t_18, __pyx_t_15, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_2};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_16, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[6] = {__pyx_t_18, __pyx_t_15, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_2};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_16, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
{
__pyx_t_19 = PyTuple_New(5+__pyx_t_14); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
if (__pyx_t_18) {
__Pyx_GIVEREF(__pyx_t_18); PyTuple_SET_ITEM(__pyx_t_19, 0, __pyx_t_18); __pyx_t_18 = NULL;
}
__Pyx_GIVEREF(__pyx_t_15);
PyTuple_SET_ITEM(__pyx_t_19, 0+__pyx_t_14, __pyx_t_15);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_19, 1+__pyx_t_14, __pyx_v_batch_size);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_19, 2+__pyx_t_14, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_19, 3+__pyx_t_14, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_19, 4+__pyx_t_14, __pyx_t_2);
__pyx_t_15 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_16, __pyx_t_19, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
}
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_validation_data, __pyx_t_3) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":62
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
* validation_steps=max(1, num_val//batch_size), # <<<<<<<<<<<<<<
* epochs=50,
* initial_epoch=0,
*/
__pyx_t_3 = PyNumber_FloorDivide(__pyx_v_num_val, __pyx_v_batch_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_17 = 1;
__pyx_t_19 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_t_19, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
__pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_12 < 0)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_12) {
__Pyx_INCREF(__pyx_t_3);
__pyx_t_16 = __pyx_t_3;
} else {
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_16 = __pyx_t_2;
__pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_validation_steps, __pyx_t_16) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_epochs, __pyx_int_50) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_initial_epoch, __pyx_int_0) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
/* "train.py":65
* epochs=50,
* initial_epoch=0,
* callbacks=[logging, checkpoint]) # <<<<<<<<<<<<<<
* model.save_weights(log_dir + 'trained_weights_stage_1.h5')
*
*/
__pyx_t_16 = PyList_New(2); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_INCREF(__pyx_v_logging);
__Pyx_GIVEREF(__pyx_v_logging);
PyList_SET_ITEM(__pyx_t_16, 0, __pyx_v_logging);
__Pyx_INCREF(__pyx_v_checkpoint);
__Pyx_GIVEREF(__pyx_v_checkpoint);
PyList_SET_ITEM(__pyx_t_16, 1, __pyx_v_checkpoint);
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_callbacks, __pyx_t_16) < 0) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
/* "train.py":59
* batch_size = 32
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
*/
__pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
/* "train.py":66
* initial_epoch=0,
* callbacks=[logging, checkpoint])
* model.save_weights(log_dir + 'trained_weights_stage_1.h5') # <<<<<<<<<<<<<<
*
* # Unfreeze and continue training, to fine-tune.
*/
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_save_weights); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyNumber_Add(__pyx_v_log_dir, __pyx_kp_s_trained_weights_stage_1_h5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_13 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_13 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_13)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_13);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_13) {
__pyx_t_16 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_16);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_t_1};
__pyx_t_16 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_t_1};
__pyx_t_16 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_13); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_13); __pyx_t_13 = NULL;
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
/* "train.py":71
* # Train longer if the result is not good.
* if True:
* for i in range(len(model.layers)): # <<<<<<<<<<<<<<
* model.layers[i].trainable = True
* model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_layers); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_5 = PyObject_Length(__pyx_t_16); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_5; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "train.py":72
* if True:
* for i in range(len(model.layers)):
* model.layers[i].trainable = True # <<<<<<<<<<<<<<
* model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
* print('Unfreeze all of the layers.')
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_layers); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_4 = __Pyx_GetItemInt(__pyx_t_16, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (__Pyx_PyObject_SetAttrStr(__pyx_t_4, __pyx_n_s_trainable, Py_True) < 0) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
/* "train.py":73
* for i in range(len(model.layers)):
* model.layers[i].trainable = True
* model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change # <<<<<<<<<<<<<<
* print('Unfreeze all of the layers.')
*
*/
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_compile); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_16 = PyDict_New(); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_Adam); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_lr, __pyx_float_1eneg_4) < 0) __PYX_ERR(0, 73, __pyx_L1_error)
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_optimizer, __pyx_t_13) < 0) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__pyx_t_13 = PyDict_New(); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_5_main_1lambda1, 0, __pyx_n_s_main_locals_lambda, NULL, __pyx_n_s_train, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_yolo_loss, __pyx_t_1) < 0) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_loss, __pyx_t_13) < 0) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_16); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":74
* model.layers[i].trainable = True
* model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
* print('Unfreeze all of the layers.') # <<<<<<<<<<<<<<
*
* batch_size = 32 # note that more GPU memory is required after unfreezing the body
*/
if (__Pyx_PrintOne(0, __pyx_kp_s_Unfreeze_all_of_the_layers) < 0) __PYX_ERR(0, 74, __pyx_L1_error)
/* "train.py":76
* print('Unfreeze all of the layers.')
*
* batch_size = 32 # note that more GPU memory is required after unfreezing the body # <<<<<<<<<<<<<<
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
*/
__Pyx_INCREF(__pyx_int_32);
__Pyx_DECREF_SET(__pyx_v_batch_size, __pyx_int_32);
/* "train.py":77
*
* batch_size = 32 # note that more GPU memory is required after unfreezing the body
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) # <<<<<<<<<<<<<<
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size),
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Train_on_samples_val_on_samples, __pyx_n_s_format); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_4 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_v_num_train, __pyx_v_num_val, __pyx_v_batch_size};
__pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_16, __pyx_temp+1-__pyx_t_14, 3+__pyx_t_14); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_13);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[4] = {__pyx_t_4, __pyx_v_num_train, __pyx_v_num_val, __pyx_v_batch_size};
__pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_16, __pyx_temp+1-__pyx_t_14, 3+__pyx_t_14); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_13);
} else
#endif
{
__pyx_t_1 = PyTuple_New(3+__pyx_t_14); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_INCREF(__pyx_v_num_train);
__Pyx_GIVEREF(__pyx_v_num_train);
PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_14, __pyx_v_num_train);
__Pyx_INCREF(__pyx_v_num_val);
__Pyx_GIVEREF(__pyx_v_num_val);
PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_14, __pyx_v_num_val);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_1, 2+__pyx_t_14, __pyx_v_batch_size);
__pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_16, __pyx_t_1, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
if (__Pyx_PrintOne(0, __pyx_t_13) < 0) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
/* "train.py":78
* batch_size = 32 # note that more GPU memory is required after unfreezing the body
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
*/
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_fit_generator); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_data_generator_wrapper); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 78, __pyx_L1_error) }
__pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_lines, 0, 0, NULL, &__pyx_v_num_train, NULL, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[6] = {__pyx_t_2, __pyx_t_4, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_3};
__pyx_t_16 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[6] = {__pyx_t_2, __pyx_t_4, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_3};
__pyx_t_16 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_19 = PyTuple_New(5+__pyx_t_14); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_19, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_19, 0+__pyx_t_14, __pyx_t_4);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_19, 1+__pyx_t_14, __pyx_v_batch_size);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_19, 2+__pyx_t_14, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_19, 3+__pyx_t_14, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_19, 4+__pyx_t_14, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_3 = 0;
__pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_19, NULL); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_16);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_16);
__pyx_t_16 = 0;
/* "train.py":79
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size), # <<<<<<<<<<<<<<
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
* validation_steps=max(1, num_val//batch_size),
*/
__pyx_t_16 = PyDict_New(); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_19 = PyNumber_FloorDivide(__pyx_v_num_train, __pyx_v_batch_size); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
__pyx_t_17 = 1;
__pyx_t_4 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_19, __pyx_t_4, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_12 < 0)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_12) {
__Pyx_INCREF(__pyx_t_19);
__pyx_t_3 = __pyx_t_19;
} else {
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __pyx_t_2;
__pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_steps_per_epoch, __pyx_t_3) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":80
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* validation_steps=max(1, num_val//batch_size),
* epochs=100,
*/
__pyx_t_19 = __Pyx_GetModuleGlobalName(__pyx_n_s_data_generator_wrapper); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
if (unlikely(!__pyx_v_lines)) { __Pyx_RaiseUnboundLocalError("lines"); __PYX_ERR(0, 80, __pyx_L1_error) }
__pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_lines, 0, 0, &__pyx_v_num_train, NULL, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_num_classes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_15 = NULL;
__pyx_t_14 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_19))) {
__pyx_t_15 = PyMethod_GET_SELF(__pyx_t_19);
if (likely(__pyx_t_15)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19);
__Pyx_INCREF(__pyx_t_15);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_19, function);
__pyx_t_14 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_19)) {
PyObject *__pyx_temp[6] = {__pyx_t_15, __pyx_t_2, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_4};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_19, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_19)) {
PyObject *__pyx_temp[6] = {__pyx_t_15, __pyx_t_2, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_t_4};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_19, __pyx_temp+1-__pyx_t_14, 5+__pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else
#endif
{
__pyx_t_18 = PyTuple_New(5+__pyx_t_14); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
if (__pyx_t_15) {
__Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_t_15); __pyx_t_15 = NULL;
}
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_18, 0+__pyx_t_14, __pyx_t_2);
__Pyx_INCREF(__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_18, 1+__pyx_t_14, __pyx_v_batch_size);
__Pyx_INCREF(__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_18, 2+__pyx_t_14, __pyx_v_input_shape);
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_18, 3+__pyx_t_14, __pyx_v_anchors);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_18, 4+__pyx_t_14, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_19, __pyx_t_18, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
}
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_validation_data, __pyx_t_3) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":81
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
* validation_steps=max(1, num_val//batch_size), # <<<<<<<<<<<<<<
* epochs=100,
* initial_epoch=50,
*/
__pyx_t_3 = PyNumber_FloorDivide(__pyx_v_num_val, __pyx_v_batch_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_17 = 1;
__pyx_t_18 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_18);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_t_18, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
__pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_12 < 0)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_12) {
__Pyx_INCREF(__pyx_t_3);
__pyx_t_19 = __pyx_t_3;
} else {
__pyx_t_4 = __Pyx_PyInt_From_long(__pyx_t_17); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_19 = __pyx_t_4;
__pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_validation_steps, __pyx_t_19) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_epochs, __pyx_int_100) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_initial_epoch, __pyx_int_50) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
/* "train.py":84
* epochs=100,
* initial_epoch=50,
* callbacks=[logging, checkpoint, reduce_lr, early_stopping]) # <<<<<<<<<<<<<<
* model.save_weights(log_dir + 'trained_weights_final.h5')
*
*/
__pyx_t_19 = PyList_New(4); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 84, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
__Pyx_INCREF(__pyx_v_logging);
__Pyx_GIVEREF(__pyx_v_logging);
PyList_SET_ITEM(__pyx_t_19, 0, __pyx_v_logging);
__Pyx_INCREF(__pyx_v_checkpoint);
__Pyx_GIVEREF(__pyx_v_checkpoint);
PyList_SET_ITEM(__pyx_t_19, 1, __pyx_v_checkpoint);
__Pyx_INCREF(__pyx_v_reduce_lr);
__Pyx_GIVEREF(__pyx_v_reduce_lr);
PyList_SET_ITEM(__pyx_t_19, 2, __pyx_v_reduce_lr);
__Pyx_INCREF(__pyx_v_early_stopping);
__Pyx_GIVEREF(__pyx_v_early_stopping);
PyList_SET_ITEM(__pyx_t_19, 3, __pyx_v_early_stopping);
if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_callbacks, __pyx_t_19) < 0) __PYX_ERR(0, 79, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
/* "train.py":78
* batch_size = 32 # note that more GPU memory is required after unfreezing the body
* print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
* model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), # <<<<<<<<<<<<<<
* steps_per_epoch=max(1, num_train//batch_size),
* validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
*/
__pyx_t_19 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_1, __pyx_t_16); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
/* "train.py":85
* initial_epoch=50,
* callbacks=[logging, checkpoint, reduce_lr, early_stopping])
* model.save_weights(log_dir + 'trained_weights_final.h5') # <<<<<<<<<<<<<<
*
* # Further training if needed.
*/
__pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_v_model, __pyx_n_s_save_weights); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_16);
__pyx_t_1 = PyNumber_Add(__pyx_v_log_dir, __pyx_kp_s_trained_weights_final_h5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_13 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_16))) {
__pyx_t_13 = PyMethod_GET_SELF(__pyx_t_16);
if (likely(__pyx_t_13)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16);
__Pyx_INCREF(__pyx_t_13);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_16, function);
}
}
if (!__pyx_t_13) {
__pyx_t_19 = __Pyx_PyObject_CallOneArg(__pyx_t_16, __pyx_t_1); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_19);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_t_1};
__pyx_t_19 = __Pyx_PyFunction_FastCall(__pyx_t_16, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_GOTREF(__pyx_t_19);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_16)) {
PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_t_1};
__pyx_t_19 = __Pyx_PyCFunction_FastCall(__pyx_t_16, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_GOTREF(__pyx_t_19);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_13); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_13); __pyx_t_13 = NULL;
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_19 = __Pyx_PyObject_Call(__pyx_t_16, __pyx_t_3, NULL); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_19);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0;
__Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
/* "train.py":16
*
*
* def _main(): # <<<<<<<<<<<<<<
* annotation_path = 'model_data/train.txt'
* log_dir = 'logs/000/'
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_13);
__Pyx_XDECREF(__pyx_t_15);
__Pyx_XDECREF(__pyx_t_16);
__Pyx_XDECREF(__pyx_t_18);
__Pyx_XDECREF(__pyx_t_19);
__Pyx_AddTraceback("train._main", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_annotation_path);
__Pyx_XDECREF(__pyx_v_log_dir);
__Pyx_XDECREF(__pyx_v_classes_path);
__Pyx_XDECREF(__pyx_v_anchors_path);
__Pyx_XDECREF(__pyx_v_class_names);
__Pyx_XDECREF(__pyx_v_anchors);
__Pyx_XDECREF(__pyx_v_input_shape);
__Pyx_XDECREF(__pyx_v_is_tiny_version);
__Pyx_XDECREF(__pyx_v_model);
__Pyx_XDECREF(__pyx_v_logging);
__Pyx_XDECREF(__pyx_v_checkpoint);
__Pyx_XDECREF(__pyx_v_reduce_lr);
__Pyx_XDECREF(__pyx_v_early_stopping);
__Pyx_XDECREF(__pyx_v_f);
__Pyx_XDECREF(__pyx_v_lines);
__Pyx_XDECREF(__pyx_v_num_val);
__Pyx_XDECREF(__pyx_v_num_train);
__Pyx_XDECREF(__pyx_v_batch_size);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":90
 *
 *
 * def get_classes(classes_path): # <<<<<<<<<<<<<<
 * '''loads the classes'''
 * with open(classes_path) as f:
 */
/* Python wrapper */
/* Cython-generated entry point for train.get_classes. The METH_O flag in the
 * PyMethodDef means CPython delivers exactly one positional argument
 * (classes_path) directly as a PyObject*, with no tuple unpacking needed. */
static PyObject *__pyx_pw_5train_3get_classes(PyObject *__pyx_self, PyObject *__pyx_v_classes_path); /*proto*/
static char __pyx_doc_5train_2get_classes[] = "loads the classes";
static PyMethodDef __pyx_mdef_5train_3get_classes = {"get_classes", (PyCFunction)__pyx_pw_5train_3get_classes, METH_O, __pyx_doc_5train_2get_classes};
/* Thin wrapper: sets up RefNanny (Cython's debug refcount tracker, a no-op in
 * release builds) and forwards straight to the implementation function
 * __pyx_pf_5train_2get_classes, returning its result unchanged (NULL on error). */
static PyObject *__pyx_pw_5train_3get_classes(PyObject *__pyx_self, PyObject *__pyx_v_classes_path) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("get_classes (wrapper)", 0);
__pyx_r = __pyx_pf_5train_2get_classes(__pyx_self, ((PyObject *)__pyx_v_classes_path));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of train.get_classes(classes_path) as generated by Cython:
 *     with open(classes_path) as f:
 *         class_names = f.readlines()
 *     class_names = [c.strip() for c in class_names]
 *     return class_names
 * Returns a new reference to the list of stripped lines, or NULL with an
 * exception set on failure. All __pyx_t_* temporaries are released on both
 * the success and the __pyx_L1_error paths below. */
static PyObject *__pyx_pf_5train_2get_classes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_classes_path) {
PyObject *__pyx_v_f = NULL;
PyObject *__pyx_v_class_names = NULL;
PyObject *__pyx_v_c = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
PyObject *(*__pyx_t_13)(PyObject *);
PyObject *__pyx_t_14 = NULL;
__Pyx_RefNannySetupContext("get_classes", 0);
/* "train.py":92
 * def get_classes(classes_path):
 * '''loads the classes'''
 * with open(classes_path) as f: # <<<<<<<<<<<<<<
 * class_names = f.readlines()
 * class_names = [c.strip() for c in class_names]
 */
/*with:*/ {
/* Emulates the `with` protocol by hand: call open(classes_path), then look
 * up __exit__ first (kept in __pyx_t_3 for the duration of the block) and
 * __enter__ second, so __exit__ is guaranteed available if the body raises. */
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_classes_path);
__Pyx_GIVEREF(__pyx_v_classes_path);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_classes_path);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_exit); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_4);
/* Unpack bound-method fast path: call __enter__ via its underlying function
 * with self as the explicit first argument, avoiding a temporary method object. */
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (__pyx_t_5) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L3_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L3_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __pyx_t_1;
__pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/*try:*/ {
{
/* Save the current exception state so it can be restored after the `with`
 * body completes or its handler runs (Python exception-state semantics). */
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
/*try:*/ {
/* `f` takes ownership of the value returned by __enter__. */
__pyx_v_f = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":93
 * '''loads the classes'''
 * with open(classes_path) as f:
 * class_names = f.readlines() # <<<<<<<<<<<<<<
 * class_names = [c.strip() for c in class_names]
 * return class_names
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_f, __pyx_n_s_readlines); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 93, __pyx_L7_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (__pyx_t_1) {
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L7_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L7_error)
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_class_names = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":92
 * def get_classes(classes_path):
 * '''loads the classes'''
 * with open(classes_path) as f: # <<<<<<<<<<<<<<
 * class_names = f.readlines()
 * class_names = [c.strip() for c in class_names]
 */
}
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L12_try_end;
__pyx_L7_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
/*except:*/ {
/* The body raised: fetch the exception and invoke
 * __exit__(exc_type, exc_value, traceback). A truthy return from __exit__
 * suppresses the exception; otherwise it is re-raised via ErrRestore. */
__Pyx_AddTraceback("train.get_classes", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_2, &__pyx_t_1) < 0) __PYX_ERR(0, 92, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 92, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 92, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_9);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__pyx_t_10 < 0) __PYX_ERR(0, 92, __pyx_L9_except_error)
__pyx_t_11 = ((!(__pyx_t_10 != 0)) != 0);
if (__pyx_t_11) {
__Pyx_GIVEREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_2, __pyx_t_1);
__pyx_t_4 = 0; __pyx_t_2 = 0; __pyx_t_1 = 0;
__PYX_ERR(0, 92, __pyx_L9_except_error)
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L8_exception_handled;
}
__pyx_L9_except_error:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
goto __pyx_L1_error;
__pyx_L8_exception_handled:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_L12_try_end:;
}
}
/*finally:*/ {
/*normal exit:*/{
/* Normal completion of the body: call __exit__(None, None, None)
 * (__pyx_tuple__5 is the prebuilt (None, None, None) argument tuple). */
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__5, NULL);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 92, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
goto __pyx_L6;
}
__pyx_L6:;
}
goto __pyx_L16;
__pyx_L3_error:;
/* __enter__ lookup/call failed: drop the already-acquired __exit__ reference. */
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L1_error;
__pyx_L16:;
}
/* "train.py":94
 * with open(classes_path) as f:
 * class_names = f.readlines()
 * class_names = [c.strip() for c in class_names] # <<<<<<<<<<<<<<
 * return class_names
 *
 */
/* List comprehension [c.strip() for c in class_names]: indexed fast path for
 * exact list/tuple (t_13 == NULL), generic tp_iternext protocol otherwise. */
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(!__pyx_v_class_names)) { __Pyx_RaiseUnboundLocalError("class_names"); __PYX_ERR(0, 94, __pyx_L1_error) }
if (likely(PyList_CheckExact(__pyx_v_class_names)) || PyTuple_CheckExact(__pyx_v_class_names)) {
__pyx_t_2 = __pyx_v_class_names; __Pyx_INCREF(__pyx_t_2); __pyx_t_12 = 0;
__pyx_t_13 = NULL;
} else {
__pyx_t_12 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_class_names); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_13 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 94, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_13)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_12 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_12); __Pyx_INCREF(__pyx_t_4); __pyx_t_12++; if (unlikely(0 < 0)) __PYX_ERR(0, 94, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_12); __pyx_t_12++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
if (__pyx_t_12 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_12); __Pyx_INCREF(__pyx_t_4); __pyx_t_12++; if (unlikely(0 < 0)) __PYX_ERR(0, 94, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_12); __pyx_t_12++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
}
} else {
/* Generic-iterator path: NULL result means either StopIteration
 * (normal loop end, error cleared) or a real error (propagated). */
__pyx_t_4 = __pyx_t_13(__pyx_t_2);
if (unlikely(!__pyx_t_4)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 94, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_4);
}
__Pyx_XDECREF_SET(__pyx_v_c, __pyx_t_4);
__pyx_t_4 = 0;
/* c.strip() — same bound-method unpack fast path as above. */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_c, __pyx_n_s_strip); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_14 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_14)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_14);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
if (__pyx_t_14) {
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_14); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
} else {
__pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) __PYX_ERR(0, 94, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* Rebind class_names to the new stripped list, dropping the readlines result. */
__Pyx_XDECREF_SET(__pyx_v_class_names, __pyx_t_1);
__pyx_t_1 = 0;
/* "train.py":95
 * class_names = f.readlines()
 * class_names = [c.strip() for c in class_names]
 * return class_names # <<<<<<<<<<<<<<
 *
 * def get_anchors(anchors_path):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_class_names);
__pyx_r = __pyx_v_class_names;
goto __pyx_L0;
/* "train.py":90
 *
 *
 * def get_classes(classes_path): # <<<<<<<<<<<<<<
 * '''loads the classes'''
 * with open(classes_path) as f:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_AddTraceback("train.get_classes", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_f);
__Pyx_XDECREF(__pyx_v_class_names);
__Pyx_XDECREF(__pyx_v_c);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":97
 * return class_names
 *
 * def get_anchors(anchors_path): # <<<<<<<<<<<<<<
 * '''loads the anchors from a file'''
 * with open(anchors_path) as f:
 */
/* Python wrapper */
/* Cython-generated entry point for train.get_anchors. METH_O: CPython passes
 * the single positional argument (anchors_path) directly, no argument tuple. */
static PyObject *__pyx_pw_5train_5get_anchors(PyObject *__pyx_self, PyObject *__pyx_v_anchors_path); /*proto*/
static char __pyx_doc_5train_4get_anchors[] = "loads the anchors from a file";
static PyMethodDef __pyx_mdef_5train_5get_anchors = {"get_anchors", (PyCFunction)__pyx_pw_5train_5get_anchors, METH_O, __pyx_doc_5train_4get_anchors};
/* Thin wrapper: RefNanny bookkeeping only, then delegates to the
 * implementation __pyx_pf_5train_4get_anchors (result passed through as-is;
 * NULL on error with the exception already set by the implementation). */
static PyObject *__pyx_pw_5train_5get_anchors(PyObject *__pyx_self, PyObject *__pyx_v_anchors_path) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("get_anchors (wrapper)", 0);
__pyx_r = __pyx_pf_5train_4get_anchors(__pyx_self, ((PyObject *)__pyx_v_anchors_path));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5train_4get_anchors(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_anchors_path) {
PyObject *__pyx_v_f = NULL;
PyObject *__pyx_v_anchors = NULL;
PyObject *__pyx_v_x = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
int __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
PyObject *(*__pyx_t_13)(PyObject *);
__Pyx_RefNannySetupContext("get_anchors", 0);
/* "train.py":99
* def get_anchors(anchors_path):
* '''loads the anchors from a file'''
* with open(anchors_path) as f: # <<<<<<<<<<<<<<
* anchors = f.readline()
* anchors = [float(x) for x in anchors.split(',')]
*/
/*with:*/ {
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_anchors_path);
__Pyx_GIVEREF(__pyx_v_anchors_path);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_anchors_path);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_exit); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (__pyx_t_5) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L3_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L3_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __pyx_t_1;
__pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/*try:*/ {
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
/*try:*/ {
__pyx_v_f = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":100
* '''loads the anchors from a file'''
* with open(anchors_path) as f:
* anchors = f.readline() # <<<<<<<<<<<<<<
* anchors = [float(x) for x in anchors.split(',')]
* return np.array(anchors).reshape(-1, 2)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_f, __pyx_n_s_readline); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L7_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (__pyx_t_1) {
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 100, __pyx_L7_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 100, __pyx_L7_error)
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_anchors = __pyx_t_4;
__pyx_t_4 = 0;
/* "train.py":99
* def get_anchors(anchors_path):
* '''loads the anchors from a file'''
* with open(anchors_path) as f: # <<<<<<<<<<<<<<
* anchors = f.readline()
* anchors = [float(x) for x in anchors.split(',')]
*/
}
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L12_try_end;
__pyx_L7_error:;
__Pyx_PyThreadState_assign
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
/*except:*/ {
__Pyx_AddTraceback("train.get_anchors", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_2, &__pyx_t_1) < 0) __PYX_ERR(0, 99, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 99, __pyx_L9_except_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_9);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__pyx_t_10 < 0) __PYX_ERR(0, 99, __pyx_L9_except_error)
__pyx_t_11 = ((!(__pyx_t_10 != 0)) != 0);
if (__pyx_t_11) {
__Pyx_GIVEREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_2, __pyx_t_1);
__pyx_t_4 = 0; __pyx_t_2 = 0; __pyx_t_1 = 0;
__PYX_ERR(0, 99, __pyx_L9_except_error)
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L8_exception_handled;
}
__pyx_L9_except_error:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
goto __pyx_L1_error;
__pyx_L8_exception_handled:;
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_L12_try_end:;
}
}
/*finally:*/ {
/*normal exit:*/{
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__6, NULL);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 99, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
goto __pyx_L6;
}
__pyx_L6:;
}
goto __pyx_L16;
__pyx_L3_error:;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L1_error;
__pyx_L16:;
}
/* "train.py":101
* with open(anchors_path) as f:
* anchors = f.readline()
* anchors = [float(x) for x in anchors.split(',')] # <<<<<<<<<<<<<<
* return np.array(anchors).reshape(-1, 2)
*
*/
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(!__pyx_v_anchors)) { __Pyx_RaiseUnboundLocalError("anchors"); __PYX_ERR(0, 101, __pyx_L1_error) }
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_anchors, __pyx_n_s_split); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (likely(PyList_CheckExact(__pyx_t_4)) || PyTuple_CheckExact(__pyx_t_4)) {
__pyx_t_2 = __pyx_t_4; __Pyx_INCREF(__pyx_t_2); __pyx_t_12 = 0;
__pyx_t_13 = NULL;
} else {
__pyx_t_12 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_13 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 101, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
for (;;) {
if (likely(!__pyx_t_13)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_12 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_12); __Pyx_INCREF(__pyx_t_4); __pyx_t_12++; if (unlikely(0 < 0)) __PYX_ERR(0, 101, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_12); __pyx_t_12++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
if (__pyx_t_12 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_12); __Pyx_INCREF(__pyx_t_4); __pyx_t_12++; if (unlikely(0 < 0)) __PYX_ERR(0, 101, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_2, __pyx_t_12); __pyx_t_12++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
}
} else {
__pyx_t_4 = __pyx_t_13(__pyx_t_2);
if (unlikely(!__pyx_t_4)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 101, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_4);
}
__Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyNumber_Float(__pyx_v_x); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) __PYX_ERR(0, 101, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF_SET(__pyx_v_anchors, __pyx_t_1);
__pyx_t_1 = 0;
/* "train.py":102
* anchors = f.readline()
* anchors = [float(x) for x in anchors.split(',')]
* return np.array(anchors).reshape(-1, 2) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_anchors); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_v_anchors};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_v_anchors};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
{
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL;
__Pyx_INCREF(__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_v_anchors);
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_reshape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 102, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "train.py":97
* return class_names
*
* def get_anchors(anchors_path): # <<<<<<<<<<<<<<
* '''loads the anchors from a file'''
* with open(anchors_path) as f:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("train.get_anchors", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_f);
__Pyx_XDECREF(__pyx_v_anchors);
__Pyx_XDECREF(__pyx_v_x);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":105
*
*
* def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, # <<<<<<<<<<<<<<
* weights_path='model_data/yolo_weights.h5'):
* '''create the training model'''
*/
/* Python wrapper */
static PyObject *__pyx_pw_5train_7create_model(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_5train_6create_model[] = "create the training model";
static PyMethodDef __pyx_mdef_5train_7create_model = {"create_model", (PyCFunction)__pyx_pw_5train_7create_model, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5train_6create_model};
static PyObject *__pyx_pw_5train_7create_model(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_input_shape = 0;
PyObject *__pyx_v_anchors = 0;
PyObject *__pyx_v_num_classes = 0;
PyObject *__pyx_v_load_pretrained = 0;
PyObject *__pyx_v_freeze_body = 0;
PyObject *__pyx_v_weights_path = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("create_model (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_input_shape,&__pyx_n_s_anchors,&__pyx_n_s_num_classes,&__pyx_n_s_load_pretrained,&__pyx_n_s_freeze_body,&__pyx_n_s_weights_path,0};
PyObject* values[6] = {0,0,0,0,0,0};
values[3] = ((PyObject *)((PyObject *)Py_True));
values[4] = ((PyObject *)((PyObject *)__pyx_int_2));
values[5] = ((PyObject *)((PyObject*)__pyx_kp_s_model_data_yolo_weights_h5));
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_input_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_anchors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("create_model", 0, 3, 6, 1); __PYX_ERR(0, 105, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_classes)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("create_model", 0, 3, 6, 2); __PYX_ERR(0, 105, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_load_pretrained);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_freeze_body);
if (value) { values[4] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 5:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights_path);
if (value) { values[5] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "create_model") < 0)) __PYX_ERR(0, 105, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_input_shape = values[0];
__pyx_v_anchors = values[1];
__pyx_v_num_classes = values[2];
__pyx_v_load_pretrained = values[3];
__pyx_v_freeze_body = values[4];
__pyx_v_weights_path = values[5];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("create_model", 0, 3, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 105, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("train.create_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5train_6create_model(__pyx_self, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes, __pyx_v_load_pretrained, __pyx_v_freeze_body, __pyx_v_weights_path);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5train_6create_model(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes, PyObject *__pyx_v_load_pretrained, PyObject *__pyx_v_freeze_body, PyObject *__pyx_v_weights_path) {
PyObject *__pyx_v_image_input = NULL;
PyObject *__pyx_v_h = NULL;
PyObject *__pyx_v_w = NULL;
PyObject *__pyx_v_num_anchors = NULL;
PyObject *__pyx_v_y_true = NULL;
PyObject *__pyx_v_model_body = NULL;
PyObject *__pyx_v_num = NULL;
PyObject *__pyx_v_i = NULL;
PyObject *__pyx_v_model_loss = NULL;
PyObject *__pyx_v_model = NULL;
long __pyx_v_l;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *(*__pyx_t_4)(PyObject *);
Py_ssize_t __pyx_t_5;
long __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
PyObject *(*__pyx_t_15)(PyObject *);
__Pyx_RefNannySetupContext("create_model", 0);
/* "train.py":108
* weights_path='model_data/yolo_weights.h5'):
* '''create the training model'''
* K.clear_session() # get a new session # <<<<<<<<<<<<<<
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_K); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_clear_session); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "train.py":109
* '''create the training model'''
* K.clear_session() # get a new session
* image_input = Input(shape=(None, None, 3)) # <<<<<<<<<<<<<<
* h, w = input_shape
* num_anchors = len(anchors)
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_Input); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_tuple__10) < 0) __PYX_ERR(0, 109, __pyx_L1_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_image_input = __pyx_t_2;
__pyx_t_2 = 0;
/* "train.py":110
* K.clear_session() # get a new session
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape # <<<<<<<<<<<<<<
* num_anchors = len(anchors)
*
*/
if ((likely(PyTuple_CheckExact(__pyx_v_input_shape))) || (PyList_CheckExact(__pyx_v_input_shape))) {
PyObject* sequence = __pyx_v_input_shape;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 110, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_2 = PyList_GET_ITEM(sequence, 0);
__pyx_t_3 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 110, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 110, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
} else {
Py_ssize_t index = -1;
__pyx_t_1 = PyObject_GetIter(__pyx_v_input_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext;
index = 0; __pyx_t_2 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
index = 1; __pyx_t_3 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_3);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_4(__pyx_t_1), 2) < 0) __PYX_ERR(0, 110, __pyx_L1_error)
__pyx_t_4 = NULL;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L4_unpacking_done;
__pyx_L3_unpacking_failed:;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_4 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 110, __pyx_L1_error)
__pyx_L4_unpacking_done:;
}
__pyx_v_h = __pyx_t_2;
__pyx_t_2 = 0;
__pyx_v_w = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":111
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape
* num_anchors = len(anchors) # <<<<<<<<<<<<<<
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
*/
__pyx_t_5 = PyObject_Length(__pyx_v_anchors); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 111, __pyx_L1_error)
__pyx_t_3 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_num_anchors = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":113
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ # <<<<<<<<<<<<<<
* num_anchors//3, num_classes+5)) for l in range(3)]
*
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "train.py":114
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
* num_anchors//3, num_classes+5)) for l in range(3)] # <<<<<<<<<<<<<<
*
* model_body = yolo_body(image_input, num_anchors//3, num_classes)
*/
for (__pyx_t_6 = 0; __pyx_t_6 < 3; __pyx_t_6+=1) {
__pyx_v_l = __pyx_t_6;
/* "train.py":113
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ # <<<<<<<<<<<<<<
* num_anchors//3, num_classes+5)) for l in range(3)]
*
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_Input); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_int_0, __pyx_int_32) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_7, __pyx_int_1, __pyx_int_16) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_7, __pyx_int_2, __pyx_int_8) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
__pyx_t_8 = __Pyx_PyInt_From_long(__pyx_v_l); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = __Pyx_PyDict_GetItem(__pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PyNumber_FloorDivide(__pyx_v_h, __pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (PyDict_SetItem(__pyx_t_9, __pyx_int_0, __pyx_int_32) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_9, __pyx_int_1, __pyx_int_16) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_9, __pyx_int_2, __pyx_int_8) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
__pyx_t_7 = __Pyx_PyInt_From_long(__pyx_v_l); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_10 = __Pyx_PyDict_GetItem(__pyx_t_9, __pyx_t_7); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = PyNumber_FloorDivide(__pyx_v_w, __pyx_t_10); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "train.py":114
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
* num_anchors//3, num_classes+5)) for l in range(3)] # <<<<<<<<<<<<<<
*
* model_body = yolo_body(image_input, num_anchors//3, num_classes)
*/
__pyx_t_10 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_num_anchors, __pyx_int_3, 3, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__pyx_t_9 = __Pyx_PyInt_AddObjC(__pyx_v_num_classes, __pyx_int_5, 5, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
/* "train.py":113
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ # <<<<<<<<<<<<<<
* num_anchors//3, num_classes+5)) for l in range(3)]
*
*/
__pyx_t_11 = PyTuple_New(4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_t_10);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_11, 3, __pyx_t_9);
__pyx_t_8 = 0;
__pyx_t_7 = 0;
__pyx_t_10 = 0;
__pyx_t_9 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_shape, __pyx_t_11) < 0) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_11))) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
}
__pyx_v_y_true = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "train.py":116
* num_anchors//3, num_classes+5)) for l in range(3)]
*
* model_body = yolo_body(image_input, num_anchors//3, num_classes) # <<<<<<<<<<<<<<
* print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
*/
__pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_yolo_body); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_1 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_num_anchors, __pyx_int_3, 3, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_image_input, __pyx_t_1, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 3+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_image_input, __pyx_t_1, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 3+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(3+__pyx_t_12); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_INCREF(__pyx_v_image_input);
__Pyx_GIVEREF(__pyx_v_image_input);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_12, __pyx_v_image_input);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_12, __pyx_t_1);
__Pyx_INCREF(__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_v_num_classes);
PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_12, __pyx_v_num_classes);
__pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model_body = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":117
*
* model_body = yolo_body(image_input, num_anchors//3, num_classes)
* print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) # <<<<<<<<<<<<<<
*
* if load_pretrained:
*/
__pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Create_YOLOv3_model_with_anchors, __pyx_n_s_format); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_9 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_num_anchors, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_num_anchors, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
{
__pyx_t_1 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_INCREF(__pyx_v_num_anchors);
__Pyx_GIVEREF(__pyx_v_num_anchors);
PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_12, __pyx_v_num_anchors);
__Pyx_INCREF(__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_v_num_classes);
PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_12, __pyx_v_num_classes);
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
if (__Pyx_PrintOne(0, __pyx_t_3) < 0) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":119
* print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
* if load_pretrained: # <<<<<<<<<<<<<<
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
*/
__pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_load_pretrained); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 119, __pyx_L1_error)
if (__pyx_t_13) {
/* "train.py":120
*
* if load_pretrained:
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) # <<<<<<<<<<<<<<
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]:
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_load_weights); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_INCREF(__pyx_v_weights_path);
__Pyx_GIVEREF(__pyx_v_weights_path);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_v_weights_path);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_by_name, Py_True) < 0) __PYX_ERR(0, 120, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_skip_mismatch, Py_True) < 0) __PYX_ERR(0, 120, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "train.py":121
* if load_pretrained:
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path)) # <<<<<<<<<<<<<<
* if freeze_body in [1, 2]:
* # Freeze darknet53 body or freeze all but 3 output layers.
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Load_weights, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_11 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_11 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_11)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_11);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
if (!__pyx_t_11) {
__pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_weights_path); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_v_weights_path};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_9);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_v_weights_path};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_9);
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_11); __pyx_t_11 = NULL;
__Pyx_INCREF(__pyx_v_weights_path);
__Pyx_GIVEREF(__pyx_v_weights_path);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_v_weights_path);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__Pyx_PrintOne(0, __pyx_t_9) < 0) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "train.py":122
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]: # <<<<<<<<<<<<<<
* # Freeze darknet53 body or freeze all but 3 output layers.
* num = (185, len(model_body.layers)-3)[freeze_body-1]
*/
__Pyx_INCREF(__pyx_v_freeze_body);
__pyx_t_9 = __pyx_v_freeze_body;
__pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_9, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_14 < 0)) __PYX_ERR(0, 122, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!__pyx_t_14) {
} else {
__pyx_t_13 = __pyx_t_14;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_9, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_14 < 0)) __PYX_ERR(0, 122, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_13 = __pyx_t_14;
__pyx_L9_bool_binop_done:;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_14 = (__pyx_t_13 != 0);
if (__pyx_t_14) {
/* "train.py":124
* if freeze_body in [1, 2]:
* # Freeze darknet53 body or freeze all but 3 output layers.
* num = (185, len(model_body.layers)-3)[freeze_body-1] # <<<<<<<<<<<<<<
* for i in range(num): model_body.layers[i].trainable = False
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyInt_FromSsize_t((__pyx_t_5 - 3)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_int_185);
__Pyx_GIVEREF(__pyx_int_185);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_int_185);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyInt_SubtractObjC(__pyx_v_freeze_body, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_3 = PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_num = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":125
* # Freeze darknet53 body or freeze all but 3 output layers.
* num = (185, len(model_body.layers)-3)[freeze_body-1]
* for i in range(num): model_body.layers[i].trainable = False # <<<<<<<<<<<<<<
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_num);
__Pyx_GIVEREF(__pyx_v_num);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_num);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(PyList_CheckExact(__pyx_t_9)) || PyTuple_CheckExact(__pyx_t_9)) {
__pyx_t_3 = __pyx_t_9; __Pyx_INCREF(__pyx_t_3); __pyx_t_5 = 0;
__pyx_t_15 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_15 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 125, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
for (;;) {
if (likely(!__pyx_t_15)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_9); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 125, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_9); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 125, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_15(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 125, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = PyObject_GetItem(__pyx_t_9, __pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__Pyx_PyObject_SetAttrStr(__pyx_t_1, __pyx_n_s_trainable, Py_False) < 0) __PYX_ERR(0, 125, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":126
* num = (185, len(model_body.layers)-3)[freeze_body-1]
* for i in range(num): model_body.layers[i].trainable = False
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) # <<<<<<<<<<<<<<
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Freeze_the_first_layers_of_total, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_11 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_11 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_11)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_11);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_num, __pyx_t_9};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_num, __pyx_t_9};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
{
__pyx_t_2 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (__pyx_t_11) {
__Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_11); __pyx_t_11 = NULL;
}
__Pyx_INCREF(__pyx_v_num);
__Pyx_GIVEREF(__pyx_v_num);
PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_12, __pyx_v_num);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_12, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__Pyx_PrintOne(0, __pyx_t_3) < 0) __PYX_ERR(0, 126, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":122
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]: # <<<<<<<<<<<<<<
* # Freeze darknet53 body or freeze all but 3 output layers.
* num = (185, len(model_body.layers)-3)[freeze_body-1]
*/
}
/* "train.py":119
* print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
* if load_pretrained: # <<<<<<<<<<<<<<
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
*/
}
/* "train.py":128
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', # <<<<<<<<<<<<<<
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
* [*model_body.output, *y_true])
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_Lambda); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_yolo_loss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_output_shape, __pyx_tuple__11) < 0) __PYX_ERR(0, 128, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_name, __pyx_n_s_yolo_loss) < 0) __PYX_ERR(0, 128, __pyx_L1_error)
/* "train.py":129
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})( # <<<<<<<<<<<<<<
* [*model_body.output, *y_true])
* model = Model([model_body.input, *y_true], model_loss)
*/
__pyx_t_11 = PyDict_New(); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_anchors, __pyx_v_anchors) < 0) __PYX_ERR(0, 129, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_num_classes, __pyx_v_num_classes) < 0) __PYX_ERR(0, 129, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_ignore_thresh, __pyx_float_0_5) < 0) __PYX_ERR(0, 129, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_arguments, __pyx_t_11) < 0) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
/* "train.py":128
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', # <<<<<<<<<<<<<<
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
* [*model_body.output, *y_true])
*/
__pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 128, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "train.py":130
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
* [*model_body.output, *y_true]) # <<<<<<<<<<<<<<
* model = Model([model_body.input, *y_true], model_loss)
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_output); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 130, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_2 = PySequence_List(__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 130, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__Pyx_PyList_Extend(__pyx_t_2, __pyx_v_y_true) < 0) __PYX_ERR(0, 130, __pyx_L1_error)
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
}
}
if (!__pyx_t_9) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_2};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_2};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
{
__pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9); __pyx_t_9 = NULL;
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model_loss = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":131
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
* [*model_body.output, *y_true])
* model = Model([model_body.input, *y_true], model_loss) # <<<<<<<<<<<<<<
*
* return model
*/
__pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_Model); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_input); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_2);
PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __pyx_t_9;
__pyx_t_9 = 0;
if (__Pyx_PyList_Extend(__pyx_t_1, __pyx_v_y_true) < 0) __PYX_ERR(0, 131, __pyx_L1_error)
__pyx_t_9 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_1, __pyx_v_model_loss};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_1, __pyx_v_model_loss};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_2 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_12, __pyx_t_1);
__Pyx_INCREF(__pyx_v_model_loss);
__Pyx_GIVEREF(__pyx_v_model_loss);
PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_12, __pyx_v_model_loss);
__pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 131, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":133
* model = Model([model_body.input, *y_true], model_loss)
*
* return model # <<<<<<<<<<<<<<
*
* def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_model);
__pyx_r = __pyx_v_model;
goto __pyx_L0;
/* "train.py":105
*
*
* def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, # <<<<<<<<<<<<<<
* weights_path='model_data/yolo_weights.h5'):
* '''create the training model'''
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("train.create_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_image_input);
__Pyx_XDECREF(__pyx_v_h);
__Pyx_XDECREF(__pyx_v_w);
__Pyx_XDECREF(__pyx_v_num_anchors);
__Pyx_XDECREF(__pyx_v_y_true);
__Pyx_XDECREF(__pyx_v_model_body);
__Pyx_XDECREF(__pyx_v_num);
__Pyx_XDECREF(__pyx_v_i);
__Pyx_XDECREF(__pyx_v_model_loss);
__Pyx_XDECREF(__pyx_v_model);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":135
* return model
*
* def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, # <<<<<<<<<<<<<<
* weights_path='model_data/tiny_yolo_weights.h5'):
* '''create the training model, for Tiny YOLOv3'''
*/
/* Python wrapper */
/*
 * Python-level wrapper for train.create_tiny_model(input_shape, anchors,
 * num_classes, load_pretrained=True, freeze_body=2,
 * weights_path='model_data/tiny_yolo_weights.h5').
 *
 * Cython-GENERATED code: do not hand-edit the logic; regenerate from
 * train.py instead. This wrapper only unpacks positional/keyword
 * arguments, fills in the three defaults, and dispatches to the real
 * implementation __pyx_pf_5train_8create_tiny_model.
 *
 * Calling convention: METH_VARARGS|METH_KEYWORDS — receives the raw
 * args tuple and kwargs dict. Arguments 0-2 (input_shape, anchors,
 * num_classes) are required; 3-5 are optional with defaults.
 */
static PyObject *__pyx_pw_5train_9create_tiny_model(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_5train_8create_tiny_model[] = "create the training model, for Tiny YOLOv3";
static PyMethodDef __pyx_mdef_5train_9create_tiny_model = {"create_tiny_model", (PyCFunction)__pyx_pw_5train_9create_tiny_model, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5train_8create_tiny_model};
static PyObject *__pyx_pw_5train_9create_tiny_model(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* Borrowed references into __pyx_args/__pyx_kwds (or module-level
 * default objects); the impl function takes ownership semantics over. */
PyObject *__pyx_v_input_shape = 0;
PyObject *__pyx_v_anchors = 0;
PyObject *__pyx_v_num_classes = 0;
PyObject *__pyx_v_load_pretrained = 0;
PyObject *__pyx_v_freeze_body = 0;
PyObject *__pyx_v_weights_path = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("create_tiny_model (wrapper)", 0);
{
/* Keyword-name interned strings, in declared parameter order;
 * NULL-terminated for __Pyx_ParseOptionalKeywords. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_input_shape,&__pyx_n_s_anchors,&__pyx_n_s_num_classes,&__pyx_n_s_load_pretrained,&__pyx_n_s_freeze_body,&__pyx_n_s_weights_path,0};
PyObject* values[6] = {0,0,0,0,0,0};
/* Pre-load the defaults for the three optional parameters:
 * load_pretrained=True, freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5'.
 * Positional/keyword arguments below overwrite these slots. */
values[3] = ((PyObject *)((PyObject *)Py_True));
values[4] = ((PyObject *)((PyObject *)__pyx_int_2));
values[5] = ((PyObject *)((PyObject*)__pyx_kp_s_model_data_tiny_yolo_weights_h5));
if (unlikely(__pyx_kwds)) {
/* Slow path: keyword arguments present. First copy any positional
 * args into `values` (the switch intentionally falls through from
 * the highest index down), then fill the rest from the kwargs dict. */
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;  /* more than 6 positional args */
}
kw_args = PyDict_Size(__pyx_kwds);
/* Resolve parameters not covered positionally, starting at index
 * pos_args (fallthrough again). Required params (0-2) raise if
 * missing; optional params (3-5) keep their preset defaults.
 * kw_args counts down so leftovers can be detected as unexpected
 * or duplicate keywords. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_input_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_anchors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("create_tiny_model", 0, 3, 6, 1); __PYX_ERR(0, 135, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_classes)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("create_tiny_model", 0, 3, 6, 2); __PYX_ERR(0, 135, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_load_pretrained);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_freeze_body);
if (value) { values[4] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 5:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_weights_path);
if (value) { values[5] = value; kw_args--; }
}
}
/* Any keywords left over are unexpected names or duplicates of
 * positional args; this helper raises the appropriate TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "create_tiny_model") < 0)) __PYX_ERR(0, 135, __pyx_L3_error)
}
} else {
/* Fast path: positional-only call. Accept 3 to 6 args; anything
 * else is an arity error. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_input_shape = values[0];
__pyx_v_anchors = values[1];
__pyx_v_num_classes = values[2];
__pyx_v_load_pretrained = values[3];
__pyx_v_freeze_body = values[4];
__pyx_v_weights_path = values[5];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong number of positional arguments: raise TypeError
 * ("takes 3 to 6 arguments") with the actual count. */
__Pyx_RaiseArgtupleInvalid("create_tiny_model", 0, 3, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 135, __pyx_L3_error)
__pyx_L3_error:;
/* Error exit: attach a synthetic traceback frame pointing at
 * train.py:135 and propagate NULL to the interpreter. */
__Pyx_AddTraceback("train.create_tiny_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Arguments fully resolved — delegate to the implementation. */
__pyx_r = __pyx_pf_5train_8create_tiny_model(__pyx_self, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes, __pyx_v_load_pretrained, __pyx_v_freeze_body, __pyx_v_weights_path);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5train_8create_tiny_model(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes, PyObject *__pyx_v_load_pretrained, PyObject *__pyx_v_freeze_body, PyObject *__pyx_v_weights_path) {
PyObject *__pyx_v_image_input = NULL;
PyObject *__pyx_v_h = NULL;
PyObject *__pyx_v_w = NULL;
PyObject *__pyx_v_num_anchors = NULL;
PyObject *__pyx_v_y_true = NULL;
PyObject *__pyx_v_model_body = NULL;
PyObject *__pyx_v_num = NULL;
PyObject *__pyx_v_i = NULL;
PyObject *__pyx_v_model_loss = NULL;
PyObject *__pyx_v_model = NULL;
long __pyx_v_l;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *(*__pyx_t_4)(PyObject *);
Py_ssize_t __pyx_t_5;
long __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
PyObject *(*__pyx_t_15)(PyObject *);
__Pyx_RefNannySetupContext("create_tiny_model", 0);
/* "train.py":138
* weights_path='model_data/tiny_yolo_weights.h5'):
* '''create the training model, for Tiny YOLOv3'''
* K.clear_session() # get a new session # <<<<<<<<<<<<<<
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_K); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_clear_session); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "train.py":139
* '''create the training model, for Tiny YOLOv3'''
* K.clear_session() # get a new session
* image_input = Input(shape=(None, None, 3)) # <<<<<<<<<<<<<<
* h, w = input_shape
* num_anchors = len(anchors)
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_Input); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_tuple__12) < 0) __PYX_ERR(0, 139, __pyx_L1_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_image_input = __pyx_t_2;
__pyx_t_2 = 0;
/* "train.py":140
* K.clear_session() # get a new session
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape # <<<<<<<<<<<<<<
* num_anchors = len(anchors)
*
*/
if ((likely(PyTuple_CheckExact(__pyx_v_input_shape))) || (PyList_CheckExact(__pyx_v_input_shape))) {
PyObject* sequence = __pyx_v_input_shape;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 140, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_2 = PyList_GET_ITEM(sequence, 0);
__pyx_t_3 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
} else {
Py_ssize_t index = -1;
__pyx_t_1 = PyObject_GetIter(__pyx_v_input_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext;
index = 0; __pyx_t_2 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
index = 1; __pyx_t_3 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed;
__Pyx_GOTREF(__pyx_t_3);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_4(__pyx_t_1), 2) < 0) __PYX_ERR(0, 140, __pyx_L1_error)
__pyx_t_4 = NULL;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L4_unpacking_done;
__pyx_L3_unpacking_failed:;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_4 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 140, __pyx_L1_error)
__pyx_L4_unpacking_done:;
}
__pyx_v_h = __pyx_t_2;
__pyx_t_2 = 0;
__pyx_v_w = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":141
* image_input = Input(shape=(None, None, 3))
* h, w = input_shape
* num_anchors = len(anchors) # <<<<<<<<<<<<<<
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
*/
__pyx_t_5 = PyObject_Length(__pyx_v_anchors); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 141, __pyx_L1_error)
__pyx_t_3 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_num_anchors = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":143
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \ # <<<<<<<<<<<<<<
* num_anchors//2, num_classes+5)) for l in range(2)]
*
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "train.py":144
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
* num_anchors//2, num_classes+5)) for l in range(2)] # <<<<<<<<<<<<<<
*
* model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
*/
for (__pyx_t_6 = 0; __pyx_t_6 < 2; __pyx_t_6+=1) {
__pyx_v_l = __pyx_t_6;
/* "train.py":143
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \ # <<<<<<<<<<<<<<
* num_anchors//2, num_classes+5)) for l in range(2)]
*
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_Input); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_int_0, __pyx_int_32) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_7, __pyx_int_1, __pyx_int_16) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
__pyx_t_8 = __Pyx_PyInt_From_long(__pyx_v_l); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = __Pyx_PyDict_GetItem(__pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PyNumber_FloorDivide(__pyx_v_h, __pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (PyDict_SetItem(__pyx_t_9, __pyx_int_0, __pyx_int_32) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_9, __pyx_int_1, __pyx_int_16) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
__pyx_t_7 = __Pyx_PyInt_From_long(__pyx_v_l); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_10 = __Pyx_PyDict_GetItem(__pyx_t_9, __pyx_t_7); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = PyNumber_FloorDivide(__pyx_v_w, __pyx_t_10); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "train.py":144
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
* num_anchors//2, num_classes+5)) for l in range(2)] # <<<<<<<<<<<<<<
*
* model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
*/
__pyx_t_10 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_num_anchors, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 144, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__pyx_t_9 = __Pyx_PyInt_AddObjC(__pyx_v_num_classes, __pyx_int_5, 5, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 144, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
/* "train.py":143
* num_anchors = len(anchors)
*
* y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \ # <<<<<<<<<<<<<<
* num_anchors//2, num_classes+5)) for l in range(2)]
*
*/
__pyx_t_11 = PyTuple_New(4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_t_10);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_11, 3, __pyx_t_9);
__pyx_t_8 = 0;
__pyx_t_7 = 0;
__pyx_t_10 = 0;
__pyx_t_9 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_shape, __pyx_t_11) < 0) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_11))) __PYX_ERR(0, 143, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
}
__pyx_v_y_true = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "train.py":146
* num_anchors//2, num_classes+5)) for l in range(2)]
*
* model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes) # <<<<<<<<<<<<<<
* print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
*/
__pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_tiny_yolo_body); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_1 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_num_anchors, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_image_input, __pyx_t_1, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 3+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[4] = {__pyx_t_2, __pyx_v_image_input, __pyx_t_1, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 3+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(3+__pyx_t_12); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_INCREF(__pyx_v_image_input);
__Pyx_GIVEREF(__pyx_v_image_input);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_12, __pyx_v_image_input);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_12, __pyx_t_1);
__Pyx_INCREF(__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_v_num_classes);
PyTuple_SET_ITEM(__pyx_t_9, 2+__pyx_t_12, __pyx_v_num_classes);
__pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model_body = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":147
*
* model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
* print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) # <<<<<<<<<<<<<<
*
* if load_pretrained:
*/
__pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Create_Tiny_YOLOv3_model_with_an, __pyx_n_s_format); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_9 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_num_anchors, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_v_num_anchors, __pyx_v_num_classes};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
{
__pyx_t_1 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_INCREF(__pyx_v_num_anchors);
__Pyx_GIVEREF(__pyx_v_num_anchors);
PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_12, __pyx_v_num_anchors);
__Pyx_INCREF(__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_v_num_classes);
PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_12, __pyx_v_num_classes);
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
if (__Pyx_PrintOne(0, __pyx_t_3) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":149
* print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
* if load_pretrained: # <<<<<<<<<<<<<<
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
*/
__pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_load_pretrained); if (unlikely(__pyx_t_13 < 0)) __PYX_ERR(0, 149, __pyx_L1_error)
if (__pyx_t_13) {
/* "train.py":150
*
* if load_pretrained:
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) # <<<<<<<<<<<<<<
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]:
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_load_weights); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_INCREF(__pyx_v_weights_path);
__Pyx_GIVEREF(__pyx_v_weights_path);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_v_weights_path);
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_by_name, Py_True) < 0) __PYX_ERR(0, 150, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_skip_mismatch, Py_True) < 0) __PYX_ERR(0, 150, __pyx_L1_error)
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 150, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "train.py":151
* if load_pretrained:
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path)) # <<<<<<<<<<<<<<
* if freeze_body in [1, 2]:
* # Freeze the darknet body or freeze all but 2 output layers.
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Load_weights, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_11 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_11 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_11)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_11);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
if (!__pyx_t_11) {
__pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_weights_path); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_v_weights_path};
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_9);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_v_weights_path};
__pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_9);
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_11); __pyx_t_11 = NULL;
__Pyx_INCREF(__pyx_v_weights_path);
__Pyx_GIVEREF(__pyx_v_weights_path);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_v_weights_path);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__Pyx_PrintOne(0, __pyx_t_9) < 0) __PYX_ERR(0, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "train.py":152
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]: # <<<<<<<<<<<<<<
* # Freeze the darknet body or freeze all but 2 output layers.
* num = (20, len(model_body.layers)-2)[freeze_body-1]
*/
__Pyx_INCREF(__pyx_v_freeze_body);
__pyx_t_9 = __pyx_v_freeze_body;
__pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_9, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_14 < 0)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!__pyx_t_14) {
} else {
__pyx_t_13 = __pyx_t_14;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_9, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_14 < 0)) __PYX_ERR(0, 152, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_13 = __pyx_t_14;
__pyx_L9_bool_binop_done:;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_14 = (__pyx_t_13 != 0);
if (__pyx_t_14) {
/* "train.py":154
* if freeze_body in [1, 2]:
* # Freeze the darknet body or freeze all but 2 output layers.
* num = (20, len(model_body.layers)-2)[freeze_body-1] # <<<<<<<<<<<<<<
* for i in range(num): model_body.layers[i].trainable = False
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyInt_FromSsize_t((__pyx_t_5 - 2)); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_int_20);
__Pyx_GIVEREF(__pyx_int_20);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_int_20);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyInt_SubtractObjC(__pyx_v_freeze_body, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_3 = PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_num = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":155
* # Freeze the darknet body or freeze all but 2 output layers.
* num = (20, len(model_body.layers)-2)[freeze_body-1]
* for i in range(num): model_body.layers[i].trainable = False # <<<<<<<<<<<<<<
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_num);
__Pyx_GIVEREF(__pyx_v_num);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_num);
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(PyList_CheckExact(__pyx_t_9)) || PyTuple_CheckExact(__pyx_t_9)) {
__pyx_t_3 = __pyx_t_9; __Pyx_INCREF(__pyx_t_3); __pyx_t_5 = 0;
__pyx_t_15 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_15 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 155, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
for (;;) {
if (likely(!__pyx_t_15)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_9); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 155, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_5); __Pyx_INCREF(__pyx_t_9); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(0, 155, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_15(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 155, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = PyObject_GetItem(__pyx_t_9, __pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__Pyx_PyObject_SetAttrStr(__pyx_t_1, __pyx_n_s_trainable, Py_False) < 0) __PYX_ERR(0, 155, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":156
* num = (20, len(model_body.layers)-2)[freeze_body-1]
* for i in range(num): model_body.layers[i].trainable = False
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) # <<<<<<<<<<<<<<
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Freeze_the_first_layers_of_total, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_layers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = PyObject_Length(__pyx_t_9); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_11 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_11 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_11)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_11);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_num, __pyx_t_9};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_num, __pyx_t_9};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else
#endif
{
__pyx_t_2 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (__pyx_t_11) {
__Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_11); __pyx_t_11 = NULL;
}
__Pyx_INCREF(__pyx_v_num);
__Pyx_GIVEREF(__pyx_v_num);
PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_12, __pyx_v_num);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_12, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__Pyx_PrintOne(0, __pyx_t_3) < 0) __PYX_ERR(0, 156, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":152
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
* if freeze_body in [1, 2]: # <<<<<<<<<<<<<<
* # Freeze the darknet body or freeze all but 2 output layers.
* num = (20, len(model_body.layers)-2)[freeze_body-1]
*/
}
/* "train.py":149
* print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
*
* if load_pretrained: # <<<<<<<<<<<<<<
* model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
* print('Load weights {}.'.format(weights_path))
*/
}
/* "train.py":158
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', # <<<<<<<<<<<<<<
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
* [*model_body.output, *y_true])
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_Lambda); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_yolo_loss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_output_shape, __pyx_tuple__13) < 0) __PYX_ERR(0, 158, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_name, __pyx_n_s_yolo_loss) < 0) __PYX_ERR(0, 158, __pyx_L1_error)
/* "train.py":159
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})( # <<<<<<<<<<<<<<
* [*model_body.output, *y_true])
* model = Model([model_body.input, *y_true], model_loss)
*/
__pyx_t_11 = PyDict_New(); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_anchors, __pyx_v_anchors) < 0) __PYX_ERR(0, 159, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_num_classes, __pyx_v_num_classes) < 0) __PYX_ERR(0, 159, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_ignore_thresh, __pyx_float_0_7) < 0) __PYX_ERR(0, 159, __pyx_L1_error)
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_arguments, __pyx_t_11) < 0) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
/* "train.py":158
* print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
*
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', # <<<<<<<<<<<<<<
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
* [*model_body.output, *y_true])
*/
__pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 158, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "train.py":160
* model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
* [*model_body.output, *y_true]) # <<<<<<<<<<<<<<
* model = Model([model_body.input, *y_true], model_loss)
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_output); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_2 = PySequence_List(__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (__Pyx_PyList_Extend(__pyx_t_2, __pyx_v_y_true) < 0) __PYX_ERR(0, 160, __pyx_L1_error)
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
}
}
if (!__pyx_t_9) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_2};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_2};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
{
__pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9); __pyx_t_9 = NULL;
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model_loss = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":161
* arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
* [*model_body.output, *y_true])
* model = Model([model_body.input, *y_true], model_loss) # <<<<<<<<<<<<<<
*
* return model
*/
__pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_Model); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_model_body, __pyx_n_s_input); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_2);
PyList_SET_ITEM(__pyx_t_9, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __pyx_t_9;
__pyx_t_9 = 0;
if (__Pyx_PyList_Extend(__pyx_t_1, __pyx_v_y_true) < 0) __PYX_ERR(0, 161, __pyx_L1_error)
__pyx_t_9 = NULL;
__pyx_t_12 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_11);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_11, function);
__pyx_t_12 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_1, __pyx_v_model_loss};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_1, __pyx_v_model_loss};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-__pyx_t_12, 2+__pyx_t_12); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_2 = PyTuple_New(2+__pyx_t_12); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_12, __pyx_t_1);
__Pyx_INCREF(__pyx_v_model_loss);
__Pyx_GIVEREF(__pyx_v_model_loss);
PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_12, __pyx_v_model_loss);
__pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__pyx_v_model = __pyx_t_3;
__pyx_t_3 = 0;
/* "train.py":163
* model = Model([model_body.input, *y_true], model_loss)
*
* return model # <<<<<<<<<<<<<<
*
* def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_model);
__pyx_r = __pyx_v_model;
goto __pyx_L0;
/* "train.py":135
* return model
*
* def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, # <<<<<<<<<<<<<<
* weights_path='model_data/tiny_yolo_weights.h5'):
* '''create the training model, for Tiny YOLOv3'''
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("train.create_tiny_model", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_image_input);
__Pyx_XDECREF(__pyx_v_h);
__Pyx_XDECREF(__pyx_v_w);
__Pyx_XDECREF(__pyx_v_num_anchors);
__Pyx_XDECREF(__pyx_v_y_true);
__Pyx_XDECREF(__pyx_v_model_body);
__Pyx_XDECREF(__pyx_v_num);
__Pyx_XDECREF(__pyx_v_i);
__Pyx_XDECREF(__pyx_v_model_loss);
__Pyx_XDECREF(__pyx_v_model);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Forward declaration of the generator body for train.data_generator();
 * the body itself is emitted further down in this file. */
static PyObject *__pyx_gb_5train_12generator(__pyx_CoroutineObject *__pyx_generator, PyObject *__pyx_sent_value); /* proto */
/* "train.py":165
* return model
*
* def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): # <<<<<<<<<<<<<<
* '''data generator for fit_generator'''
* n = len(annotation_lines)
*/
/* Python wrapper */
static PyObject *__pyx_pw_5train_11data_generator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* __doc__ for data_generator, taken from the Python-level docstring. */
static char __pyx_doc_5train_10data_generator[] = "data generator for fit_generator";
/* Method table entry: data_generator accepts positional and keyword arguments. */
static PyMethodDef __pyx_mdef_5train_11data_generator = {"data_generator", (PyCFunction)__pyx_pw_5train_11data_generator, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5train_10data_generator};
/*
 * Cython-generated Python-level wrapper for train.data_generator().
 *
 * Unpacks exactly five required arguments (annotation_lines, batch_size,
 * input_shape, anchors, num_classes), accepting any mix of positional and
 * keyword forms, then forwards them to the implementation function
 * __pyx_pf_5train_10data_generator(). Any argument-count or unknown-keyword
 * problem is reported via __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseOptionalKeywords and the wrapper returns NULL with a traceback
 * entry added for "train.data_generator".
 *
 * NOTE: this file is machine-generated by Cython; edit train.py and
 * regenerate rather than modifying this code by hand.
 */
static PyObject *__pyx_pw_5train_11data_generator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_annotation_lines = 0;
PyObject *__pyx_v_batch_size = 0;
PyObject *__pyx_v_input_shape = 0;
PyObject *__pyx_v_anchors = 0;
PyObject *__pyx_v_num_classes = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("data_generator (wrapper)", 0);
{
/* Interned name objects for keyword lookup, in declaration order. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_annotation_lines,&__pyx_n_s_batch_size,&__pyx_n_s_input_shape,&__pyx_n_s_anchors,&__pyx_n_s_num_classes,0};
/* values[] collects the five arguments; entries are borrowed references
 * (from PyTuple_GET_ITEM / PyDict_GetItem), so no DECREF is needed here. */
PyObject* values[5] = {0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
/* Slow path: keyword arguments present. First copy whatever was passed
 * positionally (the switch deliberately falls through from high to low). */
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill the remaining slots from keywords; each hit decrements kw_args.
 * A slot left unfilled means a required argument is missing -> error. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_annotation_lines)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_batch_size)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("data_generator", 1, 5, 5, 1); __PYX_ERR(0, 165, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_input_shape)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("data_generator", 1, 5, 5, 2); __PYX_ERR(0, 165, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_anchors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("data_generator", 1, 5, 5, 3); __PYX_ERR(0, 165, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_classes)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("data_generator", 1, 5, 5, 4); __PYX_ERR(0, 165, __pyx_L3_error)
}
}
/* Leftover keywords are either duplicates of positionals or unknown names;
 * __Pyx_ParseOptionalKeywords raises the appropriate TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "data_generator") < 0)) __PYX_ERR(0, 165, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly five positional arguments, no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
}
__pyx_v_annotation_lines = values[0];
__pyx_v_batch_size = values[1];
__pyx_v_input_shape = values[2];
__pyx_v_anchors = values[3];
__pyx_v_num_classes = values[4];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong positional-argument count: expected exactly 5 (min 5, max 5). */
__Pyx_RaiseArgtupleInvalid("data_generator", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 165, __pyx_L3_error)
__pyx_L3_error:;
/* Argument-unpacking failure: record the traceback frame and bail out. */
__Pyx_AddTraceback("train.data_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Delegate to the implementation, which builds and returns the generator. */
__pyx_r = __pyx_pf_5train_10data_generator(__pyx_self, __pyx_v_annotation_lines, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Implementation of train.data_generator(): constructs and returns the
 * generator object without running any of the Python-level body.
 *
 * Allocates the per-generator closure struct
 * (__pyx_obj_5train___pyx_scope_struct__data_generator), stores the five
 * arguments into it (each with a new owned reference), and wraps it in a
 * Cython generator whose resumable body is __pyx_gb_5train_12generator.
 * Returns the new generator on success, NULL (with an exception set) on
 * allocation failure.
 *
 * NOTE: machine-generated by Cython; regenerate from train.py instead of
 * hand-editing.
 */
static PyObject *__pyx_pf_5train_10data_generator(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_annotation_lines, PyObject *__pyx_v_batch_size, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes) {
struct __pyx_obj_5train___pyx_scope_struct__data_generator *__pyx_cur_scope;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("data_generator", 0);
__pyx_cur_scope = (struct __pyx_obj_5train___pyx_scope_struct__data_generator *)__pyx_tp_new_5train___pyx_scope_struct__data_generator(__pyx_ptype_5train___pyx_scope_struct__data_generator, __pyx_empty_tuple, NULL);
if (unlikely(!__pyx_cur_scope)) {
/* Allocation failed: substitute an incref'd Py_None so the unconditional
 * DECREF on the error path below stays safe, then jump to the error exit. */
__pyx_cur_scope = ((struct __pyx_obj_5train___pyx_scope_struct__data_generator *)Py_None);
__Pyx_INCREF(Py_None);
__PYX_ERR(0, 165, __pyx_L1_error)
} else {
__Pyx_GOTREF(__pyx_cur_scope);
}
/* Copy each argument into the closure; INCREF because the scope struct now
 * owns its own reference, GIVEREF to hand that reference to the struct. */
__pyx_cur_scope->__pyx_v_annotation_lines = __pyx_v_annotation_lines;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_annotation_lines);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_annotation_lines);
__pyx_cur_scope->__pyx_v_batch_size = __pyx_v_batch_size;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_batch_size);
__pyx_cur_scope->__pyx_v_input_shape = __pyx_v_input_shape;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_input_shape);
__pyx_cur_scope->__pyx_v_anchors = __pyx_v_anchors;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_anchors);
__pyx_cur_scope->__pyx_v_num_classes = __pyx_v_num_classes;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_num_classes);
{
/* Create the generator object; it takes its own reference to the closure,
 * so the local reference is released before returning the generator. */
__pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_5train_12generator, (PyObject *) __pyx_cur_scope, __pyx_n_s_data_generator, __pyx_n_s_data_generator, __pyx_n_s_train); if (unlikely(!gen)) __PYX_ERR(0, 165, __pyx_L1_error)
__Pyx_DECREF(__pyx_cur_scope);
__Pyx_RefNannyFinishContext();
return (PyObject *) gen;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("train.data_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_DECREF(((PyObject *)__pyx_cur_scope));
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_gb_5train_12generator(__pyx_CoroutineObject *__pyx_generator, PyObject *__pyx_sent_value) /* generator body */
{
struct __pyx_obj_5train___pyx_scope_struct__data_generator *__pyx_cur_scope = ((struct __pyx_obj_5train___pyx_scope_struct__data_generator *)__pyx_generator->closure);
PyObject *__pyx_r = NULL;
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *(*__pyx_t_4)(PyObject *);
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *(*__pyx_t_9)(PyObject *);
int __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("None", 0);
switch (__pyx_generator->resume_label) {
case 0: goto __pyx_L3_first_run;
case 1: goto __pyx_L11_resume_from_yield;
default: /* CPython raises the right error here */
__Pyx_RefNannyFinishContext();
return NULL;
}
__pyx_L3_first_run:;
if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 165, __pyx_L1_error)
/* "train.py":167
* def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
* '''data generator for fit_generator'''
* n = len(annotation_lines) # <<<<<<<<<<<<<<
* i = 0
* while True:
*/
__pyx_t_1 = PyObject_Length(__pyx_cur_scope->__pyx_v_annotation_lines); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 167, __pyx_L1_error)
__pyx_t_2 = PyInt_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 167, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_cur_scope->__pyx_v_n = __pyx_t_2;
__pyx_t_2 = 0;
/* "train.py":168
* '''data generator for fit_generator'''
* n = len(annotation_lines)
* i = 0 # <<<<<<<<<<<<<<
* while True:
* image_data = []
*/
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
__pyx_cur_scope->__pyx_v_i = __pyx_int_0;
/* "train.py":169
* n = len(annotation_lines)
* i = 0
* while True: # <<<<<<<<<<<<<<
* image_data = []
* box_data = []
*/
while (1) {
/* "train.py":170
* i = 0
* while True:
* image_data = [] # <<<<<<<<<<<<<<
* box_data = []
* for b in range(batch_size):
*/
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 170, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_image_data);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_image_data, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":171
* while True:
* image_data = []
* box_data = [] # <<<<<<<<<<<<<<
* for b in range(batch_size):
* if i==0:
*/
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 171, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_box_data);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_box_data, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":172
* image_data = []
* box_data = []
* for b in range(batch_size): # <<<<<<<<<<<<<<
* if i==0:
* np.random.shuffle(annotation_lines)
*/
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 172, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_cur_scope->__pyx_v_batch_size);
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 172, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
__pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_1 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_1 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 172, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 172, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 172, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 172, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
} else {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 172, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 172, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
}
} else {
__pyx_t_3 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_3)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 172, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_3);
}
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_b);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_b, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
/* "train.py":173
* box_data = []
* for b in range(batch_size):
* if i==0: # <<<<<<<<<<<<<<
* np.random.shuffle(annotation_lines)
* image, box = get_random_data(annotation_lines[i], input_shape, random=True)
*/
__pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_cur_scope->__pyx_v_i, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 173, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_5) {
/* "train.py":174
* for b in range(batch_size):
* if i==0:
* np.random.shuffle(annotation_lines) # <<<<<<<<<<<<<<
* image, box = get_random_data(annotation_lines[i], input_shape, random=True)
* image_data.append(image)
*/
__pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_random); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_shuffle); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
if (!__pyx_t_7) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_cur_scope->__pyx_v_annotation_lines); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_annotation_lines};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_annotation_lines};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else
#endif
{
__pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); __pyx_t_7 = NULL;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_annotation_lines);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_annotation_lines);
PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_cur_scope->__pyx_v_annotation_lines);
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "train.py":173
* box_data = []
* for b in range(batch_size):
* if i==0: # <<<<<<<<<<<<<<
* np.random.shuffle(annotation_lines)
* image, box = get_random_data(annotation_lines[i], input_shape, random=True)
*/
}
/* "train.py":175
* if i==0:
* np.random.shuffle(annotation_lines)
* image, box = get_random_data(annotation_lines[i], input_shape, random=True) # <<<<<<<<<<<<<<
* image_data.append(image)
* box_data.append(box)
*/
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_get_random_data); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyObject_GetItem(__pyx_cur_scope->__pyx_v_annotation_lines, __pyx_cur_scope->__pyx_v_i); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_cur_scope->__pyx_v_input_shape);
__pyx_t_6 = 0;
__pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_random, Py_True) < 0) __PYX_ERR(0, 175, __pyx_L1_error)
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_8, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_7))) || (PyList_CheckExact(__pyx_t_7))) {
PyObject* sequence = __pyx_t_7;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 175, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_6 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_8 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_6 = PyList_GET_ITEM(sequence, 0);
__pyx_t_8 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(__pyx_t_8);
#else
__pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
#endif
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_3 = PyObject_GetIter(__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_9 = Py_TYPE(__pyx_t_3)->tp_iternext;
index = 0; __pyx_t_6 = __pyx_t_9(__pyx_t_3); if (unlikely(!__pyx_t_6)) goto __pyx_L9_unpacking_failed;
__Pyx_GOTREF(__pyx_t_6);
index = 1; __pyx_t_8 = __pyx_t_9(__pyx_t_3); if (unlikely(!__pyx_t_8)) goto __pyx_L9_unpacking_failed;
__Pyx_GOTREF(__pyx_t_8);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_3), 2) < 0) __PYX_ERR(0, 175, __pyx_L1_error)
__pyx_t_9 = NULL;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L10_unpacking_done;
__pyx_L9_unpacking_failed:;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 175, __pyx_L1_error)
__pyx_L10_unpacking_done:;
}
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_image);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_image, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_box);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_box, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__pyx_t_8 = 0;
/* "train.py":176
* np.random.shuffle(annotation_lines)
* image, box = get_random_data(annotation_lines[i], input_shape, random=True)
* image_data.append(image) # <<<<<<<<<<<<<<
* box_data.append(box)
* i = (i+1) % n
*/
__pyx_t_10 = __Pyx_PyObject_Append(__pyx_cur_scope->__pyx_v_image_data, __pyx_cur_scope->__pyx_v_image); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 176, __pyx_L1_error)
/* "train.py":177
* image, box = get_random_data(annotation_lines[i], input_shape, random=True)
* image_data.append(image)
* box_data.append(box) # <<<<<<<<<<<<<<
* i = (i+1) % n
* image_data = np.array(image_data)
*/
__pyx_t_10 = __Pyx_PyObject_Append(__pyx_cur_scope->__pyx_v_box_data, __pyx_cur_scope->__pyx_v_box); if (unlikely(__pyx_t_10 == -1)) __PYX_ERR(0, 177, __pyx_L1_error)
/* "train.py":178
* image_data.append(image)
* box_data.append(box)
* i = (i+1) % n # <<<<<<<<<<<<<<
* image_data = np.array(image_data)
* box_data = np.array(box_data)
*/
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_cur_scope->__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = PyNumber_Remainder(__pyx_t_7, __pyx_cur_scope->__pyx_v_n); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 178, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_cur_scope->__pyx_v_i);
__Pyx_DECREF_SET(__pyx_cur_scope->__pyx_v_i, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__pyx_t_8 = 0;
/* "train.py":172
* image_data = []
* box_data = []
* for b in range(batch_size): # <<<<<<<<<<<<<<
* if i==0:
* np.random.shuffle(annotation_lines)
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "train.py":179
* box_data.append(box)
* i = (i+1) % n
* image_data = np.array(image_data) # <<<<<<<<<<<<<<
* box_data = np.array(box_data)
* y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
*/
__pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_array); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
if (!__pyx_t_8) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_cur_scope->__pyx_v_image_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_cur_scope->__pyx_v_image_data};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_cur_scope->__pyx_v_image_data};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
{
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_8); __pyx_t_8 = NULL;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_image_data);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_image_data);
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_cur_scope->__pyx_v_image_data);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_cur_scope->__pyx_v_image_data);
__Pyx_DECREF_SET(__pyx_cur_scope->__pyx_v_image_data, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":180
* i = (i+1) % n
* image_data = np.array(image_data)
* box_data = np.array(box_data) # <<<<<<<<<<<<<<
* y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
* yield [image_data, *y_true], np.zeros(batch_size)
*/
__pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
if (!__pyx_t_7) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_cur_scope->__pyx_v_box_data); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_box_data};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_box_data};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
{
__pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); __pyx_t_7 = NULL;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_box_data);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_box_data);
PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_cur_scope->__pyx_v_box_data);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_cur_scope->__pyx_v_box_data);
__Pyx_DECREF_SET(__pyx_cur_scope->__pyx_v_box_data, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":181
* image_data = np.array(image_data)
* box_data = np.array(box_data)
* y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) # <<<<<<<<<<<<<<
* yield [image_data, *y_true], np.zeros(batch_size)
*
*/
__pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_preprocess_true_boxes); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
__pyx_t_11 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_11 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[5] = {__pyx_t_8, __pyx_cur_scope->__pyx_v_box_data, __pyx_cur_scope->__pyx_v_input_shape, __pyx_cur_scope->__pyx_v_anchors, __pyx_cur_scope->__pyx_v_num_classes};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_11, 4+__pyx_t_11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[5] = {__pyx_t_8, __pyx_cur_scope->__pyx_v_box_data, __pyx_cur_scope->__pyx_v_input_shape, __pyx_cur_scope->__pyx_v_anchors, __pyx_cur_scope->__pyx_v_num_classes};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_11, 4+__pyx_t_11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else
#endif
{
__pyx_t_7 = PyTuple_New(4+__pyx_t_11); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_8) {
__Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); __pyx_t_8 = NULL;
}
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_box_data);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_box_data);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_11, __pyx_cur_scope->__pyx_v_box_data);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_input_shape);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_input_shape);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_11, __pyx_cur_scope->__pyx_v_input_shape);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_anchors);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_anchors);
PyTuple_SET_ITEM(__pyx_t_7, 2+__pyx_t_11, __pyx_cur_scope->__pyx_v_anchors);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_num_classes);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_num_classes);
PyTuple_SET_ITEM(__pyx_t_7, 3+__pyx_t_11, __pyx_cur_scope->__pyx_v_num_classes);
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_y_true);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_y_true, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "train.py":182
* box_data = np.array(box_data)
* y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
* yield [image_data, *y_true], np.zeros(batch_size) # <<<<<<<<<<<<<<
*
* def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
*/
__pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_image_data);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_image_data);
PyList_SET_ITEM(__pyx_t_6, 0, __pyx_cur_scope->__pyx_v_image_data);
__pyx_t_2 = __pyx_t_6;
__pyx_t_6 = 0;
if (__Pyx_PyList_Extend(__pyx_t_2, __pyx_cur_scope->__pyx_v_y_true) < 0) __PYX_ERR(0, 182, __pyx_L1_error)
__pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zeros); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_7 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
if (!__pyx_t_7) {
__pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_cur_scope->__pyx_v_batch_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_8)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_batch_size};
__pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_6);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) {
PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_cur_scope->__pyx_v_batch_size};
__pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_6);
} else
#endif
{
__pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL;
__Pyx_INCREF(__pyx_cur_scope->__pyx_v_batch_size);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_batch_size);
PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_cur_scope->__pyx_v_batch_size);
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 182, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__pyx_t_2 = 0;
__pyx_t_6 = 0;
__pyx_r = __pyx_t_8;
__pyx_t_8 = 0;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
/* return from generator, yielding value */
__pyx_generator->resume_label = 1;
return __pyx_r;
__pyx_L11_resume_from_yield:;
if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 182, __pyx_L1_error)
}
CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* "train.py":165
* return model
*
* def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): # <<<<<<<<<<<<<<
* '''data generator for fit_generator'''
* n = len(annotation_lines)
*/
/* function exit code */
PyErr_SetNone(PyExc_StopIteration);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("data_generator", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_L0:;
__Pyx_XDECREF(__pyx_r); __pyx_r = 0;
__pyx_generator->resume_label = -1;
__Pyx_Coroutine_clear((PyObject*)__pyx_generator);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "train.py":184
* yield [image_data, *y_true], np.zeros(batch_size)
*
* def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes): # <<<<<<<<<<<<<<
* n = len(annotation_lines)
* if n==0 or batch_size<=0: return None
*/
/* Python wrapper */
/* Forward declaration and PyMethodDef entry for the Python-callable
 * wrapper of train.data_generator_wrapper (METH_VARARGS|METH_KEYWORDS). */
static PyObject *__pyx_pw_5train_14data_generator_wrapper(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5train_14data_generator_wrapper = {"data_generator_wrapper", (PyCFunction)__pyx_pw_5train_14data_generator_wrapper, METH_VARARGS|METH_KEYWORDS, 0};
/* Argument-unpacking wrapper: extracts the five required arguments
 * (annotation_lines, batch_size, input_shape, anchors, num_classes) from
 * *args/**kwargs into values[] and delegates to the implementation
 * function __pyx_pf_5train_13data_generator_wrapper.  Wrong arity or an
 * unexpected keyword raises TypeError via __Pyx_RaiseArgtupleInvalid /
 * __Pyx_ParseOptionalKeywords.  Returns a new reference or NULL on error. */
static PyObject *__pyx_pw_5train_14data_generator_wrapper(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_annotation_lines = 0;
  PyObject *__pyx_v_batch_size = 0;
  PyObject *__pyx_v_input_shape = 0;
  PyObject *__pyx_v_anchors = 0;
  PyObject *__pyx_v_num_classes = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("data_generator_wrapper (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_annotation_lines,&__pyx_n_s_batch_size,&__pyx_n_s_input_shape,&__pyx_n_s_anchors,&__pyx_n_s_num_classes,0};
    PyObject* values[5] = {0,0,0,0,0};
    /* Slow path: keyword arguments present.  First copy the positionals
     * (fall-through switch fills values[] from the back), then pull the
     * remaining required names out of the keyword dict. */
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill any slot not covered by a positional from the keyword dict;
       * a missing required argument is reported with its index. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_annotation_lines)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_batch_size)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("data_generator_wrapper", 1, 5, 5, 1); __PYX_ERR(0, 184, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_input_shape)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("data_generator_wrapper", 1, 5, 5, 2); __PYX_ERR(0, 184, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_anchors)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("data_generator_wrapper", 1, 5, 5, 3); __PYX_ERR(0, 184, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_classes)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("data_generator_wrapper", 1, 5, 5, 4); __PYX_ERR(0, 184, __pyx_L3_error)
        }
      }
      /* Any keyword left over is either a duplicate or unknown. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "data_generator_wrapper") < 0)) __PYX_ERR(0, 184, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly 5 positional arguments, no keywords. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
    }
    __pyx_v_annotation_lines = values[0];
    __pyx_v_batch_size = values[1];
    __pyx_v_input_shape = values[2];
    __pyx_v_anchors = values[3];
    __pyx_v_num_classes = values[4];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("data_generator_wrapper", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 184, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("train.data_generator_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_5train_13data_generator_wrapper(__pyx_self, __pyx_v_annotation_lines, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of train.data_generator_wrapper, equivalent to:
 *
 *     def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
 *         n = len(annotation_lines)
 *         if n==0 or batch_size<=0: return None
 *         return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
 *
 * Returns a new reference (the generator object, or None), or NULL on error. */
static PyObject *__pyx_pf_5train_13data_generator_wrapper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_annotation_lines, PyObject *__pyx_v_batch_size, PyObject *__pyx_v_input_shape, PyObject *__pyx_v_anchors, PyObject *__pyx_v_num_classes) {
  Py_ssize_t __pyx_v_n;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  __Pyx_RefNannySetupContext("data_generator_wrapper", 0);
  /* "train.py":185
 *
 * def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
 *     n = len(annotation_lines)             # <<<<<<<<<<<<<<
 *     if n==0 or batch_size<=0: return None
 *     return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
 */
  __pyx_t_1 = PyObject_Length(__pyx_v_annotation_lines); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 185, __pyx_L1_error)
  __pyx_v_n = __pyx_t_1;
  /* "train.py":186
 * def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
 *     n = len(annotation_lines)
 *     if n==0 or batch_size<=0: return None             # <<<<<<<<<<<<<<
 *     return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
 *
 */
  /* Short-circuit `or`: only evaluate batch_size<=0 when n != 0. */
  __pyx_t_3 = ((__pyx_v_n == 0) != 0);
  if (!__pyx_t_3) {
  } else {
    __pyx_t_2 = __pyx_t_3;
    goto __pyx_L4_bool_binop_done;
  }
  /* batch_size is a generic PyObject*, so <= 0 goes through rich compare. */
  __pyx_t_4 = PyObject_RichCompare(__pyx_v_batch_size, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 186, __pyx_L1_error)
  __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 186, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_2 = __pyx_t_3;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
    goto __pyx_L0;
  }
  /* "train.py":187
 *     n = len(annotation_lines)
 *     if n==0 or batch_size<=0: return None
 *     return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)             # <<<<<<<<<<<<<<
 *
 * if __name__ == '__main__':
 */
  __Pyx_XDECREF(__pyx_r);
  /* Look up module-global data_generator, then call it via the fastest
   * available path (PyFunction/PyCFunction fast call, or a plain tuple
   * call as fallback).  __pyx_t_7 is 1 when a bound method's self must
   * be prepended to the argument list. */
  __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_data_generator); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 187, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = NULL;
  __pyx_t_7 = 0;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
    __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
    if (likely(__pyx_t_6)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
      __Pyx_INCREF(__pyx_t_6);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_5, function);
      __pyx_t_7 = 1;
    }
  }
  #if CYTHON_FAST_PYCALL
  if (PyFunction_Check(__pyx_t_5)) {
    PyObject *__pyx_temp[6] = {__pyx_t_6, __pyx_v_annotation_lines, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes};
    __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 5+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 187, __pyx_L1_error)
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_GOTREF(__pyx_t_4);
  } else
  #endif
  #if CYTHON_FAST_PYCCALL
  if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
    PyObject *__pyx_temp[6] = {__pyx_t_6, __pyx_v_annotation_lines, __pyx_v_batch_size, __pyx_v_input_shape, __pyx_v_anchors, __pyx_v_num_classes};
    __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_7, 5+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 187, __pyx_L1_error)
    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_GOTREF(__pyx_t_4);
  } else
  #endif
  {
    /* Generic call: pack the arguments (plus bound self, if any) into a
     * fresh tuple. */
    __pyx_t_8 = PyTuple_New(5+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 187, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_8);
    if (__pyx_t_6) {
      __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL;
    }
    __Pyx_INCREF(__pyx_v_annotation_lines);
    __Pyx_GIVEREF(__pyx_v_annotation_lines);
    PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_v_annotation_lines);
    __Pyx_INCREF(__pyx_v_batch_size);
    __Pyx_GIVEREF(__pyx_v_batch_size);
    PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_batch_size);
    __Pyx_INCREF(__pyx_v_input_shape);
    __Pyx_GIVEREF(__pyx_v_input_shape);
    PyTuple_SET_ITEM(__pyx_t_8, 2+__pyx_t_7, __pyx_v_input_shape);
    __Pyx_INCREF(__pyx_v_anchors);
    __Pyx_GIVEREF(__pyx_v_anchors);
    PyTuple_SET_ITEM(__pyx_t_8, 3+__pyx_t_7, __pyx_v_anchors);
    __Pyx_INCREF(__pyx_v_num_classes);
    __Pyx_GIVEREF(__pyx_v_num_classes);
    PyTuple_SET_ITEM(__pyx_t_8, 4+__pyx_t_7, __pyx_v_num_classes);
    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 187, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  }
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_r = __pyx_t_4;
  __pyx_t_4 = 0;
  goto __pyx_L0;
  /* "train.py":184
 * yield [image_data, *y_true], np.zeros(batch_size)
 *
 * def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):             # <<<<<<<<<<<<<<
 *     n = len(annotation_lines)
 *     if n==0 or batch_size<=0: return None
 */
  /* function exit code */
  __pyx_L1_error:;
  /* Error path: release all temporaries and record the traceback. */
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("train.data_generator_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Freelist of up to 8 recycled generator-scope structs, used to avoid a
 * heap allocation each time the data_generator generator is created. */
static struct __pyx_obj_5train___pyx_scope_struct__data_generator *__pyx_freelist_5train___pyx_scope_struct__data_generator[8];
static int __pyx_freecount_5train___pyx_scope_struct__data_generator = 0;
/* tp_new for the generator's scope struct: pop an entry off the freelist
 * when one is available and the size matches (no subclassing), zero it,
 * re-initialize the header and re-track it for GC; otherwise fall back
 * to the type's tp_alloc.  Returns NULL on allocation failure. */
static PyObject *__pyx_tp_new_5train___pyx_scope_struct__data_generator(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  PyObject *o;
  if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_5train___pyx_scope_struct__data_generator > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_5train___pyx_scope_struct__data_generator)))) {
    o = (PyObject*)__pyx_freelist_5train___pyx_scope_struct__data_generator[--__pyx_freecount_5train___pyx_scope_struct__data_generator];
    memset(o, 0, sizeof(struct __pyx_obj_5train___pyx_scope_struct__data_generator));
    (void) PyObject_INIT(o, t);
    PyObject_GC_Track(o);
  } else {
    o = (*t->tp_alloc)(t, 0);
    if (unlikely(!o)) return 0;
  }
  return o;
}
/* tp_dealloc for the generator's scope struct: untrack from GC, drop the
 * reference held in every captured-variable slot, then either push the
 * struct back onto the freelist (if there is room and the size matches)
 * or free it through the type's tp_free. */
static void __pyx_tp_dealloc_5train___pyx_scope_struct__data_generator(PyObject *o) {
  struct __pyx_obj_5train___pyx_scope_struct__data_generator *p = (struct __pyx_obj_5train___pyx_scope_struct__data_generator *)o;
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->__pyx_v_anchors);
  Py_CLEAR(p->__pyx_v_annotation_lines);
  Py_CLEAR(p->__pyx_v_b);
  Py_CLEAR(p->__pyx_v_batch_size);
  Py_CLEAR(p->__pyx_v_box);
  Py_CLEAR(p->__pyx_v_box_data);
  Py_CLEAR(p->__pyx_v_i);
  Py_CLEAR(p->__pyx_v_image);
  Py_CLEAR(p->__pyx_v_image_data);
  Py_CLEAR(p->__pyx_v_input_shape);
  Py_CLEAR(p->__pyx_v_n);
  Py_CLEAR(p->__pyx_v_num_classes);
  Py_CLEAR(p->__pyx_v_y_true);
  if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_5train___pyx_scope_struct__data_generator < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_5train___pyx_scope_struct__data_generator)))) {
    __pyx_freelist_5train___pyx_scope_struct__data_generator[__pyx_freecount_5train___pyx_scope_struct__data_generator++] = ((struct __pyx_obj_5train___pyx_scope_struct__data_generator *)o);
  } else {
    (*Py_TYPE(o)->tp_free)(o);
  }
}
/* tp_traverse for the generator's scope struct: visits every non-NULL
 * PyObject* slot so the cyclic garbage collector can find reference
 * cycles through the captured generator variables.  Propagates the first
 * non-zero visitor result. */
static int __pyx_tp_traverse_5train___pyx_scope_struct__data_generator(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_5train___pyx_scope_struct__data_generator *p = (struct __pyx_obj_5train___pyx_scope_struct__data_generator *)o;
  if (p->__pyx_v_anchors) {
    e = (*v)(p->__pyx_v_anchors, a); if (e) return e;
  }
  if (p->__pyx_v_annotation_lines) {
    e = (*v)(p->__pyx_v_annotation_lines, a); if (e) return e;
  }
  if (p->__pyx_v_b) {
    e = (*v)(p->__pyx_v_b, a); if (e) return e;
  }
  if (p->__pyx_v_batch_size) {
    e = (*v)(p->__pyx_v_batch_size, a); if (e) return e;
  }
  if (p->__pyx_v_box) {
    e = (*v)(p->__pyx_v_box, a); if (e) return e;
  }
  if (p->__pyx_v_box_data) {
    e = (*v)(p->__pyx_v_box_data, a); if (e) return e;
  }
  if (p->__pyx_v_i) {
    e = (*v)(p->__pyx_v_i, a); if (e) return e;
  }
  if (p->__pyx_v_image) {
    e = (*v)(p->__pyx_v_image, a); if (e) return e;
  }
  if (p->__pyx_v_image_data) {
    e = (*v)(p->__pyx_v_image_data, a); if (e) return e;
  }
  if (p->__pyx_v_input_shape) {
    e = (*v)(p->__pyx_v_input_shape, a); if (e) return e;
  }
  if (p->__pyx_v_n) {
    e = (*v)(p->__pyx_v_n, a); if (e) return e;
  }
  if (p->__pyx_v_num_classes) {
    e = (*v)(p->__pyx_v_num_classes, a); if (e) return e;
  }
  if (p->__pyx_v_y_true) {
    e = (*v)(p->__pyx_v_y_true, a); if (e) return e;
  }
  return 0;
}
/* tp_clear for the generator's scope struct: breaks reference cycles for
 * the GC.  Each slot is replaced with Py_None (not NULL) before the old
 * reference is dropped, so any still-running generator code that reads
 * these variables sees a valid object instead of a dangling pointer. */
static int __pyx_tp_clear_5train___pyx_scope_struct__data_generator(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_5train___pyx_scope_struct__data_generator *p = (struct __pyx_obj_5train___pyx_scope_struct__data_generator *)o;
  tmp = ((PyObject*)p->__pyx_v_anchors);
  p->__pyx_v_anchors = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_annotation_lines);
  p->__pyx_v_annotation_lines = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_b);
  p->__pyx_v_b = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_batch_size);
  p->__pyx_v_batch_size = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_box);
  p->__pyx_v_box = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_box_data);
  p->__pyx_v_box_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_i);
  p->__pyx_v_i = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_image);
  p->__pyx_v_image = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_image_data);
  p->__pyx_v_image_data = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_input_shape);
  p->__pyx_v_input_shape = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_n);
  p->__pyx_v_n = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_num_classes);
  p->__pyx_v_num_classes = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->__pyx_v_y_true);
  p->__pyx_v_y_true = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}
/* Static PyTypeObject for the data_generator scope struct.  Only the
 * lifecycle slots are populated (tp_dealloc / tp_traverse / tp_clear /
 * tp_new); the type participates in cyclic GC (Py_TPFLAGS_HAVE_GC) and
 * is never exposed to user code directly. */
static PyTypeObject __pyx_type_5train___pyx_scope_struct__data_generator = {
  PyVarObject_HEAD_INIT(0, 0)
  "train.__pyx_scope_struct__data_generator", /*tp_name*/
  sizeof(struct __pyx_obj_5train___pyx_scope_struct__data_generator), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_5train___pyx_scope_struct__data_generator, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_5train___pyx_scope_struct__data_generator, /*tp_traverse*/
  __pyx_tp_clear_5train___pyx_scope_struct__data_generator, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  0, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_5train___pyx_scope_struct__data_generator, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Module-level method table: empty (sentinel entry only). */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* PyModuleDef for the "train" extension module (Python 3 only).
 * m_size is -1: module state is kept in globals, no sub-interpreter
 * support. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
  #if PY_VERSION_HEX < 0x03020000
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
  #else
  PyModuleDef_HEAD_INIT,
  #endif
    "train",
    __pyx_k_Retrain_the_YOLO_model_for_your, /* m_doc */
    -1, /* m_size */
    __pyx_methods /* m_methods */,
    NULL, /* m_reload */
    NULL, /* m_traverse */
    NULL, /* m_clear */
    NULL /* m_free */
};
#endif
/* Interned string-constant table: every identifier and string literal
 * used by the module.  Each entry is {target slot, C literal, byte
 * length, encoding flag, is_unicode, is_str, intern} and is materialized
 * once at module init; the list is NULL-terminated. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_n_s_Adam, __pyx_k_Adam, sizeof(__pyx_k_Adam), 0, 0, 1, 1},
  {&__pyx_kp_s_Create_Tiny_YOLOv3_model_with_an, __pyx_k_Create_Tiny_YOLOv3_model_with_an, sizeof(__pyx_k_Create_Tiny_YOLOv3_model_with_an), 0, 0, 1, 0},
  {&__pyx_kp_s_Create_YOLOv3_model_with_anchors, __pyx_k_Create_YOLOv3_model_with_anchors, sizeof(__pyx_k_Create_YOLOv3_model_with_anchors), 0, 0, 1, 0},
  {&__pyx_n_s_EarlyStopping, __pyx_k_EarlyStopping, sizeof(__pyx_k_EarlyStopping), 0, 0, 1, 1},
  {&__pyx_kp_s_Freeze_the_first_layers_of_total, __pyx_k_Freeze_the_first_layers_of_total, sizeof(__pyx_k_Freeze_the_first_layers_of_total), 0, 0, 1, 0},
  {&__pyx_n_s_Input, __pyx_k_Input, sizeof(__pyx_k_Input), 0, 0, 1, 1},
  {&__pyx_n_s_K, __pyx_k_K, sizeof(__pyx_k_K), 0, 0, 1, 1},
  {&__pyx_n_s_Lambda, __pyx_k_Lambda, sizeof(__pyx_k_Lambda), 0, 0, 1, 1},
  {&__pyx_kp_s_Load_weights, __pyx_k_Load_weights, sizeof(__pyx_k_Load_weights), 0, 0, 1, 0},
  {&__pyx_n_s_Model, __pyx_k_Model, sizeof(__pyx_k_Model), 0, 0, 1, 1},
  {&__pyx_n_s_ModelCheckpoint, __pyx_k_ModelCheckpoint, sizeof(__pyx_k_ModelCheckpoint), 0, 0, 1, 1},
  {&__pyx_n_s_ReduceLROnPlateau, __pyx_k_ReduceLROnPlateau, sizeof(__pyx_k_ReduceLROnPlateau), 0, 0, 1, 1},
  {&__pyx_n_s_TensorBoard, __pyx_k_TensorBoard, sizeof(__pyx_k_TensorBoard), 0, 0, 1, 1},
  {&__pyx_kp_s_Train_on_samples_val_on_samples, __pyx_k_Train_on_samples_val_on_samples, sizeof(__pyx_k_Train_on_samples_val_on_samples), 0, 0, 1, 0},
  {&__pyx_kp_s_Unfreeze_all_of_the_layers, __pyx_k_Unfreeze_all_of_the_layers, sizeof(__pyx_k_Unfreeze_all_of_the_layers), 0, 0, 1, 0},
  {&__pyx_n_s__14, __pyx_k__14, sizeof(__pyx_k__14), 0, 0, 1, 1},
  {&__pyx_kp_s__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 0, 1, 0},
  {&__pyx_n_s_anchors, __pyx_k_anchors, sizeof(__pyx_k_anchors), 0, 0, 1, 1},
  {&__pyx_n_s_anchors_path, __pyx_k_anchors_path, sizeof(__pyx_k_anchors_path), 0, 0, 1, 1},
  {&__pyx_n_s_annotation_lines, __pyx_k_annotation_lines, sizeof(__pyx_k_annotation_lines), 0, 0, 1, 1},
  {&__pyx_n_s_annotation_path, __pyx_k_annotation_path, sizeof(__pyx_k_annotation_path), 0, 0, 1, 1},
  {&__pyx_n_s_append, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1},
  {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
  {&__pyx_n_s_arguments, __pyx_k_arguments, sizeof(__pyx_k_arguments), 0, 0, 1, 1},
  {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1},
  {&__pyx_n_s_b, __pyx_k_b, sizeof(__pyx_k_b), 0, 0, 1, 1},
  {&__pyx_n_s_batch_size, __pyx_k_batch_size, sizeof(__pyx_k_batch_size), 0, 0, 1, 1},
  {&__pyx_n_s_box, __pyx_k_box, sizeof(__pyx_k_box), 0, 0, 1, 1},
  {&__pyx_n_s_box_data, __pyx_k_box_data, sizeof(__pyx_k_box_data), 0, 0, 1, 1},
  {&__pyx_n_s_by_name, __pyx_k_by_name, sizeof(__pyx_k_by_name), 0, 0, 1, 1},
  {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
  {&__pyx_n_s_callbacks, __pyx_k_callbacks, sizeof(__pyx_k_callbacks), 0, 0, 1, 1},
  {&__pyx_n_s_checkpoint, __pyx_k_checkpoint, sizeof(__pyx_k_checkpoint), 0, 0, 1, 1},
  {&__pyx_n_s_class_names, __pyx_k_class_names, sizeof(__pyx_k_class_names), 0, 0, 1, 1},
  {&__pyx_n_s_classes_path, __pyx_k_classes_path, sizeof(__pyx_k_classes_path), 0, 0, 1, 1},
  {&__pyx_n_s_clear_session, __pyx_k_clear_session, sizeof(__pyx_k_clear_session), 0, 0, 1, 1},
  {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
  {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1},
  {&__pyx_n_s_compile, __pyx_k_compile, sizeof(__pyx_k_compile), 0, 0, 1, 1},
  {&__pyx_n_s_create_model, __pyx_k_create_model, sizeof(__pyx_k_create_model), 0, 0, 1, 1},
  {&__pyx_n_s_create_tiny_model, __pyx_k_create_tiny_model, sizeof(__pyx_k_create_tiny_model), 0, 0, 1, 1},
  {&__pyx_n_s_data_generator, __pyx_k_data_generator, sizeof(__pyx_k_data_generator), 0, 0, 1, 1},
  {&__pyx_n_s_data_generator_wrapper, __pyx_k_data_generator_wrapper, sizeof(__pyx_k_data_generator_wrapper), 0, 0, 1, 1},
  {&__pyx_n_s_early_stopping, __pyx_k_early_stopping, sizeof(__pyx_k_early_stopping), 0, 0, 1, 1},
  {&__pyx_n_s_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 0, 1, 1},
  {&__pyx_n_s_enter, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1},
  {&__pyx_kp_s_ep_epoch_03d_loss_loss_3f_val_lo, __pyx_k_ep_epoch_03d_loss_loss_3f_val_lo, sizeof(__pyx_k_ep_epoch_03d_loss_loss_3f_val_lo), 0, 0, 1, 0},
  {&__pyx_n_s_epochs, __pyx_k_epochs, sizeof(__pyx_k_epochs), 0, 0, 1, 1},
  {&__pyx_n_s_exit, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1},
  {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1},
  {&__pyx_n_s_factor, __pyx_k_factor, sizeof(__pyx_k_factor), 0, 0, 1, 1},
  {&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1},
  {&__pyx_n_s_fit_generator, __pyx_k_fit_generator, sizeof(__pyx_k_fit_generator), 0, 0, 1, 1},
  {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
  {&__pyx_n_s_freeze_body, __pyx_k_freeze_body, sizeof(__pyx_k_freeze_body), 0, 0, 1, 1},
  {&__pyx_n_s_get_anchors, __pyx_k_get_anchors, sizeof(__pyx_k_get_anchors), 0, 0, 1, 1},
  {&__pyx_n_s_get_classes, __pyx_k_get_classes, sizeof(__pyx_k_get_classes), 0, 0, 1, 1},
  {&__pyx_n_s_get_random_data, __pyx_k_get_random_data, sizeof(__pyx_k_get_random_data), 0, 0, 1, 1},
  {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1},
  {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
  {&__pyx_n_s_ignore_thresh, __pyx_k_ignore_thresh, sizeof(__pyx_k_ignore_thresh), 0, 0, 1, 1},
  {&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1},
  {&__pyx_n_s_image_data, __pyx_k_image_data, sizeof(__pyx_k_image_data), 0, 0, 1, 1},
  {&__pyx_n_s_image_input, __pyx_k_image_input, sizeof(__pyx_k_image_input), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_initial_epoch, __pyx_k_initial_epoch, sizeof(__pyx_k_initial_epoch), 0, 0, 1, 1},
  {&__pyx_n_s_input, __pyx_k_input, sizeof(__pyx_k_input), 0, 0, 1, 1},
  {&__pyx_n_s_input_shape, __pyx_k_input_shape, sizeof(__pyx_k_input_shape), 0, 0, 1, 1},
  {&__pyx_n_s_is_tiny_version, __pyx_k_is_tiny_version, sizeof(__pyx_k_is_tiny_version), 0, 0, 1, 1},
  {&__pyx_n_s_keras_backend, __pyx_k_keras_backend, sizeof(__pyx_k_keras_backend), 0, 0, 1, 1},
  {&__pyx_n_s_keras_callbacks, __pyx_k_keras_callbacks, sizeof(__pyx_k_keras_callbacks), 0, 0, 1, 1},
  {&__pyx_n_s_keras_layers, __pyx_k_keras_layers, sizeof(__pyx_k_keras_layers), 0, 0, 1, 1},
  {&__pyx_n_s_keras_models, __pyx_k_keras_models, sizeof(__pyx_k_keras_models), 0, 0, 1, 1},
  {&__pyx_n_s_keras_optimizers, __pyx_k_keras_optimizers, sizeof(__pyx_k_keras_optimizers), 0, 0, 1, 1},
  {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1},
  {&__pyx_n_s_layers, __pyx_k_layers, sizeof(__pyx_k_layers), 0, 0, 1, 1},
  {&__pyx_n_s_lines, __pyx_k_lines, sizeof(__pyx_k_lines), 0, 0, 1, 1},
  {&__pyx_n_s_load_pretrained, __pyx_k_load_pretrained, sizeof(__pyx_k_load_pretrained), 0, 0, 1, 1},
  {&__pyx_n_s_load_weights, __pyx_k_load_weights, sizeof(__pyx_k_load_weights), 0, 0, 1, 1},
  {&__pyx_n_s_log_dir, __pyx_k_log_dir, sizeof(__pyx_k_log_dir), 0, 0, 1, 1},
  {&__pyx_n_s_logging, __pyx_k_logging, sizeof(__pyx_k_logging), 0, 0, 1, 1},
  {&__pyx_kp_s_logs_000, __pyx_k_logs_000, sizeof(__pyx_k_logs_000), 0, 0, 1, 0},
  {&__pyx_n_s_loss, __pyx_k_loss, sizeof(__pyx_k_loss), 0, 0, 1, 1},
  {&__pyx_n_s_lr, __pyx_k_lr, sizeof(__pyx_k_lr), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_main_2, __pyx_k_main_2, sizeof(__pyx_k_main_2), 0, 0, 1, 1},
  {&__pyx_n_s_main_locals_lambda, __pyx_k_main_locals_lambda, sizeof(__pyx_k_main_locals_lambda), 0, 0, 1, 1},
  {&__pyx_n_s_min_delta, __pyx_k_min_delta, sizeof(__pyx_k_min_delta), 0, 0, 1, 1},
  {&__pyx_n_s_model, __pyx_k_model, sizeof(__pyx_k_model), 0, 0, 1, 1},
  {&__pyx_n_s_model_body, __pyx_k_model_body, sizeof(__pyx_k_model_body), 0, 0, 1, 1},
  {&__pyx_kp_s_model_data_head_class_txt, __pyx_k_model_data_head_class_txt, sizeof(__pyx_k_model_data_head_class_txt), 0, 0, 1, 0},
  {&__pyx_kp_s_model_data_tiny_yolo_anchors_txt, __pyx_k_model_data_tiny_yolo_anchors_txt, sizeof(__pyx_k_model_data_tiny_yolo_anchors_txt), 0, 0, 1, 0},
  {&__pyx_kp_s_model_data_tiny_yolo_weights_h5, __pyx_k_model_data_tiny_yolo_weights_h5, sizeof(__pyx_k_model_data_tiny_yolo_weights_h5), 0, 0, 1, 0},
  {&__pyx_kp_s_model_data_train_txt, __pyx_k_model_data_train_txt, sizeof(__pyx_k_model_data_train_txt), 0, 0, 1, 0},
  {&__pyx_kp_s_model_data_yolo_weights_h5, __pyx_k_model_data_yolo_weights_h5, sizeof(__pyx_k_model_data_yolo_weights_h5), 0, 0, 1, 0},
  {&__pyx_n_s_model_loss, __pyx_k_model_loss, sizeof(__pyx_k_model_loss), 0, 0, 1, 1},
  {&__pyx_n_s_monitor, __pyx_k_monitor, sizeof(__pyx_k_monitor), 0, 0, 1, 1},
  {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
  {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_num, __pyx_k_num, sizeof(__pyx_k_num), 0, 0, 1, 1},
  {&__pyx_n_s_num_anchors, __pyx_k_num_anchors, sizeof(__pyx_k_num_anchors), 0, 0, 1, 1},
  {&__pyx_n_s_num_classes, __pyx_k_num_classes, sizeof(__pyx_k_num_classes), 0, 0, 1, 1},
  {&__pyx_n_s_num_train, __pyx_k_num_train, sizeof(__pyx_k_num_train), 0, 0, 1, 1},
  {&__pyx_n_s_num_val, __pyx_k_num_val, sizeof(__pyx_k_num_val), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_open, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1},
  {&__pyx_n_s_optimizer, __pyx_k_optimizer, sizeof(__pyx_k_optimizer), 0, 0, 1, 1},
  {&__pyx_n_s_output, __pyx_k_output, sizeof(__pyx_k_output), 0, 0, 1, 1},
  {&__pyx_n_s_output_shape, __pyx_k_output_shape, sizeof(__pyx_k_output_shape), 0, 0, 1, 1},
  {&__pyx_n_s_patience, __pyx_k_patience, sizeof(__pyx_k_patience), 0, 0, 1, 1},
  {&__pyx_n_s_period, __pyx_k_period, sizeof(__pyx_k_period), 0, 0, 1, 1},
  {&__pyx_n_s_preprocess_true_boxes, __pyx_k_preprocess_true_boxes, sizeof(__pyx_k_preprocess_true_boxes), 0, 0, 1, 1},
  {&__pyx_n_s_print, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1},
  {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_readline, __pyx_k_readline, sizeof(__pyx_k_readline), 0, 0, 1, 1},
  {&__pyx_n_s_readlines, __pyx_k_readlines, sizeof(__pyx_k_readlines), 0, 0, 1, 1},
  {&__pyx_n_s_reduce_lr, __pyx_k_reduce_lr, sizeof(__pyx_k_reduce_lr), 0, 0, 1, 1},
  {&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1},
  {&__pyx_n_s_save_best_only, __pyx_k_save_best_only, sizeof(__pyx_k_save_best_only), 0, 0, 1, 1},
  {&__pyx_n_s_save_weights, __pyx_k_save_weights, sizeof(__pyx_k_save_weights), 0, 0, 1, 1},
  {&__pyx_n_s_save_weights_only, __pyx_k_save_weights_only, sizeof(__pyx_k_save_weights_only), 0, 0, 1, 1},
  {&__pyx_n_s_seed, __pyx_k_seed, sizeof(__pyx_k_seed), 0, 0, 1, 1},
  {&__pyx_n_s_send, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1},
  {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
  {&__pyx_n_s_shuffle, __pyx_k_shuffle, sizeof(__pyx_k_shuffle), 0, 0, 1, 1},
  {&__pyx_n_s_skip_mismatch, __pyx_k_skip_mismatch, sizeof(__pyx_k_skip_mismatch), 0, 0, 1, 1},
  {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1},
  {&__pyx_n_s_steps_per_epoch, __pyx_k_steps_per_epoch, sizeof(__pyx_k_steps_per_epoch), 0, 0, 1, 1},
  {&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_n_s_throw, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1},
  {&__pyx_n_s_tiny_yolo_body, __pyx_k_tiny_yolo_body, sizeof(__pyx_k_tiny_yolo_body), 0, 0, 1, 1},
  {&__pyx_n_s_train, __pyx_k_train, sizeof(__pyx_k_train), 0, 0, 1, 1},
  {&__pyx_kp_s_train_py, __pyx_k_train_py, sizeof(__pyx_k_train_py), 0, 0, 1, 0},
  {&__pyx_n_s_trainable, __pyx_k_trainable, sizeof(__pyx_k_trainable), 0, 0, 1, 1},
  {&__pyx_kp_s_trained_weights_final_h5, __pyx_k_trained_weights_final_h5, sizeof(__pyx_k_trained_weights_final_h5), 0, 0, 1, 0},
  {&__pyx_kp_s_trained_weights_stage_1_h5, __pyx_k_trained_weights_stage_1_h5, sizeof(__pyx_k_trained_weights_stage_1_h5), 0, 0, 1, 0},
  {&__pyx_n_s_val_loss, __pyx_k_val_loss, sizeof(__pyx_k_val_loss), 0, 0, 1, 1},
  {&__pyx_n_s_val_split, __pyx_k_val_split, sizeof(__pyx_k_val_split), 0, 0, 1, 1},
  {&__pyx_n_s_validation_data, __pyx_k_validation_data, sizeof(__pyx_k_validation_data), 0, 0, 1, 1},
  {&__pyx_n_s_validation_steps, __pyx_k_validation_steps, sizeof(__pyx_k_validation_steps), 0, 0, 1, 1},
  {&__pyx_n_s_verbose, __pyx_k_verbose, sizeof(__pyx_k_verbose), 0, 0, 1, 1},
  {&__pyx_n_s_w, __pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1},
  {&__pyx_n_s_weights_path, __pyx_k_weights_path, sizeof(__pyx_k_weights_path), 0, 0, 1, 1},
  {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1},
  {&__pyx_n_s_y_pred, __pyx_k_y_pred, sizeof(__pyx_k_y_pred), 0, 0, 1, 1},
  {&__pyx_n_s_y_true, __pyx_k_y_true, sizeof(__pyx_k_y_true), 0, 0, 1, 1},
  {&__pyx_n_s_yolo3_model, __pyx_k_yolo3_model, sizeof(__pyx_k_yolo3_model), 0, 0, 1, 1},
  {&__pyx_n_s_yolo3_utils, __pyx_k_yolo3_utils, sizeof(__pyx_k_yolo3_utils), 0, 0, 1, 1},
  {&__pyx_n_s_yolo_body, __pyx_k_yolo_body, sizeof(__pyx_k_yolo_body), 0, 0, 1, 1},
  {&__pyx_n_s_yolo_loss, __pyx_k_yolo_loss, sizeof(__pyx_k_yolo_loss), 0, 0, 1, 1},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/*
 * __Pyx_InitCachedBuiltins -- look up and cache the Python builtins that the
 * generated module code calls (train.py uses `open` at its line 42 and
 * `range` at its line 71, per the __PYX_ERR line numbers below).
 *
 * Returns 0 on success, or -1 with a Python exception set on failure
 * (__PYX_ERR records the failing file/line and jumps to __pyx_L1_error).
 */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_open = __Pyx_GetBuiltinName(__pyx_n_s_open); if (!__pyx_builtin_open) __PYX_ERR(0, 42, __pyx_L1_error)
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 71, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/*
 * __Pyx_InitCachedConstants -- build and cache the constant Python objects
 * used throughout the module: literal tuples (e.g. the (416, 416) input
 * shape), single-argument call tuples, default-argument tuples for
 * create_model()/create_tiny_model(), and one code object per Python-level
 * function in train.py (_main, get_classes, get_anchors, create_model,
 * create_tiny_model, data_generator, data_generator_wrapper).
 *
 * Each /* "train.py":N *​/ comment block quotes the source line the constant
 * belongs to.  Returns 0 on success, or -1 with a Python exception set on
 * failure (__PYX_ERR jumps to __pyx_L1_error).
 */
static int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  /* "train.py":25
 * anchors = get_anchors(anchors_path)
 *
 * input_shape = (416,416) # multiple of 32, hw             # <<<<<<<<<<<<<<
 *
 * is_tiny_version = len(anchors)==6 # default setting
 */
  __pyx_tuple_ = PyTuple_Pack(2, __pyx_int_416, __pyx_int_416); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 25, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
  /* "train.py":42
 *
 *     val_split = 0.1
 *     with open(annotation_path) as f:             # <<<<<<<<<<<<<<
 *         lines = f.readlines()
 *     np.random.seed(10101)
 */
  /* NOTE(review): the 3 x Py_None tuples below are presumably the
   * "no exception" argument triple passed to the context manager's
   * __exit__ in the `with` blocks -- standard Cython pattern; confirm
   * against the generated call sites. */
  __pyx_tuple__2 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 42, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  /* "train.py":44
 *     with open(annotation_path) as f:
 *         lines = f.readlines()
 *     np.random.seed(10101)             # <<<<<<<<<<<<<<
 *     np.random.shuffle(lines)
 *     np.random.seed(None)
 */
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_int_10101); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 44, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
  /* "train.py":46
 *     np.random.seed(10101)
 *     np.random.shuffle(lines)
 *     np.random.seed(None)             # <<<<<<<<<<<<<<
 *     num_val = int(len(lines)*val_split)
 *     num_train = len(lines) - num_val
 */
  __pyx_tuple__4 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  /* "train.py":92
 * def get_classes(classes_path):
 *     '''loads the classes'''
 *     with open(classes_path) as f:             # <<<<<<<<<<<<<<
 *         class_names = f.readlines()
 *     class_names = [c.strip() for c in class_names]
 */
  __pyx_tuple__5 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 92, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  /* "train.py":99
 * def get_anchors(anchors_path):
 *     '''loads the anchors from a file'''
 *     with open(anchors_path) as f:             # <<<<<<<<<<<<<<
 *         anchors = f.readline()
 *     anchors = [float(x) for x in anchors.split(',')]
 */
  __pyx_tuple__6 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 99, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
  /* "train.py":101
 *     with open(anchors_path) as f:
 *         anchors = f.readline()
 *     anchors = [float(x) for x in anchors.split(',')]             # <<<<<<<<<<<<<<
 *     return np.array(anchors).reshape(-1, 2)
 *
 */
  /* argument tuple for str.split(',') -- __pyx_kp_s__7 is the "," literal */
  __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s__7); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 101, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__8);
  __Pyx_GIVEREF(__pyx_tuple__8);
  /* "train.py":102
 *         anchors = f.readline()
 *     anchors = [float(x) for x in anchors.split(',')]
 *     return np.array(anchors).reshape(-1, 2)             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_tuple__9 = PyTuple_Pack(2, __pyx_int_neg_1, __pyx_int_2); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 102, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_GIVEREF(__pyx_tuple__9);
  /* "train.py":109
 *     '''create the training model'''
 *     K.clear_session() # get a new session
 *     image_input = Input(shape=(None, None, 3))             # <<<<<<<<<<<<<<
 *     h, w = input_shape
 *     num_anchors = len(anchors)
 */
  __pyx_tuple__10 = PyTuple_Pack(3, Py_None, Py_None, __pyx_int_3); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 109, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__10);
  __Pyx_GIVEREF(__pyx_tuple__10);
  /* "train.py":128
 *     print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
 *
 *     model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',             # <<<<<<<<<<<<<<
 *         arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
 *         [*model_body.output, *y_true])
 */
  __pyx_tuple__11 = PyTuple_Pack(1, __pyx_int_1); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 128, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__11);
  __Pyx_GIVEREF(__pyx_tuple__11);
  /* "train.py":139
 *     '''create the training model, for Tiny YOLOv3'''
 *     K.clear_session() # get a new session
 *     image_input = Input(shape=(None, None, 3))             # <<<<<<<<<<<<<<
 *     h, w = input_shape
 *     num_anchors = len(anchors)
 */
  __pyx_tuple__12 = PyTuple_Pack(3, Py_None, Py_None, __pyx_int_3); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 139, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__12);
  __Pyx_GIVEREF(__pyx_tuple__12);
  /* "train.py":158
 *     print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
 *
 *     model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',             # <<<<<<<<<<<<<<
 *         arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
 *         [*model_body.output, *y_true])
 */
  __pyx_tuple__13 = PyTuple_Pack(1, __pyx_int_1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
  /* From here on: one varnames-tuple + one code object per train.py function. */
  /* "train.py":16
 *
 *
 * def _main():             # <<<<<<<<<<<<<<
 *     annotation_path = 'model_data/train.txt'
 *     log_dir = 'logs/000/'
 */
  __pyx_tuple__15 = PyTuple_Pack(21, __pyx_n_s_annotation_path, __pyx_n_s_log_dir, __pyx_n_s_classes_path, __pyx_n_s_anchors_path, __pyx_n_s_class_names, __pyx_n_s_num_classes, __pyx_n_s_anchors, __pyx_n_s_input_shape, __pyx_n_s_is_tiny_version, __pyx_n_s_model, __pyx_n_s_logging, __pyx_n_s_checkpoint, __pyx_n_s_reduce_lr, __pyx_n_s_early_stopping, __pyx_n_s_val_split, __pyx_n_s_f, __pyx_n_s_lines, __pyx_n_s_num_val, __pyx_n_s_num_train, __pyx_n_s_batch_size, __pyx_n_s_i); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__15);
  __Pyx_GIVEREF(__pyx_tuple__15);
  __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(0, 0, 21, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_main_2, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 16, __pyx_L1_error)
  /* "train.py":90
 *
 *
 * def get_classes(classes_path):             # <<<<<<<<<<<<<<
 *     '''loads the classes'''
 *     with open(classes_path) as f:
 */
  __pyx_tuple__17 = PyTuple_Pack(4, __pyx_n_s_classes_path, __pyx_n_s_f, __pyx_n_s_class_names, __pyx_n_s_c); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 90, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__17);
  __Pyx_GIVEREF(__pyx_tuple__17);
  __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_get_classes, 90, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 90, __pyx_L1_error)
  /* "train.py":97
 *     return class_names
 *
 * def get_anchors(anchors_path):             # <<<<<<<<<<<<<<
 *     '''loads the anchors from a file'''
 *     with open(anchors_path) as f:
 */
  __pyx_tuple__19 = PyTuple_Pack(4, __pyx_n_s_anchors_path, __pyx_n_s_f, __pyx_n_s_anchors, __pyx_n_s_x); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__19);
  __Pyx_GIVEREF(__pyx_tuple__19);
  __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_get_anchors, 97, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 97, __pyx_L1_error)
  /* "train.py":105
 *
 *
 * def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,             # <<<<<<<<<<<<<<
 *             weights_path='model_data/yolo_weights.h5'):
 *     '''create the training model'''
 */
  __pyx_tuple__21 = PyTuple_Pack(17, __pyx_n_s_input_shape, __pyx_n_s_anchors, __pyx_n_s_num_classes, __pyx_n_s_load_pretrained, __pyx_n_s_freeze_body, __pyx_n_s_weights_path, __pyx_n_s_image_input, __pyx_n_s_h, __pyx_n_s_w, __pyx_n_s_num_anchors, __pyx_n_s_y_true, __pyx_n_s_model_body, __pyx_n_s_num, __pyx_n_s_i, __pyx_n_s_model_loss, __pyx_n_s_model, __pyx_n_s_l); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__21);
  __Pyx_GIVEREF(__pyx_tuple__21);
  __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(6, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_create_model, 105, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 105, __pyx_L1_error)
  /* default-argument tuple (True, 2, 'model_data/yolo_weights.h5') for create_model */
  __pyx_tuple__23 = PyTuple_Pack(3, ((PyObject *)Py_True), ((PyObject *)__pyx_int_2), ((PyObject*)__pyx_kp_s_model_data_yolo_weights_h5)); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__23);
  __Pyx_GIVEREF(__pyx_tuple__23);
  /* "train.py":135
 *     return model
 *
 * def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,             # <<<<<<<<<<<<<<
 *             weights_path='model_data/tiny_yolo_weights.h5'):
 *     '''create the training model, for Tiny YOLOv3'''
 */
  __pyx_tuple__24 = PyTuple_Pack(17, __pyx_n_s_input_shape, __pyx_n_s_anchors, __pyx_n_s_num_classes, __pyx_n_s_load_pretrained, __pyx_n_s_freeze_body, __pyx_n_s_weights_path, __pyx_n_s_image_input, __pyx_n_s_h, __pyx_n_s_w, __pyx_n_s_num_anchors, __pyx_n_s_y_true, __pyx_n_s_model_body, __pyx_n_s_num, __pyx_n_s_i, __pyx_n_s_model_loss, __pyx_n_s_model, __pyx_n_s_l); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__24);
  __Pyx_GIVEREF(__pyx_tuple__24);
  __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(6, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_create_tiny_model, 135, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 135, __pyx_L1_error)
  /* default-argument tuple (True, 2, 'model_data/tiny_yolo_weights.h5') for create_tiny_model */
  __pyx_tuple__26 = PyTuple_Pack(3, ((PyObject *)Py_True), ((PyObject *)__pyx_int_2), ((PyObject*)__pyx_kp_s_model_data_tiny_yolo_weights_h5)); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__26);
  __Pyx_GIVEREF(__pyx_tuple__26);
  /* "train.py":165
 *     return model
 *
 * def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):             # <<<<<<<<<<<<<<
 *     '''data generator for fit_generator'''
 *     n = len(annotation_lines)
 */
  __pyx_tuple__27 = PyTuple_Pack(13, __pyx_n_s_annotation_lines, __pyx_n_s_batch_size, __pyx_n_s_input_shape, __pyx_n_s_anchors, __pyx_n_s_num_classes, __pyx_n_s_n, __pyx_n_s_i, __pyx_n_s_image_data, __pyx_n_s_box_data, __pyx_n_s_b, __pyx_n_s_image, __pyx_n_s_box, __pyx_n_s_y_true); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__27);
  __Pyx_GIVEREF(__pyx_tuple__27);
  __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(5, 0, 13, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_data_generator, 165, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 165, __pyx_L1_error)
  /* "train.py":184
 *         yield [image_data, *y_true], np.zeros(batch_size)
 *
 * def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):             # <<<<<<<<<<<<<<
 *     n = len(annotation_lines)
 *     if n==0 or batch_size<=0: return None
 */
  __pyx_tuple__29 = PyTuple_Pack(6, __pyx_n_s_annotation_lines, __pyx_n_s_batch_size, __pyx_n_s_input_shape, __pyx_n_s_anchors, __pyx_n_s_num_classes, __pyx_n_s_n); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__29);
  __Pyx_GIVEREF(__pyx_tuple__29);
  __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_train_py, __pyx_n_s_data_generator_wrapper, 184, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/*
 * __Pyx_InitGlobals -- intern the module's string table (__pyx_string_tab)
 * and pre-create the small int/float constant objects referenced by the
 * generated code (learning rates 1e-3/1e-4, the 0.5/0.7 ignore thresholds,
 * the 416 input size, the 10101 RNG seed, etc.).
 *
 * Returns 0 on success, or -1 with a Python exception set on failure.
 */
static int __Pyx_InitGlobals(void) {
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_float_0_1 = PyFloat_FromDouble(0.1); if (unlikely(!__pyx_float_0_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_float_0_7 = PyFloat_FromDouble(0.7); if (unlikely(!__pyx_float_0_7)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_float_1eneg_3 = PyFloat_FromDouble(1e-3); if (unlikely(!__pyx_float_1eneg_3)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_float_1eneg_4 = PyFloat_FromDouble(1e-4); if (unlikely(!__pyx_float_1eneg_4)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_16 = PyInt_FromLong(16); if (unlikely(!__pyx_int_16)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_20 = PyInt_FromLong(20); if (unlikely(!__pyx_int_20)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_32 = PyInt_FromLong(32); if (unlikely(!__pyx_int_32)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_50 = PyInt_FromLong(50); if (unlikely(!__pyx_int_50)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_100 = PyInt_FromLong(100); if (unlikely(!__pyx_int_100)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_185 = PyInt_FromLong(185); if (unlikely(!__pyx_int_185)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_416 = PyInt_FromLong(416); if (unlikely(!__pyx_int_416)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_10101 = PyInt_FromLong(10101L); if (unlikely(!__pyx_int_10101)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/*
 * Module initialisation entry point for the Cython-generated "train"
 * extension module (compiled from train.py).  Exposed as inittrain() on
 * Python 2 and PyInit_train() on Python 3.
 *
 * In order it: imports the refnanny debug API (if enabled), creates the
 * module object, interns strings and constants, registers the module in
 * sys.modules, readies the data_generator closure-scope type, performs the
 * module-level imports from train.py (numpy, keras.*, yolo3.*), binds the
 * seven module functions, and finally executes the
 * `if __name__ == '__main__': _main()` guard.
 *
 * On failure every temporary is released, a traceback frame is appended,
 * and NULL is returned (Python 3) / the error is left set (Python 2).
 */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC inittrain(void); /*proto*/
PyMODINIT_FUNC inittrain(void)
#else
PyMODINIT_FUNC PyInit_train(void); /*proto*/
PyMODINIT_FUNC PyInit_train(void)
#endif
{
  /* __pyx_t_* are the usual Cython scratch temporaries; each is owned
   * (GOTREF'd) only between its assignment and the matching DECREF. */
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  __Pyx_RefNannyDeclarations
  #if CYTHON_REFNANNY
  /* Prefer the standalone "refnanny" module, fall back to the one shipped
   * inside Cython; this is a debug-build-only reference-count checker. */
  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
  if (!__Pyx_RefNanny) {
      PyErr_Clear();
      __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
      if (!__Pyx_RefNanny)
          Py_FatalError("failed to import 'refnanny' module");
  }
  #endif
  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_train(void)", 0);
  if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
  #ifdef __Pyx_CyFunction_USED
  if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_FusedFunction_USED
  if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_Coroutine_USED
  if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_Generator_USED
  if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #ifdef __Pyx_StopAsyncIteration_USED
  if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  /*--- Library function declarations ---*/
  /*--- Threads initialization code ---*/
  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
  #ifdef WITH_THREAD /* Python build with threading support? */
  PyEval_InitThreads();
  #endif
  #endif
  /*--- Module creation code ---*/
  #if PY_MAJOR_VERSION < 3
  __pyx_m = Py_InitModule4("train", __pyx_methods, __pyx_k_Retrain_the_YOLO_model_for_your, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
  #else
  __pyx_m = PyModule_Create(&__pyx_moduledef);
  #endif
  if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_d);
  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
  #if CYTHON_COMPILING_IN_PYPY
  Py_INCREF(__pyx_b);
  #endif
  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  /*--- Initialize various global constants etc. ---*/
  if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
  if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  /* When the module is embedded/run as the main program, present it as
   * "__main__" so the __name__ guard at the bottom fires. */
  if (__pyx_module_is_main_train) {
    if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  }
  #if PY_MAJOR_VERSION >= 3
  {
    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
    if (!PyDict_GetItemString(modules, "train")) {
      if (unlikely(PyDict_SetItemString(modules, "train", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
    }
  }
  #endif
  /*--- Builtin init code ---*/
  if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Constants init code ---*/
  if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Global init code ---*/
  /*--- Variable export code ---*/
  /*--- Function export code ---*/
  /*--- Type init code ---*/
  /* Scope struct that carries data_generator's locals across `yield`s. */
  if (PyType_Ready(&__pyx_type_5train___pyx_scope_struct__data_generator) < 0) __PYX_ERR(0, 165, __pyx_L1_error)
  __pyx_type_5train___pyx_scope_struct__data_generator.tp_print = 0;
  __pyx_ptype_5train___pyx_scope_struct__data_generator = &__pyx_type_5train___pyx_scope_struct__data_generator;
  /*--- Type import code ---*/
  /*--- Variable import code ---*/
  /*--- Function import code ---*/
  /*--- Execution code ---*/
  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
  if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  /* Module-level imports from train.py (numpy, keras.*, yolo3.*) follow;
   * each import stores the resulting name into the module dict __pyx_d. */
  /* "train.py":5
 * """
 *
 * import numpy as np             # <<<<<<<<<<<<<<
 * import keras.backend as K
 * from keras.layers import Input, Lambda
 */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 5, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "train.py":6
 *
 * import numpy as np
 * import keras.backend as K             # <<<<<<<<<<<<<<
 * from keras.layers import Input, Lambda
 * from keras.models import Model
 */
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_n_s__14);
  __Pyx_GIVEREF(__pyx_n_s__14);
  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__14);
  __pyx_t_2 = __Pyx_Import(__pyx_n_s_keras_backend, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_K, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":7
 * import numpy as np
 * import keras.backend as K
 * from keras.layers import Input, Lambda             # <<<<<<<<<<<<<<
 * from keras.models import Model
 * from keras.optimizers import Adam
 */
  __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(__pyx_n_s_Input);
  __Pyx_GIVEREF(__pyx_n_s_Input);
  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Input);
  __Pyx_INCREF(__pyx_n_s_Lambda);
  __Pyx_GIVEREF(__pyx_n_s_Lambda);
  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_Lambda);
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_keras_layers, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Input); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Input, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Lambda); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Lambda, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "train.py":8
 * import keras.backend as K
 * from keras.layers import Input, Lambda
 * from keras.models import Model             # <<<<<<<<<<<<<<
 * from keras.optimizers import Adam
 * from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
 */
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_n_s_Model);
  __Pyx_GIVEREF(__pyx_n_s_Model);
  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_Model);
  __pyx_t_2 = __Pyx_Import(__pyx_n_s_keras_models, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_Model); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Model, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":9
 * from keras.layers import Input, Lambda
 * from keras.models import Model
 * from keras.optimizers import Adam             # <<<<<<<<<<<<<<
 * from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
 *
 */
  __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(__pyx_n_s_Adam);
  __Pyx_GIVEREF(__pyx_n_s_Adam);
  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Adam);
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_keras_optimizers, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 9, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_Adam); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_Adam, __pyx_t_2) < 0) __PYX_ERR(0, 9, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "train.py":10
 * from keras.models import Model
 * from keras.optimizers import Adam
 * from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping             # <<<<<<<<<<<<<<
 *
 * from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
 */
  __pyx_t_1 = PyList_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_n_s_TensorBoard);
  __Pyx_GIVEREF(__pyx_n_s_TensorBoard);
  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_TensorBoard);
  __Pyx_INCREF(__pyx_n_s_ModelCheckpoint);
  __Pyx_GIVEREF(__pyx_n_s_ModelCheckpoint);
  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_ModelCheckpoint);
  __Pyx_INCREF(__pyx_n_s_ReduceLROnPlateau);
  __Pyx_GIVEREF(__pyx_n_s_ReduceLROnPlateau);
  PyList_SET_ITEM(__pyx_t_1, 2, __pyx_n_s_ReduceLROnPlateau);
  __Pyx_INCREF(__pyx_n_s_EarlyStopping);
  __Pyx_GIVEREF(__pyx_n_s_EarlyStopping);
  PyList_SET_ITEM(__pyx_t_1, 3, __pyx_n_s_EarlyStopping);
  __pyx_t_2 = __Pyx_Import(__pyx_n_s_keras_callbacks, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_TensorBoard); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_TensorBoard, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ModelCheckpoint); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ModelCheckpoint, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ReduceLROnPlateau); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_ReduceLROnPlateau, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_EarlyStopping); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_EarlyStopping, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":12
 * from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
 *
 * from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss             # <<<<<<<<<<<<<<
 * from yolo3.utils import get_random_data
 *
 */
  __pyx_t_2 = PyList_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(__pyx_n_s_preprocess_true_boxes);
  __Pyx_GIVEREF(__pyx_n_s_preprocess_true_boxes);
  PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_preprocess_true_boxes);
  __Pyx_INCREF(__pyx_n_s_yolo_body);
  __Pyx_GIVEREF(__pyx_n_s_yolo_body);
  PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_yolo_body);
  __Pyx_INCREF(__pyx_n_s_tiny_yolo_body);
  __Pyx_GIVEREF(__pyx_n_s_tiny_yolo_body);
  PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_tiny_yolo_body);
  __Pyx_INCREF(__pyx_n_s_yolo_loss);
  __Pyx_GIVEREF(__pyx_n_s_yolo_loss);
  PyList_SET_ITEM(__pyx_t_2, 3, __pyx_n_s_yolo_loss);
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_yolo3_model, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_preprocess_true_boxes); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_preprocess_true_boxes, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_yolo_body); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_yolo_body, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_tiny_yolo_body); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_tiny_yolo_body, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_yolo_loss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_yolo_loss, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "train.py":13
 *
 * from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
 * from yolo3.utils import get_random_data             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_INCREF(__pyx_n_s_get_random_data);
  __Pyx_GIVEREF(__pyx_n_s_get_random_data);
  PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_get_random_data);
  __pyx_t_2 = __Pyx_Import(__pyx_n_s_yolo3_utils, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_get_random_data); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_random_data, __pyx_t_1) < 0) __PYX_ERR(0, 13, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* Function binding: wrap each generated C implementation in a CyFunction
   * and publish it in the module dict under its Python name. */
  /* "train.py":16
 *
 *
 * def _main():             # <<<<<<<<<<<<<<
 *     annotation_path = 'model_data/train.txt'
 *     log_dir = 'logs/000/'
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_1_main, 0, __pyx_n_s_main_2, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__16)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_main_2, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":90
 *
 *
 * def get_classes(classes_path):             # <<<<<<<<<<<<<<
 *     '''loads the classes'''
 *     with open(classes_path) as f:
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_3get_classes, 0, __pyx_n_s_get_classes, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__18)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_classes, __pyx_t_2) < 0) __PYX_ERR(0, 90, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":97
 *     return class_names
 *
 * def get_anchors(anchors_path):             # <<<<<<<<<<<<<<
 *     '''loads the anchors from a file'''
 *     with open(anchors_path) as f:
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_5get_anchors, 0, __pyx_n_s_get_anchors, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__20)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_anchors, __pyx_t_2) < 0) __PYX_ERR(0, 97, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":105
 *
 *
 * def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,             # <<<<<<<<<<<<<<
 *             weights_path='model_data/yolo_weights.h5'):
 *     '''create the training model'''
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_7create_model, 0, __pyx_n_s_create_model, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__23);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_create_model, __pyx_t_2) < 0) __PYX_ERR(0, 105, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":135
 *     return model
 *
 * def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,             # <<<<<<<<<<<<<<
 *             weights_path='model_data/tiny_yolo_weights.h5'):
 *     '''create the training model, for Tiny YOLOv3'''
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_9create_tiny_model, 0, __pyx_n_s_create_tiny_model, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__26);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_create_tiny_model, __pyx_t_2) < 0) __PYX_ERR(0, 135, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":165
 *     return model
 *
 * def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):             # <<<<<<<<<<<<<<
 *     '''data generator for fit_generator'''
 *     n = len(annotation_lines)
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_11data_generator, 0, __pyx_n_s_data_generator, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_data_generator, __pyx_t_2) < 0) __PYX_ERR(0, 165, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":184
 *         yield [image_data, *y_true], np.zeros(batch_size)
 *
 * def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):             # <<<<<<<<<<<<<<
 *     n = len(annotation_lines)
 *     if n==0 or batch_size<=0: return None
 */
  __pyx_t_2 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5train_14data_generator_wrapper, 0, __pyx_n_s_data_generator_wrapper, NULL, __pyx_n_s_train, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_data_generator_wrapper, __pyx_t_2) < 0) __PYX_ERR(0, 184, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "train.py":189
 * return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
 *
 * if __name__ == '__main__':             # <<<<<<<<<<<<<<
 *     _main()
 */
  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_name_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 189, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_t_2, __pyx_n_s_main, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 189, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {
    /* "train.py":190
 *
 * if __name__ == '__main__':
 *     _main()             # <<<<<<<<<<<<<<
 */
    __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_main_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 190, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_4 = NULL;
    /* Unpack a bound method so the underlying function can be called with
     * its self argument directly (generic Cython call optimisation). */
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
      __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1);
      if (likely(__pyx_t_4)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
        __Pyx_INCREF(__pyx_t_4);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_1, function);
      }
    }
    if (__pyx_t_4) {
      __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 190, __pyx_L1_error)
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    } else {
      __pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 190, __pyx_L1_error)
    }
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    /* "train.py":189
 * return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
 *
 * if __name__ == '__main__':             # <<<<<<<<<<<<<<
 *     _main()
 */
  }
  /* "train.py":1
 * """             # <<<<<<<<<<<<<<
 * Retrain the YOLO model for your own dataset.
 * """
 */
  /* __test__ dict for doctest support; empty here. */
  __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /*--- Wrapped vars code ---*/
  goto __pyx_L0;
  /* Error path: release temporaries, record a traceback frame, and tear the
   * half-initialised module down before reporting failure. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  if (__pyx_m) {
    if (__pyx_d) {
      __Pyx_AddTraceback("init train", 0, __pyx_lineno, __pyx_filename);
    }
    Py_DECREF(__pyx_m); __pyx_m = 0;
  } else if (!PyErr_Occurred()) {
    PyErr_SetString(PyExc_ImportError, "init train");
  }
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  #if PY_MAJOR_VERSION < 3
  return;
  #else
  return __pyx_m;
  #endif
}
/* --- Runtime support code --- */
/* Refnanny */
/* Import the RefNanny debug API from the Python helper module `modname`.
 * Only compiled in CYTHON_REFNANNY builds, where it is used to audit
 * reference counting.  The helper module exposes the API struct's address
 * as a Python int attribute "RefNannyAPI".  Returns NULL on any failure. */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule((char *)modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
    if (!p) goto end;
    /* Decode the int-encoded struct pointer published by the helper. */
    r = PyLong_AsVoidPtr(p);
end:
    /* Release both references whether or not the lookup succeeded. */
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* GetBuiltinName */
/* Look up `name` on the builtins module (__pyx_b).  On failure, replace the
 * AttributeError with a NameError whose wording matches CPython's
 * "name '...' is not defined".  Returns a new reference or NULL. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* RaiseArgTupleInvalid */
/* Raise a TypeError for a wrong number of positional arguments, mirroring
 * CPython's standard message.
 *   func_name: function name used in the message
 *   exact:     non-zero if exactly num_min(==num_max) args are required
 *   num_min, num_max: accepted range of positional arguments
 *   num_found: number of arguments actually supplied */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    /* "exactly" overrides the at-least/at-most wording chosen above. */
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise TypeError for a keyword argument that was supplied more than once
 * (e.g. positionally and by keyword), matching CPython's message. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* ParseKeywords */
/* Match keyword arguments in `kwds` against the expected names in
 * `argnames` (a NULL-terminated array of pointers to interned name
 * objects; the first `num_pos_args` slots were already filled from
 * positional arguments).  Each matched value is stored, as a reference
 * borrowed from `kwds`, into the corresponding `values` slot.  Unknown
 * keywords are copied into `kwds2` (the **kwargs dict) if it is non-NULL,
 * otherwise a TypeError is raised.  Duplicate or non-string keywords also
 * raise TypeError.  Returns 0 on success, -1 with an exception set. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* Fast path: interned names can be matched by pointer identity. */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        /* Slow path: compare by string value. */
        name = first_kw_arg;
        #if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Name matches a positional slot that is already filled. */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
        #endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                /* Compare sizes first (cheap) before a full unicode compare. */
                int cmp = (**name == key) ? 0 :
                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
                #endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* Check the already-filled positional slots for a duplicate. */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
                    #endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        /* Not an expected name: route to **kwargs or raise. */
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    /* Falls through to `bad` after setting the exception. */
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* GetModuleGlobalName */
/* Look up `name` in this module's globals dict (__pyx_d), falling back to
 * builtins on a miss.  Returns a new reference or NULL with NameError set.
 * NOTE: the `} else {` below pairs with a brace in the OTHER preprocessor
 * branch — only one branch is compiled, so the braces balance; keep the
 * structure exactly as generated. */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
    result = PyDict_GetItem(__pyx_d, name);
    if (likely(result)) {
        /* PyDict_GetItem returns a borrowed reference; own it. */
        Py_INCREF(result);
    } else {
#else
    result = PyObject_GetItem(__pyx_d, name);
    if (!result) {
        PyErr_Clear();
#endif
        result = __Pyx_GetBuiltinName(name);
    }
    return result;
}
/* PyCFunctionFastCall */
/* Invoke a builtin C function through the METH_FASTCALL vectorcall-style
 * protocol, bypassing argument-tuple creation.  `args` is a C array of
 * `nargs` positional arguments; no keywords are passed. */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    int flags = PyCFunction_GET_FLAGS(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    /* Pre-3.7 (and METH_KEYWORDS variants) take a trailing kwnames arg. */
    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
        return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL);
    } else {
        return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs);
    }
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
/* Run a pure-Python function's code object directly in a fresh frame,
 * skipping the generic call machinery.  Only valid for simple code objects
 * (no *args/**kwargs/closures — the caller checks co_flags).  `args` holds
 * exactly `na` positional arguments. */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
                                               PyObject *globals) {
    PyFrameObject *f;
    PyThreadState *tstate = PyThreadState_GET();
    PyObject **fastlocals;
    Py_ssize_t i;
    PyObject *result;
    assert(globals != NULL);
    /* XXX Perhaps we should create a specialized
       PyFrame_New() that doesn't take locals, but does
       take builtins without sanity checking them.
       */
    assert(tstate != NULL);
    f = PyFrame_New(tstate, co, globals, NULL);
    if (f == NULL) {
        return NULL;
    }
    /* Copy the arguments into the frame's fastlocals slots (new refs). */
    fastlocals = f->f_localsplus;
    for (i = 0; i < na; i++) {
        Py_INCREF(*args);
        fastlocals[i] = *args++;
    }
    result = PyEval_EvalFrameEx(f,0);
    /* Frame deallocation can recurse; bump recursion_depth around the
       DECREF so the recursion check cannot spuriously trip (CPython trick). */
    ++tstate->recursion_depth;
    Py_DECREF(f);
    --tstate->recursion_depth;
    return result;
}
/* Call a Python function with a C argument array and optional kwargs dict.
 * Takes the no-keyword fast path above when possible; otherwise flattens
 * kwargs into a key/value tuple and falls back to PyEval_EvalCodeEx. */
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject *closure;
#if PY_MAJOR_VERSION >= 3
    PyObject *kwdefs;
#endif
    PyObject *kwtuple, **k;
    PyObject **d;
    Py_ssize_t nd;
    Py_ssize_t nk;
    PyObject *result;
    assert(kwargs == NULL || PyDict_Check(kwargs));
    nk = kwargs ? PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    /* Fast path: plain code object, no keywords, no defaults needed. */
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        }
        else if (nargs == 0 && argdefs != NULL
                 && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments .*/
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    /* Slow path: flatten kwargs into an interleaved (key, value, ...) tuple
       as expected by PyEval_EvalCodeEx. */
    if (kwargs != NULL) {
        Py_ssize_t pos, i;
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    }
    else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif
#endif
/* PyObjectCall */
/* Equivalent of PyObject_Call() that dispatches straight through tp_call,
 * with the recursion guard and the NULL-without-error consistency check
 * that CPython's call machinery performs. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        /* Not callable via the slot: let PyObject_Call raise the TypeError. */
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    /* Guard against slots that return NULL without setting an exception. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallMethO */
/* Call a builtin C function object directly with a single bare argument
 * (`arg` may be NULL for METH_NOARGS callers), skipping tuple packing. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    /* Guard against C functions that return NULL without setting an error. */
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
/* Call `func` with exactly one positional argument.  On CPython this picks
 * the fastest applicable path (function fast-call, METH_O, METH_FASTCALL)
 * before falling back to a packed 1-tuple call; other implementations use
 * the generic tuple call directly. */
#if CYTHON_COMPILING_IN_CPYTHON
/* Generic fallback: pack `arg` into a fresh 1-tuple and call normally. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    /* PyTuple_SET_ITEM steals a reference, so take one first. */
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            /* METH_O takes the bare argument — no tuple needed. */
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* PyObjectCallNoArg */
/* Call `func` with no arguments, preferring the fast paths for Python
 * functions and METH_NOARGS builtins before the generic empty-tuple call. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, NULL, 0);
    }
#endif
#ifdef __Pyx_CyFunction_USED
    /* CyFunction objects share the PyCFunction layout, so they can take
       the METH_NOARGS shortcut too. */
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* SaveResetException */
/* Save/restore the *currently handled* exception (tstate->exc_*), as used
 * for try/except state — distinct from the *raised* exception in
 * tstate->curexc_* handled by PyErrFetchRestore below. */
#if CYTHON_FAST_THREAD_STATE
/* Copy the handled-exception triple out of the thread state (new refs). */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install a handled-exception triple, stealing the references passed in.
   The old triple is released only after the new one is in place. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
/* Fetch and normalize the currently raised exception (like sys.exc_info()
 * at the start of an `except` block): clears curexc_*, normalizes the
 * triple, stores it as the currently *handled* exception, and returns new
 * references to the caller via type/value/tb.  Returns 0 on success, -1 if
 * normalization itself raised. */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* Inline PyErr_Fetch: take the raised exception out of the thread state. */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
    /* Normalization may itself have raised a new exception. */
#if CYTHON_FAST_THREAD_STATE
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* Extra references for the triple handed back to the caller. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    /* Make it the currently *handled* exception (inline PyErr_SetExcInfo). */
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* PyErrFetchRestore */
/* Inline versions of PyErr_Restore/PyErr_Fetch operating on a given thread
 * state.  These manage the *raised* exception (tstate->curexc_*). */
#if CYTHON_FAST_THREAD_STATE
/* Restore steals the references passed in; old values are released after
   the new ones are installed. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Fetch transfers ownership of the triple to the caller and clears it
   from the thread state (no refcount changes). */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* None */
/* Raise UnboundLocalError for use of local variable `varname` before
 * assignment, matching CPython's interpreter message. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* PyIntFromDouble */
/* Python 2 only: convert a C double to the smallest fitting integer type —
 * a plain `int` when the value fits in a C long, otherwise a `long`. */
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) {
    if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) {
        return PyInt_FromLong((long)value);
    }
    return PyLong_FromDouble(value);
}
#endif
/* FetchCommonType */
/* Share one instance of a Cython utility type (e.g. the CyFunction type)
 * across all extension modules built with the same CYTHON_ABI: the type is
 * cached as an attribute of a fake "_cython_<abi>" module.  Returns the
 * cached type if present (verifying it is a type of the expected size),
 * otherwise readies and registers `type` itself.  NULL on error. */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
    PyObject* fake_module;
    PyTypeObject* cached_type = NULL;
    fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
    if (!fake_module) return NULL;
    /* PyImport_AddModule returns a borrowed reference; own it for `done`. */
    Py_INCREF(fake_module);
    cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
    if (cached_type) {
        if (!PyType_Check((PyObject*)cached_type)) {
            PyErr_Format(PyExc_TypeError,
                "Shared Cython type %.200s is not a type object",
                type->tp_name);
            goto bad;
        }
        /* A size mismatch means another module was built with an
           incompatible layout — refuse to share it. */
        if (cached_type->tp_basicsize != type->tp_basicsize) {
            PyErr_Format(PyExc_TypeError,
                "Shared Cython type %.200s has the wrong size, try recompiling",
                type->tp_name);
            goto bad;
        }
    } else {
        /* Only AttributeError means "not registered yet". */
        if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
        PyErr_Clear();
        if (PyType_Ready(type) < 0) goto bad;
        if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
            goto bad;
        Py_INCREF(type);
        cached_type = type;
    }
done:
    Py_DECREF(fake_module);
    return cached_type;
bad:
    Py_XDECREF(cached_type);
    cached_type = NULL;
    goto done;
}
/* CythonFunction */
/* Getter/setter implementations backing the __pyx_CyFunction_getsets table
 * below.  They emulate the attribute protocol of plain Python functions
 * (__doc__, __name__, __dict__, __defaults__, ...) for CyFunction objects.
 * Lazily-created attributes (doc, name, dict, defaults, annotations) are
 * materialized on first access and cached on the object. */
/* __doc__: lazily built from the C method table's ml_doc, else None. */
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
    if (unlikely(op->func_doc == NULL)) {
        if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
            op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
            op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
            if (unlikely(op->func_doc == NULL))
                return NULL;
        } else {
            /* No docstring: return None without caching it. */
            Py_INCREF(Py_None);
            return Py_None;
        }
    }
    Py_INCREF(op->func_doc);
    return op->func_doc;
}
/* __doc__ setter: deleting resets to None. */
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp = op->func_doc;
    if (value == NULL) {
        value = Py_None;
    }
    Py_INCREF(value);
    op->func_doc = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __name__: lazily interned from the C method table's ml_name. */
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
        op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
        op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
        if (unlikely(op->func_name == NULL))
            return NULL;
    }
    Py_INCREF(op->func_name);
    return op->func_name;
}
/* __name__ setter: must be a string; deletion is a TypeError. */
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
#else
    if (unlikely(value == NULL || !PyString_Check(value))) {
#endif
        PyErr_SetString(PyExc_TypeError,
                        "__name__ must be set to a string object");
        return -1;
    }
    tmp = op->func_name;
    Py_INCREF(value);
    op->func_name = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __qualname__: always present, set at construction time. */
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op)
{
    Py_INCREF(op->func_qualname);
    return op->func_qualname;
}
/* __qualname__ setter: must be a string; deletion is a TypeError. */
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
    if (unlikely(value == NULL || !PyUnicode_Check(value))) {
#else
    if (unlikely(value == NULL || !PyString_Check(value))) {
#endif
        PyErr_SetString(PyExc_TypeError,
                        "__qualname__ must be set to a string object");
        return -1;
    }
    tmp = op->func_qualname;
    Py_INCREF(value);
    op->func_qualname = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __self__: the closure object if any, else None. */
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
    PyObject *self;
    self = m->func_closure;
    if (self == NULL)
        self = Py_None;
    Py_INCREF(self);
    return self;
}
/* __dict__: created lazily on first access. */
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op)
{
    if (unlikely(op->func_dict == NULL)) {
        op->func_dict = PyDict_New();
        if (unlikely(op->func_dict == NULL))
            return NULL;
    }
    Py_INCREF(op->func_dict);
    return op->func_dict;
}
/* __dict__ setter: must be a dict; deletion is a TypeError. */
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value)
{
    PyObject *tmp;
    if (unlikely(value == NULL)) {
        PyErr_SetString(PyExc_TypeError,
               "function's dictionary may not be deleted");
        return -1;
    }
    if (unlikely(!PyDict_Check(value))) {
        PyErr_SetString(PyExc_TypeError,
               "setting function's dictionary to a non-dict");
        return -1;
    }
    tmp = op->func_dict;
    Py_INCREF(value);
    op->func_dict = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __globals__: the module globals captured at construction (read-only). */
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op)
{
    Py_INCREF(op->func_globals);
    return op->func_globals;
}
/* __closure__: CyFunctions expose no cell tuple; always None. */
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op)
{
    Py_INCREF(Py_None);
    return Py_None;
}
/* __code__: the associated code object if one was provided, else None. */
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op)
{
    PyObject* result = (op->func_code) ? op->func_code : Py_None;
    Py_INCREF(result);
    return result;
}
/* Populate defaults_tuple/defaults_kwdict from the registered
 * defaults_getter callback, which returns a (tuple, dict) pair.
 * Returns 0 on success, -1 on error. */
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
    int result = 0;
    PyObject *res = op->defaults_getter((PyObject *) op);
    if (unlikely(!res))
        return -1;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
    Py_INCREF(op->defaults_tuple);
    op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
    Py_INCREF(op->defaults_kwdict);
#else
    op->defaults_tuple = PySequence_ITEM(res, 0);
    if (unlikely(!op->defaults_tuple)) result = -1;
    else {
        op->defaults_kwdict = PySequence_ITEM(res, 1);
        if (unlikely(!op->defaults_kwdict)) result = -1;
    }
#endif
    Py_DECREF(res);
    return result;
}
/* __defaults__ setter: tuple or None; deletion resets to None. */
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value) {
        value = Py_None;
    } else if (value != Py_None && !PyTuple_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__defaults__ must be set to a tuple object");
        return -1;
    }
    Py_INCREF(value);
    tmp = op->defaults_tuple;
    op->defaults_tuple = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __defaults__: lazily computed via defaults_getter when available. */
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_tuple;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
            result = op->defaults_tuple;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __kwdefaults__ setter: dict or None; deletion resets to None. */
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value) {
        value = Py_None;
    } else if (value != Py_None && !PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__kwdefaults__ must be set to a dict object");
        return -1;
    }
    Py_INCREF(value);
    tmp = op->defaults_kwdict;
    op->defaults_kwdict = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __kwdefaults__: lazily computed via defaults_getter when available. */
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op) {
    PyObject* result = op->defaults_kwdict;
    if (unlikely(!result)) {
        if (op->defaults_getter) {
            if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
            result = op->defaults_kwdict;
        } else {
            result = Py_None;
        }
    }
    Py_INCREF(result);
    return result;
}
/* __annotations__ setter: dict or None; None/deletion clears the cache. */
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value) {
    PyObject* tmp;
    if (!value || value == Py_None) {
        value = NULL;
    } else if (!PyDict_Check(value)) {
        PyErr_SetString(PyExc_TypeError,
                        "__annotations__ must be set to a dict object");
        return -1;
    }
    Py_XINCREF(value);
    tmp = op->func_annotations;
    op->func_annotations = value;
    Py_XDECREF(tmp);
    return 0;
}
/* __annotations__: created (and cached) as an empty dict on first access. */
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op) {
    PyObject* result = op->func_annotations;
    if (unlikely(!result)) {
        result = PyDict_New();
        if (unlikely(!result)) return NULL;
        op->func_annotations = result;
    }
    Py_INCREF(result);
    return result;
}
/* Attribute table wiring the getters/setters above to both the Py2-style
 * (func_*) and Py3-style (__*__) attribute names. */
static PyGetSetDef __pyx_CyFunction_getsets[] = {
    {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {(char *) "__doc__",  (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
    {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
    {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
    {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
    {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
    {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
    {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
    {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
    {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
    {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
    {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
    {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
    {0, 0, 0, 0, 0}
};
/* __module__ is stored directly in the embedded PyCFunctionObject. */
static PyMemberDef __pyx_CyFunction_members[] = {
    {(char *) "__module__", T_OBJECT, offsetof(__pyx_CyFunctionObject, func.m_module), PY_WRITE_RESTRICTED, 0},
    {0, 0, 0, 0, 0}
};
/* __reduce__: pickle a CyFunction by its bare name only. */
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
    return PyUnicode_FromString(m->func.m_ml->ml_name);
#else
    return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
    {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
    {0, 0, 0, 0}
};
/* Before 3.5 the weakref list lives in the CyFunction struct itself;
   from 3.5 on, PyCFunctionObject provides an m_weakreflist slot. */
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
/* Construct a CyFunction.
 *   ml:       C method table entry (name, C impl, flags, doc)
 *   flags:    __Pyx_CYFUNCTION_* bits (static/class method, cclass)
 *   qualname: qualified name (reference taken)
 *   closure/module/code: optional associated objects (refs taken if non-NULL)
 *   globals:  module globals dict (reference taken)
 * Lazily-computed fields (dict, name, doc, defaults, annotations) start
 * NULL.  Returns a new, GC-tracked object or NULL. */
static PyObject *__Pyx_CyFunction_New(PyTypeObject *type, PyMethodDef *ml, int flags, PyObject* qualname,
                                      PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
    __pyx_CyFunctionObject *op = PyObject_GC_New(__pyx_CyFunctionObject, type);
    if (op == NULL)
        return NULL;
    op->flags = flags;
    __Pyx_CyFunction_weakreflist(op) = NULL;
    op->func.m_ml = ml;
    /* m_self points back at the object itself so the generic
       PyCFunction call machinery passes the CyFunction as `self`. */
    op->func.m_self = (PyObject *) op;
    Py_XINCREF(closure);
    op->func_closure = closure;
    Py_XINCREF(module);
    op->func.m_module = module;
    op->func_dict = NULL;
    op->func_name = NULL;
    Py_INCREF(qualname);
    op->func_qualname = qualname;
    op->func_doc = NULL;
    op->func_classobj = NULL;
    op->func_globals = globals;
    Py_INCREF(op->func_globals);
    Py_XINCREF(code);
    op->func_code = code;
    op->defaults_pyobjects = 0;
    op->defaults = NULL;
    op->defaults_tuple = NULL;
    op->defaults_kwdict = NULL;
    op->defaults_getter = NULL;
    op->func_annotations = NULL;
    PyObject_GC_Track(op);
    return (PyObject *) op;
}
/* tp_clear: drop all owned references, including any PyObject pointers
 * stored in the raw `defaults` buffer. */
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
    Py_CLEAR(m->func_closure);
    Py_CLEAR(m->func.m_module);
    Py_CLEAR(m->func_dict);
    Py_CLEAR(m->func_name);
    Py_CLEAR(m->func_qualname);
    Py_CLEAR(m->func_doc);
    Py_CLEAR(m->func_globals);
    Py_CLEAR(m->func_code);
    Py_CLEAR(m->func_classobj);
    Py_CLEAR(m->defaults_tuple);
    Py_CLEAR(m->defaults_kwdict);
    Py_CLEAR(m->func_annotations);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_XDECREF(pydefaults[i]);
        PyObject_Free(m->defaults);
        m->defaults = NULL;
    }
    return 0;
}
/* tp_dealloc: untrack from GC first, clear weakrefs, then release refs. */
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
    PyObject_GC_UnTrack(m);
    if (__Pyx_CyFunction_weakreflist(m) != NULL)
        PyObject_ClearWeakRefs((PyObject *) m);
    __Pyx_CyFunction_clear(m);
    PyObject_GC_Del(m);
}
/* tp_traverse: visit every owned reference for the cycle collector. */
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
    Py_VISIT(m->func_closure);
    Py_VISIT(m->func.m_module);
    Py_VISIT(m->func_dict);
    Py_VISIT(m->func_name);
    Py_VISIT(m->func_qualname);
    Py_VISIT(m->func_doc);
    Py_VISIT(m->func_globals);
    Py_VISIT(m->func_code);
    Py_VISIT(m->func_classobj);
    Py_VISIT(m->defaults_tuple);
    Py_VISIT(m->defaults_kwdict);
    if (m->defaults) {
        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
        int i;
        for (i = 0; i < m->defaults_pyobjects; i++)
            Py_VISIT(pydefaults[i]);
    }
    return 0;
}
/* tp_descr_get: implement binding behavior when accessed via a class —
 * staticmethod returns itself, classmethod binds the class, and a plain
 * method binds the instance (or stays unbound when accessed on the class). */
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
    __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
    if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
        Py_INCREF(func);
        return func;
    }
    if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
        if (type == NULL)
            type = (PyObject *)(Py_TYPE(obj));
        return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
    }
    if (obj == Py_None)
        obj = NULL;
    return __Pyx_PyMethod_New(func, obj, type);
}
/* tp_repr: "<cyfunction qualname at 0x...>". */
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
    return PyUnicode_FromFormat("<cyfunction %U at %p>",
                                op->func_qualname, (void *)op);
#else
    return PyString_FromFormat("<cyfunction %s at %p>",
                               PyString_AsString(op->func_qualname), (void *)op);
#endif
}
/* Dispatch a call to the underlying C implementation according to its
 * METH_* flags, validating argument counts for METH_NOARGS/METH_O just as
 * CPython's builtin-function call path does.  `self` is passed through to
 * the C function; `arg` is the positional tuple, `kw` the keyword dict. */
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
    PyCFunctionObject* f = (PyCFunctionObject*)func;
    PyCFunction meth = f->m_ml->ml_meth;
    Py_ssize_t size;
    switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
    case METH_VARARGS:
        if (likely(kw == NULL || PyDict_Size(kw) == 0))
            return (*meth)(self, arg);
        break;
    case METH_VARARGS | METH_KEYWORDS:
        return (*(PyCFunctionWithKeywords)meth)(self, arg, kw);
    case METH_NOARGS:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
            size = PyTuple_GET_SIZE(arg);
            if (likely(size == 0))
                return (*meth)(self, NULL);
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    case METH_O:
        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
            size = PyTuple_GET_SIZE(arg);
            if (likely(size == 1)) {
                PyObject *result, *arg0 = PySequence_ITEM(arg, 0);
                if (unlikely(!arg0)) return NULL;
                result = (*meth)(self, arg0);
                Py_DECREF(arg0);
                return result;
            }
            PyErr_Format(PyExc_TypeError,
                "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
                f->m_ml->ml_name, size);
            return NULL;
        }
        break;
    default:
        PyErr_SetString(PyExc_SystemError, "Bad call flags in "
                        "__Pyx_CyFunction_Call. METH_OLDARGS is no "
                        "longer supported!");
        return NULL;
    }
    /* Reached only when keywords were passed to a no-keyword flavor. */
    PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
                 f->m_ml->ml_name);
    return NULL;
}
/* Plain call: the CyFunction itself acts as `self`. */
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
/* tp_call entry point.  For bound cclass methods (CCLASS and not
 * STATICMETHOD), args[0] is the instance: split it off as `self` and pass
 * the remaining arguments on; otherwise call normally. */
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
    PyObject *result;
    __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
    if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
        Py_ssize_t argc;
        PyObject *new_args;
        PyObject *self;
        argc = PyTuple_GET_SIZE(args);
        new_args = PyTuple_GetSlice(args, 1, argc);
        if (unlikely(!new_args))
            return NULL;
        self = PyTuple_GetItem(args, 0);
        if (unlikely(!self)) {
            Py_DECREF(new_args);
            return NULL;
        }
        result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
        Py_DECREF(new_args);
    } else {
        result = __Pyx_CyFunction_Call(func, args, kw);
    }
    return result;
}
static PyTypeObject __pyx_CyFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"cython_function_or_method",
sizeof(__pyx_CyFunctionObject),
0,
(destructor) __Pyx_CyFunction_dealloc,
0,
0,
0,
#if PY_MAJOR_VERSION < 3
0,
#else
0,
#endif
(reprfunc) __Pyx_CyFunction_repr,
0,
0,
0,
0,
__Pyx_CyFunction_CallAsMethod,
0,
0,
0,
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
0,
(traverseproc) __Pyx_CyFunction_traverse,
(inquiry) __Pyx_CyFunction_clear,
0,
#if PY_VERSION_HEX < 0x030500A0
offsetof(__pyx_CyFunctionObject, func_weakreflist),
#else
offsetof(PyCFunctionObject, m_weakreflist),
#endif
0,
0,
__pyx_CyFunction_methods,
__pyx_CyFunction_members,
__pyx_CyFunction_getsets,
0,
0,
__Pyx_CyFunction_descr_get,
0,
offsetof(__pyx_CyFunctionObject, func_dict),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
#if PY_VERSION_HEX >= 0x030400a1
0,
#endif
};
static int __pyx_CyFunction_init(void) {
__pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
if (__pyx_CyFunctionType == NULL) {
return -1;
}
return 0;
}
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults = PyObject_Malloc(size);
if (!m->defaults)
return PyErr_NoMemory();
memset(m->defaults, 0, size);
m->defaults_pyobjects = pyobjects;
return m->defaults;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_tuple = tuple;
Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_kwdict = dict;
Py_INCREF(dict);
}
/* Store the __annotations__ dict on the function, taking a new
 * reference.  Caller guarantees no prior value is set. */
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
    Py_INCREF(dict);
    ((__pyx_CyFunctionObject *) func)->func_annotations = dict;
}
/* SliceObject:
 * obj[cstart:cstop] with optional pre-built Python start/stop/slice objects.
 * has_cstart/has_cstop say whether the C ssize_t bounds are meaningful; the
 * _py_* pointers, when non-NULL, point at borrowed Python objects to use
 * instead.  On Python 2 the sq_slice slot is used directly when available
 * (with manual wraparound of negative indices); otherwise a PySlice object
 * is built and passed through mp_subscript.  Returns a new reference or
 * NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
        Py_ssize_t cstart, Py_ssize_t cstop,
        PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
        int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
#if CYTHON_USE_TYPE_SLOTS
    PyMappingMethods* mp;
#if PY_MAJOR_VERSION < 3
    /* Py2 fast path: call the sequence slice slot with C integer bounds. */
    PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
    if (likely(ms && ms->sq_slice)) {
        if (!has_cstart) {
            if (_py_start && (*_py_start != Py_None)) {
                cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
                if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
            } else
                cstart = 0;
        }
        if (!has_cstop) {
            if (_py_stop && (*_py_stop != Py_None)) {
                cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
                if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
            } else
                cstop = PY_SSIZE_T_MAX;
        }
        /* sq_slice does not wrap negative indices itself: add len(obj),
         * clamping at 0; an OverflowError from sq_length is ignored. */
        if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
            Py_ssize_t l = ms->sq_length(obj);
            if (likely(l >= 0)) {
                if (cstop < 0) {
                    cstop += l;
                    if (cstop < 0) cstop = 0;
                }
                if (cstart < 0) {
                    cstart += l;
                    if (cstart < 0) cstart = 0;
                }
            } else {
                if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                    goto bad;
                PyErr_Clear();
            }
        }
        return ms->sq_slice(obj, cstart, cstop);
    }
#endif
    mp = Py_TYPE(obj)->tp_as_mapping;
    if (likely(mp && mp->mp_subscript))
#endif
    {
        PyObject* result;
        PyObject *py_slice, *py_start, *py_stop;
        if (_py_slice) {
            py_slice = *_py_slice;
        } else {
            /* Build a slice object from whichever of the Python/C bounds
             * were supplied; owned_* track temporaries we must release. */
            PyObject* owned_start = NULL;
            PyObject* owned_stop = NULL;
            if (_py_start) {
                py_start = *_py_start;
            } else {
                if (has_cstart) {
                    owned_start = py_start = PyInt_FromSsize_t(cstart);
                    if (unlikely(!py_start)) goto bad;
                } else
                    py_start = Py_None;
            }
            if (_py_stop) {
                py_stop = *_py_stop;
            } else {
                if (has_cstop) {
                    owned_stop = py_stop = PyInt_FromSsize_t(cstop);
                    if (unlikely(!py_stop)) {
                        Py_XDECREF(owned_start);
                        goto bad;
                    }
                } else
                    py_stop = Py_None;
            }
            py_slice = PySlice_New(py_start, py_stop, Py_None);
            Py_XDECREF(owned_start);
            Py_XDECREF(owned_stop);
            if (unlikely(!py_slice)) goto bad;
        }
#if CYTHON_USE_TYPE_SLOTS
        result = mp->mp_subscript(obj, py_slice);
#else
        result = PyObject_GetItem(obj, py_slice);
#endif
        /* Only drop the slice if we created it here (borrowed otherwise). */
        if (!_py_slice) {
            Py_DECREF(py_slice);
        }
        return result;
    }
    PyErr_Format(PyExc_TypeError,
        "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
bad:
    return NULL;
}
/* GetItemInt */
/* Generic fallback for integer subscripting: o[j] via the full mapping
 * protocol.  Consumes (steals) the reference to `j`; a NULL `j` means the
 * index object could not be created, so the error is simply propagated.
 * Returns a new reference or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *result;
    if (j == NULL)
        return NULL;
    result = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return result;
}
/* list[i] with compile-time-known wraparound/boundscheck flags.  When the
 * unchecked list macros are usable, negative indices are wrapped and the
 * borrowed item is INCREF'd directly; out-of-bounds falls back to the
 * generic path (which raises IndexError).  Returns a new reference. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    /* bitwise & is deliberate: both operands are 0/1, avoids a branch */
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    /* note: the generic path gets the ORIGINAL (unwrapped) index */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* tuple[i] — identical structure to __Pyx_GetItemInt_List_Fast, using the
 * tuple access macros.  Returns a new reference or NULL with an exception. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    /* bitwise & is deliberate: both operands are 0/1, avoids a branch */
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an object of statically unknown sequence type: inline fast paths
 * for exact list/tuple, then the sq_item slot (with manual wraparound of
 * negative indices), finally the generic mapping-protocol fallback.
 * Returns a new reference or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            /* sq_item does not wrap negative indices; add len(o) first,
             * ignoring an OverflowError from sq_length. */
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* RaiseTooManyValuesToUnpack */
/* Set a ValueError matching CPython's message for over-long unpacking,
 * e.g. "too many values to unpack (expected 2)". */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Set a ValueError matching CPython's message for under-long unpacking;
 * `index` is how many values WERE produced ("%.1s" pluralises). */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* IterFinish */
/* Called after an iterator is exhausted: if the pending exception is
 * StopIteration, swallow it and return 0 (clean finish); any other pending
 * exception returns -1 so the caller propagates it; no exception returns 0.
 * With CYTHON_FAST_THREAD_STATE the thread-state fields are cleared
 * directly instead of going through PyErr_Fetch/PyErr_Clear. */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
    PyThreadState *tstate = PyThreadState_GET();
    PyObject* exc_type = tstate->curexc_type;
    if (unlikely(exc_type)) {
        if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) {
            PyObject *exc_value, *exc_tb;
            exc_value = tstate->curexc_value;
            exc_tb = tstate->curexc_traceback;
            tstate->curexc_type = 0;
            tstate->curexc_value = 0;
            tstate->curexc_traceback = 0;
            Py_DECREF(exc_type);
            Py_XDECREF(exc_value);
            Py_XDECREF(exc_tb);
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#else
    if (unlikely(PyErr_Occurred())) {
        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
            PyErr_Clear();
            return 0;
        } else {
            return -1;
        }
    }
    return 0;
#endif
}
/* UnpackItemEndCheck */
/* After unpacking `expected` items, the iterator is advanced once more and
 * the result passed here.  A non-NULL `retval` means the iterable had too
 * many values: drop it and raise ValueError via
 * __Pyx_RaiseTooManyValuesError, returning -1.  A NULL `retval` means
 * either clean exhaustion or an error; __Pyx_IterFinish distinguishes the
 * two (0 for StopIteration/none, -1 otherwise).
 *
 * Fix: the original ended with an unreachable `return 0;` after an
 * if/else in which both branches return; the dead statement and the
 * redundant else wrapper are removed. */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
    if (unlikely(retval)) {
        Py_DECREF(retval);
        __Pyx_RaiseTooManyValuesError(expected);
        return -1;
    }
    return __Pyx_IterFinish();
}
/* PyIntBinop */
/* op1 // op2 where op2 is a compile-time-known small integer constant
 * (duplicated into `intval`).  Fast paths: Py2 PyInt (with the LONG_MIN//-1
 * overflow case deferred to the slow path), then exact PyLong decoded
 * directly from its internal digits (up to 4 digits, into `long` or
 * `long long`); everything else falls back to the generic number protocol.
 * NOTE: the switch cases intentionally fall through — when a case's
 * compiled-in `if` branches are all false (digit count doesn't fit the
 * native word), control falls to the next case and ultimately `default`,
 * which takes the generic slot. */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_FloorDivideObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
        /* a == LONG_MIN (only value equal to its own negation) with b == -1
         * would overflow the C division: use the slot implementation. */
        if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a))
            return PyInt_Type.tp_as_number->nb_floor_divide(op1, op2);
        else {
            /* C division truncates toward zero; adjust q down by one when
             * there is a remainder and the signs of r and b differ, giving
             * Python's floor-division semantics. */
            long q, r;
            q = a / b;
            r = a - q*b;
            q -= ((r != 0) & ((r ^ b) < 0));
            x = q;
        }
        return PyInt_FromLong(x);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        /* |ob_size| is the digit count; its sign is the number's sign. */
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                default: return PyLong_Type.tp_as_number->nb_floor_divide(op1, op2);
            }
        }
        {
            /* floor-division adjustment, as in the PyInt path above */
            long q, r;
            q = a / b;
            r = a - q*b;
            q -= ((r != 0) & ((r ^ b) < 0));
            x = q;
        }
        return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
        long_long:
        {
            PY_LONG_LONG q, r;
            q = lla / llb;
            r = lla - q*llb;
            q -= ((r != 0) & ((r ^ llb) < 0));
            llx = q;
        }
        return PyLong_FromLongLong(llx);
#endif
    }
    #endif
    return (inplace ? PyNumber_InPlaceFloorDivide : PyNumber_FloorDivide)(op1, op2);
}
#endif
/* PyIntBinop */
/* op1 + op2 where op2 is a compile-time-known small integer constant
 * (`intval`).  Fast paths: Py2 PyInt with unsigned-arithmetic overflow
 * detection, exact PyLong decoded from its internal digits, and exact
 * float; otherwise the generic (in-place) add protocol.  See
 * __Pyx_PyInt_FloorDivideObjC for the intentional switch fall-through. */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
        /* unsigned add avoids UB; sign test detects signed overflow:
         * overflow happened iff x differs in sign from BOTH operands */
        x = (long)((unsigned long)a + b);
        if (likely((x^a) >= 0 || (x^b) >= 0))
            return PyInt_FromLong(x);
        /* overflow: promote to arbitrary precision */
        return PyLong_Type.tp_as_number->nb_add(op1, op2);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
            }
        }
        /* <= 4 digits always fit: value is bounded well inside long range,
         * so this add cannot overflow */
                x = a + b;
            return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
        long_long:
                llx = lla + llb;
            return PyLong_FromLongLong(llx);
#endif
        
        
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
        double result;
        PyFPE_START_PROTECT("add", return NULL)
        result = ((double)a) + (double)b;
        PyFPE_END_PROTECT(result)
        return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* PyIntBinop */
/* op1 == op2 where op2 is a compile-time-known small integer constant
 * (`intval`), returning Py_True/Py_False.  Identity short-circuits first;
 * then Py2 PyInt, exact PyLong via digit decoding, and exact float are
 * compared inline; everything else uses PyObject_RichCompare.  The switch
 * fall-through is intentional (see __Pyx_PyInt_FloorDivideObjC). */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
    if (op1 == op2) {
        Py_RETURN_TRUE;
    }
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long a = PyInt_AS_LONG(op1);
        if (a == b) {
            Py_RETURN_TRUE;
        } else {
            Py_RETURN_FALSE;
        }
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a;
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
                    }
                /* with standard 15/30-bit digits a >4-digit long cannot
                 * equal a C long constant, so False is safe; other digit
                 * sizes defer to the exact compare */
            #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15
                default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ);
            #else
                default: Py_RETURN_FALSE;
            #endif
            }
        }
            if (a == b) {
                Py_RETURN_TRUE;
            } else {
                Py_RETURN_FALSE;
            }
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
        if ((double)a == (double)b) {
            Py_RETURN_TRUE;
        } else {
            Py_RETURN_FALSE;
        }
    }
    return PyObject_RichCompare(op1, op2, Py_EQ);
}
#endif
/* PyIntBinop */
/* op1 - op2 where op2 is a compile-time-known small integer constant
 * (`intval`).  Structure mirrors __Pyx_PyInt_AddObjC: Py2 PyInt with
 * overflow detection, exact PyLong via digit decoding, exact float, then
 * the generic (in-place) subtract protocol.  Switch fall-through is
 * intentional (see __Pyx_PyInt_FloorDivideObjC). */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
    #if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(op1))) {
        const long b = intval;
        long x;
        long a = PyInt_AS_LONG(op1);
        /* unsigned subtract avoids UB; overflow iff x differs in sign
         * from a AND from ~b (i.e. from -b's side of the subtraction) */
        x = (long)((unsigned long)a - b);
        if (likely((x^a) >= 0 || (x^~b) >= 0))
            return PyInt_FromLong(x);
        return PyLong_Type.tp_as_number->nb_subtract(op1, op2);
    }
    #endif
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(PyLong_CheckExact(op1))) {
        const long b = intval;
        long a, x;
#ifdef HAVE_LONG_LONG
        const PY_LONG_LONG llb = intval;
        PY_LONG_LONG lla, llx;
#endif
        const digit* digits = ((PyLongObject*)op1)->ob_digit;
        const Py_ssize_t size = Py_SIZE(op1);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            a = likely(size) ? digits[0] : 0;
            if (size == -1) a = -a;
        } else {
            switch (size) {
                case -2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 2:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 3:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case -4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                case 4:
                    if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                        a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
                        break;
#ifdef HAVE_LONG_LONG
                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
                        lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
                        goto long_long;
#endif
                    }
                default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2);
            }
        }
        /* <= 4-digit values are far from the long boundaries: no overflow */
                x = a - b;
            return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
        long_long:
                llx = lla - llb;
            return PyLong_FromLongLong(llx);
#endif
        
        
    }
    #endif
    if (PyFloat_CheckExact(op1)) {
        const long b = intval;
        double a = PyFloat_AS_DOUBLE(op1);
        double result;
        PyFPE_START_PROTECT("subtract", return NULL)
        result = ((double)a) - (double)b;
        PyFPE_END_PROTECT(result)
        return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2);
}
#endif
/* PyObjectCallMethod1 */
/* Call obj.method_name(arg) and return a new reference (NULL on error).
 * When CYTHON_UNPACK_METHODS is enabled, a bound method is unpacked so the
 * underlying function can be called directly with (self, arg), using the
 * fast-call paths for plain Python functions and C functions when
 * available, avoiding the bound-method call overhead. */
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
    PyObject *method, *result = NULL;
    method = __Pyx_PyObject_GetAttrStr(obj, method_name);
    if (unlikely(!method)) goto done;
#if CYTHON_UNPACK_METHODS
    if (likely(PyMethod_Check(method))) {
        PyObject *self = PyMethod_GET_SELF(method);
        if (likely(self)) {
            PyObject *args;
            PyObject *function = PyMethod_GET_FUNCTION(method);
#if CYTHON_FAST_PYCALL
            if (PyFunction_Check(function)) {
                /* note: this inner `args` is a C array shadowing the
                 * outer PyObject* declaration */
                PyObject *args[2] = {self, arg};
                result = __Pyx_PyFunction_FastCall(function, args, 2);
                goto done;
            }
#endif
#if CYTHON_FAST_PYCCALL
            if (__Pyx_PyFastCFunction_Check(function)) {
                PyObject *args[2] = {self, arg};
                result = __Pyx_PyCFunction_FastCall(function, args, 2);
                goto done;
            }
#endif
            /* slow path: build an argument tuple (self, arg); `function`
             * is a borrowed ref from the method, so INCREF it before the
             * method object is released */
            args = PyTuple_New(2);
            if (unlikely(!args)) goto done;
            Py_INCREF(self);
            PyTuple_SET_ITEM(args, 0, self);
            Py_INCREF(arg);
            PyTuple_SET_ITEM(args, 1, arg);
            Py_INCREF(function);
            Py_DECREF(method); method = NULL;
            result = __Pyx_PyObject_Call(function, args, NULL);
            Py_DECREF(args);
            Py_DECREF(function);
            return result;
        }
    }
#endif
    result = __Pyx_PyObject_CallOneArg(method, arg);
done:
    Py_XDECREF(method);
    return result;
}
/* append */
/* L.append(x): use the unchecked list append for exact lists, otherwise
 * look up and call the object's "append" method, discarding its result.
 * Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
    PyObject* ret;
    if (likely(PyList_CheckExact(L)))
        return unlikely(__Pyx_PyList_Append(L, x) < 0) ? -1 : 0;
    ret = __Pyx_PyObject_CallMethod1(L, __pyx_n_s_append, x);
    if (unlikely(ret == NULL))
        return -1;
    Py_DECREF(ret);
    return 0;
}
/* Import */
/* __import__(name, globals, {}, from_list, level) for this module.  On
 * Python 3 with level == -1 (pre-PEP-328 "try relative then absolute"),
 * a relative import (level 1) is attempted first for dotted module names,
 * falling back to absolute (level 0) if it raises ImportError.  Before
 * 3.3 the builtins' __import__ is called; 3.3+ uses the C-level
 * PyImport_ImportModuleLevelObject.  Returns a new module reference or
 * NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    #if PY_VERSION_HEX < 0x03030000
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
    #endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    /* borrowed reference — must not be decref'd at `bad:` */
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                /* relative attempt (level 1); ImportError is swallowed and
                 * the absolute attempt below runs with module == NULL */
                #if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
                #else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                #endif
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;
        }
        #endif
        if (!module) {
            #if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
bad:
    #if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
    #endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* ImportFrom */
/* `from module import name`: fetch the attribute, converting an
 * AttributeError into the ImportError Python raises for a missing name.
 * Returns a new reference or NULL with an exception set.
 * Note: the #if below selects between two alternative argument lists of
 * the same PyErr_Format call. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
        #if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
        #else
            "cannot import name %S", name);
        #endif
    }
    return value;
}
/* BytesEquals */
/* s1 ==/!= s2 (`equals` is Py_EQ or Py_NE) specialised for exact bytes.
 * Short-circuits on identity, length, first byte, and (when available)
 * cached hashes before falling back to memcmp; non-bytes operands go
 * through PyObject_RichCompare.  Returns 1/0, or -1 on error. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result;
#if CYTHON_USE_UNICODE_INTERNALS
            /* differing cached hashes prove inequality (-1 = not computed) */
            Py_hash_t hash1, hash2;
            hash1 = ((PyBytesObject*)s1)->ob_shash;
            hash2 = ((PyBytesObject*)s2)->ob_shash;
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                return (equals == Py_NE);
            }
#endif
            result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* UnicodeEquals */
/* s1 ==/!= s2 (`equals` is Py_EQ or Py_NE) specialised for exact unicode.
 * On Py2 a str operand paired with a unicode one is coerced to unicode
 * (owned_ref tracks the temporary); two non-unicode operands defer to
 * __Pyx_PyBytes_Equals.  For two unicode objects the comparison
 * short-circuits on length, cached hash, storage kind, and first code
 * point before memcmp'ing the raw character data.  Returns 1/0, or -1 on
 * error (e.g. decoding failure during coercion). */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
        /* ensure PEP 393 canonical representation before touching data */
        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
            return -1;
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
#if CYTHON_USE_UNICODE_INTERNALS
        {
            /* differing cached hashes prove inequality (-1 = not computed) */
            Py_hash_t hash1, hash2;
        #if CYTHON_PEP393_ENABLED
            hash1 = ((PyASCIIObject*)s1)->hash;
            hash2 = ((PyASCIIObject*)s2)->hash;
        #else
            hash1 = ((PyUnicodeObject*)s1)->hash;
            hash2 = ((PyUnicodeObject*)s2)->hash;
        #endif
            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
                goto return_ne;
            }
        }
#endif
        /* different storage widths (1/2/4 bytes per char) with equal
         * lengths cannot compare equal via raw memcmp; Cython treats the
         * kind mismatch itself as inequality */
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, (size_t)(length * kind));
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* CLineInTraceback */
/* Decide whether the C source line number should appear in a generated
 * traceback: returns `c_line` to include it, 0 to suppress it, governed by
 * cython_runtime.cline_in_traceback (read from the module dict when the
 * dict pointer is accessible, otherwise via getattr with the pending
 * exception saved/restored around the lookup). */
static int __Pyx_CLineForTraceback(int c_line) {
#ifdef CYTHON_CLINE_IN_TRACEBACK
    return ((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0;
#else
    PyObject *use_cline;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback);
    } else
#endif
    {
        PyObject *ptype, *pvalue, *ptraceback;
        PyObject *use_cline_obj;
        PyErr_Fetch(&ptype, &pvalue, &ptraceback);
        use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
        if (use_cline_obj) {
            /* NOTE(review): PyObject_Not returns -1 on error as well as 1
             * for falsy; either nonzero result maps to Py_False here, so a
             * failed truth test silently disables C lines — presumably
             * acceptable for traceback cosmetics. */
            use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
            Py_DECREF(use_cline_obj);
        } else {
            use_cline = NULL;
        }
        PyErr_Restore(ptype, pvalue, ptraceback);
    }
    if (!use_cline) {
        /* attribute missing: default to off and persist that default */
        c_line = 0;
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (PyObject_Not(use_cline) != 0) {
        c_line = 0;
    }
    return c_line;
#endif
}
/* CodeObjectCache */
/* Binary search over `entries` (sorted ascending by code_line): returns
 * the index of the entry with code_line, or the insertion index that
 * keeps the array sorted (== count when code_line is past the end).
 * NOTE(review): when the early-exit and the loop are both skipped,
 * entries[0] is read — callers only pass count >= 1 with non-NULL
 * entries, which this relies on. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up the cached PyCodeObject for a Python line number.  Returns a
 * NEW reference, or NULL when the line is 0, the cache is empty, or no
 * entry matches. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* bisect may return an insertion point: verify it is an exact hit */
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cached PyCodeObject for `code_line`, keeping the
 * entries array sorted by line.  The cache takes its own reference to
 * code_object.  Capacity grows in steps of 64; allocation failures are
 * silently ignored (the cache is an optimisation only). */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    /* first insertion: allocate the initial 64-entry array */
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* exact hit: swap in the new object, dropping the old reference */
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* shift the tail up one slot to open position `pos` */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal, empty PyCodeObject whose filename, function name and
 * first line number make Python tracebacks point at the original source.
 * When c_line is non-zero, the generated C file name and line are
 * appended to the displayed function name.  Returns a new reference, or
 * NULL with an exception set on failure. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* e.g. "my_func (module.c:1234)" — includes the C location. */
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    /* All code/const/name fields are empty placeholders: the object
     * exists purely to carry location info for traceback display. */
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile, /*PyObject *filename,*/
        py_funcname, /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame for (funcname, filename, py_line) to the
 * traceback of the exception currently being raised.  Code objects are
 * cached globally, keyed by -c_line when a C line is known (negation
 * keeps C-line keys from colliding with Python-line keys), else by
 * py_line. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    if (c_line) {
        /* May remap or suppress the C line depending on runtime settings. */
        c_line = __Pyx_CLineForTraceback(c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code, /*PyCodeObject *code,*/
        __pyx_d, /*PyObject *globals,*/
        0 /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    /* PyTraceBack_Here keeps its own reference to the frame. */
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
/* Print */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Return a BORROWED reference to sys.stdout, or NULL with RuntimeError
 * set if it has been removed from the sys module. */
static PyObject *__Pyx_GetStdout(void) {
    PyObject *f = PySys_GetObject((char *)"stdout");
    if (!f) {
        PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
    }
    return f;
}
/* Python 2 implementation of the `print` statement: write each element
 * of arg_tuple to file `f` (sys.stdout when f is NULL), emulating the
 * interpreter's "softspace" rules for spacing between items.  Returns 0
 * on success, -1 with an exception set on error. */
static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
    int i;
    if (!f) {
        if (!(f = __Pyx_GetStdout()))
            return -1;
    }
    /* __Pyx_GetStdout returns a borrowed reference; own it for the
     * duration of the writes. */
    Py_INCREF(f);
    for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) {
        PyObject* v;
        /* Softspace set by the previous write means: emit a separating
         * space before this item (classic print-statement behaviour). */
        if (PyFile_SoftSpace(f, 1)) {
            if (PyFile_WriteString(" ", f) < 0)
                goto error;
        }
        v = PyTuple_GET_ITEM(arg_tuple, i);
        if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
            goto error;
        if (PyString_Check(v)) {
            /* Mirror CPython: a string ending in whitespace suppresses
             * the softspace flag (no extra space after it). */
            char *s = PyString_AsString(v);
            Py_ssize_t len = PyString_Size(v);
            if (len > 0) {
                switch (s[len-1]) {
                    case ' ': break;
                    case '\f': case '\r': case '\n': case '\t': case '\v':
                        PyFile_SoftSpace(f, 0);
                        break;
                    default:  break;
                }
            }
        }
    }
    if (newline) {
        if (PyFile_WriteString("\n", f) < 0)
            goto error;
        /* A newline resets the softspace state. */
        PyFile_SoftSpace(f, 0);
    }
    Py_DECREF(f);
    return 0;
error:
    Py_DECREF(f);
    return -1;
}
#else
/* Python 3 implementation: delegate to the builtin print() function.
 * `stream` (may be NULL for stdout) becomes the `file=` keyword; when
 * newline is false, `end=" "` is passed.  The no-stream/no-newline
 * kwargs dict is built once and cached in __pyx_print_kwargs.
 * Returns 0 on success, -1 with an exception set on error. */
static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) {
    PyObject* kwargs = 0;
    PyObject* result = 0;
    PyObject* end_string;
    if (unlikely(!__pyx_print)) {
        /* Lazily cache builtins.print on first use. */
        __pyx_print = PyObject_GetAttr(__pyx_b, __pyx_n_s_print);
        if (!__pyx_print)
            return -1;
    }
    if (stream) {
        /* Explicit stream: build a fresh kwargs dict (owned locally). */
        kwargs = PyDict_New();
        if (unlikely(!kwargs))
            return -1;
        if (unlikely(PyDict_SetItem(kwargs, __pyx_n_s_file, stream) < 0))
            goto bad;
        if (!newline) {
            end_string = PyUnicode_FromStringAndSize(" ", 1);
            if (unlikely(!end_string))
                goto bad;
            if (PyDict_SetItem(kwargs, __pyx_n_s_end, end_string) < 0) {
                Py_DECREF(end_string);
                goto bad;
            }
            Py_DECREF(end_string);
        }
    } else if (!newline) {
        /* Default stream, no newline: reuse the cached {'end': ' '} dict. */
        if (unlikely(!__pyx_print_kwargs)) {
            __pyx_print_kwargs = PyDict_New();
            if (unlikely(!__pyx_print_kwargs))
                return -1;
            end_string = PyUnicode_FromStringAndSize(" ", 1);
            if (unlikely(!end_string))
                return -1;
            if (PyDict_SetItem(__pyx_print_kwargs, __pyx_n_s_end, end_string) < 0) {
                Py_DECREF(end_string);
                return -1;
            }
            Py_DECREF(end_string);
        }
        kwargs = __pyx_print_kwargs;
    }
    result = PyObject_Call(__pyx_print, arg_tuple, kwargs);
    /* Only release kwargs if it is the locally-built dict, not the cache. */
    if (unlikely(kwargs) && (kwargs != __pyx_print_kwargs))
        Py_DECREF(kwargs);
    if (!result)
        return -1;
    Py_DECREF(result);
    return 0;
bad:
    if (kwargs != __pyx_print_kwargs)
        Py_XDECREF(kwargs);
    return -1;
}
#endif
/* CIntToPy */
/* Convert a C long to a new Python int object.  The sizeof comparisons
 * are compile-time constants, so the compiler keeps exactly one branch;
 * the generic _PyLong_FromByteArray path survives only on platforms
 * where no direct constructor covers the full range.  (The template is
 * generated for arbitrary source types, hence the tautological
 * `sizeof(long) < sizeof(long)` test.) */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    /* Detect signedness of the source type at compile time. */
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback: hand the raw bytes to CPython in native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPyVerify */
/* Range-check `func_value` (of C type `func_type`) against `target_type`
 * and return it cast; on overflow jump to the local raise_overflow /
 * raise_neg_overflow labels that every expansion site must define.
 * `is_unsigned` must also be in scope at the expansion site. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
/* Variant for func_values that signal a pending Python exception by
 * returning (func_type)-1: propagates that as (target_type)-1. */
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* PrintOne */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Python 2: print a single object to `f` (sys.stdout when NULL) followed
 * by a newline, clearing any pending softspace first.  Returns 0 on
 * success, -1 with an exception set on error. */
static int __Pyx_PrintOne(PyObject* f, PyObject *o) {
    if (!f) {
        if (!(f = __Pyx_GetStdout()))
            return -1;
    }
    /* Own the borrowed stdout reference while writing. */
    Py_INCREF(f);
    if (PyFile_SoftSpace(f, 0)) {
        if (PyFile_WriteString(" ", f) < 0)
            goto error;
    }
    if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0)
        goto error;
    if (PyFile_WriteString("\n", f) < 0)
        goto error;
    Py_DECREF(f);
    return 0;
error:
    Py_DECREF(f);
    return -1;
    /* the line below is just to avoid C compiler
     * warnings about unused functions */
    return __Pyx_Print(f, NULL, 0);
}
#else
/* Python 3: print a single object (plus newline) to `stream`, or to
 * stdout when stream is NULL, by packing it into a 1-tuple and
 * delegating to __Pyx_Print.  Returns 0 on success, -1 on error. */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o) {
    int status;
    PyObject* single = PyTuple_Pack(1, o);
    if (unlikely(!single))
        return -1;
    status = __Pyx_Print(stream, single, 1);
    Py_DECREF(single);
    return status;
}
#endif
/* CIntFromPy */
/* Convert an arbitrary Python object to a C long.
 * Fast paths read small PyLong digit arrays directly (CPython only);
 * otherwise PyLong_AsLong*() is used, with _PyLong_AsByteArray() as the
 * last resort for exotic type widths.  Non-integers are coerced via
 * __int__/__index__ and retried.  Returns (long)-1 with an exception
 * set on overflow or type error. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    /* Py2 small-int fast path. */
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Inline reads of the first few PyLong digits avoid a call. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* Target is unsigned: negative PyLongs must overflow. */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: small positive and negative digit counts. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        /* Last resort: copy the value's raw bytes in native endianness. */
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int at all: coerce via __int__/__index__ and retry. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}
/* CIntFromPy */
/* Convert an arbitrary Python object to a C int.  Structure mirrors
 * __Pyx_PyInt_As_long above: inline PyLong-digit fast paths (CPython
 * only), PyLong_AsLong*() with range verification, _PyLong_AsByteArray()
 * as last resort, and coercion via __int__/__index__ for non-ints.
 * Returns (int)-1 with an exception set on overflow or type error. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    /* Py2 small-int fast path. */
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Inline reads of the first few PyLong digits avoid a call. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (int) 0;
                case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
            /* Target is unsigned: negative PyLongs must overflow. */
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: small positive and negative digit counts. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        /* Last resort: copy the value's raw bytes in native endianness. */
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int at all: coerce via __int__/__index__ and retry. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of `raise type, value, tb`.  Normalizes the
 * (type, value, traceback) triple and installs it as the current
 * exception via __Pyx_ErrRestore (which steals the references).  On
 * argument errors, sets TypeError instead.  `cause` is unused on Py2. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's NormalizeException requires a non-NULL value. */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* `raise instance` form: the instance IS the value, its class
         * the type; a separate value argument is then illegal. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    /* ErrRestore steals all three references. */
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 implementation of `raise ... from cause`.  Accepts either an
 * exception instance or an exception class (instantiated here with
 * `value` as argument(s)), attaches `cause` via PyException_SetCause,
 * sets the exception, and optionally overrides the traceback.  All
 * argument errors become TypeError. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* `raise instance`: a separate value is illegal. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* value is already an instance: if it is an instance of a
             * subclass of `type`, raise it as its own (more derived) class. */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            /* Instantiate the class, passing `value` as args (tuple
             * spread, single arg, or no args). */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        /* `raise X from cause`: normalize cause to an instance (or NULL
         * for explicit `from None`) and attach it. */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* Explicit traceback: swap it into the current exception state. */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
/* Swap the thread state's "currently handled exception" triple with
 * (*type, *value, *tb).  Ownership of the references is transferred in
 * both directions; no refcounts are touched. */
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable fallback: same swap via the public PyErr_{Get,Set}ExcInfo
 * API (which also transfers reference ownership). */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif
/* CoroutineBase */
#include <structmember.h>
#include <frameobject.h>
/* Forward declarations of the generator/coroutine protocol entry points
 * (send/close/throw on a __pyx_CoroutineObject) implemented below. */
static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value);
static PyObject *__Pyx_Coroutine_Close(PyObject *self);
static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args);
/* Drop the sub-iterator a generator was delegating to via `yield from`. */
#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom)
#if 1 || PY_VERSION_HEX < 0x030300B0
/* Extract the StopIteration "return value" from the currently set
 * exception into *pvalue (new reference) and clear the exception.
 * No pending exception, or a bare StopIteration, yields Py_None.
 * Returns 0 on success; returns -1 and leaves the exception set if the
 * pending exception is not a StopIteration. */
static int __Pyx_PyGen_FetchStopIterationValue(PyObject **pvalue) {
    PyObject *et, *ev, *tb;
    PyObject *value = NULL;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&et, &ev, &tb);
    if (!et) {
        /* No exception pending at all: treat as a plain return None. */
        Py_XDECREF(tb);
        Py_XDECREF(ev);
        Py_INCREF(Py_None);
        *pvalue = Py_None;
        return 0;
    }
    if (likely(et == PyExc_StopIteration)) {
        /* Fast paths for the (possibly un-normalized) exception value. */
        if (!ev) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#if PY_VERSION_HEX >= 0x030300A0
        else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) {
            /* Normalized instance: read its .value slot directly. */
            value = ((PyStopIterationObject *)ev)->value;
            Py_INCREF(value);
            Py_DECREF(ev);
        }
#endif
        else if (unlikely(PyTuple_Check(ev))) {
            /* Un-normalized args tuple: first element is the value. */
            if (PyTuple_GET_SIZE(ev) >= 1) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
                value = PyTuple_GET_ITEM(ev, 0);
                Py_INCREF(value);
#else
                value = PySequence_ITEM(ev, 0);
#endif
            } else {
                Py_INCREF(Py_None);
                value = Py_None;
            }
            Py_DECREF(ev);
        }
        else if (!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) {
            /* Un-normalized single argument: it IS the value. */
            value = ev;
        }
        if (likely(value)) {
            Py_XDECREF(tb);
            Py_DECREF(et);
            *pvalue = value;
            return 0;
        }
    } else if (!PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) {
        /* Some other exception: put it back and report failure. */
        __Pyx_ErrRestore(et, ev, tb);
        return -1;
    }
    /* StopIteration subclass (or fast paths missed): normalize fully. */
    PyErr_NormalizeException(&et, &ev, &tb);
    if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) {
        __Pyx_ErrRestore(et, ev, tb);
        return -1;
    }
    Py_XDECREF(tb);
    Py_DECREF(et);
#if PY_VERSION_HEX >= 0x030300A0
    value = ((PyStopIterationObject *)ev)->value;
    Py_INCREF(value);
    Py_DECREF(ev);
#else
    {
        PyObject* args = __Pyx_PyObject_GetAttrStr(ev, __pyx_n_s_args);
        Py_DECREF(ev);
        if (likely(args)) {
            value = PySequence_GetItem(args, 0);
            Py_DECREF(args);
        }
        if (unlikely(!value)) {
            /* Missing/empty args: swallow the secondary error, use None. */
            __Pyx_ErrRestore(NULL, NULL, NULL);
            Py_INCREF(Py_None);
            value = Py_None;
        }
    }
#endif
    *pvalue = value;
    return 0;
}
#endif
static CYTHON_INLINE
void __Pyx_Coroutine_ExceptionClear(__pyx_CoroutineObject *self) {
    /* Release the exception state saved on the generator.  All three
     * slots are detached from the object before any Py_XDECREF runs, so
     * re-entrant code triggered by a destructor never sees a
     * half-cleared triple. */
    PyObject *old_type = self->exc_type;
    PyObject *old_value = self->exc_value;
    PyObject *old_traceback = self->exc_traceback;
    self->exc_type = NULL;
    self->exc_value = NULL;
    self->exc_traceback = NULL;
    Py_XDECREF(old_type);
    Py_XDECREF(old_value);
    Py_XDECREF(old_traceback);
}
static CYTHON_INLINE
int __Pyx_Coroutine_CheckRunning(__pyx_CoroutineObject *gen) {
    /* Guard against re-entrant resumption: a generator frame that is
     * already executing cannot be resumed again.  Returns nonzero with
     * ValueError set if `gen` is running, 0 if it is safe to resume. */
    if (likely(!gen->is_running))
        return 0;
    PyErr_SetString(PyExc_ValueError,
                    "generator already executing");
    return 1;
}
static CYTHON_INLINE
/* Core resume primitive: run the generator body with `value` as the
 * result of the suspended yield.  Enforces the just-started /
 * already-exhausted rules, restores and re-saves the generator's private
 * exception state around the call, and temporarily links the saved
 * traceback's frame into the caller's frame stack so tracebacks show
 * the full call chain.  Returns the yielded object or NULL. */
PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value) {
    PyObject *retval;
    __Pyx_PyThreadState_declare
    assert(!self->is_running);
    if (unlikely(self->resume_label == 0)) {
        /* Not started yet: only None (plain next()) may be sent. */
        if (unlikely(value && value != Py_None)) {
            PyErr_SetString(PyExc_TypeError,
                            "can't send non-None value to a "
                            "just-started generator");
            return NULL;
        }
    }
    if (unlikely(self->resume_label == -1)) {
        /* Already finished: every resume raises StopIteration. */
        PyErr_SetNone(PyExc_StopIteration);
        return NULL;
    }
    __Pyx_PyThreadState_assign
    if (value) {
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
#else
        /* Hook the saved traceback's frame under the current frame so a
         * traceback raised inside the generator shows the caller too.
         * Undone below once the body returns. */
        if (self->exc_traceback) {
            PyTracebackObject *tb = (PyTracebackObject *) self->exc_traceback;
            PyFrameObject *f = tb->tb_frame;
            Py_XINCREF(__pyx_tstate->frame);
            assert(f->f_back == NULL);
            f->f_back = __pyx_tstate->frame;
        }
#endif
        /* Make the generator's saved exception state current. */
        __Pyx_ExceptionSwap(&self->exc_type, &self->exc_value,
                            &self->exc_traceback);
    } else {
        __Pyx_Coroutine_ExceptionClear(self);
    }
    self->is_running = 1;
    retval = self->body((PyObject *) self, value);
    self->is_running = 0;
    if (retval) {
        /* Yielded: stash the (possibly updated) exception state back on
         * the generator and unhook the temporary frame link. */
        __Pyx_ExceptionSwap(&self->exc_type, &self->exc_value,
                            &self->exc_traceback);
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
#else
        if (self->exc_traceback) {
            PyTracebackObject *tb = (PyTracebackObject *) self->exc_traceback;
            PyFrameObject *f = tb->tb_frame;
            Py_CLEAR(f->f_back);
        }
#endif
    } else {
        /* Terminated (return or exception): drop saved state. */
        __Pyx_Coroutine_ExceptionClear(self);
    }
    return retval;
}
static CYTHON_INLINE
PyObject *__Pyx_Coroutine_MethodReturn(PyObject *retval) {
    /* Adapt a SendEx result to the generator-method protocol: a NULL
     * result with no pending exception means normal termination, which
     * send()/throw() must report as a bare StopIteration. */
    if (likely(retval))
        return retval;
    if (!PyErr_Occurred())
        PyErr_SetNone(PyExc_StopIteration);
    return retval;
}
static CYTHON_INLINE
/* The `yield from` sub-iterator just finished: stop delegating, harvest
 * its StopIteration return value, and resume the owning generator with
 * that value as the result of the `yield from` expression. */
PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) {
    PyObject *ret;
    PyObject *val = NULL;
    __Pyx_Coroutine_Undelegate(gen);
    __Pyx_PyGen_FetchStopIterationValue(&val);
    /* NOTE: on a non-StopIteration error, val stays NULL and SendEx
     * resumes with the exception still set. */
    ret = __Pyx_Coroutine_SendEx(gen, val);
    Py_XDECREF(val);
    return ret;
}
/* generator.send(value): if the generator is delegating via
 * `yield from`, forward the value to the sub-iterator (with fast paths
 * for Cython's own generator/coroutine types); otherwise resume the
 * generator body directly.  Returns the next yielded object, or NULL
 * with StopIteration/another exception set. */
static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) {
    PyObject *retval;
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
    PyObject *yf = gen->yieldfrom;
    if (unlikely(__Pyx_Coroutine_CheckRunning(gen)))
        return NULL;
    if (yf) {
        PyObject *ret;
        /* Mark running while the sub-iterator runs to block re-entry. */
        gen->is_running = 1;
#ifdef __Pyx_Generator_USED
        if (__Pyx_Generator_CheckExact(yf)) {
            /* Cython generator: recurse without attribute lookup. */
            ret = __Pyx_Coroutine_Send(yf, value);
        } else
#endif
#ifdef __Pyx_Coroutine_USED
        if (__Pyx_Coroutine_CheckExact(yf)) {
            ret = __Pyx_Coroutine_Send(yf, value);
        } else
#endif
        {
            /* Generic iterator: next() for None, .send(value) otherwise. */
            if (value == Py_None)
                ret = Py_TYPE(yf)->tp_iternext(yf);
            else
                ret = __Pyx_PyObject_CallMethod1(yf, __pyx_n_s_send, value);
        }
        gen->is_running = 0;
        if (likely(ret)) {
            return ret;
        }
        /* Sub-iterator exhausted or failed: resume this generator. */
        retval = __Pyx_Coroutine_FinishDelegation(gen);
    } else {
        retval = __Pyx_Coroutine_SendEx(gen, value);
    }
    return __Pyx_Coroutine_MethodReturn(retval);
}
/* Close the `yield from` sub-iterator `yf`: fast path for Cython's own
 * generator/coroutine types, otherwise call its close() method if it
 * has one (a missing close() is not an error).  Returns 0 on success,
 * -1 if closing raised. */
static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
    PyObject *retval = NULL;
    int err = 0;
#ifdef __Pyx_Generator_USED
    if (__Pyx_Generator_CheckExact(yf)) {
        retval = __Pyx_Coroutine_Close(yf);
        if (!retval)
            return -1;
    } else
#endif
#ifdef __Pyx_Coroutine_USED
    if (__Pyx_Coroutine_CheckExact(yf)) {
        retval = __Pyx_Coroutine_Close(yf);
        if (!retval)
            return -1;
    } else
#endif
    {
        PyObject *meth;
        gen->is_running = 1;
        meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_close);
        if (unlikely(!meth)) {
            /* Only AttributeError (no close method) is silently ignored;
             * anything else is reported as unraisable. */
            if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
                PyErr_WriteUnraisable(yf);
            }
            PyErr_Clear();
        } else {
            retval = PyObject_CallFunction(meth, NULL);
            Py_DECREF(meth);
            if (!retval)
                err = -1;
        }
        gen->is_running = 0;
    }
    Py_XDECREF(retval);
    return err;
}
/* tp_iternext implementation: equivalent to send(None), with a fast path
 * for delegating to another Cython generator via "yield from". */
static PyObject *__Pyx_Generator_Next(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
    PyObject *yf = gen->yieldfrom;
    if (unlikely(__Pyx_Coroutine_CheckRunning(gen)))
        return NULL;
    if (yf) {
        PyObject *ret;
        /* Guard against re-entrant resumption while delegating. */
        gen->is_running = 1;
        #ifdef __Pyx_Generator_USED
        if (__Pyx_Generator_CheckExact(yf)) {
            ret = __Pyx_Generator_Next(yf);
        } else
        #endif
        ret = Py_TYPE(yf)->tp_iternext(yf);
        gen->is_running = 0;
        if (likely(ret)) {
            return ret;
        }
        /* Sub-iterator exhausted or raised: resume this generator. */
        return __Pyx_Coroutine_FinishDelegation(gen);
    }
    return __Pyx_Coroutine_SendEx(gen, Py_None);
}
/* Implementation of .close(): shut down any "yield from" sub-iterator,
 * then throw GeneratorExit into the generator body.  Returns None when the
 * generator exits cleanly (GeneratorExit or StopIteration), NULL with a
 * RuntimeError if it yields another value, NULL on any other exception. */
static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
    PyObject *retval, *raised_exception;
    PyObject *yf = gen->yieldfrom;
    int err = 0;
    if (unlikely(__Pyx_Coroutine_CheckRunning(gen)))
        return NULL;
    if (yf) {
        Py_INCREF(yf);
        err = __Pyx_Coroutine_CloseIter(gen, yf);
        __Pyx_Coroutine_Undelegate(gen);
        Py_DECREF(yf);
    }
    if (err == 0)
        /* Only raise GeneratorExit when closing the sub-iterator worked;
         * otherwise the sub-iterator's own exception propagates below. */
        PyErr_SetNone(PyExc_GeneratorExit);
    retval = __Pyx_Coroutine_SendEx(gen, NULL);
    if (retval) {
        /* The generator yielded instead of exiting: that is an error. */
        Py_DECREF(retval);
        PyErr_SetString(PyExc_RuntimeError,
                        "generator ignored GeneratorExit");
        return NULL;
    }
    raised_exception = PyErr_Occurred();
    if (!raised_exception
        || raised_exception == PyExc_StopIteration
        || raised_exception == PyExc_GeneratorExit
        || PyErr_GivenExceptionMatches(raised_exception, PyExc_GeneratorExit)
        || PyErr_GivenExceptionMatches(raised_exception, PyExc_StopIteration))
    {
        /* Clean-exit paths: swallow the exception and return None. */
        if (raised_exception) PyErr_Clear();
        Py_INCREF(Py_None);
        return Py_None;
    }
    return NULL;
}
/* Implementation of .throw(typ[, val[, tb]]).
 * GeneratorExit is special-cased: the sub-iterator (if any) is closed and
 * the exception is then raised in this generator.  Other exceptions are
 * forwarded to the sub-iterator's throw() when it has one; a sub-iterator
 * without throw() simply gets un-delegated and the exception is raised
 * here instead. */
static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
    PyObject *typ;
    PyObject *tb = NULL;
    PyObject *val = NULL;
    PyObject *yf = gen->yieldfrom;
    if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))
        return NULL;
    if (unlikely(__Pyx_Coroutine_CheckRunning(gen)))
        return NULL;
    if (yf) {
        PyObject *ret;
        Py_INCREF(yf);
        if (PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit)) {
            /* PEP 380: GeneratorExit closes the sub-iterator first. */
            int err = __Pyx_Coroutine_CloseIter(gen, yf);
            Py_DECREF(yf);
            __Pyx_Coroutine_Undelegate(gen);
            if (err < 0)
                return __Pyx_Coroutine_MethodReturn(__Pyx_Coroutine_SendEx(gen, NULL));
            goto throw_here;
        }
        gen->is_running = 1;
        #ifdef __Pyx_Generator_USED
        if (__Pyx_Generator_CheckExact(yf)) {
            /* Fast path: forward directly to another Cython generator. */
            ret = __Pyx_Coroutine_Throw(yf, args);
        } else
        #endif
        #ifdef __Pyx_Coroutine_USED
        if (__Pyx_Coroutine_CheckExact(yf)) {
            ret = __Pyx_Coroutine_Throw(yf, args);
        } else
        #endif
        {
            PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_throw);
            if (unlikely(!meth)) {
                Py_DECREF(yf);
                if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
                    /* Unexpected lookup failure: propagate it. */
                    gen->is_running = 0;
                    return NULL;
                }
                /* No throw() on the sub-iterator: raise here instead. */
                PyErr_Clear();
                __Pyx_Coroutine_Undelegate(gen);
                gen->is_running = 0;
                goto throw_here;
            }
            ret = PyObject_CallObject(meth, args);
            Py_DECREF(meth);
        }
        gen->is_running = 0;
        Py_DECREF(yf);
        if (!ret) {
            /* Sub-iterator terminated: resume this generator. */
            ret = __Pyx_Coroutine_FinishDelegation(gen);
        }
        return __Pyx_Coroutine_MethodReturn(ret);
    }
throw_here:
    __Pyx_Raise(typ, val, tb, NULL);
    return __Pyx_Coroutine_MethodReturn(__Pyx_Coroutine_SendEx(gen, NULL));
}
/* GC tp_traverse slot: report every object reference this coroutine
 * owns so the cycle collector can see it. */
static int __Pyx_Coroutine_traverse(PyObject *self, visitproc visit, void *arg) {
    __pyx_CoroutineObject *coro = (__pyx_CoroutineObject *) self;
    Py_VISIT(coro->closure);
    Py_VISIT(coro->classobj);
    Py_VISIT(coro->yieldfrom);
    Py_VISIT(coro->exc_type);
    Py_VISIT(coro->exc_value);
    Py_VISIT(coro->exc_traceback);
    return 0;
}
/* GC tp_clear slot: drop every owned reference (including the name
 * strings) so reference cycles through this object can be broken. */
static int __Pyx_Coroutine_clear(PyObject *self) {
    __pyx_CoroutineObject *coro = (__pyx_CoroutineObject *) self;
    Py_CLEAR(coro->closure);
    Py_CLEAR(coro->classobj);
    Py_CLEAR(coro->yieldfrom);
    Py_CLEAR(coro->exc_type);
    Py_CLEAR(coro->exc_value);
    Py_CLEAR(coro->exc_traceback);
    Py_CLEAR(coro->gi_name);
    Py_CLEAR(coro->gi_qualname);
    return 0;
}
/* tp_dealloc: untrack from GC, clear weak references, and — when the
 * generator was started but has not finished (resume_label > 0) — run its
 * finalizer first, which may resurrect the object and abort deallocation. */
static void __Pyx_Coroutine_dealloc(PyObject *self) {
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
    PyObject_GC_UnTrack(gen);
    if (gen->gi_weakreflist != NULL)
        PyObject_ClearWeakRefs(self);
    if (gen->resume_label > 0) {
        /* Re-track while the finalizer runs; it executes arbitrary code. */
        PyObject_GC_Track(self);
#if PY_VERSION_HEX >= 0x030400a1
        if (PyObject_CallFinalizerFromDealloc(self))
#else
        Py_TYPE(gen)->tp_del(self);
        if (self->ob_refcnt > 0)
#endif
        {
            /* The finalizer resurrected the object: stop deallocating. */
            return;
        }
        PyObject_GC_UnTrack(self);
    }
    __Pyx_Coroutine_clear(self);
    PyObject_GC_Del(gen);
}
/* tp_del / tp_finalize: invoked when a still-unfinished generator is about
 * to be destroyed.  Calls close() so the generator body receives a
 * GeneratorExit, preserving and restoring any in-flight exception around
 * the call.  On Python < 3.4 this also implements the classic tp_del
 * resurrection protocol by temporarily bumping the reference count. */
static void __Pyx_Coroutine_del(PyObject *self) {
    PyObject *res;
    PyObject *error_type, *error_value, *error_traceback;
    __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
    __Pyx_PyThreadState_declare
    if (gen->resume_label <= 0)
        return ;
#if PY_VERSION_HEX < 0x030400a1
    /* tp_del is entered with refcount 0: resurrect temporarily. */
    assert(self->ob_refcnt == 0);
    self->ob_refcnt = 1;
#endif
    __Pyx_PyThreadState_assign
    /* Save any pending exception before running close()... */
    __Pyx_ErrFetch(&error_type, &error_value, &error_traceback);
    res = __Pyx_Coroutine_Close(self);
    if (res == NULL)
        PyErr_WriteUnraisable(self);
    else
        Py_DECREF(res);
    /* ...and restore it (close() errors were already reported above). */
    __Pyx_ErrRestore(error_type, error_value, error_traceback);
#if PY_VERSION_HEX < 0x030400a1
    /* Undo the temporary resurrection; if the count drops back to zero,
     * the deallocation that called us simply continues. */
    assert(self->ob_refcnt > 0);
    if (--self->ob_refcnt == 0) {
        return;
    }
    {
        /* close() created new references — the object stays alive.
         * _Py_NewReference resets the count, so restore it afterwards. */
        Py_ssize_t refcnt = self->ob_refcnt;
        _Py_NewReference(self);
        self->ob_refcnt = refcnt;
    }
#if CYTHON_COMPILING_IN_CPYTHON
    assert(PyType_IS_GC(self->ob_type) &&
           _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED);
    /* Compensate the extra INCREF done by _Py_NewReference in debug builds. */
    _Py_DEC_REFTOTAL;
#endif
#ifdef COUNT_ALLOCS
    --Py_TYPE(self)->tp_frees;
    --Py_TYPE(self)->tp_allocs;
#endif
#endif
}
/* Getter for __name__: returns gi_name, substituting None when unset. */
static PyObject *
__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self)
{
    PyObject *result = self->gi_name;
    if (unlikely(!result))
        result = Py_None;
    Py_INCREF(result);
    return result;
}
/* Setter for __name__: accepts only str (unicode on Py3, bytes-str on
 * Py2); raises TypeError otherwise.  Returns 0 on success, -1 on error. */
static int
__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value)
{
    PyObject *old;
#if PY_MAJOR_VERSION >= 3
    int ok = (value != NULL) && PyUnicode_Check(value);
#else
    int ok = (value != NULL) && PyString_Check(value);
#endif
    if (unlikely(!ok)) {
        PyErr_SetString(PyExc_TypeError,
                        "__name__ must be set to a string object");
        return -1;
    }
    /* Take the new reference before dropping the old one. */
    old = self->gi_name;
    Py_INCREF(value);
    self->gi_name = value;
    Py_XDECREF(old);
    return 0;
}
/* Getter for __qualname__: returns gi_qualname, substituting None when
 * the slot is empty. */
static PyObject *
__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self)
{
    PyObject *result = self->gi_qualname;
    if (unlikely(!result))
        result = Py_None;
    Py_INCREF(result);
    return result;
}
/* Setter for __qualname__: accepts only str (unicode on Py3, bytes-str on
 * Py2); raises TypeError otherwise.  Returns 0 on success, -1 on error. */
static int
__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value)
{
    PyObject *old;
#if PY_MAJOR_VERSION >= 3
    int ok = (value != NULL) && PyUnicode_Check(value);
#else
    int ok = (value != NULL) && PyString_Check(value);
#endif
    if (unlikely(!ok)) {
        PyErr_SetString(PyExc_TypeError,
                        "__qualname__ must be set to a string object");
        return -1;
    }
    /* Take the new reference before dropping the old one. */
    old = self->gi_qualname;
    Py_INCREF(value);
    self->gi_qualname = value;
    Py_XDECREF(old);
    return 0;
}
/* Allocate and initialise a fresh coroutine/generator object of `type`.
 * New references are taken to closure, name, qualname and module_name;
 * the body function pointer is stored as-is.  Returns NULL (with an
 * exception set by PyObject_GC_New) on allocation failure. */
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
    PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *closure,
    PyObject *name, PyObject *qualname, PyObject *module_name) {
    __pyx_CoroutineObject *op = PyObject_GC_New(__pyx_CoroutineObject, type);
    if (op == NULL)
        return NULL;
    op->body = body;
    Py_XINCREF(closure);
    op->closure = closure;
    /* Freshly created: not running, not started, no delegation, and no
     * stashed exception state. */
    op->is_running = 0;
    op->resume_label = 0;
    op->classobj = NULL;
    op->yieldfrom = NULL;
    op->exc_type = NULL;
    op->exc_value = NULL;
    op->exc_traceback = NULL;
    op->gi_weakreflist = NULL;
    Py_XINCREF(name);
    op->gi_name = name;
    Py_XINCREF(qualname);
    op->gi_qualname = qualname;
    Py_XINCREF(module_name);
    op->gi_modulename = module_name;
    PyObject_GC_Track(op);
    return op;
}
/* PatchModuleWithCoroutine */
/* Execute `py_code` in a fresh namespace that exposes this module's
 * generator/coroutine types as _cython_generator_type and
 * _cython_coroutine_type (Py_None when unused), the target module as
 * _module, and __builtins__.  Failures degrade to a RuntimeWarning rather
 * than an error; returns `module`, or NULL only if issuing the warning
 * itself raised. */
static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) {
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
    int result;
    PyObject *globals, *result_obj;
    globals = PyDict_New(); if (unlikely(!globals)) goto ignore;
    result = PyDict_SetItemString(globals, "_cython_coroutine_type",
#ifdef __Pyx_Coroutine_USED
                                  (PyObject*)__pyx_CoroutineType);
#else
                                  Py_None);
#endif
    if (unlikely(result < 0)) goto ignore;
    result = PyDict_SetItemString(globals, "_cython_generator_type",
#ifdef __Pyx_Generator_USED
                                  (PyObject*)__pyx_GeneratorType);
#else
                                  Py_None);
#endif
    if (unlikely(result < 0)) goto ignore;
    if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore;
    if (unlikely(PyDict_SetItemString(globals, "__builtins__", __pyx_b) < 0)) goto ignore;
    result_obj = PyRun_String(py_code, Py_file_input, globals, globals);
    if (unlikely(!result_obj)) goto ignore;
    Py_DECREF(result_obj);
    Py_DECREF(globals);
    return module;
ignore:
    Py_XDECREF(globals);
    PyErr_WriteUnraisable(module);
    if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) {
        Py_DECREF(module);
        module = NULL;
    }
#else
    /* Silence the unused-parameter warning when neither type is compiled in. */
    py_code++;
#endif
    return module;
}
/* PatchGeneratorABC */
#ifndef CYTHON_REGISTER_ABCS
/* Builds may predefine this to 0 to skip ABC registration entirely. */
#define CYTHON_REGISTER_ABCS 1
#endif
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
static PyObject* __Pyx_patch_abc_module(PyObject *module);
/* Register the Cython generator/coroutine types with the Generator and
 * Coroutine ABCs found in `module` (collections[.abc] or backports_abc),
 * by running a small Python snippet via __Pyx_Coroutine_patch_module().
 * Missing ABCs in the module are silently ignored. */
static PyObject* __Pyx_patch_abc_module(PyObject *module) {
    module = __Pyx_Coroutine_patch_module(
        module, ""
"if _cython_generator_type is not None:\n"
"    try: Generator = _module.Generator\n"
"    except AttributeError: pass\n"
"    else: Generator.register(_cython_generator_type)\n"
"if _cython_coroutine_type is not None:\n"
"    try: Coroutine = _module.Coroutine\n"
"    except AttributeError: pass\n"
"    else: Coroutine.register(_cython_coroutine_type)\n"
    );
    return module;
}
#endif
/* Register the Cython generator/coroutine types with the ABCs in
 * collections.abc (or collections on old Pythons), and additionally with
 * the backports_abc module when it is importable.  Runs at most once per
 * module (guarded by the function-local `abc_patched` flag).  Failures
 * degrade to a RuntimeWarning; returns 0 on success, -1 only when the
 * warning itself raised. */
static int __Pyx_patch_abc(void) {
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
    static int abc_patched = 0;
    if (CYTHON_REGISTER_ABCS && !abc_patched) {
        PyObject *module;
        module = PyImport_ImportModule((PY_VERSION_HEX >= 0x03030000) ? "collections.abc" : "collections");
        if (!module) {
            PyErr_WriteUnraisable(NULL);
            if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning,
                                      ((PY_VERSION_HEX >= 0x03030000) ?
                                       "Cython module failed to register with collections.abc module" :
                                       "Cython module failed to register with collections module"), 1) < 0)) {
                return -1;
            }
        } else {
            module = __Pyx_patch_abc_module(module);
            abc_patched = 1;
            if (unlikely(!module))
                return -1;
            Py_DECREF(module);
        }
        /* backports_abc is optional: ignore a failed import. */
        module = PyImport_ImportModule("backports_abc");
        if (module) {
            module = __Pyx_patch_abc_module(module);
            Py_XDECREF(module);
        }
        if (!module) {
            PyErr_Clear();
        }
    }
#else
    /* Reference the helper so it is not reported as unused. */
    if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL);
#endif
    return 0;
}
/* Generator */
/* Methods exposed on Cython generator objects, mirroring the CPython
 * generator API (send/throw/close). */
static PyMethodDef __pyx_Generator_methods[] = {
    {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
     (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")},
    {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS,
     (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")},
    {"close", (PyCFunction) __Pyx_Coroutine_Close, METH_NOARGS,
     (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")},
    {0, 0, 0, 0}
};
/* Read-only struct members exposed as attributes (gi_running maps onto
 * the is_running flag). */
static PyMemberDef __pyx_Generator_memberlist[] = {
    {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
    {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
     (char*) PyDoc_STR("object being iterated by 'yield from', or None")},
    {0, 0, 0, 0, 0}
};
/* __name__ / __qualname__ are writable via dedicated getters/setters. */
static PyGetSetDef __pyx_Generator_getsets[] = {
    {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
     (char*) PyDoc_STR("name of the generator"), 0},
    {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
     (char*) PyDoc_STR("qualified name of the generator"), 0},
    {0, 0, 0, 0, 0}
};
/* Statically initialised type object for Cython generators.  Slots that
 * cannot be set statically (tp_getattro, tp_iter) are filled in later by
 * __pyx_Generator_init(). */
static PyTypeObject __pyx_GeneratorType_type = {
    PyVarObject_HEAD_INIT(0, 0)
    "generator",                              /* tp_name */
    sizeof(__pyx_CoroutineObject),            /* tp_basicsize */
    0,                                        /* tp_itemsize */
    (destructor) __Pyx_Coroutine_dealloc,     /* tp_dealloc */
    0,                                        /* tp_print */
    0,                                        /* tp_getattr */
    0,                                        /* tp_setattr */
    0,                                        /* tp_compare / tp_as_async */
    0,                                        /* tp_repr */
    0,                                        /* tp_as_number */
    0,                                        /* tp_as_sequence */
    0,                                        /* tp_as_mapping */
    0,                                        /* tp_hash */
    0,                                        /* tp_call */
    0,                                        /* tp_str */
    0,                                        /* tp_getattro (set in __pyx_Generator_init) */
    0,                                        /* tp_setattro */
    0,                                        /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE,  /* tp_flags */
    0,                                        /* tp_doc */
    (traverseproc) __Pyx_Coroutine_traverse,  /* tp_traverse */
    0,                                        /* tp_clear */
    0,                                        /* tp_richcompare */
    offsetof(__pyx_CoroutineObject, gi_weakreflist),  /* tp_weaklistoffset */
    0,                                        /* tp_iter (set in __pyx_Generator_init) */
    (iternextfunc) __Pyx_Generator_Next,      /* tp_iternext */
    __pyx_Generator_methods,                  /* tp_methods */
    __pyx_Generator_memberlist,               /* tp_members */
    __pyx_Generator_getsets,                  /* tp_getset */
    0,                                        /* tp_base */
    0,                                        /* tp_dict */
    0,                                        /* tp_descr_get */
    0,                                        /* tp_descr_set */
    0,                                        /* tp_dictoffset */
    0,                                        /* tp_init */
    0,                                        /* tp_alloc */
    0,                                        /* tp_new */
    0,                                        /* tp_free */
    0,                                        /* tp_is_gc */
    0,                                        /* tp_bases */
    0,                                        /* tp_mro */
    0,                                        /* tp_cache */
    0,                                        /* tp_subclasses */
    0,                                        /* tp_weaklist */
#if PY_VERSION_HEX >= 0x030400a1
    0,                                        /* tp_del (finalization uses tp_finalize) */
#else
    __Pyx_Coroutine_del,                      /* tp_del */
#endif
    0,                                        /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
    __Pyx_Coroutine_del,                      /* tp_finalize */
#endif
};
static int __pyx_Generator_init(void) {
__pyx_GeneratorType_type.tp_getattro = PyObject_GenericGetAttr;
__pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
__pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
if (unlikely(!__pyx_GeneratorType)) {
return -1;
}
return 0;
}
/* CheckBinaryVersion */
/* Issue a warning when the Python X.Y version this module was compiled
 * against differs from the running interpreter.  Only the characters at
 * positions 0 (major) and 2 (first digit of minor) are compared, and the
 * 4-byte buffers truncate two-digit minor versions — so e.g. "3.1" and
 * "3.10" would compare equal; a known limitation of this check. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        /* Warning, not error: mismatched builds often still work. */
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* InitStrings */
/* Construct (and optionally intern) every string constant in the module's
 * string table `t`, storing each resulting object through the entry's
 * `p` slot.  Entry flags select unicode vs. str vs. bytes and the source
 * encoding; `n - 1` strips the trailing NUL from the stored length.
 * Each object is pre-hashed so the hash is cached; a hash failure is
 * deliberately ignored.  Returns 0 on success, -1 on error. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        if (PyObject_Hash(*t->p) == -1)
            PyErr_Clear();
        ++t;
    }
    return 0;
}
/* Build a unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const Py_ssize_t len = (Py_ssize_t) strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, len);
}
/* Convenience wrapper around __Pyx_PyObject_AsStringAndSize for callers
 * that do not need the length. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t unused_length;
    return __Pyx_PyObject_AsStringAndSize(o, &unused_length);
}
/* Borrow a C string pointer (and its length) from a Python object.
 * Depending on build options, unicode objects are served via the default
 * encoding (with an optional pure-ASCII check), bytearray objects via
 * their internal buffer, and everything else through
 * PyBytes_AsStringAndSize.  Returns NULL with an exception set on error;
 * the returned pointer borrows from `o` and is only valid while `o` lives. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        char* defenc_c;
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* Reject non-ASCII bytes: PyUnicode_AsASCIIString is invoked
             * purely for its exception side effect, result discarded. */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        /* PEP 393 builds: make sure the compact representation exists. */
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            /* Called for the UnicodeEncodeError side effect only. */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth test with fast paths for the True/False/None singletons; all
 * other objects go through the generic PyObject_IsTrue protocol. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if ((x == Py_False) | (x == Py_None)) return 0;
    return PyObject_IsTrue(x);
}
/* Coerce an arbitrary object to a Python int (int or long on Py2),
 * mirroring int(x) semantics via the nb_int/nb_long slots.  Exact ints
 * are returned unchanged with a new reference.  Raises TypeError when the
 * object is not convertible, or when __int__/__long__ returns a
 * non-integer.  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
    PyNumberMethods *m;
#endif
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
#else
    res = PyNumber_Int(x);
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            /* "%.4s" conveniently prints either "int" or "long". */
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
/* Convert a Python object to a Py_ssize_t for use as an index.
 * Fast paths: exact Py2 int, and exact long with few internal digits
 * (read via the PyLong internals when enabled); everything else goes
 * through PyNumber_Index().  Returns -1 with an exception set on error
 * (callers must check PyErr_Occurred() since -1 is also a valid value). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  Py_ssize_t ival;
  PyObject *x;
#if PY_MAJOR_VERSION < 3
  if (likely(PyInt_CheckExact(b))) {
    if (sizeof(Py_ssize_t) >= sizeof(long))
      return PyInt_AS_LONG(b);
    else
      /* BUG FIX: the original passed the uninitialized pointer `x` here;
       * the value being converted is the argument `b`. */
      return PyInt_AsSsize_t(b);
  }
#endif
  if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
    const digit* digits = ((PyLongObject*)b)->ob_digit;
    const Py_ssize_t size = Py_SIZE(b);
    /* |ob_size| gives the digit count; its sign gives the value's sign. */
    if (likely(__Pyx_sst_abs(size) <= 1)) {
      ival = likely(size) ? digits[0] : 0;
      if (size == -1) ival = -ival;
      return ival;
    } else {
      switch (size) {
        case 2:
          if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
            return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
        case -2:
          if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
            return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
        case 3:
          if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
            return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
        case -3:
          if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
            return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
        case 4:
          if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
            return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
        case -4:
          if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
            return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
          }
          break;
      }
    }
#endif
    return PyLong_AsSsize_t(b);
  }
  /* Generic path: __index__ protocol, then convert the result. */
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyInt_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
/* Build a Python int from a size_t (PyInt_FromSize_t is mapped to the
 * appropriate CPython call by the Cython compatibility macros). */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
| 42.474024 | 542 | 0.672071 | [
"object",
"shape",
"model"
] |
a951e79132ad9c9fb423a5a6361b4d6939bd9a9f | 198,729 | c | C | src/plugins/preauth/pkinit/pkinit_crypto_openssl.c | Bhanuprakash-ch/kerberos | bb3c878d5034210c656a97562065612611c5a6d2 | [
"Apache-2.0"
] | 2 | 2018-01-09T18:23:08.000Z | 2018-07-24T23:14:15.000Z | src/plugins/preauth/pkinit/pkinit_crypto_openssl.c | Bhanuprakash-ch/kerberos | bb3c878d5034210c656a97562065612611c5a6d2 | [
"Apache-2.0"
] | null | null | null | src/plugins/preauth/pkinit/pkinit_crypto_openssl.c | Bhanuprakash-ch/kerberos | bb3c878d5034210c656a97562065612611c5a6d2 | [
"Apache-2.0"
] | 3 | 2017-03-21T18:34:02.000Z | 2020-01-22T19:11:53.000Z | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* COPYRIGHT (C) 2006,2007
* THE REGENTS OF THE UNIVERSITY OF MICHIGAN
* ALL RIGHTS RESERVED
*
* Permission is granted to use, copy, create derivative works
* and redistribute this software and such derivative works
* for any purpose, so long as the name of The University of
* Michigan is not used in any advertising or publicity
* pertaining to the use of distribution of this software
* without specific, written prior authorization. If the
* above copyright notice or any other identification of the
* University of Michigan is included in any copy of any
* portion of this software, then the disclaimer below must
* also be included.
*
* THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
* FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
* PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
* MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
* WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
* REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
* FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
* OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
* IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES.
*/
#include "pkinit_crypto_openssl.h"
#include "k5-buf.h"
#include <dlfcn.h>
#include <unistd.h>
#include <dirent.h>
#include <arpa/inet.h>
static krb5_error_code pkinit_init_pkinit_oids(pkinit_plg_crypto_context );
static void pkinit_fini_pkinit_oids(pkinit_plg_crypto_context );
static krb5_error_code pkinit_init_dh_params(pkinit_plg_crypto_context );
static void pkinit_fini_dh_params(pkinit_plg_crypto_context );
static krb5_error_code pkinit_init_certs(pkinit_identity_crypto_context ctx);
static void pkinit_fini_certs(pkinit_identity_crypto_context ctx);
static krb5_error_code pkinit_init_pkcs11(pkinit_identity_crypto_context ctx);
static void pkinit_fini_pkcs11(pkinit_identity_crypto_context ctx);
static krb5_error_code pkinit_encode_dh_params
(BIGNUM *, BIGNUM *, BIGNUM *, unsigned char **, unsigned int *);
static DH *pkinit_decode_dh_params
(DH **, unsigned char **, unsigned int );
static int pkinit_check_dh_params(DH *dh1, DH *dh2);
static krb5_error_code pkinit_sign_data
(krb5_context context, pkinit_identity_crypto_context cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **sig, unsigned int *sig_len);
static krb5_error_code create_signature
(unsigned char **, unsigned int *, unsigned char *, unsigned int,
EVP_PKEY *pkey);
static krb5_error_code pkinit_decode_data
(krb5_context context, pkinit_identity_crypto_context cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **decoded, unsigned int *decoded_len);
static krb5_error_code decode_data
(unsigned char **, unsigned int *, unsigned char *, unsigned int,
EVP_PKEY *pkey, X509 *cert);
#ifdef DEBUG_DH
static void print_dh(DH *, char *);
static void print_pubkey(BIGNUM *, char *);
#endif
static int prepare_enc_data
(unsigned char *indata, int indata_len, unsigned char **outdata,
int *outdata_len);
static int openssl_callback (int, X509_STORE_CTX *);
static int openssl_callback_ignore_crls (int, X509_STORE_CTX *);
static int pkcs7_decrypt
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
PKCS7 *p7, BIO *bio);
static BIO * pkcs7_dataDecode
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
PKCS7 *p7);
static ASN1_OBJECT * pkinit_pkcs7type2oid
(pkinit_plg_crypto_context plg_cryptoctx, int pkcs7_type);
static krb5_error_code pkinit_create_sequence_of_principal_identifiers
(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_crypto_context id_cryptoctx,
int type, krb5_pa_data ***e_data_out);
#ifndef WITHOUT_PKCS11
static krb5_error_code pkinit_find_private_key
(pkinit_identity_crypto_context, CK_ATTRIBUTE_TYPE usage,
CK_OBJECT_HANDLE *objp);
static krb5_error_code pkinit_login
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
CK_TOKEN_INFO *tip, const char *password);
static krb5_error_code pkinit_open_session
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx);
static void * pkinit_C_LoadModule(const char *modname, CK_FUNCTION_LIST_PTR_PTR p11p);
static CK_RV pkinit_C_UnloadModule(void *handle);
#ifdef SILLYDECRYPT
CK_RV pkinit_C_Decrypt
(pkinit_identity_crypto_context id_cryptoctx,
CK_BYTE_PTR pEncryptedData, CK_ULONG ulEncryptedDataLen,
CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen);
#endif
static krb5_error_code pkinit_sign_data_pkcs11
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **sig, unsigned int *sig_len);
static krb5_error_code pkinit_decode_data_pkcs11
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **decoded_data, unsigned int *decoded_data_len);
#endif /* WITHOUT_PKCS11 */
static krb5_error_code pkinit_sign_data_fs
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **sig, unsigned int *sig_len);
static krb5_error_code pkinit_decode_data_fs
(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
unsigned char *data, unsigned int data_len,
unsigned char **decoded_data, unsigned int *decoded_data_len);
static krb5_error_code
create_krb5_invalidCertificates(krb5_context context,
pkinit_plg_crypto_context plg_cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_crypto_context id_cryptoctx,
krb5_external_principal_identifier *** ids);
static krb5_error_code
create_identifiers_from_stack(STACK_OF(X509) *sk,
krb5_external_principal_identifier *** ids);
static int
wrap_signeddata(unsigned char *data, unsigned int data_len,
unsigned char **out, unsigned int *out_len);
static char *
pkinit_pkcs11_code_to_text(int err);
#ifdef HAVE_OPENSSL_CMS
/* Use CMS support present in OpenSSL. */
#include <openssl/cms.h>
#define pkinit_CMS_get0_content_signed(_cms) CMS_get0_content(_cms)
#define pkinit_CMS_get0_content_data(_cms) CMS_get0_content(_cms)
#define pkinit_CMS_free1_crls(_sk_x509crl) \
sk_X509_CRL_pop_free((_sk_x509crl), X509_CRL_free)
#define pkinit_CMS_free1_certs(_sk_x509) \
sk_X509_pop_free((_sk_x509), X509_free)
#define pkinit_CMS_SignerInfo_get_cert(_cms,_si,_x509_pp) \
CMS_SignerInfo_get0_algs(_si,NULL,_x509_pp,NULL,NULL)
#else
/* Fake up CMS support using PKCS7. */
#define pkinit_CMS_free1_crls(_stack_of_x509crls) /* Don't free these */
#define pkinit_CMS_free1_certs(_stack_of_x509certs) /* Don't free these */
#define CMS_NO_SIGNER_CERT_VERIFY PKCS7_NOVERIFY
#define CMS_NOATTR PKCS7_NOATTR
#define CMS_ContentInfo PKCS7
#define CMS_SignerInfo PKCS7_SIGNER_INFO
#define d2i_CMS_ContentInfo d2i_PKCS7
#define CMS_get0_type(_p7) ((_p7)->type)
#define pkinit_CMS_get0_content_signed(_p7) (&((_p7)->d.sign->contents->d.other->value.octet_string))
#define pkinit_CMS_get0_content_data(_p7) (&((_p7)->d.other->value.octet_string))
#define CMS_set1_signers_certs(_p7,_stack_of_x509,_uint)
#define CMS_get0_SignerInfos PKCS7_get_signer_info
#define stack_st_CMS_SignerInfo stack_st_PKCS7_SIGNER_INFO
#undef sk_CMS_SignerInfo_value
#define sk_CMS_SignerInfo_value sk_PKCS7_SIGNER_INFO_value
#define CMS_get0_eContentType(_p7) (_p7->d.sign->contents->type)
#define CMS_verify PKCS7_verify
#define CMS_get1_crls(_p7) (_p7->d.sign->crl)
#define CMS_get1_certs(_p7) (_p7->d.sign->cert)
#define CMS_ContentInfo_free(_p7) PKCS7_free(_p7)
#define pkinit_CMS_SignerInfo_get_cert(_p7,_si,_x509_pp) \
(*_x509_pp) = PKCS7_cert_from_signer_info(_p7,_si)
#endif
/* Human-readable descriptions for PKCS#11 (Cryptoki) CKR_* return codes,
 * keyed by numeric code and terminated by a {-1, NULL} sentinel.  Looked
 * up by pkinit_pkcs11_code_to_text() when reporting token errors. */
static struct pkcs11_errstrings {
    short code;
    char *text;
} pkcs11_errstrings[] = {
    { 0x0, "ok" },
    { 0x1, "cancel" },
    { 0x2, "host memory" },
    { 0x3, "slot id invalid" },
    { 0x5, "general error" },
    { 0x6, "function failed" },
    { 0x7, "arguments bad" },
    { 0x8, "no event" },
    { 0x9, "need to create threads" },
    { 0xa, "cant lock" },
    { 0x10, "attribute read only" },
    { 0x11, "attribute sensitive" },
    { 0x12, "attribute type invalid" },
    { 0x13, "attribute value invalid" },
    { 0x20, "data invalid" },
    { 0x21, "data len range" },
    { 0x30, "device error" },
    { 0x31, "device memory" },
    { 0x32, "device removed" },
    { 0x40, "encrypted data invalid" },
    { 0x41, "encrypted data len range" },
    { 0x50, "function canceled" },
    { 0x51, "function not parallel" },
    { 0x54, "function not supported" },
    { 0x60, "key handle invalid" },
    { 0x62, "key size range" },
    { 0x63, "key type inconsistent" },
    { 0x64, "key not needed" },
    { 0x65, "key changed" },
    { 0x66, "key needed" },
    { 0x67, "key indigestible" },
    { 0x68, "key function not permitted" },
    { 0x69, "key not wrappable" },
    { 0x6a, "key unextractable" },
    { 0x70, "mechanism invalid" },
    { 0x71, "mechanism param invalid" },
    { 0x82, "object handle invalid" },
    { 0x90, "operation active" },
    { 0x91, "operation not initialized" },
    { 0xa0, "pin incorrect" },
    { 0xa1, "pin invalid" },
    { 0xa2, "pin len range" },
    { 0xa3, "pin expired" },
    { 0xa4, "pin locked" },
    { 0xb0, "session closed" },
    { 0xb1, "session count" },
    { 0xb3, "session handle invalid" },
    { 0xb4, "session parallel not supported" },
    { 0xb5, "session read only" },
    { 0xb6, "session exists" },
    { 0xb7, "session read only exists" },
    { 0xb8, "session read write so exists" },
    { 0xc0, "signature invalid" },
    { 0xc1, "signature len range" },
    { 0xd0, "template incomplete" },
    { 0xd1, "template inconsistent" },
    { 0xe0, "token not present" },
    { 0xe1, "token not recognized" },
    { 0xe2, "token write protected" },
    { 0xf0, "unwrapping key handle invalid" },
    { 0xf1, "unwrapping key size range" },
    { 0xf2, "unwrapping key type inconsistent" },
    { 0x100, "user already logged in" },
    { 0x101, "user not logged in" },
    { 0x102, "user pin not initialized" },
    { 0x103, "user type invalid" },
    { 0x104, "user another already logged in" },
    { 0x105, "user too many types" },
    { 0x110, "wrapped key invalid" },
    { 0x112, "wrapped key len range" },
    { 0x113, "wrapping key handle invalid" },
    { 0x114, "wrapping key size range" },
    { 0x115, "wrapping key type inconsistent" },
    { 0x120, "random seed not supported" },
    { 0x121, "random no rng" },
    { 0x130, "domain params invalid" },
    { 0x150, "buffer too small" },
    { 0x160, "saved state invalid" },
    { 0x170, "information sensitive" },
    { 0x180, "state unsaveable" },
    { 0x190, "cryptoki not initialized" },
    { 0x191, "cryptoki already initialized" },
    { 0x1a0, "mutex bad" },
    { 0x1a1, "mutex not locked" },
    { 0x200, "function rejected" },
    { -1, NULL }
};
/* DH parameters */
/* 1024-bit well-known Diffie-Hellman modulus; the byte pattern matches
 * the MODP prime of RFC 2409 (Oakley Group 2). */
unsigned char pkinit_1024_dhprime[128] = {
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34,
    0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1,
    0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74,
    0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22,
    0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD,
    0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B,
    0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37,
    0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45,
    0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6,
    0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B,
    0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED,
    0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5,
    0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6,
    0x49, 0x28, 0x66, 0x51, 0xEC, 0xE6, 0x53, 0x81,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
/* 2048-bit well-known Diffie-Hellman modulus; the byte pattern matches
 * the MODP prime of RFC 3526 (group 14). */
unsigned char pkinit_2048_dhprime[2048/8] = {
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34,
    0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1,
    0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74,
    0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22,
    0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD,
    0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B,
    0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37,
    0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45,
    0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6,
    0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B,
    0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED,
    0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5,
    0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6,
    0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D,
    0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05,
    0x98, 0xDA, 0x48, 0x36, 0x1C, 0x55, 0xD3, 0x9A,
    0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F,
    0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96,
    0x1C, 0x62, 0xF3, 0x56, 0x20, 0x85, 0x52, 0xBB,
    0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D,
    0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04,
    0xF1, 0x74, 0x6C, 0x08, 0xCA, 0x18, 0x21, 0x7C,
    0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B,
    0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03,
    0x9B, 0x27, 0x83, 0xA2, 0xEC, 0x07, 0xA2, 0x8F,
    0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9,
    0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18,
    0x39, 0x95, 0x49, 0x7C, 0xEA, 0x95, 0x6A, 0xE5,
    0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10,
    0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAC, 0xAA, 0x68,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
/*
 * 4096-bit Diffie-Hellman modulus, big-endian.  Consumed by
 * pkinit_init_dh_params(), which pairs it with generator g = 2 and
 * q = (p-1)/2.  The value appears to be the well-known 4096-bit MODP
 * group prime (RFC 3526 group 16) -- verify against the RFC before
 * relying on that identification.  Do not edit individual bytes.
 */
unsigned char pkinit_4096_dhprime[4096/8] = {
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
    0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34,
    0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1,
    0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74,
    0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22,
    0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD,
    0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B,
    0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37,
    0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45,
    0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6,
    0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B,
    0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED,
    0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5,
    0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6,
    0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D,
    0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05,
    0x98, 0xDA, 0x48, 0x36, 0x1C, 0x55, 0xD3, 0x9A,
    0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F,
    0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96,
    0x1C, 0x62, 0xF3, 0x56, 0x20, 0x85, 0x52, 0xBB,
    0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D,
    0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04,
    0xF1, 0x74, 0x6C, 0x08, 0xCA, 0x18, 0x21, 0x7C,
    0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B,
    0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03,
    0x9B, 0x27, 0x83, 0xA2, 0xEC, 0x07, 0xA2, 0x8F,
    0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9,
    0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18,
    0x39, 0x95, 0x49, 0x7C, 0xEA, 0x95, 0x6A, 0xE5,
    0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10,
    0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAA, 0xC4, 0x2D,
    0xAD, 0x33, 0x17, 0x0D, 0x04, 0x50, 0x7A, 0x33,
    0xA8, 0x55, 0x21, 0xAB, 0xDF, 0x1C, 0xBA, 0x64,
    0xEC, 0xFB, 0x85, 0x04, 0x58, 0xDB, 0xEF, 0x0A,
    0x8A, 0xEA, 0x71, 0x57, 0x5D, 0x06, 0x0C, 0x7D,
    0xB3, 0x97, 0x0F, 0x85, 0xA6, 0xE1, 0xE4, 0xC7,
    0xAB, 0xF5, 0xAE, 0x8C, 0xDB, 0x09, 0x33, 0xD7,
    0x1E, 0x8C, 0x94, 0xE0, 0x4A, 0x25, 0x61, 0x9D,
    0xCE, 0xE3, 0xD2, 0x26, 0x1A, 0xD2, 0xEE, 0x6B,
    0xF1, 0x2F, 0xFA, 0x06, 0xD9, 0x8A, 0x08, 0x64,
    0xD8, 0x76, 0x02, 0x73, 0x3E, 0xC8, 0x6A, 0x64,
    0x52, 0x1F, 0x2B, 0x18, 0x17, 0x7B, 0x20, 0x0C,
    0xBB, 0xE1, 0x17, 0x57, 0x7A, 0x61, 0x5D, 0x6C,
    0x77, 0x09, 0x88, 0xC0, 0xBA, 0xD9, 0x46, 0xE2,
    0x08, 0xE2, 0x4F, 0xA0, 0x74, 0xE5, 0xAB, 0x31,
    0x43, 0xDB, 0x5B, 0xFC, 0xE0, 0xFD, 0x10, 0x8E,
    0x4B, 0x82, 0xD1, 0x20, 0xA9, 0x21, 0x08, 0x01,
    0x1A, 0x72, 0x3C, 0x12, 0xA7, 0x87, 0xE6, 0xD7,
    0x88, 0x71, 0x9A, 0x10, 0xBD, 0xBA, 0x5B, 0x26,
    0x99, 0xC3, 0x27, 0x18, 0x6A, 0xF4, 0xE2, 0x3C,
    0x1A, 0x94, 0x68, 0x34, 0xB6, 0x15, 0x0B, 0xDA,
    0x25, 0x83, 0xE9, 0xCA, 0x2A, 0xD4, 0x4C, 0xE8,
    0xDB, 0xBB, 0xC2, 0xDB, 0x04, 0xDE, 0x8E, 0xF9,
    0x2E, 0x8E, 0xFC, 0x14, 0x1F, 0xBE, 0xCA, 0xA6,
    0x28, 0x7C, 0x59, 0x47, 0x4E, 0x6B, 0xC0, 0x5D,
    0x99, 0xB2, 0x96, 0x4F, 0xA0, 0x90, 0xC3, 0xA2,
    0x23, 0x3B, 0xA1, 0x86, 0x51, 0x5B, 0xE7, 0xED,
    0x1F, 0x61, 0x29, 0x70, 0xCE, 0xE2, 0xD7, 0xAF,
    0xB8, 0x1B, 0xDD, 0x76, 0x21, 0x70, 0x48, 0x1C,
    0xD0, 0x06, 0x91, 0x27, 0xD5, 0xB0, 0x5A, 0xA9,
    0x93, 0xB4, 0xEA, 0x98, 0x8D, 0x8F, 0xDD, 0xC1,
    0x86, 0xFF, 0xB7, 0xDC, 0x90, 0xA6, 0xC0, 0x8F,
    0x4D, 0xF4, 0x35, 0xC9, 0x34, 0x06, 0x31, 0x99,
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
MAKE_INIT_FUNCTION(pkinit_openssl_init);

/*
 * Allocate and initialize a per-plugin crypto context.
 *
 * Runs the one-time OpenSSL initializer, then allocates a zeroed context
 * and populates its PKINIT OID table and Diffie-Hellman parameter sets.
 *
 * On success, stores the new context in *cryptoctx and returns 0.  On
 * failure, any partially constructed context is released via
 * pkinit_fini_plg_crypto() and a krb5 error code (ENOMEM or an error
 * from a sub-initializer) is returned; *cryptoctx is left untouched.
 */
krb5_error_code
pkinit_init_plg_crypto(pkinit_plg_crypto_context *cryptoctx)
{
    krb5_error_code retval = ENOMEM;
    pkinit_plg_crypto_context ctx = NULL;

    (void)CALL_INIT_FUNCTION(pkinit_openssl_init);

    /* calloc() zero-fills, replacing the old malloc() + memset() pair. */
    ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        goto out;

    pkiDebug("%s: initializing openssl crypto context at %p\n",
             __FUNCTION__, ctx);

    retval = pkinit_init_pkinit_oids(ctx);
    if (retval)
        goto out;

    retval = pkinit_init_dh_params(ctx);
    if (retval)
        goto out;

    *cryptoctx = ctx;

out:
    /* The fini routine tolerates a partially initialized context. */
    if (retval && ctx != NULL)
        pkinit_fini_plg_crypto(ctx);

    return retval;
}
/*
 * Release a plugin crypto context created by pkinit_init_plg_crypto().
 * Safe to call with NULL or with a partially initialized context.
 */
void
pkinit_fini_plg_crypto(pkinit_plg_crypto_context cryptoctx)
{
    pkiDebug("%s: freeing context at %p\n", __FUNCTION__, cryptoctx);

    if (cryptoctx != NULL) {
        /* Tear down the sub-structures, then the context itself. */
        pkinit_fini_pkinit_oids(cryptoctx);
        pkinit_fini_dh_params(cryptoctx);
        free(cryptoctx);
    }
}
/*
 * Allocate and initialize a per-identity crypto context, setting up the
 * certificate slots and (if built in) the PKCS#11 state.
 *
 * On success, stores the new context in *idctx and returns 0.  On
 * failure, frees any partial state via pkinit_fini_identity_crypto()
 * and returns a krb5 error code (ENOMEM or a sub-initializer error);
 * *idctx is left untouched.
 */
krb5_error_code
pkinit_init_identity_crypto(pkinit_identity_crypto_context *idctx)
{
    krb5_error_code retval = ENOMEM;
    pkinit_identity_crypto_context ctx = NULL;

    /*
     * calloc() zero-fills, so every field (including ctx->identity,
     * which the old code also set explicitly) starts out NULL/0.
     */
    ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        goto out;

    retval = pkinit_init_certs(ctx);
    if (retval)
        goto out;

    retval = pkinit_init_pkcs11(ctx);
    if (retval)
        goto out;

    pkiDebug("%s: returning ctx at %p\n", __FUNCTION__, ctx);
    *idctx = ctx;

out:
    /* Flattened from the original nested ifs; same behavior. */
    if (retval && ctx != NULL)
        pkinit_fini_identity_crypto(ctx);

    return retval;
}
/*
 * Release an identity crypto context created by
 * pkinit_init_identity_crypto().  NULL is tolerated.
 */
void
pkinit_fini_identity_crypto(pkinit_identity_crypto_context idctx)
{
    if (idctx == NULL)
        return;

    pkiDebug("%s: freeing ctx at %p\n", __FUNCTION__, idctx);

    /* Deferred-prompt identity list, if one was ever created. */
    if (idctx->deferred_ids != NULL)
        pkinit_free_deferred_ids(idctx->deferred_ids);

    /* Certificate/key state and PKCS#11 state each have their own
     * teardown helper; the identity string is a plain allocation. */
    pkinit_fini_certs(idctx);
    pkinit_fini_pkcs11(idctx);
    free(idctx->identity);
    free(idctx);
}
/*
 * Allocate and initialize a per-request crypto context.  The context
 * starts empty: no DH key (ctx->dh) and no received certificate
 * (ctx->received_cert).
 *
 * On success, stores the new context in *cryptoctx and returns 0; on
 * allocation failure returns ENOMEM and leaves *cryptoctx untouched.
 */
krb5_error_code
pkinit_init_req_crypto(pkinit_req_crypto_context *cryptoctx)
{
    pkinit_req_crypto_context ctx;

    /*
     * calloc() zero-fills, which covers the explicit NULL assignments
     * the old malloc()+memset() version made.  Nothing after this can
     * fail, so the goto/cleanup scaffolding is gone.
     */
    ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        return ENOMEM;

    *cryptoctx = ctx;
    pkiDebug("%s: returning ctx at %p\n", __FUNCTION__, ctx);
    return 0;
}
/*
 * Release a request crypto context created by pkinit_init_req_crypto(),
 * including any DH key and received certificate it holds.  NULL is
 * tolerated.
 */
void
pkinit_fini_req_crypto(pkinit_req_crypto_context req_cryptoctx)
{
    if (req_cryptoctx == NULL)
        return;

    pkiDebug("%s: freeing ctx at %p\n", __FUNCTION__, req_cryptoctx);

    /* The two members are independent; free whichever are present. */
    if (req_cryptoctx->received_cert != NULL)
        X509_free(req_cryptoctx->received_cert);
    if (req_cryptoctx->dh != NULL)
        DH_free(req_cryptoctx->dh);

    free(req_cryptoctx);
}
/*
 * Resolve the ASN.1 OID objects used throughout PKINIT (the id-pkinit-*
 * arcs, the Microsoft smart-card-logon/UPN OIDs, and id-kp-serverAuth)
 * and store them in the plugin context.
 *
 * Returns 0 on success or ENOMEM if any OBJ_txt2obj() call fails; on
 * failure, earlier entries remain set (pkinit_fini_pkinit_oids() frees
 * them all).  Replaces nine copy-pasted stanzas with one table-driven
 * loop; the lookups happen in the same order as before.
 */
static krb5_error_code
pkinit_init_pkinit_oids(pkinit_plg_crypto_context ctx)
{
    /* (destination slot, dotted-decimal OID) pairs, resolved in order. */
    struct {
        ASN1_OBJECT **dest;
        const char *oid;
    } oids[] = {
        { &ctx->id_pkinit_san, "1.3.6.1.5.2.2" },
        { &ctx->id_pkinit_authData, "1.3.6.1.5.2.3.1" },
        { &ctx->id_pkinit_DHKeyData, "1.3.6.1.5.2.3.2" },
        { &ctx->id_pkinit_rkeyData, "1.3.6.1.5.2.3.3" },
        { &ctx->id_pkinit_KPClientAuth, "1.3.6.1.5.2.3.4" },
        { &ctx->id_pkinit_KPKdc, "1.3.6.1.5.2.3.5" },
        { &ctx->id_ms_kp_sc_logon, "1.3.6.1.4.1.311.20.2.2" },
        { &ctx->id_ms_san_upn, "1.3.6.1.4.1.311.20.2.3" },
        { &ctx->id_kp_serverAuth, "1.3.6.1.5.5.7.3.1" },
    };
    size_t i;

    for (i = 0; i < sizeof(oids) / sizeof(oids[0]); i++) {
        /* no_name=1: interpret the text strictly as a numeric OID. */
        *oids[i].dest = OBJ_txt2obj(oids[i].oid, 1);
        if (*oids[i].dest == NULL)
            return ENOMEM;
    }
    return 0;
}
/*
 * Read one PEM-encoded X.509 certificate from the named file.
 *
 * On success, stores the new certificate (caller frees with X509_free())
 * in *retcert and returns 0.  Returns EINVAL on NULL arguments, ENOMEM
 * if the BIO cannot be created, an errno value if the file cannot be
 * opened, or EIO if the PEM decode fails.  *retcert is NULL on failure.
 */
static krb5_error_code
get_cert(char *filename, X509 **retcert)
{
    X509 *cert = NULL;
    BIO *tmp = NULL;
    int code;
    krb5_error_code retval;

    if (filename == NULL || retcert == NULL)
        return EINVAL;
    *retcert = NULL;

    tmp = BIO_new(BIO_s_file());
    if (tmp == NULL)
        return ENOMEM;

    code = BIO_read_filename(tmp, filename);
    if (code == 0) {
        /*
         * BIO_read_filename() does not promise to set errno; if it is
         * still 0 we must not return "success" with *retcert == NULL,
         * so fall back to a generic I/O error.
         */
        retval = errno ? errno : EIO;
        goto cleanup;
    }

    cert = (X509 *) PEM_read_bio_X509(tmp, NULL, NULL, NULL);
    if (cert == NULL) {
        retval = EIO;
        pkiDebug("failed to read certificate from %s\n", filename);
        goto cleanup;
    }
    *retcert = cert;
    retval = 0;

cleanup:
    if (tmp != NULL)
        BIO_free(tmp);
    return retval;
}
/* Closure passed to get_key_cb() (the PEM passphrase callback used by
 * get_key()) so the callback can prompt or fetch a stored password. */
struct get_key_cb_data {
    krb5_context context;                       /* for prompter/error calls */
    pkinit_identity_crypto_context id_cryptoctx; /* prompter + deferral state */
    const char *fsname;      /* identity name recorded for deferred prompts */
    char *filename;          /* key file name, shown in the prompt text */
    const char *password;    /* pre-supplied passphrase, or NULL to prompt */
};
/*
 * PEM passphrase callback handed to PEM_read_bio_PrivateKey() by
 * get_key().  Fills buf (capacity 'size') with the passphrase and
 * returns its length, or -1 on failure/deferral.  Three cases:
 *   - deferral mode: record the identity name for a responder callback
 *     and fail the read for now;
 *   - no stored password: prompt interactively via the krb5 prompter;
 *   - stored password: copy it into buf.
 */
static int
get_key_cb(char *buf, int size, int rwflag, void *userdata)
{
    struct get_key_cb_data *data = userdata;
    pkinit_identity_crypto_context id_cryptoctx;
    krb5_data rdat;
    krb5_prompt kprompt;
    krb5_prompt_type prompt_type;
    krb5_error_code retval;
    char *prompt;

    if (data->id_cryptoctx->defer_id_prompt) {
        /* Supply the identity name to be passed to a responder callback. */
        pkinit_set_deferred_id(&data->id_cryptoctx->deferred_ids,
                               data->fsname, 0, NULL);
        return -1;
    }
    if (data->password == NULL) {
        /* We don't already have a password to use, so prompt for one. */
        if (data->id_cryptoctx->prompter == NULL)
            return -1;
        if (asprintf(&prompt, "%s %s", _("Pass phrase for"),
                     data->filename) < 0)
            return -1;
        /* The reply buffer is the callback's own output buffer; the
         * prompter is expected to update rdat.length to the reply size. */
        rdat.data = buf;
        rdat.length = size;
        kprompt.prompt = prompt;
        kprompt.hidden = 1;
        kprompt.reply = &rdat;
        prompt_type = KRB5_PROMPT_TYPE_PREAUTH;

        /* PROMPTER_INVOCATION */
        k5int_set_prompt_types(data->context, &prompt_type);
        id_cryptoctx = data->id_cryptoctx;
        retval = (data->id_cryptoctx->prompter)(data->context,
                                                id_cryptoctx->prompter_data,
                                                NULL, NULL, 1, &kprompt);
        /* Clear the prompt-type override regardless of the outcome. */
        k5int_set_prompt_types(data->context, 0);
        free(prompt);
        if (retval != 0)
            return -1;
    } else {
        /* Just use the already-supplied password. */
        rdat.length = strlen(data->password);
        if ((int)rdat.length >= size)
            return -1;          /* too long for the callback buffer */
        snprintf(buf, size, "%s", data->password);
    }
    return (int)rdat.length;
}
/*
 * Read a PEM-encoded private key from filename, decrypting it with
 * get_key_cb() (which prompts, uses the supplied password, or records a
 * deferred-identity entry for fsname).
 *
 * On success, stores the key (caller frees with EVP_PKEY_free()) in
 * *retkey and returns 0.  In deferred-prompt mode a NULL *retkey with
 * retval 0 is possible.  Returns EINVAL on NULL arguments, ENOMEM if
 * the BIO cannot be created, an errno value if the file cannot be
 * opened, or EIO if the key cannot be decoded.
 */
static krb5_error_code
get_key(krb5_context context, pkinit_identity_crypto_context id_cryptoctx,
        char *filename, const char *fsname, EVP_PKEY **retkey,
        const char *password)
{
    EVP_PKEY *pkey = NULL;
    BIO *tmp = NULL;
    struct get_key_cb_data cb_data;
    int code;
    krb5_error_code retval;

    if (filename == NULL || retkey == NULL)
        return EINVAL;

    tmp = BIO_new(BIO_s_file());
    if (tmp == NULL)
        return ENOMEM;

    code = BIO_read_filename(tmp, filename);
    if (code == 0) {
        /*
         * BIO_read_filename() does not promise to set errno; don't let
         * a stale zero errno turn this failure into a bogus success
         * (same fix as get_cert()).
         */
        retval = errno ? errno : EIO;
        goto cleanup;
    }
    cb_data.context = context;
    cb_data.id_cryptoctx = id_cryptoctx;
    cb_data.filename = filename;
    cb_data.fsname = fsname;
    cb_data.password = password;
    pkey = PEM_read_bio_PrivateKey(tmp, NULL, get_key_cb, &cb_data);
    if (pkey == NULL && !id_cryptoctx->defer_id_prompt) {
        retval = EIO;
        pkiDebug("failed to read private key from %s\n", filename);
        goto cleanup;
    }
    *retkey = pkey;
    retval = 0;

cleanup:
    if (tmp != NULL)
        BIO_free(tmp);
    return retval;
}
/*
 * Free every OID object created by pkinit_init_pkinit_oids().
 * ASN1_OBJECT_free() accepts NULL, so a partially initialized context
 * is fine.  NULL ctx is tolerated.
 */
static void
pkinit_fini_pkinit_oids(pkinit_plg_crypto_context ctx)
{
    size_t i;

    if (ctx == NULL)
        return;

    {
        /* Same nine objects the init routine fills, freed in a loop. */
        ASN1_OBJECT *objs[] = {
            ctx->id_pkinit_san,
            ctx->id_pkinit_authData,
            ctx->id_pkinit_DHKeyData,
            ctx->id_pkinit_rkeyData,
            ctx->id_pkinit_KPClientAuth,
            ctx->id_pkinit_KPKdc,
            ctx->id_ms_kp_sc_logon,
            ctx->id_ms_san_upn,
            ctx->id_kp_serverAuth,
        };

        for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
            ASN1_OBJECT_free(objs[i]);
    }
}
static krb5_error_code
pkinit_init_dh_params(pkinit_plg_crypto_context plgctx)
{
krb5_error_code retval = ENOMEM;
plgctx->dh_1024 = DH_new();
if (plgctx->dh_1024 == NULL)
goto cleanup;
plgctx->dh_1024->p = BN_bin2bn(pkinit_1024_dhprime,
sizeof(pkinit_1024_dhprime), NULL);
if ((plgctx->dh_1024->g = BN_new()) == NULL ||
(plgctx->dh_1024->q = BN_new()) == NULL)
goto cleanup;
BN_set_word(plgctx->dh_1024->g, DH_GENERATOR_2);
BN_rshift1(plgctx->dh_1024->q, plgctx->dh_1024->p);
plgctx->dh_2048 = DH_new();
if (plgctx->dh_2048 == NULL)
goto cleanup;
plgctx->dh_2048->p = BN_bin2bn(pkinit_2048_dhprime,
sizeof(pkinit_2048_dhprime), NULL);
if ((plgctx->dh_2048->g = BN_new()) == NULL ||
(plgctx->dh_2048->q = BN_new()) == NULL)
goto cleanup;
BN_set_word(plgctx->dh_2048->g, DH_GENERATOR_2);
BN_rshift1(plgctx->dh_2048->q, plgctx->dh_2048->p);
plgctx->dh_4096 = DH_new();
if (plgctx->dh_4096 == NULL)
goto cleanup;
plgctx->dh_4096->p = BN_bin2bn(pkinit_4096_dhprime,
sizeof(pkinit_4096_dhprime), NULL);
if ((plgctx->dh_4096->g = BN_new()) == NULL ||
(plgctx->dh_4096->q = BN_new()) == NULL)
goto cleanup;
BN_set_word(plgctx->dh_4096->g, DH_GENERATOR_2);
BN_rshift1(plgctx->dh_4096->q, plgctx->dh_4096->p);
retval = 0;
cleanup:
if (retval)
pkinit_fini_dh_params(plgctx);
return retval;
}
/*
 * Free the plugin context's DH parameter sets and reset the pointers so
 * the routine is safe to call more than once (and on a partially
 * initialized context).
 */
static void
pkinit_fini_dh_params(pkinit_plg_crypto_context plgctx)
{
    DH **slots[3];
    size_t i;

    slots[0] = &plgctx->dh_1024;
    slots[1] = &plgctx->dh_2048;
    slots[2] = &plgctx->dh_4096;

    for (i = 0; i < 3; i++) {
        if (*slots[i] != NULL)
            DH_free(*slots[i]);
        *slots[i] = NULL;
    }
}
/*
 * Reset the certificate-related fields of an identity context to the
 * "nothing loaded" state.  Cannot fail; always returns 0 (the
 * krb5_error_code return type matches its sibling initializers).
 */
static krb5_error_code
pkinit_init_certs(pkinit_identity_crypto_context ctx)
{
    int i;

    /* Clear every credential slot, then the cached cert/key state. */
    for (i = 0; i < MAX_CREDS_ALLOWED; i++)
        ctx->creds[i] = NULL;

    ctx->my_certs = NULL;
    ctx->cert_index = 0;
    ctx->my_key = NULL;
    ctx->trustedCAs = NULL;
    ctx->intermediateCAs = NULL;
    ctx->revoked = NULL;

    return 0;
}
/*
 * Free the certificate stacks, private key, and CRL stack owned by an
 * identity context.  Each member is independent and freed only when
 * present.  NULL ctx is tolerated.
 */
static void
pkinit_fini_certs(pkinit_identity_crypto_context ctx)
{
    if (ctx == NULL)
        return;

    if (ctx->revoked != NULL)
        sk_X509_CRL_pop_free(ctx->revoked, X509_CRL_free);
    if (ctx->intermediateCAs != NULL)
        sk_X509_pop_free(ctx->intermediateCAs, X509_free);
    if (ctx->trustedCAs != NULL)
        sk_X509_pop_free(ctx->trustedCAs, X509_free);
    if (ctx->my_key != NULL)
        EVP_PKEY_free(ctx->my_key);
    if (ctx->my_certs != NULL)
        sk_X509_pop_free(ctx->my_certs, X509_free);
}
/*
 * Reset the PKCS#11-related fields of an identity context.  When
 * PKCS#11 support is compiled in, the module name defaults to
 * PKCS11_MODNAME and all handles start closed/invalid.
 *
 * Returns 0 on success, or ENOMEM if duplicating the default module
 * name fails.
 */
static krb5_error_code
pkinit_init_pkcs11(pkinit_identity_crypto_context ctx)
{
#ifndef WITHOUT_PKCS11
    ctx->p11_module_name = strdup(PKCS11_MODNAME);
    if (ctx->p11_module_name == NULL)
        return ENOMEM;

    /* No module loaded, no slot chosen, no session open yet. */
    ctx->p11_module = NULL;
    ctx->slotid = PK_NOSLOT;
    ctx->token_label = NULL;
    ctx->cert_label = NULL;
    ctx->session = CK_INVALID_HANDLE;
    ctx->p11 = NULL;
#endif
    ctx->pkcs11_method = 0;
    return 0;
}
/*
 * Tear down the PKCS#11 state of an identity context: close the open
 * session, finalize the library, unload the module, then free the owned
 * strings/buffers.  The order matters -- the session must be closed
 * before C_Finalize(), and the module unloaded only after finalizing.
 * NULL ctx is tolerated.  No-op when built WITHOUT_PKCS11.
 */
static void
pkinit_fini_pkcs11(pkinit_identity_crypto_context ctx)
{
#ifndef WITHOUT_PKCS11
    if (ctx == NULL)
        return;

    if (ctx->p11 != NULL) {
        if (ctx->session != CK_INVALID_HANDLE) {
            ctx->p11->C_CloseSession(ctx->session);
            ctx->session = CK_INVALID_HANDLE;
        }
        ctx->p11->C_Finalize(NULL_PTR);
        ctx->p11 = NULL;
    }
    if (ctx->p11_module != NULL) {
        pkinit_C_UnloadModule(ctx->p11_module);
        ctx->p11_module = NULL;
    }
    free(ctx->p11_module_name);
    free(ctx->token_label);
    /* cert_id is populated elsewhere (not by pkinit_init_pkcs11());
     * free() handles the NULL case when it was never set. */
    free(ctx->cert_id);
    free(ctx->cert_label);
#endif
}
/*
 * Record the prompter callback and its opaque data in the identity
 * context for later passphrase/PIN prompting.  Always returns 0.
 */
krb5_error_code
pkinit_identity_set_prompter(pkinit_identity_crypto_context id_cryptoctx,
                             krb5_prompter_fct prompter,
                             void *prompter_data)
{
    id_cryptoctx->prompter_data = prompter_data;
    id_cryptoctx->prompter = prompter;
    return 0;
}
/*
 * Create a CMS ContentInfo of type oid containing the octet string in data.
 *
 * On success, stores the new PKCS7 (which owns a copy of data) in
 * *p7_out and returns 0; returns ENOMEM on any allocation failure, with
 * all intermediate objects freed.  The context parameter is unused.
 */
static krb5_error_code
create_contentinfo(krb5_context context, ASN1_OBJECT *oid,
                   unsigned char *data, size_t data_len, PKCS7 **p7_out)
{
    PKCS7 *p7 = NULL;
    ASN1_OCTET_STRING *ostr = NULL;

    *p7_out = NULL;

    /* ASN1_OCTET_STRING_set() copies data, so the caller keeps its buffer. */
    ostr = ASN1_OCTET_STRING_new();
    if (ostr == NULL)
        goto oom;
    if (!ASN1_OCTET_STRING_set(ostr, (unsigned char *)data, data_len))
        goto oom;

    p7 = PKCS7_new();
    if (p7 == NULL)
        goto oom;
    p7->type = OBJ_dup(oid);
    if (p7->type == NULL)
        goto oom;

    if (OBJ_obj2nid(oid) == NID_pkcs7_data) {
        /* Draft 9 uses id-pkcs7-data for signed data.  For this type OpenSSL
         * expects an octet string in d.data. */
        p7->d.data = ostr;
    } else {
        p7->d.other = ASN1_TYPE_new();
        if (p7->d.other == NULL)
            goto oom;
        p7->d.other->type = V_ASN1_OCTET_STRING;
        p7->d.other->value.octet_string = ostr;
    }
    /* From here on ostr is owned by p7; no failure path follows the
     * hand-off, so the oom cleanup below cannot double-free it. */
    *p7_out = p7;
    return 0;

oom:
    if (ostr != NULL)
        ASN1_OCTET_STRING_free(ostr);
    if (p7 != NULL)
        PKCS7_free(p7);
    return ENOMEM;
}
/*
 * Build a DER-encoded CMS ContentInfo wrapping data, with the content
 * type selected by cms_msg_type (via pkinit_pkcs7type2oid()).
 *
 * On success, stores a malloc'd DER buffer in *out_data (caller frees)
 * and its length in *out_data_len, returning 0.  Returns ENOMEM on
 * allocation failure or KRB5KDC_ERR_PREAUTH_FAILED if DER encoding
 * fails.  The plg_cryptoctx supplies the OID table; req_cryptoctx and
 * id_cryptoctx are unused here.
 */
krb5_error_code
cms_contentinfo_create(krb5_context context,                /* IN */
                       pkinit_plg_crypto_context plg_cryptoctx, /* IN */
                       pkinit_req_crypto_context req_cryptoctx, /* IN */
                       pkinit_identity_crypto_context id_cryptoctx, /* IN */
                       int cms_msg_type,
                       unsigned char *data, unsigned int data_len,
                       unsigned char **out_data, unsigned int *out_data_len)
{
    krb5_error_code retval = ENOMEM;
    ASN1_OBJECT *oid;
    PKCS7 *p7 = NULL;
    unsigned char *p;

    /* Pick the correct oid for the eContentInfo. */
    oid = pkinit_pkcs7type2oid(plg_cryptoctx, cms_msg_type);
    if (oid == NULL)
        goto cleanup;
    retval = create_contentinfo(context, oid, data, data_len, &p7);
    if (retval != 0)
        goto cleanup;

    /* First i2d_PKCS7() call with NULL buffer just measures the size. */
    *out_data_len = i2d_PKCS7(p7, NULL);
    if (!(*out_data_len)) {
        unsigned long err = ERR_peek_error();
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        krb5_set_error_message(context, retval, "%s\n",
                               ERR_error_string(err, NULL));
        pkiDebug("failed to der encode pkcs7\n");
        goto cleanup;
    }
    retval = ENOMEM;
    if ((p = *out_data = malloc(*out_data_len)) == NULL)
        goto cleanup;

    /* DER encode PKCS7 data.  Note retval temporarily holds the encoded
     * length here (nonzero on success) before being reset to 0 below. */
    retval = i2d_PKCS7(p7, &p);
    if (!retval) {
        unsigned long err = ERR_peek_error();
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        krb5_set_error_message(context, retval, "%s\n",
                               ERR_error_string(err, NULL));
        pkiDebug("failed to der encode pkcs7\n");
        goto cleanup;
    }
    retval = 0;

cleanup:
    if (p7)
        PKCS7_free(p7);
    return retval;
}
/*
 * Build a DER-encoded CMS/PKCS7 SignedData message wrapping data.
 *
 * If the identity context holds certificates, the message is signed:
 * a signer-info is built for the selected certificate (optionally with
 * the full verified certificate chain when include_certchain is set),
 * SHA-1 signed attributes are added for non-draft9 message types, and
 * the signature is produced by pkinit_sign_data() (file-based key or
 * PKCS#11 token).  With no certificates, an unsigned SignedData wrapper
 * is produced.
 *
 * On success, stores a malloc'd DER buffer in *signed_data (caller
 * frees) and its length in *signed_data_len, returning 0.  On failure
 * returns ENOMEM, KRB5KDC_ERR_PREAUTH_FAILED, or an error from a
 * helper.  The exact order of the OpenSSL calls below is significant;
 * treat this routine as order-sensitive when modifying it.
 */
krb5_error_code
cms_signeddata_create(krb5_context context,
                      pkinit_plg_crypto_context plg_cryptoctx,
                      pkinit_req_crypto_context req_cryptoctx,
                      pkinit_identity_crypto_context id_cryptoctx,
                      int cms_msg_type,
                      int include_certchain,
                      unsigned char *data,
                      unsigned int data_len,
                      unsigned char **signed_data,
                      unsigned int *signed_data_len)
{
    krb5_error_code retval = ENOMEM;
    PKCS7 *p7 = NULL, *inner_p7 = NULL;
    PKCS7_SIGNED *p7s = NULL;
    PKCS7_SIGNER_INFO *p7si = NULL;
    unsigned char *p;
    STACK_OF(X509) * cert_stack = NULL;
    ASN1_OCTET_STRING *digest_attr = NULL;
    EVP_MD_CTX ctx, ctx2;
    const EVP_MD *md_tmp = NULL;
    unsigned char md_data[EVP_MAX_MD_SIZE], md_data2[EVP_MAX_MD_SIZE];
    unsigned char *digestInfo_buf = NULL, *abuf = NULL;
    unsigned int md_len, md_len2, alen, digestInfo_len;
    STACK_OF(X509_ATTRIBUTE) * sk;
    unsigned char *sig = NULL;
    unsigned int sig_len = 0;
    X509_ALGOR *alg = NULL;
    ASN1_OCTET_STRING *digest = NULL;
    unsigned int alg_len = 0, digest_len = 0;
    unsigned char *y = NULL, *alg_buf = NULL, *digest_buf = NULL;
    X509 *cert = NULL;
    ASN1_OBJECT *oid = NULL, *oid_copy;

    /* Start creating PKCS7 data. */
    if ((p7 = PKCS7_new()) == NULL)
        goto cleanup;
    p7->type = OBJ_nid2obj(NID_pkcs7_signed);

    if ((p7s = PKCS7_SIGNED_new()) == NULL)
        goto cleanup;
    p7->d.sign = p7s;
    if (!ASN1_INTEGER_set(p7s->version, 3))
        goto cleanup;

    /* pick the correct oid for the eContentInfo */
    oid = pkinit_pkcs7type2oid(plg_cryptoctx, cms_msg_type);
    if (oid == NULL)
        goto cleanup;

    if (id_cryptoctx->my_certs != NULL) {
        /* create a cert chain that has at least the signer's certificate */
        if ((cert_stack = sk_X509_new_null()) == NULL)
            goto cleanup;

        cert = sk_X509_value(id_cryptoctx->my_certs, id_cryptoctx->cert_index);
        if (!include_certchain) {
            pkiDebug("only including signer's certificate\n");
            sk_X509_push(cert_stack, X509_dup(cert));
        } else {
            /* create a cert chain by verifying the signer cert against the
             * trusted/intermediate CA stores, then copying the chain
             * (minus the root) into cert_stack */
            X509_STORE *certstore = NULL;
            X509_STORE_CTX certctx;
            STACK_OF(X509) *certstack = NULL;
            char buf[DN_BUF_LEN];
            unsigned int i = 0, size = 0;

            if ((certstore = X509_STORE_new()) == NULL)
                goto cleanup;
            pkiDebug("building certificate chain\n");
            X509_STORE_set_verify_cb_func(certstore, openssl_callback);
            X509_STORE_CTX_init(&certctx, certstore, cert,
                                id_cryptoctx->intermediateCAs);
            X509_STORE_CTX_trusted_stack(&certctx, id_cryptoctx->trustedCAs);
            if (!X509_verify_cert(&certctx)) {
                int code = X509_STORE_CTX_get_error(&certctx);
                const char *msg = X509_verify_cert_error_string(code);

                pkiDebug("failed to create a certificate chain: %s\n", msg);
                if (!sk_X509_num(id_cryptoctx->trustedCAs))
                    pkiDebug("No trusted CAs found. Check your X509_anchors\n");
                retval = KRB5KDC_ERR_PREAUTH_FAILED;
                krb5_set_error_message(context, retval,
                                       _("Cannot create cert chain: %s"), msg);
                goto cleanup;
            }
            certstack = X509_STORE_CTX_get1_chain(&certctx);
            size = sk_X509_num(certstack);
            pkiDebug("size of certificate chain = %d\n", size);
            /* size - 1: the last element (the trust anchor) is omitted */
            for (i = 0; i < size - 1; i++) {
                X509 *x = sk_X509_value(certstack, i);
                X509_NAME_oneline(X509_get_subject_name(x), buf, sizeof(buf));
                pkiDebug("cert #%d: %s\n", i, buf);
                sk_X509_push(cert_stack, X509_dup(x));
            }
            X509_STORE_CTX_cleanup(&certctx);
            X509_STORE_free(certstore);
            sk_X509_pop_free(certstack, X509_free);
        }
        p7s->cert = cert_stack;

        /* fill-in PKCS7_SIGNER_INFO */
        if ((p7si = PKCS7_SIGNER_INFO_new()) == NULL)
            goto cleanup;
        if (!ASN1_INTEGER_set(p7si->version, 1))
            goto cleanup;
        if (!X509_NAME_set(&p7si->issuer_and_serial->issuer,
                           X509_get_issuer_name(cert)))
            goto cleanup;
        /* because ASN1_INTEGER_set is used to set a 'long' we will do
         * things the ugly way. */
        M_ASN1_INTEGER_free(p7si->issuer_and_serial->serial);
        if (!(p7si->issuer_and_serial->serial =
              M_ASN1_INTEGER_dup(X509_get_serialNumber(cert))))
            goto cleanup;

        /* will not fill-out EVP_PKEY because it's on the smartcard */

        /* Set digest algs */
        p7si->digest_alg->algorithm = OBJ_nid2obj(NID_sha1);

        if (p7si->digest_alg->parameter != NULL)
            ASN1_TYPE_free(p7si->digest_alg->parameter);
        if ((p7si->digest_alg->parameter = ASN1_TYPE_new()) == NULL)
            goto cleanup;
        p7si->digest_alg->parameter->type = V_ASN1_NULL;

        /* Set sig algs */
        if (p7si->digest_enc_alg->parameter != NULL)
            ASN1_TYPE_free(p7si->digest_enc_alg->parameter);
        p7si->digest_enc_alg->algorithm = OBJ_nid2obj(NID_sha1WithRSAEncryption);
        if (!(p7si->digest_enc_alg->parameter = ASN1_TYPE_new()))
            goto cleanup;
        p7si->digest_enc_alg->parameter->type = V_ASN1_NULL;

        if (cms_msg_type == CMS_SIGN_DRAFT9){
            /* don't include signed attributes for pa-type 15 request;
             * the signature is computed directly over the raw data, so
             * abuf aliases the caller's buffer (do NOT free it later) */
            abuf = data;
            alen = data_len;
        } else {
            /* add signed attributes */
            /* compute sha1 digest over the EncapsulatedContentInfo */
            EVP_MD_CTX_init(&ctx);
            EVP_DigestInit_ex(&ctx, EVP_sha1(), NULL);
            EVP_DigestUpdate(&ctx, data, data_len);
            md_tmp = EVP_MD_CTX_md(&ctx);
            EVP_DigestFinal_ex(&ctx, md_data, &md_len);

            /* create a message digest attr */
            digest_attr = ASN1_OCTET_STRING_new();
            ASN1_OCTET_STRING_set(digest_attr, md_data, (int)md_len);
            PKCS7_add_signed_attribute(p7si, NID_pkcs9_messageDigest,
                                       V_ASN1_OCTET_STRING, (char *) digest_attr);

            /* create a content-type attr */
            oid_copy = OBJ_dup(oid);
            if (oid_copy == NULL)
                goto cleanup2;
            PKCS7_add_signed_attribute(p7si, NID_pkcs9_contentType,
                                       V_ASN1_OBJECT, oid_copy);

            /* create the signature over signed attributes. get DER encoded value */
            /* This is the place where smartcard signature needs to be calculated */
            sk = p7si->auth_attr;
            /* ASN1_item_i2d allocates abuf here; freed after signing */
            alen = ASN1_item_i2d((ASN1_VALUE *) sk, &abuf,
                                 ASN1_ITEM_rptr(PKCS7_ATTR_SIGN));
            if (abuf == NULL)
                goto cleanup2;
        } /* signed attributes */

#ifndef WITHOUT_PKCS11
        /* Some tokens can only do RSAEncryption without sha1 hash */
        /* to compute sha1WithRSAEncryption, encode the algorithm ID for the hash
         * function and the hash value into an ASN.1 value of type DigestInfo
         * DigestInfo::=SEQUENCE {
         *   digestAlgorithm  AlgorithmIdentifier,
         *   digest OCTET STRING }
         */
        if (id_cryptoctx->pkcs11_method == 1 &&
            id_cryptoctx->mech == CKM_RSA_PKCS) {
            pkiDebug("mech = CKM_RSA_PKCS\n");
            EVP_MD_CTX_init(&ctx2);
            /* if this is not draft9 request, include digest signed attribute */
            if (cms_msg_type != CMS_SIGN_DRAFT9)
                EVP_DigestInit_ex(&ctx2, md_tmp, NULL);
            else
                EVP_DigestInit_ex(&ctx2, EVP_sha1(), NULL);
            EVP_DigestUpdate(&ctx2, abuf, alen);
            EVP_DigestFinal_ex(&ctx2, md_data2, &md_len2);

            alg = X509_ALGOR_new();
            if (alg == NULL)
                goto cleanup2;
            alg->algorithm = OBJ_nid2obj(NID_sha1);
            alg->parameter = NULL;
            alg_len = i2d_X509_ALGOR(alg, NULL);
            alg_buf = malloc(alg_len);
            if (alg_buf == NULL)
                goto cleanup2;

            digest = ASN1_OCTET_STRING_new();
            if (digest == NULL)
                goto cleanup2;
            ASN1_OCTET_STRING_set(digest, md_data2, (int)md_len2);
            digest_len = i2d_ASN1_OCTET_STRING(digest, NULL);
            digest_buf = malloc(digest_len);
            if (digest_buf == NULL)
                goto cleanup2;

            /* hand-roll the DigestInfo SEQUENCE: header, then the two
             * DER-encoded components appended via the cursor y */
            digestInfo_len = ASN1_object_size(1, (int)(alg_len + digest_len),
                                              V_ASN1_SEQUENCE);
            y = digestInfo_buf = malloc(digestInfo_len);
            if (digestInfo_buf == NULL)
                goto cleanup2;
            ASN1_put_object(&y, 1, (int)(alg_len + digest_len), V_ASN1_SEQUENCE,
                            V_ASN1_UNIVERSAL);
            i2d_X509_ALGOR(alg, &y);
            i2d_ASN1_OCTET_STRING(digest, &y);
#ifdef DEBUG_SIG
            pkiDebug("signing buffer\n");
            print_buffer(digestInfo_buf, digestInfo_len);
            print_buffer_bin(digestInfo_buf, digestInfo_len, "/tmp/pkcs7_tosign");
#endif
            retval = pkinit_sign_data(context, id_cryptoctx, digestInfo_buf,
                                      digestInfo_len, &sig, &sig_len);
        } else
#endif
        {
            pkiDebug("mech = %s\n",
                     id_cryptoctx->pkcs11_method == 1 ? "CKM_SHA1_RSA_PKCS" : "FS");
            retval = pkinit_sign_data(context, id_cryptoctx, abuf, alen,
                                      &sig, &sig_len);
        }
#ifdef DEBUG_SIG
        print_buffer(sig, sig_len);
#endif
        /* abuf is only malloc'd in the non-draft9 branch; for draft9 it
         * aliases the caller's data */
        if (cms_msg_type != CMS_SIGN_DRAFT9 )
            free(abuf);
        if (retval)
            goto cleanup2;

        /* Add signature */
        if (!ASN1_STRING_set(p7si->enc_digest, (unsigned char *) sig,
                             (int)sig_len)) {
            unsigned long err = ERR_peek_error();
            retval = KRB5KDC_ERR_PREAUTH_FAILED;
            krb5_set_error_message(context, retval, "%s\n",
                                   ERR_error_string(err, NULL));
            pkiDebug("failed to add a signed digest attribute\n");
            goto cleanup2;
        }
        /* add signer_info to pkcs7 signed */
        if (!PKCS7_add_signer(p7, p7si))
            goto cleanup2;
    } /* we have a certificate */

    /* start on adding data to the pkcs7 signed */
    /* NOTE(review): the return value of create_contentinfo() is not
     * checked here; on failure inner_p7 is NULL and p7s->contents is
     * set to NULL, leaving the error to surface at i2d_PKCS7() below --
     * confirm this is intentional. */
    retval = create_contentinfo(context, oid, data, data_len, &inner_p7);
    if (p7s->contents != NULL)
        PKCS7_free(p7s->contents);
    p7s->contents = inner_p7;

    /* First i2d_PKCS7() call with NULL buffer just measures the size. */
    *signed_data_len = i2d_PKCS7(p7, NULL);
    if (!(*signed_data_len)) {
        unsigned long err = ERR_peek_error();
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        krb5_set_error_message(context, retval, "%s\n",
                               ERR_error_string(err, NULL));
        pkiDebug("failed to der encode pkcs7\n");
        goto cleanup2;
    }
    retval = ENOMEM;
    if ((p = *signed_data = malloc(*signed_data_len)) == NULL)
        goto cleanup2;

    /* DER encode PKCS7 data.  retval temporarily holds the encoded
     * length (nonzero on success) before being reset to 0 below. */
    retval = i2d_PKCS7(p7, &p);
    if (!retval) {
        unsigned long err = ERR_peek_error();
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        krb5_set_error_message(context, retval, "%s\n",
                               ERR_error_string(err, NULL));
        pkiDebug("failed to der encode pkcs7\n");
        goto cleanup2;
    }
    retval = 0;

#ifdef DEBUG_ASN1
    if (cms_msg_type == CMS_SIGN_CLIENT) {
        print_buffer_bin(*signed_data, *signed_data_len,
                         "/tmp/client_pkcs7_signeddata");
    } else {
        if (cms_msg_type == CMS_SIGN_SERVER) {
            print_buffer_bin(*signed_data, *signed_data_len,
                             "/tmp/kdc_pkcs7_signeddata");
        } else {
            print_buffer_bin(*signed_data, *signed_data_len,
                             "/tmp/draft9_pkcs7_signeddata");
        }
    }
#endif

cleanup2:
    /* The digest contexts were only initialized on the signing path
     * (p7si non-NULL), and ctx only for non-draft9 message types. */
    if (p7si) {
        if (cms_msg_type != CMS_SIGN_DRAFT9)
            EVP_MD_CTX_cleanup(&ctx);
#ifndef WITHOUT_PKCS11
        if (id_cryptoctx->pkcs11_method == 1 &&
            id_cryptoctx->mech == CKM_RSA_PKCS) {
            EVP_MD_CTX_cleanup(&ctx2);
            free(digest_buf);
            free(digestInfo_buf);
            free(alg_buf);
            if (digest != NULL)
                ASN1_OCTET_STRING_free(digest);
        }
#endif
        if (alg != NULL)
            X509_ALGOR_free(alg);
    }
cleanup:
    /* p7 owns p7s, the cert stack, signer info and contents by now. */
    if (p7 != NULL)
        PKCS7_free(p7);
    free(sig);

    return retval;
}
krb5_error_code
cms_signeddata_verify(krb5_context context,
pkinit_plg_crypto_context plgctx,
pkinit_req_crypto_context reqctx,
pkinit_identity_crypto_context idctx,
int cms_msg_type,
int require_crl_checking,
unsigned char *signed_data,
unsigned int signed_data_len,
unsigned char **data,
unsigned int *data_len,
unsigned char **authz_data,
unsigned int *authz_data_len,
int *is_signed)
{
/*
* Warning: Since most openssl functions do not set retval, large chunks of
* this function assume that retval is always a failure and may go to
* cleanup without setting retval explicitly. Make sure retval is not set
* to 0 or errors such as signature verification failure may be converted
* to success with significant security consequences.
*/
krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
CMS_ContentInfo *cms = NULL;
BIO *out = NULL;
int flags = CMS_NO_SIGNER_CERT_VERIFY;
int valid_oid = 0;
unsigned int i = 0;
unsigned int vflags = 0, size = 0;
const unsigned char *p = signed_data;
STACK_OF(CMS_SignerInfo) *si_sk = NULL;
CMS_SignerInfo *si = NULL;
X509 *x = NULL;
X509_STORE *store = NULL;
X509_STORE_CTX cert_ctx;
STACK_OF(X509) *signerCerts = NULL;
STACK_OF(X509) *intermediateCAs = NULL;
STACK_OF(X509_CRL) *signerRevoked = NULL;
STACK_OF(X509_CRL) *revoked = NULL;
STACK_OF(X509) *verified_chain = NULL;
ASN1_OBJECT *oid = NULL;
const ASN1_OBJECT *type = NULL, *etype = NULL;
ASN1_OCTET_STRING **octets;
krb5_external_principal_identifier **krb5_verified_chain = NULL;
krb5_data *authz = NULL;
char buf[DN_BUF_LEN];
#ifdef DEBUG_ASN1
print_buffer_bin(signed_data, signed_data_len,
"/tmp/client_received_pkcs7_signeddata");
#endif
if (is_signed)
*is_signed = 1;
oid = pkinit_pkcs7type2oid(plgctx, cms_msg_type);
if (oid == NULL)
goto cleanup;
/* decode received CMS message */
if ((cms = d2i_CMS_ContentInfo(NULL, &p, (int)signed_data_len)) == NULL) {
unsigned long err = ERR_peek_error();
krb5_set_error_message(context, retval, "%s\n",
ERR_error_string(err, NULL));
pkiDebug("%s: failed to decode message: %s\n",
__FUNCTION__, ERR_error_string(err, NULL));
goto cleanup;
}
etype = CMS_get0_eContentType(cms);
/*
* Prior to 1.10 the MIT client incorrectly emitted the pkinit structure
* directly in a CMS ContentInfo rather than using SignedData with no
* signers. Handle that case.
*/
type = CMS_get0_type(cms);
if (is_signed && !OBJ_cmp(type, oid)) {
unsigned char *d;
*is_signed = 0;
octets = pkinit_CMS_get0_content_data(cms);
if (!octets || ((*octets)->type != V_ASN1_OCTET_STRING)) {
retval = KRB5KDC_ERR_PREAUTH_FAILED;
krb5_set_error_message(context, retval,
_("Invalid pkinit packet: octet string "
"expected"));
goto cleanup;
}
*data_len = ASN1_STRING_length(*octets);
d = malloc(*data_len);
if (d == NULL) {
retval = ENOMEM;
goto cleanup;
}
memcpy(d, ASN1_STRING_data(*octets),
*data_len);
*data = d;
goto out;
} else {
/* Verify that the received message is CMS SignedData message. */
if (OBJ_obj2nid(type) != NID_pkcs7_signed) {
pkiDebug("Expected id-signedData CMS msg (received type = %d)\n",
OBJ_obj2nid(type));
krb5_set_error_message(context, retval, _("wrong oid\n"));
goto cleanup;
}
}
/* setup to verify X509 certificate used to sign CMS message */
if (!(store = X509_STORE_new()))
goto cleanup;
/* check if we are inforcing CRL checking */
vflags = X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL;
if (require_crl_checking)
X509_STORE_set_verify_cb_func(store, openssl_callback);
else
X509_STORE_set_verify_cb_func(store, openssl_callback_ignore_crls);
X509_STORE_set_flags(store, vflags);
/*
* Get the signer's information from the CMS message. Match signer ID
* against anchors and intermediate CAs in case no certs are present in the
* SignedData. If we start sending kdcPkId values in requests, we'll need
* to match against the source of that information too.
*/
CMS_set1_signers_certs(cms, NULL, 0);
CMS_set1_signers_certs(cms, idctx->trustedCAs, CMS_NOINTERN);
CMS_set1_signers_certs(cms, idctx->intermediateCAs, CMS_NOINTERN);
if (((si_sk = CMS_get0_SignerInfos(cms)) == NULL) ||
((si = sk_CMS_SignerInfo_value(si_sk, 0)) == NULL)) {
/* Not actually signed; anonymous case */
if (!is_signed)
goto cleanup;
*is_signed = 0;
/* We cannot use CMS_dataInit because there may be no digest */
octets = pkinit_CMS_get0_content_signed(cms);
if (octets)
out = BIO_new_mem_buf((*octets)->data, (*octets)->length);
if (out == NULL)
goto cleanup;
} else {
pkinit_CMS_SignerInfo_get_cert(cms, si, &x);
if (x == NULL)
goto cleanup;
/* create available CRL information (get local CRLs and include CRLs
* received in the CMS message
*/
signerRevoked = CMS_get1_crls(cms);
if (idctx->revoked == NULL)
revoked = signerRevoked;
else if (signerRevoked == NULL)
revoked = idctx->revoked;
else {
size = sk_X509_CRL_num(idctx->revoked);
revoked = sk_X509_CRL_new_null();
for (i = 0; i < size; i++)
sk_X509_CRL_push(revoked, sk_X509_CRL_value(idctx->revoked, i));
size = sk_X509_CRL_num(signerRevoked);
for (i = 0; i < size; i++)
sk_X509_CRL_push(revoked, sk_X509_CRL_value(signerRevoked, i));
}
/* create available intermediate CAs chains (get local intermediateCAs and
* include the CA chain received in the CMS message
*/
signerCerts = CMS_get1_certs(cms);
if (idctx->intermediateCAs == NULL)
intermediateCAs = signerCerts;
else if (signerCerts == NULL)
intermediateCAs = idctx->intermediateCAs;
else {
size = sk_X509_num(idctx->intermediateCAs);
intermediateCAs = sk_X509_new_null();
for (i = 0; i < size; i++) {
sk_X509_push(intermediateCAs,
sk_X509_value(idctx->intermediateCAs, i));
}
size = sk_X509_num(signerCerts);
for (i = 0; i < size; i++) {
sk_X509_push(intermediateCAs, sk_X509_value(signerCerts, i));
}
}
/* initialize x509 context with the received certificate and
* trusted and intermediate CA chains and CRLs
*/
if (!X509_STORE_CTX_init(&cert_ctx, store, x, intermediateCAs))
goto cleanup;
X509_STORE_CTX_set0_crls(&cert_ctx, revoked);
/* add trusted CAs certificates for cert verification */
if (idctx->trustedCAs != NULL)
X509_STORE_CTX_trusted_stack(&cert_ctx, idctx->trustedCAs);
else {
pkiDebug("unable to find any trusted CAs\n");
goto cleanup;
}
#ifdef DEBUG_CERTCHAIN
if (intermediateCAs != NULL) {
size = sk_X509_num(intermediateCAs);
pkiDebug("untrusted cert chain of size %d\n", size);
for (i = 0; i < size; i++) {
X509_NAME_oneline(X509_get_subject_name(
sk_X509_value(intermediateCAs, i)), buf, sizeof(buf));
pkiDebug("cert #%d: %s\n", i, buf);
}
}
if (idctx->trustedCAs != NULL) {
size = sk_X509_num(idctx->trustedCAs);
pkiDebug("trusted cert chain of size %d\n", size);
for (i = 0; i < size; i++) {
X509_NAME_oneline(X509_get_subject_name(
sk_X509_value(idctx->trustedCAs, i)), buf, sizeof(buf));
pkiDebug("cert #%d: %s\n", i, buf);
}
}
if (revoked != NULL) {
size = sk_X509_CRL_num(revoked);
pkiDebug("CRL chain of size %d\n", size);
for (i = 0; i < size; i++) {
X509_CRL *crl = sk_X509_CRL_value(revoked, i);
X509_NAME_oneline(X509_CRL_get_issuer(crl), buf, sizeof(buf));
pkiDebug("crls by CA #%d: %s\n", i , buf);
}
}
#endif
i = X509_verify_cert(&cert_ctx);
if (i <= 0) {
int j = X509_STORE_CTX_get_error(&cert_ctx);
reqctx->received_cert = X509_dup(cert_ctx.current_cert);
switch(j) {
case X509_V_ERR_CERT_REVOKED:
retval = KRB5KDC_ERR_REVOKED_CERTIFICATE;
break;
case X509_V_ERR_UNABLE_TO_GET_CRL:
retval = KRB5KDC_ERR_REVOCATION_STATUS_UNKNOWN;
break;
case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY:
retval = KRB5KDC_ERR_CANT_VERIFY_CERTIFICATE;
break;
default:
retval = KRB5KDC_ERR_INVALID_CERTIFICATE;
}
if (reqctx->received_cert == NULL)
strlcpy(buf, "(none)", sizeof(buf));
else
X509_NAME_oneline(X509_get_subject_name(reqctx->received_cert),
buf, sizeof(buf));
pkiDebug("problem with cert DN = %s (error=%d) %s\n", buf, j,
X509_verify_cert_error_string(j));
krb5_set_error_message(context, retval, "%s\n",
X509_verify_cert_error_string(j));
#ifdef DEBUG_CERTCHAIN
size = sk_X509_num(signerCerts);
pkiDebug("received cert chain of size %d\n", size);
for (j = 0; j < size; j++) {
X509 *tmp_cert = sk_X509_value(signerCerts, j);
X509_NAME_oneline(X509_get_subject_name(tmp_cert), buf, sizeof(buf));
pkiDebug("cert #%d: %s\n", j, buf);
}
#endif
} else {
/* retrieve verified certificate chain */
if (cms_msg_type == CMS_SIGN_CLIENT || cms_msg_type == CMS_SIGN_DRAFT9)
verified_chain = X509_STORE_CTX_get1_chain(&cert_ctx);
}
X509_STORE_CTX_cleanup(&cert_ctx);
if (i <= 0)
goto cleanup;
out = BIO_new(BIO_s_mem());
if (cms_msg_type == CMS_SIGN_DRAFT9)
flags |= CMS_NOATTR;
if (CMS_verify(cms, NULL, store, NULL, out, flags) == 0) {
unsigned long err = ERR_peek_error();
switch(ERR_GET_REASON(err)) {
case PKCS7_R_DIGEST_FAILURE:
retval = KRB5KDC_ERR_DIGEST_IN_SIGNED_DATA_NOT_ACCEPTED;
break;
case PKCS7_R_SIGNATURE_FAILURE:
default:
retval = KRB5KDC_ERR_INVALID_SIG;
}
pkiDebug("CMS Verification failure\n");
krb5_set_error_message(context, retval, "%s\n",
ERR_error_string(err, NULL));
goto cleanup;
}
} /* message was signed */
if (!OBJ_cmp(etype, oid))
valid_oid = 1;
else if (cms_msg_type == CMS_SIGN_DRAFT9) {
/*
* Various implementations of the pa-type 15 request use
* different OIDS. We check that the returned object
* has any of the acceptable OIDs
*/
ASN1_OBJECT *client_oid = NULL, *server_oid = NULL, *rsa_oid = NULL;
client_oid = pkinit_pkcs7type2oid(plgctx, CMS_SIGN_CLIENT);
server_oid = pkinit_pkcs7type2oid(plgctx, CMS_SIGN_SERVER);
rsa_oid = pkinit_pkcs7type2oid(plgctx, CMS_ENVEL_SERVER);
if (!OBJ_cmp(etype, client_oid) ||
!OBJ_cmp(etype, server_oid) ||
!OBJ_cmp(etype, rsa_oid))
valid_oid = 1;
}
if (valid_oid)
pkiDebug("CMS Verification successful\n");
else {
pkiDebug("wrong oid in eContentType\n");
print_buffer(etype->data,
(unsigned int)etype->length);
retval = KRB5KDC_ERR_PREAUTH_FAILED;
krb5_set_error_message(context, retval, "wrong oid\n");
goto cleanup;
}
/* transfer the data from CMS message into return buffer */
for (size = 0;;) {
int remain;
retval = ENOMEM;
if ((*data = realloc(*data, size + 1024 * 10)) == NULL)
goto cleanup;
remain = BIO_read(out, &((*data)[size]), 1024 * 10);
if (remain <= 0)
break;
else
size += remain;
}
*data_len = size;
if (x) {
reqctx->received_cert = X509_dup(x);
/* generate authorization data */
if (cms_msg_type == CMS_SIGN_CLIENT || cms_msg_type == CMS_SIGN_DRAFT9) {
if (authz_data == NULL || authz_data_len == NULL)
goto out;
*authz_data = NULL;
retval = create_identifiers_from_stack(verified_chain,
&krb5_verified_chain);
if (retval) {
pkiDebug("create_identifiers_from_stack failed\n");
goto cleanup;
}
retval = k5int_encode_krb5_td_trusted_certifiers((krb5_external_principal_identifier *const *)krb5_verified_chain, &authz);
if (retval) {
pkiDebug("encode_krb5_td_trusted_certifiers failed\n");
goto cleanup;
}
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)authz->data, authz->length,
"/tmp/kdc_ad_initial_verified_cas");
#endif
*authz_data = malloc(authz->length);
if (*authz_data == NULL) {
retval = ENOMEM;
goto cleanup;
}
memcpy(*authz_data, authz->data, authz->length);
*authz_data_len = authz->length;
}
}
out:
retval = 0;
cleanup:
if (out != NULL)
BIO_free(out);
if (store != NULL)
X509_STORE_free(store);
if (cms != NULL) {
if (signerCerts != NULL)
pkinit_CMS_free1_certs(signerCerts);
if (idctx->intermediateCAs != NULL && signerCerts)
sk_X509_free(intermediateCAs);
if (signerRevoked != NULL)
pkinit_CMS_free1_crls(signerRevoked);
if (idctx->revoked != NULL && signerRevoked)
sk_X509_CRL_free(revoked);
CMS_ContentInfo_free(cms);
}
if (verified_chain != NULL)
sk_X509_pop_free(verified_chain, X509_free);
if (krb5_verified_chain != NULL)
free_krb5_external_principal_identifier(&krb5_verified_chain);
if (authz != NULL)
krb5_free_data(context, authz);
return retval;
}
/*
 * cms_envelopeddata_create - build a DER-encoded PKCS7 EnvelopedData reply.
 *
 * key_pack is first wrapped in a CMS SignedData blob (signed with our own
 * identity credentials) and that blob is then encrypted to the client
 * certificate previously saved in reqctx->received_cert.
 *
 * For KRB5_PADATA_PK_AS_REQ the signed blob is unwrapped with
 * prepare_enc_data() before encryption and the inner content type is set to
 * id-signedData; for the draft9 pa-types the blob is encrypted as-is and the
 * content type is id-data.
 *
 * On success *out / *out_len receive a malloc'd DER buffer that the caller
 * must free.  Returns 0 on success, ENOMEM or another error code on failure.
 */
krb5_error_code
cms_envelopeddata_create(krb5_context context,
                         pkinit_plg_crypto_context plgctx,
                         pkinit_req_crypto_context reqctx,
                         pkinit_identity_crypto_context idctx,
                         krb5_preauthtype pa_type,
                         int include_certchain,
                         unsigned char *key_pack,
                         unsigned int key_pack_len,
                         unsigned char **out,
                         unsigned int *out_len)
{
    krb5_error_code retval = ENOMEM;
    PKCS7 *p7 = NULL;
    BIO *in = NULL;
    unsigned char *p = NULL, *signed_data = NULL, *enc_data = NULL;
    int signed_data_len = 0, enc_data_len = 0, flags = PKCS7_BINARY;
    STACK_OF(X509) *encerts = NULL;
    const EVP_CIPHER *cipher = NULL;
    int cms_msg_type;

    /* Map the preauth type onto the CMS signing message type. */
    switch ((int)pa_type) {
    case KRB5_PADATA_PK_AS_REQ_OLD:
    case KRB5_PADATA_PK_AS_REP_OLD:
        cms_msg_type = CMS_SIGN_DRAFT9;
        break;
    case KRB5_PADATA_PK_AS_REQ:
        cms_msg_type = CMS_ENVEL_SERVER;
        break;
    default:
        goto cleanup;
    }

    /* Create the PKCS7 SignedData portion of the PKCS7 EnvelopedData. */
    retval = cms_signeddata_create(context, plgctx, reqctx, idctx,
                                   cms_msg_type, include_certchain,
                                   key_pack, key_pack_len, &signed_data,
                                   (unsigned int *)&signed_data_len);
    if (retval) {
        pkiDebug("failed to create pkcs7 signed data\n");
        goto cleanup;
    }

    /* Check that we have the client's certificate to encrypt to. */
    if (reqctx->received_cert == NULL) {
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        goto cleanup;
    }
    encerts = sk_X509_new_null();
    sk_X509_push(encerts, reqctx->received_cert);

    cipher = EVP_des_ede3_cbc();
    in = BIO_new(BIO_s_mem());
    switch (pa_type) {
    case KRB5_PADATA_PK_AS_REQ:
        /* RFC flavor: envelope only the inner content of the signed blob. */
        prepare_enc_data(signed_data, signed_data_len, &enc_data,
                         &enc_data_len);
        retval = BIO_write(in, enc_data, enc_data_len);
        if (retval != enc_data_len) {
            pkiDebug("BIO_write only wrote %d\n", retval);
            goto cleanup;
        }
        break;
    case KRB5_PADATA_PK_AS_REP_OLD:
    case KRB5_PADATA_PK_AS_REQ_OLD:
        /* draft9 flavor: the signed blob is already in the expected form. */
        retval = BIO_write(in, signed_data, signed_data_len);
        if (retval != signed_data_len) {
            pkiDebug("BIO_write only wrote %d\n", retval);
            goto cleanup;
        }
        break;
    default:
        retval = -1;
        goto cleanup;
    }

    p7 = PKCS7_encrypt(encerts, in, cipher, flags);
    if (p7 == NULL) {
        pkiDebug("failed to encrypt PKCS7 object\n");
        retval = -1;
        goto cleanup;
    }

    /* Label the encrypted content appropriately for the protocol flavor.
     * (The original code carried three duplicated, unreachable "break"
     * statements here; they were dead code and have been removed.) */
    switch (pa_type) {
    case KRB5_PADATA_PK_AS_REQ:
        p7->d.enveloped->enc_data->content_type =
            OBJ_nid2obj(NID_pkcs7_signed);
        break;
    case KRB5_PADATA_PK_AS_REP_OLD:
    case KRB5_PADATA_PK_AS_REQ_OLD:
        p7->d.enveloped->enc_data->content_type =
            OBJ_nid2obj(NID_pkcs7_data);
        break;
    }

    /* DER-encode the EnvelopedData into a malloc'd buffer. */
    *out_len = i2d_PKCS7(p7, NULL);
    if (!*out_len || (p = *out = malloc(*out_len)) == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    retval = i2d_PKCS7(p7, &p);
    if (!retval) {
        pkiDebug("unable to write pkcs7 object\n");
        goto cleanup;
    }
    retval = 0;

#ifdef DEBUG_ASN1
    print_buffer_bin(*out, *out_len, "/tmp/kdc_enveloped_data");
#endif

cleanup:
    if (p7 != NULL)
        PKCS7_free(p7);
    if (in != NULL)
        BIO_free(in);
    free(signed_data);
    free(enc_data);
    /* encerts holds a borrowed reference to received_cert, so use the
     * shallow sk_X509_free() rather than sk_X509_pop_free(). */
    if (encerts != NULL)
        sk_X509_free(encerts);

    return retval;
}
/*
 * cms_envelopeddata_verify - decrypt and verify a PKCS7 EnvelopedData msg.
 *
 * Decodes enveloped_data, checks that it really is an EnvelopedData
 * message, decrypts it with our private key, and hands the recovered
 * SignedData to cms_signeddata_verify().  For the RFC flavor
 * (KRB5_PADATA_PK_AS_REP) the decrypted content is first re-wrapped so the
 * verifier can decode it; draft9 content is already wrapped.
 *
 * On success *data / *data_len receive the verified cleartext (malloc'd;
 * caller frees).  Returns 0 on success or a krb5 error code.
 */
krb5_error_code
cms_envelopeddata_verify(krb5_context context,
                         pkinit_plg_crypto_context plg_cryptoctx,
                         pkinit_req_crypto_context req_cryptoctx,
                         pkinit_identity_crypto_context id_cryptoctx,
                         krb5_preauthtype pa_type,
                         int require_crl_checking,
                         unsigned char *enveloped_data,
                         unsigned int enveloped_data_len,
                         unsigned char **data,
                         unsigned int *data_len)
{
    krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
    PKCS7 *p7 = NULL;
    BIO *out = NULL;
    int i = 0;
    unsigned int size = 0;
    const unsigned char *p = enveloped_data;
    unsigned int tmp_buf_len = 0, tmp_buf2_len = 0, vfy_buf_len = 0;
    unsigned char *tmp_buf = NULL, *tmp_buf2 = NULL, *vfy_buf = NULL;
    unsigned char *newbuf = NULL;
    int msg_type = 0;

#ifdef DEBUG_ASN1
    print_buffer_bin(enveloped_data, enveloped_data_len,
                     "/tmp/client_envelopeddata");
#endif
    /* Decode the received PKCS7 message. */
    if ((p7 = d2i_PKCS7(NULL, &p, (int)enveloped_data_len)) == NULL) {
        unsigned long err = ERR_peek_error();
        pkiDebug("failed to decode pkcs7\n");
        krb5_set_error_message(context, retval, "%s\n",
                               ERR_error_string(err, NULL));
        goto cleanup;
    }

    /* Verify that the received message is a PKCS7 EnvelopedData message. */
    if (OBJ_obj2nid(p7->type) != NID_pkcs7_enveloped) {
        pkiDebug("Expected id-enveloped PKCS7 msg (received type = %d)\n",
                 OBJ_obj2nid(p7->type));
        krb5_set_error_message(context, retval, "wrong oid\n");
        goto cleanup;
    }

    /* Decrypt the received PKCS7 message with our private key. */
    out = BIO_new(BIO_s_mem());
    if (pkcs7_decrypt(context, id_cryptoctx, p7, out)) {
        pkiDebug("PKCS7 decryption successful\n");
    } else {
        unsigned long err = ERR_peek_error();
        if (err != 0)
            krb5_set_error_message(context, retval, "%s\n",
                                   ERR_error_string(err, NULL));
        pkiDebug("PKCS7 decryption failed\n");
        goto cleanup;
    }

    /* Transfer the decoded PKCS7 SignedData message into a separate
     * buffer, growing it 10KB at a time. */
    for (;;) {
        /* Assign realloc()'s result to a temporary: assigning directly to
         * tmp_buf would leak the old buffer if the reallocation fails. */
        newbuf = realloc(tmp_buf, size + 1024 * 10);
        if (newbuf == NULL) {
            retval = ENOMEM;
            goto cleanup;
        }
        tmp_buf = newbuf;
        i = BIO_read(out, &(tmp_buf[size]), 1024 * 10);
        if (i <= 0)
            break;
        size += i;
    }
    tmp_buf_len = size;

#ifdef DEBUG_ASN1
    print_buffer_bin(tmp_buf, tmp_buf_len, "/tmp/client_enc_keypack");
#endif
    /* Select the verification flavor from the preauth type. */
    switch (pa_type) {
    case KRB5_PADATA_PK_AS_REP:
        msg_type = CMS_ENVEL_SERVER;
        break;
    case KRB5_PADATA_PK_AS_REP_OLD:
        msg_type = CMS_SIGN_DRAFT9;
        break;
    default:
        pkiDebug("%s: unrecognized pa_type = %d\n", __FUNCTION__, pa_type);
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        goto cleanup;
    }

    /*
     * If this is the RFC style, wrap the signed data to make
     * decoding easier in the verify routine.
     * For draft9-compatible, we don't do anything because it
     * is already wrapped.
     */
    if (msg_type == CMS_ENVEL_SERVER) {
        retval = wrap_signeddata(tmp_buf, tmp_buf_len,
                                 &tmp_buf2, &tmp_buf2_len);
        if (retval) {
            pkiDebug("failed to encode signeddata\n");
            goto cleanup;
        }
        vfy_buf = tmp_buf2;
        vfy_buf_len = tmp_buf2_len;
    } else {
        vfy_buf = tmp_buf;
        vfy_buf_len = tmp_buf_len;
    }

#ifdef DEBUG_ASN1
    print_buffer_bin(vfy_buf, vfy_buf_len, "/tmp/client_enc_keypack2");
#endif
    retval = cms_signeddata_verify(context, plg_cryptoctx, req_cryptoctx,
                                   id_cryptoctx, msg_type,
                                   require_crl_checking,
                                   vfy_buf, vfy_buf_len,
                                   data, data_len, NULL, NULL, NULL);
    if (!retval)
        pkiDebug("PKCS7 Verification Success\n");
    else {
        pkiDebug("PKCS7 Verification Failure\n");
        goto cleanup;
    }
    retval = 0;

cleanup:
    if (p7 != NULL)
        PKCS7_free(p7);
    if (out != NULL)
        BIO_free(out);
    free(tmp_buf);
    free(tmp_buf2);

    return retval;
}
/*
 * Extract Subject Alternative Name (SAN) entries from cert.
 *
 * Each non-NULL out-parameter is assigned a newly allocated,
 * NULL-terminated array when the certificate carries a subjectAltName
 * extension:
 *   princs_ret - krb5 principals decoded from id-pkinit-san otherNames
 *   upn_ret    - principals parsed from Microsoft UPN otherNames
 *   dns_ret    - strdup'd dNSName strings
 * Entries with embedded NUL bytes are skipped.  On error every partially
 * built array is freed.  Returns 0 on success (including a cert with no
 * SAN extension), EINVAL or ENOMEM on failure.
 */
static krb5_error_code
crypto_retrieve_X509_sans(krb5_context context,
                          pkinit_plg_crypto_context plgctx,
                          pkinit_req_crypto_context reqctx,
                          X509 *cert,
                          krb5_principal **princs_ret,
                          krb5_principal **upn_ret,
                          unsigned char ***dns_ret)
{
    krb5_error_code retval = EINVAL;
    char buf[DN_BUF_LEN];
    /* p/u/d count the entries stored in each result array; l is the
     * extension index returned by X509_get_ext_by_NID(). */
    int p = 0, u = 0, d = 0, l;
    krb5_principal *princs = NULL;
    krb5_principal *upns = NULL;
    unsigned char **dnss = NULL;
    unsigned int i, num_found = 0;

    /* Nothing to do if the caller wants no values back. */
    if (princs_ret == NULL && upn_ret == NULL && dns_ret == NULL) {
        pkiDebug("%s: nowhere to return any values!\n", __FUNCTION__);
        return retval;
    }
    if (cert == NULL) {
        pkiDebug("%s: no certificate!\n", __FUNCTION__);
        return retval;
    }

    X509_NAME_oneline(X509_get_subject_name(cert),
                      buf, sizeof(buf));
    pkiDebug("%s: looking for SANs in cert = %s\n", __FUNCTION__, buf);

    /* Locate and decode the subjectAltName extension, if present. */
    if ((l = X509_get_ext_by_NID(cert, NID_subject_alt_name, -1)) >= 0) {
        X509_EXTENSION *ext = NULL;
        GENERAL_NAMES *ialt = NULL;
        GENERAL_NAME *gen = NULL;
        int ret = 0;
        unsigned int num_sans = 0;

        if (!(ext = X509_get_ext(cert, l)) || !(ialt = X509V3_EXT_d2i(ext))) {
            pkiDebug("%s: found no subject alt name extensions\n",
                     __FUNCTION__);
            goto cleanup;
        }
        num_sans = sk_GENERAL_NAME_num(ialt);
        pkiDebug("%s: found %d subject alt name extension(s)\n",
                 __FUNCTION__, num_sans);

        /* OK, we're likely returning something. Allocate return values.
         * The extra calloc'd slot keeps each array NULL-terminated. */
        if (princs_ret != NULL) {
            princs = calloc(num_sans + 1, sizeof(krb5_principal));
            if (princs == NULL) {
                retval = ENOMEM;
                goto cleanup;
            }
        }
        if (upn_ret != NULL) {
            upns = calloc(num_sans + 1, sizeof(krb5_principal));
            if (upns == NULL) {
                retval = ENOMEM;
                goto cleanup;
            }
        }
        if (dns_ret != NULL) {
            dnss = calloc(num_sans + 1, sizeof(*dnss));
            if (dnss == NULL) {
                retval = ENOMEM;
                goto cleanup;
            }
        }

        for (i = 0; i < num_sans; i++) {
            krb5_data name = { 0, 0, NULL };

            gen = sk_GENERAL_NAME_value(ialt, i);
            switch (gen->type) {
            case GEN_OTHERNAME:
                name.length = gen->d.otherName->value->value.sequence->length;
                name.data = (char *)gen->d.otherName->value->value.sequence->data;
                if (princs != NULL
                    && OBJ_cmp(plgctx->id_pkinit_san,
                               gen->d.otherName->type_id) == 0) {
#ifdef DEBUG_ASN1
                    print_buffer_bin((unsigned char *)name.data, name.length,
                                     "/tmp/pkinit_san");
#endif
                    /* PKINIT SAN: a DER-encoded KRB5PrincipalName. */
                    ret = k5int_decode_krb5_principal_name(&name, &princs[p]);
                    if (ret) {
                        pkiDebug("%s: failed decoding pkinit san value\n",
                                 __FUNCTION__);
                    } else {
                        p++;
                        num_found++;
                    }
                } else if (upns != NULL
                           && OBJ_cmp(plgctx->id_ms_san_upn,
                                      gen->d.otherName->type_id) == 0) {
                    /* Prevent abuse of embedded null characters. */
                    if (memchr(name.data, '\0', name.length))
                        break;
                    /* NOTE(review): name.data comes straight from the cert
                     * and is not guaranteed NUL-terminated here; confirm
                     * krb5_parse_name()'s input is bounded upstream. */
                    ret = krb5_parse_name(context, name.data, &upns[u]);
                    if (ret) {
                        pkiDebug("%s: failed parsing ms-upn san value\n",
                                 __FUNCTION__);
                    } else {
                        u++;
                        num_found++;
                    }
                } else {
                    pkiDebug("%s: unrecognized othername oid in SAN\n",
                             __FUNCTION__);
                    continue;
                }
                break;
            case GEN_DNS:
                if (dnss != NULL) {
                    /* Prevent abuse of embedded null characters. */
                    if (memchr(gen->d.dNSName->data, '\0',
                               gen->d.dNSName->length))
                        break;
                    pkiDebug("%s: found dns name = %s\n",
                             __FUNCTION__, gen->d.dNSName->data);
                    dnss[d] = (unsigned char *)
                        strdup((char *)gen->d.dNSName->data);
                    if (dnss[d] == NULL) {
                        pkiDebug("%s: failed to duplicate dns name\n",
                                 __FUNCTION__);
                    } else {
                        d++;
                        num_found++;
                    }
                }
                break;
            default:
                pkiDebug("%s: SAN type = %d expecting %d\n",
                         __FUNCTION__, gen->type, GEN_OTHERNAME);
            }
        }
        sk_GENERAL_NAME_pop_free(ialt, GENERAL_NAME_free);
    }

    retval = 0;
    /* Hand the (possibly empty) arrays to the caller. */
    if (princs)
        *princs_ret = princs;
    if (upns)
        *upn_ret = upns;
    if (dnss)
        *dns_ret = dnss;

cleanup:
    /* On failure, tear down everything built so far. */
    if (retval) {
        if (princs != NULL) {
            for (i = 0; princs[i] != NULL; i++)
                krb5_free_principal(context, princs[i]);
            free(princs);
        }
        if (upns != NULL) {
            for (i = 0; upns[i] != NULL; i++)
                krb5_free_principal(context, upns[i]);
            free(upns);
        }
        if (dnss != NULL) {
            for (i = 0; dnss[i] != NULL; i++)
                free(dnss[i]);
            free(dnss);
        }
    }
    return retval;
}
/*
 * Report the identity string associated with the signing credentials in
 * id_cryptoctx.  Returns ENOENT when no identity has been recorded;
 * *identity is set either way (to NULL in the ENOENT case).
 */
krb5_error_code
crypto_retrieve_signer_identity(krb5_context context,
                                pkinit_identity_crypto_context id_cryptoctx,
                                const char **identity)
{
    const char *id = id_cryptoctx->identity;

    *identity = id;
    return (id == NULL) ? ENOENT : 0;
}
/*
 * Convenience wrapper: pull the SAN values out of the certificate received
 * for this request (reqctx->received_cert).  Fails with EINVAL when no
 * certificate has been received yet; otherwise defers to
 * crypto_retrieve_X509_sans().
 */
krb5_error_code
crypto_retrieve_cert_sans(krb5_context context,
                          pkinit_plg_crypto_context plgctx,
                          pkinit_req_crypto_context reqctx,
                          pkinit_identity_crypto_context idctx,
                          krb5_principal **princs_ret,
                          krb5_principal **upn_ret,
                          unsigned char ***dns_ret)
{
    if (reqctx->received_cert != NULL) {
        return crypto_retrieve_X509_sans(context, plgctx, reqctx,
                                         reqctx->received_cert, princs_ret,
                                         upn_ret, dns_ret);
    }
    pkiDebug("%s: No certificate!\n", __FUNCTION__);
    return EINVAL;
}
/*
 * Check the certificate in reqctx->received_cert for an acceptable
 * Extended Key Usage.
 *
 * For a KDC cert (checking_kdc_cert != 0) the accepted EKUs are
 * id-pkinit-KPKdc and, if allow_secondary_usage, id-kp-serverAuth.  For a
 * client cert they are id-pkinit-KPClientAuth and, optionally, the
 * Microsoft smart-card-logon EKU.  When a matching EKU is found, the
 * digitalSignature key-usage bit must also be asserted before *valid_eku
 * is set to 1.
 *
 * Returns 0 once the certificate has been examined (even if no EKU
 * matched); EINVAL when there is no certificate.
 */
krb5_error_code
crypto_check_cert_eku(krb5_context context,
                      pkinit_plg_crypto_context plgctx,
                      pkinit_req_crypto_context reqctx,
                      pkinit_identity_crypto_context idctx,
                      int checking_kdc_cert,
                      int allow_secondary_usage,
                      int *valid_eku)
{
    char buf[DN_BUF_LEN];
    int found_eku = 0;
    krb5_error_code retval = EINVAL;
    int i;

    *valid_eku = 0;
    if (reqctx->received_cert == NULL)
        goto cleanup;

    X509_NAME_oneline(X509_get_subject_name(reqctx->received_cert),
                      buf, sizeof(buf));
    pkiDebug("%s: looking for EKUs in cert = %s\n", __FUNCTION__, buf);

    if ((i = X509_get_ext_by_NID(reqctx->received_cert,
                                 NID_ext_key_usage, -1)) >= 0) {
        EXTENDED_KEY_USAGE *extusage;

        extusage = X509_get_ext_d2i(reqctx->received_cert, NID_ext_key_usage,
                                    NULL, NULL);
        if (extusage) {
            pkiDebug("%s: found eku info in the cert\n", __FUNCTION__);
            /* Stop scanning at the first acceptable EKU OID. */
            for (i = 0; found_eku == 0 && i < sk_ASN1_OBJECT_num(extusage); i++) {
                ASN1_OBJECT *tmp_oid;

                tmp_oid = sk_ASN1_OBJECT_value(extusage, i);
                pkiDebug("%s: checking eku %d of %d, allow_secondary = %d\n",
                         __FUNCTION__, i+1, sk_ASN1_OBJECT_num(extusage),
                         allow_secondary_usage);
                if (checking_kdc_cert) {
                    if ((OBJ_cmp(tmp_oid, plgctx->id_pkinit_KPKdc) == 0)
                        || (allow_secondary_usage
                            && OBJ_cmp(tmp_oid, plgctx->id_kp_serverAuth) == 0))
                        found_eku = 1;
                } else {
                    if ((OBJ_cmp(tmp_oid, plgctx->id_pkinit_KPClientAuth) == 0)
                        || (allow_secondary_usage
                            && OBJ_cmp(tmp_oid, plgctx->id_ms_kp_sc_logon) == 0))
                        found_eku = 1;
                }
            }
        }
        /* NOTE(review): called even when extusage is NULL — presumably
         * relying on OpenSSL ASN.1 free routines tolerating NULL; confirm
         * for the OpenSSL version in use. */
        EXTENDED_KEY_USAGE_free(extusage);

        if (found_eku) {
            ASN1_BIT_STRING *usage = NULL;

            pkiDebug("%s: found acceptable EKU, checking for digitalSignature\n", __FUNCTION__);

            /* check that digitalSignature KeyUsage is present */
            /* NOTE(review): the X509_check_ca() result is discarded —
             * presumably it is called for its side effect of populating
             * the cached extension flags consulted by ku_reject(); confirm. */
            X509_check_ca(reqctx->received_cert);
            if ((usage = X509_get_ext_d2i(reqctx->received_cert,
                                          NID_key_usage, NULL, NULL))) {

                if (!ku_reject(reqctx->received_cert,
                               X509v3_KU_DIGITAL_SIGNATURE)) {
                    pkiDebug("%s: found digitalSignature KU\n",
                             __FUNCTION__);
                    *valid_eku = 1;
                } else
                    pkiDebug("%s: didn't find digitalSignature KU\n",
                             __FUNCTION__);
            }
            ASN1_BIT_STRING_free(usage);
        }
    }
    retval = 0;
cleanup:
    pkiDebug("%s: returning retval %d, valid_eku %d\n",
             __FUNCTION__, retval, *valid_eku);
    return retval;
}
/*
 * Derive a krb5 protocol key of type etype from the raw DH shared secret
 * "key" (dh_key_len bytes): the secret is expanded with successive
 * SHA1(counter || secret) blocks and the result is folded through
 * krb5_c_random_to_key().  On failure any partially built keyblock is
 * released before returning.
 */
krb5_error_code
pkinit_octetstring2key(krb5_context context,
                       krb5_enctype etype,
                       unsigned char *key,
                       unsigned int dh_key_len,
                       krb5_keyblock *key_block)
{
    krb5_error_code retval;
    unsigned char *expanded = NULL;
    unsigned char digest[SHA_DIGEST_LENGTH];
    unsigned char block_no;
    size_t keybytes, keylength, pos;
    krb5_data random_data;

    expanded = malloc(dh_key_len);
    if (expanded == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    memset(expanded, 0, dh_key_len);

    /* Fill the buffer with SHA1(block_no || secret) blocks, truncating
     * the final digest if it would overrun the end. */
    block_no = 0;
    pos = 0;
    do {
        SHA_CTX ctx;
        size_t want;

        SHA1_Init(&ctx);
        SHA1_Update(&ctx, &block_no, 1);
        SHA1_Update(&ctx, key, dh_key_len);
        SHA1_Final(digest, &ctx);
        want = dh_key_len - pos;
        if (want > sizeof(digest))
            want = sizeof(digest);
        memcpy(expanded + pos, digest, want);
        pos += sizeof(digest);
        block_no++;
    } while (pos < dh_key_len);

    key_block->magic = 0;
    key_block->enctype = etype;

    retval = krb5_c_keylengths(context, etype, &keybytes, &keylength);
    if (retval != 0)
        goto cleanup;

    key_block->length = keylength;
    key_block->contents = malloc(keylength);
    if (key_block->contents == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }

    random_data.length = keybytes;
    random_data.data = (char *)expanded;
    retval = krb5_c_random_to_key(context, etype, &random_data, key_block);

cleanup:
    free(expanded);
    /* If this is an error return, free the allocated keyblock, if any. */
    if (retval)
        krb5_free_keyblock_contents(context, key_block);

    return retval;
}
/**
 * Given an algorithm_identifier, this function returns the hash length
 * and EVP function associated with that algorithm.
 *
 * Supported OIDs are SHA-1 (20 bytes), SHA-256 (32 bytes) and SHA-512
 * (64 bytes).  Returns 0 on success or KRB5_ERR_BAD_S2K_PARAMS for an
 * unrecognized OID.
 */
static krb5_error_code
pkinit_alg_values(krb5_context context,
                  const krb5_data *alg_id,
                  size_t *hash_bytes,
                  const EVP_MD *(**func)(void))
{
    *hash_bytes = 0;
    *func = NULL;
    /* NOTE(review): the SHA-1 comparison takes &krb5_pkinit_sha1_oid while
     * the others use the bare name — presumably these are arrays (making
     * both spellings equivalent); confirm against the OID definitions. */
    if ((alg_id->length == krb5_pkinit_sha1_oid_len) &&
        (0 == memcmp(alg_id->data, &krb5_pkinit_sha1_oid,
                     krb5_pkinit_sha1_oid_len))) {
        *hash_bytes = 20;
        *func = &EVP_sha1;
        return 0;
    } else if ((alg_id->length == krb5_pkinit_sha256_oid_len) &&
               (0 == memcmp(alg_id->data, krb5_pkinit_sha256_oid,
                            krb5_pkinit_sha256_oid_len))) {
        *hash_bytes = 32;
        *func = &EVP_sha256;
        return 0;
    } else if ((alg_id->length == krb5_pkinit_sha512_oid_len) &&
               (0 == memcmp(alg_id->data, krb5_pkinit_sha512_oid,
                            krb5_pkinit_sha512_oid_len))) {
        *hash_bytes = 64;
        *func = &EVP_sha512;
        return 0;
    } else {
        /* Unknown digest OID: refuse rather than guess. */
        krb5_set_error_message(context, KRB5_ERR_BAD_S2K_PARAMS,
                               "Bad algorithm ID passed to PK-INIT KDF.");
        return KRB5_ERR_BAD_S2K_PARAMS;
    }
} /* pkinit_alg_values() */
/* pkinit_alg_agility_kdf() --
 * This function generates a key using the KDF described in
 * draft_ietf_krb_wg_pkinit_alg_agility-04.txt. The algorithm is
 * described as follows:
 *
 * 1. reps = keydatalen (K) / hash length (H)
 *
 * 2. Initialize a 32-bit, big-endian bit string counter as 1.
 *
 * 3. For i = 1 to reps by 1, do the following:
 *
 * - Compute Hashi = H(counter || Z || OtherInfo).
 *
 * - Increment counter (modulo 2^32)
 *
 * 4. Set key = Hash1 || Hash2 || ... so that length of key is K bytes.
 *
 * secret is the shared secret Z; alg_oid selects the hash function; the
 * principals and request data are folded into the ASN.1 "OtherInfo" blob.
 * On success key_block holds the derived key (caller frees its contents);
 * on failure any partial key material is released.
 */
krb5_error_code
pkinit_alg_agility_kdf(krb5_context context,
                       krb5_data *secret,
                       krb5_data *alg_oid,
                       krb5_const_principal party_u_info,
                       krb5_const_principal party_v_info,
                       krb5_enctype enctype,
                       krb5_data *as_req,
                       krb5_data *pk_as_rep,
                       krb5_keyblock *key_block)
{
    krb5_error_code retval = 0;
    unsigned int reps = 0;
    uint32_t counter = 1;       /* 32-bit big-endian counter, per the draft */
    size_t offset = 0;
    size_t hash_len = 0;
    size_t rand_len = 0;
    size_t key_len = 0;
    krb5_data random_data;
    krb5_sp80056a_other_info other_info_fields;
    krb5_pkinit_supp_pub_info supp_pub_info_fields;
    krb5_data *other_info = NULL;
    krb5_data *supp_pub_info = NULL;
    krb5_algorithm_identifier alg_id;
    const EVP_MD *(*EVP_func)(void);

    /* initialize random_data here to make clean-up safe */
    random_data.length = 0;
    random_data.data = NULL;

    /* allocate and initialize the key block */
    key_block->magic = 0;
    key_block->enctype = enctype;
    if (0 != (retval = krb5_c_keylengths(context, enctype, &rand_len,
                                         &key_len)))
        goto cleanup;

    random_data.length = rand_len;
    key_block->length = key_len;

    if (NULL == (key_block->contents = malloc(key_block->length))) {
        retval = ENOMEM;
        goto cleanup;
    }
    memset(key_block->contents, 0, key_block->length);

    /* If this is anonymous pkinit, use the anonymous principal for
     * party_u_info */
    if (party_u_info &&
        krb5_principal_compare_any_realm(context, party_u_info,
                                         krb5_anonymous_principal()))
        party_u_info = (krb5_principal)krb5_anonymous_principal();

    if (0 != (retval = pkinit_alg_values(context, alg_oid, &hash_len,
                                         &EVP_func)))
        goto cleanup;

    /* 1. reps = keydatalen (K) / hash length (H) */
    reps = key_block->length / hash_len;

    /* ... and round up, if necessary */
    if (key_block->length > (reps * hash_len))
        reps++;

    /* Allocate enough space in the random data buffer to hash directly
     * into it, even if the last hash will make it bigger than the key
     * length. */
    if (NULL == (random_data.data = malloc(reps * hash_len))) {
        retval = ENOMEM;
        goto cleanup;
    }

    /* Encode the ASN.1 octet string for "SuppPubInfo" */
    supp_pub_info_fields.enctype = enctype;
    supp_pub_info_fields.as_req = *as_req;
    supp_pub_info_fields.pk_as_rep = *pk_as_rep;
    if (0 != ((retval = encode_krb5_pkinit_supp_pub_info(&supp_pub_info_fields,
                                                         &supp_pub_info))))
        goto cleanup;

    /* Now encode the ASN.1 octet string for "OtherInfo" */
    memset(&alg_id, 0, sizeof alg_id);
    alg_id.algorithm = *alg_oid; /* alias */
    other_info_fields.algorithm_identifier = alg_id;
    other_info_fields.party_u_info = (krb5_principal)party_u_info;
    other_info_fields.party_v_info = (krb5_principal)party_v_info;
    other_info_fields.supp_pub_info = *supp_pub_info;
    if (0 != (retval = encode_krb5_sp80056a_other_info(&other_info_fields,
                                                       &other_info)))
        goto cleanup;

    /* 2. Initialize a 32-bit, big-endian bit string counter as 1.
     * 3. For i = 1 to reps by 1, do the following:
     *    - Compute Hashi = H(counter || Z || OtherInfo).
     *    - Increment counter (modulo 2^32)
     */
    for (counter = 1; counter <= reps; counter++) {
        EVP_MD_CTX c;
        unsigned int s = 0;
        uint32_t be_counter = htonl(counter);

        EVP_MD_CTX_init(&c);

        /* - Compute Hashi = H(counter || Z || OtherInfo). */
        if (0 == EVP_DigestInit(&c, EVP_func())) {
            /* Release the digest context on every error path so we do
             * not leak its internal state. */
            EVP_MD_CTX_cleanup(&c);
            krb5_set_error_message(context, KRB5_CRYPTO_INTERNAL,
                                   "Call to OpenSSL EVP_DigestInit() returned an error.");
            retval = KRB5_CRYPTO_INTERNAL;
            goto cleanup;
        }

        if ((0 == EVP_DigestUpdate(&c, &be_counter, 4)) ||
            (0 == EVP_DigestUpdate(&c, secret->data, secret->length)) ||
            (0 == EVP_DigestUpdate(&c, other_info->data, other_info->length))) {
            EVP_MD_CTX_cleanup(&c);
            krb5_set_error_message(context, KRB5_CRYPTO_INTERNAL,
                                   "Call to OpenSSL EVP_DigestUpdate() returned an error.");
            retval = KRB5_CRYPTO_INTERNAL;
            goto cleanup;
        }

        /* 4. Set key = Hash1 || Hash2 || ... so that length of key is
         * K bytes. */
        if (0 == EVP_DigestFinal(&c, (unsigned char *)(random_data.data + offset), &s)) {
            EVP_MD_CTX_cleanup(&c);
            /* (Message previously mis-named the failing call as
             * EVP_DigestUpdate.) */
            krb5_set_error_message(context, KRB5_CRYPTO_INTERNAL,
                                   "Call to OpenSSL EVP_DigestFinal() returned an error.");
            retval = KRB5_CRYPTO_INTERNAL;
            goto cleanup;
        }
        offset += s;
        assert(s == hash_len);

        EVP_MD_CTX_cleanup(&c);
    }

    retval = krb5_c_random_to_key(context, enctype, &random_data,
                                  key_block);

cleanup:
    /* If this has been an error, free the allocated key_block, if any */
    if (retval) {
        krb5_free_keyblock_contents(context, key_block);
    }

    /* free other allocated resources, either way */
    if (random_data.data)
        free(random_data.data);
    krb5_free_data(context, other_info);
    krb5_free_data(context, supp_pub_info);

    return retval;
} /*pkinit_alg_agility_kdf() */
/* Call DH_compute_key() and ensure that we left-pad short results instead
 * of leaving junk bytes at the end of the buffer. */
static void
compute_dh(unsigned char *buf, int size, BIGNUM *server_pub_key, DH *dh)
{
    int written;

    written = DH_compute_key(buf, server_pub_key, dh);
    assert(written >= 0 && written <= size);
    if (written == size)
        return;

    /* Shift the secret to the end of the buffer and zero-fill the front. */
    memmove(buf + (size - written), buf, written);
    memset(buf, 0, size - written);
}
/*
 * Generate the client-side Diffie-Hellman key pair and encode it for the
 * AS request.
 *
 * dh_size selects a well-known modulus (1024, 2048 or 4096 bits); the
 * generator is 2 and q = (p-1)/2.  On success:
 *   *dh_params / *dh_params_len - RFC 3279 DomainParameters encoding
 *   *dh_pubkey / *dh_pubkey_len - DER INTEGER of the DH public value
 * both malloc'd for the caller to free.  The DH object is cached in
 * cryptoctx->dh for the follow-up client_process_dh().  On failure the
 * cached DH and both output buffers are released and NULLed.
 *
 * NOTE(review): the cleanup path frees *dh_params / *dh_pubkey even when
 * this call never assigned them — presumably callers initialize both
 * pointers to NULL before calling; confirm at the call sites.
 */
krb5_error_code
client_create_dh(krb5_context context,
                 pkinit_plg_crypto_context plg_cryptoctx,
                 pkinit_req_crypto_context cryptoctx,
                 pkinit_identity_crypto_context id_cryptoctx,
                 int dh_size,
                 unsigned char **dh_params,
                 unsigned int *dh_params_len,
                 unsigned char **dh_pubkey,
                 unsigned int *dh_pubkey_len)
{
    krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
    unsigned char *buf = NULL;
    int dh_err = 0;
    ASN1_INTEGER *pub_key = NULL;

    /* Build the DH group once per request context and reuse it after. */
    if (cryptoctx->dh == NULL) {
        if ((cryptoctx->dh = DH_new()) == NULL)
            goto cleanup;
        if ((cryptoctx->dh->g = BN_new()) == NULL ||
            (cryptoctx->dh->q = BN_new()) == NULL)
            goto cleanup;

        /* Pick the well-known prime for the requested key size. */
        switch(dh_size) {
        case 1024:
            pkiDebug("client uses 1024 DH keys\n");
            cryptoctx->dh->p = get_rfc2409_prime_1024(NULL);
            break;
        case 2048:
            pkiDebug("client uses 2048 DH keys\n");
            cryptoctx->dh->p = BN_bin2bn(pkinit_2048_dhprime,
                                         sizeof(pkinit_2048_dhprime), NULL);
            break;
        case 4096:
            pkiDebug("client uses 4096 DH keys\n");
            cryptoctx->dh->p = BN_bin2bn(pkinit_4096_dhprime,
                                         sizeof(pkinit_4096_dhprime), NULL);
            break;
        default:
            goto cleanup;
        }

        /* Generator 2, q = (p-1)/2 for these well-known primes. */
        BN_set_word((cryptoctx->dh->g), DH_GENERATOR_2);
        BN_rshift1(cryptoctx->dh->q, cryptoctx->dh->p);
    }

    DH_generate_key(cryptoctx->dh);

    /* Parameter-sanity failures are reported but not fatal here. */
    DH_check(cryptoctx->dh, &dh_err);
    if (dh_err != 0) {
        pkiDebug("Warning: dh_check failed with %d\n", dh_err);
        if (dh_err & DH_CHECK_P_NOT_PRIME)
            pkiDebug("p value is not prime\n");
        if (dh_err & DH_CHECK_P_NOT_SAFE_PRIME)
            pkiDebug("p value is not a safe prime\n");
        if (dh_err & DH_UNABLE_TO_CHECK_GENERATOR)
            pkiDebug("unable to check the generator value\n");
        if (dh_err & DH_NOT_SUITABLE_GENERATOR)
            pkiDebug("the g value is not a generator\n");
    }

#ifdef DEBUG_DH
    print_dh(cryptoctx->dh, "client's DH params\n");
    print_pubkey(cryptoctx->dh->pub_key, "client's pub_key=");
#endif

    /* A bad public key, however, is fatal. */
    DH_check_pub_key(cryptoctx->dh, cryptoctx->dh->pub_key, &dh_err);
    if (dh_err != 0) {
        pkiDebug("dh_check_pub_key failed with %d\n", dh_err);
        goto cleanup;
    }

    /* pack DHparams */
    /* aglo: usually we could just call i2d_DHparams to encode DH params
     * however, PKINIT requires RFC3279 encoding and openssl does pkcs#3.
     */
    retval = pkinit_encode_dh_params(cryptoctx->dh->p, cryptoctx->dh->g,
                                     cryptoctx->dh->q, dh_params,
                                     dh_params_len);
    if (retval)
        goto cleanup;

    /* pack DH public key */
    /* Diffie-Hellman public key must be ASN1 encoded as an INTEGER; this
     * encoding shall be used as the contents (the value) of the
     * subjectPublicKey component (a BIT STRING) of the SubjectPublicKeyInfo
     * data element
     */
    if ((pub_key = BN_to_ASN1_INTEGER(cryptoctx->dh->pub_key, NULL)) == NULL)
        goto cleanup;
    *dh_pubkey_len = i2d_ASN1_INTEGER(pub_key, NULL);
    if ((buf = *dh_pubkey = malloc(*dh_pubkey_len)) == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    i2d_ASN1_INTEGER(pub_key, &buf);

    if (pub_key != NULL)
        ASN1_INTEGER_free(pub_key);

    retval = 0;
    /* Success returns here; the cleanup label below is error-only. */
    return retval;

cleanup:
    if (cryptoctx->dh != NULL)
        DH_free(cryptoctx->dh);
    cryptoctx->dh = NULL;
    free(*dh_params);
    *dh_params = NULL;
    free(*dh_pubkey);
    *dh_pubkey = NULL;
    if (pub_key != NULL)
        ASN1_INTEGER_free(pub_key);

    return retval;
}
/*
 * Complete the client side of the DH exchange: decode the KDC's public
 * value (a DER INTEGER in subjectPublicKey_data) and compute the shared
 * secret with the DH key cached in cryptoctx->dh.
 *
 * On success *client_key / *client_key_len hold the (left-padded) shared
 * secret, malloc'd for the caller to free.  On failure the buffer is
 * released and *client_key is NULLed.
 */
krb5_error_code
client_process_dh(krb5_context context,
                  pkinit_plg_crypto_context plg_cryptoctx,
                  pkinit_req_crypto_context cryptoctx,
                  pkinit_identity_crypto_context id_cryptoctx,
                  unsigned char *subjectPublicKey_data,
                  unsigned int subjectPublicKey_length,
                  unsigned char **client_key,
                  unsigned int *client_key_len)
{
    krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
    BIGNUM *server_pub_key = NULL;
    ASN1_INTEGER *pub_key = NULL;
    const unsigned char *ptr = subjectPublicKey_data;

    /* Size the shared-secret buffer from our DH modulus. */
    *client_key_len = DH_size(cryptoctx->dh);
    *client_key = malloc(*client_key_len);
    if (*client_key == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }

    /* Decode the server's public value: DER INTEGER -> BIGNUM. */
    pub_key = d2i_ASN1_INTEGER(NULL, &ptr, (long)subjectPublicKey_length);
    if (pub_key != NULL)
        server_pub_key = ASN1_INTEGER_to_BN(pub_key, NULL);
    if (server_pub_key == NULL)
        goto cleanup;

    compute_dh(*client_key, *client_key_len, server_pub_key, cryptoctx->dh);
#ifdef DEBUG_DH
    print_pubkey(server_pub_key, "server's pub_key=");
    pkiDebug("client computed key (%d)= ", *client_key_len);
    print_buffer(*client_key, *client_key_len);
#endif
    retval = 0;

cleanup:
    if (retval) {
        /* Error path: drop the half-built secret buffer. */
        free(*client_key);
        *client_key = NULL;
    }
    if (server_pub_key != NULL)
        BN_free(server_pub_key);
    if (pub_key != NULL)
        ASN1_INTEGER_free(pub_key);

    return retval;
}
/* Return 1 if dh is a permitted well-known group, otherwise return 0. */
static int
check_dh_wellknown(pkinit_plg_crypto_context cryptoctx, DH *dh, int nbits)
{
    DH *known = NULL;

    if (nbits == 1024)
        known = cryptoctx->dh_1024;     /* Oakley MODP group 2 */
    else if (nbits == 2048)
        known = cryptoctx->dh_2048;     /* Oakley MODP group 14 */
    else if (nbits == 4096)
        known = cryptoctx->dh_4096;     /* Oakley MODP group 16 */

    /* Unknown sizes never match; known sizes must compare equal. */
    return known != NULL && pkinit_check_dh_params(known, dh) == 0;
}
/*
 * Validate the DH domain parameters offered by the client.
 *
 * Decodes dh_params (RFC 3279 DomainParameters), rejects moduli shorter
 * than minbits (when minbits is non-zero), and accepts only the
 * well-known groups matched by check_dh_wellknown().  On acceptance the
 * decoded DH object is stored in req_cryptoctx->dh (ownership
 * transferred); otherwise it is freed.  Returns 0 on acceptance or
 * KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED.
 */
krb5_error_code
server_check_dh(krb5_context context,
                pkinit_plg_crypto_context cryptoctx,
                pkinit_req_crypto_context req_cryptoctx,
                pkinit_identity_crypto_context id_cryptoctx,
                krb5_data *dh_params,
                int minbits)
{
    DH *dh = NULL;
    unsigned char *tmp = NULL;
    int dh_prime_bits;
    krb5_error_code retval = KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED;

    tmp = (unsigned char *)dh_params->data;
    dh = DH_new();
    /* NOTE(review): pkinit_decode_dh_params() is handed &dh d2i-style;
     * presumably it reuses or frees the DH_new() object on failure —
     * confirm the initial allocation cannot leak. */
    dh = pkinit_decode_dh_params(&dh, &tmp, dh_params->length);
    if (dh == NULL) {
        pkiDebug("failed to decode dhparams\n");
        goto cleanup;
    }

    /* KDC SHOULD check to see if the key parameters satisfy its policy */
    dh_prime_bits = BN_num_bits(dh->p);
    if (minbits && dh_prime_bits < minbits) {
        pkiDebug("client sent dh params with %d bits, we require %d\n",
                 dh_prime_bits, minbits);
        goto cleanup;
    }

    /* Accept only a well-known (Oakley) group of the observed size. */
    if (check_dh_wellknown(cryptoctx, dh, dh_prime_bits))
        retval = 0;

cleanup:
    if (retval == 0)
        req_cryptoctx->dh = dh;   /* ownership passes to the request ctx */
    else
        DH_free(dh);

    return retval;
}
/* kdc's dh function */
/*
 * Complete the DH exchange on the KDC side: decode the client's public
 * value (DER INTEGER in data/data_len), generate the server's keypair
 * over the parameters saved by server_check_dh, compute the shared
 * session key, and DER-encode the server's public value.
 *
 * On success returns 0 with malloc'd *dh_pubkey/*server_key and their
 * lengths; on failure returns ENOMEM and leaves the output pointers
 * NULL with zero lengths (previously they could be left dangling, and
 * pub_key could leak on error paths).
 */
krb5_error_code
server_process_dh(krb5_context context,
                  pkinit_plg_crypto_context plg_cryptoctx,
                  pkinit_req_crypto_context cryptoctx,
                  pkinit_identity_crypto_context id_cryptoctx,
                  unsigned char *data,
                  unsigned int data_len,
                  unsigned char **dh_pubkey,
                  unsigned int *dh_pubkey_len,
                  unsigned char **server_key,
                  unsigned int *server_key_len)
{
    krb5_error_code retval = ENOMEM;
    DH *dh = NULL, *dh_server = NULL;
    unsigned char *p = NULL;
    ASN1_INTEGER *pub_key = NULL;

    *dh_pubkey = *server_key = NULL;
    *dh_pubkey_len = *server_key_len = 0;

    /* get client's received DH parameters that we saved in server_check_dh */
    dh = cryptoctx->dh;

    dh_server = DH_new();
    if (dh_server == NULL)
        goto cleanup;
    dh_server->p = BN_dup(dh->p);
    dh_server->g = BN_dup(dh->g);
    dh_server->q = BN_dup(dh->q);

    /* decode client's public key */
    p = data;
    pub_key = d2i_ASN1_INTEGER(NULL, (const unsigned char **)&p, (int)data_len);
    if (pub_key == NULL)
        goto cleanup;
    dh->pub_key = ASN1_INTEGER_to_BN(pub_key, NULL);
    if (dh->pub_key == NULL)
        goto cleanup;
    ASN1_INTEGER_free(pub_key);
    pub_key = NULL;             /* prevent double free in cleanup */

    if (!DH_generate_key(dh_server))
        goto cleanup;

    /* generate DH session key */
    *server_key_len = DH_size(dh_server);
    if ((*server_key = malloc(*server_key_len)) == NULL)
        goto cleanup;
    compute_dh(*server_key, *server_key_len, dh->pub_key, dh_server);

#ifdef DEBUG_DH
    print_dh(dh_server, "client&server's DH params\n");
    print_pubkey(dh->pub_key, "client's pub_key=");
    print_pubkey(dh_server->pub_key, "server's pub_key=");
    pkiDebug("server computed key=");
    print_buffer(*server_key, *server_key_len);
#endif

    /* KDC reply */
    /* pack DH public key */
    /* Diffie-Hellman public key must be ASN1 encoded as an INTEGER; this
     * encoding shall be used as the contents (the value) of the
     * subjectPublicKey component (a BIT STRING) of the SubjectPublicKeyInfo
     * data element
     */
    if ((pub_key = BN_to_ASN1_INTEGER(dh_server->pub_key, NULL)) == NULL)
        goto cleanup;
    *dh_pubkey_len = i2d_ASN1_INTEGER(pub_key, NULL);
    if ((p = *dh_pubkey = malloc(*dh_pubkey_len)) == NULL)
        goto cleanup;
    i2d_ASN1_INTEGER(pub_key, &p);

    retval = 0;

cleanup:
    /* Free pub_key on every path (it leaked on error paths before). */
    if (pub_key != NULL)
        ASN1_INTEGER_free(pub_key);
    if (dh_server != NULL)
        DH_free(dh_server);
    if (retval != 0) {
        /* Release partial outputs and leave them NULL, not dangling. */
        free(*dh_pubkey);
        *dh_pubkey = NULL;
        *dh_pubkey_len = 0;
        free(*server_key);
        *server_key = NULL;
        *server_key_len = 0;
    }
    return retval;
}
/*
 * One-time process-wide OpenSSL initialization for the PKINIT plugin.
 * Always returns 0.
 */
int
pkinit_openssl_init()
{
    /* Initialize OpenSSL. */
    CRYPTO_malloc_init();             /* install OpenSSL's allocator hooks */
    ERR_load_crypto_strings();        /* human-readable error strings */
    OpenSSL_add_all_algorithms();     /* register ciphers/digests for lookup */
    return 0;
}
/*
 * DER-encode DH domain parameters as SEQUENCE { p INTEGER, g INTEGER,
 * q INTEGER }.  On success returns 0 with a malloc'd buffer in *buf and
 * its length in *buf_len; on failure returns ENOMEM.
 */
static krb5_error_code
pkinit_encode_dh_params(BIGNUM *p, BIGNUM *g, BIGNUM *q,
                        unsigned char **buf, unsigned int *buf_len)
{
    krb5_error_code retval = ENOMEM;
    int inner_len = 0, total_len = 0;
    unsigned char *ptr = NULL;
    ASN1_INTEGER *int_p = NULL, *int_g = NULL, *int_q = NULL;

    /* Convert each BIGNUM into an ASN.1 INTEGER object. */
    if ((int_p = BN_to_ASN1_INTEGER(p, NULL)) == NULL)
        goto cleanup;
    if ((int_g = BN_to_ASN1_INTEGER(g, NULL)) == NULL)
        goto cleanup;
    if ((int_q = BN_to_ASN1_INTEGER(q, NULL)) == NULL)
        goto cleanup;

    /* Sum the INTEGER encodings, then size the wrapping SEQUENCE. */
    inner_len = i2d_ASN1_INTEGER(int_p, NULL);
    inner_len += i2d_ASN1_INTEGER(int_g, NULL);
    inner_len += i2d_ASN1_INTEGER(int_q, NULL);
    total_len = ASN1_object_size(1, inner_len, V_ASN1_SEQUENCE);

    ptr = *buf = malloc((size_t) total_len);
    if (ptr == NULL)
        goto cleanup;

    /* Emit the SEQUENCE header followed by the three INTEGERs. */
    ASN1_put_object(&ptr, 1, inner_len, V_ASN1_SEQUENCE, V_ASN1_UNIVERSAL);
    i2d_ASN1_INTEGER(int_p, &ptr);
    i2d_ASN1_INTEGER(int_g, &ptr);
    i2d_ASN1_INTEGER(int_q, &ptr);

    *buf_len = total_len;
    retval = 0;

cleanup:
    if (int_p != NULL)
        ASN1_INTEGER_free(int_p);
    if (int_g != NULL)
        ASN1_INTEGER_free(int_g);
    if (int_q != NULL)
        ASN1_INTEGER_free(int_q);
    return retval;
}
/*
 * Decode a DER SEQUENCE { p INTEGER, g INTEGER, q INTEGER OPTIONAL } of
 * DH domain parameters into the DH at *a, advancing *pp past the bytes
 * consumed.  Returns the DH on success, NULL on failure.
 *
 * Built on OpenSSL's legacy M_ASN1_D2I_* macros, which declare hidden
 * locals and jump to a hidden error label emitted by M_ASN1_D2I_Finish
 * (which also frees *a via DH_free on that path), so the control flow is
 * not what the straight-line code suggests — do not restructure casually.
 * NOTE(review): the bare `return NULL` paths appear to skip the macro
 * cleanup; confirm against the legacy asn1_mac.h expansion.
 */
static DH *
pkinit_decode_dh_params(DH ** a, unsigned char **pp, unsigned int len)
{
    ASN1_INTEGER ai, *aip = NULL;
    long length = (long) len;
    M_ASN1_D2I_vars(a, DH *, DH_new);     /* declares c/ret; may allocate *a */
    M_ASN1_D2I_Init();
    M_ASN1_D2I_start_sequence();
    aip = &ai;
    ai.data = NULL;
    ai.length = 0;
    /* p: required prime modulus */
    M_ASN1_D2I_get_x(ASN1_INTEGER, aip, d2i_ASN1_INTEGER);
    if (aip == NULL)
        return NULL;
    else {
        (*a)->p = ASN1_INTEGER_to_BN(aip, NULL);
        if ((*a)->p == NULL)
            return NULL;
        if (ai.data != NULL) {
            /* release the INTEGER payload before reusing ai */
            OPENSSL_free(ai.data);
            ai.data = NULL;
            ai.length = 0;
        }
    }
    /* g: required generator */
    M_ASN1_D2I_get_x(ASN1_INTEGER, aip, d2i_ASN1_INTEGER);
    if (aip == NULL)
        return NULL;
    else {
        (*a)->g = ASN1_INTEGER_to_BN(aip, NULL);
        if ((*a)->g == NULL)
            return NULL;
        if (ai.data != NULL) {
            OPENSSL_free(ai.data);
            ai.data = NULL;
            ai.length = 0;
        }
    }
    /* q: OPTIONAL subgroup order; when absent, (*a)->q stays NULL */
    M_ASN1_D2I_get_opt(aip, d2i_ASN1_INTEGER, V_ASN1_INTEGER);
    if (aip == NULL || ai.data == NULL)
        (*a)->q = NULL;
    else {
        (*a)->q = ASN1_INTEGER_to_BN(aip, NULL);
        if ((*a)->q == NULL)
            return NULL;
        if (ai.data != NULL) {
            OPENSSL_free(ai.data);
            ai.data = NULL;
            ai.length = 0;
        }
    }
    M_ASN1_D2I_end_sequence();
    M_ASN1_D2I_Finish(a, DH_free, 0);     /* emits return + error label */
}
/*
 * Build a typed-data element carrying a sequence of external principal
 * identifiers: either the KDC's trusted certifiers
 * (TD_TRUSTED_CERTIFIERS) or the client's rejected certificates
 * (TD_INVALID_CERTIFICATES).  On success *e_data_out is a
 * NULL-terminated pa-data array that owns the encoded bytes.
 */
static krb5_error_code
pkinit_create_sequence_of_principal_identifiers(
    krb5_context context,
    pkinit_plg_crypto_context plg_cryptoctx,
    pkinit_req_crypto_context req_cryptoctx,
    pkinit_identity_crypto_context id_cryptoctx,
    int type,
    krb5_pa_data ***e_data_out)
{
    krb5_error_code retval = KRB5KRB_ERR_GENERIC;
    krb5_external_principal_identifier **krb5_trusted_certifiers = NULL;
    krb5_data *td_certifiers = NULL;
    krb5_pa_data **pa_data = NULL;

    switch(type) {
    case TD_TRUSTED_CERTIFIERS:
        retval = create_krb5_trustedCertifiers(context, plg_cryptoctx,
            req_cryptoctx, id_cryptoctx, &krb5_trusted_certifiers);
        if (retval) {
            pkiDebug("create_krb5_trustedCertifiers failed\n");
            goto cleanup;
        }
        break;
    case TD_INVALID_CERTIFICATES:
        retval = create_krb5_invalidCertificates(context, plg_cryptoctx,
            req_cryptoctx, id_cryptoctx, &krb5_trusted_certifiers);
        if (retval) {
            pkiDebug("create_krb5_invalidCertificates failed\n");
            goto cleanup;
        }
        break;
    default:
        retval = -1;
        goto cleanup;
    }

    retval = k5int_encode_krb5_td_trusted_certifiers((krb5_external_principal_identifier *const *)krb5_trusted_certifiers, &td_certifiers);
    if (retval) {
        pkiDebug("encode_krb5_td_trusted_certifiers failed\n");
        goto cleanup;
    }
#ifdef DEBUG_ASN1
    print_buffer_bin((unsigned char *)td_certifiers->data,
                     td_certifiers->length, "/tmp/kdc_td_certifiers");
#endif
    pa_data = malloc(2 * sizeof(krb5_pa_data *));
    if (pa_data == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    pa_data[1] = NULL;
    pa_data[0] = malloc(sizeof(krb5_pa_data));
    if (pa_data[0] == NULL) {
        free(pa_data);
        retval = ENOMEM;
        goto cleanup;
    }
    /* The pa-data element takes ownership of the encoded bytes. */
    pa_data[0]->pa_type = type;
    pa_data[0]->length = td_certifiers->length;
    pa_data[0]->contents = (krb5_octet *)td_certifiers->data;
    *e_data_out = pa_data;
    retval = 0;

cleanup:
    if (krb5_trusted_certifiers != NULL)
        free_krb5_external_principal_identifier(&krb5_trusted_certifiers);
    if (td_certifiers != NULL) {
        /* Fix: the encoded bytes used to leak when an error occurred
         * after encoding; free them unless handed off to pa_data. */
        if (retval != 0)
            free(td_certifiers->data);
        free(td_certifiers);
    }
    return retval;
}
/*
 * Build a TD-TRUSTED-CERTIFIERS typed-data element naming the KDC's
 * trusted certifiers, for return to the client as error data.
 */
krb5_error_code
pkinit_create_td_trusted_certifiers(krb5_context context,
                                    pkinit_plg_crypto_context plg_cryptoctx,
                                    pkinit_req_crypto_context req_cryptoctx,
                                    pkinit_identity_crypto_context id_cryptoctx,
                                    krb5_pa_data ***e_data_out)
{
    /* Delegate to the shared sequence-of-identifiers builder. */
    return pkinit_create_sequence_of_principal_identifiers(context,
        plg_cryptoctx, req_cryptoctx, id_cryptoctx,
        TD_TRUSTED_CERTIFIERS, e_data_out);
}
/*
 * Build a TD-INVALID-CERTIFICATES typed-data element identifying the
 * client certificates the KDC rejected, for return as error data.
 */
krb5_error_code
pkinit_create_td_invalid_certificate(
    krb5_context context,
    pkinit_plg_crypto_context plg_cryptoctx,
    pkinit_req_crypto_context req_cryptoctx,
    pkinit_identity_crypto_context id_cryptoctx,
    krb5_pa_data ***e_data_out)
{
    /* Delegate to the shared sequence-of-identifiers builder. */
    return pkinit_create_sequence_of_principal_identifiers(context,
        plg_cryptoctx, req_cryptoctx, id_cryptoctx,
        TD_INVALID_CERTIFICATES, e_data_out);
}
krb5_error_code
pkinit_create_td_dh_parameters(krb5_context context,
pkinit_plg_crypto_context plg_cryptoctx,
pkinit_req_crypto_context req_cryptoctx,
pkinit_identity_crypto_context id_cryptoctx,
pkinit_plg_opts *opts,
krb5_pa_data ***e_data_out)
{
krb5_error_code retval = ENOMEM;
unsigned int buf1_len = 0, buf2_len = 0, buf3_len = 0, i = 0;
unsigned char *buf1 = NULL, *buf2 = NULL, *buf3 = NULL;
krb5_pa_data **pa_data = NULL;
krb5_data *encoded_algId = NULL;
krb5_algorithm_identifier **algId = NULL;
if (opts->dh_min_bits > 4096)
goto cleanup;
if (opts->dh_min_bits <= 1024) {
retval = pkinit_encode_dh_params(plg_cryptoctx->dh_1024->p,
plg_cryptoctx->dh_1024->g, plg_cryptoctx->dh_1024->q,
&buf1, &buf1_len);
if (retval)
goto cleanup;
}
if (opts->dh_min_bits <= 2048) {
retval = pkinit_encode_dh_params(plg_cryptoctx->dh_2048->p,
plg_cryptoctx->dh_2048->g, plg_cryptoctx->dh_2048->q,
&buf2, &buf2_len);
if (retval)
goto cleanup;
}
retval = pkinit_encode_dh_params(plg_cryptoctx->dh_4096->p,
plg_cryptoctx->dh_4096->g, plg_cryptoctx->dh_4096->q,
&buf3, &buf3_len);
if (retval)
goto cleanup;
if (opts->dh_min_bits <= 1024) {
algId = malloc(4 * sizeof(krb5_algorithm_identifier *));
if (algId == NULL)
goto cleanup;
algId[3] = NULL;
algId[0] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[0] == NULL)
goto cleanup;
algId[0]->parameters.data = malloc(buf2_len);
if (algId[0]->parameters.data == NULL)
goto cleanup;
memcpy(algId[0]->parameters.data, buf2, buf2_len);
algId[0]->parameters.length = buf2_len;
algId[0]->algorithm = dh_oid;
algId[1] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[1] == NULL)
goto cleanup;
algId[1]->parameters.data = malloc(buf3_len);
if (algId[1]->parameters.data == NULL)
goto cleanup;
memcpy(algId[1]->parameters.data, buf3, buf3_len);
algId[1]->parameters.length = buf3_len;
algId[1]->algorithm = dh_oid;
algId[2] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[2] == NULL)
goto cleanup;
algId[2]->parameters.data = malloc(buf1_len);
if (algId[2]->parameters.data == NULL)
goto cleanup;
memcpy(algId[2]->parameters.data, buf1, buf1_len);
algId[2]->parameters.length = buf1_len;
algId[2]->algorithm = dh_oid;
} else if (opts->dh_min_bits <= 2048) {
algId = malloc(3 * sizeof(krb5_algorithm_identifier *));
if (algId == NULL)
goto cleanup;
algId[2] = NULL;
algId[0] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[0] == NULL)
goto cleanup;
algId[0]->parameters.data = malloc(buf2_len);
if (algId[0]->parameters.data == NULL)
goto cleanup;
memcpy(algId[0]->parameters.data, buf2, buf2_len);
algId[0]->parameters.length = buf2_len;
algId[0]->algorithm = dh_oid;
algId[1] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[1] == NULL)
goto cleanup;
algId[1]->parameters.data = malloc(buf3_len);
if (algId[1]->parameters.data == NULL)
goto cleanup;
memcpy(algId[1]->parameters.data, buf3, buf3_len);
algId[1]->parameters.length = buf3_len;
algId[1]->algorithm = dh_oid;
} else if (opts->dh_min_bits <= 4096) {
algId = malloc(2 * sizeof(krb5_algorithm_identifier *));
if (algId == NULL)
goto cleanup;
algId[1] = NULL;
algId[0] = malloc(sizeof(krb5_algorithm_identifier));
if (algId[0] == NULL)
goto cleanup;
algId[0]->parameters.data = malloc(buf3_len);
if (algId[0]->parameters.data == NULL)
goto cleanup;
memcpy(algId[0]->parameters.data, buf3, buf3_len);
algId[0]->parameters.length = buf3_len;
algId[0]->algorithm = dh_oid;
}
retval = k5int_encode_krb5_td_dh_parameters((krb5_algorithm_identifier *const *)algId, &encoded_algId);
if (retval)
goto cleanup;
#ifdef DEBUG_ASN1
print_buffer_bin((unsigned char *)encoded_algId->data,
encoded_algId->length, "/tmp/kdc_td_dh_params");
#endif
pa_data = malloc(2 * sizeof(krb5_pa_data *));
if (pa_data == NULL) {
retval = ENOMEM;
goto cleanup;
}
pa_data[1] = NULL;
pa_data[0] = malloc(sizeof(krb5_pa_data));
if (pa_data[0] == NULL) {
free(pa_data);
retval = ENOMEM;
goto cleanup;
}
pa_data[0]->pa_type = TD_DH_PARAMETERS;
pa_data[0]->length = encoded_algId->length;
pa_data[0]->contents = (krb5_octet *)encoded_algId->data;
*e_data_out = pa_data;
retval = 0;
cleanup:
free(buf1);
free(buf2);
free(buf3);
free(encoded_algId);
if (algId != NULL) {
while(algId[i] != NULL) {
free(algId[i]->parameters.data);
free(algId[i]);
i++;
}
free(algId);
}
return retval;
}
/*
 * Compare the kdcPkId hint from the AS-REQ (a DER IssuerAndSerialNumber
 * in pdid_buf/pkid_len) against the KDC's own certificate.  Sets
 * *valid_kdcPkId to 1 only when both issuer and serial match; returns
 * KRB5KDC_ERR_PREAUTH_FAILED if the hint cannot be decoded, else 0.
 */
krb5_error_code
pkinit_check_kdc_pkid(krb5_context context,
                      pkinit_plg_crypto_context plg_cryptoctx,
                      pkinit_req_crypto_context req_cryptoctx,
                      pkinit_identity_crypto_context id_cryptoctx,
                      unsigned char *pdid_buf,
                      unsigned int pkid_len,
                      int *valid_kdcPkId)
{
    PKCS7_ISSUER_AND_SERIAL *is = NULL;
    const unsigned char *p = pdid_buf;
    X509 *kdc_cert = sk_X509_value(id_cryptoctx->my_certs, id_cryptoctx->cert_index);

    *valid_kdcPkId = 0;
    pkiDebug("found kdcPkId in AS REQ\n");

    is = d2i_PKCS7_ISSUER_AND_SERIAL(NULL, &p, (int)pkid_len);
    if (is == NULL)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    /* Both the issuer DN and the serial number must match our cert. */
    if (X509_NAME_cmp(X509_get_issuer_name(kdc_cert), is->issuer) == 0 &&
        ASN1_INTEGER_cmp(X509_get_serialNumber(kdc_cert), is->serial) == 0)
        *valid_kdcPkId = 1;

    X509_NAME_free(is->issuer);
    ASN1_INTEGER_free(is->serial);
    free(is);
    return 0;
}
/* Check parameters against a well-known DH group. */
static int
pkinit_check_dh_params(DH *dh1, DH *dh2)
{
    /* The prime modulus must match the stock group exactly. */
    if (BN_cmp(dh1->p, dh2->p)) {
        pkiDebug("p is not well-known group dhparameter\n");
        return -1;
    }
    /* So must the generator. */
    if (BN_cmp(dh1->g, dh2->g)) {
        pkiDebug("bad g dhparameter\n");
        return -1;
    }
    pkiDebug("good %d dhparams\n", BN_num_bits(dh1->p));
    return 0;   /* match */
}
/*
 * Process the TD-DH-PARAMETERS list a KDC returned after rejecting our
 * DH parameters.  Walk the list in the KDC's preference order and pick
 * the first acceptable group: a well-known group, or failing that any
 * group that passes OpenSSL's DH_check.  On success returns 0, updates
 * *new_dh_size to the chosen modulus size (well-known case) and, for a
 * non-well-known group, stores the decoded DH in req_cryptoctx->dh.
 * Otherwise returns KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED.
 */
krb5_error_code
pkinit_process_td_dh_params(krb5_context context,
                            pkinit_plg_crypto_context cryptoctx,
                            pkinit_req_crypto_context req_cryptoctx,
                            pkinit_identity_crypto_context id_cryptoctx,
                            krb5_algorithm_identifier **algId,
                            int *new_dh_size)
{
    krb5_error_code retval = KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED;
    int i = 0, use_sent_dh = 0, ok = 0;

    pkiDebug("dh parameters\n");

    while (algId[i] != NULL) {
        DH *dh = NULL;
        unsigned char *tmp = NULL;
        int dh_prime_bits = 0;

        /* Only the Diffie-Hellman OID is acceptable. */
        if (algId[i]->algorithm.length != dh_oid.length ||
            memcmp(algId[i]->algorithm.data, dh_oid.data, dh_oid.length))
            goto cleanup;
        tmp = (unsigned char *)algId[i]->parameters.data;
        dh = DH_new();
        dh = pkinit_decode_dh_params(&dh, &tmp, algId[i]->parameters.length);
        if (dh == NULL) {
            /* Fix: a malformed entry used to crash in BN_num_bits(dh->p). */
            pkiDebug("failed to decode dhparams\n");
            goto cleanup;
        }
        dh_prime_bits = BN_num_bits(dh->p);
        pkiDebug("client sent %d DH bits server prefers %d DH bits\n",
                 *new_dh_size, dh_prime_bits);
        ok = check_dh_wellknown(cryptoctx, dh, dh_prime_bits);
        if (ok) {
            *new_dh_size = dh_prime_bits;
        }
        if (!ok) {
            /* Not a stock group; fall back to OpenSSL's validity check.
             * DH_check deposits its error codes in retval. */
            DH_check(dh, &retval);
            if (retval != 0) {
                pkiDebug("DH parameters provided by server are unacceptable\n");
                retval = KRB5KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED;
            }
            else {
                use_sent_dh = 1;
                ok = 1;
            }
        }
        if (!use_sent_dh)
            DH_free(dh);
        if (ok) {
            /* Replace any previously saved DH with the accepted one. */
            if (req_cryptoctx->dh != NULL) {
                DH_free(req_cryptoctx->dh);
                req_cryptoctx->dh = NULL;
            }
            if (use_sent_dh)
                req_cryptoctx->dh = dh;
            break;
        }
        i++;
    }

    if (ok)
        retval = 0;

cleanup:
    return retval;
}
/*
 * OpenSSL certificate-verification callback: pass the library's verdict
 * through unchanged, logging the failing certificate subject and error
 * code when built with DEBUG.
 */
static int
openssl_callback(int ok, X509_STORE_CTX * ctx)
{
#ifdef DEBUG
    if (!ok) {
        char buf[DN_BUF_LEN];
        X509_NAME_oneline(X509_get_subject_name(ctx->current_cert), buf, sizeof(buf));
        pkiDebug("cert = %s\n", buf);
        pkiDebug("callback function: %d (%s)\n", ctx->error,
                 X509_verify_cert_error_string(ctx->error));
    }
#endif
    return ok;   /* never overrides the library's decision */
}
/*
 * Verification callback that accepts a chain even when no CRL could be
 * retrieved; every other verification failure remains fatal.
 */
static int
openssl_callback_ignore_crls(int ok, X509_STORE_CTX * ctx)
{
    if (ok)
        return ok;
    /* Treat only a missing CRL as acceptable. */
    return ctx->error == X509_V_ERR_UNABLE_TO_GET_CRL;
}
/*
 * Map a CMS message-type code to the OID labeling its eContent.
 * Returns NULL for unrecognized types.
 */
static ASN1_OBJECT *
pkinit_pkcs7type2oid(pkinit_plg_crypto_context cryptoctx, int pkcs7_type)
{
    if (pkcs7_type == CMS_SIGN_CLIENT)
        return cryptoctx->id_pkinit_authData;
    if (pkcs7_type == CMS_SIGN_DRAFT9)
        /* Draft 9 wraps plain PKCS#7 data. */
        return OBJ_nid2obj(NID_pkcs7_data);
    if (pkcs7_type == CMS_SIGN_SERVER)
        return cryptoctx->id_pkinit_DHKeyData;
    if (pkcs7_type == CMS_ENVEL_SERVER)
        return cryptoctx->id_pkinit_rkeyData;
    return NULL;
}
/*
 * Wrap DER-encoded SignedData in a ContentInfo shell:
 * SEQUENCE { signedData OID, [0] EXPLICIT <data> }.  On success *out is
 * a malloc'd buffer of *out_len bytes and 0 is returned; -1 means the
 * allocation failed.
 */
static int
wrap_signeddata(unsigned char *data, unsigned int data_len,
                unsigned char **out, unsigned int *out_len)
{
    unsigned int wrapped_len = 0, oid_len = 0, total = 0;
    ASN1_OBJECT *oid = NULL;
    unsigned char *ptr = NULL;

    /* Size of the tagged wrapper around the original payload. */
    wrapped_len = ASN1_object_size(1, (int)data_len, V_ASN1_SEQUENCE);

    /* Size of the signedData content-type OID. */
    oid = OBJ_nid2obj(NID_pkcs7_signed);
    oid_len = i2d_ASN1_OBJECT(oid, NULL);

    /* Size of the outer SEQUENCE holding OID + wrapped payload. */
    total = ASN1_object_size(1, (int)(wrapped_len + oid_len), V_ASN1_SEQUENCE);

    ptr = *out = malloc(total);
    if (ptr == NULL)
        return -1;

    /* Emit: outer SEQUENCE header, OID, [0] EXPLICIT header, payload. */
    ASN1_put_object(&ptr, 1, (int)(wrapped_len + oid_len),
                    V_ASN1_SEQUENCE, V_ASN1_UNIVERSAL);
    i2d_ASN1_OBJECT(oid, &ptr);
    ASN1_put_object(&ptr, 1, (int)data_len, 0, V_ASN1_CONTEXT_SPECIFIC);
    memcpy(ptr, data, data_len);

    *out_len = total;
    return 0;
}
/*
 * Strip the outer ContentInfo wrapping from DER-encoded enveloped data:
 * enter the outer SEQUENCE, skip its first inner element, then copy out
 * the octets of the second inner element into a malloc'd *outdata.
 * Returns 0 on success, ENOMEM on allocation failure.
 *
 * Uses the legacy ASN1_const_CTX parser state directly.
 * NOTE(review): the asn1_GetSequence/ASN1_get_object results are not
 * error-checked, so a malformed input could yield a bogus Tlen — confirm
 * callers only pass pre-validated CMS data.
 */
static int
prepare_enc_data(unsigned char *indata,
                 int indata_len,
                 unsigned char **outdata,
                 int *outdata_len)
{
    int retval = -1;
    ASN1_const_CTX c;
    long length = indata_len;
    int Ttag, Tclass;
    long Tlen;

    /* Initialize the parser cursor over indata. */
    c.pp = (const unsigned char **)&indata;
    c.q = *(const unsigned char **)&indata;
    c.error = ERR_R_NESTED_ASN1_ERROR;
    c.p= *(const unsigned char **)&indata;
    c.max = (length == 0)?0:(c.p+length);
    asn1_GetSequence(&c,&length);                      /* enter outer SEQUENCE */
    ASN1_get_object(&c.p,&Tlen,&Ttag,&Tclass,c.slen);  /* first inner element */
    c.p += Tlen;                                       /* skip its contents */
    ASN1_get_object(&c.p,&Tlen,&Ttag,&Tclass,c.slen);  /* second inner element */
    asn1_const_Finish(&c);
    *outdata = malloc((size_t)Tlen);
    if (*outdata == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    /* c.p now points at the content octets of the second element. */
    memcpy(*outdata, c.p, (size_t)Tlen);
    *outdata_len = Tlen;
    retval = 0;
cleanup:
    return retval;
}
#ifndef WITHOUT_PKCS11
/*
 * dlopen a PKCS#11 provider and obtain its function list through the
 * standard C_GetFunctionList entry point.  Returns the dlopen handle on
 * success (with *p11p filled in) or NULL on failure.
 */
static void *
pkinit_C_LoadModule(const char *modname, CK_FUNCTION_LIST_PTR_PTR p11p)
{
    void *dlhandle = NULL;
    CK_RV (*get_flist)(CK_FUNCTION_LIST_PTR_PTR);

    pkiDebug("loading module \"%s\"... ", modname);
    dlhandle = dlopen(modname, RTLD_NOW);
    if (dlhandle == NULL) {
        pkiDebug("not found\n");
        return NULL;
    }

    /* Resolve the standard entry point and let it fill in *p11p. */
    get_flist = (CK_RV (*)(CK_FUNCTION_LIST_PTR_PTR)) dlsym(dlhandle, "C_GetFunctionList");
    if (get_flist == NULL || (*get_flist)(p11p) != CKR_OK) {
        dlclose(dlhandle);
        pkiDebug("failed\n");
        return NULL;
    }

    pkiDebug("ok\n");
    return dlhandle;
}
/*
 * Unload a PKCS#11 module previously loaded by pkinit_C_LoadModule.
 * Always reports CKR_OK; the dlclose result is deliberately ignored.
 */
static CK_RV
pkinit_C_UnloadModule(void *handle)
{
    dlclose(handle);
    return CKR_OK;
}
/*
 * Log in to the open PKCS#11 session with the token's PIN.  The PIN
 * comes from, in order of preference: nowhere (the token has a protected
 * authentication path), the caller-supplied password, or the libkrb5
 * prompter.  Returns 0 on success; ENOMEM, KRB5_LIBOS_CANTREADPWD or
 * KRB5KDC_ERR_PREAUTH_FAILED on failure.
 */
static krb5_error_code
pkinit_login(krb5_context context,
             pkinit_identity_crypto_context id_cryptoctx,
             CK_TOKEN_INFO *tip, const char *password)
{
    krb5_data rdat;
    char *prompt;
    const char *warning;
    krb5_prompt kprompt;
    krb5_prompt_type prompt_type;
    int r = 0;

    if (tip->flags & CKF_PROTECTED_AUTHENTICATION_PATH) {
        /* Token reads the PIN on its own pad; send no PIN data. */
        rdat.data = NULL;
        rdat.length = 0;
    } else if (password != NULL) {
        rdat.data = strdup(password);
        if (rdat.data == NULL)          /* fix: strdup was unchecked */
            return ENOMEM;
        rdat.length = strlen(password);
    } else if (id_cryptoctx->prompter == NULL) {
        /* No way to ask the user for a PIN. */
        r = KRB5_LIBOS_CANTREADPWD;
        rdat.data = NULL;
    } else {
        /* Build a prompt including the token label and any PIN-state
         * warnings the token reports. */
        if (tip->flags & CKF_USER_PIN_LOCKED)
            warning = " (Warning: PIN locked)";
        else if (tip->flags & CKF_USER_PIN_FINAL_TRY)
            warning = " (Warning: PIN final try)";
        else if (tip->flags & CKF_USER_PIN_COUNT_LOW)
            warning = " (Warning: PIN count low)";
        else
            warning = "";
        if (asprintf(&prompt, "%.*s PIN%s", (int) sizeof (tip->label),
                     tip->label, warning) < 0)
            return ENOMEM;
        rdat.data = malloc(tip->ulMaxPinLen + 2);
        if (rdat.data == NULL) {        /* fix: malloc was unchecked */
            free(prompt);
            return ENOMEM;
        }
        rdat.length = tip->ulMaxPinLen + 1;

        kprompt.prompt = prompt;
        kprompt.hidden = 1;
        kprompt.reply = &rdat;
        prompt_type = KRB5_PROMPT_TYPE_PREAUTH;

        /* PROMPTER_INVOCATION */
        k5int_set_prompt_types(context, &prompt_type);
        r = (*id_cryptoctx->prompter)(context, id_cryptoctx->prompter_data,
                                      NULL, NULL, 1, &kprompt);
        k5int_set_prompt_types(context, 0);
        free(prompt);
    }

    if (r == 0) {
        r = id_cryptoctx->p11->C_Login(id_cryptoctx->session, CKU_USER,
                                       (u_char *) rdat.data, rdat.length);
        if (r != CKR_OK) {
            pkiDebug("C_Login: %s\n", pkinit_pkcs11_code_to_text(r));
            r = KRB5KDC_ERR_PREAUTH_FAILED;
        }
    }
    free(rdat.data);
    return r;
}
/*
 * Load the configured PKCS#11 module (if not already loaded), find a
 * slot whose token matches cctx->slotid/token_label (or take the first
 * available), open a session on it, and log in if the token requires it.
 * Returns 0 on success, ENOMEM or KRB5KDC_ERR_PREAUTH_FAILED on failure,
 * or KRB5KRB_ERR_GENERIC when identity prompting is deferred to a
 * responder.
 *
 * Fix: slotlist was leaked on several error returns; it is now freed on
 * every exit path after allocation.
 */
static krb5_error_code
pkinit_open_session(krb5_context context,
                    pkinit_identity_crypto_context cctx)
{
    CK_ULONG i, r;
    unsigned char *cp;
    size_t label_len;
    CK_ULONG count = 0;
    CK_SLOT_ID_PTR slotlist;
    CK_TOKEN_INFO tinfo;
    char *p11name;
    const char *password;

    if (cctx->p11_module != NULL)
        return 0; /* session already open */

    /* Load module */
    cctx->p11_module =
        pkinit_C_LoadModule(cctx->p11_module_name, &cctx->p11);
    if (cctx->p11_module == NULL)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    /* Init */
    if ((r = cctx->p11->C_Initialize(NULL)) != CKR_OK) {
        pkiDebug("C_Initialize: %s\n", pkinit_pkcs11_code_to_text(r));
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }

    /* Get the list of available slots */
    if (cctx->p11->C_GetSlotList(TRUE, NULL, &count) != CKR_OK)
        return KRB5KDC_ERR_PREAUTH_FAILED;
    if (count == 0)
        return KRB5KDC_ERR_PREAUTH_FAILED;
    slotlist = calloc(count, sizeof(CK_SLOT_ID));
    if (slotlist == NULL)
        return ENOMEM;
    if (cctx->p11->C_GetSlotList(TRUE, slotlist, &count) != CKR_OK) {
        free(slotlist);                 /* fix: was leaked here */
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }

    /* Look for the given token label, or if none given take the first one */
    for (i = 0; i < count; i++) {
        /* Skip slots that don't match the specified slotid, if given. */
        if (cctx->slotid != PK_NOSLOT && cctx->slotid != slotlist[i])
            continue;

        /* Open session */
        if ((r = cctx->p11->C_OpenSession(slotlist[i], CKF_SERIAL_SESSION,
                                          NULL, NULL, &cctx->session)) != CKR_OK) {
            pkiDebug("C_OpenSession: %s\n", pkinit_pkcs11_code_to_text(r));
            free(slotlist);             /* fix: was leaked here */
            return KRB5KDC_ERR_PREAUTH_FAILED;
        }

        /* Get token info */
        if ((r = cctx->p11->C_GetTokenInfo(slotlist[i], &tinfo)) != CKR_OK) {
            pkiDebug("C_GetTokenInfo: %s\n", pkinit_pkcs11_code_to_text(r));
            free(slotlist);             /* fix: was leaked here */
            return KRB5KDC_ERR_PREAUTH_FAILED;
        }

        /* tinfo.label is zero-filled but not necessarily zero-terminated.
         * Find the length, ignoring any trailing spaces. */
        for (cp = tinfo.label + sizeof(tinfo.label); cp > tinfo.label; cp--) {
            if (cp[-1] != '\0' && cp[-1] != ' ')
                break;
        }
        label_len = cp - tinfo.label;

        pkiDebug("open_session: slotid %d token \"%.*s\"\n",
                 (int)slotlist[i], (int)label_len, tinfo.label);
        if (cctx->token_label == NULL ||
            (strlen(cctx->token_label) == label_len &&
             memcmp(cctx->token_label, tinfo.label, label_len) == 0))
            break;
        /* Wrong token; close this session and keep scanning. */
        cctx->p11->C_CloseSession(cctx->session);
    }
    if (i >= count) {
        free(slotlist);
        pkiDebug("open_session: no matching token found\n");
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    cctx->slotid = slotlist[i];
    free(slotlist);
    pkiDebug("open_session: slotid %d (%lu of %d)\n", (int)cctx->slotid,
             i + 1, (int) count);

    /* Login if needed */
    if (tinfo.flags & CKF_LOGIN_REQUIRED) {
        /* Build the "PKCS11:..." identity string used for responder
         * lookups and deferred prompting. */
        if (cctx->p11_module_name != NULL) {
            if (cctx->slotid != PK_NOSLOT) {
                if (asprintf(&p11name,
                             "PKCS11:module_name=%s:slotid=%ld:token=%.*s",
                             cctx->p11_module_name, (long)cctx->slotid,
                             (int)label_len, tinfo.label) < 0)
                    p11name = NULL;
            } else {
                if (asprintf(&p11name,
                             "PKCS11:module_name=%s,token=%.*s",
                             cctx->p11_module_name,
                             (int)label_len, tinfo.label) < 0)
                    p11name = NULL;
            }
        } else {
            p11name = NULL;
        }
        if (cctx->defer_id_prompt) {
            /* Supply the identity name to be passed to the responder. */
            pkinit_set_deferred_id(&cctx->deferred_ids,
                                   p11name, tinfo.flags, NULL);
            free(p11name);
            return KRB5KRB_ERR_GENERIC;
        }
        /* Look up a responder-supplied password for the token. */
        password = pkinit_find_deferred_id(cctx->deferred_ids, p11name);
        free(p11name);
        r = pkinit_login(context, cctx, &tinfo, password);
    }

    /* r is CKR_OK (0) from the last PKCS#11 call when no login was needed. */
    return r;
}
/*
* Look for a key that's:
* 1. private
* 2. capable of the specified operation (usually signing or decrypting)
* 3. RSA (this may be wrong but it's all we can do for now)
* 4. matches the id of the cert we chose
*
* You must call pkinit_get_certs before calling pkinit_find_private_key
* (that's because we need the ID of the private key)
*
* pkcs11 says the id of the key doesn't have to match that of the cert, but
* I can't figure out any other way to decide which key to use.
*
* We should only find one key that fits all the requirements.
* If there are more than one, we just take the first one.
*/
/*
 * Locate the token's private-key object matching the chosen certificate
 * (see the usage notes in the comment above).  On success *objp holds
 * the object handle of the first match; returns 0, or
 * KRB5KDC_ERR_PREAUTH_FAILED if the search fails or finds nothing.
 */
krb5_error_code
pkinit_find_private_key(pkinit_identity_crypto_context id_cryptoctx,
                        CK_ATTRIBUTE_TYPE usage,
                        CK_OBJECT_HANDLE *objp)
{
    CK_OBJECT_CLASS cls;
    CK_ATTRIBUTE attrs[4];
    CK_ULONG count;
    CK_KEY_TYPE keytype;
    unsigned int nattrs = 0;
    int r;
#ifdef PKINIT_USE_KEY_USAGE
    CK_BBOOL true_false;
#endif

    /* Search template: private-key objects only. */
    cls = CKO_PRIVATE_KEY;
    attrs[nattrs].type = CKA_CLASS;
    attrs[nattrs].pValue = &cls;
    attrs[nattrs].ulValueLen = sizeof cls;
    nattrs++;
#ifdef PKINIT_USE_KEY_USAGE
    /*
     * Some cards get confused if you try to specify a key usage,
     * so don't, and hope for the best. This will fail if you have
     * several keys with the same id and different usages but I have
     * not seen this on real cards.
     */
    true_false = TRUE;
    attrs[nattrs].type = usage;
    attrs[nattrs].pValue = &true_false;
    attrs[nattrs].ulValueLen = sizeof true_false;
    nattrs++;
#endif
    /* Restrict to RSA keys. */
    keytype = CKK_RSA;
    attrs[nattrs].type = CKA_KEY_TYPE;
    attrs[nattrs].pValue = &keytype;
    attrs[nattrs].ulValueLen = sizeof keytype;
    nattrs++;
    /* Match the CKA_ID recorded when the certificate was selected. */
    attrs[nattrs].type = CKA_ID;
    attrs[nattrs].pValue = id_cryptoctx->cert_id;
    attrs[nattrs].ulValueLen = id_cryptoctx->cert_id_len;
    nattrs++;
    r = id_cryptoctx->p11->C_FindObjectsInit(id_cryptoctx->session, attrs, nattrs);
    if (r != CKR_OK) {
        pkiDebug("krb5_pkinit_sign_data: C_FindObjectsInit: %s\n",
                 pkinit_pkcs11_code_to_text(r));
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    /* Take the first matching object, if any. */
    r = id_cryptoctx->p11->C_FindObjects(id_cryptoctx->session, objp, 1, &count);
    id_cryptoctx->p11->C_FindObjectsFinal(id_cryptoctx->session);
    pkiDebug("found %d private keys (%s)\n", (int) count, pkinit_pkcs11_code_to_text(r));
    if (r != CKR_OK || count < 1)
        return KRB5KDC_ERR_PREAUTH_FAILED;
    return 0;
}
#endif
/*
 * Decrypt data with the on-disk private key held in id_cryptoctx.
 * On success *decoded_data is a malloc'd plaintext buffer of
 * *decoded_data_len bytes; returns 0, or KRB5KDC_ERR_PREAUTH_FAILED.
 */
static krb5_error_code
pkinit_decode_data_fs(krb5_context context,
                      pkinit_identity_crypto_context id_cryptoctx,
                      unsigned char *data,
                      unsigned int data_len,
                      unsigned char **decoded_data,
                      unsigned int *decoded_data_len)
{
    X509 *cert = sk_X509_value(id_cryptoctx->my_certs,
                               id_cryptoctx->cert_index);
    krb5_error_code ret;

    /* decode_data reports failure with a value <= 0. */
    ret = decode_data(decoded_data, decoded_data_len, data, data_len,
                      id_cryptoctx->my_key, cert);
    if (ret <= 0) {
        pkiDebug("failed to decode data\n");
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    return 0;
}
#ifndef WITHOUT_PKCS11
/*
* When using the ActivCard Linux pkcs11 library (v2.0.1), the decrypt function
* fails. By inserting an extra function call, which serves nothing but to
* change the stack, we were able to work around the issue. If the ActivCard
* library is fixed in the future, this function can be inlined back into the
* caller.
*/
static CK_RV
pkinit_C_Decrypt(pkinit_identity_crypto_context id_cryptoctx,
                 CK_BYTE_PTR pEncryptedData,
                 CK_ULONG ulEncryptedDataLen,
                 CK_BYTE_PTR pData,
                 CK_ULONG_PTR pulDataLen)
{
    CK_RV rv;

    /* Forward to the provider's C_Decrypt on the current session.  This
     * wrapper must stay a separate function; see the comment above. */
    rv = id_cryptoctx->p11->C_Decrypt(id_cryptoctx->session, pEncryptedData,
                                      ulEncryptedDataLen, pData, pulDataLen);
    if (rv == CKR_OK)
        pkiDebug("pData %p *pulDataLen %d\n", (void *) pData,
                 (int) *pulDataLen);
    return rv;
}
/*
 * Decrypt data with the token's RSA private key (raw CKM_RSA_PKCS).
 * On success *decoded_data is a malloc'd plaintext buffer of
 * *decoded_data_len bytes; returns 0, ENOMEM, or
 * KRB5KDC_ERR_PREAUTH_FAILED.
 */
static krb5_error_code
pkinit_decode_data_pkcs11(krb5_context context,
                          pkinit_identity_crypto_context id_cryptoctx,
                          unsigned char *data,
                          unsigned int data_len,
                          unsigned char **decoded_data,
                          unsigned int *decoded_data_len)
{
    CK_OBJECT_HANDLE obj;
    CK_ULONG len;
    CK_MECHANISM mech;
    unsigned char *cp;
    int r;

    if (pkinit_open_session(context, id_cryptoctx)) {
        pkiDebug("can't open pkcs11 session\n");
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    /* Fix: a failed lookup used to leave obj uninitialized and fall
     * through to C_DecryptInit with a garbage handle. */
    if (pkinit_find_private_key(id_cryptoctx, CKA_DECRYPT, &obj) != 0)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    mech.mechanism = CKM_RSA_PKCS;
    mech.pParameter = NULL;
    mech.ulParameterLen = 0;
    if ((r = id_cryptoctx->p11->C_DecryptInit(id_cryptoctx->session, &mech,
                                              obj)) != CKR_OK) {
        pkiDebug("C_DecryptInit: 0x%x\n", (int) r);
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    pkiDebug("data_len = %d\n", data_len);
    /* RSA plaintext is never longer than the ciphertext. */
    cp = malloc((size_t) data_len);
    if (cp == NULL)
        return ENOMEM;
    len = data_len;
    pkiDebug("session %p edata %p edata_len %d data %p datalen @%p %d\n",
             (void *) id_cryptoctx->session, (void *) data, (int) data_len,
             (void *) cp, (void *) &len, (int) len);
    if ((r = pkinit_C_Decrypt(id_cryptoctx, data, (CK_ULONG) data_len,
                              cp, &len)) != CKR_OK) {
        pkiDebug("C_Decrypt: %s\n", pkinit_pkcs11_code_to_text(r));
        if (r == CKR_BUFFER_TOO_SMALL)
            pkiDebug("decrypt %d needs %d\n", (int) data_len, (int) len);
        free(cp);               /* fix: cp leaked on decrypt failure */
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    pkiDebug("decrypt %d -> %d\n", (int) data_len, (int) len);
    *decoded_data_len = len;
    *decoded_data = cp;
    return 0;
}
#endif
/*
 * Dispatch decryption to the PKCS#11 token or the on-disk key, according
 * to how the identity was configured.  A PKCS#11 identity in a build
 * compiled without PKCS#11 support fails with
 * KRB5KDC_ERR_PREAUTH_FAILED.
 */
krb5_error_code
pkinit_decode_data(krb5_context context,
                   pkinit_identity_crypto_context id_cryptoctx,
                   unsigned char *data,
                   unsigned int data_len,
                   unsigned char **decoded_data,
                   unsigned int *decoded_data_len)
{
#ifndef WITHOUT_PKCS11
    if (id_cryptoctx->pkcs11_method == 1)
        return pkinit_decode_data_pkcs11(context, id_cryptoctx, data,
                                         data_len, decoded_data,
                                         decoded_data_len);
#else
    if (id_cryptoctx->pkcs11_method == 1)
        return KRB5KDC_ERR_PREAUTH_FAILED;
#endif
    return pkinit_decode_data_fs(context, id_cryptoctx, data, data_len,
                                 decoded_data, decoded_data_len);
}
/*
 * Sign data with the on-disk private key held in id_cryptoctx.  On
 * success *sig is a malloc'd signature of *sig_len bytes; returns 0, or
 * KRB5KDC_ERR_PREAUTH_FAILED.
 */
static krb5_error_code
pkinit_sign_data_fs(krb5_context context,
                    pkinit_identity_crypto_context id_cryptoctx,
                    unsigned char *data,
                    unsigned int data_len,
                    unsigned char **sig,
                    unsigned int *sig_len)
{
    int failed;

    /* create_signature reports success with 0. */
    failed = (create_signature(sig, sig_len, data, data_len,
                               id_cryptoctx->my_key) != 0);
    if (failed) {
        pkiDebug("failed to create the signature\n");
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    return 0;
}
#ifndef WITHOUT_PKCS11
/*
 * Sign data with the token's private key using the configured mechanism.
 * Follows the PKCS#11 two-call convention: guess a signature size, and
 * if the token reports it's too small, re-allocate and retry.  On
 * success *sig is a malloc'd signature of *sig_len bytes; returns 0,
 * ENOMEM, or KRB5KDC_ERR_PREAUTH_FAILED.
 */
static krb5_error_code
pkinit_sign_data_pkcs11(krb5_context context,
                        pkinit_identity_crypto_context id_cryptoctx,
                        unsigned char *data,
                        unsigned int data_len,
                        unsigned char **sig,
                        unsigned int *sig_len)
{
    CK_OBJECT_HANDLE obj;
    CK_ULONG len;
    CK_MECHANISM mech;
    unsigned char *cp;
    int r;

    if (pkinit_open_session(context, id_cryptoctx)) {
        pkiDebug("can't open pkcs11 session\n");
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    /* Fix: a failed lookup used to leave obj uninitialized and fall
     * through to C_SignInit with a garbage handle. */
    if (pkinit_find_private_key(id_cryptoctx, CKA_SIGN, &obj) != 0)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    mech.mechanism = id_cryptoctx->mech;
    mech.pParameter = NULL;
    mech.ulParameterLen = 0;
    if ((r = id_cryptoctx->p11->C_SignInit(id_cryptoctx->session, &mech,
                                           obj)) != CKR_OK) {
        pkiDebug("C_SignInit: %s\n", pkinit_pkcs11_code_to_text(r));
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }

    /*
     * Key len would give an upper bound on sig size, but there's no way to
     * get that. So guess, and if it's too small, re-malloc.
     */
    len = PK_SIGLEN_GUESS;
    cp = malloc((size_t) len);
    if (cp == NULL)
        return ENOMEM;

    r = id_cryptoctx->p11->C_Sign(id_cryptoctx->session, data,
                                  (CK_ULONG) data_len, cp, &len);
    if (r == CKR_BUFFER_TOO_SMALL || (r == CKR_OK && len >= PK_SIGLEN_GUESS)) {
        free(cp);
        pkiDebug("C_Sign realloc %d\n", (int) len);
        cp = malloc((size_t) len);
        if (cp == NULL)         /* fix: retry malloc was unchecked */
            return ENOMEM;
        r = id_cryptoctx->p11->C_Sign(id_cryptoctx->session, data,
                                      (CK_ULONG) data_len, cp, &len);
    }
    if (r != CKR_OK) {
        pkiDebug("C_Sign: %s\n", pkinit_pkcs11_code_to_text(r));
        free(cp);               /* fix: cp leaked on sign failure */
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    pkiDebug("sign %d -> %d\n", (int) data_len, (int) len);
    *sig_len = len;
    *sig = cp;
    return 0;
}
#endif
/*
 * Dispatch signing to the PKCS#11 token or the on-disk key, according to
 * how the identity was configured.  A NULL identity context always uses
 * the file-system path; a PKCS#11 identity in a build compiled without
 * PKCS#11 support fails with KRB5KDC_ERR_PREAUTH_FAILED.
 */
krb5_error_code
pkinit_sign_data(krb5_context context,
                 pkinit_identity_crypto_context id_cryptoctx,
                 unsigned char *data,
                 unsigned int data_len,
                 unsigned char **sig,
                 unsigned int *sig_len)
{
#ifndef WITHOUT_PKCS11
    if (id_cryptoctx != NULL && id_cryptoctx->pkcs11_method == 1)
        return pkinit_sign_data_pkcs11(context, id_cryptoctx, data, data_len,
                                       sig, sig_len);
#else
    if (id_cryptoctx != NULL && id_cryptoctx->pkcs11_method == 1)
        return KRB5KDC_ERR_PREAUTH_FAILED;
#endif
    return pkinit_sign_data_fs(context, id_cryptoctx, data, data_len,
                               sig, sig_len);
}
/*
 * RSA-decrypt (data, data_len) with pkey, optionally verifying first
 * that pkey matches cert.  On success *out_data is a malloc'd plaintext
 * buffer, *out_data_len its length, and the (positive) plaintext length
 * is returned.  Any value <= 0 means failure — callers test `<= 0`.
 *
 * Fixes: early failures used to return ENOMEM (a positive value), which
 * the caller's `<= 0` test misread as success with *out_data unset; and
 * buf leaked when the EVP decrypt itself failed.
 */
static krb5_error_code
decode_data(unsigned char **out_data, unsigned int *out_data_len,
            unsigned char *data, unsigned int data_len,
            EVP_PKEY *pkey, X509 *cert)
{
    krb5_error_code retval = -1;        /* <= 0 signals failure */
    unsigned char *buf = NULL;
    int buf_len = 0;

    /* Sanity check: the private key must belong to the certificate. */
    if (cert && !X509_check_private_key(cert, pkey)) {
        pkiDebug("private key does not match certificate\n");
        goto cleanup;
    }

    buf_len = EVP_PKEY_size(pkey);
    buf = malloc((size_t) buf_len + 10);
    if (buf == NULL)
        goto cleanup;

#if OPENSSL_VERSION_NUMBER >= 0x00909000L
    retval = EVP_PKEY_decrypt_old(buf, data, (int)data_len, pkey);
#else
    retval = EVP_PKEY_decrypt(buf, data, (int)data_len, pkey);
#endif
    if (retval <= 0) {
        pkiDebug("unable to decrypt received data (len=%d)\n", data_len);
        goto cleanup;
    }
    *out_data = buf;
    *out_data_len = retval;
    buf = NULL;                 /* ownership transferred to caller */

cleanup:
    free(buf);                  /* frees on every failure path */
    return retval;
}
/*
 * Compute an RSA/SHA-1 signature over (data, data_len) with pkey.  On
 * success *sig is a malloc'd signature and *sig_len its length; returns
 * 0, ENOMEM, or KRB5KDC_ERR_PREAUTH_FAILED if signing itself fails.
 */
static krb5_error_code
create_signature(unsigned char **sig, unsigned int *sig_len,
                 unsigned char *data, unsigned int data_len, EVP_PKEY *pkey)
{
    krb5_error_code retval = ENOMEM;
    EVP_MD_CTX md_ctx;

    if (pkey == NULL)
        return retval;

    /* Fix: this previously called EVP_VerifyInit.  The two are aliases
     * of EVP_DigestInit in legacy OpenSSL, so behavior is unchanged,
     * but this is a signing operation. */
    EVP_SignInit(&md_ctx, EVP_sha1());
    EVP_SignUpdate(&md_ctx, data, data_len);
    *sig_len = EVP_PKEY_size(pkey);
    if ((*sig = malloc(*sig_len)) == NULL)
        goto cleanup;
    /* Fix: EVP_SignFinal's result was ignored, so a signing failure
     * handed the caller an undefined buffer as a valid signature. */
    if (EVP_SignFinal(&md_ctx, *sig, sig_len, pkey) == 0) {
        free(*sig);
        *sig = NULL;
        retval = KRB5KDC_ERR_PREAUTH_FAILED;
        goto cleanup;
    }
    retval = 0;
cleanup:
    EVP_MD_CTX_cleanup(&md_ctx);
    return retval;
}
/*
* Note:
* This is not the routine the KDC uses to get its certificate.
* This routine is intended to be called by the client
* to obtain the KDC's certificate from some local storage
* to be sent as a hint in its request to the KDC.
*/
krb5_error_code
pkinit_get_kdc_cert(krb5_context context,
                    pkinit_plg_crypto_context plg_cryptoctx,
                    pkinit_req_crypto_context req_cryptoctx,
                    pkinit_identity_crypto_context id_cryptoctx,
                    krb5_principal princ)
{
    /* No local store of KDC certificates is implemented: record that no
     * hint certificate is available and report success. */
    req_cryptoctx->received_cert = NULL;
    return 0;
}
/*
 * Build the "PKCS12:<filename>" identity string used for prompting and
 * deferred-identity lookups.  Returns a malloc'd string, or NULL on
 * allocation failure.
 */
static char *
reassemble_pkcs12_name(const char *filename)
{
    static const char prefix[] = "PKCS12:";
    size_t total = sizeof(prefix) + strlen(filename);  /* sizeof covers NUL */
    char *ret = (char *)malloc(total);

    if (ret == NULL)
        return NULL;
    snprintf(ret, total, "%s%s", prefix, filename);
    return ret;
}
/*
 * Load the client certificate and private key from the PKCS#12 bundle named
 * by idopts->cert_filename into credential slot 0 of id_cryptoctx.
 *
 * The bundle is first parsed with no pass phrase; if that fails, a pass
 * phrase is taken from a responder-supplied (deferred) answer or obtained
 * interactively via the configured prompter.  When prompts are deferred,
 * the identity name is recorded for the responder and 0 is returned
 * without loading anything.
 */
static krb5_error_code
pkinit_get_certs_pkcs12(krb5_context context,
                        pkinit_plg_crypto_context plg_cryptoctx,
                        pkinit_req_crypto_context req_cryptoctx,
                        pkinit_identity_opts *idopts,
                        pkinit_identity_crypto_context id_cryptoctx,
                        krb5_principal princ)
{
    krb5_error_code retval = KRB5KDC_ERR_PREAUTH_FAILED;
    char *prompt_string = NULL;
    X509 *x = NULL;
    PKCS12 *p12 = NULL;
    int ret;
    FILE *fp;
    EVP_PKEY *y = NULL;

    /* Both the bundle location and a key location must be configured. */
    if (idopts->cert_filename == NULL) {
        pkiDebug("%s: failed to get user's cert location\n", __FUNCTION__);
        goto cleanup;
    }
    if (idopts->key_filename == NULL) {
        pkiDebug("%s: failed to get user's private key location\n", __FUNCTION__);
        goto cleanup;
    }

    /* Read and DER-decode the PKCS#12 container. */
    fp = fopen(idopts->cert_filename, "rb");
    if (fp == NULL) {
        pkiDebug("Failed to open PKCS12 file '%s', error %d\n",
                 idopts->cert_filename, errno);
        goto cleanup;
    }
    set_cloexec_file(fp);
    p12 = d2i_PKCS12_fp(fp, NULL);
    fclose(fp);
    if (p12 == NULL) {
        pkiDebug("Failed to decode PKCS12 file '%s' contents\n",
                 idopts->cert_filename);
        goto cleanup;
    }
    /*
     * Try parsing with no pass phrase first.  If that fails,
     * prompt for the pass phrase and try again.
     */
    ret = PKCS12_parse(p12, NULL, &y, &x, NULL);
    if (ret == 0) {
        krb5_data rdat;
        krb5_prompt kprompt;
        krb5_prompt_type prompt_type;
        krb5_error_code r;
        char prompt_reply[128];
        char *prompt_prefix = _("Pass phrase for");
        char *p12name = reassemble_pkcs12_name(idopts->cert_filename);
        const char *tmp;

        pkiDebug("Initial PKCS12_parse with no password failed\n");

        if (id_cryptoctx->defer_id_prompt) {
            /* Supply the identity name to be passed to the responder. */
            pkinit_set_deferred_id(&id_cryptoctx->deferred_ids, p12name, 0,
                                   NULL);
            free(p12name);
            /* Deferred mode: success without loading any credentials. */
            retval = 0;
            goto cleanup;
        }
        /* Try to read a responder-supplied password. */
        tmp = pkinit_find_deferred_id(id_cryptoctx->deferred_ids, p12name);
        free(p12name);
        if (tmp != NULL) {
            /* Try using the responder-supplied password. */
            rdat.data = (char *)tmp;
            rdat.length = strlen(tmp);
        } else if (id_cryptoctx->prompter == NULL) {
            /* We can't use a prompter. */
            goto cleanup;
        } else {
            /* Ask using a prompter. */
            memset(prompt_reply, '\0', sizeof(prompt_reply));
            rdat.data = prompt_reply;
            rdat.length = sizeof(prompt_reply);

            if (asprintf(&prompt_string, "%s %s", prompt_prefix,
                         idopts->cert_filename) < 0) {
                prompt_string = NULL;
                goto cleanup;
            }
            kprompt.prompt = prompt_string;
            kprompt.hidden = 1;
            kprompt.reply = &rdat;
            prompt_type = KRB5_PROMPT_TYPE_PREAUTH;

            /* PROMPTER_INVOCATION */
            k5int_set_prompt_types(context, &prompt_type);
            r = (*id_cryptoctx->prompter)(context, id_cryptoctx->prompter_data,
                                          NULL, NULL, 1, &kprompt);
            k5int_set_prompt_types(context, 0);
            if (r) {
                pkiDebug("Failed to prompt for PKCS12 password");
                goto cleanup;
            }
        }

        /* Retry the parse with the pass phrase obtained above. */
        ret = PKCS12_parse(p12, rdat.data, &y, &x, NULL);
        if (ret == 0) {
            pkiDebug("Second PKCS12_parse with password failed\n");
            goto cleanup;
        }
    }
    id_cryptoctx->creds[0] = malloc(sizeof(struct _pkinit_cred_info));
    if (id_cryptoctx->creds[0] == NULL)
        goto cleanup;
    id_cryptoctx->creds[0]->name =
        reassemble_pkcs12_name(idopts->cert_filename);
    id_cryptoctx->creds[0]->cert = x;
#ifndef WITHOUT_PKCS11
    id_cryptoctx->creds[0]->cert_id = NULL;
    id_cryptoctx->creds[0]->cert_id_len = 0;
#endif
    id_cryptoctx->creds[0]->key = y;
    /* The creds[] array is NULL-terminated. */
    id_cryptoctx->creds[1] = NULL;
    retval = 0;

cleanup:
    free(prompt_string);
    if (p12)
        PKCS12_free(p12);
    if (retval) {
        /* On failure the cert/key were never handed off to creds[0]. */
        if (x != NULL)
            X509_free(x);
        if (y != NULL)
            EVP_PKEY_free(y);
    }
    return retval;
}
/*
 * Build a "FILE:<certfile>,<keyfile>" identity string, or "FILE:<certfile>"
 * when no key file is given.  Returns NULL on allocation failure.
 */
static char *
reassemble_files_name(const char *certfile, const char *keyfile)
{
    char *name = NULL;
    int r;

    r = (keyfile != NULL) ?
        asprintf(&name, "FILE:%s,%s", certfile, keyfile) :
        asprintf(&name, "FILE:%s", certfile);
    return (r < 0) ? NULL : name;
}
/*
 * Load a certificate from certname and its private key from keyname into
 * credential slot cindex of id_cryptoctx.  A responder-supplied password,
 * looked up under the "FILE:cert,key" identity name, is used for the key
 * when one was deferred earlier.  Returns 0 on success.
 */
static krb5_error_code
pkinit_load_fs_cert_and_key(krb5_context context,
                            pkinit_identity_crypto_context id_cryptoctx,
                            char *certname,
                            char *keyname,
                            int cindex)
{
    krb5_error_code retval;
    X509 *x = NULL;
    EVP_PKEY *y = NULL;
    char *fsname = NULL;
    const char *password;

    /* NOTE(review): fsname may be NULL on allocation failure; this assumes
     * pkinit_find_deferred_id() and get_key() tolerate a NULL name --
     * confirm. */
    fsname = reassemble_files_name(certname, keyname);

    /* Try to read a responder-supplied password. */
    password = pkinit_find_deferred_id(id_cryptoctx->deferred_ids, fsname);

    /* Load the certificate. */
    retval = get_cert(certname, &x);
    if (retval != 0 || x == NULL) {
        pkiDebug("failed to load user's certificate from '%s'\n", certname);
        goto cleanup;
    }
    /* Load the key. */
    retval = get_key(context, id_cryptoctx, keyname, fsname, &y, password);
    if (retval != 0 || y == NULL) {
        pkiDebug("failed to load user's private key from '%s'\n", keyname);
        goto cleanup;
    }
    id_cryptoctx->creds[cindex] = malloc(sizeof(struct _pkinit_cred_info));
    if (id_cryptoctx->creds[cindex] == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    id_cryptoctx->creds[cindex]->name = reassemble_files_name(certname,
                                                              keyname);
    id_cryptoctx->creds[cindex]->cert = x;
#ifndef WITHOUT_PKCS11
    id_cryptoctx->creds[cindex]->cert_id = NULL;
    id_cryptoctx->creds[cindex]->cert_id_len = 0;
#endif
    id_cryptoctx->creds[cindex]->key = y;
    /* Keep the creds[] array NULL-terminated. */
    id_cryptoctx->creds[cindex+1] = NULL;
    retval = 0;

cleanup:
    free(fsname);
    /* The cred entry owns x and y only after both loads succeed; otherwise
     * free whichever objects were created here. */
    if (retval != 0 || y == NULL) {
        if (x != NULL)
            X509_free(x);
        if (y != NULL)
            EVP_PKEY_free(y);
    }
    return retval;
}
/*
 * Load the single certificate/key pair named by idopts->cert_filename and
 * idopts->key_filename into credential slot 0 of id_cryptoctx.
 */
static krb5_error_code
pkinit_get_certs_fs(krb5_context context,
                    pkinit_plg_crypto_context plg_cryptoctx,
                    pkinit_req_crypto_context req_cryptoctx,
                    pkinit_identity_opts *idopts,
                    pkinit_identity_crypto_context id_cryptoctx,
                    krb5_principal princ)
{
    /* Both a certificate and a private-key location are required. */
    if (idopts->cert_filename == NULL) {
        pkiDebug("%s: failed to get user's cert location\n", __FUNCTION__);
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    if (idopts->key_filename == NULL) {
        pkiDebug("%s: failed to get user's private key location\n",
                 __FUNCTION__);
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    return pkinit_load_fs_cert_and_key(context, id_cryptoctx,
                                       idopts->cert_filename,
                                       idopts->key_filename, 0);
}
/*
 * Load certificate/key pairs from the directory named by
 * idopts->cert_filename.  Certificates are expected as XXX.crt with the
 * matching key in XXX.key; each successfully loaded pair fills the next
 * creds[] slot, up to MAX_CREDS_ALLOWED entries.  Load failures for
 * individual pairs are skipped, not fatal.
 */
static krb5_error_code
pkinit_get_certs_dir(krb5_context context,
                     pkinit_plg_crypto_context plg_cryptoctx,
                     pkinit_req_crypto_context req_cryptoctx,
                     pkinit_identity_opts *idopts,
                     pkinit_identity_crypto_context id_cryptoctx,
                     krb5_principal princ)
{
    krb5_error_code retval = ENOMEM;
    DIR *d = NULL;
    struct dirent *dentry = NULL;
    char certname[1024];
    char keyname[1024];
    int i = 0, len;
    char *dirname, *suf;

    if (idopts->cert_filename == NULL) {
        pkiDebug("%s: failed to get user's certificate directory location\n",
                 __FUNCTION__);
        return ENOENT;
    }
    dirname = idopts->cert_filename;
    d = opendir(dirname);
    if (d == NULL)
        return errno;
    /*
     * We'll assume that certs are named XXX.crt and the corresponding
     * key is named XXX.key
     */
    while ((i < MAX_CREDS_ALLOWED) && (dentry = readdir(d)) != NULL) {
        /* Ignore subdirectories and anything starting with a dot */
#ifdef DT_DIR
        if (dentry->d_type == DT_DIR)
            continue;
#endif
        if (dentry->d_name[0] == '.')
            continue;
        /* Only look at entries carrying a ".crt" suffix. */
        len = strlen(dentry->d_name);
        if (len < 5)
            continue;
        suf = dentry->d_name + (len - 4);
        if (strncmp(suf, ".crt", 4) != 0)
            continue;
        /* Checked length */
        if (strlen(dirname) + strlen(dentry->d_name) + 2 > sizeof(certname)) {
            pkiDebug("%s: Path too long -- directory '%s' and file '%s'\n",
                     __FUNCTION__, dirname, dentry->d_name);
            continue;
        }
        snprintf(certname, sizeof(certname), "%s/%s", dirname, dentry->d_name);
        snprintf(keyname, sizeof(keyname), "%s/%s", dirname, dentry->d_name);
        /* Rewrite the trailing "crt" to "key" in place to form keyname. */
        len = strlen(keyname);
        keyname[len - 3] = 'k';
        keyname[len - 2] = 'e';
        keyname[len - 1] = 'y';
        /* A failed load just leaves slot i free for the next pair. */
        retval = pkinit_load_fs_cert_and_key(context, id_cryptoctx,
                                             certname, keyname, i);
        if (retval == 0) {
            pkiDebug("%s: Successfully loaded cert (and key) for %s\n",
                     __FUNCTION__, dentry->d_name);
            i++;
        }
        else
            continue;
    }
    /* Finding nothing is an error unless identity prompts were deferred. */
    if (!id_cryptoctx->defer_id_prompt && i == 0) {
        pkiDebug("%s: No cert/key pairs found in directory '%s'\n",
                 __FUNCTION__, idopts->cert_filename);
        retval = ENOENT;
        goto cleanup;
    }
    retval = 0;

cleanup:
    if (d)
        closedir(d);
    return retval;
}
#ifndef WITHOUT_PKCS11
/*
 * Rebuild a "PKCS11:opt=val[:opt=val...]" identity string from the PKCS#11
 * fields of idopts.  Returns NULL on allocation failure.
 */
static char *
reassemble_pkcs11_name(pkinit_identity_opts *idopts)
{
    struct k5buf buf;
    int fields = 0;
    char *name;

    k5_buf_init_dynamic(&buf);
    k5_buf_add(&buf, "PKCS11:");
    /* Each present option is appended, ":"-separated after the first. */
    if (idopts->p11_module_name != NULL) {
        k5_buf_add_fmt(&buf, "%smodule_name=%s", fields++ ? ":" : "",
                       idopts->p11_module_name);
    }
    if (idopts->token_label != NULL) {
        k5_buf_add_fmt(&buf, "%stoken=%s", fields++ ? ":" : "",
                       idopts->token_label);
    }
    if (idopts->cert_label != NULL) {
        k5_buf_add_fmt(&buf, "%scertlabel=%s", fields++ ? ":" : "",
                       idopts->cert_label);
    }
    if (idopts->cert_id_string != NULL) {
        k5_buf_add_fmt(&buf, "%scertid=%s", fields++ ? ":" : "",
                       idopts->cert_id_string);
    }
    if (idopts->slotid != PK_NOSLOT) {
        k5_buf_add_fmt(&buf, "%sslotid=%ld", fields++ ? ":" : "",
                       (long)idopts->slotid);
    }
    name = (k5_buf_status(&buf) == 0) ? strdup(buf.data) : NULL;
    k5_buf_free(&buf);
    return name;
}
/*
 * Load client certificates from a PKCS#11 token.  Copies the relevant
 * identity options into id_cryptoctx, opens a token session, selects a
 * signing mechanism, and reads every matching X.509 certificate object
 * (optionally filtered by CKA_ID and/or CKA_LABEL) into creds[].  Private
 * keys stay on the token; only certificates and their ids are recorded.
 */
static krb5_error_code
pkinit_get_certs_pkcs11(krb5_context context,
                        pkinit_plg_crypto_context plg_cryptoctx,
                        pkinit_req_crypto_context req_cryptoctx,
                        pkinit_identity_opts *idopts,
                        pkinit_identity_crypto_context id_cryptoctx,
                        krb5_principal princ)
{
#ifdef PKINIT_USE_MECH_LIST
    CK_MECHANISM_TYPE_PTR mechp;
    CK_MECHANISM_INFO info;
#endif
    CK_OBJECT_CLASS cls;
    CK_OBJECT_HANDLE obj;
    CK_ATTRIBUTE attrs[4];
    CK_ULONG count;
    CK_CERTIFICATE_TYPE certtype;
    CK_BYTE_PTR cert = NULL, cert_id;
    const unsigned char *cp;
    int i, r;
    unsigned int nattrs;
    X509 *x = NULL;

    /* Copy stuff from idopts -> id_cryptoctx */
    if (idopts->p11_module_name != NULL) {
        free(id_cryptoctx->p11_module_name);
        id_cryptoctx->p11_module_name = strdup(idopts->p11_module_name);
        if (id_cryptoctx->p11_module_name == NULL)
            return ENOMEM;
    }
    if (idopts->token_label != NULL) {
        id_cryptoctx->token_label = strdup(idopts->token_label);
        if (id_cryptoctx->token_label == NULL)
            return ENOMEM;
    }
    if (idopts->cert_label != NULL) {
        id_cryptoctx->cert_label = strdup(idopts->cert_label);
        if (id_cryptoctx->cert_label == NULL)
            return ENOMEM;
    }
    /* Convert the ascii cert_id string into a binary blob */
    if (idopts->cert_id_string != NULL) {
        BIGNUM *bn = NULL;
        BN_hex2bn(&bn, idopts->cert_id_string);
        if (bn == NULL)
            return ENOMEM;
        id_cryptoctx->cert_id_len = BN_num_bytes(bn);
        id_cryptoctx->cert_id = malloc((size_t) id_cryptoctx->cert_id_len);
        if (id_cryptoctx->cert_id == NULL) {
            BN_free(bn);
            return ENOMEM;
        }
        BN_bn2bin(bn, id_cryptoctx->cert_id);
        BN_free(bn);
    }
    id_cryptoctx->slotid = idopts->slotid;
    id_cryptoctx->pkcs11_method = 1;

    /* Open a session; failure is fatal unless prompts are deferred. */
    if (pkinit_open_session(context, id_cryptoctx)) {
        pkiDebug("can't open pkcs11 session\n");
        if (!id_cryptoctx->defer_id_prompt)
            return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    if (id_cryptoctx->defer_id_prompt) {
        /*
         * We need to reset all of the PKCS#11 state, so that the next time we
         * poke at it, it'll be in as close to the state it was in after we
         * loaded it the first time as we can make it.
         */
        pkinit_fini_pkcs11(id_cryptoctx);
        pkinit_init_pkcs11(id_cryptoctx);
        return 0;
    }
#ifndef PKINIT_USE_MECH_LIST
    /*
     * We'd like to use CKM_SHA1_RSA_PKCS for signing if it's available, but
     * many cards seems to be confused about whether they are capable of
     * this or not. The safe thing seems to be to ignore the mechanism list,
     * always use CKM_RSA_PKCS and calculate the sha1 digest ourselves.
     */
    id_cryptoctx->mech = CKM_RSA_PKCS;
#else
    /* NOTE(review): mechp is leaked on the two fatal returns below --
     * confirm whether that is acceptable on these error paths. */
    if ((r = id_cryptoctx->p11->C_GetMechanismList(id_cryptoctx->slotid, NULL,
                                                   &count)) != CKR_OK || count <= 0) {
        pkiDebug("C_GetMechanismList: %s\n", pkinit_pkcs11_code_to_text(r));
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    mechp = malloc(count * sizeof (CK_MECHANISM_TYPE));
    if (mechp == NULL)
        return ENOMEM;
    if ((r = id_cryptoctx->p11->C_GetMechanismList(id_cryptoctx->slotid,
                                                   mechp, &count)) != CKR_OK)
        return KRB5KDC_ERR_PREAUTH_FAILED;
    for (i = 0; i < count; i++) {
        if ((r = id_cryptoctx->p11->C_GetMechanismInfo(id_cryptoctx->slotid,
                                                       mechp[i], &info)) != CKR_OK)
            return KRB5KDC_ERR_PREAUTH_FAILED;
#ifdef DEBUG_MECHINFO
        pkiDebug("mech %x flags %x\n", (int) mechp[i], (int) info.flags);
        if ((info.flags & (CKF_SIGN|CKF_DECRYPT)) == (CKF_SIGN|CKF_DECRYPT))
            pkiDebug(" this mech is good for sign & decrypt\n");
#endif
        if (mechp[i] == CKM_RSA_PKCS) {
            /* This seems backwards... */
            id_cryptoctx->mech =
                (info.flags & CKF_SIGN) ? CKM_SHA1_RSA_PKCS : CKM_RSA_PKCS;
        }
    }
    free(mechp);
    pkiDebug("got %d mechs from card\n", (int) count);
#endif

    /* Build the object-search template: X.509 certificate objects,
     * optionally narrowed by CKA_ID and/or CKA_LABEL below. */
    cls = CKO_CERTIFICATE;
    attrs[0].type = CKA_CLASS;
    attrs[0].pValue = &cls;
    attrs[0].ulValueLen = sizeof cls;
    certtype = CKC_X_509;
    attrs[1].type = CKA_CERTIFICATE_TYPE;
    attrs[1].pValue = &certtype;
    attrs[1].ulValueLen = sizeof certtype;
    nattrs = 2;
    /* If a cert id and/or label were given, use them too */
    if (id_cryptoctx->cert_id_len > 0) {
        attrs[nattrs].type = CKA_ID;
        attrs[nattrs].pValue = id_cryptoctx->cert_id;
        attrs[nattrs].ulValueLen = id_cryptoctx->cert_id_len;
        nattrs++;
    }
    if (id_cryptoctx->cert_label != NULL) {
        attrs[nattrs].type = CKA_LABEL;
        attrs[nattrs].pValue = id_cryptoctx->cert_label;
        attrs[nattrs].ulValueLen = strlen(id_cryptoctx->cert_label);
        nattrs++;
    }
    r = id_cryptoctx->p11->C_FindObjectsInit(id_cryptoctx->session, attrs, nattrs);
    if (r != CKR_OK) {
        pkiDebug("C_FindObjectsInit: %s\n", pkinit_pkcs11_code_to_text(r));
        return KRB5KDC_ERR_PREAUTH_FAILED;
    }
    /* Read each matching certificate object into the next creds[] slot. */
    for (i = 0; ; i++) {
        if (i >= MAX_CREDS_ALLOWED)
            return KRB5KDC_ERR_PREAUTH_FAILED;
        /* Look for x.509 cert */
        if ((r = id_cryptoctx->p11->C_FindObjects(id_cryptoctx->session,
                                                  &obj, 1, &count)) != CKR_OK || count <= 0) {
            id_cryptoctx->creds[i] = NULL;
            break;
        }
        /* Get cert and id len */
        /* With NULL pValue pointers this first call only sizes the
         * attributes (CKR_BUFFER_TOO_SMALL is therefore not an error). */
        attrs[0].type = CKA_VALUE;
        attrs[0].pValue = NULL;
        attrs[0].ulValueLen = 0;
        attrs[1].type = CKA_ID;
        attrs[1].pValue = NULL;
        attrs[1].ulValueLen = 0;
        if ((r = id_cryptoctx->p11->C_GetAttributeValue(id_cryptoctx->session,
                                                        obj, attrs, 2)) != CKR_OK && r != CKR_BUFFER_TOO_SMALL) {
            pkiDebug("C_GetAttributeValue: %s\n", pkinit_pkcs11_code_to_text(r));
            return KRB5KDC_ERR_PREAUTH_FAILED;
        }
        /* NOTE(review): cert/cert_id leak on the error returns below --
         * confirm whether cleanup is needed on these fatal paths. */
        cert = (CK_BYTE_PTR) malloc((size_t) attrs[0].ulValueLen + 1);
        cert_id = (CK_BYTE_PTR) malloc((size_t) attrs[1].ulValueLen + 1);
        if (cert == NULL || cert_id == NULL)
            return ENOMEM;
        /* Read the cert and id off the card */
        attrs[0].type = CKA_VALUE;
        attrs[0].pValue = cert;
        attrs[1].type = CKA_ID;
        attrs[1].pValue = cert_id;
        if ((r = id_cryptoctx->p11->C_GetAttributeValue(id_cryptoctx->session,
                                                        obj, attrs, 2)) != CKR_OK) {
            pkiDebug("C_GetAttributeValue: %s\n", pkinit_pkcs11_code_to_text(r));
            return KRB5KDC_ERR_PREAUTH_FAILED;
        }
        pkiDebug("cert %d size %d id %d idlen %d\n", i,
                 (int) attrs[0].ulValueLen, (int) cert_id[0],
                 (int) attrs[1].ulValueLen);
        /* Parse the DER certificate read from the token. */
        cp = (unsigned char *) cert;
        x = d2i_X509(NULL, &cp, (int) attrs[0].ulValueLen);
        if (x == NULL)
            return KRB5KDC_ERR_PREAUTH_FAILED;
        id_cryptoctx->creds[i] = malloc(sizeof(struct _pkinit_cred_info));
        if (id_cryptoctx->creds[i] == NULL)
            return KRB5KDC_ERR_PREAUTH_FAILED;
        id_cryptoctx->creds[i]->name = reassemble_pkcs11_name(idopts);
        id_cryptoctx->creds[i]->cert = x;
        id_cryptoctx->creds[i]->key = NULL;
        /* The cred takes ownership of cert_id; the raw DER buffer is no
         * longer needed once parsed. */
        id_cryptoctx->creds[i]->cert_id = cert_id;
        id_cryptoctx->creds[i]->cert_id_len = attrs[1].ulValueLen;
        free(cert);
    }
    id_cryptoctx->p11->C_FindObjectsFinal(id_cryptoctx->session);
    /* cert is still NULL iff the very first search matched nothing. */
    if (cert == NULL)
        return KRB5KDC_ERR_PREAUTH_FAILED;
    return 0;
}
#endif
/* Release one credential entry and everything it owns. */
static void
free_cred_info(krb5_context context,
               pkinit_identity_crypto_context id_cryptoctx,
               struct _pkinit_cred_info *cred)
{
    if (cred == NULL)
        return;
    if (cred->cert != NULL)
        X509_free(cred->cert);
    if (cred->key != NULL)
        EVP_PKEY_free(cred->key);
#ifndef WITHOUT_PKCS11
    free(cred->cert_id);
#endif
    free(cred->name);
    free(cred);
}
/* Free every loaded credential slot in id_cryptoctx and clear its pointer. */
krb5_error_code
crypto_free_cert_info(krb5_context context,
                      pkinit_plg_crypto_context plg_cryptoctx,
                      pkinit_req_crypto_context req_cryptoctx,
                      pkinit_identity_crypto_context id_cryptoctx)
{
    int slot;

    if (id_cryptoctx == NULL)
        return EINVAL;

    for (slot = 0; slot < MAX_CREDS_ALLOWED; slot++) {
        if (id_cryptoctx->creds[slot] == NULL)
            continue;
        free_cred_info(context, id_cryptoctx, id_cryptoctx->creds[slot]);
        id_cryptoctx->creds[slot] = NULL;
    }
    return 0;
}
/*
 * Load the client's certificate(s) and key(s) from the identity source
 * described by idopts, dispatching on the identity type.  On success the
 * credentials appear in id_cryptoctx->creds[].  defer_id_prompts is
 * recorded in id_cryptoctx so the loaders can postpone PIN/pass-phrase
 * prompting to a responder callback.
 *
 * (The original ended with "if (retval) goto cleanup;" followed directly
 * by the cleanup label -- dead control flow, removed here.)
 */
krb5_error_code
crypto_load_certs(krb5_context context,
                  pkinit_plg_crypto_context plg_cryptoctx,
                  pkinit_req_crypto_context req_cryptoctx,
                  pkinit_identity_opts *idopts,
                  pkinit_identity_crypto_context id_cryptoctx,
                  krb5_principal princ,
                  krb5_boolean defer_id_prompts)
{
    id_cryptoctx->defer_id_prompt = defer_id_prompts;

    switch (idopts->idtype) {
    case IDTYPE_FILE:
        return pkinit_get_certs_fs(context, plg_cryptoctx, req_cryptoctx,
                                   idopts, id_cryptoctx, princ);
    case IDTYPE_DIR:
        return pkinit_get_certs_dir(context, plg_cryptoctx, req_cryptoctx,
                                    idopts, id_cryptoctx, princ);
#ifndef WITHOUT_PKCS11
    case IDTYPE_PKCS11:
        return pkinit_get_certs_pkcs11(context, plg_cryptoctx, req_cryptoctx,
                                       idopts, id_cryptoctx, princ);
#endif
    case IDTYPE_PKCS12:
        return pkinit_get_certs_pkcs12(context, plg_cryptoctx, req_cryptoctx,
                                       idopts, id_cryptoctx, princ);
    default:
        /* Unknown identity type. */
        return EINVAL;
    }
}
/*
* Get number of certificates available after crypto_load_certs()
*/
krb5_error_code
crypto_cert_get_count(krb5_context context,
                      pkinit_plg_crypto_context plg_cryptoctx,
                      pkinit_req_crypto_context req_cryptoctx,
                      pkinit_identity_crypto_context id_cryptoctx,
                      int *cert_count)
{
    int n = 0;

    if (id_cryptoctx == NULL || id_cryptoctx->creds[0] == NULL)
        return EINVAL;

    /* creds[] is NULL-terminated; count entries up to the terminator.
     * NOTE: the <= bound reads slot MAX_CREDS_ALLOWED, which assumes the
     * array is declared with MAX_CREDS_ALLOWED+1 entries -- confirm. */
    while (n <= MAX_CREDS_ALLOWED && id_cryptoctx->creds[n] != NULL)
        n++;
    *cert_count = n;
    return 0;
}
/*
* Begin iteration over the certs loaded in crypto_load_certs()
*/
krb5_error_code
crypto_cert_iteration_begin(krb5_context context,
                            pkinit_plg_crypto_context plg_cryptoctx,
                            pkinit_req_crypto_context req_cryptoctx,
                            pkinit_identity_crypto_context id_cryptoctx,
                            pkinit_cert_iter_handle *ih_ret)
{
    struct _pkinit_cert_iter_data *iter;

    if (id_cryptoctx == NULL || ih_ret == NULL)
        return EINVAL;
    /* Nothing to iterate over if no credentials were loaded. */
    if (id_cryptoctx->creds[0] == NULL)
        return ENOENT;

    iter = calloc(1, sizeof(*iter));
    if (iter == NULL)
        return ENOMEM;
    iter->magic = ITER_MAGIC;
    iter->plgctx = plg_cryptoctx;
    iter->reqctx = req_cryptoctx;
    iter->idctx = id_cryptoctx;
    iter->index = 0;
    *ih_ret = (pkinit_cert_iter_handle)iter;
    return 0;
}
/*
* End iteration over the certs loaded in crypto_load_certs()
*/
krb5_error_code
crypto_cert_iteration_end(krb5_context context,
                          pkinit_cert_iter_handle ih)
{
    struct _pkinit_cert_iter_data *iter = (struct _pkinit_cert_iter_data *)ih;

    /* Validate the handle's magic before releasing it. */
    if (iter == NULL || iter->magic != ITER_MAGIC)
        return EINVAL;
    free(iter);
    return 0;
}
/*
* Get next certificate handle
*/
krb5_error_code
crypto_cert_iteration_next(krb5_context context,
                           pkinit_cert_iter_handle ih,
                           pkinit_cert_handle *ch_ret)
{
    struct _pkinit_cert_iter_data *iter = (struct _pkinit_cert_iter_data *)ih;
    struct _pkinit_cert_data *handle;
    pkinit_identity_crypto_context idctx;

    /* Validate the iterator and the output pointer. */
    if (iter == NULL || iter->magic != ITER_MAGIC || ch_ret == NULL)
        return EINVAL;
    idctx = iter->idctx;
    if (idctx == NULL)
        return EINVAL;

    /* creds[] is NULL-terminated; a NULL slot means iteration is done. */
    if (idctx->creds[iter->index] == NULL)
        return PKINIT_ITER_NO_MORE;

    handle = calloc(1, sizeof(*handle));
    if (handle == NULL)
        return ENOMEM;
    handle->magic = CERT_MAGIC;
    handle->plgctx = iter->plgctx;
    handle->reqctx = iter->reqctx;
    handle->idctx = iter->idctx;
    handle->index = iter->index;
    handle->cred = idctx->creds[iter->index++];
    *ch_ret = (pkinit_cert_handle)handle;
    return 0;
}
/*
* Release cert handle
*/
krb5_error_code
crypto_cert_release(krb5_context context,
                    pkinit_cert_handle ch)
{
    struct _pkinit_cert_data *handle = (struct _pkinit_cert_data *)ch;

    /* Only the handle wrapper is freed; the underlying credential stays
     * owned by the identity crypto context. */
    if (handle == NULL || handle->magic != CERT_MAGIC)
        return EINVAL;
    free(handle);
    return 0;
}
/*
* Get certificate Key Usage and Extended Key Usage
*/
/*
 * Collect Key Usage bits into *ret_ku_bits and Extended Key Usage bits
 * into *ret_eku_bits for certificate x, as PKINIT_KU_* / PKINIT_EKU_*
 * masks.  Either output pointer may be NULL to skip that check, but not
 * both.
 */
static krb5_error_code
crypto_retieve_X509_key_usage(krb5_context context,
                              pkinit_plg_crypto_context plgcctx,
                              pkinit_req_crypto_context reqcctx,
                              X509 *x,
                              unsigned int *ret_ku_bits,
                              unsigned int *ret_eku_bits)
{
    krb5_error_code retval = 0;
    int i;
    unsigned int eku_bits = 0, ku_bits = 0;
    ASN1_BIT_STRING *usage = NULL;

    /* At least one of the two output masks must be requested. */
    if (ret_ku_bits == NULL && ret_eku_bits == NULL)
        return EINVAL;
    if (ret_eku_bits)
        *ret_eku_bits = 0;
    else {
        pkiDebug("%s: EKUs not requested, not checking\n", __FUNCTION__);
        goto check_kus;
    }
    /* Start with Extended Key usage */
    i = X509_get_ext_by_NID(x, NID_ext_key_usage, -1);
    if (i >= 0) {
        EXTENDED_KEY_USAGE *eku;
        eku = X509_get_ext_d2i(x, NID_ext_key_usage, NULL, NULL);
        if (eku) {
            /* Map each recognized EKU OID onto a PKINIT_EKU_* bit. */
            for (i = 0; i < sk_ASN1_OBJECT_num(eku); i++) {
                ASN1_OBJECT *certoid;
                certoid = sk_ASN1_OBJECT_value(eku, i);
                if ((OBJ_cmp(certoid, plgcctx->id_pkinit_KPClientAuth)) == 0)
                    eku_bits |= PKINIT_EKU_PKINIT;
                else if ((OBJ_cmp(certoid, OBJ_nid2obj(NID_ms_smartcard_login))) == 0)
                    eku_bits |= PKINIT_EKU_MSSCLOGIN;
                else if ((OBJ_cmp(certoid, OBJ_nid2obj(NID_client_auth))) == 0)
                    eku_bits |= PKINIT_EKU_CLIENTAUTH;
                else if ((OBJ_cmp(certoid, OBJ_nid2obj(NID_email_protect))) == 0)
                    eku_bits |= PKINIT_EKU_EMAILPROTECTION;
            }
            EXTENDED_KEY_USAGE_free(eku);
        }
    }
    pkiDebug("%s: returning eku 0x%08x\n", __FUNCTION__, eku_bits);
    *ret_eku_bits = eku_bits;

check_kus:
    /* Now the Key Usage bits */
    if (ret_ku_bits)
        *ret_ku_bits = 0;
    else {
        pkiDebug("%s: KUs not requested, not checking\n", __FUNCTION__);
        goto out;
    }
    /* Make sure usage exists before checking bits */
    /* NOTE(review): X509_check_ca() appears to be called for its side
     * effect of making OpenSSL parse and cache the extension flags that
     * ku_reject() consults below -- confirm. */
    X509_check_ca(x);
    usage = X509_get_ext_d2i(x, NID_key_usage, NULL, NULL);
    if (usage) {
        if (!ku_reject(x, X509v3_KU_DIGITAL_SIGNATURE))
            ku_bits |= PKINIT_KU_DIGITALSIGNATURE;
        if (!ku_reject(x, X509v3_KU_KEY_ENCIPHERMENT))
            ku_bits |= PKINIT_KU_KEYENCIPHERMENT;
        ASN1_BIT_STRING_free(usage);
    }
    pkiDebug("%s: returning ku 0x%08x\n", __FUNCTION__, ku_bits);
    *ret_ku_bits = ku_bits;
    retval = 0;

out:
    return retval;
}
/*
* Return a string format of an X509_NAME in buf where
* size is an in/out parameter. On input it is the size
* of the buffer, and on output it is the actual length
* of the name.
* If buf is NULL, returns the length req'd to hold name
*/
/*
 * Render X509_NAME a into buf using X509_NAME_print_ex with the given
 * flags.  *size is in/out: on input the capacity of buf; when buf is NULL
 * or too small, it is set to the number of bytes required and buf is left
 * unfilled.  Returns buf.
 *
 * Fix: the original never checked BIO_new() for failure, which would have
 * passed a NULL BIO to X509_NAME_print_ex; on allocation failure we now
 * report a zero-length name instead.
 */
static char *
X509_NAME_oneline_ex(X509_NAME * a,
                     char *buf,
                     unsigned int *size,
                     unsigned long flag)
{
    BIO *out;

    out = BIO_new(BIO_s_mem());
    if (out == NULL) {
        if (buf != NULL && *size > 0)
            buf[0] = '\0';
        *size = 0;
        return buf;
    }
    if (X509_NAME_print_ex(out, a, 0, flag) > 0) {
        if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) {
            memset(buf, 0, *size);
            BIO_read(out, buf, (int) BIO_number_written(out));
        } else {
            /* Caller's buffer absent/too small: report the needed length. */
            *size = BIO_number_written(out);
        }
    }
    BIO_free(out);
    return (buf);
}
/*
* Get certificate information
*/
/*
 * Build a pkinit_cert_matching_data record (subject/issuer DN strings,
 * merged PKINIT+UPN SAN principal list, KU/EKU bitmasks) for the
 * certificate behind handle ch and return it in *ret_md.  The caller frees
 * it with crypto_cert_free_matching_data().
 */
krb5_error_code
crypto_cert_get_matching_data(krb5_context context,
                              pkinit_cert_handle ch,
                              pkinit_cert_matching_data **ret_md)
{
    krb5_error_code retval;
    pkinit_cert_matching_data *md;
    krb5_principal *pkinit_sans =NULL, *upn_sans = NULL;
    struct _pkinit_cert_data *cd = (struct _pkinit_cert_data *)ch;
    unsigned int i, j;
    char buf[DN_BUF_LEN];
    unsigned int bufsize = sizeof(buf);

    if (cd == NULL || cd->magic != CERT_MAGIC)
        return EINVAL;
    if (ret_md == NULL)
        return EINVAL;

    md = calloc(1, sizeof(*md));
    if (md == NULL)
        return ENOMEM;
    md->ch = ch;
    /* get the subject name (in rfc2253 format) */
    /* NOTE(review): bufsize is shared by both calls; an over-long subject
     * DN would shrink the capacity reported for the issuer call -- confirm
     * DN_BUF_LEN is always sufficient. */
    X509_NAME_oneline_ex(X509_get_subject_name(cd->cred->cert),
                         buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS);
    md->subject_dn = strdup(buf);
    if (md->subject_dn == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    /* get the issuer name (in rfc2253 format) */
    X509_NAME_oneline_ex(X509_get_issuer_name(cd->cred->cert),
                         buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS);
    md->issuer_dn = strdup(buf);
    if (md->issuer_dn == NULL) {
        retval = ENOMEM;
        goto cleanup;
    }
    /* get the san data */
    retval = crypto_retrieve_X509_sans(context, cd->plgctx, cd->reqctx,
                                       cd->cred->cert, &pkinit_sans,
                                       &upn_sans, NULL);
    if (retval)
        goto cleanup;
    /* Count both SAN lists so they can be merged into one array. */
    j = 0;
    if (pkinit_sans != NULL) {
        for (i = 0; pkinit_sans[i] != NULL; i++)
            j++;
    }
    if (upn_sans != NULL) {
        for (i = 0; upn_sans[i] != NULL; i++)
            j++;
    }
    if (j != 0) {
        md->sans = calloc((size_t)j+1, sizeof(*md->sans));
        if (md->sans == NULL) {
            retval = ENOMEM;
            goto cleanup;
        }
        /* Move the principal pointers into md->sans; only the source list
         * containers themselves are freed here. */
        j = 0;
        if (pkinit_sans != NULL) {
            for (i = 0; pkinit_sans[i] != NULL; i++)
                md->sans[j++] = pkinit_sans[i];
            free(pkinit_sans);
        }
        if (upn_sans != NULL) {
            for (i = 0; upn_sans[i] != NULL; i++)
                md->sans[j++] = upn_sans[i];
            free(upn_sans);
        }
        md->sans[j] = NULL;
    } else
        md->sans = NULL;
    /* get the KU and EKU data */
    retval = crypto_retieve_X509_key_usage(context, cd->plgctx, cd->reqctx,
                                           cd->cred->cert,
                                           &md->ku_bits, &md->eku_bits);
    if (retval)
        goto cleanup;
    *ret_md = md;
    retval = 0;
cleanup:
    if (retval) {
        /* Partial md (including any moved SAN principals) is torn down. */
        if (md)
            crypto_cert_free_matching_data(context, md);
    }
    return retval;
}
/*
* Free certificate information
*/
krb5_error_code
crypto_cert_free_matching_data(krb5_context context,
                               pkinit_cert_matching_data *md)
{
    int i;

    if (md == NULL)
        return EINVAL;
    /* free(NULL) is a no-op, so the DN strings need no guards. */
    free(md->subject_dn);
    free(md->issuer_dn);
    if (md->sans != NULL) {
        /* The SAN list is a NULL-terminated array of owned principals. */
        for (i = 0; md->sans[i] != NULL; i++)
            krb5_free_principal(context, md->sans[i]);
        free(md->sans);
    }
    free(md);
    return 0;
}
/*
* Make this matching certificate "the chosen one"
*/
krb5_error_code
crypto_cert_select(krb5_context context,
                   pkinit_cert_matching_data *md)
{
    struct _pkinit_cert_data *cd;
    if (md == NULL)
        return EINVAL;
    cd = (struct _pkinit_cert_data *)md->ch;
    if (cd == NULL || cd->magic != CERT_MAGIC)
        return EINVAL;
    /* copy the selected cert into our id_cryptoctx */
    if (cd->idctx->my_certs != NULL) {
        sk_X509_pop_free(cd->idctx->my_certs, X509_free);
    }
    cd->idctx->my_certs = sk_X509_new_null();
    sk_X509_push(cd->idctx->my_certs, cd->cred->cert);
    free(cd->idctx->identity);
    /* hang on to the selected credential name */
    if (cd->idctx->creds[cd->index]->name != NULL)
        cd->idctx->identity = strdup(cd->idctx->creds[cd->index]->name);
    else
        cd->idctx->identity = NULL;
    /* Ownership of the cert moved to my_certs above; clear the cred's
     * pointer so free_cred_info() won't double-free it. */
    cd->idctx->creds[cd->index]->cert = NULL;       /* Don't free it twice */
    cd->idctx->cert_index = 0;
    /* Software keys move into my_key; PKCS#11 keys stay on the token, so
     * only the certificate id is transferred instead. */
    if (cd->idctx->pkcs11_method != 1) {
        cd->idctx->my_key = cd->cred->key;
        cd->idctx->creds[cd->index]->key = NULL;    /* Don't free it twice */
    }
#ifndef WITHOUT_PKCS11
    else {
        cd->idctx->cert_id = cd->cred->cert_id;
        cd->idctx->creds[cd->index]->cert_id = NULL; /* Don't free it twice */
        cd->idctx->cert_id_len = cd->cred->cert_id_len;
    }
#endif
    return 0;
}
/*
* Choose the default certificate as "the chosen one"
*/
krb5_error_code
crypto_cert_select_default(krb5_context context,
                           pkinit_plg_crypto_context plg_cryptoctx,
                           pkinit_req_crypto_context req_cryptoctx,
                           pkinit_identity_crypto_context id_cryptoctx)
{
    krb5_error_code retval;
    int cert_count = 0;

    /* Auto-selection requires exactly one loaded credential. */
    retval = crypto_cert_get_count(context, plg_cryptoctx, req_cryptoctx,
                                   id_cryptoctx, &cert_count);
    if (retval) {
        pkiDebug("%s: crypto_cert_get_count error %d, %s\n",
                 __FUNCTION__, retval, error_message(retval));
        goto errout;
    }
    if (cert_count != 1) {
        pkiDebug("%s: ERROR: There are %d certs to choose from, "
                 "but there must be exactly one.\n",
                 __FUNCTION__, cert_count);
        retval = EINVAL;
        goto errout;
    }
    /* copy the selected cert into our id_cryptoctx */
    if (id_cryptoctx->my_certs != NULL) {
        sk_X509_pop_free(id_cryptoctx->my_certs, X509_free);
    }
    id_cryptoctx->my_certs = sk_X509_new_null();
    sk_X509_push(id_cryptoctx->my_certs, id_cryptoctx->creds[0]->cert);
    /* Ownership moved to my_certs; clear the cred's pointer so
     * free_cred_info() won't double-free it. */
    id_cryptoctx->creds[0]->cert = NULL;        /* Don't free it twice */
    id_cryptoctx->cert_index = 0;
    /* hang on to the selected credential name */
    /* NOTE(review): unlike crypto_cert_select(), any previous
     * id_cryptoctx->identity is not freed before being overwritten here --
     * confirm whether that can leak. */
    if (id_cryptoctx->creds[0]->name != NULL)
        id_cryptoctx->identity = strdup(id_cryptoctx->creds[0]->name);
    else
        id_cryptoctx->identity = NULL;
    /* Software keys move into my_key; PKCS#11 keys stay on the token, so
     * only the certificate id is transferred instead. */
    if (id_cryptoctx->pkcs11_method != 1) {
        id_cryptoctx->my_key = id_cryptoctx->creds[0]->key;
        id_cryptoctx->creds[0]->key = NULL;     /* Don't free it twice */
    }
#ifndef WITHOUT_PKCS11
    else {
        id_cryptoctx->cert_id = id_cryptoctx->creds[0]->cert_id;
        id_cryptoctx->creds[0]->cert_id = NULL; /* Don't free it twice */
        id_cryptoctx->cert_id_len = id_cryptoctx->creds[0]->cert_id_len;
    }
#endif
    retval = 0;
errout:
    return retval;
}
/*
 * Load trust anchors, intermediate CAs, or CRLs (selected by catype) from
 * the PEM file named by filename into the corresponding stack in
 * id_cryptoctx, creating the stack if the context does not already have
 * one.  Duplicate entries already present in the stack are skipped.
 */
static krb5_error_code
load_cas_and_crls(krb5_context context,
                  pkinit_plg_crypto_context plg_cryptoctx,
                  pkinit_req_crypto_context req_cryptoctx,
                  pkinit_identity_crypto_context id_cryptoctx,
                  int catype,
                  char *filename)
{
    STACK_OF(X509_INFO) *sk = NULL;
    STACK_OF(X509) *ca_certs = NULL;
    STACK_OF(X509_CRL) *ca_crls = NULL;
    BIO *in = NULL;
    krb5_error_code retval = ENOMEM;
    int i = 0;

    /* If there isn't already a stack in the context,
     * create a temporary one now */
    switch(catype) {
    case CATYPE_ANCHORS:
        if (id_cryptoctx->trustedCAs != NULL)
            ca_certs = id_cryptoctx->trustedCAs;
        else {
            ca_certs = sk_X509_new_null();
            if (ca_certs == NULL)
                return ENOMEM;
        }
        break;
    case CATYPE_INTERMEDIATES:
        if (id_cryptoctx->intermediateCAs != NULL)
            ca_certs = id_cryptoctx->intermediateCAs;
        else {
            ca_certs = sk_X509_new_null();
            if (ca_certs == NULL)
                return ENOMEM;
        }
        break;
    case CATYPE_CRLS:
        if (id_cryptoctx->revoked != NULL)
            ca_crls = id_cryptoctx->revoked;
        else {
            ca_crls = sk_X509_CRL_new_null();
            if (ca_crls == NULL)
                return ENOMEM;
        }
        break;
    default:
        return ENOTSUP;
    }

    /* NOTE(review): on the two error paths below, a temporary stack
     * created above (when the context had none) is leaked -- confirm. */
    if (!(in = BIO_new_file(filename, "r"))) {
        retval = errno;
        pkiDebug("%s: error opening file '%s': %s\n", __FUNCTION__,
                 filename, error_message(errno));
        goto cleanup;
    }
    /* This loads from a file, a stack of x509/crl/pkey sets */
    if ((sk = PEM_X509_INFO_read_bio(in, NULL, NULL, NULL)) == NULL) {
        pkiDebug("%s: error reading file '%s'\n", __FUNCTION__, filename);
        retval = EIO;
        goto cleanup;
    }
    /* scan over the stack created from loading the file contents,
     * weed out duplicates, and push new ones onto the return stack
     */
    for (i = 0; i < sk_X509_INFO_num(sk); i++) {
        X509_INFO *xi = sk_X509_INFO_value(sk, i);
        if (xi != NULL && xi->x509 != NULL && catype != CATYPE_CRLS) {
            int j = 0, size = sk_X509_num(ca_certs), flag = 0;
            if (!size) {
                /* Empty stack: steal the cert out of the X509_INFO. */
                sk_X509_push(ca_certs, xi->x509);
                xi->x509 = NULL;
                continue;
            }
            /* flag != 0 after the scan means no duplicate was found. */
            for (j = 0; j < size; j++) {
                X509 *x = sk_X509_value(ca_certs, j);
                flag = X509_cmp(x, xi->x509);
                if (flag == 0)
                    break;
                else
                    continue;
            }
            if (flag != 0) {
                /* Push a copy; sk still owns xi->x509 and frees it below. */
                sk_X509_push(ca_certs, X509_dup(xi->x509));
            }
        } else if (xi != NULL && xi->crl != NULL && catype == CATYPE_CRLS) {
            int j = 0, size = sk_X509_CRL_num(ca_crls), flag = 0;
            if (!size) {
                sk_X509_CRL_push(ca_crls, xi->crl);
                xi->crl = NULL;
                continue;
            }
            for (j = 0; j < size; j++) {
                X509_CRL *x = sk_X509_CRL_value(ca_crls, j);
                flag = X509_CRL_cmp(x, xi->crl);
                if (flag == 0)
                    break;
                else
                    continue;
            }
            if (flag != 0) {
                sk_X509_CRL_push(ca_crls, X509_CRL_dup(xi->crl));
            }
        }
    }
    /* If we added something and there wasn't a stack in the
     * context before, add the temporary stack to the context.
     */
    switch(catype) {
    case CATYPE_ANCHORS:
        if (sk_X509_num(ca_certs) == 0) {
            pkiDebug("no anchors in file, %s\n", filename);
            if (id_cryptoctx->trustedCAs == NULL)
                sk_X509_free(ca_certs);
        } else {
            if (id_cryptoctx->trustedCAs == NULL)
                id_cryptoctx->trustedCAs = ca_certs;
        }
        break;
    case CATYPE_INTERMEDIATES:
        if (sk_X509_num(ca_certs) == 0) {
            pkiDebug("no intermediates in file, %s\n", filename);
            if (id_cryptoctx->intermediateCAs == NULL)
                sk_X509_free(ca_certs);
        } else {
            if (id_cryptoctx->intermediateCAs == NULL)
                id_cryptoctx->intermediateCAs = ca_certs;
        }
        break;
    case CATYPE_CRLS:
        if (sk_X509_CRL_num(ca_crls) == 0) {
            pkiDebug("no crls in file, %s\n", filename);
            if (id_cryptoctx->revoked == NULL)
                sk_X509_CRL_free(ca_crls);
        } else {
            if (id_cryptoctx->revoked == NULL)
                id_cryptoctx->revoked = ca_crls;
        }
        break;
    default:
        /* Should have been caught above! */
        retval = EINVAL;
        goto cleanup;
        break;
    }
    retval = 0;

cleanup:
    if (in != NULL)
        BIO_free(in);
    if (sk != NULL)
        sk_X509_INFO_pop_free(sk, X509_INFO_free);
    return retval;
}
/*
 * Feed every regular, non-hidden file in dirname to load_cas_and_crls()
 * for the given CA type.  Stops with an error on the first failing file.
 */
static krb5_error_code
load_cas_and_crls_dir(krb5_context context,
                      pkinit_plg_crypto_context plg_cryptoctx,
                      pkinit_req_crypto_context req_cryptoctx,
                      pkinit_identity_crypto_context id_cryptoctx,
                      int catype,
                      char *dirname)
{
    krb5_error_code retval = EINVAL;
    DIR *dir;
    struct dirent *entry;
    char path[1024];

    if (dirname == NULL)
        return EINVAL;
    dir = opendir(dirname);
    if (dir == NULL)
        return ENOENT;

    while ((entry = readdir(dir)) != NULL) {
        /* A name that cannot fit into path is a hard error. */
        if (strlen(dirname) + strlen(entry->d_name) + 2 > sizeof(path)) {
            pkiDebug("%s: Path too long -- directory '%s' and file '%s'\n",
                     __FUNCTION__, dirname, entry->d_name);
            goto cleanup;
        }
        /* Ignore subdirectories and anything starting with a dot */
#ifdef DT_DIR
        if (entry->d_type == DT_DIR)
            continue;
#endif
        if (entry->d_name[0] == '.')
            continue;
        snprintf(path, sizeof(path), "%s/%s", dirname, entry->d_name);
        retval = load_cas_and_crls(context, plg_cryptoctx, req_cryptoctx,
                                   id_cryptoctx, catype, path);
        if (retval)
            goto cleanup;
    }
    retval = 0;

cleanup:
    if (dir != NULL)
        closedir(dir);
    return retval;
}
/*
 * Dispatch CA/CRL loading on the identity type: a single file or a
 * directory of files.  Other identity types cannot hold CAs/CRLs.
 */
krb5_error_code
crypto_load_cas_and_crls(krb5_context context,
                         pkinit_plg_crypto_context plg_cryptoctx,
                         pkinit_req_crypto_context req_cryptoctx,
                         pkinit_identity_opts *idopts,
                         pkinit_identity_crypto_context id_cryptoctx,
                         int idtype,
                         int catype,
                         char *id)
{
    pkiDebug("%s: called with idtype %s and catype %s\n",
             __FUNCTION__, idtype2string(idtype), catype2string(catype));

    if (idtype == IDTYPE_FILE) {
        return load_cas_and_crls(context, plg_cryptoctx, req_cryptoctx,
                                 id_cryptoctx, catype, id);
    } else if (idtype == IDTYPE_DIR) {
        return load_cas_and_crls_dir(context, plg_cryptoctx, req_cryptoctx,
                                     id_cryptoctx, catype, id);
    }
    return ENOTSUP;
}
/*
 * Build a NULL-terminated array of krb5_external_principal_identifier
 * entries, one per certificate in sk.  Each entry carries the DER-encoded
 * subject name, the PKCS7 issuer-and-serial, and, when the certificate has
 * one, the subjectKeyIdentifier extension value.
 *
 * On success *ids owns the new array and 0 is returned; on allocation
 * failure everything built so far is released and ENOMEM is returned.
 */
static krb5_error_code
create_identifiers_from_stack(STACK_OF(X509) *sk,
                              krb5_external_principal_identifier *** ids)
{
    int i = 0, sk_size = sk_X509_num(sk);
    krb5_external_principal_identifier **krb5_cas = NULL;
    X509 *x = NULL;
    X509_NAME *xn = NULL;
    unsigned char *p = NULL;
    int len = 0;
    PKCS7_ISSUER_AND_SERIAL *is = NULL;
    char buf[DN_BUF_LEN];

    *ids = NULL;

    /* calloc zero-fills, so the array is always NULL-terminated and safe
     * to hand to free_krb5_external_principal_identifier() on error. */
    krb5_cas = calloc(sk_size + 1, sizeof(*krb5_cas));
    if (krb5_cas == NULL)
        return ENOMEM;

    for (i = 0; i < sk_size; i++) {
        krb5_cas[i] = malloc(sizeof(krb5_external_principal_identifier));
        /* BUGFIX: the malloc result was previously dereferenced without a
         * NULL check, crashing on allocation failure. */
        if (krb5_cas[i] == NULL)
            goto oom;
        x = sk_X509_value(sk, i);
        X509_NAME_oneline(X509_get_subject_name(x), buf, sizeof(buf));
        pkiDebug("#%d cert= %s\n", i, buf);

        /* fill-in subjectName */
        krb5_cas[i]->subjectName.magic = 0;
        krb5_cas[i]->subjectName.length = 0;
        krb5_cas[i]->subjectName.data = NULL;
        xn = X509_get_subject_name(x);
        len = i2d_X509_NAME(xn, NULL);
        if ((p = malloc((size_t) len)) == NULL)
            goto oom;
        krb5_cas[i]->subjectName.data = (char *)p;
        i2d_X509_NAME(xn, &p);
        krb5_cas[i]->subjectName.length = len;

        /* fill-in issuerAndSerialNumber */
        krb5_cas[i]->issuerAndSerialNumber.length = 0;
        krb5_cas[i]->issuerAndSerialNumber.magic = 0;
        krb5_cas[i]->issuerAndSerialNumber.data = NULL;
        is = PKCS7_ISSUER_AND_SERIAL_new();
        if (is == NULL)
            goto oom;
        X509_NAME_set(&is->issuer, X509_get_issuer_name(x));
        M_ASN1_INTEGER_free(is->serial);
        is->serial = M_ASN1_INTEGER_dup(X509_get_serialNumber(x));
        if (is->serial == NULL)
            goto oom;
        len = i2d_PKCS7_ISSUER_AND_SERIAL(is, NULL);
        p = malloc(len);
        if (p == NULL)
            goto oom;
        krb5_cas[i]->issuerAndSerialNumber.data = (char *)p;
        i2d_PKCS7_ISSUER_AND_SERIAL(is, &p);
        krb5_cas[i]->issuerAndSerialNumber.length = len;

        /* fill-in subjectKeyIdentifier (only when the extension exists) */
        krb5_cas[i]->subjectKeyIdentifier.length = 0;
        krb5_cas[i]->subjectKeyIdentifier.magic = 0;
        krb5_cas[i]->subjectKeyIdentifier.data = NULL;
        if (X509_get_ext_by_NID(x, NID_subject_key_identifier, -1) >= 0) {
            ASN1_OCTET_STRING *ikeyid;

            ikeyid = X509_get_ext_d2i(x, NID_subject_key_identifier, NULL,
                                      NULL);
            if (ikeyid != NULL) {
                len = i2d_ASN1_OCTET_STRING(ikeyid, NULL);
                p = malloc(len);
                if (p == NULL)
                    goto oom;
                krb5_cas[i]->subjectKeyIdentifier.data = (char *)p;
                i2d_ASN1_OCTET_STRING(ikeyid, &p);
                krb5_cas[i]->subjectKeyIdentifier.length = len;
                ASN1_OCTET_STRING_free(ikeyid);
            }
        }
        PKCS7_ISSUER_AND_SERIAL_free(is);
        is = NULL;
    }
    *ids = krb5_cas;
    return 0;

oom:
    free_krb5_external_principal_identifier(&krb5_cas);
    PKCS7_ISSUER_AND_SERIAL_free(is);
    return ENOMEM;
}
/*
 * Produce the invalidCertificates identifier list for the single
 * certificate received from the peer.  Fails with PREAUTH_FAILED when no
 * certificate has been received, or ENOMEM on allocation failure.
 */
static krb5_error_code
create_krb5_invalidCertificates(krb5_context context,
                                pkinit_plg_crypto_context plg_cryptoctx,
                                pkinit_req_crypto_context req_cryptoctx,
                                pkinit_identity_crypto_context id_cryptoctx,
                                krb5_external_principal_identifier *** ids)
{
    krb5_error_code retval = ENOMEM;
    STACK_OF(X509) *stack = NULL;

    *ids = NULL;
    if (req_cryptoctx->received_cert == NULL)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    /* Wrap the one received certificate in a temporary stack. */
    stack = sk_X509_new_null();
    if (stack != NULL) {
        sk_X509_push(stack, req_cryptoctx->received_cert);
        retval = create_identifiers_from_stack(stack, ids);
        sk_X509_free(stack);
    }
    return retval;
}
/*
 * Build the supportedCMSTypes list: a NULL-terminated, one-element array
 * holding only the des-ede3-cbc OID with empty parameters.  On success the
 * caller owns *oids; returns ENOMEM (or the copy failure code) otherwise.
 */
krb5_error_code
create_krb5_supportedCMSTypes(krb5_context context,
                              pkinit_plg_crypto_context plg_cryptoctx,
                              pkinit_req_crypto_context req_cryptoctx,
                              pkinit_identity_crypto_context id_cryptoctx,
                              krb5_algorithm_identifier ***oids)
{
    krb5_error_code retval = ENOMEM;
    krb5_algorithm_identifier **loids = NULL;
    krb5_data des3oid = {0, 8, "\x2A\x86\x48\x86\xF7\x0D\x03\x07" };

    *oids = NULL;

    /* calloc zero-fills, leaving loids[1] as the NULL terminator. */
    loids = calloc(2, sizeof(krb5_algorithm_identifier *));
    if (loids == NULL)
        return retval;
    loids[0] = malloc(sizeof(krb5_algorithm_identifier));
    if (loids[0] == NULL)
        goto fail;
    retval = pkinit_copy_krb5_data(&loids[0]->algorithm, &des3oid);
    if (retval)
        goto fail;
    loids[0]->parameters.length = 0;
    loids[0]->parameters.data = NULL;
    *oids = loids;
    return 0;

fail:
    free(loids[0]);     /* free(NULL) is harmless */
    free(loids);
    return retval;
}
/*
 * Produce identifier entries for every CA on the context's trusted-CA
 * stack.  Fails with PREAUTH_FAILED when no trusted CAs are loaded.
 */
krb5_error_code
create_krb5_trustedCertifiers(krb5_context context,
                              pkinit_plg_crypto_context plg_cryptoctx,
                              pkinit_req_crypto_context req_cryptoctx,
                              pkinit_identity_crypto_context id_cryptoctx,
                              krb5_external_principal_identifier *** ids)
{
    *ids = NULL;
    if (id_cryptoctx->trustedCAs == NULL)
        return KRB5KDC_ERR_PREAUTH_FAILED;

    return create_identifiers_from_stack(id_cryptoctx->trustedCAs, ids);
}
/*
 * DER-encode a PKCS7 issuerAndSerialNumber identifying the certificate
 * received from the peer.  On success *out holds a malloc'd encoding of
 * *out_len bytes.  If no certificate has been received this succeeds with
 * *out left NULL.  Returns ENOMEM on allocation failure.
 */
krb5_error_code
create_issuerAndSerial(krb5_context context,
                       pkinit_plg_crypto_context plg_cryptoctx,
                       pkinit_req_crypto_context req_cryptoctx,
                       pkinit_identity_crypto_context id_cryptoctx,
                       unsigned char **out,
                       unsigned int *out_len)
{
    unsigned char *p = NULL;
    PKCS7_ISSUER_AND_SERIAL *is = NULL;
    int len = 0;
    krb5_error_code retval = ENOMEM;
    X509 *cert = req_cryptoctx->received_cert;

    *out = NULL;
    *out_len = 0;
    if (req_cryptoctx->received_cert == NULL)
        return 0;

    is = PKCS7_ISSUER_AND_SERIAL_new();
    /* BUGFIX: is was previously dereferenced without a NULL check. */
    if (is == NULL)
        return ENOMEM;
    X509_NAME_set(&is->issuer, X509_get_issuer_name(cert));
    M_ASN1_INTEGER_free(is->serial);
    is->serial = M_ASN1_INTEGER_dup(X509_get_serialNumber(cert));
    /* BUGFIX: a failed dup left a NULL serial that i2d would then use. */
    if (is->serial == NULL)
        goto cleanup;
    len = i2d_PKCS7_ISSUER_AND_SERIAL(is, NULL);
    if ((p = *out = malloc((size_t) len)) == NULL)
        goto cleanup;
    i2d_PKCS7_ISSUER_AND_SERIAL(is, &p);
    *out_len = len;
    retval = 0;

cleanup:
    /* Free the pieces individually, matching the original ownership model;
     * ASN1_INTEGER_free(NULL) is harmless. */
    X509_NAME_free(is->issuer);
    ASN1_INTEGER_free(is->serial);
    free(is);
    return retval;
}
/*
 * Decrypt the enveloped PKCS7 object p7 with the identity's private key and
 * append the recovered plaintext to data.  Returns 1 on success (at least
 * one byte decoded), 0 on failure.
 */
static int
pkcs7_decrypt(krb5_context context,
              pkinit_identity_crypto_context id_cryptoctx,
              PKCS7 *p7,
              BIO *data)
{
    BIO *tmpmem = NULL;
    int retval = 0, i = 0;
    char buf[4096];

    if (p7 == NULL)
        return 0;

    if (!PKCS7_type_is_enveloped(p7)) {
        pkiDebug("wrong pkcs7 content type\n");
        return 0;
    }

    if (!(tmpmem = pkcs7_dataDecode(context, id_cryptoctx, p7))) {
        pkiDebug("unable to decrypt pkcs7 object\n");
        return 0;
    }

    /*
     * BUGFIX: the previous loop returned immediately after the first
     * successful BIO_read(), truncating any payload larger than
     * sizeof(buf), and leaked tmpmem when the first read produced no data.
     * Drain the BIO completely and free it on every path.
     */
    for (;;) {
        i = BIO_read(tmpmem, buf, sizeof(buf));
        if (i <= 0)
            break;
        BIO_write(data, buf, i);
        retval = 1;
    }
    BIO_free_all(tmpmem);
    return retval;
}
/*
 * Decode and log the TD-TRUSTED-CERTIFIERS (or TD-INVALID-CERTIFICATES)
 * typed data sent by the KDC.  Each entry may identify a certificate by
 * subject name, issuer-and-serial, and/or subject key identifier.  We make
 * no use of the information yet, so the normal path deliberately returns
 * KRB5KDC_ERR_PREAUTH_FAILED; ENOMEM is returned if a decode fails.
 */
krb5_error_code
pkinit_process_td_trusted_certifiers(
    krb5_context context,
    pkinit_plg_crypto_context plg_cryptoctx,
    pkinit_req_crypto_context req_cryptoctx,
    pkinit_identity_crypto_context id_cryptoctx,
    krb5_external_principal_identifier **krb5_trusted_certifiers,
    int td_type)
{
    krb5_error_code retval = ENOMEM;
    STACK_OF(X509_NAME) *sk_xn = NULL;
    X509_NAME *xn = NULL;
    PKCS7_ISSUER_AND_SERIAL *is = NULL;
    ASN1_OCTET_STRING *id = NULL;
    const unsigned char *p = NULL;
    char buf[DN_BUF_LEN];
    int i = 0;

    if (td_type == TD_TRUSTED_CERTIFIERS)
        pkiDebug("received trusted certifiers\n");
    else
        pkiDebug("received invalid certificate\n");

    sk_xn = sk_X509_NAME_new_null();
    while (krb5_trusted_certifiers[i] != NULL) {
        /* Decode and log the subject name, if supplied. */
        if (krb5_trusted_certifiers[i]->subjectName.data != NULL) {
            p = (unsigned char *)krb5_trusted_certifiers[i]->subjectName.data;
            xn = d2i_X509_NAME(NULL, &p,
                (int)krb5_trusted_certifiers[i]->subjectName.length);
            if (xn == NULL)
                goto cleanup;
            X509_NAME_oneline(xn, buf, sizeof(buf));
            if (td_type == TD_TRUSTED_CERTIFIERS)
                pkiDebug("#%d cert = %s is trusted by kdc\n", i, buf);
            else
                pkiDebug("#%d cert = %s is invalid\n", i, buf);
            /* Push onto the stack so cleanup frees every decoded name. */
            sk_X509_NAME_push(sk_xn, xn);
        }
        /* Decode and log the issuer-and-serial, if supplied. */
        if (krb5_trusted_certifiers[i]->issuerAndSerialNumber.data != NULL) {
            p = (unsigned char *)
                krb5_trusted_certifiers[i]->issuerAndSerialNumber.data;
            is = d2i_PKCS7_ISSUER_AND_SERIAL(NULL, &p,
                (int)krb5_trusted_certifiers[i]->issuerAndSerialNumber.length);
            if (is == NULL)
                goto cleanup;
            X509_NAME_oneline(is->issuer, buf, sizeof(buf));
            /* BUGFIX: debug message previously read "trusted bu kdc". */
            if (td_type == TD_TRUSTED_CERTIFIERS)
                pkiDebug("#%d issuer = %s serial = %ld is trusted by kdc\n", i,
                         buf, ASN1_INTEGER_get(is->serial));
            else
                pkiDebug("#%d issuer = %s serial = %ld is invalid\n", i, buf,
                         ASN1_INTEGER_get(is->serial));
            PKCS7_ISSUER_AND_SERIAL_free(is);
        }
        /* Decode the subject key identifier, if supplied; currently unused. */
        if (krb5_trusted_certifiers[i]->subjectKeyIdentifier.data != NULL) {
            p = (unsigned char *)
                krb5_trusted_certifiers[i]->subjectKeyIdentifier.data;
            id = d2i_ASN1_OCTET_STRING(NULL, &p,
                (int)krb5_trusted_certifiers[i]->subjectKeyIdentifier.length);
            if (id == NULL)
                goto cleanup;
            /* XXX */
            ASN1_OCTET_STRING_free(id);
        }
        i++;
    }
    /*
     * Since we are not doing anything with the received trusted certifiers,
     * return an error.  This is the place where a different client
     * certificate could be chosen based on td_trusted_certifiers.
     */
    retval = KRB5KDC_ERR_PREAUTH_FAILED;
cleanup:
    if (sk_xn != NULL)
        sk_X509_NAME_pop_free(sk_xn, X509_NAME_free);
    return retval;
}
/*
 * Decrypt the encrypted content of the enveloped PKCS7 object p7 and
 * return a BIO chain from which the caller can read the plaintext, or NULL
 * on failure.  This parallels OpenSSL's internal PKCS7 dataDecode path,
 * except that decryption of the content-encryption key is delegated to
 * pkinit_decode_data() instead of calling EVP directly.
 */
static BIO *
pkcs7_dataDecode(krb5_context context,
                 pkinit_identity_crypto_context id_cryptoctx,
                 PKCS7 *p7)
{
    int i = 0;
    unsigned int jj = 0, tmp_len = 0;
    BIO *out=NULL,*etmp=NULL,*bio=NULL;
    unsigned char *tmp=NULL;
    ASN1_OCTET_STRING *data_body=NULL;
    const EVP_CIPHER *evp_cipher=NULL;
    EVP_CIPHER_CTX *evp_ctx=NULL;
    X509_ALGOR *enc_alg=NULL;
    STACK_OF(PKCS7_RECIP_INFO) *rsk=NULL;
    PKCS7_RECIP_INFO *ri=NULL;
    /* Our own certificate, used to locate the matching recipientInfo. */
    X509 *cert = sk_X509_value(id_cryptoctx->my_certs,
                               id_cryptoctx->cert_index);

    p7->state=PKCS7_S_HEADER;

    /* Pull the recipient infos, content-encryption algorithm, and the
     * encrypted content body out of the EnvelopedData. */
    rsk=p7->d.enveloped->recipientinfo;
    enc_alg=p7->d.enveloped->enc_data->algorithm;
    data_body=p7->d.enveloped->enc_data->enc_data;
    evp_cipher=EVP_get_cipherbyobj(enc_alg->algorithm);
    if (evp_cipher == NULL) {
        PKCS7err(PKCS7_F_PKCS7_DATADECODE,PKCS7_R_UNSUPPORTED_CIPHER_TYPE);
        goto cleanup;
    }

    /* The cipher filter BIO that will decrypt the content on read. */
    if ((etmp=BIO_new(BIO_f_cipher())) == NULL) {
        PKCS7err(PKCS7_F_PKCS7_DATADECODE,ERR_R_BIO_LIB);
        goto cleanup;
    }

    /* It was encrypted, we need to decrypt the secret key
     * with the private key */

    /* Find the recipientInfo which matches the passed certificate
     * (if any)
     */
    if (cert) {
        for (i=0; i<sk_PKCS7_RECIP_INFO_num(rsk); i++) {
            int tmp_ret = 0;
            ri=sk_PKCS7_RECIP_INFO_value(rsk,i);
            /* Match on issuer name first, then on serial number. */
            tmp_ret = X509_NAME_cmp(ri->issuer_and_serial->issuer,
                                    cert->cert_info->issuer);
            if (!tmp_ret) {
                tmp_ret = M_ASN1_INTEGER_cmp(cert->cert_info->serialNumber,
                                             ri->issuer_and_serial->serial);
                if (!tmp_ret)
                    break;
            }
            ri=NULL;
        }
        if (ri == NULL) {
            PKCS7err(PKCS7_F_PKCS7_DATADECODE,
                     PKCS7_R_NO_RECIPIENT_MATCHES_CERTIFICATE);
            goto cleanup;
        }
    }

    /* If we haven't got a certificate try each ri in turn */
    if (cert == NULL) {
        /* NOTE(review): a pkinit_decode_data() failure aborts here rather
         * than trying the next recipient, so the loop never actually
         * advances past a failed decode -- confirm that is intended. */
        for (i=0; i<sk_PKCS7_RECIP_INFO_num(rsk); i++) {
            ri=sk_PKCS7_RECIP_INFO_value(rsk,i);
            jj = pkinit_decode_data(context, id_cryptoctx,
                                    M_ASN1_STRING_data(ri->enc_key),
                                    (unsigned int) M_ASN1_STRING_length(ri->enc_key),
                                    &tmp, &tmp_len);
            if (jj) {
                PKCS7err(PKCS7_F_PKCS7_DATADECODE, ERR_R_EVP_LIB);
                goto cleanup;
            }
            /* jj is necessarily 0 here; keep the decoded key length. */
            if (!jj && tmp_len > 0) {
                jj = tmp_len;
                break;
            }
            ERR_clear_error();
            ri = NULL;
        }
        if (ri == NULL) {
            PKCS7err(PKCS7_F_PKCS7_DATADECODE, PKCS7_R_NO_RECIPIENT_MATCHES_KEY);
            goto cleanup;
        }
    }
    else {
        /* Decrypt the content-encryption key for the matched recipient. */
        jj = pkinit_decode_data(context, id_cryptoctx,
                                M_ASN1_STRING_data(ri->enc_key),
                                (unsigned int) M_ASN1_STRING_length(ri->enc_key),
                                &tmp, &tmp_len);
        if (jj || tmp_len <= 0) {
            PKCS7err(PKCS7_F_PKCS7_DATADECODE, ERR_R_EVP_LIB);
            goto cleanup;
        }
        jj = tmp_len;
    }

    /* Initialize the cipher, load algorithm parameters, then the key. */
    evp_ctx=NULL;
    BIO_get_cipher_ctx(etmp,&evp_ctx);
    if (EVP_CipherInit_ex(evp_ctx,evp_cipher,NULL,NULL,NULL,0) <= 0)
        goto cleanup;
    if (EVP_CIPHER_asn1_to_param(evp_ctx,enc_alg->parameter) < 0)
        goto cleanup;
    if (jj != (unsigned) EVP_CIPHER_CTX_key_length(evp_ctx)) {
        /* Some S/MIME clients don't use the same key
         * and effective key length. The key length is
         * determined by the size of the decrypted RSA key.
         */
        if(!EVP_CIPHER_CTX_set_key_length(evp_ctx, (int)jj)) {
            PKCS7err(PKCS7_F_PKCS7_DATADECODE,
                     PKCS7_R_DECRYPTED_KEY_IS_WRONG_LENGTH);
            goto cleanup;
        }
    }
    if (EVP_CipherInit_ex(evp_ctx,NULL,NULL,tmp,NULL,0) <= 0)
        goto cleanup;

    /* Scrub the plaintext key material now that the context holds it. */
    OPENSSL_cleanse(tmp,jj);

    /* Chain the cipher BIO on top of a memory BIO over the ciphertext. */
    if (out == NULL)
        out=etmp;
    else
        BIO_push(out,etmp);
    etmp=NULL;
    if (data_body->length > 0)
        bio = BIO_new_mem_buf(data_body->data, data_body->length);
    else {
        bio=BIO_new(BIO_s_mem());
        BIO_set_mem_eof_return(bio,0);
    }
    BIO_push(out,bio);
    bio=NULL;

    /* OpenSSL-style shared error exit: the goto targets inside this
     * never-taken if(0) so the success path falls through past it. */
    if (0) {
cleanup:
        if (out != NULL) BIO_free_all(out);
        if (etmp != NULL) BIO_free_all(etmp);
        if (bio != NULL) BIO_free_all(bio);
        out=NULL;
    }
    if (tmp != NULL)
        free(tmp);
    return(out);
}
#ifdef DEBUG_DH
/* Debug helper: dump DH parameters and the private key to stderr. */
static void
print_dh(DH * dh, char *msg)
{
    BIO *bio_err = BIO_new(BIO_s_file());

    BIO_set_fp(bio_err, stderr, BIO_NOCLOSE | BIO_FP_TEXT);
    if (msg != NULL)
        BIO_puts(bio_err, (const char *)msg);
    if (dh != NULL)
        DHparams_print(bio_err, dh);
    BIO_puts(bio_err, "private key: ");
    BN_print(bio_err, dh->priv_key);
    BIO_puts(bio_err, (const char *)"\n");
    BIO_free(bio_err);
}
/* Debug helper: print a big number (e.g. a DH public key) to stderr. */
static void
print_pubkey(BIGNUM * key, char *msg)
{
    BIO *bio_err = BIO_new(BIO_s_file());

    BIO_set_fp(bio_err, stderr, BIO_NOCLOSE | BIO_FP_TEXT);
    if (msg != NULL)
        BIO_puts(bio_err, (const char *)msg);
    if (key != NULL)
        BN_print(bio_err, key);
    BIO_puts(bio_err, "\n");
    BIO_free(bio_err);
}
#endif
/*
 * Map a PKCS#11 return code to a printable string.  Unknown codes are
 * formatted into a static buffer, so for those the result is overwritten
 * by the next call (not thread-safe).
 */
static char *
pkinit_pkcs11_code_to_text(int err)
{
    static char uc[32];
    int i = 0;

    while (pkcs11_errstrings[i].text != NULL && pkcs11_errstrings[i].code != err)
        i++;
    if (pkcs11_errstrings[i].text != NULL)
        return (pkcs11_errstrings[i].text);

    snprintf(uc, sizeof(uc), _("unknown code 0x%x"), err);
    return (uc);
}
/*
 * Add (or update) an entry on the pkinit_identity_crypto_context's list of
 * deferred identities, preserving whatever flags were already recorded for
 * that identity.
 */
krb5_error_code
crypto_set_deferred_id(krb5_context context,
                       pkinit_identity_crypto_context id_cryptoctx,
                       const char *identity, const char *password)
{
    return pkinit_set_deferred_id(&id_cryptoctx->deferred_ids, identity,
                                  pkinit_get_deferred_id_flags(
                                      id_cryptoctx->deferred_ids, identity),
                                  password);
}
/*
 * Return a read-only view of the pkinit_identity_crypto_context's list of
 * deferred identities.  The pointer is only guaranteed valid until the next
 * call to pkinit_set_deferred_id() or crypto_set_deferred_id().
 */
const pkinit_deferred_id *
crypto_get_deferred_ids(krb5_context context,
                        pkinit_identity_crypto_context id_cryptoctx)
{
    return (const pkinit_deferred_id *)id_cryptoctx->deferred_ids;
}
| 32.951252 | 139 | 0.584902 | [
"object"
] |
a952183ea2a22697e6dc387b217fcc76459bb593 | 3,710 | h | C | Endpoints/include/Endpoints/EndpointAttributeValidation.h | rysmith0315/avs-device-sdk | f0fa55856af9e33d0889ac14820efbb807341f17 | [
"Apache-2.0"
] | null | null | null | Endpoints/include/Endpoints/EndpointAttributeValidation.h | rysmith0315/avs-device-sdk | f0fa55856af9e33d0889ac14820efbb807341f17 | [
"Apache-2.0"
] | null | null | null | Endpoints/include/Endpoints/EndpointAttributeValidation.h | rysmith0315/avs-device-sdk | f0fa55856af9e33d0889ac14820efbb807341f17 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#ifndef ALEXA_CLIENT_SDK_ENDPOINTS_INCLUDE_ENDPOINTS_ENDPOINTATTRIBUTEVALIDATION_H_
#define ALEXA_CLIENT_SDK_ENDPOINTS_INCLUDE_ENDPOINTS_ENDPOINTATTRIBUTEVALIDATION_H_
#include <map>
#include <string>
#include <AVSCommon/AVS/AVSDiscoveryEndpointAttributes.h>
namespace alexaClientSDK {
namespace endpoints {

/**
 * Returns whether the given identifier follows the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param identifier The identifier to be validated.
 * @return @c true if valid; otherwise, return @c false.
 */
bool isEndpointIdValid(const avsCommon::sdkInterfaces::endpoints::EndpointIdentifier& identifier);

/**
 * Returns whether the given name follows the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param name The friendly name to be validated.
 * @return @c true if valid; otherwise, return @c false.
 */
bool isFriendlyNameValid(const std::string& name);

/**
 * Returns whether the given description follows the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param description The description to be validated.
 * @return @c true if valid; otherwise, return @c false.
 */
bool isDescriptionValid(const std::string& description);

/**
 * Returns whether the given manufacturer name follows the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param manufacturerName The manufacturer name to be validated.
 * @return @c true if valid; otherwise, return @c false.
 */
bool isManufacturerNameValid(const std::string& manufacturerName);

/**
 * Returns whether the given attributes follow the AVS specification.
 *
 * See format specification here:
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param attributes The attributes to be validated.
 * @return @c true if all attributes are valid; otherwise, return @c false.
 */
bool isAdditionalAttributesValid(
    const avsCommon::avs::AVSDiscoveryEndpointAttributes::AdditionalAttributes& attributes);

/**
 * Returns whether the given connections values follow the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param connections The list of connection objects, each represented as a map of
 * attribute name/value pairs.
 * @return @c true if valid; otherwise, return @c false.
 */
bool areConnectionsValid(const std::vector<std::map<std::string, std::string>>& connections);

/**
 * Returns whether the given cookies follow the AVS specification.
 *
 * @see https://developer.amazon.com/alexa-voice-service/alexa-discovery.html#addorupdatereport
 *
 * @param cookies The map of cookies name and values.
 * @return @c true if valid; otherwise, return @c false.
 */
bool areCookiesValid(const std::map<std::string, std::string>& cookies);

}  // namespace endpoints
}  // namespace alexaClientSDK
#endif // ALEXA_CLIENT_SDK_ENDPOINTS_INCLUDE_ENDPOINTS_ENDPOINTATTRIBUTEVALIDATION_H_
| 36.372549 | 98 | 0.766577 | [
"vector"
] |
a955f3a8e7f2f63042d5c4784d4a254539737c0a | 1,661 | h | C | ext/intl/breakiterator/breakiterator_iterators.h | guomoumou123/php5.5.10 | 1d77eadd382f075d7836b412b04157e9c4210703 | [
"PHP-3.01"
] | 278 | 2015-11-03T03:01:20.000Z | 2022-01-20T18:21:05.000Z | ext/intl/breakiterator/breakiterator_iterators.h | guomoumou123/php5.5.10 | 1d77eadd382f075d7836b412b04157e9c4210703 | [
"PHP-3.01"
] | 374 | 2015-11-03T12:37:22.000Z | 2021-12-17T14:18:08.000Z | ext/intl/breakiterator/breakiterator_iterators.h | guomoumou123/php5.5.10 | 1d77eadd382f075d7836b412b04157e9c4210703 | [
"PHP-3.01"
] | 96 | 2015-11-22T07:47:26.000Z | 2022-01-20T19:52:19.000Z | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Gustavo Lopes <cataphract@php.net> |
+----------------------------------------------------------------------+
*/
#ifndef INTL_BREAKITERATOR_ITERATORS_H
#define INTL_BREAKITERATOR_ITERATORS_H
#include <unicode/umachine.h>
U_CDECL_BEGIN
#include <math.h>
#include <php.h>
U_CDECL_END
/* How IntlPartsIterator keys are produced: sequential numbering, or
 * (presumably) keyed by the part's left/right boundary offsets --
 * confirm against the iterator implementation. */
typedef enum {
	PARTS_ITERATOR_KEY_SEQUENTIAL,
	PARTS_ITERATOR_KEY_LEFT,
	PARTS_ITERATOR_KEY_RIGHT,
} parts_iter_key_type;
#ifdef __cplusplus
void IntlIterator_from_BreakIterator_parts(zval *break_iter_zv,
zval *object,
parts_iter_key_type key_type TSRMLS_DC);
#endif
U_CFUNC zend_object_iterator *_breakiterator_get_iterator(
zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC);
U_CFUNC void breakiterator_register_IntlPartsIterator_class(TSRMLS_D);
#endif
| 38.627907 | 75 | 0.544852 | [
"object"
] |
a9579ce0c3167adf69d01825a449d7c59de9ba4d | 5,463 | h | C | net/cert/internal/cert_errors.h | xzhan96/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2018-02-22T18:06:56.000Z | 2021-08-28T12:49:27.000Z | net/cert/internal/cert_errors.h | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | net/cert/internal/cert_errors.h | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2017-08-16T08:15:01.000Z | 2018-03-27T00:07:30.000Z | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// ----------------------------
// Overview of error design
// ----------------------------
//
// Certificate path validation/parsing may emit a sequence of
// errors/warnings/context. These are represented by a tree of CertErrorNodes.
// Each node is comprised of:
//
// * A unique identifier.
//
// This serves similarly to an error code, and is useful for querying if a
// particular error occurred.
//
// * [optional] A parameters object.
//
// Nodes may attach a heap-allocated subclass of CertErrorParams, to carry
// extra information that is useful when reporting the error. For instance
// a parsing error may want to describe where in the DER the failure
// happened, or what the unexpected value was.
//
// * [optional] Child nodes.
//
// Error nodes are arranged in a tree. The parent/child hierarchy is used to
// group errors that share some common state.
// For instance during path processing it is useful to group the
// errors/warnings that happened while processing certificate "i" as
// children of a shared "context" node. The context node in this case
// doesn't describe a particular error, but rather some shared event and
// its parameters.
//
// ----------------------------
// Using errors in other APIs
// ----------------------------
//
// The top level object used in APIs is CertErrors. A pointer to a CertErrors
// object is typically given as an out-parameter for code that may generate
// errors.
//
// Note that CertErrors gives a non-hiearhical interface for emitting errors.
// In other words, it doesn't let you create parent/child relationships
// directly.
//
// To change the parent node for subsequently emitted errors in the CertErrors
// object, one constructs a CertErrorScoper on the stack.
//
// ----------------------------
// Defining new errors
// ----------------------------
//
// The error IDs are extensible and do not need to be centrally defined.
//
// To define a new error use the macro DEFINE_CERT_ERROR_ID() in a .cc file.
// If consumers are to be able to query for this error then the symbol should
// also be exposed in a header file.
//
// Error IDs are in truth string literals, whose pointer value will be unique
// per process.
#ifndef NET_CERT_INTERNAL_CERT_ERRORS_H_
#define NET_CERT_INTERNAL_CERT_ERRORS_H_
#include <memory>
#include <vector>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "net/base/net_export.h"
#include "net/cert/internal/cert_error_id.h"
namespace net {
class CertErrorParams;
class CertErrorScoper;
class ParsedCertificate;
// The type of a particular CertErrorNode.
enum class CertErrorNodeType {
  // Note the TYPE_ prefix is to avoid compile errors, because ERROR() is a
  // commonly used macro name.

  // Node that represents a single (fatal) error.
  TYPE_ERROR,
  // Node that represents a single non-fatal error.
  TYPE_WARNING,
  // Contextual parent node that groups other errors/warnings; carries no
  // error itself.
  TYPE_CONTEXT,
};
struct CertErrorNode;
using CertErrorNodes = std::vector<std::unique_ptr<CertErrorNode>>;
// CertErrorNode represents a node in the error tree. This could be an error,
// warning, or simply contextual parent node. See the error design overview for
// a better description of how this is used.
struct NET_EXPORT CertErrorNode {
  // |params| may be null.
  CertErrorNode(CertErrorNodeType node_type,
                CertErrorId id,
                std::unique_ptr<CertErrorParams> params);
  ~CertErrorNode();

  // Appends |child| to this node's children, taking ownership of it.
  void AddChild(std::unique_ptr<CertErrorNode> child);

  // Whether this node is an error, a warning, or a contextual parent.
  CertErrorNodeType node_type;
  // Identifies the particular error/warning/context (unique per process).
  CertErrorId id;
  // Optional extra details for reporting; may be null.
  std::unique_ptr<CertErrorParams> params;
  // Nodes grouped under this one.
  CertErrorNodes children;
};
// CertErrors is the main object for emitting errors and internally builds up
// the error tree.
class NET_EXPORT CertErrors {
 public:
  CertErrors();
  ~CertErrors();

  // Adds a node to the current insertion point in the error tree. |params| may
  // be null.
  void Add(CertErrorNodeType node_type,
           CertErrorId id,
           std::unique_ptr<CertErrorParams> params);

  // Convenience wrappers around Add() for TYPE_ERROR and TYPE_WARNING nodes,
  // with and without parameters.
  void AddError(CertErrorId id, std::unique_ptr<CertErrorParams> params);
  void AddError(CertErrorId id);
  void AddWarning(CertErrorId id, std::unique_ptr<CertErrorParams> params);
  void AddWarning(CertErrorId id);

  // Returns true if the tree is empty. Note that emptiness of the error tree
  // is NOT equivalent to success for some call, and vice versa. (For instance
  // consumers may forget to emit errors on failures, or some errors may be
  // non-fatal warnings).
  bool empty() const;

  // Dumps a textual representation of the errors for debugging purposes.
  std::string ToDebugString() const;

 private:
  // CertErrorScoper manipulates the CertErrors object.
  friend class CertErrorScoper;

  // Attaches |node| under the parent designated by the current scoper (or at
  // the top level when there is none).
  void AddNode(std::unique_ptr<CertErrorNode> node);

  // Used by CertErrorScoper to register itself as the top-level scoper.
  // Returns the previously set scoper, or nullptr if there was none.
  CertErrorScoper* SetScoper(CertErrorScoper* scoper);

  // Top-level nodes of the error tree.
  CertErrorNodes nodes_;

  // The top-most CertErrorScoper that is currently in scope (and which affects
  // the parent node for newly added errors).
  CertErrorScoper* current_scoper_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(CertErrors);
};
} // namespace net
#endif // NET_CERT_INTERNAL_CERT_ERRORS_H_
| 33.109091 | 80 | 0.714809 | [
"object",
"vector"
] |
a9690a0e7b04383a987a885fce7baa995c7df164 | 25,849 | h | C | com/ole32/com/txf/callframe/metadata.h | npocmaka/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 17 | 2020-11-13T13:42:52.000Z | 2021-09-16T09:13:13.000Z | com/ole32/com/txf/callframe/metadata.h | sancho1952007/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 2 | 2020-10-19T08:02:06.000Z | 2020-10-19T08:23:18.000Z | com/ole32/com/txf/callframe/metadata.h | sancho1952007/Windows-Server-2003 | 5c6fe3db626b63a384230a1aa6b92ac416b0765f | [
"Unlicense"
] | 14 | 2020-11-14T09:43:20.000Z | 2021-08-28T08:59:57.000Z | // Copyright (C) 1995-1999 Microsoft Corporation. All rights reserved.
//
// Metadata.h
//
struct MD_INTERFACE;
struct MD_METHOD;
struct MD_PARAM;
struct MD_INTERFACE_CACHE;
#include "typeinfo.h"
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
//
// A structure to keep track of whether in a given type we have any interface pointers,
// and, if so whether we have an upper bound on them
//
inline BOOL IsUnbounded(LONG l)
// A negative count is the sentinel meaning "no fixed upper bound".
{
    return l < 0;
}
inline void MakeUnbounded(LONG& l)
// Force the count to the "no fixed upper bound" sentinel (any negative value).
{
    l = -1;
    ASSERT(IsUnbounded(l));
}
struct HAS_INTERFACES
{
LONG m_cInterfaces;
HAS_INTERFACES()
{
m_cInterfaces = 0;
}
void MakeUnbounded()
{
::MakeUnbounded(m_cInterfaces);
ASSERT(IsUnbounded());
}
BOOL IsUnbounded() const
{
return ::IsUnbounded(m_cInterfaces);
}
BOOL HasAnyInterfaces() const
{
return m_cInterfaces != 0;
}
void Add(const HAS_INTERFACES& him)
{
if (!IsUnbounded())
{
if (him.IsUnbounded())
MakeUnbounded();
else
m_cInterfaces += him.m_cInterfaces;
}
}
void Update(LONG& cInterfaces) const
// Update external state variables based on our contents
{
if (!::IsUnbounded(cInterfaces))
{
if (this->IsUnbounded())
::MakeUnbounded(cInterfaces);
else
cInterfaces += m_cInterfaces;
}
}
};
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
HAS_INTERFACES IsShareableType(PFORMAT_STRING pFormat);
inline BOOL IsPointer(PFORMAT_STRING pFormat)
// FC_RP, FC_UP, FC_OP and FC_FP occupy a contiguous range of format codes,
// so a pointer type can be recognized with a single range test.
{
    const unsigned char fc = *pFormat;
    return FC_RP <= fc && fc <= FC_FP;
}
inline HAS_INTERFACES IsSharableEmbeddedRepeatPointers(PFORMAT_STRING& pFormat)
// Walk one repeat-pointer description in the format string (the per-array
// pointer layout) and accumulate the interface-pointer count.  pFormat is
// advanced past the whole description as a side effect.
{
    HAS_INTERFACES me;
    LONG repeatCount;
    if (*pFormat == FC_FIXED_REPEAT)
    {
        pFormat += 2;
        repeatCount = *(ushort*)pFormat;
    }
    else
    {
        repeatCount = 0; // a variable repeat count: treat as unbounded if we get any interfaces at all
    }
    pFormat += 2;               // increment to increment field
    pFormat += sizeof(ushort);  // skip that
    pFormat += 2;               // ignore the 'array offset'
    ULONG cPointersSave = *(ushort*)pFormat;  // get number of pointers in each array element
    pFormat += sizeof(ushort);
    PFORMAT_STRING pFormatSave = pFormat;
    ULONG cPointers = cPointersSave;
    //
    // Loop over the number of pointers per array element. Can be more than one for an array of structures
    //
    for ( ; cPointers--; )
    {
        pFormat += 4;
        ASSERT(IsPointer(pFormat)); // recurse to check the pointer
        HAS_INTERFACES him = IsShareableType(pFormat);
        if (repeatCount == 0 && him.HasAnyInterfaces())
        {
            // A variable number of elements that each may hold interfaces:
            // no fixed upper bound can be computed.
            me.MakeUnbounded();
        }
        else
        {
            him.m_cInterfaces *= repeatCount; // scale his interface count by our array size
            me.Add(him);                      // fold in his contribution
        }
        pFormat += 4; // increment to the next pointer description
    }
    // return location of format string after the array's pointer description
    pFormat = pFormatSave + cPointersSave * 8;
    return me;
}
///////////////////////////////////////////////////////////////////////
inline HAS_INTERFACES IsSharableEmbeddedPointers(PFORMAT_STRING pFormat)
// Scan an FC_PP embedded-pointer description (terminated by FC_END) and
// accumulate the interface-pointer count of every pointer it mentions.
{
    HAS_INTERFACES me;
    pFormat += 2;               // Skip FC_PP and FC_PAD
    while (FC_END != *pFormat)
    {
        if (FC_NO_REPEAT == *pFormat)
        {
            // Single (non-repeated) embedded pointer.
            pFormat += 6;               // increment to the pointer description
            ASSERT(IsPointer(pFormat)); // recurse to check the pointer
            me.Add(IsShareableType(pFormat));
            pFormat += 4;               // increment to the next pointer description
        }
        else
        {
            // Repeated (array-style) pointer layout; advances pFormat itself.
            me.Add(IsSharableEmbeddedRepeatPointers(pFormat));
        }
    }
    return me;
}
///////////////////////////////////////////////////////////////////////
inline HAS_INTERFACES IsShareableType(PFORMAT_STRING pFormat)
// Classify the type at pFormat: how many interface pointers can it reach?
// We don't want to spend too much time figuring this out, as the whole point
// of asking is to save time in the copying process.  Err on the conservative
// side if we have to and answer "unbounded" for anything we don't analyze.
{
    HAS_INTERFACES me;
    switch(*pFormat)
    {
    // Flat data, strings and scalars: no interface pointers possible.
    case FC_STRUCT: case FC_CSTRUCT: case FC_C_CSTRING: case FC_C_BSTRING:
    case FC_C_SSTRING: case FC_C_WSTRING: case FC_CSTRING: case FC_BSTRING:
    case FC_SSTRING: case FC_WSTRING:
    case FC_CHAR: case FC_BYTE: case FC_SMALL: case FC_WCHAR:
    case FC_SHORT: case FC_LONG: case FC_HYPER: case FC_ENUM16:
    case FC_ENUM32: case FC_DOUBLE: case FC_FLOAT:
        //
        // No interfaces here!
        //
        break;

    case FC_IP:
        // An interface pointer itself: exactly one.
        me.m_cInterfaces = 1;
        break;

    case FC_RP: case FC_UP: case FC_OP:
        {
            if (SIMPLE_POINTER(pFormat[1]))
            {
                // No interface pointers
            }
            else
            {
                // Follow the signed 16-bit offset to the pointee description.
                PFORMAT_STRING pFormatPointee = pFormat + 2;
                pFormatPointee += *((signed short *)pFormatPointee);
                me.Add(IsShareableType(pFormatPointee));
            }
        }
        break;

    case FC_SMFARRAY:   // small fixed array
    case FC_LGFARRAY:   // large fixed array
        {
            // Skip the header (16- or 32-bit size) and scan any embedded
            // pointer description that follows.
            if (pFormat[0] == FC_SMFARRAY)
                pFormat += 2 + sizeof(ushort);
            else
                pFormat += 2 + sizeof(ulong);
            if (pFormat[0] == FC_PP)
            {
                me.Add(IsSharableEmbeddedPointers(pFormat));
            }
            break;
        }

    case FC_CARRAY:     // conformant array
        {
            pFormat += 8;
            if (pFormat[0] == FC_PP)
            {
                if (IsSharableEmbeddedPointers(pFormat).HasAnyInterfaces())
                {
                    // Ignore the count: any interfaces means no fixed upper bound since we're conformant
                    //
                    me.MakeUnbounded();
                }
            }
            break;
        }

    case FC_PSTRUCT:
        {
            // Structure with embedded pointers: scan its pointer layout.
            pFormat += 4;
            me.Add(IsSharableEmbeddedPointers(pFormat));
            break;
        }

    case FC_BOGUS_ARRAY:    // NYI
    case FC_BOGUS_STRUCT:   // NYI
    case FC_USER_MARSHAL:   // NYI
    default:
        // Not analyzed: be conservative and report an unbounded count.
        me.MakeUnbounded();
        break;
    }
    return me;
}
///////////////////////////////////////////////////////////////////////
inline HAS_INTERFACES CanShareParameter(PMIDL_STUB_DESC pStubDesc, const PARAM_DESCRIPTION& param, const PARAM_ATTRIBUTES& paramAttr)
// Answer whether this parameter is the kind of parameter which can be shared by a
// child frame with its parent, judging by the parameter type alone. The caller
// remains responsible for, e.g., checking whether any sort of sharing is allowed
// at all.
//
// REVIEW: There are probably more cases that can legitimately be shared beyond
// those which we presently call out.
//
{
    // Base types (and simple refs thereto) never carry interface pointers,
    // so they are always shareable.
    if (paramAttr.IsBasetype)
        return HAS_INTERFACES();
    // Otherwise, consult the parameter's type description in the format string.
    PFORMAT_STRING pFormatParam = pStubDesc->pFormatTypes + param.TypeOffset;
    return IsShareableType(pFormatParam);
}
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
struct MD_PARAM
// Per-parameter meta data, cached once per method at initialization time.
{
    BOOL m_fCanShare; // parameter value may be shared (not copied) with a child frame
    BOOL m_fMayHaveInterfacePointers; // parameter may transitively carry interface pointers
};
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
struct MD_METHOD
// Cached meta data for one method of an intercepted interface: the parsed NDR
// procedure header, per-parameter sharing information, and the CALLFRAMEINFO
// summary that is reported to call-frame clients.
{
    ///////////////////////////////////////////////////////////////
    //
    // State
    //
    ///////////////////////////////////////////////////////////////
    ULONG m_numberOfParams; // number of parameters, not counting the receiver
    PPARAM_DESCRIPTION m_params; // parameter descriptors inside the proc format string
    INTERPRETER_OPT_FLAGS m_optFlags; // interpreter optimization flags (HasReturn, HasExtensions, ...)
    ULONG m_rpcFlags; // RPC flags word from the proc header, if present
    PMIDL_STUB_DESC m_pStubDesc; // MIDL stub descriptor of the owning interface
    ULONG m_cbPushedByCaller; // stack bytes pushed by the caller, excluding the return value
    USHORT m_cbClientBuffer; // constant portion of the client marshalling buffer size
    USHORT m_cbServerBuffer; // constant portion of the server marshalling buffer size
    ULONG m_cbStackInclRet; // stack bytes including the return value slot
    ///////////////////////////////////////
    CALLFRAMEINFO m_info; // summary info handed out to clients
    BOOL m_fCanShareAllParameters; // TRUE iff every parameter is shareable with a child frame
    ///////////////////////////////////////
    const CInterfaceStubHeader* m_pHeader; // stub header we were initialized from
    ULONG m_iMethod; // zero-based index of this method in the vtable
    struct MD_INTERFACE* m_pmdInterface; // owning interface meta data (back pointer)
    MD_PARAM* m_rgParams; // per-parameter info; points into the interface's pool
    LPWSTR m_wszMethodName; // method name copied from type info, or NULL
    PNDR_PROC_HEADER_EXTS m_pHeaderExts; // optional proc header extensions, or NULL
    ///////////////////////////////////////////////////////////////
    //
    // Meta data setting
    //
    ///////////////////////////////////////////////////////////////
    void SetMetaData(const CInterfaceStubHeader* pHeader, ULONG iMethod, struct MD_INTERFACE* pmdInterface, TYPEINFOVTBL* pTypeInfoVtbl)
    // Initialize our meta data that has other than to do with parameters
    {
        // Set up the key pieces of base information
        //
        m_pHeader = pHeader;
        m_iMethod = iMethod;
        m_pmdInterface = pmdInterface;
        //
        // Extract the key information from the format string
        //
        PMIDL_SERVER_INFO pServerInfo = (PMIDL_SERVER_INFO) m_pHeader->pServerInfo;
        m_pStubDesc = pServerInfo->pStubDesc;
        ushort formatOffset = pServerInfo->FmtStringOffset[m_iMethod];
        m_numberOfParams = 0;
        // 0xffff marks a method without a format description
        if (formatOffset != 0xffff)
        {
            PFORMAT_STRING pFormat;
            INTERPRETER_FLAGS interpreterFlags;
            ULONG procNum;
            PFORMAT_STRING pNewProcDescr;
            ULONG numberOfParamsInclRet;
            pFormat = &pServerInfo->ProcString[formatOffset];
            ASSERT(pFormat[0] != 0); // no explicit handle is permitted, must be implicit
            interpreterFlags = *((PINTERPRETER_FLAGS)&pFormat[1]);
            if (interpreterFlags.HasRpcFlags)
            {
                // NOTE(review): the flags word is read at the current cursor and the
                // cursor advanced by 4 so the fixed fields below line up -- confirm
                // against the NDR -Oif procedure header layout.
                m_rpcFlags = *(ulong UNALIGNED *)pFormat;
                pFormat += sizeof(ulong);
            }
            else
                m_rpcFlags = 0;
            procNum = *(USHORT*)(&pFormat[2]); ASSERT(procNum == m_iMethod);
            m_cbStackInclRet = *(USHORT*)(&pFormat[4]);
            pNewProcDescr = &pFormat[6]; // additional procedure descriptor info provided in the 'new' interpreter
            m_cbClientBuffer = *(USHORT*)&pNewProcDescr[0];
            m_cbServerBuffer = *(USHORT*)&pNewProcDescr[2];
            m_optFlags = *((INTERPRETER_OPT_FLAGS*)&pNewProcDescr[4]);
            numberOfParamsInclRet = pNewProcDescr[5]; // includes return value
            m_params = (PPARAM_DESCRIPTION)(&pNewProcDescr[6]);
            if ( m_optFlags.HasExtensions )
            {
                // Variable-size header extensions precede the parameter descriptors.
                m_pHeaderExts = (NDR_PROC_HEADER_EXTS *)m_params;
                m_params = (PPARAM_DESCRIPTION)(((uchar*)m_params) + (m_pHeaderExts->Size));
            }
            else
            {
                m_pHeaderExts = NULL;
            }
            m_numberOfParams = m_optFlags.HasReturn ? numberOfParamsInclRet-1 : numberOfParamsInclRet;
            m_cbPushedByCaller = m_optFlags.HasReturn ? m_params[numberOfParamsInclRet-1].StackOffset : m_cbStackInclRet; // See ::GetStackSize
        }
        //
        // And some of the supplementary information
        //
        m_info.iid = *m_pHeader->piid;
        m_info.cMethod = m_pHeader->DispatchTableCount;
        m_info.iMethod = m_iMethod;
        m_info.cParams = m_numberOfParams;
        m_info.fHasInValues = FALSE;
        m_info.fHasInOutValues = FALSE;
        m_info.fHasOutValues = FALSE;
        m_info.fDerivesFromIDispatch = FALSE;
        m_info.cInInterfacesMax = 0;
        m_info.cInOutInterfacesMax = 0;
        m_info.cOutInterfacesMax = 0;
        m_info.cTopLevelInInterfaces = 0;
        //
        m_fCanShareAllParameters = TRUE; // until proven otherwise
        //
        // Capture the method name from the type information, when available.
        if (pTypeInfoVtbl && pTypeInfoVtbl->m_rgMethodDescs[m_iMethod].m_szMethodName)
        {
            m_wszMethodName = CopyString(pTypeInfoVtbl->m_rgMethodDescs[m_iMethod].m_szMethodName);
        }
        else
            m_wszMethodName = NULL;
    }
    MD_METHOD()
    {
        m_wszMethodName = NULL;
    }
    ~MD_METHOD()
    {
        // CopyString presumably allocates with the COM task allocator; NULL is OK here.
        CoTaskMemFree(m_wszMethodName);
    }
    void SetParamMetaData(MD_PARAM* rgParams)
    // Set our parameter-based meta data. Caller is giving a big-enough array of parameter meta data.
    {
        m_rgParams = rgParams;
        //
        // Iterate through each parameter
        //
        for (ULONG iparam = 0; iparam < m_numberOfParams; iparam++)
        {
            const PARAM_DESCRIPTION& param = m_params[iparam];
            const PARAM_ATTRIBUTES paramAttr = param.ParamAttr;
            //
            const HAS_INTERFACES me = CanShareParameter(m_pStubDesc, param, paramAttr);
            const BOOL fShare = !me.HasAnyInterfaces();
            //
            m_rgParams[iparam].m_fMayHaveInterfacePointers = me.HasAnyInterfaces();
            //
            m_rgParams[iparam].m_fCanShare = fShare;
            m_fCanShareAllParameters = (m_fCanShareAllParameters && fShare);
            //
            // Fold this parameter's interface counts into the per-direction maxima.
            if (!!paramAttr.IsIn)
            {
                if (!!paramAttr.IsOut)
                {
                    m_info.fHasInOutValues = TRUE;
                    me.Update(m_info.cInOutInterfacesMax);
                }
                else
                {
                    m_info.fHasInValues = TRUE;
                    me.Update(m_info.cInInterfacesMax);
                    //
                    // Update the top-level in-interface count
                    //
                    PFORMAT_STRING pFormatParam = m_pHeader->pServerInfo->pStubDesc->pFormatTypes + param.TypeOffset;
                    BOOL fIsInterfacePointer = (*pFormatParam == FC_IP);
                    if (fIsInterfacePointer)
                    {
                        m_info.cTopLevelInInterfaces++;
                    }
                }
            }
            else if (!!paramAttr.IsOut)
            {
                m_info.fHasOutValues = TRUE;
                me.Update(m_info.cOutInterfacesMax);
            }
        }
    }
};
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
struct MD_INTERFACE
// Meta data for one intercepted interface: the per-method MD_METHODs, the
// pooled per-parameter MD_PARAMs, and (optionally) membership in an
// MD_INTERFACE_CACHE. Reference counted; see Release() for the subtle
// interaction with the cache.
{
    ///////////////////////////////////////////////////////////////
    //
    // State
    //
    ///////////////////////////////////////////////////////////////
    LONG m_refs; // reference count; manipulated with Interlocked* operations
    ULONG m_cMethods; // total vtable size, including inherited methods
    ULONG m_cMethodsInBaseInterface; // methods inherited from IUnknown/IDispatch
    MD_METHOD* m_rgMethodsAlloc; // the actual allocation (non-base methods only)
    MD_METHOD* m_rgMethods; // biased pointer: indexable by absolute method index
    MD_PARAM* m_rgParams; // pooled parameter info for all methods
    BOOL m_fFreeInfoOnRelease; // TRUE if we own m_pHeader / m_szInterfaceName
    BOOL m_fDerivesFromIDispatch; // interface derives from IDispatch
    const CInterfaceStubHeader* m_pHeader; // stub header describing the interface
    LPCSTR m_szInterfaceName; // interface name, or NULL
    struct MD_INTERFACE_CACHE* m_pcache; // cache we live in, or NULL
    ///////////////////////////////////////////////////////////////
    //
    // Construction
    //
    ///////////////////////////////////////////////////////////////
    MD_INTERFACE()
    {
        m_rgMethodsAlloc = NULL;
        m_rgParams = NULL;
        m_pcache = NULL;
        m_fFreeInfoOnRelease = FALSE;
        m_refs = 1; // born holding one reference for the creator
    }
    ULONG AddRef() { ASSERT(m_refs>0); InterlockedIncrement(&m_refs); return m_refs;}
    ULONG Release();
    HRESULT AddToCache(MD_INTERFACE_CACHE* pcache);
private:
    // Destructor is private: lifetime is managed exclusively through Release().
    ~MD_INTERFACE()
    {
        delete [] m_rgMethodsAlloc;
        delete [] m_rgParams;
        if (m_fFreeInfoOnRelease)
        {
            delete const_cast<CInterfaceStubHeader*>(m_pHeader);
            CoTaskMemFree(*const_cast<LPSTR*>(&m_szInterfaceName));
        }
    }
public:
    ///////////////////////////////////////////////////////////////
    //
    // Meta data setting
    //
    ///////////////////////////////////////////////////////////////
    HRESULT SetMetaData(TYPEINFOVTBL* pTypeInfoVtbl, const CInterfaceStubHeader* pHeader, LPCSTR szInterfaceName)
    // Set the meta data of this interface given a reference to the header
    {
        HRESULT hr = S_OK;
        //
        // When built from type info we own the header and name and must free them.
        m_fFreeInfoOnRelease = (pTypeInfoVtbl != NULL);
        m_pHeader = pHeader;
        m_szInterfaceName = szInterfaceName;
        m_fDerivesFromIDispatch = FALSE;
        //
        m_cMethods = m_pHeader->DispatchTableCount;
        //
        // Figure out how many methods are in the base interface.
        //
        if (pTypeInfoVtbl)
        {
            if (pTypeInfoVtbl->m_iidBase == IID_IUnknown)
            {
                m_cMethodsInBaseInterface = 3; // just IUnknown
            }
            else if (pTypeInfoVtbl->m_iidBase == IID_IDispatch)
            {
                m_cMethodsInBaseInterface = 7; // IUnknown + IDispatch
            }
            else
            {
                m_cMethodsInBaseInterface = 3;
            }
        }
        else
        {
            m_cMethodsInBaseInterface = 3;
        }
        ASSERT(m_cMethodsInBaseInterface >= 3);
        //
        // Allocate and initialize the md for each method
        //
        ULONG cMethods = m_cMethods - m_cMethodsInBaseInterface;
        m_rgMethodsAlloc = new MD_METHOD[cMethods];
        if (m_rgMethodsAlloc)
        {
            // Bias the pointer so m_rgMethods can be indexed with the absolute
            // vtable method index even though base methods were not allocated.
            m_rgMethods = &m_rgMethodsAlloc[-(LONG)m_cMethodsInBaseInterface];
            for (ULONG iMethod = m_cMethodsInBaseInterface; iMethod < m_cMethods; iMethod++)
            {
                m_rgMethods[iMethod].SetMetaData(m_pHeader, iMethod, this, pTypeInfoVtbl);
            }
            //
            // How many parameters are there, total?
            //
            // NOTE(review): 'iMethod' is reused below without a fresh declaration;
            // this relies on pre-conformant (old MSVC) for-loop scoping rules.
            ULONG cParam = 0;
            for (iMethod = m_cMethodsInBaseInterface; iMethod < m_cMethods; iMethod++)
            {
                cParam += m_rgMethods[iMethod].m_numberOfParams;
            }
            //
            // Allocate and initialize the parameter information
            //
            m_rgParams = new MD_PARAM[cParam];
            if (m_rgParams)
            {
                // Hand each method its slice of the pooled parameter array.
                cParam = 0;
                for (iMethod = m_cMethodsInBaseInterface; iMethod < m_cMethods; iMethod++)
                {
                    m_rgMethods[iMethod].SetParamMetaData(&m_rgParams[cParam]);
                    cParam += m_rgMethods[iMethod].m_numberOfParams;
                }
            }
            else
                hr = E_OUTOFMEMORY;
        }
        else
            hr = E_OUTOFMEMORY;
        return hr;
    }
    HRESULT SetDerivesFromIDispatch(BOOL fDerivesFromIDispatch)
    // Record on ourselves, and on each method's CALLFRAMEINFO, whether the
    // interface derives from IDispatch.
    {
        m_fDerivesFromIDispatch = fDerivesFromIDispatch;
        for (ULONG iMethod = m_cMethodsInBaseInterface; iMethod < m_cMethods; iMethod++)
        {
            m_rgMethods[iMethod].m_info.fDerivesFromIDispatch = fDerivesFromIDispatch;
        }
        return S_OK;
    }
};
///////////////////////////////////////////////////////////////////////////
//
// MD_INTERFACE_CACHE
//
///////////////////////////////////////////////////////////////////////////
//
// NOTE: The constructor of this object can throw an exception, because
// MAP_SHARED contains an XSLOCK, and, well.... look at the comment
// on MAP_SHARED in lookaside.h.
//
struct MD_INTERFACE_CACHE : MAP_SHARED<MAP_KEY_GUID, MD_INTERFACE*>
// IID -> MD_INTERFACE* map shared across interceptors, guarded by the
// reader/writer lock inherited from MAP_SHARED.
{
    /////////////////////////////////////////////////
    //
    // Construction & destruction
    //
    /////////////////////////////////////////////////
    MD_INTERFACE_CACHE()
    {
    }
    ~MD_INTERFACE_CACHE()
    {
        //
        // Before the cache is destroyed, all interceptors therein should be
        // too, which will remove themselves from us.
        //
        // ASSERT(0 == Size() && "likely leak: interceptor support dll unloading while interceptors still exist");
    }
    /////////////////////////////////////////////////
    //
    // Operations
    //
    /////////////////////////////////////////////////
    HRESULT FindExisting(REFIID iid, MD_INTERFACE** ppmdInterface)
    // Look up the meta data for 'iid' under the shared (read) lock.
    // On S_OK the caller receives his own AddRef'd reference;
    // E_NOINTERFACE means the iid is not cached.
    {
        HRESULT hr = S_OK;
        *ppmdInterface = NULL;
        LockShared();
        if (Lookup(iid, ppmdInterface))
        {
            (*ppmdInterface)->AddRef(); // give caller his own reference
        }
        else
            hr = E_NOINTERFACE;
        ReleaseLock();
        return hr;
    }
};
inline HRESULT MD_INTERFACE::AddToCache(MD_INTERFACE_CACHE* pcache)
// Add us into the indicated cache. We'd better not already be in one.
// Takes the cache's exclusive lock; on success remembers the cache so that
// Release() knows to remove us again on final release.
{
    HRESULT hr = S_OK;
    ASSERT(NULL == m_pcache);
    ASSERT(pcache);
    pcache->LockExclusive();
    const IID& iid = *m_pHeader->piid;
    ASSERT(iid != GUID_NULL);
    ASSERT(!pcache->IncludesKey(iid));
    if (pcache->SetAt(iid, this))
    {
        m_pcache = pcache;
    }
    else
        hr = E_OUTOFMEMORY; // map insertion failed
    pcache->ReleaseLock();
    return hr;
}
inline ULONG MD_INTERFACE::Release()
// Release a MD_INTERFACE. Careful: if we're in the cache, then we could be dug out
// from the cache to get more references.
{
    // NOTE:
    //
    // This code is WRONG if m_pcache can change out from underneath us. But it can't
    // in current usage because the cache/no-cache decision is always made as part of
    // the creation logic, which is before another independent thread can get a handle
    // on us.
    //
    // If this ceases to be true, then we can deal with it by stealing a bit from the ref count word
    // for the 'am in cache' decision and interlocked operations to update the ref count and this
    // bit together.
    //
    if (m_pcache)
    {
        // We're in a cache. Get us out of there carefully.
        //
        LONG crefs;
        //
        for (;;)
        {
            crefs = m_refs; // snapshot; verified below with the compare-exchange
            //
            if (crefs > 1)
            {
                // There is at least one non-cache reference out there. We definitely won't
                // be poofing if we release with that condition holding
                //
                if (crefs == InterlockedCompareExchange(&m_refs, (crefs - 1), crefs))
                {
                    return crefs - 1;
                }
                else
                {
                    // Someone diddled with the ref count while we weren't looking. Go around and try again
                }
            }
            else
            {
                // Possibly the last reference: take the cache's exclusive lock so
                // nobody can hand out a new reference from the cache while we decide.
                MD_INTERFACE_CACHE* pcache = m_pcache; ASSERT(pcache);
                //
                pcache->LockExclusive();
                //
                crefs = InterlockedDecrement(&m_refs);
                if (0 == crefs)
                {
                    // The last public reference just went away, and, because the cache is locked, no
                    // more can appear. Nuke us!
                    //
                    const IID& iid = *m_pHeader->piid;
                    ASSERT(pcache->IncludesKey(iid));
                    //
                    pcache->RemoveKey(iid);
                    m_pcache = NULL;
                    //
                    delete this;
                }
                //
                pcache->ReleaseLock();
                //
                return crefs;
            }
#ifdef _X86_
            // Spin hint (0xF3 0x90 == 'pause'), reached only when the
            // compare-exchange above failed and we loop to retry.
            _asm
            {
                _emit 0xF3
                _emit 0x90
            };
#endif
        }
    }
    else
    {
        // We are getting released, yet we have yet to ever be put into the cache. Just
        // the normal, simple case.
        //
        long crefs = InterlockedDecrement(&m_refs);
        if (crefs == 0)
        {
            delete this;
        }
        return crefs;
    }
}
| 31.794588 | 144 | 0.478665 | [
"object"
] |
a96dd9c08d658b23baf3766731a02f7dd3e84b52 | 901 | h | C | owGameWMO/WMO_Part_Portal.h | adan830/OpenWow | 9b6e9c248bd185b1677fe616d2a3a81a35ca8894 | [
"Apache-2.0"
] | null | null | null | owGameWMO/WMO_Part_Portal.h | adan830/OpenWow | 9b6e9c248bd185b1677fe616d2a3a81a35ca8894 | [
"Apache-2.0"
] | null | null | null | owGameWMO/WMO_Part_Portal.h | adan830/OpenWow | 9b6e9c248bd185b1677fe616d2a3a81a35ca8894 | [
"Apache-2.0"
] | 1 | 2020-05-11T13:32:49.000Z | 2020-05-11T13:32:49.000Z | #pragma once
#include "WMO_Headers.h"
// FORWARD BEGIN
class WMO;
class WMO_Group;
class CWMO_Base_Instance;
// FORWARD END
class CWMO_Part_Portal
// One portal of a WMO (world map object): a polygon lying on a plane that
// connects an "inner" and an "outer" group, used for visibility tests.
{
public:
	CWMO_Part_Portal(const WMO* _parentWMO, const SWMO_PortalDef& _proto);
	// Draws the portal geometry with the given world transform.
	void Render(cmat4 _worldMatrix);
	uint16 getStartVertex() const { return m_StartVertex; }
	uint16 getCount() const { return m_Count; }
	const Plane& getPlane() const { return m_Plane; }
	// Associates a group with this portal; 'side' selects inner vs outer
	// (exact encoding defined in the implementation -- confirm there).
	void setGroup(int32 _group, int16 side);
	int32 getGrInner() { return m_GrInner; }
	int32 getGrOuter() { return m_GrOuter; }
	// Visibility test against the supplied clipping planes.
	bool IsVisible(CWMO_Base_Instance* _localContr, const Plane* _planes, uint32 _planesCount) const;
	// True when the given camera position is on the positive side of the plane.
	bool IsPositive(cvec3 _InvWorldCamera) const;
private:
	const WMO* m_ParentWMO; // owning WMO model (not owned by us)
	BoundingBox m_Bounds; // bounds of the portal polygon
	uint16 m_StartVertex; // first vertex of this portal in the portal vertex data
	uint16 m_Count; // number of vertices belonging to this portal
	Plane m_Plane; // plane the portal polygon lies on
	int32 m_GrInner; // group on the inner side (set via setGroup)
	int32 m_GrOuter; // group on the outer side (set via setGroup)
	SharedGeomPtr m_Geom; // geometry used by Render()
};
"render"
] |
a96f5b9c2df552011ceceed0022bd0fd631bcb86 | 5,577 | h | C | arangod/Aql/DocumentProducingHelper.h | usalko/arapy | 204038e9a8301b16a6ec31e10a3289ca0c7ada91 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | arangod/Aql/DocumentProducingHelper.h | usalko/arapy | 204038e9a8301b16a6ec31e10a3289ca0c7ada91 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | arangod/Aql/DocumentProducingHelper.h | usalko/arapy | 204038e9a8301b16a6ec31e10a3289ca0c7ada91 | [
"BSL-1.0",
"Apache-2.0"
] | null | null | null | ////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2022 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Heiko Kernbach
////////////////////////////////////////////////////////////////////////////////
#pragma once
#include <functional>
#include <string>
#include <unordered_set>
#include <vector>
#include <velocypack/Builder.h>
#include "Aql/types.h"
#include "Aql/AqlFunctionsInternalCache.h"
#include "Aql/AttributeNamePath.h"
#include "Aql/Projections.h"
#include "Containers/FlatHashSet.h"
#include "Indexes/IndexIterator.h"
#include "VocBase/Identifiers/LocalDocumentId.h"
#include "VocBase/voc-types.h"
namespace arangodb {
namespace transaction {
class Methods;
}
namespace velocypack {
class Builder;
class Slice;
} // namespace velocypack
namespace aql {
struct AqlValue;
class Expression;
class ExpressionContext;
class InputAqlItemRow;
class OutputAqlItemRow;
class QueryContext;
struct DocumentProducingFunctionContext {
  // Shared state for the document-producing callbacks: the current input/output
  // rows, the projections to apply, an optional post-filter expression, and
  // counters for scanned/filtered documents.
 public:
  DocumentProducingFunctionContext(
      InputAqlItemRow const& inputRow, OutputAqlItemRow* outputRow,
      RegisterId outputRegister, bool produceResult, aql::QueryContext& query,
      transaction::Methods& trx, Expression* filter,
      arangodb::aql::Projections const& projections,
      bool allowCoveringIndexOptimization, bool checkUniqueness);

  DocumentProducingFunctionContext() = delete;

  ~DocumentProducingFunctionContext() = default;

  void setOutputRow(OutputAqlItemRow* outputRow);

  bool getProduceResult() const noexcept;

  arangodb::aql::Projections const& getProjections() const noexcept;

  transaction::Methods* getTrxPtr() const noexcept;

  std::vector<size_t> const& getCoveringIndexAttributePositions()
      const noexcept;

  bool getAllowCoveringIndexOptimization() const noexcept;

  void setAllowCoveringIndexOptimization(
      bool allowCoveringIndexOptimization) noexcept;

  // Statistics counters; read-and-reset accessors below.
  void incrScanned() noexcept;

  void incrFiltered() noexcept;

  size_t getAndResetNumScanned() noexcept;

  size_t getAndResetNumFiltered() noexcept;

  InputAqlItemRow const& getInputRow() const noexcept;

  OutputAqlItemRow& getOutputRow() const noexcept;

  RegisterId getOutputRegister() const noexcept;

  // Returns true when the document has not been produced before (DISTINCT-style
  // de-duplication across multiple indexes).
  bool checkUniqueness(LocalDocumentId const& token);

  // Evaluate the post-filter (if any) against the document / provided values.
  bool checkFilter(velocypack::Slice slice);

  bool checkFilter(AqlValue (*getValue)(void const* ctx, Variable const* var,
                                        bool doCopy),
                   void const* filterContext);

  void reset();

  void setIsLastIndex(bool val);

  bool hasFilter() const noexcept;

  aql::AqlFunctionsInternalCache& aqlFunctionsInternalCache() {
    return _aqlFunctionsInternalCache;
  }

  arangodb::velocypack::Builder& getBuilder() noexcept;

 private:
  bool checkFilter(ExpressionContext& ctx);

  aql::AqlFunctionsInternalCache _aqlFunctionsInternalCache;
  InputAqlItemRow const& _inputRow;
  OutputAqlItemRow* _outputRow;
  aql::QueryContext& _query;
  transaction::Methods& _trx;
  Expression* _filter;  // optional post-filter; may be nullptr
  arangodb::aql::Projections const& _projections;
  size_t _numScanned;   // documents scanned since the last reset
  size_t _numFiltered;  // documents rejected by the filter since the last reset
  uint_fast16_t _killCheckCounter = 0;

  /// @brief Builder that is reused to generate projection results
  arangodb::velocypack::Builder _objectBuilder;

  /// @brief set of already returned documents. Used to make the result distinct
  containers::FlatHashSet<LocalDocumentId> _alreadyReturned;

  RegisterId const _outputRegister;
  bool const _produceResult;
  bool _allowCoveringIndexOptimization;

  /// @brief Flag if the current index pointer is the last of the list.
  /// Used in uniqueness checks.
  bool _isLastIndex;

  /// @brief Flag if we need to check for uniqueness
  bool _checkUniqueness;
};
namespace DocumentProducingCallbackVariant {
// Tag types used to select which flavor of document-producing callback to build.
struct WithProjectionsCoveredByIndex {};
struct WithProjectionsNotCoveredByIndex {};
struct DocumentCopy {};
}  // namespace DocumentProducingCallbackVariant

// Factory functions returning the callback matching the chosen variant tag.
// 'checkUniqueness' and 'skip' are compile-time template switches.
template<bool checkUniqueness, bool skip>
IndexIterator::CoveringCallback getCallback(
    DocumentProducingCallbackVariant::WithProjectionsCoveredByIndex,
    DocumentProducingFunctionContext& context);

template<bool checkUniqueness, bool skip>
IndexIterator::DocumentCallback getCallback(
    DocumentProducingCallbackVariant::WithProjectionsNotCoveredByIndex,
    DocumentProducingFunctionContext& context);

template<bool checkUniqueness, bool skip>
IndexIterator::DocumentCallback getCallback(
    DocumentProducingCallbackVariant::DocumentCopy,
    DocumentProducingFunctionContext& context);

template<bool checkUniqueness>
IndexIterator::LocalDocumentIdCallback getNullCallback(
    DocumentProducingFunctionContext& context);

template<bool checkUniqueness, bool skip>
IndexIterator::DocumentCallback buildDocumentCallback(
    DocumentProducingFunctionContext& context);
} // namespace aql
} // namespace arangodb
| 30.47541 | 80 | 0.751659 | [
"vector"
] |
a97a2db0a069472017c8496ef3d7328ca4fbc289 | 880 | h | C | algorithms/medium/1124. Longest Well-Performing Interval.h | MultivacX/letcode2020 | f86289f8718237303918a7705ae31625a12b68f6 | [
"MIT"
] | null | null | null | algorithms/medium/1124. Longest Well-Performing Interval.h | MultivacX/letcode2020 | f86289f8718237303918a7705ae31625a12b68f6 | [
"MIT"
] | null | null | null | algorithms/medium/1124. Longest Well-Performing Interval.h | MultivacX/letcode2020 | f86289f8718237303918a7705ae31625a12b68f6 | [
"MIT"
] | null | null | null | // 1124. Longest Well-Performing Interval
// https://leetcode.com/problems/longest-well-performing-interval/
// Runtime: 60 ms, faster than 81.70% of C++ online submissions for Longest Well-Performing Interval.
// Memory Usage: 23.4 MB, less than 5.93% of C++ online submissions for Longest Well-Performing Interval.
class Solution {
public:
    // Longest "well-performing" interval: the longest contiguous span in which
    // tiring days (hours > 8) strictly outnumber non-tiring days.
    //
    // Map each day to +1 (tiring) or -1 and keep a running prefix sum. A span
    // ending at 'day' is well-performing iff the prefix strictly increased over
    // it. If the prefix is positive the whole array-so-far qualifies; otherwise
    // the best span ending here starts just after the earliest index where the
    // prefix was exactly one less than now.
    int longestWPI(vector<int>& hours) {
        const int days = static_cast<int>(hours.size());
        unordered_map<int, int> firstSeen;  // prefix value -> earliest index
        firstSeen.emplace(0, -1);           // empty prefix before day 0
        int best = 0;
        int prefix = 0;
        for (int day = 0; day < days; ++day) {
            prefix += (hours[day] > 8) ? 1 : -1;
            if (prefix > 0) {
                // Everything from day 0 through 'day' is well-performing.
                best = max(best, day + 1);
            } else {
                auto earlier = firstSeen.find(prefix - 1);
                if (earlier != firstSeen.end()) {
                    best = max(best, day - earlier->second);
                }
                // emplace keeps only the earliest index for each prefix value.
                firstSeen.emplace(prefix, day);
            }
        }
        return best;
    }
};
"vector"
] |
a989290dcdc9f4f8733911b373bc705e2c2d0311 | 3,546 | h | C | lifting/prefix/include/souffle/CompiledTuple.h | makbn/eval-runner-docker | ed5650a3fe1c7d1c9a6d698608cb125d4aafd9dc | [
"Apache-2.0"
] | 2 | 2021-06-03T14:27:27.000Z | 2021-06-03T20:15:02.000Z | lifting/prefix/include/souffle/CompiledTuple.h | makbn/eval-runner-docker | ed5650a3fe1c7d1c9a6d698608cb125d4aafd9dc | [
"Apache-2.0"
] | null | null | null | lifting/prefix/include/souffle/CompiledTuple.h | makbn/eval-runner-docker | ed5650a3fe1c7d1c9a6d698608cb125d4aafd9dc | [
"Apache-2.0"
] | 3 | 2021-06-03T20:14:29.000Z | 2021-11-23T07:51:19.000Z | /*
* Souffle - A Datalog Compiler
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved
* Licensed under the Universal Permissive License v 1.0 as shown at:
* - https://opensource.org/licenses/UPL
* - <souffle root>/licenses/SOUFFLE-UPL.txt
*/
/************************************************************************
*
* @file CompiledTuple.h
*
* The central file covering the data structure utilized by
* the souffle compiler for representing relations in compiled queries.
*
***********************************************************************/
#pragma once
#include "PresenceCondition.h"
#include "RamRecord.h"
#include <iostream>
namespace souffle {
namespace ram {
/**
* The type of object stored within relations representing the actual
* tuple value. Each tuple consists of a constant number of components.
*
* @tparam Domain the domain of the component values
* @tparam arity the number of components within an instance
*/
template <typename Domain, std::size_t _arity>
struct Tuple {
    // some features for template meta programming
    using value_type = Domain;
    enum { arity = _arity };
    // the stored data
    Domain data[arity];
    // constructors, destructors and assignment are default
    // Convert to a RamRecord view over this tuple's data.
    souffle::RamRecord toRecord() const {
        return souffle::RamRecord(arity, data, PresenceCondition::makeTrue(), false); // TODO (owned or not??)
    }
    // provide access to components
    const Domain& operator[](std::size_t index) const {
        return data[index];
    }
    // provide access to components
    Domain& operator[](std::size_t index) {
        return data[index];
    }
    // a comparison operation
    bool operator==(const Tuple& other) const {
        for (std::size_t i = 0; i < arity; i++) {
            if (data[i] != other.data[i]) return false;
        }
        return true;
    }
    // inequality comparison
    bool operator!=(const Tuple& other) const {
        return !(*this == other);
    }
    // required to put tuples into e.g. a std::set container
    // (lexicographic, component by component)
    bool operator<(const Tuple& other) const {
        for (std::size_t i = 0; i < arity; ++i) {
            if (data[i] < other.data[i]) return true;
            if (data[i] > other.data[i]) return false;
        }
        return false;
    }
    // required to put tuples into e.g. a btree container
    // (lexicographic, component by component)
    bool operator>(const Tuple& other) const {
        for (std::size_t i = 0; i < arity; ++i) {
            if (data[i] > other.data[i]) return true;
            if (data[i] < other.data[i]) return false;
        }
        return false;
    }
    // allow tuples to be printed, e.g. "[1,2,3]"
    friend std::ostream& operator<<(std::ostream& out, const Tuple& tuple) {
        if (arity == 0) return out << "[]";
        out << "[";
        for (std::size_t i = 0; i < (std::size_t)(arity - 1); ++i) {
            out << tuple.data[i];
            out << ",";
        }
        return out << tuple.data[arity - 1] << "]";
    }
};
} // end of namespace souffle
// -- add hashing support --
namespace std {
template <typename Domain, std::size_t arity>
struct hash<souffle::ram::Tuple<Domain, arity>> {
    // Combine the component hashes using the boost::hash_combine scheme.
    size_t operator()(const souffle::ram::Tuple<Domain, arity>& value) const {
        std::hash<Domain> hash;
        size_t res = 0;
        for (unsigned i = 0; i < arity; i++) {
            // from boost hash combine
            res ^= hash(value[i]) + 0x9e3779b9 + (res << 6) + (res >> 2);
        }
        return res;
    }
};
} // namespace std
| 28.829268 | 110 | 0.575014 | [
"object"
] |
a98b655858cd05e01e9eb5580d90f081c00f3f1a | 31,027 | c | C | src/psi_petsc.c | qikaifzj/ludwig | e16d2d3472772fb3a36c1ee1bde028029c9ecd2d | [
"BSD-3-Clause"
] | 34 | 2018-10-05T11:54:23.000Z | 2022-03-21T06:40:49.000Z | src/psi_petsc.c | yangyang14641/ludwig | 25905b523bc67bc8f88bc757503f7e89362042af | [
"BSD-3-Clause"
] | 108 | 2018-07-26T11:01:23.000Z | 2022-03-31T07:51:10.000Z | src/psi_petsc.c | yangyang14641/ludwig | 25905b523bc67bc8f88bc757503f7e89362042af | [
"BSD-3-Clause"
] | 24 | 2018-12-21T19:05:00.000Z | 2022-03-31T07:51:32.000Z | /*****************************************************************************
*
* psi_petsc.c
*
* A solution of the Poisson equation for the potential and
* charge densities stored in the psi_t object.
*
* This uses the PETSc library.
*
* The Poisson equation homogeneous permittivity looks like
*
* nabla^2 \psi = - rho_elec / epsilon
*
* where psi is the potential, rho_elec is the free charge density, and
* epsilon is a permeability.
*
*
* Edinburgh Soft Matter and Statistical Physics Group and
* Edinburgh Parallel Computing Centre
*
* (c) 2013-2017 The University of Edinburgh
*
* Contributing Authors:
* Oliver Henrich (ohenrich@epcc.ed.ac.uk)
* Kevin Stratford (kevin@epcc.ed.ac.uk)
*
*****************************************************************************/
#include <assert.h>
#include <float.h>
#include <math.h>
#include <mpi.h>
#ifdef PETSC
#include "pe.h"
#include "coords.h"
#include "control.h"
#include "physics.h"
#include "psi_s.h"
#include "psi.h"
#include "psi_sor.h"
#include "psi_gradients.h"
#include "map.h"
#include "util.h"
#include "psi_petsc.h"
#include "petscksp.h"
#include "petscdmda.h"
DM da; /* distributed array */
Vec x,b; /* approx solution, RHS */
Mat A; /* linear system matrix */
MatNullSpace nullsp; /* null space of matrix */
KSP ksp; /* linear solver context */
PC pc; /* preconditioner context */
PetscReal norm; /* norm of solution error */
int i,j,its; /* scratch indices and iteration count */
int view_matrix = 0; /* flag: view assembled matrix (diagnostics) */
int view_vector = 0; /* flag: view vectors (diagnostics) */
/*****************************************************************************
*
* psi_petsc_init
*
* Initialises PETSc vectors, matrices and KSP solver context
*
*****************************************************************************/
int psi_petsc_init(psi_t * obj, fe_t * fe, f_vare_t fepsilon){

  MPI_Comm new_comm;
  int new_rank, nhalo;
  int mpi_cartsz[3], mpi_coords[3];
  int ntotal[3];
  char version[256];
  double tol_rel; /* Relative tolerance */
  double tol_abs; /* Absolute tolerance */
  int niteration = 10000; /* Number of iterations (default; overridden below) */
  KSPType solver_type;
  PCType pc_type;
  PetscReal rtol, abstol, dtol;
  int maxits;

  assert(obj);
  assert(fe);

  /* In order for the DMDA and the Cartesian MPI communicator
     to share the same part of the domain decomposition it is
     necessary to renumber the process ranks of the default
     PETSc communicator. Default PETSc is column major decomposition.
  */

  cs_cartsz(obj->cs, mpi_cartsz);
  cs_cart_coords(obj->cs, mpi_coords);
  cs_ntotal(obj->cs, ntotal);

  /* Set new rank according to PETSc ordering */
  new_rank = mpi_coords[Z]*mpi_cartsz[Y]*mpi_cartsz[X] \
    + mpi_coords[Y]*mpi_cartsz[X] + mpi_coords[X];

  /* Create communicator with new ranks according to PETSc ordering */
  MPI_Comm_split(PETSC_COMM_WORLD, 1, new_rank, &new_comm);

  /* Override default PETSc communicator */
  PETSC_COMM_WORLD = new_comm;

  /* Create 3D distributed array (fully periodic, box stencil of width nhalo) */

  cs_nhalo(obj->cs, &nhalo);

  DMDACreate3d(PETSC_COMM_WORLD, \
	DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, \
	DMDA_STENCIL_BOX, ntotal[X], ntotal[Y], ntotal[Z], \
	mpi_cartsz[X], mpi_cartsz[Y], mpi_cartsz[Z], 1, nhalo, \
	NULL, NULL, NULL, &da);

  /* Create global vectors on DM */
  DMCreateGlobalVector(da,&x);
  VecDuplicate(x,&b);

  /* Create matrix on DM pre-allocated according to distributed array structure */
  DMCreateMatrix(da,&A);
  DMSetMatType(da,MATMPIAIJ);
  DMSetMatrixPreallocateOnly(da,PETSC_TRUE);

  /* Initialise solver context and preconditioner */

  psi_reltol(obj, &tol_rel);
  psi_abstol(obj, &tol_abs);
  psi_maxits(obj, &niteration);

  KSPCreate(PETSC_COMM_WORLD,&ksp);
  KSPSetOperators(ksp,A,A);
  KSPSetTolerances(ksp,tol_rel,tol_abs,PETSC_DEFAULT,niteration);
  KSPSetFromOptions(ksp);
  KSPSetUp(ksp);

  /* Report solver configuration */
  PetscGetVersion(version, 256);
  pe_info(obj->pe, "\n%s\n", version);

  KSPGetType(ksp, &solver_type);
  KSPGetTolerances(ksp, &rtol, &abstol, &dtol, &maxits);
  KSPGetPC(ksp, &pc);
  PCGetType(pc, &pc_type);

  pe_info(obj->pe, "\nUsing Krylov subspace solver\n");
  pe_info(obj->pe, "----------------------------\n");
  pe_info(obj->pe, "Solver type %s\n", solver_type);
  pe_info(obj->pe, "Tolerances rtol %g abstol %g maxits %d\n", rtol, abstol, maxits);
  pe_info(obj->pe, "Preconditioner type %s\n", pc_type);

  /* fepsilon == NULL means uniform permittivity: assemble the plain Laplacian;
     otherwise assemble the variable-epsilon matrix. */
  if (fepsilon == NULL) psi_petsc_compute_laplacian(obj);
  if (fepsilon != NULL) psi_petsc_compute_matrix(obj, (fe_es_t *) fe, fepsilon);

  return 0;
}
/*****************************************************************************
*
* psi_petsc_compute_laplacian
*
* Computes the Laplacian for KSP solver.
* Note that this routine uses the PETSc stencil structure, which permits
* local assembly of the matrix.
*
*****************************************************************************/
int psi_petsc_compute_laplacian(psi_t * obj) {
int i, j, k;
int xs, ys, zs, xw, yw, zw, xe, ye, ze;
double epsilon;
#if defined NP_D3Q26
double v[27];
MatStencil row, col[27];
const double r10 = 0.1;
const double r30 = (1.0/30.0);
const double r15_7 = (7.0/15.0);
const double r15_64 = (64.0/15.0);
#elif defined NP_D3Q18
double v[19];
MatStencil row, col[19];
const double r3 = (1.0/3.0);
const double r6 = (1.0/6.0);
#else
/* NP_D3Q6 is the default */
double v[7];
MatStencil row, col[7];
#endif
assert(obj);
/* Get details of the distributed array data structure.
The PETSc directives return global indices, but
every process works only on its local copy. */
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
psi_epsilon(obj, &epsilon);
/* 3D-Laplacian with periodic BCs */
for(k=zs; k<ze; k++){
for(j=ys; j<ye; j++){
for(i=xs; i<xe; i++){
row.i = i;
row.j = j;
row.k = k;
#if defined NP_D3Q26
/* 27-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k; v[0] = r15_64 * epsilon;
col[1].i = i-1; col[1].j = j-1; col[1].k = k-1; v[1] = - r30 * epsilon;
col[2].i = i-1; col[2].j = j-1; col[2].k = k; v[2] = - r10 * epsilon;
col[3].i = i-1; col[3].j = j-1; col[3].k = k+1; v[3] = - r30 * epsilon;
col[4].i = i-1; col[4].j = j; col[4].k = k-1; v[4] = - r10 * epsilon;
col[5].i = i-1; col[5].j = j; col[5].k = k; v[5] = - r15_7 * epsilon;
col[6].i = i-1; col[6].j = j; col[6].k = k+1; v[6] = - r10 * epsilon;
col[7].i = i-1; col[7].j = j+1; col[7].k = k-1; v[7] = - r30 * epsilon;
col[8].i = i-1; col[8].j = j+1; col[8].k = k; v[8] = - r10 * epsilon;
col[9].i = i-1; col[9].j = j+1; col[9].k = k+1; v[9] = - r30 * epsilon;
col[10].i = i; col[10].j = j-1; col[10].k = k-1; v[10] = - r10 * epsilon;
col[11].i = i; col[11].j = j-1; col[11].k = k; v[11] = - r15_7 * epsilon;
col[12].i = i; col[12].j = j-1; col[12].k = k+1; v[12] = - r10 * epsilon;
col[13].i = i; col[13].j = j; col[13].k = k-1; v[13] = - r15_7 * epsilon;
col[14].i = i; col[14].j = j; col[14].k = k+1; v[14] = - r15_7 * epsilon;
col[15].i = i; col[15].j = j+1; col[15].k = k-1; v[15] = - r10 * epsilon;
col[16].i = i; col[16].j = j+1; col[16].k = k; v[16] = - r15_7 * epsilon;
col[17].i = i; col[17].j = j+1; col[17].k = k+1; v[17] = - r10 * epsilon;
col[18].i = i+1; col[18].j = j-1; col[18].k = k-1; v[18] = - r30 * epsilon;
col[19].i = i+1; col[19].j = j-1; col[19].k = k; v[19] = - r10 * epsilon;
col[20].i = i+1; col[20].j = j-1; col[20].k = k+1; v[20] = - r30 * epsilon;
col[21].i = i+1; col[21].j = j; col[21].k = k-1; v[21] = - r10 * epsilon;
col[22].i = i+1; col[22].j = j; col[22].k = k; v[22] = - r15_7 * epsilon;
col[23].i = i+1; col[23].j = j; col[23].k = k+1; v[23] = - r10 * epsilon;
col[24].i = i+1; col[24].j = j+1; col[24].k = k-1; v[24] = - r30 * epsilon;
col[25].i = i+1; col[25].j = j+1; col[25].k = k; v[25] = - r10 * epsilon;
col[26].i = i+1; col[26].j = j+1; col[26].k = k+1; v[26] = - r30 * epsilon;
MatSetValuesStencil(A,1,&row,27,col,v,INSERT_VALUES);
#elif defined NP_D3Q18
/* 19-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k; v[0] = 4.0 * epsilon;
col[1].i = i+1; col[1].j = j+1; col[1].k = k; v[1] = - r6 * epsilon;
col[2].i = i+1; col[2].j = j; col[2].k = k+1; v[2] = - r6 * epsilon;
col[3].i = i+1; col[3].j = j; col[3].k = k; v[3] = - r3 * epsilon;
col[4].i = i+1; col[4].j = j; col[4].k = k-1; v[4] = - r6 * epsilon;
col[5].i = i+1; col[5].j = j-1; col[5].k = k; v[5] = - r6 * epsilon;
col[6].i = i; col[6].j = j+1; col[6].k = k+1; v[6] = - r6 * epsilon;
col[7].i = i; col[7].j = j+1; col[7].k = k; v[7] = - r3 * epsilon;
col[8].i = i; col[8].j = j+1; col[8].k = k-1; v[8] = - r6 * epsilon;
col[9].i = i; col[9].j = j; col[9].k = k+1; v[9] = - r3 * epsilon;
col[10].i = i; col[10].j = j; col[10].k = k-1; v[10] = - r3 * epsilon;
col[11].i = i; col[11].j = j-1; col[11].k = k+1; v[11] = - r6 * epsilon;
col[12].i = i; col[12].j = j-1; col[12].k = k; v[12] = - r3 * epsilon;
col[13].i = i; col[13].j = j-1; col[13].k = k-1; v[13] = - r6 * epsilon;
col[14].i = i-1; col[14].j = j+1; col[14].k = k; v[14] = - r6 * epsilon;
col[15].i = i-1; col[15].j = j; col[15].k = k+1; v[15] = - r6 * epsilon;
col[16].i = i-1; col[16].j = j; col[16].k = k; v[16] = - r3 * epsilon;
col[17].i = i-1; col[17].j = j; col[17].k = k-1; v[17] = - r6 * epsilon;
col[18].i = i-1; col[18].j = j-1; col[18].k = k; v[18] = - r6 * epsilon;
MatSetValuesStencil(A,1,&row,19,col,v,INSERT_VALUES);
#else
/* 7-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k; v[0] = 6.0 * epsilon;
col[1].i = i-1; col[1].j = j; col[1].k = k; v[1] = - epsilon;
col[2].i = i; col[2].j = j-1; col[2].k = k; v[2] = - epsilon;
col[3].i = i; col[3].j = j; col[3].k = k-1; v[3] = - epsilon;
col[4].i = i+1; col[4].j = j; col[4].k = k; v[4] = - epsilon;
col[5].i = i; col[5].j = j+1; col[5].k = k; v[5] = - epsilon;
col[6].i = i; col[6].j = j; col[6].k = k+1; v[6] = - epsilon;
MatSetValuesStencil(A,1,&row,7,col,v,INSERT_VALUES);
#endif
}
}
}
/* Matrix assembly & halo swap */
MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
/* Retain the non-zero structure of the matrix */
MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
/* Set the matrix, preconditioner and nullspace */
KSPSetOperators(ksp,A,A);
MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nullsp);
#if PETSC_VERSION_GE(3, 6, 0)
MatSetNullSpace(A, nullsp);
#else
KSPSetNullSpace(ksp, nullsp);
#endif
MatNullSpaceDestroy(&nullsp);
KSPSetFromOptions(ksp);
if (view_matrix) {
pe_info(obj->pe, "\nPETSc output matrix\n");
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD, "matrix.log", &viewer);
#if PETSC_VERSION_GE(3, 7, 0)
PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
PetscViewerPopFormat(viewer);
#else
PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
#endif
MatView(A,viewer);;
PetscViewerDestroy(&viewer);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_compute_matrix
*
* Computes the matrix for KSP solver for a system with dielectric contrast.
* Note that this routine uses the PETSc stencil structure, which permits
* local assembly of the matrix.
*
*****************************************************************************/
int psi_petsc_compute_matrix(psi_t * obj, fe_es_t * fe, f_vare_t fepsilon) {
int ic, jc, kc, p;
int index;
int noffset[3];
int i, j, k, ia;
int xs, ys, zs, xw, yw, zw, xe, ye, ze;
double eps, grad_eps[3];
#if defined NP_D3Q26
double v[27];
MatStencil row, col[27];
const double r10 = 0.1;
const double r30 = (1.0/30.0);
const double r15_7 = (7.0/15.0);
const double r15_64 = (64.0/15.0);
const double matval[27] = {r15_64,
-r30, -r10 , -r30,
-r10, -r15_7, -r10,
-r30, -r10 , -r30,
-r10, -r15_7, -r10,
-r15_7, -r15_7,
-r10, -r15_7, -r10,
-r30, -r10 , -r30,
-r10, -r15_7, -r10,
-r30, -r10 , -r30};
#elif defined NP_D3Q18
double v[19];
MatStencil row, col[19];
const double r3 = (1.0/3.0);
const double r6 = (1.0/6.0);
const double matval[19] = {4.0,
-r6, -r6, -r3,
-r6, -r6, -r6,
-r3, -r6, -r3,
-r3, -r6, -r3,
-r6, -r6, -r6,
-r3, -r6, -r6};
#else
/* NP_D3Q6 is the default */
double v[7];
MatStencil row, col[7];
const double matval[7] = {6.0,
-1.0, -1.0, -1.0,
-1.0, -1.0, -1.0};
#endif
assert(obj);
assert(fe);
assert(fepsilon);
/* Get details of the distributed array data structure.
The PETSc directives return global indices, but
every process works only on its local copy. */
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
cs_nlocal_offset(obj->cs, noffset);
/* 3D-operator with periodic BCs */
for(k=zs; k<ze; k++){
for(j=ys; j<ye; j++){
for(i=xs; i<xe; i++){
row.i = i;
row.j = j;
row.k = k;
#if defined NP_D3Q26
/* 27-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k;
col[1].i = i-1; col[1].j = j-1; col[1].k = k-1;
col[2].i = i-1; col[2].j = j-1; col[2].k = k;
col[3].i = i-1; col[3].j = j-1; col[3].k = k+1;
col[4].i = i-1; col[4].j = j; col[4].k = k-1;
col[5].i = i-1; col[5].j = j; col[5].k = k;
col[6].i = i-1; col[6].j = j; col[6].k = k+1;
col[7].i = i-1; col[7].j = j+1; col[7].k = k-1;
col[8].i = i-1; col[8].j = j+1; col[8].k = k;
col[9].i = i-1; col[9].j = j+1; col[9].k = k+1;
col[10].i = i; col[10].j = j-1; col[10].k = k-1;
col[11].i = i; col[11].j = j-1; col[11].k = k;
col[12].i = i; col[12].j = j-1; col[12].k = k+1;
col[13].i = i; col[13].j = j; col[13].k = k-1;
col[14].i = i; col[14].j = j; col[14].k = k+1;
col[15].i = i; col[15].j = j+1; col[15].k = k-1;
col[16].i = i; col[16].j = j+1; col[16].k = k;
col[17].i = i; col[17].j = j+1; col[17].k = k+1;
col[18].i = i+1; col[18].j = j-1; col[18].k = k-1;
col[19].i = i+1; col[19].j = j-1; col[19].k = k;
col[20].i = i+1; col[20].j = j-1; col[20].k = k+1;
col[21].i = i+1; col[21].j = j; col[21].k = k-1;
col[22].i = i+1; col[22].j = j; col[22].k = k;
col[23].i = i+1; col[23].j = j; col[23].k = k+1;
col[24].i = i+1; col[24].j = j+1; col[24].k = k-1;
col[25].i = i+1; col[25].j = j+1; col[25].k = k;
col[26].i = i+1; col[26].j = j+1; col[26].k = k+1;
/* Laplacian part of operator */
ic = (col[0].i + 1) - noffset[X];
jc = (col[0].j + 1) - noffset[Y];
kc = (col[0].k + 1) - noffset[Z];
index = cs_index(obj->cs, ic, jc, kc);
fepsilon(fe, index, &eps);
psi_grad_eps_d3qx(obj, (fe_t *) fe, fepsilon, index, grad_eps);
for (p = 0; p < PSI_NGRAD; p++){
/* Laplacian part of operator */
v[p] = matval[p] * eps;
/* Addtional terms in generalised Poisson equation */
for(ia = 0; ia < 3; ia++){
v[p] -= grad_eps[ia] * psi_gr_wv[p] * psi_gr_rcs2 * psi_gr_cv[p][ia];
}
}
MatSetValuesStencil(A,1,&row,27,col,v,INSERT_VALUES);
#elif defined NP_D3Q18
/* 19-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k;
col[1].i = i+1; col[1].j = j+1; col[1].k = k;
col[2].i = i+1; col[2].j = j; col[2].k = k+1;
col[3].i = i+1; col[3].j = j; col[3].k = k;
col[4].i = i+1; col[4].j = j; col[4].k = k-1;
col[5].i = i+1; col[5].j = j-1; col[5].k = k;
col[6].i = i; col[6].j = j+1; col[6].k = k+1;
col[7].i = i; col[7].j = j+1; col[7].k = k;
col[8].i = i; col[8].j = j+1; col[8].k = k-1;
col[9].i = i; col[9].j = j; col[9].k = k+1;
col[10].i = i; col[10].j = j; col[10].k = k-1;
col[11].i = i; col[11].j = j-1; col[11].k = k+1;
col[12].i = i; col[12].j = j-1; col[12].k = k;
col[13].i = i; col[13].j = j-1; col[13].k = k-1;
col[14].i = i-1; col[14].j = j+1; col[14].k = k;
col[15].i = i-1; col[15].j = j; col[15].k = k+1;
col[16].i = i-1; col[16].j = j; col[16].k = k;
col[17].i = i-1; col[17].j = j; col[17].k = k-1;
col[18].i = i-1; col[18].j = j-1; col[18].k = k;
/* Laplacian part of operator */
ic = (col[0].i + 1) - noffset[X];
jc = (col[0].j + 1) - noffset[Y];
kc = (col[0].k + 1) - noffset[Z];
index = cs_index(obj->cs, ic, jc, kc);
fepsilon(fe, index, &eps);
psi_grad_eps_d3qx(obj, (fe_t *) fe, fepsilon, index, grad_eps);
for (p = 0; p < PSI_NGRAD; p++){
/* Laplacian part of operator */
v[p] = matval[p] * eps;
/* Addtional terms in generalised Poisson equation */
for(ia = 0; ia < 3; ia++){
v[p] -= grad_eps[ia] * psi_gr_wv[p] * psi_gr_rcs2 * psi_gr_cv[p][ia];
}
}
MatSetValuesStencil(A,1,&row,19,col,v,INSERT_VALUES);
#else
/* 7-point stencil */
col[0].i = row.i; col[0].j = row.j; col[0].k = row.k;
col[1].i = i-1; col[1].j = j; col[1].k = k;
col[2].i = i; col[2].j = j-1; col[2].k = k;
col[3].i = i; col[3].j = j; col[3].k = k-1;
col[4].i = i+1; col[4].j = j; col[4].k = k;
col[5].i = i; col[5].j = j+1; col[5].k = k;
col[6].i = i; col[6].j = j; col[6].k = k+1;
ic = (col[0].i + 1) - noffset[X];
jc = (col[0].j + 1) - noffset[Y];
kc = (col[0].k + 1) - noffset[Z];
index = cs_index(obj->cs, ic, jc, kc);
fepsilon(fe, index, &eps);
psi_grad_eps_d3qx(obj, (fe_t *) fe, fepsilon, index, grad_eps);
for (p = 0; p < PSI_NGRAD; p++){
/* Laplacian part of operator */
v[p] = matval[p] * eps;
/* Addtional terms in generalised Poisson equation */
for(ia = 0; ia < 3; ia++){
v[p] -= grad_eps[ia] * psi_gr_wv[p] * psi_gr_rcs2 * psi_gr_cv[p][ia];
}
}
MatSetValuesStencil(A,1,&row,7,col,v,INSERT_VALUES);
#endif
}
}
}
/* Matrix assembly & halo swap */
MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
/* Retain the non-zero structure of the matrix */
MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
/* Set the matrix, preconditioner and nullspace */
KSPSetOperators(ksp,A,A);
MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nullsp);
#if PETSC_VERSION_GE(3, 6, 0)
MatSetNullSpace(A, nullsp);
#else
KSPSetNullSpace(ksp, nullsp);
#endif
MatNullSpaceDestroy(&nullsp);
KSPSetFromOptions(ksp);
if (view_matrix) {
pe_info(obj->pe, "\nPETSc output matrix\n");
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD, "matrix.log", &viewer);
#if PETSC_VERSION_GE(3, 7, 0)
PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
PetscViewerPopFormat(viewer);
#else
PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
#endif
MatView(A,viewer);;
PetscViewerDestroy(&viewer);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_copy_psi_to_da
*
*****************************************************************************/
int psi_petsc_copy_psi_to_da(psi_t * obj) {
int ic,jc,kc,index;
int noffset[3];
int i,j,k;
int xs,ys,zs,xw,yw,zw,xe,ye,ze;
double *** psi_3d;
assert(obj);
cs_nlocal_offset(obj->cs, noffset);
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
DMDAVecGetArray(da, x, &psi_3d);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
psi_3d[k][j][i] = obj->psi[index];
}
}
}
DMDAVecRestoreArray(da, x, &psi_3d);
if (view_vector) {
pe_info(obj->pe, "\nPETSc output DA vector\n");
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD, "da.log", &viewer);
#if PETSC_VERSION_GE(3, 7, 0)
PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
PetscViewerPopFormat(viewer);
#else
PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
#endif
VecView(x,viewer);
PetscViewerDestroy(&viewer);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_copy_da_to_psi
*
*****************************************************************************/
int psi_petsc_copy_da_to_psi(psi_t * obj) {
int ic,jc,kc,index;
int noffset[3];
int i,j,k;
int xs,ys,zs,xw,yw,zw,xe,ye,ze;
double *** psi_3d;
assert(obj);
cs_nlocal_offset(obj->cs, noffset);
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
DMDAVecGetArray(da, x, &psi_3d);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
obj->psi[index] = psi_3d[k][j][i];
}
}
}
DMDAVecRestoreArray(da, x, &psi_3d);
psi_halo_psi(obj);
return 0;
}
/*****************************************************************************
*
* psi_petsc_set_rhs
*
* Sets the right hand side of the Poisson equation and
* modifies the boundary sites if an external electric
* field is present.
*
*****************************************************************************/
int psi_petsc_set_rhs(psi_t * obj) {
int ic,jc,kc,index;
int mpi_cartsz[3], mpi_coords[3];
int ntotal[3], noffset[3];
int periodic[3];
int i,j,k;
int xs,ys,zs,xw,yw,zw,xe,ye,ze;
double *** rho_3d;
double rho_elec;
double eunit, beta;
double epsilon, e0[3];
physics_t * phys = NULL;
assert(obj);
cs_cartsz(obj->cs, mpi_cartsz);
cs_cart_coords(obj->cs, mpi_coords);
cs_nlocal_offset(obj->cs, noffset);
cs_periodic(obj->cs, periodic);
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
DMDAVecGetArray(da, b, &rho_3d);
psi_unit_charge(obj, &eunit);
psi_beta(obj, &beta);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
psi_rho_elec(obj, index, &rho_elec);
/* Non-dimensional potential in Poisson eqn requires e/kT */
rho_3d[k][j][i] = rho_elec * eunit * beta;
}
}
}
/* Modify right hand side for external electric field */
physics_ref(&phys);
physics_e0(phys, e0);
if (e0[X] || e0[Y] || e0[Z]) {
cs_ntotal(obj->cs, ntotal);
psi_epsilon(obj, &epsilon);
if (periodic[X]) {
if (mpi_coords[X] == 0) {
for (k=zs; k<ze; k++) {
for (j=ys; j<ye; j++) {
rho_3d[k][j][0] += epsilon * e0[X] * ntotal[X];
}
}
}
if (mpi_coords[X] == mpi_cartsz[X]-1) {
for (k=zs; k<ze; k++) {
for (j=ys; j<ye; j++) {
rho_3d[k][j][xe-1] -= epsilon * e0[X] * ntotal[X];
}
}
}
}
if (periodic[Y]) {
if (mpi_coords[Y] == 0) {
for (k=zs; k<ze; k++) {
for (i=xs; i<xe; i++) {
rho_3d[k][0][i] += epsilon * e0[Y] * ntotal[Y];
}
}
}
if (mpi_coords[Y] == mpi_cartsz[Y]-1) {
for (k=zs; k<ze; k++) {
for (i=xs; i<xe; i++) {
rho_3d[k][ye-1][i] -= epsilon * e0[Y] * ntotal[Y];
}
}
}
}
if (periodic[Z]) {
if (mpi_coords[Z] == 0) {
for (j=ys; j<ye; j++) {
for (i=xs; i<xe; i++) {
rho_3d[0][j][i] += epsilon * e0[Z] * ntotal[Z];
}
}
}
if (mpi_coords[Z] == mpi_cartsz[Z]-1) {
for (j=ys; j<ye; j++) {
for (i=xs; i<xe; i++) {
rho_3d[ze-1][j][i] -= epsilon * e0[Z] * ntotal[Z];
}
}
}
}
}
DMDAVecRestoreArray(da, b, &rho_3d);
if (view_vector) {
pe_info(obj->pe, "\nPETSc output RHS\n");
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD, "rhs.log", &viewer);
#if PETSC_VERSION_GE(3, 7, 0)
PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
PetscViewerPopFormat(viewer);
#else
PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
#endif
VecView(b,viewer);;
PetscViewerDestroy(&viewer);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_set_rhs_vare
*
* Sets the right hand side of the Poisson equation and modifies
* the boundary sites if an external electric field is present,
* acounting for dielectric contrast.
*
*****************************************************************************/
int psi_petsc_set_rhs_vare(psi_t * obj, fe_es_t * fe, f_vare_t fepsilon) {
int ic,jc,kc,index;
int mpi_cartsz[3], mpi_coords[3];
int ntotal[3], noffset[3];
int periodic[3];
int i,j,k;
int xs,ys,zs,xw,yw,zw,xe,ye,ze;
double *** rho_3d;
double rho_elec;
double eunit, beta;
double eps, e0[3];
physics_t * phys = NULL;
assert(obj);
assert(fe);
assert(fepsilon);
cs_cartsz(obj->cs, mpi_cartsz);
cs_cart_coords(obj->cs, mpi_coords);
cs_nlocal_offset(obj->cs, noffset);
cs_periodic(obj->cs, periodic);
DMDAGetCorners(da,&xs,&ys,&zs,&xw,&yw,&zw);
DMDAVecGetArray(da, b, &rho_3d);
psi_unit_charge(obj, &eunit);
psi_beta(obj, &beta);
xe = xs + xw;
ye = ys + yw;
ze = zs + zw;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
psi_rho_elec(obj, index, &rho_elec);
/* Non-dimensional potential in Poisson eqn requires e/kT */
rho_3d[k][j][i] = rho_elec * eunit * beta;
}
}
}
/* Modify right hand side for external electric field */
physics_ref(&phys);
physics_e0(phys, e0);
if (e0[X] || e0[Y] || e0[Z]) {
cs_ntotal(obj->cs, ntotal);
if (periodic[X]) {
if (mpi_coords[X] == 0) {
i = 0;
ic = i - noffset[X] + 1;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] += eps * e0[X] * ntotal[X];
}
}
}
if (mpi_coords[X] == mpi_cartsz[X]-1) {
i = xe - 1;
ic = i - noffset[X] + 1;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] -= eps * e0[X] * ntotal[X];
}
}
}
}
if (periodic[Y]) {
if (mpi_coords[Y] == 0) {
j = 0;
jc = j - noffset[Y] + 1;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] += eps * e0[Y] * ntotal[Y];
}
}
}
if (mpi_coords[Y] == mpi_cartsz[Y]-1) {
j = ye - 1;
jc = j - noffset[Y] + 1;
for (k=zs; k<ze; k++) {
kc = k - noffset[Z] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] -= eps * e0[Y] * ntotal[Y];
}
}
}
}
if (periodic[Z]) {
if (mpi_coords[Z] == 0) {
k = 0;
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] += eps * e0[Z] * ntotal[Z];
}
}
}
if (mpi_coords[Z] == mpi_cartsz[Z]-1) {
k = ze - 1;
kc = k - noffset[Z] + 1;
for (j=ys; j<ye; j++) {
jc = j - noffset[Y] + 1;
for (i=xs; i<xe; i++) {
ic = i - noffset[X] + 1;
index = cs_index(obj->cs, ic,jc,kc);
fepsilon(fe, index, &eps);
rho_3d[k][j][i] -= eps * e0[Z] * ntotal[Z];
}
}
}
}
}
DMDAVecRestoreArray(da, b, &rho_3d);
if (view_vector) {
pe_info(obj->pe, "\nPETSc output RHS\n");
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD, "rhs.log", &viewer);
#if PETSC_VERSION_GE(3, 7, 0)
PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
PetscViewerPopFormat(viewer);
#else
PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_INDEX);
#endif
VecView(b,viewer);;
PetscViewerDestroy(&viewer);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_solve
*
* If the f_vare_t argument is NULL, the uniform epsilon solver is used.
* If the argument is present, the non-uniform solver is used.
*
*****************************************************************************/
int psi_petsc_solve(psi_t * obj, fe_t * fe, f_vare_t fepsilon) {
assert(obj);
assert(fe);
if(fepsilon == NULL) {
psi_petsc_set_rhs(obj);
}
if(fepsilon != NULL) {
psi_petsc_compute_matrix(obj, (fe_es_t *) fe, fepsilon);
psi_petsc_set_rhs_vare(obj, (fe_es_t *) fe, fepsilon);
}
psi_petsc_copy_psi_to_da(obj);
psi_petsc_poisson(obj);
psi_petsc_copy_da_to_psi(obj);
return 0;
}
/*****************************************************************************
*
* psi_petsc_poisson
*
* Solves the Poisson equation for constant permittivity.
* The vectors b, x are distributed arrays (DA).
*
*****************************************************************************/
int psi_petsc_poisson(psi_t * obj) {
assert(obj);
KSPSetInitialGuessNonzero(ksp,PETSC_TRUE);
KSPSolve(ksp,b,x);
if (is_statistics_step()) {
KSPGetResidualNorm(ksp,&norm);
KSPGetIterationNumber(ksp,&its);
pe_info(obj->pe, "\nKrylov solver\nNorm of residual %g at %d iterations\n",norm,its);
}
return 0;
}
/*****************************************************************************
*
* psi_petsc_finish
*
* Destroys the solver context, distributed array, matrix and vectors.
*
*****************************************************************************/
int psi_petsc_finish() {
KSPDestroy(&ksp);
VecDestroy(&x);
VecDestroy(&b);
MatDestroy(&A);
DMDestroy(&da);
return 0;
}
#endif
| 27.977457 | 89 | 0.520579 | [
"object",
"vector",
"3d"
] |
a98dfdabf0df45914ecb875d15bfa5dedc6307a6 | 2,603 | h | C | third_party/virtualbox/src/VBox/Devices/EFI/Firmware/NetworkPkg/Application/Ping6/Ping6.h | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 521 | 2019-03-29T15:44:08.000Z | 2022-03-22T09:46:19.000Z | third_party/virtualbox/src/VBox/Devices/EFI/Firmware/NetworkPkg/Application/Ping6/Ping6.h | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 30 | 2019-06-04T17:00:49.000Z | 2021-09-08T20:44:19.000Z | third_party/virtualbox/src/VBox/Devices/EFI/Firmware/NetworkPkg/Application/Ping6/Ping6.h | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | [
"MIT"
] | 99 | 2019-03-29T16:04:13.000Z | 2022-03-28T16:59:34.000Z | /** @file
The interface function declaration of shell application Ping6 (Ping for v6 series).
Copyright (c) 2009 - 2011, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef _PING6_H_
#define _PING6_H_
#define PING6_DEFAULT_TIMEOUT 5000
#define PING6_MAX_SEND_NUMBER 10000
#define PING6_MAX_BUFFER_SIZE 32768
#define PING6_ONE_SECOND 10000000
//
// A similar amount of time that passes in femtoseconds
// for each increment of TimerValue. It is for NT32 only.
//
#define NTTIMERPERIOD 358049
#pragma pack(1)
typedef struct _ICMP6_ECHO_REQUEST_REPLY {
UINT8 Type;
UINT8 Code;
UINT16 Checksum;
UINT16 Identifier;
UINT16 SequenceNum;
UINT64 TimeStamp;
UINT8 Data[1];
} ICMP6_ECHO_REQUEST_REPLY;
#pragma pack()
typedef struct _PING6_ICMP6_TX_INFO {
LIST_ENTRY Link;
UINT16 SequenceNum;
UINT64 TimeStamp;
EFI_IP6_COMPLETION_TOKEN *Token;
} PING6_ICMP6_TX_INFO;
typedef struct _PING6_PRIVATE_DATA {
EFI_HANDLE ImageHandle;
EFI_HANDLE NicHandle;
EFI_HANDLE Ip6ChildHandle;
EFI_IP6_PROTOCOL *Ip6;
EFI_EVENT Timer;
EFI_STATUS Status;
LIST_ENTRY TxList;
EFI_IP6_COMPLETION_TOKEN RxToken;
UINT16 RxCount;
UINT16 TxCount;
UINT64 RttSum;
UINT64 RttMin;
UINT64 RttMax;
UINT32 SequenceNum;
EFI_IPv6_ADDRESS SrcAddress;
EFI_IPv6_ADDRESS DstAddress;
UINT32 SendNum;
UINT32 BufferSize;
} PING6_PRIVATE_DATA;
/**
Reads and returns the current value of register.
In IA64, the register is the Interval Timer Vector (ITV).
In X86(IA32/X64), the register is the Time Stamp Counter (TSC)
@return The current value of the register.
**/
UINT64
ReadTime (
VOID
);
#endif
| 29.579545 | 85 | 0.607376 | [
"vector"
] |
a98e42bfaee7a83c19f87a4010980f19d58be3fe | 3,922 | h | C | third_party/webrtc/include/chromium/src/components/sync/driver/model_type_controller.h | ssaroha/node-webrtc | 74335bd07cbc52484a88c8eeb336c9bd001928a8 | [
"BSD-2-Clause"
] | 3 | 2018-02-22T18:06:56.000Z | 2021-08-28T12:49:27.000Z | third_party/webrtc/include/chromium/src/components/sync/driver/model_type_controller.h | ssaroha/node-webrtc | 74335bd07cbc52484a88c8eeb336c9bd001928a8 | [
"BSD-2-Clause"
] | null | null | null | third_party/webrtc/include/chromium/src/components/sync/driver/model_type_controller.h | ssaroha/node-webrtc | 74335bd07cbc52484a88c8eeb336c9bd001928a8 | [
"BSD-2-Clause"
] | 2 | 2017-08-16T08:15:01.000Z | 2018-03-27T00:07:30.000Z | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_SYNC_DRIVER_MODEL_TYPE_CONTROLLER_H_
#define COMPONENTS_SYNC_DRIVER_MODEL_TYPE_CONTROLLER_H_
#include <memory>
#include <string>
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "components/sync/base/model_type.h"
#include "components/sync/base/sync_prefs.h"
#include "components/sync/driver/data_type_controller.h"
namespace syncer {
class SyncClient;
struct ActivationContext;
// DataTypeController implementation for Unified Sync and Storage model types.
class ModelTypeController : public DataTypeController {
public:
// |dump_stack| is called when an unrecoverable error occurs.
ModelTypeController(
ModelType type,
const base::Closure& dump_stack,
SyncClient* sync_client,
const scoped_refptr<base::SingleThreadTaskRunner>& model_thread);
~ModelTypeController() override;
// DataTypeController implementation.
bool ShouldLoadModelBeforeConfigure() const override;
void LoadModels(const ModelLoadCallback& model_load_callback) override;
void GetAllNodes(const AllNodesCallback& callback) override;
void GetStatusCounters(const StatusCountersCallback& callback) override;
// Registers non-blocking data type with sync backend. In the process the
// activation context is passed to ModelTypeRegistry, where ModelTypeWorker
// gets created and connected with ModelTypeProcessor.
void RegisterWithBackend(BackendDataTypeConfigurer* configurer) override;
void StartAssociating(const StartCallback& start_callback) override;
void ActivateDataType(BackendDataTypeConfigurer* configurer) override;
void DeactivateDataType(BackendDataTypeConfigurer* configurer) override;
void Stop() override;
std::string name() const override;
State state() const override;
protected:
std::unique_ptr<DataTypeErrorHandler> CreateErrorHandler() override;
private:
void RecordStartFailure(ConfigureResult result) const;
void ReportLoadModelError(const SyncError& error);
// If the DataType controller is waiting for models to load, once the models
// are loaded this function should be called to let the base class
// implementation know that it is safe to continue with the activation.
// The error indicates whether the loading completed successfully.
void LoadModelsDone(ConfigureResult result, const SyncError& error);
// The function will do the real work when OnProcessorStarted got called. This
// is called on the UI thread.
void OnProcessorStarted(
SyncError error,
std::unique_ptr<ActivationContext> activation_context);
// The sync client, which provides access to this type's ModelTypeService.
SyncClient* const sync_client_;
// The thread the model type lives on.
scoped_refptr<base::SingleThreadTaskRunner> model_thread_;
// Sync prefs. Used for determinig if DisableSync should be called during call
// to Stop().
SyncPrefs sync_prefs_;
// State of this datatype controller.
State state_;
// Callbacks for use when starting the datatype.
ModelLoadCallback model_load_callback_;
// Controller receives |activation_context_| from SharedModelTypeProcessor
// callback and must temporarily own it until ActivateDataType is called.
std::unique_ptr<ActivationContext> activation_context_;
// This is a hack to prevent reconfigurations from crashing, because USS
// activation is not idempotent. RegisterWithBackend only needs to actually do
// something the first time after the type is enabled.
// TODO(crbug.com/647505): Remove this once the DTM handles things better.
bool activated_ = false;
DISALLOW_COPY_AND_ASSIGN(ModelTypeController);
};
} // namespace syncer
#endif // COMPONENTS_SYNC_DRIVER_MODEL_TYPE_CONTROLLER_H_
| 38.45098 | 80 | 0.789393 | [
"model"
] |
a98e7ed0306a6a8fb064cd50ebe19615a7d00ed4 | 1,311 | h | C | user/drivers/pci/libpci/src/userclient/UserClientTypes.h | tristanseifert/kush-os | 1ffd595aae8f3dc880e798eff72365b8b6c631f0 | [
"0BSD"
] | 4 | 2021-06-22T20:52:30.000Z | 2022-02-04T00:19:44.000Z | user/drivers/pci/libpci/src/userclient/UserClientTypes.h | tristanseifert/kush-os | 1ffd595aae8f3dc880e798eff72365b8b6c631f0 | [
"0BSD"
] | null | null | null | user/drivers/pci/libpci/src/userclient/UserClientTypes.h | tristanseifert/kush-os | 1ffd595aae8f3dc880e798eff72365b8b6c631f0 | [
"0BSD"
] | null | null | null | #ifndef LIBPCI_USERCLIENT_TYPES_H
#define LIBPCI_USERCLIENT_TYPES_H
#include <compare>
#include <cstddef>
#include <cstdint>
#include <span>
#include <string>
#include <string_view>
#include <vector>
namespace libpci {
/**
* Represents the address of a device on the PCI bus.
*/
struct BusAddress {
/// Bus segment; this should always be 0 for legacy PCI.
uint16_t segment{0};
uint8_t bus{0};
uint8_t device{0};
uint8_t function{0};
/// Create an empty device address.
BusAddress() = default;
/// Create a device address with the given segment, bus, device and function.
BusAddress(const uint16_t _segment, const uint8_t _bus, const uint8_t _device,
const uint8_t _function = 0) : segment(_segment), bus(_bus), device(_device),
function(_function) {};
/// Get the device address of a device's alternate function.
BusAddress(const BusAddress &da, const uint8_t _function) : segment(da.segment),
bus(da.bus), device(da.device), function(_function) {}
inline auto operator<=>(const BusAddress &) const = default;
};
}
namespace rpc {
bool serialize(std::span<std::byte> &, const libpci::BusAddress &);
bool deserialize(const std::span<std::byte> &, libpci::BusAddress &);
size_t bytesFor(const libpci::BusAddress &);
}
#endif
| 29.795455 | 89 | 0.697178 | [
"vector"
] |
a996ff0ff533094501012238500cf52c937378d7 | 35,243 | c | C | TPMCmd/tpm/src/crypt/CryptHash.c | neilsh-msft/ms-tpm-20-ref | d2643bb4ad40b5d8f10b3d6f69b960d23cd89c00 | [
"BSD-2-Clause"
] | null | null | null | TPMCmd/tpm/src/crypt/CryptHash.c | neilsh-msft/ms-tpm-20-ref | d2643bb4ad40b5d8f10b3d6f69b960d23cd89c00 | [
"BSD-2-Clause"
] | null | null | null | TPMCmd/tpm/src/crypt/CryptHash.c | neilsh-msft/ms-tpm-20-ref | d2643bb4ad40b5d8f10b3d6f69b960d23cd89c00 | [
"BSD-2-Clause"
] | null | null | null | /* Microsoft Reference Implementation for TPM 2.0
*
* The copyright in this software is being made available under the BSD License,
* included below. This software may be subject to other third party and
* contributor rights, including patent rights, and no such rights are granted
* under this license.
*
* Copyright (c) Microsoft Corporation
*
* All rights reserved.
*
* BSD License
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//** Description
//
// This file contains implementation of cryptographic functions for hashing.
//
//** Includes, Defines, and Types
#define _CRYPT_HASH_C_
#include "Tpm.h"
#define HASH_TABLE_SIZE (HASH_COUNT + 1)
extern const HASH_INFO g_hashData[HASH_COUNT + 1];
#if ALG_SHA1
HASH_DEF_TEMPLATE(SHA1);
#endif
#if ALG_SHA256
HASH_DEF_TEMPLATE(SHA256);
#endif
#if ALG_SHA384
HASH_DEF_TEMPLATE(SHA384);
#endif
#if ALG_SHA512
HASH_DEF_TEMPLATE(SHA512);
#endif
HASH_DEF nullDef = {{0}};
//** Obligatory Initialization Functions
//*** CryptHashInit()
// This function is called by _TPM_Init do perform the initialization operations for
// the library.
BOOL
CryptHashInit(
    void
    )
{
    // Hand off to the underlying crypto library's one-time hash setup.
    LibHashInit();
    // Initialization of the hash subsystem cannot fail.
    return TRUE;
}
//*** CryptHashStartup()
// This function is called by TPM2_Startup() in case there is work to do at startup.
// Currently, this is a placeholder.
BOOL
CryptHashStartup(
    void
    )
{
    // Placeholder: nothing is currently required at TPM2_Startup() time.
    return TRUE;
}
//** Hash Information Access Functions
//*** Introduction
// These functions provide access to the hash algorithm description information.
//*** CryptGetHashDef()
// This function accesses the hash descriptor associated with a hash a
// algorithm. The function returns NULL for TPM_ALG_NULL and fails if
// hashAlg is not a hash algorithm.
PHASH_DEF
CryptGetHashDef(
    TPM_ALG_ID hashAlg
    )
{
    PHASH_DEF retVal;
    switch(hashAlg)
    {
#if ALG_SHA1
        case ALG_SHA1_VALUE:
            // Assign and fall through to the common return like the other
            // cases. The previous 'return &SHA1_Def;' followed by an
            // unreachable 'break;' was inconsistent with the rest of the
            // switch.
            retVal = &SHA1_Def;
            break;
#endif
#if ALG_SHA256
        case ALG_SHA256_VALUE:
            retVal = &SHA256_Def;
            break;
#endif
#if ALG_SHA384
        case ALG_SHA384_VALUE:
            retVal = &SHA384_Def;
            break;
#endif
#if ALG_SHA512
        case ALG_SHA512_VALUE:
            retVal = &SHA512_Def;
            break;
#endif
        default:
            // TPM_ALG_NULL and any unimplemented algorithm map to the null
            // descriptor, whose digestSize is 0.
            retVal = &nullDef;
            break;
    }
    return retVal;
}
//*** CryptHashIsValidAlg()
// This function tests to see if an algorithm ID is a valid hash algorithm. If
// flag is true, then TPM_ALG_NULL is a valid hash.
// return type: BOOL
// TRUE hashAlg is a valid, implemented hash on this TPM.
// FALSE not valid
BOOL
CryptHashIsValidAlg(
    TPM_ALG_ID hashAlg,
    BOOL flag
    )
{
    switch(hashAlg)
    {
#if ALG_SHA1
        case ALG_SHA1_VALUE:
#endif
#if ALG_SHA256
        case ALG_SHA256_VALUE:
#endif
#if ALG_SHA384
        case ALG_SHA384_VALUE:
#endif
#if ALG_SHA512
        case ALG_SHA512_VALUE:
#endif
#if ALG_SM3_256
        // Fixed: this label was 'ALG_SHA256_VALUE', which is the wrong
        // algorithm and a duplicate case label (compile error) whenever
        // ALG_SHA256 is also enabled.
        case ALG_SM3_256_VALUE:
#endif
            return TRUE;
            break;
        case ALG_NULL_VALUE:
            // TPM_ALG_NULL counts as a valid hash only if the caller asks
            // for that interpretation.
            return flag;
            break;
        default:
            break;
    }
    return FALSE;
}
//*** GetHashInfoPointer()
// This function returns a pointer to the hash info for the algorithm. If the
// algorithm is not supported, function returns a pointer to the data block
// associated with TPM_ALG_NULL.
// NOTE: The data structure must have a digest size of 0 for TPM_ALG_NULL.
static
const HASH_INFO *
GetHashInfoPointer(
    TPM_ALG_ID hashAlg
    )
{
    const HASH_INFO *hashInfo = g_hashData;
    UINT32 remaining;
//
    // Walk the implemented-hash entries; the TPM_ALG_NULL descriptor sits
    // one past the end of the table and is never matched here.
    for(remaining = HASH_COUNT; remaining > 0; remaining--, hashInfo++)
    {
        if(hashInfo->alg == hashAlg)
            return hashInfo;
    }
    // Either the input was TPM_ALG_NULL or the algorithm is not in the
    // table; in both cases return the TPM_ALG_NULL "hash" descriptor,
    // which has a digest size of 0.
    return &g_hashData[HASH_COUNT];
}
//*** CryptHashGetAlgByIndex()
// This function is used to iterate through the hashes. TPM_ALG_NULL
// is returned for all indexes that are not valid hashes.
// If the TPM implements 3 hashes, then an 'index' value of 0 will
// return the first implemented hash and an 'index' of 2 will return the
// last. All other index values will return TPM_ALG_NULL.
//
// return type: TPM_ALG_ID
// ALG_xxx a hash algorithm
// ALG_NULL this can be used as a stop value
LIB_EXPORT TPM_ALG_ID
CryptHashGetAlgByIndex(
    UINT32 index // IN: the index
    )
{
    // Out-of-range indexes yield TPM_ALG_NULL, which callers can use as a
    // stop value when iterating over the implemented hashes.
    return (index >= HASH_COUNT) ? TPM_ALG_NULL : g_hashData[index].alg;
}
//*** CryptHashGetDigestSize()
// Returns the size of the digest produced by the hash. If 'hashAlg' is not a hash
// algorithm, the TPM will FAIL.
// return type: UINT16
// 0 TPM_ALG_NULL
// > 0 the digest size
//
LIB_EXPORT UINT16
CryptHashGetDigestSize(
    TPM_ALG_ID hashAlg // IN: hash algorithm to look up
    )
{
    // The null descriptor carries a digest size of 0, so TPM_ALG_NULL
    // naturally reports 0 here.
    PHASH_DEF hashDef = CryptGetHashDef(hashAlg);
    return hashDef->digestSize;
}
//*** CryptHashGetBlockSize()
// Returns the size of the block used by the hash. If 'hashAlg' is not a hash
// algorithm, the TPM will FAIL.
// return type: UINT16
// 0 TPM_ALG_NULL
// > 0 the digest size
//
LIB_EXPORT UINT16
CryptHashGetBlockSize(
    TPM_ALG_ID hashAlg // IN: hash algorithm to look up
    )
{
    // TPM_ALG_NULL maps to the null descriptor, which reports a block
    // size of 0.
    PHASH_DEF hashDef = CryptGetHashDef(hashAlg);
    return hashDef->blockSize;
}
//*** CryptHashGetDer
// This function returns a pointer to the DER string for the algorithm and
// indicates its size.
LIB_EXPORT UINT16
CryptHashGetDer(
    TPM_ALG_ID hashAlg, // IN: the algorithm to look up
    const BYTE **p // OUT: receives a pointer to the DER prefix
    )
{
    // Look up the descriptor (the TPM_ALG_NULL entry is returned for
    // unknown algorithms) and hand back its DER string and length.
    const HASH_INFO *info = GetHashInfoPointer(hashAlg);
    *p = info->der;
    return info->derSize;
}
//*** CryptHashGetContextAlg()
// This function returns the hash algorithm associated with a hash context.
TPM_ALG_ID
CryptHashGetContextAlg(
    PHASH_STATE state // IN: the context to check
    )
{
    // The algorithm is recorded in the state when the sequence is started.
    TPM_ALG_ID contextAlg = state->hashAlg;
    return contextAlg;
}
//** State Import and Export
//*** CryptHashCopyState
// This function is used to "clone" a HASH_STATE.
LIB_EXPORT void
CryptHashCopyState(
    HASH_STATE *out, // OUT: destination of the state
    const HASH_STATE *in // IN: source of the state
    )
{
    // Source and destination must already agree on the kind of sequence
    // (hash vs. HMAC vs. SMAC) being cloned.
    pAssert(out->type == in->type);
    out->hashAlg = in->hashAlg;
    out->def = in->def;
    if(in->hashAlg != TPM_ALG_NULL)
    {
        // Just verify that the hashAlg makes sense (is implemented)
        CryptGetHashDef(in->hashAlg);
        // ... and copy.
        HASH_STATE_COPY(out, in);
    }
    if(in->type == HASH_STATE_HMAC)
    {
        // For an HMAC sequence, the key material (held in the larger
        // HMAC_STATE wrapper) must be carried over as well.
        const HMAC_STATE *hIn = (HMAC_STATE *)in;
        HMAC_STATE *hOut = (HMAC_STATE *)out;
        hOut->hmacKey = hIn->hmacKey;
    }
    return;
}
//*** CryptHashExportState()
// This function is used to export a hash or HMAC hash state. This function
// would be called when preparing to context save a sequence object.
void
CryptHashExportState(
    PCHASH_STATE internalFmt, // IN: the hash state formatted for use by
    // library
    PEXPORT_HASH_STATE externalFmt // OUT: the exported hash state
    )
{
    BYTE *outBuf = (BYTE *)externalFmt;
    //
    // The export buffer must be at least as large as the internal state.
    cAssert(sizeof(HASH_STATE) <= sizeof(EXPORT_HASH_STATE));
    // the following #define is used to move data from an aligned internal data
    // structure to a byte buffer (external format data).
#define CopyToOffset(value) \
    memcpy(&outBuf[offsetof(HASH_STATE,value)], &internalFmt->value, \
    sizeof(internalFmt->value))
    // Copy the hashAlg
    CopyToOffset(hashAlg);
    CopyToOffset(type);
#ifdef HASH_STATE_SMAC
    // SMAC state has no library-specific representation, so a straight
    // byte copy of the whole structure is sufficient.
    if(internalFmt->type == HASH_STATE_SMAC)
    {
        memcpy(outBuf, internalFmt, sizeof(HASH_STATE));
        return;
    }
#endif
    if(internalFmt->type == HASH_STATE_HMAC)
    {
        // An HMAC sequence also carries its key TPM2B; copy it at its
        // HMAC_STATE offset in the export buffer.
        HMAC_STATE *from = (HMAC_STATE *)internalFmt;
        memcpy(&outBuf[offsetof(HMAC_STATE, hmacKey)], &from->hmacKey,
        sizeof(from->hmacKey));
    }
    // Let the underlying library serialize its own context representation.
    if(internalFmt->hashAlg != TPM_ALG_NULL)
        HASH_STATE_EXPORT(externalFmt, internalFmt);
}
//*** CryptHashImportState()
// This function is used to import the hash state. This function
// would be called to import a hash state when the context of a sequence object
// was being loaded.
void
CryptHashImportState(
    PHASH_STATE internalFmt, // OUT: the hash state formatted for use by
    // the library
    PCEXPORT_HASH_STATE externalFmt // IN: the exported hash state
    )
{
    BYTE *inBuf = (BYTE *)externalFmt;
    //
    // Inverse of CopyToOffset() in CryptHashExportState(): move a field
    // from the byte-aligned export buffer into the aligned structure.
#define CopyFromOffset(value) \
    memcpy(&internalFmt->value, &inBuf[offsetof(HASH_STATE,value)], \
    sizeof(internalFmt->value))
    // Copy the hashAlg of the byte-aligned input structure to the structure-aligned
    // internal structure.
    CopyFromOffset(hashAlg);
    CopyFromOffset(type);
    if(internalFmt->hashAlg != TPM_ALG_NULL)
    {
#ifdef HASH_STATE_SMAC
        // SMAC state was exported as a straight byte copy; restore it the
        // same way.
        if(internalFmt->type == HASH_STATE_SMAC)
        {
            memcpy(internalFmt, inBuf, sizeof(HASH_STATE));
            return;
        }
#endif
        internalFmt->def = CryptGetHashDef(internalFmt->hashAlg);
        // Let the underlying library deserialize its context representation.
        HASH_STATE_IMPORT(internalFmt, inBuf);
        if(internalFmt->type == HASH_STATE_HMAC)
        {
            // Restore the HMAC key TPM2B from its HMAC_STATE offset.
            HMAC_STATE *to = (HMAC_STATE *)internalFmt;
            memcpy(&to->hmacKey, &inBuf[offsetof(HMAC_STATE, hmacKey)],
            sizeof(to->hmacKey));
        }
    }
}
//** State Modification Functions
//***HashEnd()
// Local function to complete a hash that uses the hashDef instead of an algorithm
// ID. This function is used to complete the hash and only return a partial digest.
// The return value is the size of the data copied.
static UINT16
HashEnd(
    PHASH_STATE hashState, // IN: the hash state
    UINT32 dOutSize, // IN: the size of receive buffer
    PBYTE dOut // OUT: the receive buffer
    )
{
    BYTE temp[MAX_DIGEST_SIZE];
    // A null algorithm or a state that is not a plain hash sequence
    // produces no output bytes.
    if((hashState->hashAlg == TPM_ALG_NULL)
    || (hashState->type != HASH_STATE_HASH))
        dOutSize = 0;
    if(dOutSize > 0)
    {
        hashState->def = CryptGetHashDef(hashState->hashAlg);
        // Set the final size; never copy more than one full digest.
        dOutSize = MIN(dOutSize, hashState->def->digestSize);
        // Complete into the temp buffer and then copy
        HASH_END(hashState, temp);
        // Don't want any other functions calling the HASH_END method
        // directly.
#undef HASH_END
        memcpy(dOut, &temp, dOutSize);
    }
    // The sequence is consumed regardless of whether any bytes were copied.
    hashState->type = HASH_STATE_EMPTY;
    return (UINT16)dOutSize;
}
//*** CryptHashStart()
// Functions starts a hash stack
// Start a hash stack and returns the digest size. As a side effect, the
// value of 'stateSize' in hashState is updated to indicate the number of bytes
// of state that were saved. This function calls GetHashServer() and that function
// will put the TPM into failure mode if the hash algorithm is not supported.
//
// This function does not use the sequence parameter. If it is necessary to import
// or export context, this will start the sequence in a local state
// and export the state to the input buffer. Will need to add a flag to the state
// structure to indicate that it needs to be imported before it can be used.
// (BLEH).
// return type: UINT16
// 0 hash is TPM_ALG_NULL
// >0 digest size
LIB_EXPORT UINT16
CryptHashStart(
    PHASH_STATE hashState, // OUT: the running hash state
    TPM_ALG_ID hashAlg // IN: hash algorithm
    )
{
    UINT16 retVal;
    // Run the algorithm self-test hook before first use.
    TEST(hashAlg);
    hashState->hashAlg = hashAlg;
    if(hashAlg == TPM_ALG_NULL)
    {
        // A null hash has a zero-size digest and needs no library context.
        retVal = 0;
    }
    else
    {
        hashState->def = CryptGetHashDef(hashAlg);
        HASH_START(hashState);
        retVal = hashState->def->digestSize;
    }
    // Keep other code from calling the library start macro directly.
#undef HASH_START
    hashState->type = HASH_STATE_HASH;
    return retVal;
}
//*** CryptDigestUpdate()
// Add data to a hash or HMAC, SMAC stack.
//
void
CryptDigestUpdate(
    PHASH_STATE hashState, // IN: the hash context information
    UINT32 dataSize, // IN: the size of data to be added
    const BYTE *data // IN: data to be hashed
    )
{
    // Updating a TPM_ALG_NULL sequence is a no-op.
    if(hashState->hashAlg != TPM_ALG_NULL)
    {
        if((hashState->type == HASH_STATE_HASH)
        || (hashState->type == HASH_STATE_HMAC))
            HASH_DATA(hashState, dataSize, (BYTE *)data);
#if SMAC_IMPLEMENTED
        // SMAC sequences dispatch through their own method table.
        else if(hashState->type == HASH_STATE_SMAC)
            (hashState->state.smac.smacMethods.data)(&hashState->state.smac.state,
                                                     dataSize, data);
#endif // SMAC_IMPLEMENTED
        else
            // Any other state type here indicates an internal logic error.
            FAIL(FATAL_ERROR_INTERNAL);
    }
    return;
}
//*** CryptHashEnd()
// Complete a hash or HMAC computation. This function will place the smaller of
// 'digestSize' or the size of the digest in 'dOut'. The number of bytes in the
// placed in the buffer is returned. If there is a failure, the returned value
// is <= 0.
// return type: UINT16
// 0 no data returned
// > 0 the number of bytes in the digest or dOutSize, whichever is smaller
LIB_EXPORT UINT16
CryptHashEnd(
    PHASH_STATE hashState, // IN: the state of hash stack
    UINT32 dOutSize, // IN: size of digest buffer
    BYTE *dOut // OUT: hash digest
    )
{
    // Only a plain hash sequence may be completed here; HMAC sequences are
    // completed with CryptHmacEnd().
    pAssert(hashState->type == HASH_STATE_HASH);
    return HashEnd(hashState, dOutSize, dOut);
}
//*** CryptHashBlock()
// Start a hash, hash a single block, update 'digest' and return the size of
// the results.
//
// The "digestSize" parameter can be smaller than the digest. If so, only the more
// significant bytes are returned.
// return type: UINT16
// >= 0 number of bytes placed in 'dOut'
LIB_EXPORT UINT16
CryptHashBlock(
    TPM_ALG_ID hashAlg, // IN: The hash algorithm
    UINT32 dataSize, // IN: size of buffer to hash
    const BYTE *data, // IN: the buffer to hash
    UINT32 dOutSize, // IN: size of the digest buffer
    BYTE *dOut // OUT: digest buffer
    )
{
    // One-shot convenience: start a local sequence, feed it the single
    // buffer, and finish it. HashEnd() clamps the copy to the digest size.
    HASH_STATE localState;
    CryptHashStart(&localState, hashAlg);
    CryptDigestUpdate(&localState, dataSize, data);
    return HashEnd(&localState, dOutSize, dOut);
}
//*** CryptDigestUpdate2B()
// This function updates a digest (hash or HMAC) with a TPM2B.
//
// This function can be used for both HMAC and hash functions so the
// 'digestState' is void so that either state type can be passed.
LIB_EXPORT void
CryptDigestUpdate2B(
    PHASH_STATE state, // IN: the digest state
    const TPM2B *bIn // IN: 2B containing the data
    )
{
    // The caller must supply a 2B; a zero-size 2B is acceptable and simply
    // leaves the digest unchanged.
    pAssert(bIn != NULL);
    CryptDigestUpdate(state, bIn->size, bIn->buffer);
}
//*** CryptHashEnd2B()
// This function is the same as CryptCompleteHash() but the digest is
// placed in a TPM2B. This is the most common use and this is provided
// for specification clarity. 'digest.size' should be set to indicate the number of
// bytes to place in the buffer
// return type: UINT16
// >=0 the number of bytes placed in 'digest.buffer'
LIB_EXPORT UINT16
CryptHashEnd2B(
    PHASH_STATE state, // IN: the hash state
    P2B digest // IN: the size of the buffer Out: requested
    // number of bytes
    )
{
    // Unpack the TPM2B and delegate; 'digest->size' bounds the copy.
    UINT32 requested = digest->size;
    return CryptHashEnd(state, requested, digest->buffer);
}
//*** CryptDigestUpdateInt()
// This function is used to include an integer value to a hash stack. The function
// marshals the integer into its canonical form before calling CryptDigestUpdate().
LIB_EXPORT void
CryptDigestUpdateInt(
    void *state, // IN: the state of hash stack
    UINT32 intSize, // IN: the size of 'intValue' in bytes
    UINT64 intValue // IN: integer value to be hashed
    )
{
#if LITTLE_ENDIAN_TPM
    // Canonical (marshaled) form is big-endian, so byte-swap on LE builds.
    intValue = REVERSE_ENDIAN_64(intValue);
#endif
    // After the swap, the big-endian representation of the value occupies
    // the last 'intSize' bytes of the 8-byte buffer; hash only those.
    CryptDigestUpdate(state, intSize, &((BYTE *)&intValue)[8 - intSize]);
}
//** HMAC Functions
//*** CryptHmacStart
// This function is used to start an HMAC using a temp
// hash context. The function does the initialization
// of the hash with the HMAC key XOR iPad and updates the
// HMAC key XOR oPad.
//
// The function returns the number of bytes in a digest produced by 'hashAlg'.
// return type: UINT16
// >= 0 number of bytes in digest produced by 'hashAlg' (may be zero)
//
LIB_EXPORT UINT16
CryptHmacStart(
    PHMAC_STATE state, // IN/OUT: the state buffer
    TPM_ALG_ID hashAlg, // IN: the algorithm to use
    UINT16 keySize, // IN: the size of the HMAC key
    const BYTE *key // IN: the HMAC key
    )
{
    PHASH_DEF hashDef;
    BYTE * pb;
    UINT32 i;
    //
    hashDef = CryptGetHashDef(hashAlg);
    // A zero digest size means TPM_ALG_NULL/unimplemented; skip all key
    // processing and just tag the state.
    if(hashDef->digestSize != 0)
    {
        // If the HMAC key is larger than the hash block size, it has to be reduced
        // to fit. The reduction is a digest of the hashKey.
        if(keySize > hashDef->blockSize)
        {
            // if the key is too big, reduce it to a digest of itself
            state->hmacKey.t.size = CryptHashBlock(hashAlg, keySize, key,
                                                   hashDef->digestSize,
                                                   state->hmacKey.t.buffer);
        }
        else
        {
            memcpy(state->hmacKey.t.buffer, key, keySize);
            state->hmacKey.t.size = keySize;
        }
        // XOR the key with iPad (0x36)
        pb = state->hmacKey.t.buffer;
        for(i = state->hmacKey.t.size; i > 0; i--)
            *pb++ ^= 0x36;
        // if the keySize is smaller than a block, fill the rest with 0x36
        // (equivalent to zero-padding the key and then XORing with iPad)
        for(i = hashDef->blockSize - state->hmacKey.t.size; i > 0; i--)
            *pb++ = 0x36;
        // Increase the oPadSize to a full block
        state->hmacKey.t.size = hashDef->blockSize;
        // Start a new hash with the HMAC key
        // This will go in the caller's state structure and may be a sequence or not
        CryptHashStart((PHASH_STATE)state, hashAlg);
        CryptDigestUpdate((PHASH_STATE)state, state->hmacKey.t.size,
                          state->hmacKey.t.buffer);
        // XOR the key block with 0x5c ^ 0x36
        // This converts (key ^ iPad) into (key ^ oPad) in place, so the
        // stored key is ready for the outer hash in CryptHmacEnd().
        for(pb = state->hmacKey.t.buffer, i = hashDef->blockSize; i > 0; i--)
            *pb++ ^= (0x5c ^ 0x36);
    }
    // Set the hash algorithm
    state->hashState.hashAlg = hashAlg;
    // Set the hash state type
    state->hashState.type = HASH_STATE_HMAC;
    return hashDef->digestSize;
}
//*** CryptHmacEnd()
// This function is called to complete an HMAC. It will finish the current
// digest, and start a new digest. It will then add the oPadKey and the
// completed digest and return the results in dOut. It will not return more
// than dOutSize bytes.
// return type: UINT16
// >= 0 number of bytes in 'dOut' (may be zero)
LIB_EXPORT UINT16
CryptHmacEnd(
    PHMAC_STATE state, // IN: the hash state buffer
    UINT32 dOutSize, // IN: size of digest buffer
    BYTE *dOut // OUT: hash digest
    )
{
    BYTE temp[MAX_DIGEST_SIZE];
    PHASH_STATE hState = (PHASH_STATE)&state->hashState;
#if SMAC_IMPLEMENTED
    // SMAC sequences complete through their own method table.
    if(hState->type == HASH_STATE_SMAC)
        return (state->hashState.state.smac.smacMethods.end)
        (&state->hashState.state.smac.state,
         dOutSize,
         dOut);
#endif
    pAssert(hState->type == HASH_STATE_HMAC);
    hState->def = CryptGetHashDef(hState->hashAlg);
    // Change the state type for completion processing
    hState->type = HASH_STATE_HASH;
    if(hState->hashAlg == TPM_ALG_NULL)
        dOutSize = 0;
    else
    {
        // Complete the current (inner, iPad-keyed) hash
        HashEnd(hState, hState->def->digestSize, temp);
        // Do another hash starting with the oPad
        // (hmacKey was converted to key ^ oPad in CryptHmacStart())
        CryptHashStart(hState, hState->hashAlg);
        CryptDigestUpdate(hState, state->hmacKey.t.size, state->hmacKey.t.buffer);
        CryptDigestUpdate(hState, hState->def->digestSize, temp);
    }
    return HashEnd(hState, dOutSize, dOut);
}
//*** CryptHmacStart2B()
// This function starts an HMAC and returns the size of the digest
// that will be produced.
//
// This function is provided to support the most common use of starting an HMAC
// with a TPM2B key.
//
// The caller must provide a block of memory in which the hash sequence state
// is kept. The caller should not alter the contents of this buffer until the
// hash sequence is completed or abandoned.
//
// return type: UINT16
// > 0 the digest size of the algorithm
// = 0 the hashAlg was TPM_ALG_NULL
LIB_EXPORT UINT16
CryptHmacStart2B(
    PHMAC_STATE hmacState, // OUT: the state of HMAC stack. It will be used
    // in HMAC update and completion
    TPMI_ALG_HASH hashAlg, // IN: hash algorithm
    P2B key // IN: HMAC key
    )
{
    // Unpack the TPM2B key and forward to the general-purpose starter.
    return CryptHmacStart(hmacState, hashAlg, key->size, key->buffer);
}
//*** CryptHmacEnd2B()
// This function is the same as CryptHmacEnd() but the HMAC result
// is returned in a TPM2B which is the most common use.
// return type: UINT16
// >=0 the number of bytes placed in 'digest'
LIB_EXPORT UINT16
CryptHmacEnd2B(
    PHMAC_STATE hmacState, // IN: the state of HMAC stack
    P2B digest // OUT: HMAC
    )
{
    // Unpack the TPM2B and complete; 'digest->size' bounds the copy.
    return CryptHmacEnd(hmacState, digest->size, digest->buffer);
}
//** Mask and Key Generation Functions
//*** CryptMGF1()
// This function performs MGF1 using the selected hash. MGF1 is
// T(n) = T(n-1) || H(seed || counter).
// This function returns the length of the mask produced which
// could be zero if the digest algorithm is not supported
// return type: UINT16
// 0 hash algorithm was TPM_ALG_NULL
// > 0 should be the same as 'mSize'
LIB_EXPORT UINT16
CryptMGF1(
    UINT32 mSize, // IN: length of the mask to be produced
    BYTE *mask, // OUT: buffer to receive the mask
    TPM_ALG_ID hashAlg, // IN: hash to use
    UINT32 seedSize, // IN: size of the seed
    BYTE *seed // IN: seed buffer
    )
{
    HASH_STATE hashState;
    PHASH_DEF hDef = CryptGetHashDef(hashAlg);
    UINT32 remaining;
    UINT32 counter = 0;
    BYTE swappedCounter[4];
    // If there is no digest to compute return
    if((hashAlg == TPM_ALG_NULL) || (mSize == 0))
        return 0;
    // MGF1: mask = H(seed || 0) || H(seed || 1) || ..., truncated to mSize.
    for(remaining = mSize; ; remaining -= hDef->digestSize)
    {
        // Because the system may be either Endian...
        UINT32_TO_BYTE_ARRAY(counter, swappedCounter);
        // Start the hash and include the seed and counter
        CryptHashStart(&hashState, hashAlg);
        CryptDigestUpdate(&hashState, seedSize, seed);
        CryptDigestUpdate(&hashState, 4, swappedCounter);
        // Handling the completion depends on how much space remains in the mask
        // buffer. If it can hold the entire digest, put it there. If not
        // put the digest in a temp buffer and only copy the amount that
        // will fit into the mask buffer.
        // (HashEnd() internally clamps the copy to the digest size.)
        HashEnd(&hashState, remaining, mask);
        if(remaining <= hDef->digestSize)
            break;
        mask = &mask[hDef->digestSize];
        counter++;
    }
    return (UINT16)mSize;
}
//*** CryptKDFa()
// This function performs the key generation according to Part 1 of the
// TPM specification.
//
// This function returns the number of bytes generated which may be zero.
//
// The 'key' and 'keyStream' pointers are not allowed to be NULL. The other
// pointer values may be NULL. The value of 'sizeInBits' must be no larger
// than (2^18)-1 = 256K bits (32385 bytes).
//
// The "once" parameter is set to allow incremental generation of a large
// value. If this flag is TRUE, "sizeInBits" will be used in the HMAC computation
// but only one iteration of the KDF is performed. This would be used for
// XOR obfuscation so that the mask value can be generated in digest-sized
// chunks rather than having to be generated all at once in an arbitrarily
// large buffer and then XORed into the result. If "once" is TRUE, then
// "sizeInBits" must be a multiple of 8.
//
// Any error in the processing of this command is considered fatal.
// return type: UINT16
// 0 hash algorithm is not supported or is TPM_ALG_NULL
// > 0 the number of bytes in the 'keyStream' buffer
LIB_EXPORT UINT16
CryptKDFa(
    TPM_ALG_ID hashAlg, // IN: hash algorithm used in HMAC
    const TPM2B *key, // IN: HMAC key
    const TPM2B *label, // IN: a label for the KDF
    const TPM2B *contextU, // IN: context U
    const TPM2B *contextV, // IN: context V
    UINT32 sizeInBits, // IN: size of generated key in bits
    BYTE *keyStream, // OUT: key buffer
    UINT32 *counterInOut, // IN/OUT: caller may provide the iteration
    // counter for incremental operations to
    // avoid large intermediate buffers.
    UINT16 blocks // IN: If non-zero, this is the maximum number
    // of blocks to be returned, regardless
    // of sizeInBits
    )
{
    UINT32 counter = 0; // counter value
    INT16 bytes; // number of bytes to produce
    UINT16 generated; // number of bytes generated
    BYTE *stream = keyStream;
    HMAC_STATE hState;
    UINT16 digestSize = CryptHashGetDigestSize(hashAlg);
    pAssert(key != NULL && keyStream != NULL);
    TEST(TPM_ALG_KDF1_SP800_108);
    // TPM_ALG_NULL or an unimplemented hash yields no output.
    if(digestSize == 0)
        return 0;
    // Resume from the caller's counter for incremental generation.
    if(counterInOut != NULL)
        counter = *counterInOut;
    // If the size of the request is larger than the numbers will handle,
    // it is a fatal error.
    pAssert(((sizeInBits + 7) / 8) <= INT16_MAX);
    // The number of bytes to be generated is the smaller of the sizeInBits bytes or
    // the number of requested blocks. The number of blocks is the smaller of the
    // number requested or the number allowed by sizeInBits. A partial block is
    // a full block.
    bytes = (blocks > 0) ? blocks * digestSize : (UINT16)BITS_TO_BYTES(sizeInBits);
    generated = bytes;
    // Generate required bytes
    // (SP800-108 counter mode: K(i) = HMAC(key, i || label || 00 || U || V || L))
    for(; bytes > 0; bytes -= digestSize)
    {
        counter++;
        // Start HMAC
        if(CryptHmacStart(&hState, hashAlg, key->size, key->buffer) == 0)
            return 0;
        // Adding counter
        CryptDigestUpdateInt(&hState.hashState, 4, counter);
        // Adding label
        if(label != NULL)
            HASH_DATA(&hState.hashState, label->size, (BYTE *)label->buffer);
        // Add a null. SP108 is not very clear about when the 0 is needed but to
        // make this like the previous version that did not add an 0x00 after
        // a null-terminated string, this version will only add a null byte
        // if the label parameter did not end in a null byte, or if no label
        // is present.
        if((label == NULL)
        || (label->size == 0)
        || (label->buffer[label->size - 1] != 0))
            CryptDigestUpdateInt(&hState.hashState, 1, 0);
        // Adding contextU
        if(contextU != NULL)
            HASH_DATA(&hState.hashState, contextU->size, contextU->buffer);
        // Adding contextV
        if(contextV != NULL)
            HASH_DATA(&hState.hashState, contextV->size, contextV->buffer);
        // Adding size in bits
        CryptDigestUpdateInt(&hState.hashState, 4, sizeInBits);
        // Complete and put the data in the buffer
        // (the copy is clamped to the digest size, so a final partial
        // request is safe)
        CryptHmacEnd(&hState, bytes, stream);
        stream = &stream[digestSize];
    }
    // Masking in the KDF is disabled. If the calling function wants something
    // less than even number of bytes, then the caller should do the masking
    // because there is no universal way to do it here
    if(counterInOut != NULL)
        *counterInOut = counter;
    return generated;
}
//*** CryptKDFe()
// KDFe as defined in TPM specification part 1.
//
// This function returns the number of bytes generated which may be zero.
//
// The 'Z' and 'keyStream' pointers are not allowed to be NULL. The other
// pointer values may be NULL. The value of 'sizeInBits' must be no larger
// than (2^18)-1 = 256K bits (32385 bytes).
// Any error in the processing of this command is considered fatal.
// return type: UINT16
// 0 hash algorithm is not supported or is TPM_ALG_NULL
// > 0 the number of bytes in the 'keyStream' buffer
//
LIB_EXPORT UINT16
CryptKDFe(
    TPM_ALG_ID hashAlg, // IN: hash algorithm used in HMAC
    TPM2B *Z, // IN: Z
    const TPM2B *label, // IN: a label value for the KDF
    TPM2B *partyUInfo, // IN: PartyUInfo
    TPM2B *partyVInfo, // IN: PartyVInfo
    UINT32 sizeInBits, // IN: size of generated key in bits
    BYTE *keyStream // OUT: key buffer
    )
{
    HASH_STATE hashState;
    PHASH_DEF hashDef = CryptGetHashDef(hashAlg);
    UINT32 counter = 0; // counter value
    UINT16 hLen;
    BYTE *stream = keyStream;
    INT16 bytes; // number of bytes to generate
    pAssert(keyStream != NULL && Z != NULL && ((sizeInBits + 7) / 8) < INT16_MAX);
    //
    hLen = hashDef->digestSize;
    bytes = (INT16)((sizeInBits + 7) / 8);
    // TPM_ALG_NULL (digest size 0) or a zero-size request produces nothing.
    if(hashAlg == TPM_ALG_NULL || bytes == 0)
        return 0;
    // Generate required bytes
    //The inner loop of that KDF uses:
    // Hash[i] := H(counter | Z | OtherInfo) (5)
    // Where:
    // Hash[i] the hash generated on the i-th iteration of the loop.
    // H() an approved hash function
    // counter a 32-bit counter that is initialized to 1 and incremented
    // on each iteration
    // Z the X coordinate of the product of a public ECC key and a
    // different private ECC key.
    // OtherInfo a collection of qualifying data for the KDF defined below.
    // In this specification, OtherInfo will be constructed by:
    // OtherInfo := Use | PartyUInfo | PartyVInfo
    for(; bytes > 0; stream = &stream[hLen], bytes = bytes - hLen)
    {
        // On the last pass, shrink hLen so only the needed bytes are copied.
        if(bytes < hLen)
            hLen = bytes;
        counter++;
        // Do the hash
        CryptHashStart(&hashState, hashAlg);
        // Add counter
        CryptDigestUpdateInt(&hashState, 4, counter);
        // Add Z (already asserted non-NULL above; check kept for safety)
        if(Z != NULL)
            CryptDigestUpdate2B(&hashState, Z);
        // Add label
        if(label != NULL)
            CryptDigestUpdate2B(&hashState, label);
        // Add a null. SP108 is not very clear about when the 0 is needed but to
        // make this like the previous version that did not add an 0x00 after
        // a null-terminated string, this version will only add a null byte
        // if the label parameter did not end in a null byte, or if no label
        // is present.
        if((label == NULL)
        || (label->size == 0)
        || (label->buffer[label->size - 1] != 0))
            CryptDigestUpdateInt(&hashState, 1, 0);
        // Add PartyUInfo
        if(partyUInfo != NULL)
            CryptDigestUpdate2B(&hashState, partyUInfo);
        // Add PartyVInfo
        if(partyVInfo != NULL)
            CryptDigestUpdate2B(&hashState, partyVInfo);
        // Compute Hash. hLen was changed to be the smaller of bytes or hLen
        // at the start of each iteration.
        CryptHashEnd(&hashState, hLen, stream);
    }
    // Mask off bits if the required bits is not a multiple of byte size
    if((sizeInBits % 8) != 0)
        keyStream[0] &= ((1 << (sizeInBits % 8)) - 1);
    return (UINT16)((sizeInBits + 7) / 8);
}
"object"
] |
6507d439ca4b37ba0b9564ecad4a5c427cc9764b | 13,575 | h | C | modules/Alexa/SampleApp/include/SampleApp/AplClientBridge.h | isabella232/alexa-smart-screen-sdk | fca0701fc5020a2ae3787cd8a2052e7291982fd0 | [
"Apache-2.0"
] | null | null | null | modules/Alexa/SampleApp/include/SampleApp/AplClientBridge.h | isabella232/alexa-smart-screen-sdk | fca0701fc5020a2ae3787cd8a2052e7291982fd0 | [
"Apache-2.0"
] | 1 | 2022-02-08T19:12:45.000Z | 2022-02-08T19:12:45.000Z | modules/Alexa/SampleApp/include/SampleApp/AplClientBridge.h | isabella232/alexa-smart-screen-sdk | fca0701fc5020a2ae3787cd8a2052e7291982fd0 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#ifndef ALEXA_SMART_SCREEN_SDK_SAMPLEAPP_INCLUDE_SAMPLEAPP_APLCLIENTBRIDGE_H_
#define ALEXA_SMART_SCREEN_SDK_SAMPLEAPP_INCLUDE_SAMPLEAPP_APLCLIENTBRIDGE_H_
#include <acsdkAudioPlayerInterfaces/AudioPlayerObserverInterface.h>
#include <acsdkExternalMediaPlayerInterfaces/ExternalMediaPlayerObserverInterface.h>
#include <AVSCommon/Utils/LibcurlUtils/HTTPContentFetcherFactory.h>
#include <AVSCommon/Utils/Configuration/ConfigurationNode.h>
#include "SmartScreenSDKInterfaces/MessagingServerObserverInterface.h"
#include "APLClient/AplClientBinding.h"
#include "APLClient/AplRenderingEventObserver.h"
#include "APLClient/Extensions/AplCoreExtensionInterface.h"
#include <APLClient/Extensions/Backstack/AplBackstackExtension.h>
#include <APLClient/Extensions/Backstack/AplBackstackExtensionObserver.h>
#include <APLClient/Extensions/AudioPlayer/AplAudioPlayerExtension.h>
#include <APLClient/Extensions/AudioPlayer/AplAudioPlayerExtensionObserverInterface.h>
#include <APLClient/Extensions/E2EEncryption/AplE2EEncryptionExtension.h>
#include <APLClient/AplRenderingEvent.h>
#include "GUI/GUIManager.h"
#include "CachingDownloadManager.h"
namespace alexaSmartScreenSDK {
namespace sampleApp {
/**
* A struct that helps storing additional parameters for APLClientBridge.
*/
struct AplClientBridgeParameter {
// Maximum number of concurrent downloads allowed.
int maxNumberOfConcurrentDownloads;
};
class AplClientBridge
: public APLClient::AplOptionsInterface
, public smartScreenSDKInterfaces::MessagingServerObserverInterface
, public smartScreenSDKInterfaces::VisualStateProviderInterface
, public alexaClientSDK::acsdkAudioPlayerInterfaces::AudioPlayerObserverInterface
, public alexaClientSDK::acsdkExternalMediaPlayerInterfaces::ExternalMediaPlayerObserverInterface
, public APLClient::Extensions::Backstack::AplBackstackExtensionObserverInterface
, public APLClient::Extensions::AudioPlayer::AplAudioPlayerExtensionObserverInterface
, public alexaClientSDK::avsCommon::utils::RequiresShutdown
, public std::enable_shared_from_this<AplClientBridge> {
public:
static std::shared_ptr<AplClientBridge> create(
std::shared_ptr<CachingDownloadManager> contentDownloadManager,
std::shared_ptr<smartScreenSDKInterfaces::GUIClientInterface> guiClient,
AplClientBridgeParameter parameters);
/// @name RequiresShutdown Functions
/// @{
void doShutdown() override;
/// @}
/// @name AplOptionsInterface Functions
/// {
void sendMessage(const std::string& token, const std::string& payload) override;
void resetViewhost(const std::string& token) override;
std::string downloadResource(const std::string& source) override;
std::chrono::milliseconds getTimezoneOffset() override;
void onActivityStarted(const std::string& token, const std::string& source) override;
void onActivityEnded(const std::string& token, const std::string& source) override;
void onSendEvent(const std::string& token, const std::string& event) override;
void onCommandExecutionComplete(const std::string& token, bool result) override;
void onRenderDocumentComplete(const std::string& token, bool result, const std::string& error) override;
void onVisualContextAvailable(const std::string& token, unsigned int stateRequestToken, const std::string& context)
override;
void onSetDocumentIdleTimeout(const std::string& token, const std::chrono::milliseconds& timeout) override;
void onRenderingEvent(const std::string& token, APLClient::AplRenderingEvent event) override;
void onFinish(const std::string& token) override;
void onDataSourceFetchRequestEvent(const std::string& token, const std::string& type, const std::string& payload)
override;
void onRuntimeErrorEvent(const std::string& token, const std::string& payload) override;
void onExtensionEvent(
const std::string& aplToken,
const std::string& uri,
const std::string& name,
const std::string& source,
const std::string& params,
unsigned int event,
std::shared_ptr<APLClient::Extensions::AplCoreExtensionEventCallbackResultInterface> resultCallback) override;
void logMessage(APLClient::LogLevel level, const std::string& source, const std::string& message) override;
int getMaxNumberOfConcurrentDownloads() override;
/// }
/// @name MessagingServerObserverInterface Functions
/// @{
void onConnectionOpened() override;
void onConnectionClosed() override;
/// @}
/// @name VisualStateProviderInterface Methods
/// @{
void provideState(const std::string& aplToken, const unsigned int stateRequestToken) override;
/// @}
/// @name AudioPlayerObserverInterface methods
/// @{
void onPlayerActivityChanged(alexaClientSDK::avsCommon::avs::PlayerActivity state, const Context& context) override;
/// }
/// @name ExternalMediaPlayerObserverInterface methods
/// @{
void onLoginStateProvided(
const std::string& playerId,
alexaClientSDK::acsdkExternalMediaPlayerInterfaces::ObservableSessionProperties sessionStateProperties)
override;
void onPlaybackStateProvided(
const std::string& playerId,
alexaClientSDK::acsdkExternalMediaPlayerInterfaces::ObservablePlaybackStateProperties playbackStateProperties)
override;
/// }
/// @name AplBackstackExtensionObserverInterface Functions
/// @{
void onRestoreDocumentState(std::shared_ptr<APLClient::AplDocumentState> documentState) override;
/// @}
/// @name AplAudioPlayerExtensionObserverInterface methods
/// @{
void onAudioPlayerPlay() override;
void onAudioPlayerPause() override;
void onAudioPlayerNext() override;
void onAudioPlayerPrevious() override;
void onAudioPlayerSeekToPosition(int offsetInMilliseconds) override;
void onAudioPlayerToggle(const std::string& name, bool checked) override;
void onAudioPlayerSkipForward() override;
void onAudioPlayerSkipBackward() override;
void onAudioPlayerLyricDataFlushed(
const std::string& token,
long durationInMilliseconds,
const std::string& lyricData) override;
/// }
void onUpdateTimer();
void setGUIManager(std::shared_ptr<alexaSmartScreenSDK::smartScreenSDKInterfaces::GUIServerInterface> guiManager);
void renderDocument(
const std::string& token,
const std::string& document,
const std::string& datasources,
const std::string& supportedViewports,
const std::string& windowId);
void clearDocument(const std::string& token);
void executeCommands(const std::string& jsonPayload, const std::string& token);
void interruptCommandSequence(const std::string& token);
void dataSourceUpdate(const std::string& sourceType, const std::string& jsonPayload, const std::string& token);
void onMessage(const std::string& windowId, const std::string& message);
bool handleBack();
void onPresentationSessionChanged(const std::string& id, const std::string& skillId);
void handleRenderingEvent(const std::string& token, APLClient::AplRenderingEvent event);
void handleDisplayMetrics(const std::string& windowId, const std::string& jsonPayload);
void onRenderDirectiveReceived(const std::string& token, const std::chrono::steady_clock::time_point& receiveTime);
/**
* Initialize empty client renderer and load corresponding supported extensions
* @param windowId id of the window to be created
* @param supportedExtensions URIs of all supported APL extensions for this window
*/
void initializeRenderer(const std::string& windowId, std::set<std::string> supportedExtensions);
/**
* Initialize empty client renderer and load corresponding supported extensions
* @param windowId id of the window to be created
* @param supportedExtensions Set of APL Extensions to register with this window
*/
void initializeRenderer(
const std::string& windowId,
const std::unordered_set<std::shared_ptr<APLClient::Extensions::AplCoreExtensionInterface>>&
supportedExtensions);
/**
* Returns a shared pointer to the @c AplClientRenderer holding root-context for a given aplToken
* Note:- This is not a thread safe method, avoid calling this method outside @c executor context
*
* @param the APL token in context
* @return the instance of @c APLClientRenderer if found, else nullptr
*/
std::shared_ptr<APLClient::AplClientRenderer> getAplClientRendererFromAplToken(const std::string& aplToken);
/**
* Returns a shared pointer to the @c AplClientRenderer holding root-context for a target window ID
* Note:- This is not a thread safe method, avoid calling this method outside @c executor context
*
* @param the window id in context
* @return the instance of @c APLClientRenderer if found, else nullptr
*/
std::shared_ptr<APLClient::AplClientRenderer> getAplClientRendererFromWindowId(const std::string& windowId);
/**
* Get the max supported APL version as string
* @return Max supported APL version as string
*/
std::string getMaxSupportedAPLVersion();
/**
* Sets the @TelemetrySink to @c AplConfiguration. This sink will be used by @c APLClient
* to record and emit metric events.
*
* @param metricRecorder Shared Pointer to @MetricRecorderInterface to be used by @c TelemetrySink
*/
void onMetricRecorderAvailable(
std::shared_ptr<alexaClientSDK::avsCommon::utils::metrics::MetricRecorderInterface> metricRecorder);
private:
AplClientBridge(
std::shared_ptr<CachingDownloadManager> contentDownloadManager,
std::shared_ptr<smartScreenSDKInterfaces::GUIClientInterface> guiClient,
AplClientBridgeParameter parameters);
/**
* Set token to window id in the managed m_aplTokenToWindowIdMap
* @param token of the apl document.
* @param windowId the id of the window presenting the document with provided token.
*/
void setTokenToWindow(const std::string& token, const std::string& windowId);
/**
* Executor method for clearing document, must be called in executor context.
*
* @param aplClientRenderer shared pointer of @c APLClientRenderer where the document is rendering
*/
void executeClearDocument(std::shared_ptr<APLClient::AplClientRenderer> aplClientRenderer);
/**
* Gets the back extension associated with the provided renderer.
*
* @param aplClientRenderer shared pointer of @c APLClientRenderer to check for associated backstack extension.
* @return Shared pointer to the back extension instance if available, else nullptr
*/
static std::shared_ptr<APLClient::Extensions::Backstack::AplBackstackExtension> getBackExtensionForRenderer(
const std::shared_ptr<APLClient::AplClientRenderer>& aplClientRenderer);
/// Pointer to the download manager for retrieving resources
std::shared_ptr<CachingDownloadManager> m_contentDownloadManager;
/// An internal timer use to run the APL Core update loop
alexaClientSDK::avsCommon::utils::timing::Timer m_updateTimer;
/// Pointer to the APL Client
std::unique_ptr<APLClient::AplClientBinding> m_aplClientBinding;
/// Pointer to the GUI Manager
std::shared_ptr<smartScreenSDKInterfaces::GUIServerInterface> m_guiManager;
/// Pointer to the GUI Client
std::shared_ptr<smartScreenSDKInterfaces::GUIClientInterface> m_guiClient;
/// The last windowId to receive a RenderDocument directive
std::string m_lastRenderedWindowId;
/// Whether a render is currently queued
std::atomic_bool m_renderQueued;
/// An internal executor that performs execution of callable objects passed to it sequentially but asynchronously.
alexaClientSDK::avsCommon::utils::threading::Executor m_executor;
/// An internal struct that stores additional parameters for AplClientBridge.
AplClientBridgeParameter m_parameters;
/// Collection of all @c AudioPlayerExtensions
std::vector<std::shared_ptr<APLClient::Extensions::AudioPlayer::AplAudioPlayerExtension>> m_audioPlayerExtensions;
/// The @c PlayerActivity state of the @c AudioPlayer
alexaClientSDK::avsCommon::avs::PlayerActivity m_playerActivityState;
/// Collection of Pointer to the @c AplClientRenderer for every @c windowId
std::unordered_map<std::string, std::shared_ptr<APLClient::AplClientRenderer>> m_aplClientRendererMap;
/// Map for resolving target @windowId currently rendering a given @c aplToken
std::unordered_map<std::string, std::string> m_aplTokenToWindowIdMap;
};
} // namespace sampleApp
} // namespace alexaSmartScreenSDK
#endif // ALEXA_SMART_SCREEN_SDK_SAMPLEAPP_INCLUDE_SAMPLEAPP_APLCLIENTBRIDGE_H_
| 41.012085 | 120 | 0.747698 | [
"render",
"vector"
] |
650af04f34a84cf12a8f5497ffb37d53870bd66e | 86,369 | c | C | opuslib/src/main/jni/src/opus_encoder.c | teocci/OpusAndroidWrapper | a120a171b45e00f30ea9a0f373331cd5a48c71e0 | [
"MIT"
] | 2 | 2018-07-04T08:28:25.000Z | 2018-08-08T08:57:39.000Z | opuslib/src/main/jni/src/opus_encoder.c | teocci/OpusAndroidWrapper | a120a171b45e00f30ea9a0f373331cd5a48c71e0 | [
"MIT"
] | 1 | 2018-01-28T04:20:32.000Z | 2018-01-28T23:50:10.000Z | opuslib/src/main/jni/src/opus_encoder.c | teocci/OpusAndroidWrapper | a120a171b45e00f30ea9a0f373331cd5a48c71e0 | [
"MIT"
] | null | null | null | /* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited
Written by Jean-Marc Valin and Koen Vos */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <stdarg.h>
#include "celt.h"
#include "entenc.h"
#include "modes.h"
#include "API.h"
#include "stack_alloc.h"
#include "float_cast.h"
#include "opus.h"
#include "arch.h"
#include "opus_private.h"
#include "os_support.h"
#include "cpu_support.h"
#include "analysis.h"
#include "mathops.h"
#include "tuning_parameters.h"
#ifdef FIXED_POINT
#include "fixed/structs_FIX.h"
#else
#include "float/structs_FLP.h"
#endif
#define MAX_ENCODER_BUFFER 480
typedef struct {
opus_val32 XX, XY, YY;
opus_val16 smoothed_width;
opus_val16 max_follower;
} StereoWidthState;
struct OpusEncoder {
int celt_enc_offset;
int silk_enc_offset;
silk_EncControlStruct silk_mode;
int application;
int channels;
int delay_compensation;
int force_channels;
int signal_type;
int user_bandwidth;
int max_bandwidth;
int user_forced_mode;
int voice_ratio;
opus_int32 Fs;
int use_vbr;
int vbr_constraint;
int variable_duration;
opus_int32 bitrate_bps;
opus_int32 user_bitrate_bps;
int lsb_depth;
int encoder_buffer;
int lfe;
#define OPUS_ENCODER_RESET_START stream_channels
int stream_channels;
opus_int16 hybrid_stereo_width_Q14;
opus_int32 variable_HP_smth2_Q15;
opus_val16 prev_HB_gain;
opus_val32 hp_mem[4];
int mode;
int prev_mode;
int prev_channels;
int prev_framesize;
int bandwidth;
int silk_bw_switch;
/* Sampling rate (at the API level) */
int first;
opus_val16 *energy_masking;
StereoWidthState width_mem;
opus_val16 delay_buffer[MAX_ENCODER_BUFFER * 2];
#ifndef DISABLE_FLOAT_API
TonalityAnalysisState analysis;
int detected_bandwidth;
int analysis_offset;
#endif
opus_uint32 rangeFinal;
int arch;
};
/* Transition tables for the voice and music. First column is the
middle (memoriless) threshold. The second column is the hysteresis
(difference with the middle) */
static const opus_int32 mono_voice_bandwidth_thresholds[8] = {
11000, 1000, /* NB<->MB */
14000, 1000, /* MB<->WB */
17000, 1000, /* WB<->SWB */
21000, 2000, /* SWB<->FB */
};
static const opus_int32 mono_music_bandwidth_thresholds[8] = {
12000, 1000, /* NB<->MB */
15000, 1000, /* MB<->WB */
18000, 2000, /* WB<->SWB */
22000, 2000, /* SWB<->FB */
};
static const opus_int32 stereo_voice_bandwidth_thresholds[8] = {
11000, 1000, /* NB<->MB */
14000, 1000, /* MB<->WB */
21000, 2000, /* WB<->SWB */
28000, 2000, /* SWB<->FB */
};
static const opus_int32 stereo_music_bandwidth_thresholds[8] = {
12000, 1000, /* NB<->MB */
18000, 2000, /* MB<->WB */
21000, 2000, /* WB<->SWB */
30000, 2000, /* SWB<->FB */
};
/* Threshold bit-rates for switching between mono and stereo */
static const opus_int32 stereo_voice_threshold = 30000;
static const opus_int32 stereo_music_threshold = 30000;
/* Threshold bit-rate for switching between SILK/hybrid and CELT-only */
static const opus_int32 mode_thresholds[2][2] = {
/* voice */ /* music */
{64000, 16000}, /* mono */
{36000, 16000}, /* stereo */
};
int opus_encoder_get_size(int channels) {
int silkEncSizeBytes, celtEncSizeBytes;
int ret;
if (channels < 1 || channels > 2)
return 0;
ret = silk_Get_Encoder_Size(&silkEncSizeBytes);
if (ret)
return 0;
silkEncSizeBytes = align(silkEncSizeBytes);
celtEncSizeBytes = celt_encoder_get_size(channels);
return align(sizeof(OpusEncoder)) + silkEncSizeBytes + celtEncSizeBytes;
}
int opus_encoder_init(OpusEncoder *st, opus_int32 Fs, int channels, int application) {
void *silk_enc;
CELTEncoder *celt_enc;
int err;
int ret, silkEncSizeBytes;
if ((Fs != 48000 && Fs != 24000 && Fs != 16000 && Fs != 12000 && Fs != 8000) || (channels != 1 && channels != 2) ||
(application != OPUS_APPLICATION_VOIP && application != OPUS_APPLICATION_AUDIO
&& application != OPUS_APPLICATION_RESTRICTED_LOWDELAY))
return OPUS_BAD_ARG;
OPUS_CLEAR((char *) st, opus_encoder_get_size(channels));
/* Create SILK encoder */
ret = silk_Get_Encoder_Size(&silkEncSizeBytes);
if (ret)
return OPUS_BAD_ARG;
silkEncSizeBytes = align(silkEncSizeBytes);
st->silk_enc_offset = align(sizeof(OpusEncoder));
st->celt_enc_offset = st->silk_enc_offset + silkEncSizeBytes;
silk_enc = (char *) st + st->silk_enc_offset;
celt_enc = (CELTEncoder *) ((char *) st + st->celt_enc_offset);
st->stream_channels = st->channels = channels;
st->Fs = Fs;
st->arch = opus_select_arch();
ret = silk_InitEncoder(silk_enc, st->arch, &st->silk_mode);
if (ret)return OPUS_INTERNAL_ERROR;
/* default SILK parameters */
st->silk_mode.nChannelsAPI = channels;
st->silk_mode.nChannelsInternal = channels;
st->silk_mode.API_sampleRate = st->Fs;
st->silk_mode.maxInternalSampleRate = 16000;
st->silk_mode.minInternalSampleRate = 8000;
st->silk_mode.desiredInternalSampleRate = 16000;
st->silk_mode.payloadSize_ms = 20;
st->silk_mode.bitRate = 25000;
st->silk_mode.packetLossPercentage = 0;
st->silk_mode.complexity = 9;
st->silk_mode.useInBandFEC = 0;
st->silk_mode.useDTX = 0;
st->silk_mode.useCBR = 0;
st->silk_mode.reducedDependency = 0;
/* Create CELT encoder */
/* Initialize CELT encoder */
err = celt_encoder_init(celt_enc, Fs, channels, st->arch);
if (err != OPUS_OK)return OPUS_INTERNAL_ERROR;
celt_encoder_ctl(celt_enc, CELT_SET_SIGNALLING(0));
celt_encoder_ctl(celt_enc, OPUS_SET_COMPLEXITY(st->silk_mode.complexity));
st->use_vbr = 1;
/* Makes constrained VBR the default (safer for real-time use) */
st->vbr_constraint = 1;
st->user_bitrate_bps = OPUS_AUTO;
st->bitrate_bps = 3000 + Fs * channels;
st->application = application;
st->signal_type = OPUS_AUTO;
st->user_bandwidth = OPUS_AUTO;
st->max_bandwidth = OPUS_BANDWIDTH_FULLBAND;
st->force_channels = OPUS_AUTO;
st->user_forced_mode = OPUS_AUTO;
st->voice_ratio = -1;
st->encoder_buffer = st->Fs / 100;
st->lsb_depth = 24;
st->variable_duration = OPUS_FRAMESIZE_ARG;
/* Delay compensation of 4 ms (2.5 ms for SILK's extra look-ahead
+ 1.5 ms for SILK resamplers and stereo prediction) */
st->delay_compensation = st->Fs / 250;
st->hybrid_stereo_width_Q14 = 1 << 14;
st->prev_HB_gain = Q15ONE;
st->variable_HP_smth2_Q15 = silk_LSHIFT(silk_lin2log(VARIABLE_HP_MIN_CUTOFF_HZ), 8);
st->first = 1;
st->mode = MODE_HYBRID;
st->bandwidth = OPUS_BANDWIDTH_FULLBAND;
return OPUS_OK;
}
static unsigned char gen_toc(int mode, int framerate, int bandwidth, int channels) {
int period;
unsigned char toc;
period = 0;
while (framerate < 400) {
framerate <<= 1;
period++;
}
if (mode == MODE_SILK_ONLY) {
toc = (bandwidth - OPUS_BANDWIDTH_NARROWBAND) << 5;
toc |= (period - 2) << 3;
} else if (mode == MODE_CELT_ONLY) {
int tmp = bandwidth - OPUS_BANDWIDTH_MEDIUMBAND;
if (tmp < 0)
tmp = 0;
toc = 0x80;
toc |= tmp << 5;
toc |= period << 3;
} else /* Hybrid */
{
toc = 0x60;
toc |= (bandwidth - OPUS_BANDWIDTH_SUPERWIDEBAND) << 4;
toc |= (period - 2) << 3;
}
toc |= (channels == 2) << 2;
return toc;
}
#ifndef FIXED_POINT
static void silk_biquad_float(
const opus_val16 *in, /* I: Input signal */
const opus_int32 *B_Q28, /* I: MA coefficients [3] */
const opus_int32 *A_Q28, /* I: AR coefficients [2] */
opus_val32 *S, /* I/O: State vector [2] */
opus_val16 *out, /* O: Output signal */
const opus_int32 len, /* I: Signal length (must be even) */
int stride
) {
/* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */
opus_int k;
opus_val32 vout;
opus_val32 inval;
opus_val32 A[2], B[3];
A[0] = (opus_val32) (A_Q28[0] * (1.f / ((opus_int32) 1 << 28)));
A[1] = (opus_val32) (A_Q28[1] * (1.f / ((opus_int32) 1 << 28)));
B[0] = (opus_val32) (B_Q28[0] * (1.f / ((opus_int32) 1 << 28)));
B[1] = (opus_val32) (B_Q28[1] * (1.f / ((opus_int32) 1 << 28)));
B[2] = (opus_val32) (B_Q28[2] * (1.f / ((opus_int32) 1 << 28)));
/* Negate A_Q28 values and split in two parts */
for (k = 0; k < len; k++) {
/* S[ 0 ], S[ 1 ]: Q12 */
inval = in[k * stride];
vout = S[0] + B[0] * inval;
S[0] = S[1] - vout * A[0] + B[1] * inval;
S[1] = -vout * A[1] + B[2] * inval + VERY_SMALL;
/* Scale back to Q0 and saturate */
out[k * stride] = vout;
}
}
#endif
static void
hp_cutoff(const opus_val16 *in, opus_int32 cutoff_Hz, opus_val16 *out, opus_val32 *hp_mem, int len, int channels,
opus_int32 Fs) {
opus_int32 B_Q28[3], A_Q28[2];
opus_int32 Fc_Q19, r_Q28, r_Q22;
silk_assert(cutoff_Hz <= silk_int32_MAX / SILK_FIX_CONST(1.5 * 3.14159 / 1000, 19));
Fc_Q19 = silk_DIV32_16(silk_SMULBB(SILK_FIX_CONST(1.5 * 3.14159 / 1000, 19), cutoff_Hz), Fs / 1000);
silk_assert(Fc_Q19 > 0 && Fc_Q19 < 32768);
r_Q28 = SILK_FIX_CONST(1.0, 28) - silk_MUL(SILK_FIX_CONST(0.92, 9), Fc_Q19);
/* b = r * [ 1; -2; 1 ]; */
/* a = [ 1; -2 * r * ( 1 - 0.5 * Fc^2 ); r^2 ]; */
B_Q28[0] = r_Q28;
B_Q28[1] = silk_LSHIFT(-r_Q28, 1);
B_Q28[2] = r_Q28;
/* -r * ( 2 - Fc * Fc ); */
r_Q22 = silk_RSHIFT(r_Q28, 6);
A_Q28[0] = silk_SMULWW(r_Q22, silk_SMULWW(Fc_Q19, Fc_Q19) - SILK_FIX_CONST(2.0, 22));
A_Q28[1] = silk_SMULWW(r_Q22, r_Q22);
#ifdef FIXED_POINT
silk_biquad_alt( in, B_Q28, A_Q28, hp_mem, out, len, channels );
if( channels == 2 ) {
silk_biquad_alt( in+1, B_Q28, A_Q28, hp_mem+2, out+1, len, channels );
}
#else
silk_biquad_float(in, B_Q28, A_Q28, hp_mem, out, len, channels);
if (channels == 2) {
silk_biquad_float(in + 1, B_Q28, A_Q28, hp_mem + 2, out + 1, len, channels);
}
#endif
}
#ifdef FIXED_POINT
static void dc_reject(const opus_val16 *in, opus_int32 cutoff_Hz, opus_val16 *out, opus_val32 *hp_mem, int len, int channels, opus_int32 Fs)
{
int c, i;
int shift;
/* Approximates -round(log2(4.*cutoff_Hz/Fs)) */
shift=celt_ilog2(Fs/(cutoff_Hz*3));
for (c=0;c<channels;c++)
{
for (i=0;i<len;i++)
{
opus_val32 x, tmp, y;
x = SHL32(EXTEND32(in[channels*i+c]), 15);
/* First stage */
tmp = x-hp_mem[2*c];
hp_mem[2*c] = hp_mem[2*c] + PSHR32(x - hp_mem[2*c], shift);
/* Second stage */
y = tmp - hp_mem[2*c+1];
hp_mem[2*c+1] = hp_mem[2*c+1] + PSHR32(tmp - hp_mem[2*c+1], shift);
out[channels*i+c] = EXTRACT16(SATURATE(PSHR32(y, 15), 32767));
}
}
}
#else
static void
dc_reject(const opus_val16 *in, opus_int32 cutoff_Hz, opus_val16 *out, opus_val32 *hp_mem, int len, int channels,
opus_int32 Fs) {
int c, i;
float coef;
coef = 4.0f * cutoff_Hz / Fs;
for (c = 0; c < channels; c++) {
for (i = 0; i < len; i++) {
opus_val32 x, tmp, y;
x = in[channels * i + c];
/* First stage */
tmp = x - hp_mem[2 * c];
hp_mem[2 * c] = hp_mem[2 * c] + coef * (x - hp_mem[2 * c]) + VERY_SMALL;
/* Second stage */
y = tmp - hp_mem[2 * c + 1];
hp_mem[2 * c + 1] = hp_mem[2 * c + 1] + coef * (tmp - hp_mem[2 * c + 1]) + VERY_SMALL;
out[channels * i + c] = y;
}
}
}
#endif
static void stereo_fade(const opus_val16 *in, opus_val16 *out, opus_val16 g1, opus_val16 g2,
int overlap48, int frame_size, int channels, const opus_val16 *window, opus_int32 Fs) {
int i;
int overlap;
int inc;
inc = 48000 / Fs;
overlap = overlap48 / inc;
g1 = Q15ONE - g1;
g2 = Q15ONE - g2;
for (i = 0; i < overlap; i++) {
opus_val32 diff;
opus_val16 g, w;
w = MULT16_16_Q15(window[i * inc], window[i * inc]);
g = SHR32(MAC16_16(MULT16_16(w, g2),
Q15ONE - w, g1), 15);
diff = EXTRACT16(HALF32((opus_val32) in[i * channels] - (opus_val32) in[i * channels + 1]));
diff = MULT16_16_Q15(g, diff);
out[i * channels] = out[i * channels] - diff;
out[i * channels + 1] = out[i * channels + 1] + diff;
}
for (; i < frame_size; i++) {
opus_val32 diff;
diff = EXTRACT16(HALF32((opus_val32) in[i * channels] - (opus_val32) in[i * channels + 1]));
diff = MULT16_16_Q15(g2, diff);
out[i * channels] = out[i * channels] - diff;
out[i * channels + 1] = out[i * channels + 1] + diff;
}
}
static void gain_fade(const opus_val16 *in, opus_val16 *out, opus_val16 g1, opus_val16 g2,
int overlap48, int frame_size, int channels, const opus_val16 *window, opus_int32 Fs) {
int i;
int inc;
int overlap;
int c;
inc = 48000 / Fs;
overlap = overlap48 / inc;
if (channels == 1) {
for (i = 0; i < overlap; i++) {
opus_val16 g, w;
w = MULT16_16_Q15(window[i * inc], window[i * inc]);
g = SHR32(MAC16_16(MULT16_16(w, g2),
Q15ONE - w, g1), 15);
out[i] = MULT16_16_Q15(g, in[i]);
}
} else {
for (i = 0; i < overlap; i++) {
opus_val16 g, w;
w = MULT16_16_Q15(window[i * inc], window[i * inc]);
g = SHR32(MAC16_16(MULT16_16(w, g2),
Q15ONE - w, g1), 15);
out[i * 2] = MULT16_16_Q15(g, in[i * 2]);
out[i * 2 + 1] = MULT16_16_Q15(g, in[i * 2 + 1]);
}
}
c = 0;
do {
for (i = overlap; i < frame_size; i++) {
out[i * channels + c] = MULT16_16_Q15(g2, in[i * channels + c]);
}
} while (++c < channels);
}
OpusEncoder *opus_encoder_create(opus_int32 Fs, int channels, int application, int *error) {
int ret;
OpusEncoder *st;
if ((Fs != 48000 && Fs != 24000 && Fs != 16000 && Fs != 12000 && Fs != 8000) || (channels != 1 && channels != 2) ||
(application != OPUS_APPLICATION_VOIP && application != OPUS_APPLICATION_AUDIO
&& application != OPUS_APPLICATION_RESTRICTED_LOWDELAY)) {
if (error)
*error = OPUS_BAD_ARG;
return NULL;
}
st = (OpusEncoder *) opus_alloc(opus_encoder_get_size(channels));
if (st == NULL) {
if (error)
*error = OPUS_ALLOC_FAIL;
return NULL;
}
ret = opus_encoder_init(st, Fs, channels, application);
if (error)
*error = ret;
if (ret != OPUS_OK) {
opus_free(st);
st = NULL;
}
return st;
}
static opus_int32 user_bitrate_to_bitrate(OpusEncoder *st, int frame_size, int max_data_bytes) {
if (!frame_size)frame_size = st->Fs / 400;
if (st->user_bitrate_bps == OPUS_AUTO)
return 60 * st->Fs / frame_size + st->Fs * st->channels;
else if (st->user_bitrate_bps == OPUS_BITRATE_MAX)
return max_data_bytes * 8 * st->Fs / frame_size;
else
return st->user_bitrate_bps;
}
#ifndef DISABLE_FLOAT_API
/* Don't use more than 60 ms for the frame size analysis */
#define MAX_DYNAMIC_FRAMESIZE 24
/* Estimates how much the bitrate will be boosted based on the sub-frame energy */
static float transient_boost(const float *E, const float *E_1, int LM, int maxM) {
int i;
int M;
float sumE = 0, sumE_1 = 0;
float metric;
M = IMIN(maxM, (1 << LM) + 1);
for (i = 0; i < M; i++) {
sumE += E[i];
sumE_1 += E_1[i];
}
metric = sumE * sumE_1 / (M * M);
/*if (LM==3)
printf("%f\n", metric);*/
/*return metric>10 ? 1 : 0;*/
/*return MAX16(0,1-exp(-.25*(metric-2.)));*/
return MIN16(1, (float) sqrt(MAX16(0, .05f * (metric - 2))));
}
/* Viterbi decoding trying to find the best frame size combination using look-ahead
State numbering:
0: unused
1: 2.5 ms
2: 5 ms (#1)
3: 5 ms (#2)
4: 10 ms (#1)
5: 10 ms (#2)
6: 10 ms (#3)
7: 10 ms (#4)
8: 20 ms (#1)
9: 20 ms (#2)
10: 20 ms (#3)
11: 20 ms (#4)
12: 20 ms (#5)
13: 20 ms (#6)
14: 20 ms (#7)
15: 20 ms (#8)
*/
static int transient_viterbi(const float *E, const float *E_1, int N, int frame_cost, int rate) {
int i;
float cost[MAX_DYNAMIC_FRAMESIZE][16];
int states[MAX_DYNAMIC_FRAMESIZE][16];
float best_cost;
int best_state;
float factor;
/* Take into account that we damp VBR in the 32 kb/s to 64 kb/s range. */
if (rate < 80)
factor = 0;
else if (rate > 160)
factor = 1;
else
factor = (rate - 80.f) / 80.f;
/* Makes variable framesize less aggressive at lower bitrates, but I can't
find any valid theoretical justification for this (other than it seems
to help) */
for (i = 0; i < 16; i++) {
/* Impossible state */
states[0][i] = -1;
cost[0][i] = 1e10;
}
for (i = 0; i < 4; i++) {
cost[0][1 << i] = (frame_cost + rate * (1 << i)) * (1 + factor * transient_boost(E, E_1, i, N + 1));
states[0][1 << i] = i;
}
for (i = 1; i < N; i++) {
int j;
/* Follow continuations */
for (j = 2; j < 16; j++) {
cost[i][j] = cost[i - 1][j - 1];
states[i][j] = j - 1;
}
/* New frames */
for (j = 0; j < 4; j++) {
int k;
float min_cost;
float curr_cost;
states[i][1 << j] = 1;
min_cost = cost[i - 1][1];
for (k = 1; k < 4; k++) {
float tmp = cost[i - 1][(1 << (k + 1)) - 1];
if (tmp < min_cost) {
states[i][1 << j] = (1 << (k + 1)) - 1;
min_cost = tmp;
}
}
curr_cost = (frame_cost + rate * (1 << j)) * (1 + factor * transient_boost(E + i, E_1 + i, j, N - i + 1));
cost[i][1 << j] = min_cost;
/* If part of the frame is outside the analysis window, only count part of the cost */
if (N - i < (1 << j))
cost[i][1 << j] += curr_cost * (float) (N - i) / (1 << j);
else
cost[i][1 << j] += curr_cost;
}
}
best_state = 1;
best_cost = cost[N - 1][1];
/* Find best end state (doesn't force a frame to end at N-1) */
for (i = 2; i < 16; i++) {
if (cost[N - 1][i] < best_cost) {
best_cost = cost[N - 1][i];
best_state = i;
}
}
/* Follow transitions back */
for (i = N - 1; i >= 0; i--) {
/*printf("%d ", best_state);*/
best_state = states[i][best_state];
}
/*printf("%d\n", best_state);*/
return best_state;
}
int optimize_framesize(const opus_val16 *x, int len, int C, opus_int32 Fs,
int bitrate, opus_val16 tonality, float *mem, int buffering,
downmix_func downmix) {
int N;
int i;
float e[MAX_DYNAMIC_FRAMESIZE + 4];
float e_1[MAX_DYNAMIC_FRAMESIZE + 3];
opus_val32 memx;
int bestLM = 0;
int subframe;
int pos;
VARDECL(opus_val32, sub);
subframe = Fs / 400;
ALLOC(sub, subframe, opus_val32);
e[0] = mem[0];
e_1[0] = 1.f / (EPSILON + mem[0]);
if (buffering) {
/* Consider the CELT delay when not in restricted-lowdelay */
/* We assume the buffering is between 2.5 and 5 ms */
int offset = 2 * subframe - buffering;
celt_assert(offset >= 0 && offset <= subframe);
x += C * offset;
len -= offset;
e[1] = mem[1];
e_1[1] = 1.f / (EPSILON + mem[1]);
e[2] = mem[2];
e_1[2] = 1.f / (EPSILON + mem[2]);
pos = 3;
} else {
pos = 1;
}
N = IMIN(len / subframe, MAX_DYNAMIC_FRAMESIZE);
/* Just silencing a warning, it's really initialized later */
memx = 0;
for (i = 0; i < N; i++) {
float tmp;
opus_val32 tmpx;
int j;
tmp = EPSILON;
downmix(x, sub, subframe, i * subframe, 0, -2, C);
if (i == 0)
memx = sub[0];
for (j = 0; j < subframe; j++) {
tmpx = sub[j];
tmp += (tmpx - memx) * (float) (tmpx - memx);
memx = tmpx;
}
e[i + pos] = tmp;
e_1[i + pos] = 1.f / tmp;
}
/* Hack to get 20 ms working with APPLICATION_AUDIO
The real problem is that the corresponding memory needs to use 1.5 ms
from this frame and 1 ms from the next frame */
e[i + pos] = e[i + pos - 1];
if (buffering)
N = IMIN(MAX_DYNAMIC_FRAMESIZE, N + 2);
bestLM = transient_viterbi(e, e_1, N, (int) ((1.f + .5f * tonality) * (60 * C + 40)), bitrate / 400);
mem[0] = e[1 << bestLM];
if (buffering) {
mem[1] = e[(1 << bestLM) + 1];
mem[2] = e[(1 << bestLM) + 2];
}
return bestLM;
}
#endif
#ifndef DISABLE_FLOAT_API
#ifdef FIXED_POINT
#define PCM2VAL(x) FLOAT2INT16(x)
#else
#define PCM2VAL(x) SCALEIN(x)
#endif
void downmix_float(const void *_x, opus_val32 *sub, int subframe, int offset, int c1, int c2, int C) {
const float *x;
opus_val32 scale;
int j;
x = (const float *) _x;
for (j = 0; j < subframe; j++)
sub[j] = PCM2VAL(x[(j + offset) * C + c1]);
if (c2 > -1) {
for (j = 0; j < subframe; j++)
sub[j] += PCM2VAL(x[(j + offset) * C + c2]);
} else if (c2 == -2) {
int c;
for (c = 1; c < C; c++) {
for (j = 0; j < subframe; j++)
sub[j] += PCM2VAL(x[(j + offset) * C + c]);
}
}
#ifdef FIXED_POINT
scale = (1<<SIG_SHIFT);
#else
scale = 1.f;
#endif
if (C == -2)
scale /= C;
else
scale /= 2;
for (j = 0; j < subframe; j++)
sub[j] *= scale;
}
#endif
void downmix_int(const void *_x, opus_val32 *sub, int subframe, int offset, int c1, int c2, int C) {
const opus_int16 *x;
opus_val32 scale;
int j;
x = (const opus_int16 *) _x;
for (j = 0; j < subframe; j++)
sub[j] = x[(j + offset) * C + c1];
if (c2 > -1) {
for (j = 0; j < subframe; j++)
sub[j] += x[(j + offset) * C + c2];
} else if (c2 == -2) {
int c;
for (c = 1; c < C; c++) {
for (j = 0; j < subframe; j++)
sub[j] += x[(j + offset) * C + c];
}
}
#ifdef FIXED_POINT
scale = (1<<SIG_SHIFT);
#else
scale = 1.f / 32768;
#endif
if (C == -2)
scale /= C;
else
scale /= 2;
for (j = 0; j < subframe; j++)
sub[j] *= scale;
}
/* Resolve the effective encoder frame size from the requested size and
 * the OPUS_FRAMESIZE_* duration setting.
 *
 * frame_size        - caller-supplied size, in samples at rate Fs
 * variable_duration - OPUS_FRAMESIZE_ARG, OPUS_FRAMESIZE_VARIABLE, or an
 *                     explicit OPUS_FRAMESIZE_2_5_MS..OPUS_FRAMESIZE_60_MS
 * Fs                - sample rate in Hz
 *
 * Returns the selected frame size in samples, or -1 when the request is
 * invalid (too short, unknown duration code, or not one of the legal
 * 2.5/5/10/20/40/60 ms durations).
 */
opus_int32 frame_size_select(opus_int32 frame_size, int variable_duration, opus_int32 Fs) {
    int new_size;

    /* Anything shorter than 2.5 ms cannot be represented. */
    if (frame_size < Fs / 400)
        return -1;

    if (variable_duration == OPUS_FRAMESIZE_ARG) {
        new_size = frame_size;
    } else if (variable_duration == OPUS_FRAMESIZE_VARIABLE) {
        /* Variable duration defaults to 20 ms here. */
        new_size = Fs / 50;
    } else if (variable_duration < OPUS_FRAMESIZE_2_5_MS || variable_duration > OPUS_FRAMESIZE_60_MS) {
        return -1;
    } else {
        /* Explicit duration: 2.5 ms doubled per step, capped at 60 ms. */
        new_size = IMIN(3 * Fs / 50, (Fs / 400) << (variable_duration - OPUS_FRAMESIZE_2_5_MS));
    }

    /* The selection may never exceed what the caller provided. */
    if (new_size > frame_size)
        return -1;

    /* Accept only the legal Opus durations: 2.5, 5, 10, 20, 40, 60 ms. */
    if (400 * new_size == Fs || 200 * new_size == Fs || 100 * new_size == Fs
            || 50 * new_size == Fs || 25 * new_size == Fs || 50 * new_size == 3 * Fs)
        return new_size;
    return -1;
}
/* Choose the frame size actually used for encoding.
 *
 * In float builds, when OPUS_FRAMESIZE_VARIABLE is requested and the
 * input spans at least 5 ms, runs the transient analysis
 * (optimize_framesize) to pick a power-of-two multiple of 2.5 ms that is
 * no larger than the available input. Otherwise falls back to the static
 * frame_size_select() mapping. Returns the size in samples, or -1 on an
 * invalid request. */
opus_int32 compute_frame_size(const void *analysis_pcm, int frame_size,
        int variable_duration, int C, opus_int32 Fs, int bitrate_bps,
        int delay_compensation, downmix_func downmix
#ifndef DISABLE_FLOAT_API
        , float *subframe_mem
#endif
) {
#ifndef DISABLE_FLOAT_API
    if (variable_duration == OPUS_FRAMESIZE_VARIABLE && frame_size >= Fs / 200) {
        int LM = 3;
        LM = optimize_framesize(analysis_pcm, frame_size, C, Fs, bitrate_bps,
                0, subframe_mem, delay_compensation, downmix);
        /* Clamp the analysis result so (Fs/400)<<LM fits in the input. */
        while ((Fs / 400 << LM) > frame_size)
            LM--;
        frame_size = (Fs / 400 << LM);
    } else
#endif
    {
        frame_size = frame_size_select(frame_size, variable_duration, Fs);
    }
    /* frame_size_select() signals invalid requests with -1. */
    if (frame_size < 0)
        return -1;
    return frame_size;
}
/* Estimate the perceptual stereo width of an interleaved stereo frame.
 *
 * Accumulates per-frame channel energies (xx, yy) and cross-correlation
 * (xy) into exponentially smoothed state in `mem`, derives a width from
 * the inter-channel correlation and approximate loudness difference, and
 * returns a peak-followed width estimate in Q15 (0 = mono, Q15ONE = full
 * stereo). Used by the encoder for mode/bitrate decisions. */
opus_val16 compute_stereo_width(const opus_val16 *pcm, int frame_size, opus_int32 Fs, StereoWidthState *mem) {
    opus_val16 corr;
    opus_val16 ldiff;
    opus_val16 width;
    opus_val32 xx, xy, yy;
    opus_val16 sqrt_xx, sqrt_yy;
    opus_val16 qrrt_xx, qrrt_yy;
    int frame_rate;
    int i;
    opus_val16 short_alpha;
    frame_rate = Fs / frame_size;
    /* Smoothing coefficient scaled so adaptation speed is roughly
       independent of the frame rate (floor of 50 frames/s). */
    short_alpha = Q15ONE - 25 * Q15ONE / IMAX(50, frame_rate);
    xx = xy = yy = 0;
    /* Unrolled x4: each iteration consumes 4 interleaved stereo frames
       (indices 2*i .. 2*i+7). NOTE(review): assumes frame_size is a
       multiple of 4 — TODO confirm against callers. */
    for (i = 0; i < frame_size; i += 4) {
        opus_val32 pxx = 0;
        opus_val32 pxy = 0;
        opus_val32 pyy = 0;
        opus_val16 x, y;
        x = pcm[2 * i];
        y = pcm[2 * i + 1];
        pxx = SHR32(MULT16_16(x, x), 2);
        pxy = SHR32(MULT16_16(x, y), 2);
        pyy = SHR32(MULT16_16(y, y), 2);
        x = pcm[2 * i + 2];
        y = pcm[2 * i + 3];
        pxx += SHR32(MULT16_16(x, x), 2);
        pxy += SHR32(MULT16_16(x, y), 2);
        pyy += SHR32(MULT16_16(y, y), 2);
        x = pcm[2 * i + 4];
        y = pcm[2 * i + 5];
        pxx += SHR32(MULT16_16(x, x), 2);
        pxy += SHR32(MULT16_16(x, y), 2);
        pyy += SHR32(MULT16_16(y, y), 2);
        x = pcm[2 * i + 6];
        y = pcm[2 * i + 7];
        pxx += SHR32(MULT16_16(x, x), 2);
        pxy += SHR32(MULT16_16(x, y), 2);
        pyy += SHR32(MULT16_16(y, y), 2);
        /* Pre-shifted partial sums keep the accumulators from
           overflowing in the fixed-point build. */
        xx += SHR32(pxx, 10);
        xy += SHR32(pxy, 10);
        yy += SHR32(pyy, 10);
    }
    /* First-order IIR smoothing of the energy/correlation statistics. */
    mem->XX += MULT16_32_Q15(short_alpha, xx - mem->XX);
    mem->XY += MULT16_32_Q15(short_alpha, xy - mem->XY);
    mem->YY += MULT16_32_Q15(short_alpha, yy - mem->YY);
    /* Rounding in the update can push the energies slightly negative. */
    mem->XX = MAX32(0, mem->XX);
    mem->XY = MAX32(0, mem->XY);
    mem->YY = MAX32(0, mem->YY);
    /* Only update width estimates when there is enough signal energy. */
    if (MAX32(mem->XX, mem->YY) > QCONST16(8e-4f, 18)) {
        sqrt_xx = celt_sqrt(mem->XX);
        sqrt_yy = celt_sqrt(mem->YY);
        qrrt_xx = celt_sqrt(sqrt_xx);
        qrrt_yy = celt_sqrt(sqrt_yy);
        /* Inter-channel correlation */
        mem->XY = MIN32(mem->XY, sqrt_xx * sqrt_yy);
        corr = SHR32(frac_div32(mem->XY, EPSILON + MULT16_16(sqrt_xx, sqrt_yy)), 16);
        /* Approximate loudness difference */
        ldiff = Q15ONE * ABS16(qrrt_xx - qrrt_yy) / (EPSILON + qrrt_xx + qrrt_yy);
        width = MULT16_16_Q15(celt_sqrt(QCONST32(1.f, 30) - MULT16_16(corr, corr)), ldiff);
        /* Smoothing over one second */
        mem->smoothed_width += (width - mem->smoothed_width) / frame_rate;
        /* Peak follower */
        mem->max_follower = MAX16(mem->max_follower - QCONST16(.02f, 15) / frame_rate, mem->smoothed_width);
    } else {
        width = 0;
        corr = Q15ONE;
        ldiff = 0;
    }
    /*printf("%f %f %f %f %f ", corr/(float)Q15ONE, ldiff/(float)Q15ONE, width/(float)Q15ONE, mem->smoothed_width/(float)Q15ONE, mem->max_follower/(float)Q15ONE);*/
    /* Scale up the peak follower and clamp to full width (Q15ONE). */
    return EXTRACT16(MIN32(Q15ONE, 20 * mem->max_follower));
}
opus_int32 opus_encode_native(OpusEncoder *st, const opus_val16 *pcm, int frame_size,
unsigned char *data, opus_int32 out_data_bytes, int lsb_depth,
const void *analysis_pcm, opus_int32 analysis_size, int c1, int c2, int analysis_channels,
downmix_func downmix) {
void *silk_enc;
CELTEncoder *celt_enc;
int i;
int ret = 0;
opus_int32 nBytes;
ec_enc enc;
int bytes_target;
int prefill = 0;
int start_band = 0;
int redundancy = 0;
int redundancy_bytes = 0; /* Number of bytes to use for redundancy frame */
int celt_to_silk = 0;
VARDECL(opus_val16, pcm_buf);
int nb_compr_bytes;
int to_celt = 0;
opus_uint32 redundant_rng = 0;
int cutoff_Hz, hp_freq_smth1;
int voice_est; /* Probability of voice in Q7 */
opus_int32 equiv_rate;
int delay_compensation;
int frame_rate;
opus_int32 max_rate; /* Max bitrate we're allowed to use */
int curr_bandwidth;
opus_val16 HB_gain;
opus_int32 max_data_bytes; /* Max number of bytes we're allowed to use */
int total_buffer;
opus_val16 stereo_width;
const CELTMode *celt_mode;
AnalysisInfo analysis_info;
int analysis_read_pos_bak = -1;
int analysis_read_subframe_bak = -1;
VARDECL(opus_val16, tmp_prefill);
ALLOC_STACK;
max_data_bytes = IMIN(1276, out_data_bytes);
st->rangeFinal = 0;
if ((!st->variable_duration && 400 * frame_size != st->Fs && 200 * frame_size != st->Fs &&
100 * frame_size != st->Fs &&
50 * frame_size != st->Fs && 25 * frame_size != st->Fs && 50 * frame_size != 3 * st->Fs)
|| (400 * frame_size < st->Fs)
|| max_data_bytes <= 0
) {
RESTORE_STACK;
return OPUS_BAD_ARG;
}
silk_enc = (char *) st + st->silk_enc_offset;
celt_enc = (CELTEncoder *) ((char *) st + st->celt_enc_offset);
if (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY)
delay_compensation = 0;
else
delay_compensation = st->delay_compensation;
lsb_depth = IMIN(lsb_depth, st->lsb_depth);
analysis_info.valid = 0;
celt_encoder_ctl(celt_enc, CELT_GET_MODE(&celt_mode));
#ifndef DISABLE_FLOAT_API
#ifdef FIXED_POINT
if (st->silk_mode.complexity >= 10 && st->Fs==48000)
#else
if (st->silk_mode.complexity >= 7 && st->Fs == 48000)
#endif
{
analysis_read_pos_bak = st->analysis.read_pos;
analysis_read_subframe_bak = st->analysis.read_subframe;
run_analysis(&st->analysis, celt_mode, analysis_pcm, analysis_size, frame_size,
c1, c2, analysis_channels, st->Fs,
lsb_depth, downmix, &analysis_info);
}
#endif
st->voice_ratio = -1;
#ifndef DISABLE_FLOAT_API
st->detected_bandwidth = 0;
if (analysis_info.valid) {
int analysis_bandwidth;
if (st->signal_type == OPUS_AUTO)
st->voice_ratio = (int) floor(.5 + 100 * (1 - analysis_info.music_prob));
analysis_bandwidth = analysis_info.bandwidth;
if (analysis_bandwidth <= 12)
st->detected_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
else if (analysis_bandwidth <= 14)
st->detected_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
else if (analysis_bandwidth <= 16)
st->detected_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
else if (analysis_bandwidth <= 18)
st->detected_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
else
st->detected_bandwidth = OPUS_BANDWIDTH_FULLBAND;
}
#endif
if (st->channels == 2 && st->force_channels != 1)
stereo_width = compute_stereo_width(pcm, frame_size, st->Fs, &st->width_mem);
else
stereo_width = 0;
total_buffer = delay_compensation;
st->bitrate_bps = user_bitrate_to_bitrate(st, frame_size, max_data_bytes);
frame_rate = st->Fs / frame_size;
if (max_data_bytes < 3 || st->bitrate_bps < 3 * frame_rate * 8
|| (frame_rate < 50 && (max_data_bytes * frame_rate < 300 || st->bitrate_bps < 2400))) {
/*If the space is too low to do something useful, emit 'PLC' frames.*/
int tocmode = st->mode;
int bw = st->bandwidth == 0 ? OPUS_BANDWIDTH_NARROWBAND : st->bandwidth;
if (tocmode == 0)
tocmode = MODE_SILK_ONLY;
if (frame_rate > 100)
tocmode = MODE_CELT_ONLY;
if (frame_rate < 50)
tocmode = MODE_SILK_ONLY;
if (tocmode == MODE_SILK_ONLY && bw > OPUS_BANDWIDTH_WIDEBAND)
bw = OPUS_BANDWIDTH_WIDEBAND;
else if (tocmode == MODE_CELT_ONLY && bw == OPUS_BANDWIDTH_MEDIUMBAND)
bw = OPUS_BANDWIDTH_NARROWBAND;
else if (bw <= OPUS_BANDWIDTH_SUPERWIDEBAND)
bw = OPUS_BANDWIDTH_SUPERWIDEBAND;
data[0] = gen_toc(tocmode, frame_rate, bw, st->stream_channels);
RESTORE_STACK;
return 1;
}
if (!st->use_vbr) {
int cbrBytes;
cbrBytes = IMIN((st->bitrate_bps + 4 * frame_rate) / (8 * frame_rate), max_data_bytes);
st->bitrate_bps = cbrBytes * (8 * frame_rate);
max_data_bytes = cbrBytes;
}
max_rate = frame_rate * max_data_bytes * 8;
/* Equivalent 20-ms rate for mode/channel/bandwidth decisions */
equiv_rate = st->bitrate_bps - (40 * st->channels + 20) * (st->Fs / frame_size - 50);
if (st->signal_type == OPUS_SIGNAL_VOICE)
voice_est = 127;
else if (st->signal_type == OPUS_SIGNAL_MUSIC)
voice_est = 0;
else if (st->voice_ratio >= 0) {
voice_est = st->voice_ratio * 327 >> 8;
/* For AUDIO, never be more than 90% confident of having speech */
if (st->application == OPUS_APPLICATION_AUDIO)
voice_est = IMIN(voice_est, 115);
} else if (st->application == OPUS_APPLICATION_VOIP)
voice_est = 115;
else
voice_est = 48;
if (st->force_channels != OPUS_AUTO && st->channels == 2) {
st->stream_channels = st->force_channels;
} else {
#ifdef FUZZING
/* Random mono/stereo decision */
if (st->channels == 2 && (rand()&0x1F)==0)
st->stream_channels = 3-st->stream_channels;
#else
/* Rate-dependent mono-stereo decision */
if (st->channels == 2) {
opus_int32 stereo_threshold;
stereo_threshold = stereo_music_threshold +
((voice_est * voice_est * (stereo_voice_threshold - stereo_music_threshold)) >> 14);
if (st->stream_channels == 2)
stereo_threshold -= 1000;
else
stereo_threshold += 1000;
st->stream_channels = (equiv_rate > stereo_threshold) ? 2 : 1;
} else {
st->stream_channels = st->channels;
}
#endif
}
equiv_rate = st->bitrate_bps - (40 * st->stream_channels + 20) * (st->Fs / frame_size - 50);
/* Mode selection depending on application and signal type */
if (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY) {
st->mode = MODE_CELT_ONLY;
} else if (st->user_forced_mode == OPUS_AUTO) {
#ifdef FUZZING
/* Random mode switching */
if ((rand()&0xF)==0)
{
if ((rand()&0x1)==0)
st->mode = MODE_CELT_ONLY;
else
st->mode = MODE_SILK_ONLY;
} else {
if (st->prev_mode==MODE_CELT_ONLY)
st->mode = MODE_CELT_ONLY;
else
st->mode = MODE_SILK_ONLY;
}
#else
opus_int32 mode_voice, mode_music;
opus_int32 threshold;
/* Interpolate based on stereo width */
mode_voice = (opus_int32) (MULT16_32_Q15(Q15ONE - stereo_width, mode_thresholds[0][0])
+ MULT16_32_Q15(stereo_width, mode_thresholds[1][0]));
mode_music = (opus_int32) (MULT16_32_Q15(Q15ONE - stereo_width, mode_thresholds[1][1])
+ MULT16_32_Q15(stereo_width, mode_thresholds[1][1]));
/* Interpolate based on speech/music probability */
threshold = mode_music + ((voice_est * voice_est * (mode_voice - mode_music)) >> 14);
/* Bias towards SILK for VoIP because of some useful features */
if (st->application == OPUS_APPLICATION_VOIP)
threshold += 8000;
/*printf("%f %d\n", stereo_width/(float)Q15ONE, threshold);*/
/* Hysteresis */
if (st->prev_mode == MODE_CELT_ONLY)
threshold -= 4000;
else if (st->prev_mode > 0)
threshold += 4000;
st->mode = (equiv_rate >= threshold) ? MODE_CELT_ONLY : MODE_SILK_ONLY;
/* When FEC is enabled and there's enough packet loss, use SILK */
if (st->silk_mode.useInBandFEC && st->silk_mode.packetLossPercentage > (128 - voice_est) >> 4)
st->mode = MODE_SILK_ONLY;
/* When encoding voice and DTX is enabled, set the encoder to SILK mode (at least for now) */
if (st->silk_mode.useDTX && voice_est > 100)
st->mode = MODE_SILK_ONLY;
#endif
} else {
st->mode = st->user_forced_mode;
}
/* Override the chosen mode to make sure we meet the requested frame size */
if (st->mode != MODE_CELT_ONLY && frame_size < st->Fs / 100)
st->mode = MODE_CELT_ONLY;
if (st->lfe)
st->mode = MODE_CELT_ONLY;
/* If max_data_bytes represents less than 8 kb/s, switch to CELT-only mode */
if (max_data_bytes < (frame_rate > 50 ? 12000 : 8000) * frame_size / (st->Fs * 8))
st->mode = MODE_CELT_ONLY;
if (st->stream_channels == 1 && st->prev_channels == 2 && st->silk_mode.toMono == 0
&& st->mode != MODE_CELT_ONLY && st->prev_mode != MODE_CELT_ONLY) {
/* Delay stereo->mono transition by two frames so that SILK can do a smooth downmix */
st->silk_mode.toMono = 1;
st->stream_channels = 2;
} else {
st->silk_mode.toMono = 0;
}
if (st->prev_mode > 0 &&
((st->mode != MODE_CELT_ONLY && st->prev_mode == MODE_CELT_ONLY) ||
(st->mode == MODE_CELT_ONLY && st->prev_mode != MODE_CELT_ONLY))) {
redundancy = 1;
celt_to_silk = (st->mode != MODE_CELT_ONLY);
if (!celt_to_silk) {
/* Switch to SILK/hybrid if frame size is 10 ms or more*/
if (frame_size >= st->Fs / 100) {
st->mode = st->prev_mode;
to_celt = 1;
} else {
redundancy = 0;
}
}
}
/* For the first frame at a new SILK bandwidth */
if (st->silk_bw_switch) {
redundancy = 1;
celt_to_silk = 1;
st->silk_bw_switch = 0;
prefill = 1;
}
if (redundancy) {
/* Fair share of the max size allowed */
redundancy_bytes = IMIN(257, max_data_bytes * (opus_int32) (st->Fs / 200) / (frame_size + st->Fs / 200));
/* For VBR, target the actual bitrate (subject to the limit above) */
if (st->use_vbr)
redundancy_bytes = IMIN(redundancy_bytes, st->bitrate_bps / 1600);
}
if (st->mode != MODE_CELT_ONLY && st->prev_mode == MODE_CELT_ONLY) {
silk_EncControlStruct dummy;
silk_InitEncoder(silk_enc, st->arch, &dummy);
prefill = 1;
}
/* Automatic (rate-dependent) bandwidth selection */
if (st->mode == MODE_CELT_ONLY || st->first || st->silk_mode.allowBandwidthSwitch) {
const opus_int32 *voice_bandwidth_thresholds, *music_bandwidth_thresholds;
opus_int32 bandwidth_thresholds[8];
int bandwidth = OPUS_BANDWIDTH_FULLBAND;
opus_int32 equiv_rate2;
equiv_rate2 = equiv_rate;
if (st->mode != MODE_CELT_ONLY) {
/* Adjust the threshold +/- 10% depending on complexity */
equiv_rate2 = equiv_rate2 * (45 + st->silk_mode.complexity) / 50;
/* CBR is less efficient by ~1 kb/s */
if (!st->use_vbr)
equiv_rate2 -= 1000;
}
if (st->channels == 2 && st->force_channels != 1) {
voice_bandwidth_thresholds = stereo_voice_bandwidth_thresholds;
music_bandwidth_thresholds = stereo_music_bandwidth_thresholds;
} else {
voice_bandwidth_thresholds = mono_voice_bandwidth_thresholds;
music_bandwidth_thresholds = mono_music_bandwidth_thresholds;
}
/* Interpolate bandwidth thresholds depending on voice estimation */
for (i = 0; i < 8; i++) {
bandwidth_thresholds[i] = music_bandwidth_thresholds[i]
+ ((voice_est * voice_est *
(voice_bandwidth_thresholds[i] - music_bandwidth_thresholds[i])) >> 14);
}
do {
int threshold, hysteresis;
threshold = bandwidth_thresholds[2 * (bandwidth - OPUS_BANDWIDTH_MEDIUMBAND)];
hysteresis = bandwidth_thresholds[2 * (bandwidth - OPUS_BANDWIDTH_MEDIUMBAND) + 1];
if (!st->first) {
if (st->bandwidth >= bandwidth)
threshold -= hysteresis;
else
threshold += hysteresis;
}
if (equiv_rate2 >= threshold)
break;
} while (--bandwidth > OPUS_BANDWIDTH_NARROWBAND);
st->bandwidth = bandwidth;
/* Prevents any transition to SWB/FB until the SILK layer has fully
switched to WB mode and turned the variable LP filter off */
if (!st->first && st->mode != MODE_CELT_ONLY && !st->silk_mode.inWBmodeWithoutVariableLP &&
st->bandwidth > OPUS_BANDWIDTH_WIDEBAND)
st->bandwidth = OPUS_BANDWIDTH_WIDEBAND;
}
if (st->bandwidth > st->max_bandwidth)
st->bandwidth = st->max_bandwidth;
if (st->user_bandwidth != OPUS_AUTO)
st->bandwidth = st->user_bandwidth;
/* This prevents us from using hybrid at unsafe CBR/max rates */
if (st->mode != MODE_CELT_ONLY && max_rate < 15000) {
st->bandwidth = IMIN(st->bandwidth, OPUS_BANDWIDTH_WIDEBAND);
}
/* Prevents Opus from wasting bits on frequencies that are above
the Nyquist rate of the input signal */
if (st->Fs <= 24000 && st->bandwidth > OPUS_BANDWIDTH_SUPERWIDEBAND)
st->bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
if (st->Fs <= 16000 && st->bandwidth > OPUS_BANDWIDTH_WIDEBAND)
st->bandwidth = OPUS_BANDWIDTH_WIDEBAND;
if (st->Fs <= 12000 && st->bandwidth > OPUS_BANDWIDTH_MEDIUMBAND)
st->bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
if (st->Fs <= 8000 && st->bandwidth > OPUS_BANDWIDTH_NARROWBAND)
st->bandwidth = OPUS_BANDWIDTH_NARROWBAND;
#ifndef DISABLE_FLOAT_API
/* Use detected bandwidth to reduce the encoded bandwidth. */
if (st->detected_bandwidth && st->user_bandwidth == OPUS_AUTO) {
int min_detected_bandwidth;
/* Makes bandwidth detection more conservative just in case the detector
gets it wrong when we could have coded a high bandwidth transparently.
When operating in SILK/hybrid mode, we don't go below wideband to avoid
more complicated switches that require redundancy. */
if (equiv_rate <= 18000 * st->stream_channels && st->mode == MODE_CELT_ONLY)
min_detected_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
else if (equiv_rate <= 24000 * st->stream_channels && st->mode == MODE_CELT_ONLY)
min_detected_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
else if (equiv_rate <= 30000 * st->stream_channels)
min_detected_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
else if (equiv_rate <= 44000 * st->stream_channels)
min_detected_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
else
min_detected_bandwidth = OPUS_BANDWIDTH_FULLBAND;
st->detected_bandwidth = IMAX(st->detected_bandwidth, min_detected_bandwidth);
st->bandwidth = IMIN(st->bandwidth, st->detected_bandwidth);
}
#endif
celt_encoder_ctl(celt_enc, OPUS_SET_LSB_DEPTH(lsb_depth));
/* CELT mode doesn't support mediumband, use wideband instead */
if (st->mode == MODE_CELT_ONLY && st->bandwidth == OPUS_BANDWIDTH_MEDIUMBAND)
st->bandwidth = OPUS_BANDWIDTH_WIDEBAND;
if (st->lfe)
st->bandwidth = OPUS_BANDWIDTH_NARROWBAND;
/* Can't support higher than wideband for >20 ms frames */
if (frame_size > st->Fs / 50 && (st->mode == MODE_CELT_ONLY || st->bandwidth > OPUS_BANDWIDTH_WIDEBAND)) {
VARDECL(unsigned char, tmp_data);
int nb_frames;
int bak_mode, bak_bandwidth, bak_channels, bak_to_mono;
VARDECL(OpusRepacketizer, rp);
opus_int32 bytes_per_frame;
opus_int32 repacketize_len;
#ifndef DISABLE_FLOAT_API
if (analysis_read_pos_bak != -1) {
st->analysis.read_pos = analysis_read_pos_bak;
st->analysis.read_subframe = analysis_read_subframe_bak;
}
#endif
nb_frames = frame_size > st->Fs / 25 ? 3 : 2;
bytes_per_frame = IMIN(1276, (out_data_bytes - 3) / nb_frames);
ALLOC(tmp_data, nb_frames * bytes_per_frame, unsigned char);
ALLOC(rp, 1, OpusRepacketizer);
opus_repacketizer_init(rp);
bak_mode = st->user_forced_mode;
bak_bandwidth = st->user_bandwidth;
bak_channels = st->force_channels;
st->user_forced_mode = st->mode;
st->user_bandwidth = st->bandwidth;
st->force_channels = st->stream_channels;
bak_to_mono = st->silk_mode.toMono;
if (bak_to_mono)
st->force_channels = 1;
else
st->prev_channels = st->stream_channels;
for (i = 0; i < nb_frames; i++) {
int tmp_len;
st->silk_mode.toMono = 0;
/* When switching from SILK/Hybrid to CELT, only ask for a switch at the last frame */
if (to_celt && i == nb_frames - 1)
st->user_forced_mode = MODE_CELT_ONLY;
tmp_len = opus_encode_native(st, pcm + i * (st->channels * st->Fs / 50), st->Fs / 50,
tmp_data + i * bytes_per_frame, bytes_per_frame, lsb_depth,
NULL, 0, c1, c2, analysis_channels, downmix);
if (tmp_len < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
ret = opus_repacketizer_cat(rp, tmp_data + i * bytes_per_frame, tmp_len);
if (ret < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
}
if (st->use_vbr)
repacketize_len = out_data_bytes;
else
repacketize_len = IMIN(3 * st->bitrate_bps / (3 * 8 * 50 / nb_frames), out_data_bytes);
ret = opus_repacketizer_out_range_impl(rp, 0, nb_frames, data, repacketize_len, 0, !st->use_vbr);
if (ret < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
st->user_forced_mode = bak_mode;
st->user_bandwidth = bak_bandwidth;
st->force_channels = bak_channels;
st->silk_mode.toMono = bak_to_mono;
RESTORE_STACK;
return ret;
}
curr_bandwidth = st->bandwidth;
/* Chooses the appropriate mode for speech
*NEVER* switch to/from CELT-only mode here as this will invalidate some assumptions */
if (st->mode == MODE_SILK_ONLY && curr_bandwidth > OPUS_BANDWIDTH_WIDEBAND)
st->mode = MODE_HYBRID;
if (st->mode == MODE_HYBRID && curr_bandwidth <= OPUS_BANDWIDTH_WIDEBAND)
st->mode = MODE_SILK_ONLY;
/* printf("%d %d %d %d\n", st->bitrate_bps, st->stream_channels, st->mode, curr_bandwidth); */
bytes_target = IMIN(max_data_bytes - redundancy_bytes, st->bitrate_bps * frame_size / (st->Fs * 8)) - 1;
data += 1;
ec_enc_init(&enc, data, max_data_bytes - 1);
ALLOC(pcm_buf, (total_buffer + frame_size) * st->channels, opus_val16);
for (i = 0; i < total_buffer * st->channels; i++)
pcm_buf[i] = st->delay_buffer[(st->encoder_buffer - total_buffer) * st->channels + i];
if (st->mode == MODE_CELT_ONLY)
hp_freq_smth1 = silk_LSHIFT(silk_lin2log(VARIABLE_HP_MIN_CUTOFF_HZ), 8);
else
hp_freq_smth1 = ((silk_encoder *) silk_enc)->state_Fxx[0].sCmn.variable_HP_smth1_Q15;
st->variable_HP_smth2_Q15 = silk_SMLAWB(st->variable_HP_smth2_Q15,
hp_freq_smth1 - st->variable_HP_smth2_Q15,
SILK_FIX_CONST(VARIABLE_HP_SMTH_COEF2, 16));
/* convert from log scale to Hertz */
cutoff_Hz = silk_log2lin(silk_RSHIFT(st->variable_HP_smth2_Q15, 8));
if (st->application == OPUS_APPLICATION_VOIP) {
hp_cutoff(pcm, cutoff_Hz, &pcm_buf[total_buffer * st->channels], st->hp_mem, frame_size, st->channels, st->Fs);
} else {
dc_reject(pcm, 3, &pcm_buf[total_buffer * st->channels], st->hp_mem, frame_size, st->channels, st->Fs);
}
/* SILK processing */
HB_gain = Q15ONE;
if (st->mode != MODE_CELT_ONLY) {
opus_int32 total_bitRate, celt_rate;
#ifdef FIXED_POINT
const opus_int16 *pcm_silk;
#else
VARDECL(opus_int16, pcm_silk);
ALLOC(pcm_silk, st->channels * frame_size, opus_int16);
#endif
/* Distribute bits between SILK and CELT */
total_bitRate = 8 * bytes_target * frame_rate;
if (st->mode == MODE_HYBRID) {
int HB_gain_ref;
/* Base rate for SILK */
st->silk_mode.bitRate = st->stream_channels * (5000 + 1000 * (st->Fs == 100 * frame_size));
if (curr_bandwidth == OPUS_BANDWIDTH_SUPERWIDEBAND) {
/* SILK gets 2/3 of the remaining bits */
st->silk_mode.bitRate += (total_bitRate - st->silk_mode.bitRate) * 2 / 3;
} else { /* FULLBAND */
/* SILK gets 3/5 of the remaining bits */
st->silk_mode.bitRate += (total_bitRate - st->silk_mode.bitRate) * 3 / 5;
}
/* Don't let SILK use more than 80% */
if (st->silk_mode.bitRate > total_bitRate * 4 / 5) {
st->silk_mode.bitRate = total_bitRate * 4 / 5;
}
if (!st->energy_masking) {
/* Increasingly attenuate high band when it gets allocated fewer bits */
celt_rate = total_bitRate - st->silk_mode.bitRate;
HB_gain_ref = (curr_bandwidth == OPUS_BANDWIDTH_SUPERWIDEBAND) ? 3000 : 3600;
HB_gain = SHL32((opus_val32) celt_rate, 9) /
SHR32((opus_val32) celt_rate + st->stream_channels * HB_gain_ref, 6);
HB_gain = HB_gain < Q15ONE * 6 / 7 ? HB_gain + Q15ONE / 7 : Q15ONE;
}
} else {
/* SILK gets all bits */
st->silk_mode.bitRate = total_bitRate;
}
/* Surround masking for SILK */
if (st->energy_masking && st->use_vbr && !st->lfe) {
opus_val32 mask_sum = 0;
opus_val16 masking_depth;
opus_int32 rate_offset;
int c;
int end = 17;
opus_int16 srate = 16000;
if (st->bandwidth == OPUS_BANDWIDTH_NARROWBAND) {
end = 13;
srate = 8000;
} else if (st->bandwidth == OPUS_BANDWIDTH_MEDIUMBAND) {
end = 15;
srate = 12000;
}
for (c = 0; c < st->channels; c++) {
for (i = 0; i < end; i++) {
opus_val16 mask;
mask = MAX16(MIN16(st->energy_masking[21 * c + i],
QCONST16(.5f, DB_SHIFT)), -QCONST16(2.0f, DB_SHIFT));
if (mask > 0)
mask = HALF16(mask);
mask_sum += mask;
}
}
/* Conservative rate reduction, we cut the masking in half */
masking_depth = mask_sum / end * st->channels;
masking_depth += QCONST16(.2f, DB_SHIFT);
rate_offset = (opus_int32) PSHR32(MULT16_16(srate, masking_depth), DB_SHIFT);
rate_offset = MAX32(rate_offset, -2 * st->silk_mode.bitRate / 3);
/* Split the rate change between the SILK and CELT part for hybrid. */
if (st->bandwidth == OPUS_BANDWIDTH_SUPERWIDEBAND || st->bandwidth == OPUS_BANDWIDTH_FULLBAND)
st->silk_mode.bitRate += 3 * rate_offset / 5;
else
st->silk_mode.bitRate += rate_offset;
bytes_target += rate_offset * frame_size / (8 * st->Fs);
}
st->silk_mode.payloadSize_ms = 1000 * frame_size / st->Fs;
st->silk_mode.nChannelsAPI = st->channels;
st->silk_mode.nChannelsInternal = st->stream_channels;
if (curr_bandwidth == OPUS_BANDWIDTH_NARROWBAND) {
st->silk_mode.desiredInternalSampleRate = 8000;
} else if (curr_bandwidth == OPUS_BANDWIDTH_MEDIUMBAND) {
st->silk_mode.desiredInternalSampleRate = 12000;
} else {
silk_assert(st->mode == MODE_HYBRID || curr_bandwidth == OPUS_BANDWIDTH_WIDEBAND);
st->silk_mode.desiredInternalSampleRate = 16000;
}
if (st->mode == MODE_HYBRID) {
/* Don't allow bandwidth reduction at lowest bitrates in hybrid mode */
st->silk_mode.minInternalSampleRate = 16000;
} else {
st->silk_mode.minInternalSampleRate = 8000;
}
if (st->mode == MODE_SILK_ONLY) {
opus_int32 effective_max_rate = max_rate;
st->silk_mode.maxInternalSampleRate = 16000;
if (frame_rate > 50)
effective_max_rate = effective_max_rate * 2 / 3;
if (effective_max_rate < 13000) {
st->silk_mode.maxInternalSampleRate = 12000;
st->silk_mode.desiredInternalSampleRate = IMIN(12000, st->silk_mode.desiredInternalSampleRate);
}
if (effective_max_rate < 9600) {
st->silk_mode.maxInternalSampleRate = 8000;
st->silk_mode.desiredInternalSampleRate = IMIN(8000, st->silk_mode.desiredInternalSampleRate);
}
} else {
st->silk_mode.maxInternalSampleRate = 16000;
}
st->silk_mode.useCBR = !st->use_vbr;
/* Call SILK encoder for the low band */
nBytes = IMIN(1275, max_data_bytes - 1 - redundancy_bytes);
st->silk_mode.maxBits = nBytes * 8;
/* Only allow up to 90% of the bits for hybrid mode*/
if (st->mode == MODE_HYBRID)
st->silk_mode.maxBits = (opus_int32) st->silk_mode.maxBits * 9 / 10;
if (st->silk_mode.useCBR) {
st->silk_mode.maxBits = (st->silk_mode.bitRate * frame_size / (st->Fs * 8)) * 8;
/* Reduce the initial target to make it easier to reach the CBR rate */
st->silk_mode.bitRate = IMAX(1, st->silk_mode.bitRate - 2000);
}
if (prefill) {
opus_int32 zero = 0;
int prefill_offset;
/* Use a smooth onset for the SILK prefill to avoid the encoder trying to encode
a discontinuity. The exact location is what we need to avoid leaving any "gap"
in the audio when mixing with the redundant CELT frame. Here we can afford to
overwrite st->delay_buffer because the only thing that uses it before it gets
rewritten is tmp_prefill[] and even then only the part after the ramp really
gets used (rather than sent to the encoder and discarded) */
prefill_offset = st->channels * (st->encoder_buffer - st->delay_compensation - st->Fs / 400);
gain_fade(st->delay_buffer + prefill_offset, st->delay_buffer + prefill_offset,
0, Q15ONE, celt_mode->overlap, st->Fs / 400, st->channels, celt_mode->window, st->Fs);
for (i = 0; i < prefill_offset; i++)
st->delay_buffer[i] = 0;
#ifdef FIXED_POINT
pcm_silk = st->delay_buffer;
#else
for (i = 0; i < st->encoder_buffer * st->channels; i++)
pcm_silk[i] = FLOAT2INT16(st->delay_buffer[i]);
#endif
silk_Encode(silk_enc, &st->silk_mode, pcm_silk, st->encoder_buffer, NULL, &zero, 1);
}
#ifdef FIXED_POINT
pcm_silk = pcm_buf+total_buffer*st->channels;
#else
for (i = 0; i < frame_size * st->channels; i++)
pcm_silk[i] = FLOAT2INT16(pcm_buf[total_buffer * st->channels + i]);
#endif
ret = silk_Encode(silk_enc, &st->silk_mode, pcm_silk, frame_size, &enc, &nBytes, 0);
if (ret) {
/*fprintf (stderr, "SILK encode error: %d\n", ret);*/
/* Handle error */
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
if (nBytes == 0) {
st->rangeFinal = 0;
data[-1] = gen_toc(st->mode, st->Fs / frame_size, curr_bandwidth, st->stream_channels);
RESTORE_STACK;
return 1;
}
/* Extract SILK internal bandwidth for signaling in first byte */
if (st->mode == MODE_SILK_ONLY) {
if (st->silk_mode.internalSampleRate == 8000) {
curr_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
} else if (st->silk_mode.internalSampleRate == 12000) {
curr_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
} else if (st->silk_mode.internalSampleRate == 16000) {
curr_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
}
} else {
silk_assert(st->silk_mode.internalSampleRate == 16000);
}
st->silk_mode.opusCanSwitch = st->silk_mode.switchReady;
/* FIXME: How do we allocate the redundancy for CBR? */
if (st->silk_mode.opusCanSwitch) {
redundancy = 1;
celt_to_silk = 0;
st->silk_bw_switch = 1;
}
}
/* CELT processing */
{
int endband = 21;
switch (curr_bandwidth) {
case OPUS_BANDWIDTH_NARROWBAND:
endband = 13;
break;
case OPUS_BANDWIDTH_MEDIUMBAND:
case OPUS_BANDWIDTH_WIDEBAND:
endband = 17;
break;
case OPUS_BANDWIDTH_SUPERWIDEBAND:
endband = 19;
break;
case OPUS_BANDWIDTH_FULLBAND:
endband = 21;
break;
}
celt_encoder_ctl(celt_enc, CELT_SET_END_BAND(endband));
celt_encoder_ctl(celt_enc, CELT_SET_CHANNELS(st->stream_channels));
}
celt_encoder_ctl(celt_enc, OPUS_SET_BITRATE(OPUS_BITRATE_MAX));
if (st->mode != MODE_SILK_ONLY) {
opus_val32 celt_pred = 2;
celt_encoder_ctl(celt_enc, OPUS_SET_VBR(0));
/* We may still decide to disable prediction later */
if (st->silk_mode.reducedDependency)
celt_pred = 0;
celt_encoder_ctl(celt_enc, CELT_SET_PREDICTION(celt_pred));
if (st->mode == MODE_HYBRID) {
int len;
len = (ec_tell(&enc) + 7) >> 3;
if (redundancy)
len += st->mode == MODE_HYBRID ? 3 : 1;
if (st->use_vbr) {
nb_compr_bytes = len + bytes_target - (st->silk_mode.bitRate * frame_size) / (8 * st->Fs);
} else {
/* check if SILK used up too much */
nb_compr_bytes = len > bytes_target ? len : bytes_target;
}
} else {
if (st->use_vbr) {
opus_int32 bonus = 0;
#ifndef DISABLE_FLOAT_API
if (st->variable_duration == OPUS_FRAMESIZE_VARIABLE && frame_size != st->Fs / 50) {
bonus = (60 * st->stream_channels + 40) * (st->Fs / frame_size - 50);
if (analysis_info.valid)
bonus = (opus_int32) (bonus * (1.f + .5f * analysis_info.tonality));
}
#endif
celt_encoder_ctl(celt_enc, OPUS_SET_VBR(1));
celt_encoder_ctl(celt_enc, OPUS_SET_VBR_CONSTRAINT(st->vbr_constraint));
celt_encoder_ctl(celt_enc, OPUS_SET_BITRATE(st->bitrate_bps + bonus));
nb_compr_bytes = max_data_bytes - 1 - redundancy_bytes;
} else {
nb_compr_bytes = bytes_target;
}
}
} else {
nb_compr_bytes = 0;
}
ALLOC(tmp_prefill, st->channels * st->Fs / 400, opus_val16);
if (st->mode != MODE_SILK_ONLY && st->mode != st->prev_mode && st->prev_mode > 0) {
for (i = 0; i < st->channels * st->Fs / 400; i++)
tmp_prefill[i] = st->delay_buffer[(st->encoder_buffer - total_buffer - st->Fs / 400) * st->channels + i];
}
for (i = 0; i < st->channels * (st->encoder_buffer - (frame_size + total_buffer)); i++)
st->delay_buffer[i] = st->delay_buffer[i + st->channels * frame_size];
for (; i < st->encoder_buffer * st->channels; i++)
st->delay_buffer[i] = pcm_buf[(frame_size + total_buffer - st->encoder_buffer) * st->channels + i];
/* gain_fade() and stereo_fade() need to be after the buffer copying
because we don't want any of this to affect the SILK part */
if (st->prev_HB_gain < Q15ONE || HB_gain < Q15ONE) {
gain_fade(pcm_buf, pcm_buf,
st->prev_HB_gain, HB_gain, celt_mode->overlap, frame_size, st->channels, celt_mode->window, st->Fs);
}
st->prev_HB_gain = HB_gain;
if (st->mode != MODE_HYBRID || st->stream_channels == 1)
st->silk_mode.stereoWidth_Q14 = IMIN((1 << 14), 2 * IMAX(0, equiv_rate - 30000));
if (!st->energy_masking && st->channels == 2) {
/* Apply stereo width reduction (at low bitrates) */
if (st->hybrid_stereo_width_Q14 < (1 << 14) || st->silk_mode.stereoWidth_Q14 < (1 << 14)) {
opus_val16 g1, g2;
g1 = st->hybrid_stereo_width_Q14;
g2 = (opus_val16) (st->silk_mode.stereoWidth_Q14);
#ifdef FIXED_POINT
g1 = g1==16384 ? Q15ONE : SHL16(g1,1);
g2 = g2==16384 ? Q15ONE : SHL16(g2,1);
#else
g1 *= (1.f / 16384);
g2 *= (1.f / 16384);
#endif
stereo_fade(pcm_buf, pcm_buf, g1, g2, celt_mode->overlap,
frame_size, st->channels, celt_mode->window, st->Fs);
st->hybrid_stereo_width_Q14 = st->silk_mode.stereoWidth_Q14;
}
}
if (st->mode != MODE_CELT_ONLY && ec_tell(&enc) + 17 + 20 * (st->mode == MODE_HYBRID) <= 8 * (max_data_bytes - 1)) {
/* For SILK mode, the redundancy is inferred from the length */
if (st->mode == MODE_HYBRID && (redundancy || ec_tell(&enc) + 37 <= 8 * nb_compr_bytes))
ec_enc_bit_logp(&enc, redundancy, 12);
if (redundancy) {
int max_redundancy;
ec_enc_bit_logp(&enc, celt_to_silk, 1);
if (st->mode == MODE_HYBRID)
max_redundancy = (max_data_bytes - 1) - nb_compr_bytes;
else
max_redundancy = (max_data_bytes - 1) - ((ec_tell(&enc) + 7) >> 3);
/* Target the same bit-rate for redundancy as for the rest,
up to a max of 257 bytes */
redundancy_bytes = IMIN(max_redundancy, st->bitrate_bps / 1600);
redundancy_bytes = IMIN(257, IMAX(2, redundancy_bytes));
if (st->mode == MODE_HYBRID)
ec_enc_uint(&enc, redundancy_bytes - 2, 256);
}
} else {
redundancy = 0;
}
if (!redundancy) {
st->silk_bw_switch = 0;
redundancy_bytes = 0;
}
if (st->mode != MODE_CELT_ONLY)start_band = 17;
if (st->mode == MODE_SILK_ONLY) {
ret = (ec_tell(&enc) + 7) >> 3;
ec_enc_done(&enc);
nb_compr_bytes = ret;
} else {
nb_compr_bytes = IMIN((max_data_bytes - 1) - redundancy_bytes, nb_compr_bytes);
ec_enc_shrink(&enc, nb_compr_bytes);
}
#ifndef DISABLE_FLOAT_API
if (redundancy || st->mode != MODE_SILK_ONLY)
celt_encoder_ctl(celt_enc, CELT_SET_ANALYSIS(&analysis_info));
#endif
/* 5 ms redundant frame for CELT->SILK */
if (redundancy && celt_to_silk) {
int err;
celt_encoder_ctl(celt_enc, CELT_SET_START_BAND(0));
celt_encoder_ctl(celt_enc, OPUS_SET_VBR(0));
err = celt_encode_with_ec(celt_enc, pcm_buf, st->Fs / 200, data + nb_compr_bytes, redundancy_bytes, NULL);
if (err < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
celt_encoder_ctl(celt_enc, OPUS_GET_FINAL_RANGE(&redundant_rng));
celt_encoder_ctl(celt_enc, OPUS_RESET_STATE);
}
celt_encoder_ctl(celt_enc, CELT_SET_START_BAND(start_band));
if (st->mode != MODE_SILK_ONLY) {
if (st->mode != st->prev_mode && st->prev_mode > 0) {
unsigned char dummy[2];
celt_encoder_ctl(celt_enc, OPUS_RESET_STATE);
/* Prefilling */
celt_encode_with_ec(celt_enc, tmp_prefill, st->Fs / 400, dummy, 2, NULL);
celt_encoder_ctl(celt_enc, CELT_SET_PREDICTION(0));
}
/* If false, we already busted the budget and we'll end up with a "PLC packet" */
if (ec_tell(&enc) <= 8 * nb_compr_bytes) {
ret = celt_encode_with_ec(celt_enc, pcm_buf, frame_size, NULL, nb_compr_bytes, &enc);
if (ret < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
}
}
/* 5 ms redundant frame for SILK->CELT */
if (redundancy && !celt_to_silk) {
int err;
unsigned char dummy[2];
int N2, N4;
N2 = st->Fs / 200;
N4 = st->Fs / 400;
celt_encoder_ctl(celt_enc, OPUS_RESET_STATE);
celt_encoder_ctl(celt_enc, CELT_SET_START_BAND(0));
celt_encoder_ctl(celt_enc, CELT_SET_PREDICTION(0));
/* NOTE: We could speed this up slightly (at the expense of code size) by just adding a function that prefills the buffer */
celt_encode_with_ec(celt_enc, pcm_buf + st->channels * (frame_size - N2 - N4), N4, dummy, 2, NULL);
err = celt_encode_with_ec(celt_enc, pcm_buf + st->channels * (frame_size - N2), N2, data + nb_compr_bytes,
redundancy_bytes, NULL);
if (err < 0) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
celt_encoder_ctl(celt_enc, OPUS_GET_FINAL_RANGE(&redundant_rng));
}
/* Signalling the mode in the first byte */
data--;
data[0] = gen_toc(st->mode, st->Fs / frame_size, curr_bandwidth, st->stream_channels);
st->rangeFinal = enc.rng ^ redundant_rng;
if (to_celt)
st->prev_mode = MODE_CELT_ONLY;
else
st->prev_mode = st->mode;
st->prev_channels = st->stream_channels;
st->prev_framesize = frame_size;
st->first = 0;
/* In the unlikely case that the SILK encoder busted its target, tell
the decoder to call the PLC */
if (ec_tell(&enc) > (max_data_bytes - 1) * 8) {
if (max_data_bytes < 2) {
RESTORE_STACK;
return OPUS_BUFFER_TOO_SMALL;
}
data[1] = 0;
ret = 1;
st->rangeFinal = 0;
} else if (st->mode == MODE_SILK_ONLY && !redundancy) {
/*When in LPC only mode it's perfectly
reasonable to strip off trailing zero bytes as
the required range decoder behavior is to
fill these in. This can't be done when the MDCT
modes are used because the decoder needs to know
the actual length for allocation purposes.*/
while (ret > 2 && data[ret] == 0)ret--;
}
/* Count ToC and redundancy */
ret += 1 + redundancy_bytes;
if (!st->use_vbr) {
if (opus_packet_pad(data, ret, max_data_bytes) != OPUS_OK) {
RESTORE_STACK;
return OPUS_INTERNAL_ERROR;
}
ret = max_data_bytes;
}
RESTORE_STACK;
return ret;
}
#ifdef FIXED_POINT
#ifndef DISABLE_FLOAT_API
/* Float-input entry point for the fixed-point build.
 *
 * Converts the float PCM to 16-bit samples and forwards them to
 * opus_encode_native(); the original float buffer is still passed along
 * for the analysis stage. Returns the packet length in bytes, or a
 * negative OPUS_* error code. */
opus_int32 opus_encode_float(OpusEncoder *st, const float *pcm, int analysis_frame_size,
                             unsigned char *data, opus_int32 max_data_bytes)
{
    int frame_size;
    int delay_compensation;
    int n, ret;
    VARDECL(opus_int16, in);
    ALLOC_STACK;
    /* Restricted low-delay mode does not apply the lookahead compensation. */
    delay_compensation = (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY) ?
            0 : st->delay_compensation;
    frame_size = compute_frame_size(pcm, analysis_frame_size,
                                    st->variable_duration, st->channels, st->Fs, st->bitrate_bps,
                                    delay_compensation, downmix_float, st->analysis.subframe_mem);
    ALLOC(in, frame_size * st->channels, opus_int16);
    /* Quantize every float sample down to 16 bits before encoding. */
    for (n = 0; n < frame_size * st->channels; n++)
        in[n] = FLOAT2INT16(pcm[n]);
    ret = opus_encode_native(st, in, frame_size, data, max_data_bytes, 16, pcm, analysis_frame_size, 0, -2, st->channels, downmix_float);
    RESTORE_STACK;
    return ret;
}
#endif
/* 16-bit integer entry point for the fixed-point build.
 *
 * No sample conversion is needed: the frame size is computed (taking
 * delay compensation into account unless the application is restricted
 * low-delay) and the samples are encoded directly at 16-bit depth. */
opus_int32 opus_encode(OpusEncoder *st, const opus_int16 *pcm, int analysis_frame_size,
                       unsigned char *data, opus_int32 out_data_bytes)
{
    int delay_compensation;
    int frame_size;
    delay_compensation = (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY) ?
            0 : st->delay_compensation;
    frame_size = compute_frame_size(pcm, analysis_frame_size,
                                    st->variable_duration, st->channels, st->Fs, st->bitrate_bps,
                                    delay_compensation, downmix_int
#ifndef DISABLE_FLOAT_API
                                    , st->analysis.subframe_mem
#endif
                                    );
    return opus_encode_native(st, pcm, frame_size, data, out_data_bytes, 16, pcm, analysis_frame_size, 0, -2, st->channels, downmix_int);
}
#else
/* 16-bit integer entry point for the floating-point build.
 *
 * Scales the integer samples into the [-1, 1) float range expected by
 * the native encoder, then encodes them at 16-bit depth. Returns the
 * packet length in bytes, or a negative OPUS_* error code. */
opus_int32 opus_encode(OpusEncoder *st, const opus_int16 *pcm, int analysis_frame_size,
                       unsigned char *data, opus_int32 max_data_bytes) {
    int frame_size;
    int delay_compensation;
    int n, ret;
    VARDECL(float, in);
    ALLOC_STACK;
    /* Restricted low-delay mode does not apply the lookahead compensation. */
    delay_compensation = (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY) ?
            0 : st->delay_compensation;
    frame_size = compute_frame_size(pcm, analysis_frame_size,
                                    st->variable_duration, st->channels, st->Fs, st->bitrate_bps,
                                    delay_compensation, downmix_int, st->analysis.subframe_mem);
    ALLOC(in, frame_size * st->channels, float);
    /* Normalize each 16-bit sample into float range. */
    for (n = 0; n < frame_size * st->channels; n++)
        in[n] = (1.0f / 32768) * pcm[n];
    ret = opus_encode_native(st, in, frame_size, data, max_data_bytes, 16, pcm, analysis_frame_size, 0, -2,
                             st->channels, downmix_int);
    RESTORE_STACK;
    return ret;
}
/* Float-input entry point for the floating-point build.
 *
 * The samples are already in the encoder's native format, so this only
 * computes the frame size and forwards to opus_encode_native() with a
 * 24-bit LSB depth. */
opus_int32 opus_encode_float(OpusEncoder *st, const float *pcm, int analysis_frame_size,
                             unsigned char *data, opus_int32 out_data_bytes) {
    int delay_compensation;
    int frame_size;
    delay_compensation = (st->application == OPUS_APPLICATION_RESTRICTED_LOWDELAY) ?
            0 : st->delay_compensation;
    frame_size = compute_frame_size(pcm, analysis_frame_size,
                                    st->variable_duration, st->channels, st->Fs, st->bitrate_bps,
                                    delay_compensation, downmix_float, st->analysis.subframe_mem);
    return opus_encode_native(st, pcm, frame_size, data, out_data_bytes, 24,
                              pcm, analysis_frame_size, 0, -2, st->channels, downmix_float);
}
#endif
/* Varargs control interface for the encoder (OPUS_SET_* / OPUS_GET_* and a
 * few CELT-specific requests). Each request consumes one vararg, either a
 * value (setters) or a pointer to receive a value (getters). Returns
 * OPUS_OK on success, OPUS_BAD_ARG for invalid/NULL arguments, or
 * OPUS_UNIMPLEMENTED for unknown requests. */
int opus_encoder_ctl(OpusEncoder *st, int request, ...) {
    int ret;
    CELTEncoder *celt_enc;
    va_list ap;
    ret = OPUS_OK;
    va_start(ap, request);
    /* The CELT sub-encoder is embedded at a fixed byte offset inside the
     * OpusEncoder allocation; several requests below are forwarded to it. */
    celt_enc = (CELTEncoder *) ((char *) st + st->celt_enc_offset);
    switch (request) {
    case OPUS_SET_APPLICATION_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        /* The application can only be changed before the first frame. */
        if ((value != OPUS_APPLICATION_VOIP && value != OPUS_APPLICATION_AUDIO
             && value != OPUS_APPLICATION_RESTRICTED_LOWDELAY)
            || (!st->first && st->application != value)) {
            ret = OPUS_BAD_ARG;
            break;
        }
        st->application = value;
    }
    break;
    case OPUS_GET_APPLICATION_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->application;
    }
    break;
    case OPUS_SET_BITRATE_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        /* Clamp explicit bitrates to [500, 300000 * channels] b/s. */
        if (value != OPUS_AUTO && value != OPUS_BITRATE_MAX) {
            if (value <= 0)
                goto bad_arg;
            else if (value <= 500)
                value = 500;
            else if (value > (opus_int32) 300000 * st->channels)
                value = (opus_int32) 300000 * st->channels;
        }
        st->user_bitrate_bps = value;
    }
    break;
    case OPUS_GET_BITRATE_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        /* Resolve OPUS_AUTO/OPUS_BITRATE_MAX to an actual rate. */
        *value = user_bitrate_to_bitrate(st, st->prev_framesize, 1276);
    }
    break;
    case OPUS_SET_FORCE_CHANNELS_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if ((value < 1 || value > st->channels) && value != OPUS_AUTO) {
            goto bad_arg;
        }
        st->force_channels = value;
    }
    break;
    case OPUS_GET_FORCE_CHANNELS_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->force_channels;
    }
    break;
    case OPUS_SET_MAX_BANDWIDTH_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < OPUS_BANDWIDTH_NARROWBAND || value > OPUS_BANDWIDTH_FULLBAND) {
            goto bad_arg;
        }
        st->max_bandwidth = value;
        /* Keep the SILK internal rate in sync with the bandwidth cap. */
        if (st->max_bandwidth == OPUS_BANDWIDTH_NARROWBAND) {
            st->silk_mode.maxInternalSampleRate = 8000;
        } else if (st->max_bandwidth == OPUS_BANDWIDTH_MEDIUMBAND) {
            st->silk_mode.maxInternalSampleRate = 12000;
        } else {
            st->silk_mode.maxInternalSampleRate = 16000;
        }
    }
    break;
    case OPUS_GET_MAX_BANDWIDTH_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->max_bandwidth;
    }
    break;
    case OPUS_SET_BANDWIDTH_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if ((value < OPUS_BANDWIDTH_NARROWBAND || value > OPUS_BANDWIDTH_FULLBAND) && value != OPUS_AUTO) {
            goto bad_arg;
        }
        st->user_bandwidth = value;
        /* Keep the SILK internal rate in sync with the requested bandwidth. */
        if (st->user_bandwidth == OPUS_BANDWIDTH_NARROWBAND) {
            st->silk_mode.maxInternalSampleRate = 8000;
        } else if (st->user_bandwidth == OPUS_BANDWIDTH_MEDIUMBAND) {
            st->silk_mode.maxInternalSampleRate = 12000;
        } else {
            st->silk_mode.maxInternalSampleRate = 16000;
        }
    }
    break;
    case OPUS_GET_BANDWIDTH_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->bandwidth;
    }
    break;
    case OPUS_SET_DTX_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 1) {
            goto bad_arg;
        }
        st->silk_mode.useDTX = value;
    }
    break;
    case OPUS_GET_DTX_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->silk_mode.useDTX;
    }
    break;
    case OPUS_SET_COMPLEXITY_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 10) {
            goto bad_arg;
        }
        /* Complexity applies to both the SILK and CELT layers. */
        st->silk_mode.complexity = value;
        celt_encoder_ctl(celt_enc, OPUS_SET_COMPLEXITY(value));
    }
    break;
    case OPUS_GET_COMPLEXITY_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->silk_mode.complexity;
    }
    break;
    case OPUS_SET_INBAND_FEC_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 1) {
            goto bad_arg;
        }
        st->silk_mode.useInBandFEC = value;
    }
    break;
    case OPUS_GET_INBAND_FEC_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->silk_mode.useInBandFEC;
    }
    break;
    case OPUS_SET_PACKET_LOSS_PERC_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 100) {
            goto bad_arg;
        }
        /* Loss percentage informs both layers' robustness decisions. */
        st->silk_mode.packetLossPercentage = value;
        celt_encoder_ctl(celt_enc, OPUS_SET_PACKET_LOSS_PERC(value));
    }
    break;
    case OPUS_GET_PACKET_LOSS_PERC_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->silk_mode.packetLossPercentage;
    }
    break;
    case OPUS_SET_VBR_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 1) {
            goto bad_arg;
        }
        st->use_vbr = value;
        /* SILK expresses the same setting inverted (CBR flag). */
        st->silk_mode.useCBR = 1 - value;
    }
    break;
    case OPUS_GET_VBR_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->use_vbr;
    }
    break;
    case OPUS_SET_VOICE_RATIO_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        /* -1 means "unknown"; otherwise a percentage. */
        if (value < -1 || value > 100) {
            goto bad_arg;
        }
        st->voice_ratio = value;
    }
    break;
    case OPUS_GET_VOICE_RATIO_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->voice_ratio;
    }
    break;
    case OPUS_SET_VBR_CONSTRAINT_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 0 || value > 1) {
            goto bad_arg;
        }
        st->vbr_constraint = value;
    }
    break;
    case OPUS_GET_VBR_CONSTRAINT_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->vbr_constraint;
    }
    break;
    case OPUS_SET_SIGNAL_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value != OPUS_AUTO && value != OPUS_SIGNAL_VOICE && value != OPUS_SIGNAL_MUSIC) {
            goto bad_arg;
        }
        st->signal_type = value;
    }
    break;
    case OPUS_GET_SIGNAL_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->signal_type;
    }
    break;
    case OPUS_GET_LOOKAHEAD_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        /* Base lookahead is 2.5 ms; delay compensation is added except in
         * restricted low-delay mode. */
        *value = st->Fs / 400;
        if (st->application != OPUS_APPLICATION_RESTRICTED_LOWDELAY)
            *value += st->delay_compensation;
    }
    break;
    case OPUS_GET_SAMPLE_RATE_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->Fs;
    }
    break;
    case OPUS_GET_FINAL_RANGE_REQUEST: {
        opus_uint32 *value = va_arg(ap, opus_uint32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->rangeFinal;
    }
    break;
    case OPUS_SET_LSB_DEPTH_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value < 8 || value > 24) {
            goto bad_arg;
        }
        st->lsb_depth = value;
    }
    break;
    case OPUS_GET_LSB_DEPTH_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->lsb_depth;
    }
    break;
    case OPUS_SET_EXPERT_FRAME_DURATION_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value != OPUS_FRAMESIZE_ARG && value != OPUS_FRAMESIZE_2_5_MS &&
            value != OPUS_FRAMESIZE_5_MS && value != OPUS_FRAMESIZE_10_MS &&
            value != OPUS_FRAMESIZE_20_MS && value != OPUS_FRAMESIZE_40_MS &&
            value != OPUS_FRAMESIZE_60_MS && value != OPUS_FRAMESIZE_VARIABLE) {
            goto bad_arg;
        }
        st->variable_duration = value;
        celt_encoder_ctl(celt_enc, OPUS_SET_EXPERT_FRAME_DURATION(value));
    }
    break;
    case OPUS_GET_EXPERT_FRAME_DURATION_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value) {
            goto bad_arg;
        }
        *value = st->variable_duration;
    }
    break;
    case OPUS_SET_PREDICTION_DISABLED_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if (value > 1 || value < 0)
            goto bad_arg;
        st->silk_mode.reducedDependency = value;
    }
    break;
    case OPUS_GET_PREDICTION_DISABLED_REQUEST: {
        opus_int32 *value = va_arg(ap, opus_int32*);
        if (!value)
            goto bad_arg;
        *value = st->silk_mode.reducedDependency;
    }
    break;
    case OPUS_RESET_STATE: {
        void *silk_enc;
        silk_EncControlStruct dummy;
        silk_enc = (char *) st + st->silk_enc_offset;
        /* Zero everything from the reset marker to the end of the struct,
         * preserving the configuration fields stored before the marker. */
        OPUS_CLEAR((char *) &st->OPUS_ENCODER_RESET_START,
                   sizeof(OpusEncoder) -
                   ((char *) &st->OPUS_ENCODER_RESET_START - (char *) st));
        celt_encoder_ctl(celt_enc, OPUS_RESET_STATE);
        silk_InitEncoder(silk_enc, st->arch, &dummy);
        st->stream_channels = st->channels;
        st->hybrid_stereo_width_Q14 = 1 << 14;
        st->prev_HB_gain = Q15ONE;
        st->first = 1;
        st->mode = MODE_HYBRID;
        st->bandwidth = OPUS_BANDWIDTH_FULLBAND;
        st->variable_HP_smth2_Q15 = silk_LSHIFT(silk_lin2log(VARIABLE_HP_MIN_CUTOFF_HZ), 8);
    }
    break;
    case OPUS_SET_FORCE_MODE_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        if ((value < MODE_SILK_ONLY || value > MODE_CELT_ONLY) && value != OPUS_AUTO) {
            goto bad_arg;
        }
        st->user_forced_mode = value;
    }
    break;
    case OPUS_SET_LFE_REQUEST: {
        opus_int32 value = va_arg(ap, opus_int32);
        st->lfe = value;
        ret = celt_encoder_ctl(celt_enc, OPUS_SET_LFE(value));
    }
    break;
    case OPUS_SET_ENERGY_MASK_REQUEST: {
        opus_val16 *value = va_arg(ap, opus_val16*);
        /* Caller retains ownership of the mask buffer. */
        st->energy_masking = value;
        ret = celt_encoder_ctl(celt_enc, OPUS_SET_ENERGY_MASK(value));
    }
    break;
    case CELT_GET_MODE_REQUEST: {
        const CELTMode **value = va_arg(ap, const CELTMode**);
        if (!value) {
            goto bad_arg;
        }
        ret = celt_encoder_ctl(celt_enc, CELT_GET_MODE(value));
    }
    break;
    default:
        /* fprintf(stderr, "unknown opus_encoder_ctl() request: %d", request);*/
        ret = OPUS_UNIMPLEMENTED;
        break;
    }
    va_end(ap);
    return ret;
bad_arg:
    va_end(ap);
    return OPUS_BAD_ARG;
}
/* Release an encoder state allocated by the library. */
void opus_encoder_destroy(OpusEncoder *st) {
    opus_free(st);
}
| 37.372999 | 260 | 0.567171 | [
"vector"
] |
651feb836cc5c5d9fd88bcb725b8767211d4643e | 16,931 | c | C | source/blender/modifiers/intern/MOD_particleinstance.c | euclid-skyline/euclid-blender | 5767dcbe60c07cd9ef113e2f54eaad6981a47bce | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | source/blender/modifiers/intern/MOD_particleinstance.c | euclid-skyline/euclid-blender | 5767dcbe60c07cd9ef113e2f54eaad6981a47bce | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | source/blender/modifiers/intern/MOD_particleinstance.c | euclid-skyline/euclid-blender | 5767dcbe60c07cd9ef113e2f54eaad6981a47bce | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2005 by the Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup modifiers
*/
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_listbase.h"
#include "BLI_rand.h"
#include "BLI_string.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "BKE_effect.h"
#include "BKE_lattice.h"
#include "BKE_library_query.h"
#include "BKE_mesh.h"
#include "BKE_modifier.h"
#include "BKE_particle.h"
#include "BKE_pointcache.h"
#include "DEG_depsgraph_build.h"
#include "DEG_depsgraph_query.h"
#include "MOD_modifiertypes.h"
/* Fill a freshly added Particle Instance modifier with its defaults. */
static void initData(ModifierData *md)
{
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;

  /* Instance parent particles, through every particle life stage. */
  pimd->flag = (eParticleInstanceFlag_Parents | eParticleInstanceFlag_Unborn |
                eParticleInstanceFlag_Alive | eParticleInstanceFlag_Dead);

  /* First particle system, full position, Z axis, world space. */
  pimd->psys = 1;
  pimd->axis = 2;
  pimd->space = eParticleInstanceSpace_World;
  pimd->position = 1.0f;

  /* Show the entire particle range. */
  pimd->particle_offset = 0.0f;
  pimd->particle_amount = 1.0f;

  /* No vertex-color output layers by default. */
  STRNCPY(pimd->index_layer_name, "");
  STRNCPY(pimd->value_layer_name, "");
}
/* Request the loop-color layer only when one of the output vertex-color
 * layer names has been set by the user. */
static void requiredDataMask(Object *UNUSED(ob),
                             ModifierData *md,
                             CustomData_MeshMasks *r_cddata_masks)
{
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;
  const bool wants_vcols = (pimd->index_layer_name[0] != '\0') ||
                           (pimd->value_layer_name[0] != '\0');
  if (wants_vcols) {
    r_cddata_masks->lmask |= CD_MASK_MLOOPCOL;
  }
}
/* The modifier is disabled when it has no usable source particle system:
 * no source object, a missing system, or a system whose own particle
 * system modifier is not enabled for the current (render/realtime) mode. */
static bool isDisabled(const struct Scene *scene, ModifierData *md, bool useRenderParams)
{
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;
  ParticleSystem *psys;
  ModifierData *iter;

  if (!pimd->ob) {
    return true;
  }

  psys = BLI_findlink(&pimd->ob->particlesystem, pimd->psys - 1);
  if (psys == NULL) {
    return true;
  }

  /* Locate the psys modifier on the source object; its enabled state
   * decides whether the particle data can be used. */
  for (iter = pimd->ob->modifiers.first; iter; iter = iter->next) {
    if (iter->type != eModifierType_ParticleSystem) {
      continue;
    }
    ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)iter;
    if (psmd->psys != psys) {
      continue;
    }
    const int required_mode = useRenderParams ? eModifierMode_Render : eModifierMode_Realtime;
    return !modifier_isEnabled(scene, iter, required_mode);
  }
  return false;
}
/* Register depsgraph dependencies: both the transform and the geometry
 * (particles) of the source object feed this modifier. */
static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphContext *ctx)
{
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;
  if (pimd->ob == NULL) {
    return;
  }
  DEG_add_object_relation(ctx->node, pimd->ob, DEG_OB_COMP_TRANSFORM, "Particle Instance Modifier");
  DEG_add_object_relation(ctx->node, pimd->ob, DEG_OB_COMP_GEOMETRY, "Particle Instance Modifier");
}
/* Report the source object pointer so library queries/remapping can walk it. */
static void foreachObjectLink(ModifierData *md, Object *ob, ObjectWalkFunc walk, void *userData)
{
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;
  walk(userData, ob, &pimd->ob, IDWALK_CB_NOP);
}
/* Decide whether particle `p` should be skipped (not instanced).
 *
 * A particle is skipped when its life stage (unborn/alive/dead) is excluded
 * by the modifier flags, or when it falls outside the randomized visible
 * window defined by particle_offset/particle_amount.
 *
 * Fix: removed the unreachable `return false;` that followed an exhaustive
 * if/else-if/else chain in which every branch already returns. */
static bool particle_skip(ParticleInstanceModifierData *pimd, ParticleSystem *psys, int p)
{
  const bool between = (psys->part->childtype == PART_CHILD_FACES);
  ParticleData *pa;
  int totpart, randp, minp, maxp;
  /* Child particles reference a parent for the life-stage test. */
  if (p >= psys->totpart) {
    ChildParticle *cpa = psys->child + (p - psys->totpart);
    pa = psys->particles + (between ? cpa->pa[0] : cpa->parent);
  }
  else {
    pa = psys->particles + p;
  }
  if (pa) {
    if (pa->alive == PARS_UNBORN && (pimd->flag & eParticleInstanceFlag_Unborn) == 0) {
      return true;
    }
    if (pa->alive == PARS_ALIVE && (pimd->flag & eParticleInstanceFlag_Alive) == 0) {
      return true;
    }
    if (pa->alive == PARS_DEAD && (pimd->flag & eParticleInstanceFlag_Dead) == 0) {
      return true;
    }
  }
  if (pimd->particle_amount == 1.0f) {
    /* Early output, all particles are to be instanced. */
    return false;
  }
  /* Randomly skip particles based on desired amount of visible particles. */
  totpart = psys->totpart + psys->totchild;
  /* TODO make randomization optional? */
  randp = (int)(psys_frand(psys, 3578 + p) * totpart) % totpart;
  minp = (int)(totpart * pimd->particle_offset) % (totpart + 1);
  maxp = (int)(totpart * (pimd->particle_offset + pimd->particle_amount)) % (totpart + 1);
  if (maxp > minp) {
    /* Window does not wrap: keep particles with minp <= randp < maxp. */
    return randp < minp || randp >= maxp;
  }
  else if (maxp < minp) {
    /* Window wraps around the end of the range. */
    return randp < minp && randp >= maxp;
  }
  else {
    /* Empty window: skip everything. */
    return true;
  }
}
/* Store a 0..1 float as a grayscale vertex color.
 *
 * Fix: MLoopCol channels are bytes (0..255); the original assigned 1.0f
 * to the alpha channel, which truncates to 1 and makes the color almost
 * fully transparent. Opaque alpha for a byte channel is 255. */
static void store_float_in_vcol(MLoopCol *vcol, float float_value)
{
  const uchar value = unit_float_to_uchar_clamp(float_value);
  vcol->r = vcol->g = vcol->b = value;
  vcol->a = 255;
}
/* Build the output mesh: one copy of the input mesh per visible particle,
 * positioned/oriented on the particle (or along its hair/keyed path),
 * optionally scaled by particle size and faded in stereo/width.
 * Returns a new evaluated Mesh, or the input mesh unchanged when there is
 * nothing to instance. */
static Mesh *applyModifier(ModifierData *md, const ModifierEvalContext *ctx, Mesh *mesh)
{
  Mesh *result;
  ParticleInstanceModifierData *pimd = (ParticleInstanceModifierData *)md;
  struct Scene *scene = DEG_get_evaluated_scene(ctx->depsgraph);
  ParticleSimulationData sim;
  ParticleSystem *psys = NULL;
  ParticleData *pa = NULL;
  MPoly *mpoly, *orig_mpoly;
  MLoop *mloop, *orig_mloop;
  MVert *mvert, *orig_mvert;
  int totvert, totpoly, totloop, totedge;
  int maxvert, maxpoly, maxloop, maxedge, part_end = 0, part_start;
  int k, p, p_skip;
  short track = ctx->object->trackflag % 3, trackneg, axis = pimd->axis;
  float max_co = 0.0, min_co = 0.0, temp_co[3];
  float *size = NULL;
  float spacemat[4][4];
  const bool use_parents = pimd->flag & eParticleInstanceFlag_Parents;
  const bool use_children = pimd->flag & eParticleInstanceFlag_Children;
  bool between;
  trackneg = ((ctx->object->trackflag > 2) ? 1 : 0);
  /* Instancing onto the particle source object itself is not allowed. */
  if (pimd->ob == ctx->object) {
    pimd->ob = NULL;
    return mesh;
  }
  if (pimd->ob) {
    psys = BLI_findlink(&pimd->ob->particlesystem, pimd->psys - 1);
    if (psys == NULL || psys->totpart == 0) {
      return mesh;
    }
  }
  else {
    return mesh;
  }
  /* Determine the particle index range [part_start, part_end) to instance. */
  part_start = use_parents ? 0 : psys->totpart;
  part_end = 0;
  if (use_parents) {
    part_end += psys->totpart;
  }
  if (use_children) {
    part_end += psys->totchild;
  }
  if (part_end == 0) {
    return mesh;
  }
  sim.depsgraph = ctx->depsgraph;
  sim.scene = scene;
  sim.ob = pimd->ob;
  sim.psys = psys;
  sim.psmd = psys_get_modifier(pimd->ob, psys);
  between = (psys->part->childtype == PART_CHILD_FACES);
  /* Optionally pre-compute a per-particle size array. */
  if (pimd->flag & eParticleInstanceFlag_UseSize) {
    float *si;
    si = size = MEM_calloc_arrayN(part_end, sizeof(float), "particle size array");
    if (pimd->flag & eParticleInstanceFlag_Parents) {
      for (p = 0, pa = psys->particles; p < psys->totpart; p++, pa++, si++) {
        *si = pa->size;
      }
    }
    if (pimd->flag & eParticleInstanceFlag_Children) {
      ChildParticle *cpa = psys->child;
      for (p = 0; p < psys->totchild; p++, cpa++, si++) {
        *si = psys_get_child_size(psys, cpa, 0.0f, NULL);
      }
    }
  }
  switch (pimd->space) {
    case eParticleInstanceSpace_World:
      /* particle states are in world space already */
      unit_m4(spacemat);
      break;
    case eParticleInstanceSpace_Local:
      /* get particle states in the particle object's local space */
      invert_m4_m4(spacemat, pimd->ob->obmat);
      break;
    default:
      /* should not happen */
      BLI_assert(false);
      break;
  }
  totvert = mesh->totvert;
  totpoly = mesh->totpoly;
  totloop = mesh->totloop;
  totedge = mesh->totedge;
  /* count particles */
  maxvert = 0;
  maxpoly = 0;
  maxloop = 0;
  maxedge = 0;
  for (p = part_start; p < part_end; p++) {
    if (particle_skip(pimd, psys, p)) {
      continue;
    }
    maxvert += totvert;
    maxpoly += totpoly;
    maxloop += totloop;
    maxedge += totedge;
  }
  psys->lattice_deform_data = psys_create_lattice_deform_data(&sim);
  /* For hair/keyed/baked systems the mesh extent along the track axis is
   * needed to map vertex position onto path time. */
  if (psys->flag & (PSYS_HAIR_DONE | PSYS_KEYED) || psys->pointcache->flag & PTCACHE_BAKED) {
    float min[3], max[3];
    INIT_MINMAX(min, max);
    BKE_mesh_minmax(mesh, min, max);
    min_co = min[track];
    max_co = max[track];
  }
  result = BKE_mesh_new_nomain_from_template(mesh, maxvert, maxedge, 0, maxloop, maxpoly);
  mvert = result->mvert;
  orig_mvert = mesh->mvert;
  mpoly = result->mpoly;
  orig_mpoly = mesh->mpoly;
  mloop = result->mloop;
  orig_mloop = mesh->mloop;
  /* Optional vertex-color output layers: particle index and random value. */
  MLoopCol *mloopcols_index = CustomData_get_layer_named(
      &result->ldata, CD_MLOOPCOL, pimd->index_layer_name);
  MLoopCol *mloopcols_value = CustomData_get_layer_named(
      &result->ldata, CD_MLOOPCOL, pimd->value_layer_name);
  int *vert_part_index = NULL;
  float *vert_part_value = NULL;
  if (mloopcols_index != NULL) {
    vert_part_index = MEM_calloc_arrayN(maxvert, sizeof(int), "vertex part index array");
  }
  if (mloopcols_value) {
    vert_part_value = MEM_calloc_arrayN(maxvert, sizeof(float), "vertex part value array");
  }
  /* p_skip counts only the particles actually emitted; it indexes the
   * per-instance vertex/edge/poly/loop ranges in the result mesh. */
  for (p = part_start, p_skip = 0; p < part_end; p++) {
    float prev_dir[3];
    float frame[4]; /* frame orientation quaternion */
    float p_random = psys_frand(psys, 77091 + 283 * p);
    /* skip particle? */
    if (particle_skip(pimd, psys, p)) {
      continue;
    }
    /* set vertices coordinates */
    for (k = 0; k < totvert; k++) {
      ParticleKey state;
      MVert *inMV;
      int vindex = p_skip * totvert + k;
      MVert *mv = mvert + vindex;
      inMV = orig_mvert + k;
      CustomData_copy_data(&mesh->vdata, &result->vdata, k, p_skip * totvert + k, 1);
      *mv = *inMV;
      if (vert_part_index != NULL) {
        vert_part_index[vindex] = p;
      }
      if (vert_part_value != NULL) {
        vert_part_value[vindex] = p_random;
      }
      /*change orientation based on object trackflag*/
      copy_v3_v3(temp_co, mv->co);
      mv->co[axis] = temp_co[track];
      mv->co[(axis + 1) % 3] = temp_co[(track + 1) % 3];
      mv->co[(axis + 2) % 3] = temp_co[(track + 2) % 3];
      /* get particle state */
      if ((psys->flag & (PSYS_HAIR_DONE | PSYS_KEYED) || psys->pointcache->flag & PTCACHE_BAKED) &&
          (pimd->flag & eParticleInstanceFlag_Path)) {
        float ran = 0.0f;
        if (pimd->random_position != 0.0f) {
          ran = pimd->random_position * BLI_hash_frand(psys->seed + p);
        }
        if (pimd->flag & eParticleInstanceFlag_KeepShape) {
          state.time = pimd->position * (1.0f - ran);
        }
        else {
          /* Map the vertex's position along the track axis onto path time. */
          state.time = (mv->co[axis] - min_co) / (max_co - min_co) * pimd->position * (1.0f - ran);
          if (trackneg) {
            state.time = 1.0f - state.time;
          }
          mv->co[axis] = 0.0;
        }
        psys_get_particle_on_path(&sim, p, &state, 1);
        normalize_v3(state.vel);
        /* Incrementally Rotating Frame (Bishop Frame) */
        if (k == 0) {
          float hairmat[4][4];
          float mat[3][3];
          if (p < psys->totpart) {
            pa = psys->particles + p;
          }
          else {
            ChildParticle *cpa = psys->child + (p - psys->totpart);
            pa = psys->particles + (between ? cpa->pa[0] : cpa->parent);
          }
          psys_mat_hair_to_global(sim.ob,
                                  BKE_particle_modifier_mesh_final_get(sim.psmd),
                                  sim.psys->part->from,
                                  pa,
                                  hairmat);
          copy_m3_m4(mat, hairmat);
          /* to quaternion */
          mat3_to_quat(frame, mat);
          if (pimd->rotation > 0.0f || pimd->random_rotation > 0.0f) {
            float angle = 2.0f * M_PI *
                          (pimd->rotation +
                           pimd->random_rotation * (psys_frand(psys, 19957323 + p) - 0.5f));
            float eul[3] = {0.0f, 0.0f, angle};
            float rot[4];
            eul_to_quat(rot, eul);
            mul_qt_qtqt(frame, frame, rot);
          }
          /* note: direction is same as normal vector currently,
           * but best to keep this separate so the frame can be
           * rotated later if necessary
           */
          copy_v3_v3(prev_dir, state.vel);
        }
        else {
          float rot[4];
          /* incrementally rotate along bend direction */
          rotation_between_vecs_to_quat(rot, prev_dir, state.vel);
          mul_qt_qtqt(frame, rot, frame);
          copy_v3_v3(prev_dir, state.vel);
        }
        copy_qt_qt(state.rot, frame);
#if 0
        /* Absolute Frame (Frenet Frame) */
        if (state.vel[axis] < -0.9999f || state.vel[axis] > 0.9999f) {
          unit_qt(state.rot);
        }
        else {
          float cross[3];
          float temp[3] = {0.0f, 0.0f, 0.0f};
          temp[axis] = 1.0f;
          cross_v3_v3v3(cross, temp, state.vel);
          /* state.vel[axis] is the only component surviving from a dot product with the axis */
          axis_angle_to_quat(state.rot, cross, saacos(state.vel[axis]));
        }
#endif
      }
      else {
        state.time = -1.0;
        psys_get_particle_state(&sim, p, &state, 1);
      }
      /* Rotate, scale and translate the vertex into particle space, then
       * into the selected output space. */
      mul_qt_v3(state.rot, mv->co);
      if (pimd->flag & eParticleInstanceFlag_UseSize) {
        mul_v3_fl(mv->co, size[p]);
      }
      add_v3_v3(mv->co, state.co);
      mul_m4_v3(spacemat, mv->co);
    }
    /* create edges and adjust edge vertex indices*/
    CustomData_copy_data(&mesh->edata, &result->edata, 0, p_skip * totedge, totedge);
    MEdge *me = &result->medge[p_skip * totedge];
    for (k = 0; k < totedge; k++, me++) {
      me->v1 += p_skip * totvert;
      me->v2 += p_skip * totvert;
    }
    /* create polys and loops */
    for (k = 0; k < totpoly; k++) {
      MPoly *inMP = orig_mpoly + k;
      MPoly *mp = mpoly + p_skip * totpoly + k;
      CustomData_copy_data(&mesh->pdata, &result->pdata, k, p_skip * totpoly + k, 1);
      *mp = *inMP;
      mp->loopstart += p_skip * totloop;
      {
        MLoop *inML = orig_mloop + inMP->loopstart;
        MLoop *ml = mloop + mp->loopstart;
        int j = mp->totloop;
        CustomData_copy_data(&mesh->ldata, &result->ldata, inMP->loopstart, mp->loopstart, j);
        for (; j; j--, ml++, inML++) {
          ml->v = inML->v + (p_skip * totvert);
          ml->e = inML->e + (p_skip * totedge);
          const int ml_index = (ml - mloop);
          if (mloopcols_index != NULL) {
            const int part_index = vert_part_index[ml->v];
            store_float_in_vcol(&mloopcols_index[ml_index],
                                (float)part_index / (float)(psys->totpart - 1));
          }
          if (mloopcols_value != NULL) {
            const float part_value = vert_part_value[ml->v];
            store_float_in_vcol(&mloopcols_value[ml_index], part_value);
          }
        }
      }
    }
    p_skip++;
  }
  /* Free the temporary lattice deform data and scratch arrays. */
  if (psys->lattice_deform_data) {
    end_latt_deform(psys->lattice_deform_data);
    psys->lattice_deform_data = NULL;
  }
  if (size) {
    MEM_freeN(size);
  }
  MEM_SAFE_FREE(vert_part_index);
  MEM_SAFE_FREE(vert_part_value);
  result->runtime.cd_dirty_vert |= CD_MASK_NORMAL;
  return result;
}
/* Registration record for the Particle Instance modifier: names, flags and
 * the callback table used by the modifier stack. NULL entries are callbacks
 * this modifier does not implement. */
ModifierTypeInfo modifierType_ParticleInstance = {
    /* name */ "ParticleInstance",
    /* structName */ "ParticleInstanceModifierData",
    /* structSize */ sizeof(ParticleInstanceModifierData),
    /* type */ eModifierTypeType_Constructive,
    /* flags */ eModifierTypeFlag_AcceptsMesh | eModifierTypeFlag_SupportsMapping |
        eModifierTypeFlag_SupportsEditmode | eModifierTypeFlag_EnableInEditmode,
    /* copyData */ modifier_copyData_generic,
    /* deformVerts */ NULL,
    /* deformMatrices */ NULL,
    /* deformVertsEM */ NULL,
    /* deformMatricesEM */ NULL,
    /* applyModifier */ applyModifier,
    /* initData */ initData,
    /* requiredDataMask */ requiredDataMask,
    /* freeData */ NULL,
    /* isDisabled */ isDisabled,
    /* updateDepsgraph */ updateDepsgraph,
    /* dependsOnTime */ NULL,
    /* dependsOnNormals */ NULL,
    /* foreachObjectLink */ foreachObjectLink,
    /* foreachIDLink */ NULL,
    /* foreachTexLink */ NULL,
    /* freeRuntimeData */ NULL,
};
| 29.547993 | 99 | 0.615971 | [
"mesh",
"object",
"vector"
] |
6521466488d5c4b71ffb5accd225c7b715936a37 | 780 | h | C | include/input.h | AnotherAlternative/gba_dumper | f32854fb05768eed9a280922139a1b798db1679b | [
"MIT"
] | 11 | 2016-09-05T13:18:10.000Z | 2022-03-27T16:16:36.000Z | include/input.h | AnotherAlternative/gba_dumper | f32854fb05768eed9a280922139a1b798db1679b | [
"MIT"
] | null | null | null | include/input.h | AnotherAlternative/gba_dumper | f32854fb05768eed9a280922139a1b798db1679b | [
"MIT"
] | 4 | 2016-09-12T23:00:16.000Z | 2020-01-07T21:59:41.000Z | #ifndef COMMON_H
#define COMMON_H
#include "common.h"
#endif
/*!
* To separate out parsing the arguments from the main application flow, all code related to handling arguments is placed with input.h/.c.
*/
/*!
* This structure will be populated in handle_input and returned to the main caller. It is then used to control program flow via set flags.
*/
typedef struct {
int dump_rom_flag;
int fuzz_value;
char *relative_search_text;
char *translation_file_arg;
char *strings_file_arg;
char *rom_string_break;
unsigned long start_address;
unsigned long end_address;
} passed_options;
/*!
* Given pointers to the rom/dump files, along with the argument vector, populate the options structure.
*/
int handle_input( rom_file*, dump_file*, passed_options*, int, char** );
| 26 | 138 | 0.764103 | [
"vector"
] |
65227b0c7f3f638587a383efc71640fe4db7111d | 17,319 | h | C | Code/Libraries/UtilitiesDataTypes/ImageUtils.h | NIRALUser/AtlasWerks | a074ca208ab41a6ed89c1f0b70004998f7397681 | [
"BSD-3-Clause"
] | 3 | 2016-04-26T05:06:06.000Z | 2020-08-01T09:46:54.000Z | Code/Libraries/UtilitiesDataTypes/ImageUtils.h | scalphunters/AtlasWerks | 9d224bf8db628805368fcb7973ac578937b6b595 | [
"BSD-3-Clause"
] | 1 | 2018-11-27T21:53:48.000Z | 2019-05-13T15:21:31.000Z | Code/Libraries/UtilitiesDataTypes/ImageUtils.h | scalphunters/AtlasWerks | 9d224bf8db628805368fcb7973ac578937b6b595 | [
"BSD-3-Clause"
] | 2 | 2019-01-24T02:07:17.000Z | 2019-12-11T17:27:42.000Z | /* ================================================================
*
* AtlasWerks Project
*
* Copyright (c) Sarang C. Joshi, Bradley C. Davis, J. Samuel Preston,
* Linh K. Ha. All rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef ImageUtils_h
#define ImageUtils_h
#ifndef SWIG
#include <float.h>
#include <limits>
#include <fftw3.h>
#include "Array3DUtils.h"
#include "Array3DIO.h"
#include "AffineTransform3D.h"
#include "DataTypes/Image.h"
#endif // SWIG
class ImageUtils
{
public:
/**
* threshold the image at the given level
*
*/
template <class T>
static
void
threshold(const Array3D<T>& image, Array3D<T>& Dimage, const T& thValue, float maxVal = 2000 );
/**
* return the squared L2 norm appropriately scaled by the image
* spacing
*/
template <class T>
static
double
l2NormSqr(const Image<T> &image);
#ifdef SWIG
%template(l2NormSqr) l2NormSqr< float >;
#endif // SWIG
/**
* return the dot product, appropriately scaled by spacing
*/
template <class T>
static
double
l2DotProd(const Image<T> &i1, const Image<T> &i2);
#ifdef SWIG
%template(l2DotProd) l2DotProd< float >;
#endif // SWIG
/**
* return the intensity value for this image at a given position in
* world coordinates
*
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
T
interp(const Image<T>& image,
const Vector3D<double>& worldCoordinates,
const T& background);
template<class T>
static
T
interp(const Image<T>& image,
const Vector3D<double>& worldCoordinates,
const T& background)
{
interp<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image, worldCoordinates, background);
}
#ifdef SWIG
%template(interp) interp< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* translate this image. note: this just changes the origin
*
* bcd 2004
*/
template <class T>
static
void
translate(Image<T>& image,
const double& tx,
const double& ty,
const double& tz);
/**
* translate this image. note: this just changes the origin
*
* bcd 2004
*/
template <class T>
static
void
translate(Image<T>& image,
const Vector3D<double>& t);
#ifdef SWIG
%template(translate) translate< float >;
#endif // SWIG
/**
*
* Upsample via sinc interpolation. Currently using full complex
* FFT, may want to go to r2c/c2r in the future to save
* memory. Also, this version only works for images with an even
* number of pixels in each dimension.
*
* jsp 2009
*/
template <class T>
static
void
sincUpsample(Image<T>& image,
unsigned int factor);
/**
*
* Upsample via sinc interpolation. Currently using full complex
* FFT, may want to go to r2c/c2r in the future to save
* memory. Also, this version only works for images with an even
* number of pixels in each dimension.
*
* jsp 2009
*/
template <class T>
static
void
sincUpsample(Image<T>& image,
Vector3D<unsigned int> &newSize);
#ifdef SWIG
%template(sincUpsample) sincUpsample< float >;
#endif // SWIG
/**
* Non-integer downsampling method. Much slower than integer
* version, but uses resampleNew so that positions in world space
* are correctly preserverd. input and output can be the same
* object.
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void gaussianDownsample(const Image<T>& input,
Image<T>& output,
const Vector3D<unsigned int>& newSize);
template <class T>
static
void gaussianDownsample(const Image<T>& input,
Image<T>& output,
const Vector3D<unsigned int>& newSize)
{
gaussianDownsample<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(input, output, newSize);
}
/**
* Modified version of resample that correctly centers image.
* make this image have the given origin, spacing, and dimensions.
* intensities should stay in the same place in world coordinates.
*
* jsp 2009
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resampleNew(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions,
T bgVal = static_cast<T>(0));
template <class T>
static
void
resampleNew(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions,
T bgVal = static_cast<T>(0))
{
resampleNew<T,
Array3DUtils::BACKGROUND_STRATEGY_CLAMP,
DEFAULT_SCALAR_INTERP>
(image,
newOrigin,
newSpacing,
newDimensions,
bgVal);
}
/**
* Modified version of resample that correctly centers image.
* fill in the destination image from the source image, taking
* spacing, origin, and dimensions into account. where overlapping,
* source and dest will be the same in world coordinates.
*
* jsp 2009
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resampleNew(const Image<T>& sourceImage,
Image<T>& destImage,
T bgVal = static_cast<T>(0));
template <class T>
static
void
resampleNew(const Image<T>& sourceImage,
Image<T>& destImage,
T bgVal = static_cast<T>(0))
{
resampleNew<T,
Array3DUtils::BACKGROUND_STRATEGY_CLAMP,
DEFAULT_SCALAR_INTERP>
(sourceImage,
destImage,
bgVal);
}
#ifdef SWIG
%template(resampleNew) resampleNew< float, Array3DUtils::BACKGROUND_STRATEGY_CLAMP, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* make this image have the given origin, spacing, and dimensions.
* intensities should stay in the same place in world coordinates.
*
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resample(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions);
template <class T>
static
void
resample(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions)
{
resample<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image, newOrigin, newSpacing, newDimensions);
}
/**
* fill in the destination image from the source image, taking
* spacing, origin, and dimensions into account. where overlapping,
* source and dest will be the same in world coordinates.
*
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resample(const Image<T>& sourceImage,
Image<T>& destImage);
template <class T>
static
void
resample(const Image<T>& sourceImage,
Image<T>& destImage)
{
resample<T,
Array3DUtils::BACKGROUND_STRATEGY_CLAMP,
DEFAULT_SCALAR_INTERP>(sourceImage, destImage);
}
#ifdef SWIG
%template(resample) resample< float, Array3DUtils::BACKGROUND_STRATEGY_CLAMP, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* Fill in the destination image from the source image, taking
* spacing, origin, and dimensions into account. Where overlapping,
* source and dest will be the same in world coordinates. Where not
* overlapping, destImage is left unchanged. This requires that
* sourceImage have only nonnegative intensities.
*
* foskey 2005
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resampleWithTransparency(const Image<T>& sourceImage,
Image<T>& destImage);
template <class T>
static
void
resampleWithTransparency(const Image<T>& sourceImage,
Image<T>& destImage)
{
resampleWithTransparency<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(sourceImage, destImage);
}
#ifdef SWIG
%template(resampleWithTransparency) resampleWithTransparency< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* Take a transform in world coordinates and return the corresponding
* transform expressed in units of voxels.
*
* For the input transform, the Spacing parameters give the
* distances in world units between successive voxel centers, and
* the Origin parameters give the location of the (0,0,0) voxel in
* world coordinates.
*
* For the returned transform, the coordinate systems of the two
* images have units of voxels, and the origins are the centers of
* the (0,0,0) voxels.
*/
static
AffineTransform3D<double>
transformInIndexCoordinates(
const Vector3D<double>& fixedImageOrigin,
const Vector3D<double>& fixedImageSpacing,
const Vector3D<double>& movingImageOrigin,
const Vector3D<double>& movingImageSpacing,
const AffineTransform3D<double>& transformInWorldCoordinates );
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
applyAffine(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions,
const AffineTransform3D<double>& transformInWorldCoordinates,
const float& backgroundValue = 0);
template <class T>
static
void
applyAffine(Image<T>& image,
const Vector3D<double>& newOrigin,
const Vector3D<double>& newSpacing,
const Vector3D<unsigned int>& newDimensions,
const AffineTransform3D<double>& transformInWorldCoordinates,
const float& backgroundValue = 0)
{
applyAffine<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image, newOrigin, newSpacing, newDimensions, transformInWorldCoordinates, backgroundValue);
}
/** Transform sourceImage into the coordinate system of destImage.
* After the call, destImage has data from sourceImage, transformed
* by the inverse of 'transform'. To find the intensity of a point
* p in destImage, one looks to the point transform(p) in sourceImage.
* If transform(p) is outside of sourceImage, backgroundValue is
* used. The origins of the respective coordinate systems are those
* specified by getOrigin() for the images.
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
applyAffine(const Image<T>& sourceImage,
Image<T>& destImage,
const AffineTransform3D<double>& transformInWorldCoordinates,
const float& backgroundValue = 0);
template <class T>
static
void
applyAffine(const Image<T>& sourceImage,
Image<T>& destImage,
const AffineTransform3D<double>& transformInWorldCoordinates,
const float& backgroundValue = 0)
{
applyAffine<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(sourceImage, destImage, transformInWorldCoordinates, backgroundValue);
}
#ifdef SWIG
%template(applyAffine) applyAffine< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* make all voxel spacing the same. the smallest current voxel
* spacing is chosen as the spacing to use.
*
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
makeVoxelsIsotropic(Image<T>& image);
template <class T>
static
void
makeVoxelsIsotropic(Image<T>& image){
makeVoxelsIsotropic<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image);
}
#ifdef SWIG
%template(makeVoxelsIsotropic) makeVoxelsIsotropic< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
void
resliceZMakeIsotropic(Image<T>& image);
template <class T>
static
void
resliceZMakeIsotropic(Image<T>& image){
resliceZMakeIsotropic<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image);
}
#ifdef SWIG
%template(resliceZMakeIsotropic) resliceZMakeIsotropic< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
/**
* gaussian downsample an image. spacing is updated appropriatly.
* \see Array3DUtils::gaussianDownsample(const Array3D<T>& input,
* Array3D<T>& output, const Vector3D<int>& factors, const
* Vector3D<double>& sigma, const Vector3D<int>& kernelSize).
* imageIn and imageOut can be the same object.
*
* bcd 2004
*/
template <class T>
static
void
gaussianDownsample(const Image<T>& imageIn,
Image<T>& imageOut,
const Vector3D<double>& factors,
const Vector3D<double>& sigma,
const Vector3D<double>& kernelSize);
#ifdef SWIG
%template(gaussianDownsample) gaussianDownsample< float >;
#endif // SWIG
/**
* \param image1 first input image
* \param image2 second input image
* \return the sum of voxelwise squared intensity difference between
* two images. a difference is calculated for each voxel of image1.
*
* bcd 2004
*/
template <class T,
Array3DUtils::ScalarBackgroundStrategyT BackgroundStrategy,
Array3DUtils::InterpT InterpMethod>
static
double
squaredError(const Image<T>& image1,
const Image<T>& image2);
template <class T>
static
double
squaredError(const Image<T>& image1,
const Image<T>& image2)
{
return squaredError<T,
DEFAULT_SCALAR_BACKGROUND_STRATEGY,
DEFAULT_SCALAR_INTERP>
(image1, image2);
}
#ifdef SWIG
%template(squaredError) squaredError< float, DEFAULT_SCALAR_BACKGROUND_STRATEGY, DEFAULT_SCALAR_INTERP>;
#endif // SWIG
// Centroid of the image, in world coordinates.
template <class T>
static
Vector3D<double>
computeCentroid(const Image<T>& image,
const ROI<int, unsigned int> voxelROI);
template <class T>
static
Vector3D<double>
computeCentroid(const Image<T>& image);
#ifdef SWIG
%template(computeCentroid) computeCentroid< float >;
#endif // SWIG
/**
* write the META header file and data file for this image .mhd and
* .raw are automatically appended to the filenamePrefix
*
* bcd 2004
*/
template <class T>
static
void
writeMETA(const Image<T>& image,
const char* filenamePrefix);
#ifdef SWIG
%template(writeMETA) writeMETA< float >;
#endif // SWIG
/**
* extract an roi (specified in voxel coordinates) from an image
*
* bcd 2004
*/
template <class T>
static
void
extractROIVoxelCoordinates(const Image<T>& image,
Image<T>& roiImage,
const ROI<int, unsigned int>& voxelROI);
#ifdef SWIG
%template(extractROIVoxelCoordinates) extractROIVoxelCoordinates< float >;
#endif // SWIG
/**
* extract an roi (specified in world coordinates) from an image
*
* bcd 2004
*/
template <class T>
static
void
extractROIWorldCoordinates(const Image<T>& image,
Image<T>& roiImage,
const ROI<double, double>& worldROI);
#ifdef SWIG
%template(extractROIWorldCoordinates) extractROIWorldCoordinates< float >;
#endif // SWIG
/**
* This is a specialized function to handle CT images. These images
* are often stored with a minimum pixel value of -1024 (consistent
* with the definition of Hounsfield units). Other times they are
* shifted so that the minimum intensity is 0, which is what is
* expected by ImMap and BeamLock. This function performs that
* shift. (Note that PLUNC uses the other convention.)
*/
template <class T>
static
void
makeImageUnsigned(Image<T>& image);
#ifdef SWIG
%template(makeImageUnsigned) makeImageUnsigned< float >;
#endif // SWIG
};
#ifndef SWIG
#include "ImageUtils.txx"
#endif // SWIG
#endif
| 27.188383 | 130 | 0.677984 | [
"object",
"transform"
] |
65278ba4ce4fb5c3fdf747d6fc45cc1794f88655 | 9,431 | c | C | deps/pmdk/src/test/obj_tx_free/obj_tx_free.c | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | deps/pmdk/src/test/obj_tx_free/obj_tx_free.c | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | deps/pmdk/src/test/obj_tx_free/obj_tx_free.c | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_tx_free.c -- unit test for pmemobj_tx_free
*/
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_free"
#define OBJ_SIZE (200 * 1024)
enum type_number {
TYPE_FREE_NO_TX,
TYPE_FREE_WRONG_UUID,
TYPE_FREE_COMMIT,
TYPE_FREE_ABORT,
TYPE_FREE_COMMIT_NESTED1,
TYPE_FREE_COMMIT_NESTED2,
TYPE_FREE_ABORT_NESTED1,
TYPE_FREE_ABORT_NESTED2,
TYPE_FREE_ABORT_AFTER_NESTED1,
TYPE_FREE_ABORT_AFTER_NESTED2,
TYPE_FREE_OOM,
TYPE_FREE_ALLOC,
TYPE_FREE_AFTER_ABORT,
TYPE_FREE_MANY_TIMES,
};
TOID_DECLARE(struct object, 0);
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
/*
* do_tx_alloc -- do tx allocation with specified type number
*/
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, unsigned type_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_alloc(sizeof(struct object), type_num);
} TX_END
return ret;
}
/*
* do_tx_free_wrong_uuid -- try to free object with invalid uuid
*/
static void
do_tx_free_wrong_uuid(PMEMobjpool *pop)
{
volatile int ret = 0;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID);
oid.pool_uuid_lo = ~oid.pool_uuid_lo;
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
ret = -1;
} TX_END
UT_ASSERTeq(ret, -1);
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_null_oid -- call pmemobj_tx_free with OID_NULL
*/
static void
do_tx_free_null_oid(PMEMobjpool *pop)
{
volatile int ret = 0;
TX_BEGIN(pop) {
ret = pmemobj_tx_free(OID_NULL);
} TX_ONABORT {
ret = -1;
} TX_END
UT_ASSERTeq(ret, 0);
}
/*
* do_tx_free_commit -- do the basic transactional deallocation of object
*/
static void
do_tx_free_commit(PMEMobjpool *pop)
{
int ret;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_COMMIT);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort -- abort deallocation of object
*/
static void
do_tx_free_abort(PMEMobjpool *pop)
{
int ret;
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_ABORT);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_commit_nested -- do allocation in nested transaction
*/
static void
do_tx_free_commit_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED1));
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED2));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort_nested -- abort allocation in nested transaction
*/
static void
do_tx_free_abort_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort_after_nested -- abort transaction after nested
* pmemobj_tx_free
*/
static void
do_tx_free_abort_after_nested(PMEMobjpool *pop)
{
int ret;
PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED1);
PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED2);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid1);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_free(oid2);
UT_ASSERTeq(ret, 0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID(struct object) obj;
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
TYPE_FREE_ABORT_AFTER_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj));
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop,
TYPE_FREE_ABORT_AFTER_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj));
}
/*
* do_tx_free_alloc_abort -- free object allocated in the same transaction
* and abort transaction
*/
static void
do_tx_free_alloc_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(
sizeof(struct object), TYPE_FREE_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_free(obj.oid);
UT_ASSERTeq(ret, 0);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_free_alloc_abort -- free object allocated in the same transaction
* and commit transaction
*/
static void
do_tx_free_alloc_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(
sizeof(struct object), TYPE_FREE_ALLOC));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_free(obj.oid);
UT_ASSERTeq(ret, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_free_abort_free - allocate a new object, perform a transactional free
* in an aborted transaction and then to actually free the object.
*
* This can expose any issues with not properly handled free undo log.
*/
static void
do_tx_free_abort_free(PMEMobjpool *pop)
{
PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_AFTER_ABORT);
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_free(oid);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_free_many_times -- free enough objects to trigger vector array alloc
*/
static void
do_tx_free_many_times(PMEMobjpool *pop)
{
#define TX_FREE_COUNT ((1 << 3) + 1)
PMEMoid oids[TX_FREE_COUNT];
for (int i = 0; i < TX_FREE_COUNT; ++i)
oids[i] = do_tx_alloc(pop, TYPE_FREE_MANY_TIMES);
TX_BEGIN(pop) {
for (int i = 0; i < TX_FREE_COUNT; ++i)
pmemobj_tx_free(oids[i]);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
#undef TX_FREE_COUNT
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_free");
util_init();
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_free_wrong_uuid(pop);
VALGRIND_WRITE_STATS;
do_tx_free_null_oid(pop);
VALGRIND_WRITE_STATS;
do_tx_free_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_free_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_free_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_free_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_free_abort_free(pop);
VALGRIND_WRITE_STATS;
do_tx_free_many_times(pop);
VALGRIND_WRITE_STATS;
pmemobj_close(pop);
DONE(NULL);
}
| 22.780193 | 78 | 0.741279 | [
"object",
"vector"
] |
6527d498e85ff1d85fd3bc49bc44316a80771232 | 917 | h | C | source/tuning_runner/searcher/full_searcher.h | jiri-filipovic/KTT | d70330d5a3fd02cfc2c2163ea8695b9eb1113c6d | [
"MIT"
] | null | null | null | source/tuning_runner/searcher/full_searcher.h | jiri-filipovic/KTT | d70330d5a3fd02cfc2c2163ea8695b9eb1113c6d | [
"MIT"
] | null | null | null | source/tuning_runner/searcher/full_searcher.h | jiri-filipovic/KTT | d70330d5a3fd02cfc2c2163ea8695b9eb1113c6d | [
"MIT"
] | null | null | null | #pragma once
#include "searcher.h"
namespace ktt
{
class FullSearcher : public Searcher
{
public:
FullSearcher(const std::vector<KernelConfiguration>& configurations) :
configurations(configurations),
index(0)
{
if (configurations.size() == 0)
{
throw std::runtime_error("Configurations vector provided for searcher is empty");
}
}
void calculateNextConfiguration(const double) override
{
index++;
}
KernelConfiguration getCurrentConfiguration() const override
{
return configurations.at(index);
}
size_t getUnexploredConfigurationCount() const override
{
if (index >= configurations.size())
{
return 0;
}
return configurations.size() - index;
}
private:
std::vector<KernelConfiguration> configurations;
size_t index;
};
} // namespace ktt
| 19.510638 | 93 | 0.627045 | [
"vector"
] |
652a2b6897e3f16a3c1533b52d36645138167704 | 1,396 | h | C | source/renderer/hiz_pipeline.h | LavenSun/LSRViewer | 615dcb63ec913c40bdfba6b1096365c6a0391206 | [
"MIT"
] | 1 | 2021-05-18T01:38:52.000Z | 2021-05-18T01:38:52.000Z | source/renderer/hiz_pipeline.h | LavenSun/LSRViewer | 615dcb63ec913c40bdfba6b1096365c6a0391206 | [
"MIT"
] | null | null | null | source/renderer/hiz_pipeline.h | LavenSun/LSRViewer | 615dcb63ec913c40bdfba6b1096365c6a0391206 | [
"MIT"
] | null | null | null | #pragma once
#include <renderer/base_pipeline.h>
class HizPipeline :public chaf::PipelineBase
{
public:
HizPipeline(vks::VulkanDevice& device, uint32_t width, uint32_t height);
~HizPipeline();
void resize(uint32_t width, uint32_t height, VkQueue queue);
void prepare(VkQueue queue, VkFormat depth_format);
void buildCommandBuffer();
void submit();
public:
VkQueue compute_queue;
VkFence fence;
VkSemaphore semaphore;
VkPipelineLayout pipeline_layout;
VkPipeline pipeline;
VkDescriptorSetLayout descriptor_set_layout;
std::vector<VkDescriptorSet> descriptor_sets;
VkDescriptorPool descriptor_pool;
VkCommandPool command_pool;
VkCommandBuffer command_buffer;
struct
{
uint32_t depth_pyramid_levels{ 1 };
VkSampler sampler{ VK_NULL_HANDLE };
VkDeviceMemory mem{ VK_NULL_HANDLE };
VkImage image{ VK_NULL_HANDLE };
std::vector<VkImageView> views;
VkDescriptorImageInfo descriptor;
}hiz_image;
void prepareHiz();
void destroyHiz();
struct
{
VkSampler sampler{ VK_NULL_HANDLE };
VkImageView view{ VK_NULL_HANDLE };
VkImage image{ VK_NULL_HANDLE };
VkDeviceMemory mem{ VK_NULL_HANDLE };
VkDescriptorImageInfo descriptor;
uint32_t width, height;
VkFormat format;
}depth_image;
void prepareDepth(VkQueue queue, VkFormat depth_format);
void destroyDepth();
void copyDepth(VkCommandBuffer cmd_buffer, VkImage depth_stencil_image);
};
| 20.231884 | 73 | 0.780802 | [
"vector"
] |
652c487168a1b1800797a28b58a7f1e9232553aa | 4,383 | h | C | ext/CameraICI/include/icitest_graph.h | nayanavenkataramana/earlyapp | eafd8ae8507dee79d2b751f7c5d9320f5519029b | [
"MIT"
] | 5 | 2019-01-02T18:34:52.000Z | 2021-05-13T16:09:10.000Z | ext/CameraICI/include/icitest_graph.h | nayanavenkataramana/earlyapp | eafd8ae8507dee79d2b751f7c5d9320f5519029b | [
"MIT"
] | 10 | 2018-10-26T06:11:45.000Z | 2019-06-24T06:25:43.000Z | ext/CameraICI/include/icitest_graph.h | nayanavenkataramana/earlyapp | eafd8ae8507dee79d2b751f7c5d9320f5519029b | [
"MIT"
] | 20 | 2018-10-26T02:16:51.000Z | 2021-02-17T11:39:59.000Z | ////////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2018 Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom
// the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
// OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
// OR OTHER DEALINGS IN THE SOFTWARE.
//
// SPDX-License-Identifier: MIT
//
////////////////////////////////////////////////////////////////////////////////
#ifndef ICITEST_GRAPH_H
#define ICITEST_GRAPH_H
#include <string.h>
#include <intel_bufmgr.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <wayland-egl.h>
#include <wayland-client.h>
#include <wayland-cursor.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
//#include "icitest_common.h"
#include "wayland-drm-client-protocol.h"
/* Presumably the duration of the test/benchmark run in seconds --
 * TODO confirm against the code that consumes this macro. */
#define TARGET_NUM_SECONDS 5
/* EGL/GLES extension entry points, resolved at runtime (presumably via
 * eglGetProcAddress() in the .c file -- verify).
 *
 * NOTE(review): these are tentative definitions placed in a header.  If this
 * header is included from more than one translation unit, builds with
 * -fno-common (the default since GCC 10) will fail to link with "multiple
 * definition" errors.  Consider declaring them `extern` here and defining
 * them once in a .c file. */
PFNEGLCREATEIMAGEKHRPROC eglCreateImageKHR;
PFNEGLDESTROYIMAGEKHRPROC eglDestroyImageKHR;
PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES;
PFNGLPROGRAMBINARYOESPROC glProgramBinaryOES;
PFNGLGETPROGRAMBINARYOESPROC glGetProgramBinaryOES;
/* One displayable buffer: a GEM buffer object plus the handles by which the
 * same memory is known to DRM (fb_handle), PRIME (dbuf_fd), flink
 * (flink_name), Wayland (buf) and EGL (khrImage). */
struct buffer {
	drm_intel_bo *bo;            /* GEM buffer object owning the pixels */
	unsigned int index;          /* position of this buffer in display.buffers */
	unsigned int fb_handle;      /* DRM framebuffer handle */
	int dbuf_fd;                 /* PRIME (dma-buf) file descriptor */
	uint32_t flink_name;         /* global GEM flink name */
	struct wl_buffer *buf;       /* Wayland buffer wrapping the same memory */
	EGLImageKHR khrImage;        /* EGLImage view of the buffer */
	void *start;                 /* CPU mapping base address */
	size_t length;               /* CPU mapping length in bytes */
	int is_top;                  /* field flag -- presumably top vs bottom
	                              * field for interlaced input; confirm */
};
/* One Wayland output (monitor) advertised by the compositor. */
struct output {
	struct display *display;     /* back-pointer to owning display */
	struct wl_output *output;    /* Wayland output proxy */
	struct wl_list link;         /* link in display.output_list */
};
/* Top-level application state: Wayland globals, DRM/GEM state and EGL. */
struct display {
	struct wl_display *display;
	struct wl_registry *registry;
	struct wl_compositor *compositor;
	struct wl_shell *wl_shell;
	struct wl_drm *wl_drm;
	struct window *window;       /* the single application window */
	struct wl_list output_list;  /* list of struct output */
	int strm_fd;                 /* input stream fd -- TODO confirm source */
	int drm_fd;                  /* DRM device fd */
	dri_bufmgr *bufmgr;          /* libdrm_intel buffer manager */
	struct buffer *buffers;      /* pool of stream buffers */
	struct buffer *disp_bufs[2]; /* double-buffered display pair */
	struct setup *s;             /* run configuration (defined elsewhere) */
	struct {
		EGLDisplay dpy;
		EGLContext ctx;
		EGLConfig conf;
	} egl;
};
/* Simple width/height pair. */
struct geometry {
	int width, height;
};
/* Application window plus all the GL objects used to render into it. */
struct window {
	struct display *display;     /* back-pointer to owning display */
	struct geometry geometry, window_size;
	struct wl_surface *surface;
	void *shell_surface;
	struct wl_egl_window *native;
	EGLSurface egl_surface;
	struct wl_callback *callback; /* pending frame callback, if any */
	int fullscreen, opaque, configured, output;
	int print_fps, frame_count;
	/* GL handles for the textured-quad pipeline. */
	struct {
		GLuint fbo;
		GLuint color_rbo;
		GLuint modelview_uniform;
		GLuint gl_texture_size;
		GLuint gl_tex_name[2];
		GLuint gl_tex_sampler[2];
		GLuint rgb565;
		GLuint swap_rb;
		GLuint pos;
		GLuint col;
		GLuint attr_tex;
		GLuint program;
		GLfloat hmi_vtx[12u]; //!< hold coordinates of vertices for texture
		GLfloat hmi_tex[8u]; //!< hold indices of vertices for texture
		GLubyte hmi_ind[6u]; //!< hold coordinates for texture sample
		GLfloat model_view[16u]; //!< 4x4 model-view matrix, column data
	} gl;
};
/* Wayland listener instances shared with the .c file.
 * NOTE(review): same tentative-definition-in-header concern as the function
 * pointers above, and `frame_listener` is declared twice below -- the
 * duplicate declaration is harmless but should be removed. */
struct wl_shell_surface_listener wl_shell_surface_listener;
const struct wl_callback_listener frame_listener;
struct wl_callback_listener configure_callback_listener;
const struct wl_callback_listener frame_listener;
const struct wl_registry_listener registry_listener;
/* Tear down the window's EGL/Wayland surface objects. */
void destroy_surface(struct window *window);
/* Compile shaders and create the GL objects in window->gl. */
void init_gl(struct window *window);
/* Initialise EGL display/context; `opaque` selects an opaque visual. */
void init_egl(struct display *display, int opaque);
/* Open the DRM device and create the GEM buffer manager; returns <0 on error. */
int init_gem(struct display *display);
/* Export `buffer` as a PRIME fd of `size` bytes; returns <0 on error. */
int drm_buffer_to_prime(struct display *display, struct buffer *buffer,
			unsigned int size);
/* Create the Wayland surface for `window`; `gpioclass` is opaque user data. */
void create_surface(struct window *window, void *gpioclass);
/* Release GEM resources acquired by init_gem(). */
void destroy_gem(struct display *display);
/* Allocate one GEM-backed buffer of `size` bytes; returns <0 on error. */
int create_buffer(struct display *display, struct buffer *buffer,
		  unsigned int size);
#endif /*ICITEST_GRAPH_H*/
| 27.566038 | 80 | 0.745836 | [
"geometry"
] |
652cba0dd4d9ed950115957859eaf0d0d6cff08d | 2,537 | h | C | released_plugins/v3d_plugins/neurontracing_neutube/src_neutube/neurolabi/c/tz_interface.h | zzhmark/vaa3d_tools | 3ca418add85a59ac7e805d55a600b78330d7e53d | [
"MIT"
] | 1 | 2021-12-27T19:14:03.000Z | 2021-12-27T19:14:03.000Z | released_plugins/v3d_plugins/neurontracing_neutube/src_neutube/neurolabi/c/tz_interface.h | zzhmark/vaa3d_tools | 3ca418add85a59ac7e805d55a600b78330d7e53d | [
"MIT"
] | 1 | 2016-12-03T05:33:13.000Z | 2016-12-03T05:33:13.000Z | released_plugins/v3d_plugins/neurontracing_neutube/src_neutube/neurolabi/c/tz_interface.h | zzhmark/vaa3d_tools | 3ca418add85a59ac7e805d55a600b78330d7e53d | [
"MIT"
] | null | null | null | /**@file tz_interface.h
* @brief shell interface
* @author Ting Zhao
* @date 01-Mar-2008
*/
#ifndef _TZ_INTERFACE_H_
#define _TZ_INTERFACE_H_
#include "tz_cdefs.h"
__BEGIN_DECLS
/******* progress reporter definition **********/
/*
 * Prints a textual percentage progress indicator on stdout.
 *
 * Usage:
 * PROGRESS_BEGIN("What's going on")
 * loop {
 * PROGRESS_STATUS(how_many_percents_are_done)
 * doing something;
 * PROGRESS_REFRESH
 * }
 * PROGRESS_END("done")
 *
 * Note: To disable the progress reporter, define INTERFACE_PROGRESS_OFF
 * before including this header (which forces ENABLE_PROGRESS to 0 below).
 */
#if defined INTERFACE_PROGRESS_OFF
#  define ENABLE_PROGRESS 0
#  define ENABLE_HOURGLASS 0
#else
#  define ENABLE_PROGRESS 1
#  define ENABLE_HOURGLASS 1
#endif
/* FORCE_PROGRESS overrides INTERFACE_PROGRESS_OFF and re-enables both
 * indicators unconditionally. */
#if defined FORCE_PROGRESS
#  undef ENABLE_PROGRESS
#  undef ENABLE_HOURGLASS
#  define ENABLE_PROGRESS 1
#  define ENABLE_HOURGLASS 1
#endif
#if ENABLE_PROGRESS == 1
/* Print the task description; the percentage is printed after it. */
#define PROGRESS_BEGIN(msg) printf("%s: ", msg);
/* Print "NNN%" (3-digit field) and flush so it appears immediately. */
#define PROGRESS_STATUS(k) \
  printf("%3d%%", k); \
  fflush(stdout);
/* Back up over the 4 characters written by PROGRESS_STATUS so the next
 * status overwrites them in place. */
#define PROGRESS_REFRESH \
  printf("\b\b\b\b");
#define PROGRESS_END(msg) \
  printf("%s\n", msg);
#else
/* Disabled: all progress macros expand to nothing. */
#define PROGRESS_BEGIN(msg)
#define PROGRESS_STATUS(k)
#define PROGRESS_REFRESH
#define PROGRESS_END(msg)
#endif
/**************************/
/******* hourglass definition **********/
/*
 * Prints a spinning "hourglass" character for tasks of unknown length.
 *
 * Usage:
 * HOURGLASS_BEGIN("Processing", hg)
 * loop {
 * doing something;
 * HOURGLASS_UPDATE(hg)
 * }
 * HOURGLASS_END("done")
 *
 * HOURGLASS_BEGIN declares the counter variable <hg> itself, so do not
 * declare it beforehand.
 *
 * Note: To disable the hourglass, define INTERFACE_PROGRESS_OFF (which
 * forces ENABLE_HOURGLASS to 0 above).
 */
#if ENABLE_HOURGLASS == 1
/* Animation frames cycled through by HOURGLASS_UPDATE. */
const static char *Hourglass[] = {".", ":", "*", "m"};
/* Declare the frame counter <hg>, print the message and the first frame. */
#define HOURGLASS_BEGIN(msg, hg) \
  int hg = 0; \
  printf("%s ... ", msg); \
  printf("%s", Hourglass[hg]);
/* Erase the previous frame and print the next one (wrapping after 4). */
#define HOURGLASS_UPDATE(hg) \
  printf("\b"); \
  hg++; \
  if (hg > 3) { \
    hg = 0; \
  } \
  printf("%s", Hourglass[hg]); \
  fflush(stdout);
/* Erase the last frame and print the closing message. */
#define HOURGLASS_END(msg) \
  printf("\b"); \
  printf("%s\n", msg);
#else
/* Disabled: all hourglass macros expand to nothing.
 * NOTE(review): HOURGLASS_SHOW exists only in this disabled branch; the
 * enabled branch above never defines it, so any caller using it would fail
 * to compile when ENABLE_HOURGLASS == 1 -- confirm whether it is dead. */
#define HOURGLASS_BEGIN(msg, hg)
#define HOURGLASS_SHOW(hg)
#define HOURGLASS_UPDATE(hg)
#define HOURGLASS_END(msg)
#endif
/**************************/
/*
 * Display <message> to ask a user to input something. Press Enter to end the
 * dialog. <data> is used to store the input and <format> specifies how to read
 * the input. It is a string format for scanf(). It returns 1 if <data> is
 * updated; otherwise it returns 0.
 */
int Input_Dialog(const char *message, const char *format, void *data);
__END_DECLS
#endif
| 19.976378 | 79 | 0.624754 | [
"3d"
] |
6530b59b66f6b80aaf3f96f09cc4c337de65e1d6 | 19,796 | h | C | fnet_demos/common/startup/startup_mk70fn1/vectors.h | 8bitgeek/fnet | 989903fa94fcc3a507484dc9a69eaf3d6840826c | [
"Apache-2.0"
] | 1 | 2016-04-28T01:21:05.000Z | 2016-04-28T01:21:05.000Z | fnet_demos/common/startup/startup_mk70fn1/vectors.h | Yona-Appletree/FnetCubes | 549b0b80de6587c6012020d09cced39be5d576ee | [
"Apache-2.0"
] | null | null | null | fnet_demos/common/startup/startup_mk70fn1/vectors.h | Yona-Appletree/FnetCubes | 549b0b80de6587c6012020d09cced39be5d576ee | [
"Apache-2.0"
] | null | null | null | /******************************************************************************
* File: vectors.h
*
* Purpose: Provide custom interrupt service routines for Kinetis.
*
* NOTE: This vector table is a superset table, so interrupt sources might be
* listed that are not available on the specific Kinetis device you are
* using.
******************************************************************************/
#ifndef __VECTORS_H
#define __VECTORS_H 1
#include "fapp_config.h"
#include "fnet.h"
// function prototype for default_isr in vectors.c
void default_isr(void);
void abort_isr(void);
void hard_fault_handler_c(unsigned int * hardfault_args);
/* Interrupt Vector Table Function Pointers */
typedef void pointer(void);
extern void __iar_program_start(void);
// Address Vector IRQ Source module Source description
// Vector 0 is the initial supervisor stack pointer; its symbol differs per
// toolchain (IAR / CodeWarrior / Keil uVision), so pick it by compiler.
#if FNET_CFG_COMP_IAR
extern unsigned long __BOOT_STACK_ADDRESS[];
#define VECTOR_000 (pointer*)__BOOT_STACK_ADDRESS // ARM core Initial Supervisor SP
#endif /* FNET_CFG_COMP_IAR */
#if FNET_CFG_COMP_CW
extern unsigned long __SP_INIT[];
#define VECTOR_000 (pointer*)__SP_INIT // ARM core Initial Supervisor SP
#endif /* FNET_CFG_COMP_CW */
#if FNET_CFG_COMP_UV
extern unsigned long __initial_sp[];
#define VECTOR_000 (pointer*)__initial_sp // ARM core Initial Supervisor SP
#endif /* FNET_CFG_COMP_UV */
// Vector 1 (initial PC) points at the startup/reset routine.
extern void __startup(void);
#define VECTOR_002 default_isr // 0x0000_0008 2 - ARM core Non-maskable Interrupt (NMI)
#define VECTOR_003 default_isr // 0x0000_000C 3 - ARM core Hard Fault
#define VECTOR_004 default_isr // 0x0000_0010 4 -
#define VECTOR_005 default_isr // 0x0000_0014 5 - ARM core Bus Fault
#define VECTOR_006 default_isr // 0x0000_0018 6 - ARM core Usage Fault
#define VECTOR_007 default_isr // 0x0000_001C 7 -
#define VECTOR_008 default_isr // 0x0000_0020 8 -
#define VECTOR_009 default_isr // 0x0000_0024 9 -
#define VECTOR_010 default_isr // 0x0000_0028 10 -
#define VECTOR_011 default_isr // 0x0000_002C 11 - ARM core Supervisor call (SVCall)
#define VECTOR_012 default_isr // 0x0000_0030 12 - ARM core Debug Monitor
#define VECTOR_013 default_isr // 0x0000_0034 13 -
#define VECTOR_014 default_isr // 0x0000_0038 14 - ARM core Pendable request for system service (PendableSrvReq)
#define VECTOR_015 default_isr // 0x0000_003C 15 - ARM core System tick timer (SysTick)
#define VECTOR_016 default_isr // 0x0000_0040 16 0 DMA DMA Channel 0 transfer complete
#define VECTOR_017 default_isr // 0x0000_0044 17 1 DMA DMA Channel 1 transfer complete
#define VECTOR_018 default_isr // 0x0000_0048 18 2 DMA DMA Channel 2 transfer complete
#define VECTOR_019 default_isr // 0x0000_004C 19 3 DMA DMA Channel 3 transfer complete
#define VECTOR_020 default_isr // 0x0000_0050 20 4 DMA DMA Channel 4 transfer complete
#define VECTOR_021 default_isr // 0x0000_0054 21 5 DMA DMA Channel 5 transfer complete
#define VECTOR_022 default_isr // 0x0000_0058 22 6 DMA DMA Channel 6 transfer complete
#define VECTOR_023 default_isr // 0x0000_005C 23 7 DMA DMA Channel 7 transfer complete
#define VECTOR_024 default_isr // 0x0000_0060 24 8 DMA DMA Channel 8 transfer complete
#define VECTOR_025 default_isr // 0x0000_0064 25 9 DMA DMA Channel 9 transfer complete
#define VECTOR_026 default_isr // 0x0000_0068 26 10 DMA DMA Channel 10 transfer complete
#define VECTOR_027 default_isr // 0x0000_006C 27 11 DMA DMA Channel 11 transfer complete
#define VECTOR_028 default_isr // 0x0000_0070 28 12 DMA DMA Channel 12 transfer complete
#define VECTOR_029 default_isr // 0x0000_0074 29 13 DMA DMA Channel 13 transfer complete
#define VECTOR_030 default_isr // 0x0000_0078 30 14 DMA DMA Channel 14 transfer complete
#define VECTOR_031 default_isr // 0x0000_007C 31 15 DMA DMA Channel 15 transfer complete
#define VECTOR_032 default_isr // 0x0000_0080 32 16 DMA DMA Error Interrupt Channels 0-15
#define VECTOR_033 default_isr // 0x0000_0084 33 17 MCM Normal interrupt
#define VECTOR_034 default_isr // 0x0000_0088 34 18 Flash memory Command Complete
#define VECTOR_035 default_isr // 0x0000_008C 35 19 Flash memory Read Collision
#define VECTOR_036 default_isr // 0x0000_0090 36 20 Mode Controller Low Voltage Detect,Low Voltage Warning, Low Leakage Wakeup
#define VECTOR_037 default_isr // 0x0000_0094 37 21 LLWU
#define VECTOR_038 default_isr // 0x0000_0098 38 22 WDOG
#define VECTOR_039 default_isr // 0x0000_009C 39 23 RNGB
#define VECTOR_040 default_isr // 0x0000_00A0 40 24 I2C0
#define VECTOR_041 default_isr // 0x0000_00A4 41 25 I2C1
#define VECTOR_042 default_isr // 0x0000_00A8 42 26 SPI0 Single interrupt vector for all sources
#define VECTOR_043 default_isr // 0x0000_00AC 43 27 SPI1 Single interrupt vector for all sources
#define VECTOR_044 default_isr // 0x0000_00B0 44 28 SPI2 Single interrupt vector for all sources
#define VECTOR_045 default_isr // 0x0000_00B4 45 29 CAN0 OR'ed Message buffer (0-15)
#define VECTOR_046 default_isr // 0x0000_00B8 46 30 CAN0 Bus Off
#define VECTOR_047 default_isr // 0x0000_00BC 47 31 CAN0 Error
#define VECTOR_048 default_isr // 0x0000_00C0 48 32 CAN0 Transmit Warning
#define VECTOR_049 default_isr // 0x0000_00C4 49 33 CAN0 Receive Warning
#define VECTOR_050 default_isr // 0x0000_00C8 50 34 CAN0 Wake Up
#define VECTOR_051 default_isr // 0x0000_00CC 51 35 CAN0 Individual Matching Elements Update (IMEU)
#define VECTOR_052 default_isr // 0x0000_00D0 52 36 CAN0 Lost receive
#define VECTOR_053 default_isr // 0x0000_00D4 53 37 CAN1 OR'ed Message buffer (0-15)
#define VECTOR_054 default_isr // 0x0000_00D8 54 38 CAN1 Bus off
#define VECTOR_055 default_isr // 0x0000_00DC 55 39 CAN1 Error
#define VECTOR_056 default_isr // 0x0000_00E0 56 40 CAN1 Transmit Warning
#define VECTOR_057 default_isr // 0x0000_00E4 57 41 CAN1 Receive Warning
#define VECTOR_058 default_isr // 0x0000_00E8 58 42 CAN1 Wake Up
#define VECTOR_059 default_isr // 0x0000_00EC 59 43 CAN1 Individual Matching Elements Update (IMEU)
#define VECTOR_060 default_isr // 0x0000_00F0 60 44 CAN1 Lost receive
#define VECTOR_061 default_isr // 0x0000_00F4 61 45 UART0 Single interrupt vector for UART status sources
#define VECTOR_062 default_isr // 0x0000_00F8 62 46 UART0 Single interrupt vector for UART error sources
#define VECTOR_063 default_isr // 0x0000_00FC 63 47 UART1 Single interrupt vector for UART status sources
#define VECTOR_064 default_isr // 0x0000_0100 64 48 UART1 Single interrupt vector for UART error sources
#define VECTOR_065 default_isr // 0x0000_0104 65 49 UART2 Single interrupt vector for UART status sources
#define VECTOR_066 default_isr // 0x0000_0108 66 50 UART2 Single interrupt vector for UART error sources
#define VECTOR_067 default_isr // 0x0000_010C 67 51 UART3 Single interrupt vector for UART status sources
#define VECTOR_068 default_isr // 0x0000_0110 68 52 UART3 Single interrupt vector for UART error sources
#define VECTOR_069 default_isr // 0x0000_0114 69 53 UART4 Single interrupt vector for UART status sources
#define VECTOR_070 default_isr // 0x0000_0118 70 54 UART4 Single interrupt vector for UART error sources
#define VECTOR_071 default_isr // 0x0000_011C 71 55 UART5 Single interrupt vector for UART status sources
#define VECTOR_072 default_isr // 0x0000_0120 72 56 UART5 Single interrupt vector for UART error sources
#define VECTOR_073 default_isr // 0x0000_0124 73 57 ADC0
#define VECTOR_074 default_isr // 0x0000_0128 74 58 ADC1
#define VECTOR_075 default_isr // 0x0000_012C 75 59 CMP0 High-speed comparator
#define VECTOR_076 default_isr // 0x0000_0130 76 60 CMP1
#define VECTOR_077 default_isr // 0x0000_0134 77 61 CMP2
#define VECTOR_078 default_isr // 0x0000_0138 78 62 FTM0 Single interrupt vector for all sources
#define VECTOR_079 default_isr // 0x0000_013C 79 63 FTM1 Single interrupt vector for all sources
#define VECTOR_080 default_isr // 0x0000_0140 80 64 FTM2 Single interrupt vector for all sources
#define VECTOR_081 default_isr // 0x0000_0144 81 65 CMT
#define VECTOR_082 default_isr // 0x0000_0148 82 66 RTC Timer interrupt
#define VECTOR_083 default_isr // 0x0000_014C 83 67
#define VECTOR_084 default_isr // 0x0000_0150 84 68 PIT Channel 0
#define VECTOR_085 default_isr // 0x0000_0154 85 69 PIT Channel 1
#define VECTOR_086 default_isr // 0x0000_0158 86 70 PIT Channel 2
// When the FNET application preinstalls its interrupts, vector 87
// (PIT channel 3) is routed to the FNET CPU interrupt dispatcher instead
// of the spin-loop default handler.
#if FAPP_CFG_PREINSTALL_INTERRUPTS
#define VECTOR_087 fnet_cpu_isr /**** 87 (0x15C) Timer 3 capture/reference event ****/
#else
#define VECTOR_087 default_isr // 0x0000_015C 87 71 PIT Channel 3
#endif
#define VECTOR_088 default_isr // 0x0000_0160 88 72 PDB
#define VECTOR_089 default_isr // 0x0000_0164 89 73 USB OTG
#define VECTOR_090 default_isr // 0x0000_0168 90 74 USB Charger Detect
#define VECTOR_091 default_isr // 0x0000_016C 91 75 ENET IEEE 1588 Timer interrupt
#define VECTOR_092 default_isr // 0x0000_0170 92 76 ENET Transmit interrupt
// Likewise, vector 93 (Ethernet MAC receive-frame interrupt) is routed to
// the FNET CPU interrupt dispatcher when interrupts are preinstalled.
#if FAPP_CFG_PREINSTALL_INTERRUPTS
#define VECTOR_093 fnet_cpu_isr /**** 93 (0x174) FEC Receive frame interrupt ****/
#else
#define VECTOR_093 default_isr // 0x0000_0174 93 77 ENET Receive interrupt
#endif
#define VECTOR_094 default_isr // 0x0000_0178 94 78 ENET Error and miscellaneous interrupt
#define VECTOR_095 default_isr // 0x0000_017C 95 79 I2S
#define VECTOR_096 default_isr // 0x0000_0180 96 80 SDHC
#define VECTOR_097 default_isr // 0x0000_0184 97 81 DAC0
#define VECTOR_098 default_isr // 0x0000_0188 98 82 DAC1
#define VECTOR_099 default_isr // 0x0000_018C 99 83 TSI Single interrupt vector for all sources
#define VECTOR_100 default_isr // 0x0000_0190 100 84 MCG
#define VECTOR_101 default_isr // 0x0000_0194 101 85 Low Power Timer
#define VECTOR_102 default_isr // 0x0000_0198 102 86 Segment LCD Single interrupt vector for all sources
#define VECTOR_103 default_isr // 0x0000_019C 103 87 Port control module Pin Detect (Port A)
#define VECTOR_104 default_isr // 0x0000_01A0 104 88 Port control module Pin Detect (Port B)
#define VECTOR_105 default_isr // 0x0000_01A4 105 89 Port control module Pin Detect (Port C)
#define VECTOR_106 default_isr // 0x0000_01A8 106 90 Port control module Pin Detect (Port D)
#define VECTOR_107 default_isr // 0x0000_01AC 107 91 Port control module Pin Detect (Port E)
#define VECTOR_108 default_isr // 0x0000_01B0 108 92
#define VECTOR_109 default_isr // 0x0000_01B4 109 93
#define VECTOR_110 default_isr // 0x0000_01B8 110 94
#define VECTOR_111 default_isr // 0x0000_01BC 111 95
#define VECTOR_112 default_isr // 0x0000_01C0 112 96
#define VECTOR_113 default_isr // 0x0000_01C4 113 97
#define VECTOR_114 default_isr // 0x0000_01C8 114 98
#define VECTOR_115 default_isr // 0x0000_01CC 115 99
#define VECTOR_116 default_isr // 0x0000_01D0 116 100
#define VECTOR_117 default_isr // 0x0000_01D4 117 101
#define VECTOR_118 default_isr // 0x0000_01D8 118 102
#define VECTOR_119 default_isr // 0x0000_01DC 119 103
#define VECTOR_120 default_isr //
#define VECTOR_121 default_isr //
#define VECTOR_122 default_isr //
#define VECTOR_123 default_isr //
#define VECTOR_124 default_isr //
#define VECTOR_125 default_isr //
#define VECTOR_126 default_isr //
#define VECTOR_127 default_isr //
#define VECTOR_128 default_isr //
#define VECTOR_129 default_isr //
#define VECTOR_130 default_isr //
#define VECTOR_131 default_isr //
#define VECTOR_132 default_isr //
#define VECTOR_133 default_isr //
#define VECTOR_134 default_isr //
#define VECTOR_135 default_isr //
#define VECTOR_136 default_isr //
#define VECTOR_137 default_isr //
#define VECTOR_138 default_isr //
#define VECTOR_139 default_isr //
#define VECTOR_140 default_isr //
#define VECTOR_141 default_isr //
#define VECTOR_142 default_isr //
#define VECTOR_143 default_isr //
#define VECTOR_144 default_isr //
#define VECTOR_145 default_isr //
#define VECTOR_146 default_isr //
#define VECTOR_147 default_isr //
#define VECTOR_148 default_isr //
#define VECTOR_149 default_isr //
#define VECTOR_150 default_isr //
#define VECTOR_151 default_isr //
#define VECTOR_152 default_isr //
#define VECTOR_153 default_isr //
#define VECTOR_154 default_isr //
#define VECTOR_155 default_isr //
#define VECTOR_156 default_isr //
#define VECTOR_157 default_isr //
#define VECTOR_158 default_isr //
#define VECTOR_159 default_isr //
#define VECTOR_160 default_isr //
#define VECTOR_161 default_isr //
#define VECTOR_162 default_isr //
#define VECTOR_163 default_isr //
#define VECTOR_164 default_isr //
#define VECTOR_165 default_isr //
#define VECTOR_166 default_isr //
#define VECTOR_167 default_isr //
#define VECTOR_168 default_isr //
#define VECTOR_169 default_isr //
#define VECTOR_170 default_isr //
#define VECTOR_171 default_isr //
#define VECTOR_172 default_isr //
#define VECTOR_173 default_isr //
#define VECTOR_174 default_isr //
#define VECTOR_175 default_isr //
#define VECTOR_176 default_isr //
#define VECTOR_177 default_isr //
#define VECTOR_178 default_isr //
#define VECTOR_179 default_isr //
#define VECTOR_180 default_isr //
#define VECTOR_181 default_isr //
#define VECTOR_182 default_isr //
#define VECTOR_183 default_isr //
#define VECTOR_184 default_isr //
#define VECTOR_185 default_isr //
#define VECTOR_186 default_isr //
#define VECTOR_187 default_isr //
#define VECTOR_188 default_isr //
#define VECTOR_189 default_isr //
#define VECTOR_190 default_isr //
#define VECTOR_191 default_isr //
#define VECTOR_192 default_isr //
#define VECTOR_193 default_isr //
#define VECTOR_194 default_isr //
#define VECTOR_195 default_isr //
#define VECTOR_196 default_isr //
#define VECTOR_197 default_isr //
#define VECTOR_198 default_isr //
#define VECTOR_199 default_isr //
#define VECTOR_200 default_isr //
#define VECTOR_201 default_isr //
#define VECTOR_202 default_isr //
#define VECTOR_203 default_isr //
#define VECTOR_204 default_isr //
#define VECTOR_205 default_isr //
#define VECTOR_206 default_isr //
#define VECTOR_207 default_isr //
#define VECTOR_208 default_isr //
#define VECTOR_209 default_isr //
#define VECTOR_210 default_isr //
#define VECTOR_211 default_isr //
#define VECTOR_212 default_isr //
#define VECTOR_213 default_isr //
#define VECTOR_214 default_isr //
#define VECTOR_215 default_isr //
#define VECTOR_216 default_isr //
#define VECTOR_217 default_isr //
#define VECTOR_218 default_isr //
#define VECTOR_219 default_isr //
#define VECTOR_220 default_isr //
#define VECTOR_221 default_isr //
#define VECTOR_222 default_isr //
#define VECTOR_223 default_isr //
#define VECTOR_224 default_isr //
#define VECTOR_225 default_isr //
#define VECTOR_226 default_isr //
#define VECTOR_227 default_isr //
#define VECTOR_228 default_isr //
#define VECTOR_229 default_isr //
#define VECTOR_230 default_isr //
#define VECTOR_231 default_isr //
#define VECTOR_232 default_isr //
#define VECTOR_233 default_isr //
#define VECTOR_234 default_isr //
#define VECTOR_235 default_isr //
#define VECTOR_236 default_isr //
#define VECTOR_237 default_isr //
#define VECTOR_238 default_isr //
#define VECTOR_239 default_isr //
#define VECTOR_240 default_isr //
#define VECTOR_241 default_isr //
#define VECTOR_242 default_isr //
#define VECTOR_243 default_isr //
#define VECTOR_244 default_isr //
#define VECTOR_245 default_isr //
#define VECTOR_246 default_isr //
#define VECTOR_247 default_isr //
#define VECTOR_248 default_isr //
#define VECTOR_249 default_isr //
#define VECTOR_250 default_isr //
#define VECTOR_251 default_isr //
#define VECTOR_252 default_isr //
#define VECTOR_253 default_isr //
#define VECTOR_254 default_isr //
#define VECTOR_255 default_isr //
// Flash configuration field words placed after the vector table
// (presumably at 0x400-0x40F: backdoor key + FPROT/FSEC/FOPT).  CONFIG_4's
// low byte 0xFE looks like FSEC leaving the flash unsecured -- verify
// against the Kinetis reference manual before changing.
#define CONFIG_1 (pointer*)0xffffffff
#define CONFIG_2 (pointer*)0xffffffff
#define CONFIG_3 (pointer*)0xffffffff
#define CONFIG_4 (pointer*)0xfffffffe
#endif /*__VECTORS_H*/
/* End of "vectors.h" */
| 61.478261 | 143 | 0.62649 | [
"vector"
] |
6533240a0b08fc26cf23bc8a03bf8f1582eda964 | 2,633 | c | C | test/polygon.c | timower/libnsfb-reMarkable | 12c30d83612d2112b92092d150023650a4a8400a | [
"MIT"
] | 3 | 2021-03-31T18:03:08.000Z | 2021-07-04T04:28:39.000Z | test/polygon.c | timower/libnsfb-reMarkable | 12c30d83612d2112b92092d150023650a4a8400a | [
"MIT"
] | null | null | null | test/polygon.c | timower/libnsfb-reMarkable | 12c30d83612d2112b92092d150023650a4a8400a | [
"MIT"
] | 1 | 2021-03-31T18:20:48.000Z | 2021-03-31T18:20:48.000Z | /* libnsfb ploygon plotter test program */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include "libnsfb.h"
#include "libnsfb_plot.h"
#include "libnsfb_event.h"
#define UNUSED(x) ((x) = (x))
/*
 * Draw a series of concentric regular polygons (13 sides down to 3) on an
 * nsfb surface, then wait for a quit event or roughly three one-second
 * timeouts before exiting.
 *
 * argv[1] optionally names the nsfb surface type to use (default "sdl").
 *
 * Returns 0 on success, or a distinct non-zero code per failure:
 *   1 - unknown surface type name
 *   2 - surface allocation failure
 *   3 - out of memory while building a polygon
 *   4 - surface initialisation failure
 */
int main(int argc, char **argv)
{
    const char *fename;
    enum nsfb_type_e fetype;
    nsfb_t *nsfb;
    nsfb_event_t event;
    int waitloop = 3;       /* number of 1s timeouts to wait before exit */
    nsfb_bbox_t box;
    uint8_t *fbptr;
    int fbstride;
    int sides;
    int radius;
    nsfb_point_t *points;
    int loop;

    /* select the surface type: first argument if given, else SDL */
    if (argc < 2) {
        fename = "sdl";
    } else {
        fename = argv[1];
    }

    fetype = nsfb_type_from_name(fename);
    if (fetype == NSFB_SURFACE_NONE) {
        fprintf(stderr, "Unable to convert \"%s\" to nsfb surface type\n", fename);
        return 1;
    }

    nsfb = nsfb_new(fetype);
    if (nsfb == NULL) {
        fprintf(stderr, "Unable to allocate \"%s\" nsfb surface\n", fename);
        return 2;
    }

    if (nsfb_init(nsfb) == -1) {
        fprintf(stderr, "Unable to initialise nsfb surface\n");
        nsfb_free(nsfb);
        return 4;
    }

    /* get the geometry of the whole screen */
    box.x0 = box.y0 = 0;
    nsfb_get_geometry(nsfb, &box.x1, &box.y1, NULL);

    if ((box.x1 == 0) || (box.y1 == 0)) {
        /* if surface was created with no size set a default */
        nsfb_set_geometry(nsfb, 800, 600, NSFB_FMT_ANY);
        nsfb_get_geometry(nsfb, &box.x1, &box.y1, NULL);
    }

    nsfb_get_buffer(nsfb, &fbptr, &fbstride);

    /* claim the whole screen for update */
    nsfb_claim(nsfb, &box);

    nsfb_plot_clg(nsfb, 0xffffffff);

    radius = (box.x1 / 3);

    for (sides = 13; sides >= 3; sides--) {
        points = malloc(sizeof(nsfb_point_t) * sides);
        if (points == NULL) {
            /* out of memory: release the surface rather than crash below */
            fprintf(stderr, "Unable to allocate polygon vertex array\n");
            nsfb_free(nsfb);
            return 3;
        }

        /* place each vertex on a circle centred on the screen */
        for (loop = 0; loop < sides; loop++) {
            points[loop].x = (box.x1 / 2) +
                (radius * cos(loop * 2 * M_PI / sides));
            points[loop].y = (box.y1 / 2) +
                (radius * sin(loop * 2 * M_PI / sides));
        }

        /* shade each ring differently: fewer sides gives a lighter grey */
        nsfb_plot_polygon(nsfb, (const int *)points, sides,
                          0xff000000 | (0xffffff / (sides * 2)));
        free(points);

        radius -= 25;
    }

    nsfb_update(nsfb, &box);

    /* wait for quit event or timeout */
    while (waitloop > 0) {
        if (nsfb_event(nsfb, &event, 1000) == false) {
            break;
        }
        if (event.type == NSFB_EVENT_CONTROL) {
            if (event.value.controlcode == NSFB_CONTROL_TIMEOUT) {
                /* timeout */
                waitloop--;
            } else if (event.value.controlcode == NSFB_CONTROL_QUIT) {
                break;
            }
        }
    }

    nsfb_free(nsfb);

    return 0;
}
/*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* End:
*/
| 21.941667 | 83 | 0.571971 | [
"geometry"
] |
653644aefe0eb179d3ce77032f305428eff5620e | 2,139 | h | C | Code/Framework/AzManipulatorTestFramework/Include/AzManipulatorTestFramework/AzManipulatorTestFrameworkUtils.h | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 11 | 2021-07-08T09:58:26.000Z | 2022-03-17T17:59:26.000Z | Code/Framework/AzManipulatorTestFramework/Include/AzManipulatorTestFramework/AzManipulatorTestFrameworkUtils.h | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 29 | 2021-07-06T19:33:52.000Z | 2022-03-22T10:27:49.000Z | Code/Framework/AzManipulatorTestFramework/Include/AzManipulatorTestFramework/AzManipulatorTestFrameworkUtils.h | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 4 | 2021-07-06T19:24:43.000Z | 2022-03-31T12:42:27.000Z | /*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzToolsFramework/Manipulators/LinearManipulator.h>
#include <AzToolsFramework/Manipulators/ManipulatorBus.h>
#include <AzToolsFramework/Manipulators/ManipulatorManager.h>
#include <AzToolsFramework/Manipulators/PlanarManipulator.h>
//! Helper functions shared by the manipulator test framework: factory
//! functions for simple manipulators, mouse event dispatch and camera
//! state manipulation.
namespace AzManipulatorTestFramework
{
    //! Create a linear manipulator with a unit sphere bound.
    //! @param manipulatorManagerId Manager the new manipulator registers with.
    //! @param position World-space position of the manipulator (default origin).
    //! @param radius Radius of the sphere bound (default 1.0).
    AZStd::shared_ptr<AzToolsFramework::LinearManipulator> CreateLinearManipulator(
        const AzToolsFramework::ManipulatorManagerId manipulatorManagerId,
        const AZ::Vector3& position = AZ::Vector3::CreateZero(),
        float radius = 1.0f);
    //! Create a planar manipulator with a unit sphere bound.
    //! @param manipulatorManagerId Manager the new manipulator registers with.
    //! @param position World-space position of the manipulator (default origin).
    //! @param radius Radius of the sphere bound (default 1.0).
    AZStd::shared_ptr<AzToolsFramework::PlanarManipulator> CreatePlanarManipulator(
        const AzToolsFramework::ManipulatorManagerId manipulatorManagerId,
        const AZ::Vector3& position = AZ::Vector3::CreateZero(),
        float radius = 1.0f);
    //! Dispatch a mouse event to the main manipulator manager via a bus call.
    void DispatchMouseInteractionEvent(const AzToolsFramework::ViewportInteraction::MouseInteractionEvent& event);
    //! Set the camera position of the specified camera state and return a copy of that state.
    AzFramework::CameraState SetCameraStatePosition(const AZ::Vector3& position, AzFramework::CameraState& cameraState);
    //! Set the camera direction of the specified camera state and return a copy of that state.
    AzFramework::CameraState SetCameraStateDirection(const AZ::Vector3& direction, AzFramework::CameraState& cameraState);
    //! Return the center of the viewport of the specified camera state.
    AzFramework::ScreenPoint GetCameraStateViewportCenter(const AzFramework::CameraState& cameraState);
    //! Default viewport size (1080p) in 16:9 aspect ratio.
    inline const auto DefaultViewportSize = AZ::Vector2(1920.0f, 1080.0f);
} // namespace AzManipulatorTestFramework
| 47.533333 | 122 | 0.769518 | [
"3d"
] |
653d5eaa9a52fd5475be46fc108ba9a84cfec0d1 | 6,991 | h | C | types/json_printing.h | leobispo/soa | 57eeddd841157250232004af9a4d69b487c43ae3 | [
"Apache-2.0"
] | 1 | 2018-02-27T08:02:26.000Z | 2018-02-27T08:02:26.000Z | types/json_printing.h | leobispo/soa | 57eeddd841157250232004af9a4d69b487c43ae3 | [
"Apache-2.0"
] | null | null | null | types/json_printing.h | leobispo/soa | 57eeddd841157250232004af9a4d69b487c43ae3 | [
"Apache-2.0"
] | null | null | null | /* json_printing.h -*- C++ -*-
Jeremy Barnes, 26 February 2013
Copyright (c) 2013 Datacratic Inc. All rights reserved.
Context to print out JSON.
*/
#pragma once
#include "json_parsing.h"
#include "jml/utils/exc_assert.h"
#include <boost/algorithm/string.hpp>
#include <string>
namespace Datacratic {
/*****************************************************************************/
/* JSON PRINTING CONTEXT */
/*****************************************************************************/
/** Abstract sink for emitting a JSON document as a stream of events.
 *
 *  Callers drive it SAX-style: startObject()/startMember()/endObject()
 *  for objects, startArray()/newArrayElement()/endArray() for arrays,
 *  and the write*() overloads for scalar leaf values.
 */
struct JsonPrintingContext {
    virtual ~JsonPrintingContext()
    {
    }
    // --- structural events ---
    virtual void startObject() = 0;
    // Begin the named member of the current object; its value follows.
    virtual void startMember(const std::string & memberName) = 0;
    virtual void endObject() = 0;
    // knownSize is a hint; -1 means the element count is not known up front.
    virtual void startArray(int knownSize = -1) = 0;
    virtual void newArrayElement() = 0;
    virtual void endArray() = 0;
    // --- scalar leaf values ---
    virtual void writeInt(int i) = 0;
    virtual void writeUnsignedInt(unsigned i) = 0;
    virtual void writeLong(long i) = 0;
    virtual void writeUnsignedLong(unsigned long i) = 0;
    virtual void writeLongLong(long long i) = 0;
    virtual void writeUnsignedLongLong(unsigned long long i) = 0;
    virtual void writeFloat(float f) = 0;
    virtual void writeDouble(double d) = 0;
    virtual void writeString(const std::string & s) = 0;
    virtual void writeStringUtf8(const Utf8String & s) = 0;
    virtual void writeBool(bool b) = 0;
    virtual void writeNull() = 0;
    // Emit a pre-built JSON value verbatim at the current position.
    virtual void writeJson(const Json::Value & val) = 0;
    // Emit a placeholder for a value that is deliberately not serialized.
    virtual void skip() = 0;
};
/*****************************************************************************/
/* STREAM JSON PRINTING CONTEXT */
/*****************************************************************************/
/** JsonPrintingContext that serializes directly to a std::ostream.
 *
 *  Maintains a stack of PathEntry records mirroring the current nesting so
 *  it can insert commas between siblings and assert that structural calls
 *  match (e.g. startMember() only inside an object).
 */
struct StreamJsonPrintingContext
    : public JsonPrintingContext {
    StreamJsonPrintingContext(std::ostream & stream)
        : stream(stream), writeUtf8(true)
    {
    }
    std::ostream & stream;
    bool writeUtf8; ///< If true, utf8 chars in binary. False: escaped ASCII
    /// One level of object/array nesting; memberNum counts siblings emitted
    /// so far (-1 before the first), used to decide when to print a comma.
    struct PathEntry {
        PathEntry(bool isObject)
            : isObject(isObject), memberNum(-1)
        {
        }
        bool isObject;
        std::string memberName;
        int memberNum;
    };
    std::vector<PathEntry> path;
    virtual void startObject()
    {
        path.push_back(true /* isObject */);
        stream << "{";
    }
    virtual void startMember(const std::string & memberName)
    {
        ExcAssert(path.back().isObject);
        //path.back().memberName = memberName;
        ++path.back().memberNum;
        // comma before every member except the first
        if (path.back().memberNum != 0)
            stream << ",";
        stream << '\"';
        ML::jsonEscape(memberName, stream);
        stream << "\":";
    }
    virtual void endObject()
    {
        ExcAssert(path.back().isObject);
        path.pop_back();
        stream << "}";
    }
    virtual void startArray(int knownSize = -1)
    {
        // knownSize hint is unused for stream output
        path.push_back(false /* isObject */);
        stream << "[";
    }
    virtual void newArrayElement()
    {
        ExcAssert(!path.back().isObject);
        ++path.back().memberNum;
        // comma before every element except the first
        if (path.back().memberNum != 0)
            stream << ",";
    }
    virtual void endArray()
    {
        ExcAssert(!path.back().isObject);
        path.pop_back();
        stream << "]";
    }
    virtual void skip()
    {
        // skipped values are emitted as null to keep the document valid
        stream << "null";
    }
    virtual void writeNull()
    {
        stream << "null";
    }
    virtual void writeInt(int i)
    {
        stream << i;
    }
    virtual void writeUnsignedInt(unsigned int i)
    {
        stream << i;
    }
    virtual void writeLong(long int i)
    {
        stream << i;
    }
    virtual void writeUnsignedLong(unsigned long int i)
    {
        stream << i;
    }
    virtual void writeLongLong(long long int i)
    {
        stream << i;
    }
    virtual void writeUnsignedLongLong(unsigned long long int i)
    {
        stream << i;
    }
    // NOTE(review): float/double use the stream's current precision (default
    // 6 significant digits), so values may not round-trip exactly -- confirm
    // whether callers expect full precision here.
    virtual void writeFloat(float f)
    {
        stream << f;
    }
    virtual void writeDouble(double d)
    {
        stream << d;
    }
    virtual void writeString(const std::string & s)
    {
        stream << '\"';
        ML::jsonEscape(s, stream);
        stream << '\"';
    }
    // Defined out of line; honours the writeUtf8 flag.
    virtual void writeStringUtf8(const Utf8String & s);
    virtual void writeJson(const Json::Value & val)
    {
        // trim the trailing newline Json::Value::toString() appends
        stream << boost::trim_copy(val.toString());
    }
    virtual void writeBool(bool b)
    {
        stream << (b ? "true": "false");
    }
};
/*****************************************************************************/
/* STRUCTURED JSON PRINTING CONTEXT */
/*****************************************************************************/
/** JSON printing context that builds an in-memory Json::Value tree instead
    of serializing text.  `output` owns the finished tree; `current` always
    points at the node the next write* call should fill; `path` is a stack of
    pointers to the currently-open container values so start*/end* calls can
    nest.  NOTE(review): correctness relies on child pointers handed out by
    Json::Value::operator[] staying valid while siblings are added (true for
    jsoncpp's map/deque-backed storage) -- confirm if the Json library is
    ever swapped out. */
struct StructuredJsonPrintingContext
    : public JsonPrintingContext {
    Json::Value output;        // root of the tree being built
    Json::Value * current;     // destination of the next value written
    StructuredJsonPrintingContext()
        : current(&output)
    {
    }
    std::vector<Json::Value *> path;   // stack of open containers
    // Turn the current node into an empty object and make it the open container.
    virtual void startObject()
    {
        *current = Json::Value(Json::objectValue);
        path.push_back(current);
    }
    // Point `current` at the (created-on-demand) member slot of the open object.
    virtual void startMember(const std::string & memberName)
    {
        current = &(*path.back())[memberName];
    }
    // Close the open object.  `current` is left stale until the next
    // startMember/newArrayElement repositions it.
    virtual void endObject()
    {
        path.pop_back();
    }
    // Turn the current node into an empty array and make it the open container.
    // knownSize is a hint only; this implementation ignores it.
    virtual void startArray(int knownSize = -1)
    {
        *current = Json::Value(Json::arrayValue);
        path.push_back(current);
    }
    // Append a slot to the open array: indexing one past the end is the
    // jsoncpp create-on-access append idiom.
    virtual void newArrayElement()
    {
        Json::Value & b = *path.back();
        current = &b[b.size()];
    }
    // Close the open array (same stale-`current` caveat as endObject).
    virtual void endArray()
    {
        path.pop_back();
    }
    // Skipped and null values are both represented as a null Json::Value.
    virtual void skip()
    {
        *current = Json::Value();
    }
    virtual void writeNull()
    {
        *current = Json::Value();
    }
    virtual void writeInt(int i)
    {
        *current = i;
    }
    virtual void writeUnsignedInt(unsigned int i)
    {
        *current = i;
    }
    virtual void writeLong(long int i)
    {
        *current = i;
    }
    virtual void writeUnsignedLong(unsigned long int i)
    {
        *current = i;
    }
    virtual void writeLongLong(long long int i)
    {
        *current = i;
    }
    virtual void writeUnsignedLongLong(unsigned long long int i)
    {
        *current = i;
    }
    virtual void writeFloat(float f)
    {
        *current = f;
    }
    virtual void writeDouble(double d)
    {
        *current = d;
    }
    virtual void writeString(const std::string & s)
    {
        *current = s;
    }
    virtual void writeStringUtf8(const Utf8String & s)
    {
        *current = s;
    }
    // Deep-copies an existing JSON value into place.
    virtual void writeJson(const Json::Value & val)
    {
        *current = val;
    }
    virtual void writeBool(bool b)
    {
        *current = b;
    }
};
} // namespace Datacratic
| 21.444785 | 86 | 0.511086 | [
"vector"
] |
6544a186726097ff94d9496f7f46ec627ff8d7bb | 5,890 | h | C | SPQSP_IO/SP_QSP_shared/ABM_Base/SpatialCompartment.h | popellab/SPQSP_IO | eca3ea55ec2f75b0db5d58da09500ddffabc001d | [
"MIT"
] | null | null | null | SPQSP_IO/SP_QSP_shared/ABM_Base/SpatialCompartment.h | popellab/SPQSP_IO | eca3ea55ec2f75b0db5d58da09500ddffabc001d | [
"MIT"
] | null | null | null | SPQSP_IO/SP_QSP_shared/ABM_Base/SpatialCompartment.h | popellab/SPQSP_IO | eca3ea55ec2f75b0db5d58da09500ddffabc001d | [
"MIT"
] | null | null | null | #ifndef __SPATIAL_COMPARTMENT_H__
#define __SPATIAL_COMPARTMENT_H__
#include <vector>
#include <functional>
//#include "GridElement.h"
//#include "../pde/DiffuseGrid.h"
//#include "CellFactory.h"
#include "CellAgent.h"
#include "Grid3D.h"
#include "AgentGridVoxel.h"
#include "RNG.h"
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/assume_abstract.hpp>
namespace SP_QSP_IO{
//! base class for 3D spatial compartments
class SpatialCompartment
{
//using std::vector;
public:
	//! vector of pointers to all cell agents in the compartment
	typedef std::vector<CellAgent *> CellVec;
	//! 3D grid of voxels, each voxel holding the agents located in it
	typedef Grid3D<AgentGridVoxel*> AgentGrid;
protected:
	typedef BaseAgent::AgentType AgentType;
	typedef BaseAgent::AgentState AgentState;
public:
	//! default constructor for serialization
	SpatialCompartment(){};
	//! construct a compartment of x-by-y-by-z voxels
	SpatialCompartment(int x, int y, int z);
	virtual ~SpatialCompartment();
	//! Initialize compartment
	virtual void initCompartment(std::string);
	//! pure virtual: advance the simulation by one time slice
	virtual void timeSlice(unsigned long slice)= 0;
	//! default header for extra remark column when writing cell grid to file
	virtual std::string getExtraRemarkHeader() const { return ""; };
	//void printCellVectorSequence() const;
	//! write information of all cells on grid to a snapshot file.
	std::string compartment_cells_to_string(void) const;
	//! pure virtual
	virtual std::string printGridToFile() const = 0;
	//! pure virtual
	virtual void printCellOdeToFile(unsigned long slice) const = 0;
	//! pure virtual
	//virtual void printGridToScreen(unsigned long slice) const = 0;
	//! iterate over entire grid
	//void for_each_voxel(bool, bool, bool, void(*)(Coord3D&));
	void for_each_grid_coord(bool, bool, bool, std::function<void(Coord3D&)>);
	//! get total number of cells.
	int getNrCell()const { return _vecAgent.size(); };
	//! get total number of voxels in the grid
	int getGridSize()const { return _agGrid.getSize(); };
	//! get a string representation of the grid contents
	std::string getGridContent()const;
	//! examine neighboring voxels
	bool check_neighbor(const CoordVec& env, const Coord& current,
		std::function<bool(const int, const Coord&)>) const;
	//! iterate through a list of coordinates and perform operation
	bool for_each_coord(const CoordVec& env, const Coord& current,
		std::function<bool(const int, const Coord&)>)const;
	// iterate through a list of coordinates and perform operation on agents in these voxels
	bool for_each_neighbor_ag(const CoordVec& env, const Coord& current,
		std::function<bool(BaseAgent*)>)const;
	//! examine neibhorhood: all voxels in shape
	bool check_neighbor_shape(const ShapeVec&, const CoordVec&,
		const Coord&, std::function<bool(const ShapeCoords&)>) const;
	//! Scan Moore neighborhood for cell of given type and state.
	bool hasTypeStateInTarget(const CoordVec& target, const Coord& c, AgentType t, AgentState s) const;
	//! find all qualifying coordinates neighborhood
	bool get_qualifying_voxels(const CoordVec&, const Coord&,
		std::vector<int>&, std::function<bool(int, const Coord&)>)const;
	//! find all qualifying shape anchors in neighborhood
	bool get_qualifying_shape_anchors(const ShapeVec&, const CoordVec&,
		const Coord&, std::vector<int>&, std::function<bool(const ShapeCoords&)>)const;
	//! get all Open neighbor voxels
	bool getOpenVoxels(const ShapeVec& targets, const CoordVec& anchors,
		const Coord & c, AgentType type, std::vector<int>& candidates)const;
	//! get one from the open voxels
	bool getOneOpenVoxel(const ShapeVec& targets, const CoordVec& anchors,
		const Coord & c, AgentType type, int & idxFound, RNG& rng)const;
	//! check if any qualifying agent exist in neighborhood
	bool check_neighbor_agents(const CoordVec&, const Coord&,
		std::function<bool(const BaseAgent*)>)const;
	//! get pointers to qualifying agents
	bool get_qualifying_neighbor_agents(const CoordVec&, const Coord&,
		std::vector<BaseAgent*>&, std::function<bool(const BaseAgent*)>)const;
	//! get one random qualifying agent
	bool get_random_qualifying_neighbor_agent(const CoordVec&, const Coord&,
		BaseAgent *&, RNG&, std::function<bool(const BaseAgent*)>)const;
protected:
	//! Add a cell to spatial compartment
	void addAgentToGrid(const Coord & c, BaseAgent*);
	//! remove a cell from spatial compartment
	void removeAgentFromGrid(const Coord& c, BaseAgent*);
	//! Add a cell to spatial compartment
	//void addCellToGrid(Coord & c, CellAgent*);
	//! remove a cell from spatial compartment
	//void removeCellFromGrid(CellAgent*);
	//! initiate agent grid
	virtual bool initAgentGrid() = 0;
	//! attempt recruit one single cell;
	bool recruitOneCellInMooreNeighborhood(const CellAgent * dummy, const Coord & crd, RNG& rng);
	//! size in x direction
	int _sizeX;
	//! size in y direction
	int _sizeY;
	//! size in z direction
	int _sizeZ;
	//! element grid
	/*!ElementGrid _eGrid; Type of agent mapped to this layer,
	for more efficient scanning and type searching*/
	//! vector of cellular agents
	CellVec _vecAgent;
	//! grid of pointers to cellular agents.
	//! AgentGrid
	AgentGrid _agGrid;
private:
	friend class boost::serialization::access;
	//! boost serialization
	template<class Archive>
	void serialize(Archive & ar, const unsigned int /*version*/);
	//! pure virtual: setup compartment environment
	virtual void initEnvironment()=0;
	//! pure virtual: setup initial cells
	virtual void initCell(std::string filename)=0;
};
BOOST_SERIALIZATION_ASSUME_ABSTRACT(SpatialCompartment)
template<class Archive>
inline void SpatialCompartment::serialize(Archive & ar, const unsigned int version){
	// _cGrid and _vecAgent contains pointers of base class which point to derived class instances.
	// register all pointer to all cell types so that items can be serialized correctly
	// NOTE: the archive order below defines the on-disk format of saved
	// states; do not reorder without versioning the archive.
	ar & BOOST_SERIALIZATION_NVP(_sizeX);
	ar & BOOST_SERIALIZATION_NVP(_sizeY);
	ar & BOOST_SERIALIZATION_NVP(_sizeZ);
	ar & BOOST_SERIALIZATION_NVP(_vecAgent);
	ar & BOOST_SERIALIZATION_NVP(_agGrid);
}
};
#endif
| 33.657143 | 100 | 0.753311 | [
"shape",
"vector",
"3d"
] |
65453aec1ae7288cbdd127226bf1f0bc57bfd758 | 3,945 | h | C | minetest-ego/src/hud.h | medialab-prado/Interactivos-15-Ego | 4c7e184b9495d07fc1008573ef1b752d20ebc1ab | [
"MIT"
] | null | null | null | minetest-ego/src/hud.h | medialab-prado/Interactivos-15-Ego | 4c7e184b9495d07fc1008573ef1b752d20ebc1ab | [
"MIT"
] | null | null | null | minetest-ego/src/hud.h | medialab-prado/Interactivos-15-Ego | 4c7e184b9495d07fc1008573ef1b752d20ebc1ab | [
"MIT"
] | null | null | null | /*
Minetest
Copyright (C) 2010-2013 kwolekr, Ryan Kwolek <kwolekr@minetest.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef HUD_HEADER
#define HUD_HEADER
#include "irrlichttypes_extrabloated.h"
#include <string>
#define HUD_DIR_LEFT_RIGHT 0
#define HUD_DIR_RIGHT_LEFT 1
#define HUD_DIR_TOP_BOTTOM 2
#define HUD_DIR_BOTTOM_TOP 3
#define HUD_CORNER_UPPER 0
#define HUD_CORNER_LOWER 1
#define HUD_CORNER_CENTER 2
// Note that these visibility flags do not determine if the hud items are
// actually drawn, but rather, whether to draw the item should the rest
// of the game state permit it.
#define HUD_FLAG_HOTBAR_VISIBLE (1 << 0)
#define HUD_FLAG_HEALTHBAR_VISIBLE (1 << 1)
#define HUD_FLAG_CROSSHAIR_VISIBLE (1 << 2)
#define HUD_FLAG_WIELDITEM_VISIBLE (1 << 3)
#define HUD_FLAG_BREATHBAR_VISIBLE (1 << 4)
#define HUD_FLAG_MINIMAP_VISIBLE (1 << 5)
#define HUD_PARAM_HOTBAR_ITEMCOUNT 1
#define HUD_PARAM_HOTBAR_IMAGE 2
#define HUD_PARAM_HOTBAR_SELECTED_IMAGE 3
#define HUD_HOTBAR_ITEMCOUNT_DEFAULT 8
#define HUD_HOTBAR_ITEMCOUNT_MAX 23
#define HOTBAR_IMAGE_SIZE 48
// Kinds of Lua-defined HUD elements.  Values are part of the client/server
// protocol -- never renumber.
enum HudElementType {
	HUD_ELEM_IMAGE = 0,
	HUD_ELEM_TEXT = 1,
	HUD_ELEM_STATBAR = 2,
	HUD_ELEM_INVENTORY = 3,
	HUD_ELEM_WAYPOINT = 4,
};
// Which field of a HudElement a hud_change message modifies.  Order is part
// of the protocol -- append only.
enum HudElementStat {
	HUD_STAT_POS = 0,
	HUD_STAT_NAME,
	HUD_STAT_SCALE,
	HUD_STAT_TEXT,
	HUD_STAT_NUMBER,
	HUD_STAT_ITEM,
	HUD_STAT_DIR,
	HUD_STAT_ALIGN,
	HUD_STAT_OFFSET,
	HUD_STAT_WORLD_POS,
	HUD_STAT_SIZE
};
// One scriptable HUD element.  Which fields are meaningful depends on
// `type` (e.g. `item`/`dir` for inventories, `number` for statbars).
struct HudElement {
	HudElementType type;
	v2f pos;           // screen position, normalized 0..1
	std::string name;
	v2f scale;
	std::string text;  // texture name or label text, depending on type
	u32 number;
	u32 item;
	u32 dir;           // HUD_DIR_* fill direction
	v2f align;
	v2f offset;        // pixel offset applied after pos
	v3f world_pos;     // used by waypoint elements
	v2s32 size;
};
#ifndef SERVER
#include <vector>
#include <IGUIFont.h>
#include "irr_aabb3d.h"
class IGameDef;
class ITextureSource;
class Inventory;
class InventoryList;
class LocalPlayer;
struct ItemStack;
// Client-side heads-up display renderer: hotbar, crosshair, statbars,
// selection boxes and Lua-defined elements.  Owns no engine objects; all
// pointers are borrowed and must outlive this instance.
class Hud {
public:
	video::IVideoDriver *driver;
	scene::ISceneManager* smgr;
	gui::IGUIEnvironment *guienv;
	IGameDef *gamedef;
	LocalPlayer *player;
	Inventory *inventory;
	ITextureSource *tsrc;
	video::SColor crosshair_argb;
	video::SColor selectionbox_argb;
	bool use_crosshair_image;
	std::string hotbar_image;
	bool use_hotbar_image;
	std::string hotbar_selected_image;
	bool use_hotbar_selected_image;
	v3s16 camera_offset;
	Hud(video::IVideoDriver *driver,scene::ISceneManager* smgr,
		gui::IGUIEnvironment* guienv, IGameDef *gamedef, LocalPlayer *player,
		Inventory *inventory);
	// Draw the hotbar with the given wielded-item index highlighted.
	void drawHotbar(u16 playeritem);
	// Recompute hotbar item size/padding after a window resize.
	void resizeHotbar();
	void drawCrosshair();
	void drawSelectionBoxes(std::vector<aabb3f> &hilightboxes);
	// Draw all server/Lua-registered HUD elements.
	void drawLuaElements(v3s16 camera_offset);
private:
	void drawStatbar(v2s32 pos, u16 corner, u16 drawdir, std::string texture,
			s32 count, v2s32 offset, v2s32 size=v2s32());
	void drawItems(v2s32 upperleftpos, s32 itemcount, s32 offset,
			InventoryList *mainlist, u16 selectitem, u16 direction);
	void drawItem(const ItemStack &item, const core::rect<s32>& rect, bool selected);
	v2u32 m_screensize;
	v2s32 m_displaycenter;
	s32 m_hotbar_imagesize;
	s32 m_padding;
	video::SColor hbar_colors[4];
};
// Render a single item stack (icon + count) into `rect`, optionally clipped.
// Free function so inventory GUIs outside the Hud class can reuse it.
void drawItemStack(video::IVideoDriver *driver,
		gui::IGUIFont *font,
		const ItemStack &item,
		const core::rect<s32> &rect,
		const core::rect<s32> *clip,
		IGameDef *gamedef);
#endif
#endif
| 24.65625 | 82 | 0.775412 | [
"vector"
] |
65456fe216829f8333065730af8b3003f4d2e060 | 7,647 | c | C | opensatkit/cfs/apps/ephem/fsw/src/exobj.c | Sayapatri/hackasat | b0cb5ab3f16a14a3434234f745e91ac15ccee733 | [
"MIT"
] | 62 | 2020-09-01T19:47:37.000Z | 2022-03-14T03:21:53.000Z | opensatkit/cfs/apps/ephem/fsw/src/exobj.c | Sayapatri/hackasat | b0cb5ab3f16a14a3434234f745e91ac15ccee733 | [
"MIT"
] | null | null | null | opensatkit/cfs/apps/ephem/fsw/src/exobj.c | Sayapatri/hackasat | b0cb5ab3f16a14a3434234f745e91ac15ccee733 | [
"MIT"
] | 10 | 2020-09-21T22:25:34.000Z | 2022-01-17T11:47:53.000Z | /*
** Purpose: Implement an example object.
**
** Notes:
** 1. This serves as an example object that uses a table. It does not perform
** any realistic funcions.
**
** License:
** Template written by David McComas and licensed under the GNU
** Lesser General Public License (LGPL).
**
** References:
** 1. OpenSatKit Object-based Application Developers Guide.
** 2. cFS Application Developer's Guide.
*/
/*
** Include Files:
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include "app_cfg.h"
#include "exobj.h"
#include "TLE.h"
#include "timekit.h"
/*
** Global File Data
*/
static EXOBJ_Class* ExObj = NULL;
/*
** Local Function Prototypes
*/
/******************************************************************************
** Function: EXOBJ_Constructor
**
*/
/* Bind the module-level singleton, zero it, set the propagation step and
** load the initial state from the TLE file.
** NOTE(review): DT uses integer division of a millisecond delay; if
** EPHEM_RUNLOOP_DELAY < 1000 this truncates to 0 seconds -- confirm the
** configured delay is a whole number of seconds.
*/
void EXOBJ_Constructor(EXOBJ_Class* ExObjPtr)
{
   ExObj = ExObjPtr;
   CFE_PSP_MemSet((void*)ExObj, 0, sizeof(EXOBJ_Class));
   ExObj->DT = EPHEM_RUNLOOP_DELAY/1000; //seconds
   EXOBJ_Init();
} /* End EXOBJ_Constructor() */
/******************************************************************************
** Function: EXOBJ_Init
**
** Open TLE and get initial state
**
*/
void EXOBJ_Init(void) {
   // FILE *fp;
   int32 Fd = -1;
   char line1[80];
   char line2[80];
   char file[256];
   int readPtr = 0;
   int readSize = 1;
   memset(line1,'\0',sizeof(line1));
   memset(line2,'\0',sizeof(line2));
   memset(file,'\0',sizeof(file));
   if (AppFw_VerifyFileForRead(TLE_FILE_PATH)) {
      Fd = OS_open(TLE_FILE_PATH, OS_READ_ONLY, 0);
   }
   if (Fd >= 0) {
      /* Read until EOF/error, but never past the local buffer: the original
      ** loop had no bound on readPtr and overflowed file[256] for any TLE
      ** file of 256 bytes or more. The last byte is reserved so 'file'
      ** always stays NUL terminated. */
      while(readSize > 0 && readPtr < (int)sizeof(file) - 1) {
         readSize = OS_read(Fd,&file[readPtr],1);
         if (readSize > 0) {
            readPtr += readSize;  /* only advance on a successful read */
         }
      }
      /* Expected layout: 69-char line 1, 2-char line ending, line 2 at
      ** offset 71. Buffers were zeroed above, so strncpy's 69-byte copy
      ** leaves them NUL terminated. */
      if (file[0] == '1') {
         strncpy(line1,file,69);
      }
      if (file[71] == '2'){
         strncpy(line2,file+71,69);
      }
      if (tle_checksum(line1,line2)) {
         CFE_EVS_SendEvent(EXOBJ_INFO_EID, CFE_EVS_INFORMATION, "Valid TLE file. Loading initial state");
         calculateEpochAbsoluteTime(line1);
         parseLines(&(ExObj->tle),line1,line2);
         getRV(&(ExObj->tle),ExObj->DT,ExObj->Pos,ExObj->Vel);
      }
      else {
         CFE_EVS_SendEvent(EXOBJ_ERROR_EID, CFE_EVS_ERROR, "Invalid TLE file %s", TLE_FILE_PATH);
      }
      OS_close(Fd);
   } else
   {
      CFE_EVS_SendEvent(EXOBJ_FILE_OPEN_EID, CFE_EVS_ERROR, "File open error for %s",
                        TLE_FILE_PATH);
   }
}
/******************************************************************************
** Function: EXOBJ_Execute
**
** Execute main object function.
**
*/
/* One run-loop step: propagate the orbit by DT and bump the execution
** counter (reported in housekeeping).
*/
void EXOBJ_Execute(void)
{
   EXOBJ_PropogateOrbit();
   ExObj->ExecCnt++;
} /* EXOBJ_Execute() */
/******************************************************************************
** Function: EXOBJ_PropogateOrbit
**
** Propogate orbit based on delta from initial orbit epooch in mins
**
*/
/* Advance AbsoluteTime by one DT step, refresh the human-readable
** TimeString (YYYY-MM-DD-HH:MM:SS.SSS), and recompute position/velocity
** by evaluating the TLE at minutes-since-epoch.
** NOTE(review): 'doy' is computed but only used by the commented-out
** YYYY-DDD format line below; it is dead code as written.
*/
void EXOBJ_PropogateOrbit(void) {
   long doy,Year,Month,Day,Hour,Minute;
   double Second;
   double minutesAfterEpoch = 0.0;
   memset(ExObj->TimeString,'\0',sizeof(ExObj->TimeString));
   //Using run loop delay as DT now. Would be better if this was based on the cFS clock
   ExObj->AbsoluteTime += ExObj->DT; //seconds since J2000 Epoch from TLE
   minutesAfterEpoch = (ExObj->AbsoluteTime - ExObj->AbsoluteTimeEpoch)/60;
   AbsTimeToDate(ExObj->AbsoluteTime,&Year,&Month,&Day,&Hour,&Minute,&Second,ExObj->DT);
   doy = MD2DOY(Year,Month,Day);
   //YYYY-DDD-HH:MM:SS.SSS
   // sprintf(s,"%04li-%03li-%02li:%02li:%05.3f",Year,doy,Hour,Minute,Second);
   //YYYY-MM-DD-HH:MM:SS.SSS
   sprintf(ExObj->TimeString,"%04li-%02li-%02li-%02li:%02li:%05.3f",Year,Month,Day,Hour,Minute,Second);
   //OS_printf("Time since epoch: %f\n", ExObj->AbsoluteTime - ExObj->AbsoluteTimeEpoch);
   getRV(&(ExObj->tle),minutesAfterEpoch,ExObj->Pos,ExObj->Vel);
}
/******************************************************************************
** Function: tle_checksum
**
** validates tle checksum
**
*/
/* Validate the checksum of one 69-character TLE line: the final character
** must equal the modulo-10 sum of the first 68, where digits count as their
** value, '-' counts as 1, and every other character counts as 0.
*/
static bool tle_line_valid(const char* line)
{
    int sum = 0;
    int pos;
    if(strlen(line) != 69)
    {
        return false;
    }
    for(pos = 0; pos < 68; pos++)
    {
        char c = line[pos];
        if(isdigit((unsigned char)c))
        {
            sum += c - '0';
        }
        else if(c == '-')
        {
            sum += 1;
        }
    }
    return (sum % 10) == (line[68] - '0');
}

/* Both lines must be exactly 69 characters and carry a valid mod-10
** checksum digit in column 69 for the pair to be accepted.
*/
bool tle_checksum(const char* line1, const char* line2)
{
    return tle_line_valid(line1) && tle_line_valid(line2);
}
/******************************************************************************
** Function: calculateEpochAbsoluteTime
**
** Converts epoch time from TLE to seconds since J2000 Epoch
**
*/
/* Parse the epoch field of TLE line 1 (columns 19-32: two-digit year plus
** fractional day-of-year), convert it to seconds since the J2000 epoch and
** store it as both the epoch and the current absolute time.
*/
void calculateEpochAbsoluteTime(const char* line1) {
   char YearString[3];
   char DOYstring[13];
   long year,DOY,Month,Day;
   double FloatDOY,FracDay,JDepoch;
   double Epoch;
   /* Zero the scratch buffers first: strncpy() below copies exactly 2 and
   ** 12 characters from a longer source, so it does NOT append a NUL. The
   ** original code handed unterminated, uninitialized stack buffers to
   ** atoi()/atof(). */
   memset(YearString,'\0',sizeof(YearString));
   memset(DOYstring,'\0',sizeof(DOYstring));
   strncpy(YearString,&line1[18],2);
   year = (long) atoi(YearString);
   /* TLE two-digit year pivot: 57-99 => 19xx, 00-56 => 20xx */
   if (year < 57) year += 2000;
   else year += 1900;
   strncpy(DOYstring,&line1[20],12);
   FloatDOY = (double) atof(DOYstring);
   DOY = (long) FloatDOY;
   FracDay = FloatDOY - ((double) DOY);
   DOY2MD(year,DOY,&Month,&Day);
   JDepoch = YMDHMS2JD(year,Month,Day,0,0,0.0);
   JDepoch += FracDay;   /* add fractional day to the midnight Julian date */
   Epoch = JDToAbsTime(JDepoch);
   ExObj->AbsoluteTimeEpoch = Epoch;
   ExObj->AbsoluteTime = Epoch;
}
/******************************************************************************
** Function: EXOBJ_ResetStatus
**
*/
/* Reset housekeeping counters (invoked by the app's RESET command). */
void EXOBJ_ResetStatus(void)
{
   ExObj->ExecCnt = 0;
} /* End EXOBJ_ResetStatus() */
/******************************************************************************
** Function: EXOBJ_GetTblPtr
**
*/
/* Return a read-only pointer to the object's current table image. */
const EXTBL_Struct* EXOBJ_GetTblPtr(void)
{
   return &(ExObj->Tbl);
} /* End EXOBJ_GetTblPtr() */
/******************************************************************************
** Function: EXOBJ_LoadTbl
**
*/
/* Replace the entire table image with NewTbl. Always succeeds. */
boolean EXOBJ_LoadTbl(EXTBL_Struct* NewTbl)
{
   boolean RetStatus = TRUE;
   CFE_EVS_SendEvent (EXOBJ_DEMO_DEBUG_EID, CFE_EVS_DEBUG,"EXOBJ_LoadTbl() Entered");
   /*
   ** This is a simple table copy. More complex table loads may have pass/fail
   ** criteria.
   */
   CFE_PSP_MemCpy(&(ExObj->Tbl), NewTbl, sizeof(EXTBL_Struct));
   return RetStatus;
} /* End EXOBJ_LoadTbl() */
/******************************************************************************
** Function: EXOBJ_LoadTblEntry
**
*/
/* Replace a single table entry. EntryId must already be validated by the
** caller; no bounds check is performed here. Always succeeds.
*/
boolean EXOBJ_LoadTblEntry(uint16 EntryId, EXTBL_Entry* NewEntry)
{
   boolean RetStatus = TRUE;
   /*
   ** This is a simple table entry copy. More complex table load may have
   ** pass/fail criteria.
   */
   CFE_PSP_MemCpy(&(ExObj->Tbl.Entry[EntryId]),NewEntry,sizeof(EXTBL_Entry));
   return RetStatus;
} /* End EXOBJ_LoadTblEntry() */
/******************************************************************************
** Function: EXOBJ_DemoCmd
**
** Send an event message showing that the example object's command is executed.
**
*/
/* Demo command handler: acknowledges receipt by emitting an informational
** event containing the command's parameter. DataObjPtr is unused.
*/
boolean EXOBJ_DemoCmd(void* DataObjPtr, const CFE_SB_MsgPtr_t MsgPtr)
{
   const EXOBJ_DemoCmdMsg *CmdMsg = (const EXOBJ_DemoCmdMsg *) MsgPtr;
   CFE_EVS_SendEvent (EXOBJ_DEMO_CMD_INFO_EID,
                      CFE_EVS_INFORMATION,
                      "Example demo command received with parameter %d",
                      CmdMsg->Parameter);
   return TRUE;
} /* End EXOBJ_EnableDataLoadCmd() */
/* end of file */
| 22.294461 | 106 | 0.560612 | [
"object"
] |
6548e944aacf9994da0db9e781cc2eb19ec81753 | 14,910 | c | C | servers/slapd/back-wt/idl.c | thespooler/openldap | f5100665e34b18817e545063b7ca49f79a15b6d6 | [
"OLDAP-2.8"
] | null | null | null | servers/slapd/back-wt/idl.c | thespooler/openldap | f5100665e34b18817e545063b7ca49f79a15b6d6 | [
"OLDAP-2.8"
] | null | null | null | servers/slapd/back-wt/idl.c | thespooler/openldap | f5100665e34b18817e545063b7ca49f79a15b6d6 | [
"OLDAP-2.8"
] | null | null | null | /* OpenLDAP WiredTiger backend */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
* Copyright 2002-2015 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted only as authorized by the OpenLDAP
* Public License.
*
* A copy of this license is available in the file LICENSE in the
* top-level directory of the distribution or, alternatively, at
* <http://www.OpenLDAP.org/license.html>.
*/
/* ACKNOWLEDGEMENTS:
* This work was developed by HAMANO Tsukasa <hamano@osstech.co.jp>
* based on back-bdb for inclusion in OpenLDAP Software.
* WiredTiger is a product of MongoDB Inc.
*/
#include "portable.h"
#include <stdio.h>
#include <ac/string.h>
#include "back-wt.h"
#include "idl.h"
#define IDL_MAX(x,y) ( (x) > (y) ? (x) : (y) )
#define IDL_MIN(x,y) ( (x) < (y) ? (x) : (y) )
#define IDL_CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) )
#if IDL_DEBUG > 0
/* Debug-build invariant check: a range IDL must have first <= last, and a
 * list IDL must be strictly ascending. */
static void idl_check( ID *ids )
{
	if( WT_IDL_IS_RANGE( ids ) ) {
		assert( WT_IDL_RANGE_FIRST(ids) <= WT_IDL_RANGE_LAST(ids) );
	} else {
		ID i;
		for( i=1; i < ids[0]; i++ ) {
			assert( ids[i+1] > ids[i] );
		}
	}
}
#if IDL_DEBUG > 1
/* Debug-build pretty printer: logs either the range bounds or the full
 * element list (16 per row), then re-validates the IDL invariants. */
static void idl_dump( ID *ids )
{
	if( WT_IDL_IS_RANGE( ids ) ) {
		Debug( LDAP_DEBUG_ANY,
			   "IDL: range ( %ld - %ld )\n",
			   (long) WT_IDL_RANGE_FIRST( ids ),
			   (long) WT_IDL_RANGE_LAST( ids ),
			   0);
	} else {
		ID i;
		Debug( LDAP_DEBUG_ANY, "IDL: size %ld", (long) ids[0], 0, 0 );
		for( i=1; i<=ids[0]; i++ ) {
			if( i % 16 == 1 ) {
				Debug( LDAP_DEBUG_ANY, "\n", 0, 0, 0 );
			}
			Debug( LDAP_DEBUG_ANY, " %02lx", (long) ids[i], 0, 0 );
		}
		Debug( LDAP_DEBUG_ANY, "\n", 0, 0, 0 );
	}
	idl_check( ids );
}
#endif /* IDL_DEBUG > 1 */
#endif /* IDL_DEBUG > 0 */
/* Locate id in a list-form IDL (ids[0] = count, elements at ids[1..count]).
 * Returns the 1-based position of id if present, otherwise the position of
 * the first element greater than id (count+1 if id exceeds all elements).
 * Must not be called on a range-form IDL. */
unsigned wt_idl_search( ID *ids, ID id )
{
#define IDL_BINARY_SEARCH 1
#ifdef IDL_BINARY_SEARCH
	/*
	 * binary search of id in ids
	 * if found, returns position of id
	 * if not found, returns first position greater than id
	 */
	unsigned base = 0;
	unsigned cursor = 1;
	int val = 0;
	unsigned n = ids[0];
#if IDL_DEBUG > 0
	idl_check( ids );
#endif
	while( 0 < n ) {
		unsigned pivot = n >> 1;
		cursor = base + pivot + 1;
		val = IDL_CMP( id, ids[cursor] );
		if( val < 0 ) {
			n = pivot;
		} else if ( val > 0 ) {
			base = cursor;
			n -= pivot + 1;
		} else {
			return cursor;
		}
	}
	/* not found: cursor sits on the last probe; step past it if id is larger */
	if( val > 0 ) {
		++cursor;
	}
	return cursor;
#else
	/* (reverse) linear search */
	int i;
#if IDL_DEBUG > 0
	idl_check( ids );
#endif
	for( i=ids[0]; i; i-- ) {
		if( id > ids[i] ) {
			break;
		}
	}
	return i+1;
#endif
}
/* Insert id into an IDL kept in sorted order.
 * Returns 0 on success, -1 if id is already present, -2 on internal error.
 * If the list would exceed WT_IDL_DB_MAX elements it collapses into range
 * form [min,max], losing the individual elements.
 * NOTE(review): with IDL_DEBUG > 1 the Debug() call below reads 'x' before
 * it is assigned -- debug-build-only uninitialized read. */
int wt_idl_insert( ID *ids, ID id )
{
	unsigned x;
#if IDL_DEBUG > 1
	Debug( LDAP_DEBUG_ANY, "insert: %04lx at %d\n", (long) id, x, 0 );
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif
	if (WT_IDL_IS_RANGE( ids )) {
		/* if already in range, treat as a dup */
		if (id >= WT_IDL_RANGE_FIRST(ids) && id <= WT_IDL_RANGE_LAST(ids))
			return -1;
		if (id < WT_IDL_RANGE_FIRST(ids))
			ids[1] = id;
		else if (id > WT_IDL_RANGE_LAST(ids))
			ids[2] = id;
		return 0;
	}
	x = wt_idl_search( ids, id );
	assert( x > 0 );
	if( x < 1 ) {
		/* internal error */
		return -2;
	}
	if ( x <= ids[0] && ids[x] == id ) {
		/* duplicate */
		return -1;
	}
	if ( ++ids[0] >= WT_IDL_DB_MAX ) {
		/* overflow: collapse to range form [min,max] */
		if( id < ids[1] ) {
			ids[1] = id;
			ids[2] = ids[ids[0]-1];
		} else if ( ids[ids[0]-1] < id ) {
			ids[2] = id;
		} else {
			ids[2] = ids[ids[0]-1];
		}
		ids[0] = NOID;
	} else {
		/* insert id: shift the tail up one slot and drop id in place */
		AC_MEMCPY( &ids[x+1], &ids[x], (ids[0]-x) * sizeof(ID) );
		ids[x] = id;
	}
#if IDL_DEBUG > 1
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif
	return 0;
}
/* Remove id from an IDL. Returns 0 on success, -1 if id is absent,
 * -2/-3 on internal inconsistency. For range form only the boundaries can
 * change; deleting an interior id of a range is a silent no-op.
 * NOTE(review): same debug-only uninitialized read of 'x' as wt_idl_insert
 * when IDL_DEBUG > 1. */
static int wt_idl_delete( ID *ids, ID id )
{
	unsigned x;
#if IDL_DEBUG > 1
	Debug( LDAP_DEBUG_ANY, "delete: %04lx at %d\n", (long) id, x, 0 );
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif
	if (WT_IDL_IS_RANGE( ids )) {
		/* If deleting a range boundary, adjust */
		if ( ids[1] == id )
			ids[1]++;
		else if ( ids[2] == id )
			ids[2]--;
		/* deleting from inside a range is a no-op */
		/* If the range has collapsed, re-adjust */
		if ( ids[1] > ids[2] )
			ids[0] = 0;
		else if ( ids[1] == ids[2] )
			ids[1] = 1;
		return 0;
	}
	x = wt_idl_search( ids, id );
	assert( x > 0 );
	if( x <= 0 ) {
		/* internal error */
		return -2;
	}
	if( x > ids[0] || ids[x] != id ) {
		/* not found */
		return -1;
	} else if ( --ids[0] == 0 ) {
		if( x != 1 ) {
			return -3;
		}
	} else {
		/* close the gap left by the removed element */
		AC_MEMCPY( &ids[x], &ids[x+1], (1+ids[0]-x) * sizeof(ID) );
	}
#if IDL_DEBUG > 1
	idl_dump( ids );
#elif IDL_DEBUG > 0
	idl_check( ids );
#endif
	return 0;
}
/* Debug helper: render an index key for logging. A 4-byte key is assumed to
 * be a LUTIL hash and is hex-formatted into buf; anything else is returned
 * as-is. NOTE(review): the else branch assumes val is already a
 * NUL-terminated string -- confirm for all callers. */
static char *
wt_show_key(
	char *buf,
	void *val,
	size_t len )
{
	if ( len == 4 /* LUTIL_HASH_BYTES */ ) {
		unsigned char *c = val;
		sprintf( buf, "[%02x%02x%02x%02x]", c[0], c[1], c[2], c[3] );
		return buf;
	} else {
		return val;
	}
}
/*
* idl_intersection - return a = a intersection b
*/
/* Compute a = a INTERSECT b, handling all four combinations of list and
 * range form. Range x range shrinks boundaries; range x list may swap the
 * operands so the list is walked; the general case merges element by
 * element. Always returns 0. */
int
wt_idl_intersection(
	ID *a,
	ID *b )
{
	ID ida, idb;
	ID idmax, idmin;
	ID cursora = 0, cursorb = 0, cursorc;
	int swap = 0;
	if ( WT_IDL_IS_ZERO( a ) || WT_IDL_IS_ZERO( b ) ) {
		a[0] = 0;
		return 0;
	}
	/* intersection can only contain ids in [idmin, idmax] */
	idmin = IDL_MAX( WT_IDL_FIRST(a), WT_IDL_FIRST(b) );
	idmax = IDL_MIN( WT_IDL_LAST(a), WT_IDL_LAST(b) );
	if ( idmin > idmax ) {
		a[0] = 0;
		return 0;
	} else if ( idmin == idmax ) {
		a[0] = 1;
		a[1] = idmin;
		return 0;
	}
	if ( WT_IDL_IS_RANGE( a ) ) {
		if ( WT_IDL_IS_RANGE(b) ) {
			/* If both are ranges, just shrink the boundaries */
			a[1] = idmin;
			a[2] = idmax;
			return 0;
		} else {
			/* Else swap so that b is the range, a is a list */
			ID *tmp = a;
			a = b;
			b = tmp;
			swap = 1;
		}
	}
	/* If a range completely covers the list, the result is
	 * just the list. If idmin to idmax is contiguous, just
	 * turn it into a range.
	 */
	if ( WT_IDL_IS_RANGE( b )
		&& WT_IDL_RANGE_FIRST( b ) <= WT_IDL_FIRST( a )
		&& WT_IDL_RANGE_LAST( b ) >= WT_IDL_LLAST( a ) ) {
		if (idmax - idmin + 1 == a[0])
		{
			a[0] = NOID;
			a[1] = idmin;
			a[2] = idmax;
		}
		goto done;
	}
	/* Fine, do the intersection one element at a time.
	 * First advance to idmin in both IDLs.
	 */
	cursora = cursorb = idmin;
	ida = wt_idl_first( a, &cursora );
	idb = wt_idl_first( b, &cursorb );
	cursorc = 0;
	/* classic sorted-merge: keep only ids present in both lists */
	while( ida <= idmax || idb <= idmax ) {
		if( ida == idb ) {
			a[++cursorc] = ida;
			ida = wt_idl_next( a, &cursora );
			idb = wt_idl_next( b, &cursorb );
		} else if ( ida < idb ) {
			ida = wt_idl_next( a, &cursora );
		} else {
			idb = wt_idl_next( b, &cursorb );
		}
	}
	a[0] = cursorc;
done:
	/* if operands were swapped above, copy the result back to caller's a */
	if (swap)
		WT_IDL_CPY( b, a );
	return 0;
}
/*
* idl_union - return a = a union b
*/
/* Compute a = a UNION b. If either operand is a range, or the merged list
 * would exceed WT_IDL_UM_MAX, the result collapses to the covering range
 * [min,max]. Otherwise b is used as scratch space: distinct elements of a
 * are appended to b, then merged back into a in sorted order. b is
 * clobbered in the list case. Always returns 0. */
int
wt_idl_union(
	ID *a,
	ID *b )
{
	ID ida, idb;
	ID cursora = 0, cursorb = 0, cursorc;
	if ( WT_IDL_IS_ZERO( b ) ) {
		return 0;
	}
	if ( WT_IDL_IS_ZERO( a ) ) {
		WT_IDL_CPY( a, b );
		return 0;
	}
	if ( WT_IDL_IS_RANGE( a ) || WT_IDL_IS_RANGE(b) ) {
over:		ida = IDL_MIN( WT_IDL_FIRST(a), WT_IDL_FIRST(b) );
		idb = IDL_MAX( WT_IDL_LAST(a), WT_IDL_LAST(b) );
		a[0] = NOID;
		a[1] = ida;
		a[2] = idb;
		return 0;
	}
	ida = wt_idl_first( a, &cursora );
	idb = wt_idl_first( b, &cursorb );
	cursorc = b[0];
	/* The distinct elements of a are cat'd to b */
	while( ida != NOID || idb != NOID ) {
		if ( ida < idb ) {
			if( ++cursorc > WT_IDL_UM_MAX ) {
				goto over;
			}
			b[cursorc] = ida;
			ida = wt_idl_next( a, &cursora );
		} else {
			if ( ida == idb )
				ida = wt_idl_next( a, &cursora );
			idb = wt_idl_next( b, &cursorb );
		}
	}
	/* b is copied back to a in sorted order */
	a[0] = cursorc;
	cursora = 1;
	cursorb = 1;
	cursorc = b[0]+1;
	while (cursorb <= b[0] || cursorc <= a[0]) {
		if (cursorc > a[0])
			idb = NOID;
		else
			idb = b[cursorc];
		if (cursorb <= b[0] && b[cursorb] < idb)
			a[cursora++] = b[cursorb++];
		else {
			a[cursora++] = idb;
			cursorc++;
		}
	}
	return 0;
}
#if 0
/*
* wt_idl_notin - return a intersection ~b (or a minus b)
*/
int
wt_idl_notin(
ID *a,
ID *b,
ID *ids )
{
ID ida, idb;
ID cursora = 0, cursorb = 0;
if( WT_IDL_IS_ZERO( a ) ||
WT_IDL_IS_ZERO( b ) ||
WT_IDL_IS_RANGE( b ) )
{
WT_IDL_CPY( ids, a );
return 0;
}
if( WT_IDL_IS_RANGE( a ) ) {
WT_IDL_CPY( ids, a );
return 0;
}
ida = wt_idl_first( a, &cursora ),
idb = wt_idl_first( b, &cursorb );
ids[0] = 0;
while( ida != NOID ) {
if ( idb == NOID ) {
/* we could shortcut this */
ids[++ids[0]] = ida;
ida = wt_idl_next( a, &cursora );
} else if ( ida < idb ) {
ids[++ids[0]] = ida;
ida = wt_idl_next( a, &cursora );
} else if ( ida > idb ) {
idb = wt_idl_next( b, &cursorb );
} else {
ida = wt_idl_next( a, &cursora );
idb = wt_idl_next( b, &cursorb );
}
}
return 0;
}
#endif
/* Start (or restart) iteration over an IDL. On entry *cursor is 0 to begin
 * at the start, or a minimum id to seek to. For range form the cursor holds
 * the id itself; for list form it holds the 1-based position. Returns the
 * first qualifying id, or NOID when the IDL is empty/exhausted. */
ID wt_idl_first( ID *ids, ID *cursor )
{
	ID pos;
	if ( ids[0] == 0 ) {
		*cursor = NOID;
		return NOID;
	}
	if ( WT_IDL_IS_RANGE( ids ) ) {
		if( *cursor < ids[1] ) {
			*cursor = ids[1];
		}
		return *cursor;
	}
	if ( *cursor == 0 )
		pos = 1;
	else
		pos = wt_idl_search( ids, *cursor );
	if( pos > ids[0] ) {
		return NOID;
	}
	*cursor = pos;
	return ids[pos];
}
/* Advance an iteration begun with wt_idl_first. Returns the next id or
 * NOID when the IDL is exhausted. Same cursor conventions as wt_idl_first
 * (id for range form, position for list form). */
ID wt_idl_next( ID *ids, ID *cursor )
{
	if ( WT_IDL_IS_RANGE( ids ) ) {
		if( ids[2] < ++(*cursor) ) {
			return NOID;
		}
		return *cursor;
	}
	if ( ++(*cursor) <= ids[0] ) {
		return ids[*cursor];
	}
	return NOID;
}
/* Add one ID to an unsorted list. We ensure that the first element is the
* minimum and the last element is the maximum, for fast range compaction.
* this means IDLs up to length 3 are always sorted...
*/
/* Add one ID to an unsorted list. We ensure that the first element is the
 * minimum and the last element is the maximum, for fast range compaction.
 * this means IDLs up to length 3 are always sorted...
 * Returns 0 on success, -1 if id duplicates a range member. Overflows past
 * WT_IDL_UM_MAX collapse the list to range form. */
int wt_idl_append_one( ID *ids, ID id )
{
	if (WT_IDL_IS_RANGE( ids )) {
		/* if already in range, treat as a dup */
		if (id >= WT_IDL_RANGE_FIRST(ids) && id <= WT_IDL_RANGE_LAST(ids))
			return -1;
		if (id < WT_IDL_RANGE_FIRST(ids))
			ids[1] = id;
		else if (id > WT_IDL_RANGE_LAST(ids))
			ids[2] = id;
		return 0;
	}
	if ( ids[0] ) {
		ID tmp;
		/* keep the minimum in slot 1 ... */
		if (id < ids[1]) {
			tmp = ids[1];
			ids[1] = id;
			id = tmp;
		}
		/* ... and the maximum in the last slot */
		if ( ids[0] > 1 && id < ids[ids[0]] ) {
			tmp = ids[ids[0]];
			ids[ids[0]] = id;
			id = tmp;
		}
	}
	ids[0]++;
	if ( ids[0] >= WT_IDL_UM_MAX ) {
		ids[0] = NOID;
		ids[2] = id;
	} else {
		ids[ids[0]] = id;
	}
	return 0;
}
/* Append sorted list b to sorted list a. The result is unsorted but
* a[1] is the min of the result and a[a[0]] is the max.
*/
/* Append sorted list b to sorted list a. The result is unsorted but
 * a[1] is the min of the result and a[a[0]] is the max.
 * Collapses to range form if either operand is a range or the combined
 * size reaches WT_IDL_UM_MAX. b may be temporarily modified but is
 * restored before return. Always returns 0. */
int wt_idl_append( ID *a, ID *b )
{
	ID ida, idb, tmp, swap = 0;
	if ( WT_IDL_IS_ZERO( b ) ) {
		return 0;
	}
	if ( WT_IDL_IS_ZERO( a ) ) {
		WT_IDL_CPY( a, b );
		return 0;
	}
	ida = WT_IDL_LAST( a );
	idb = WT_IDL_LAST( b );
	if ( WT_IDL_IS_RANGE( a ) || WT_IDL_IS_RANGE(b) ||
		a[0] + b[0] >= WT_IDL_UM_MAX ) {
		a[2] = IDL_MAX( ida, idb );
		a[1] = IDL_MIN( a[1], b[1] );
		a[0] = NOID;
		return 0;
	}
	/* make sure the overall max ends up in b's last slot before the copy */
	if ( b[0] > 1 && ida > idb ) {
		swap = idb;
		a[a[0]] = idb;
		b[b[0]] = ida;
	}
	/* overall min goes to a[1]; the displaced value is appended */
	if ( b[1] < a[1] ) {
		tmp = a[1];
		a[1] = b[1];
	} else {
		tmp = b[1];
	}
	a[0]++;
	a[a[0]] = tmp;
	if ( b[0] > 1 ) {
		int i = b[0] - 1;
		AC_MEMCPY(a+a[0]+1, b+2, i * sizeof(ID));
		a[0] += i;
	}
	/* undo the temporary modification of b */
	if ( swap ) {
		b[b[0]] = swap;
	}
	return 0;
}
#if 1
/* Quicksort + Insertion sort for small arrays */
#define SMALL 8
#define SWAP(a,b) itmp=(a);(a)=(b);(b)=itmp
/* In-place sort of a list-form IDL (no-op for range form). Median-of-three
 * quicksort with an explicit stack kept in the caller-supplied tmp buffer,
 * switching to insertion sort for partitions smaller than SMALL. tmp must
 * be at least as large as ids. */
void
wt_idl_sort( ID *ids, ID *tmp )
{
	int *istack = (int *)tmp; /* Private stack, not used by caller */
	int i,j,k,l,ir,jstack;
	ID a, itmp;
	if ( WT_IDL_IS_RANGE( ids ))
		return;
	ir = ids[0];
	l = 1;
	jstack = 0;
	for(;;) {
		if (ir - l < SMALL) {	/* Insertion sort */
			for (j=l+1;j<=ir;j++) {
				a = ids[j];
				for (i=j-1;i>=1;i--) {
					if (ids[i] <= a) break;
					ids[i+1] = ids[i];
				}
				ids[i+1] = a;
			}
			if (jstack == 0) break;
			/* pop the next pending partition */
			ir = istack[jstack--];
			l = istack[jstack--];
		} else {
			k = (l + ir) >> 1;	/* Choose median of left, center, right */
			SWAP(ids[k], ids[l+1]);
			if (ids[l] > ids[ir]) {
				SWAP(ids[l], ids[ir]);
			}
			if (ids[l+1] > ids[ir]) {
				SWAP(ids[l+1], ids[ir]);
			}
			if (ids[l] > ids[l+1]) {
				SWAP(ids[l], ids[l+1]);
			}
			i = l+1;
			j = ir;
			a = ids[l+1];	/* partition pivot */
			for(;;) {
				do i++; while(ids[i] < a);
				do j--; while(ids[j] > a);
				if (j < i) break;
				SWAP(ids[i],ids[j]);
			}
			ids[l+1] = ids[j];
			ids[j] = a;
			jstack += 2;
			/* push the larger partition, recurse into the smaller */
			if (ir-i+1 >= j-l) {
				istack[jstack] = ir;
				istack[jstack-1] = i;
				ir = j-1;
			} else {
				istack[jstack] = j-1;
				istack[jstack-1] = l;
				l = i;
			}
		}
	}
}
#else
/* 8 bit Radix sort + insertion sort
*
* based on code from http://www.cubic.org/docs/radix.htm
* with improvements by ebackes@symas.com and hyc@symas.com
*
* This code is O(n) but has a relatively high constant factor. For lists
* up to ~50 Quicksort is slightly faster; up to ~100 they are even.
* Much faster than quicksort for lists longer than ~100. Insertion
* sort is actually superior for lists <50.
*/
#define BUCKETS (1<<8)
#define SMALL 50
void
wt_idl_sort( ID *ids, ID *tmp )
{
int count, soft_limit, phase = 0, size = ids[0];
ID *idls[2];
unsigned char *maxv = (unsigned char *)&ids[size];
if ( WT_IDL_IS_RANGE( ids ))
return;
/* Use insertion sort for small lists */
if ( size <= SMALL ) {
int i,j;
ID a;
for (j=1;j<=size;j++) {
a = ids[j];
for (i=j-1;i>=1;i--) {
if (ids[i] <= a) break;
ids[i+1] = ids[i];
}
ids[i+1] = a;
}
return;
}
tmp[0] = size;
idls[0] = ids;
idls[1] = tmp;
#if BYTE_ORDER == BIG_ENDIAN
for (soft_limit = 0; !maxv[soft_limit]; soft_limit++);
#else
for (soft_limit = sizeof(ID)-1; !maxv[soft_limit]; soft_limit--);
#endif
for (
#if BYTE_ORDER == BIG_ENDIAN
count = sizeof(ID)-1; count >= soft_limit; --count
#else
count = 0; count <= soft_limit; ++count
#endif
) {
unsigned int num[BUCKETS], * np, n, sum;
int i;
ID *sp, *source, *dest;
unsigned char *bp, *source_start;
source = idls[phase]+1;
dest = idls[phase^1]+1;
source_start = ((unsigned char *) source) + count;
np = num;
for ( i = BUCKETS; i > 0; --i ) *np++ = 0;
/* count occurences of every byte value */
bp = source_start;
for ( i = size; i > 0; --i, bp += sizeof(ID) )
num[*bp]++;
/* transform count into index by summing elements and storing
* into same array
*/
sum = 0;
np = num;
for ( i = BUCKETS; i > 0; --i ) {
n = *np;
*np++ = sum;
sum += n;
}
/* fill dest with the right values in the right place */
bp = source_start;
sp = source;
for ( i = size; i > 0; --i, bp += sizeof(ID) ) {
np = num + *bp;
dest[*np] = *sp++;
++(*np);
}
phase ^= 1;
}
/* copy back from temp if needed */
if ( phase ) {
ids++; tmp++;
for ( count = 0; count < size; ++count )
*ids++ = *tmp++;
}
}
#endif /* Quick vs Radix */
| 18.754717 | 74 | 0.543729 | [
"transform"
] |
654989f9e7991338a1c57d112b4581b77c3cffd2 | 826 | h | C | include/audio.h | ccookf/simple-engine | 3e49ead960c45fef8a48602f67e6455ffdf37156 | [
"MIT"
] | null | null | null | include/audio.h | ccookf/simple-engine | 3e49ead960c45fef8a48602f67e6455ffdf37156 | [
"MIT"
] | null | null | null | include/audio.h | ccookf/simple-engine | 3e49ead960c45fef8a48602f67e6455ffdf37156 | [
"MIT"
] | null | null | null | #ifndef AUDIO_H
#define AUDIO_H
#include <SFML/Audio.hpp>
#include <list>
#include <map>
#include <string>
#include <vector>
//These could be set higher, but a new sound system should be setup if
//sound channels are actually a constraint
#define MAX_AUDIO_CHANNELS 50
#define MAX_CONCURRENT_SFX 100
class Audio
{
public:
sf::Music bgm;
static Audio* instance();
sf::Sound* getSfxChannel(std::string filename);
void releaseSfxChannel(sf::Sound* channel);
void loadSfx(std::string filename);
void playSfx(std::string filename, float pitch = 1.0, float volume = 50.0f);
void unloadSounds();
void update();
private:
Audio() { }
~Audio() { unloadSounds(); }
static Audio* _instance;
std::map<std::string, sf::SoundBuffer*> loadedSfx;
std::vector<sf::Sound*> channels;
std::list<sf::Sound> sounds;
};
#endif
| 20.65 | 77 | 0.720339 | [
"vector"
] |
654efa50291bda939a4a425d3dc35fd861dcc131 | 1,188 | h | C | Classes/DKURLCache.h | jaylib/JMURLClient | f8142ecb01e45c64e0dfb3c460de68edc07c60e9 | [
"MIT"
] | null | null | null | Classes/DKURLCache.h | jaylib/JMURLClient | f8142ecb01e45c64e0dfb3c460de68edc07c60e9 | [
"MIT"
] | null | null | null | Classes/DKURLCache.h | jaylib/JMURLClient | f8142ecb01e45c64e0dfb3c460de68edc07c60e9 | [
"MIT"
] | null | null | null | //
// DKFileStorage.h
// Dropkick Networking
//
// Created by Josef Materi on 01.02.14.
// Copyright (c) 2014 Josef Materi. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface DKURLCache : NSObject
/**
* Returns a shared Cache object
*
* @return DKURLCache
*/
+ (instancetype) sharedCache;
/**
* Checks the existance of a cache for a given NSURLRequest
*
* @param request NSURLRequest
*
* @return BOOL
*/
- (BOOL)hasCacheForURLRequest:(NSURLRequest *)request;
/**
* Creates a cache from file in a directory stored in the main bundle
*
* @param directory NSString *directory
*/
- (void)deployCacheFromDirectory:(NSString *)directory;
/**
* Caches the response from all NSURLRequests inside the provided array and returns the paths of the cached responses in a completion block.
*
* @param urlRequests NSArray *array (Array of NSURLRequests)
* @param complete (^)(NSArray *paths) Array of paths for cached responses
* @param failed NSError *error, BOOL *retryIfFailed
*/
- (void)cacheURLRequests:(NSArray *)urlRequests complete:(void (^)(NSArray *paths))complete failed:(void (^)(NSError *error, BOOL *retryIfFailed))failed;
@end
| 25.276596 | 153 | 0.71633 | [
"object"
] |
654fc3969a2bc27688ae440a54b0ae020468842c | 31,968 | c | C | asst1-Synchronization/kern/thread/thread.c | Chikapro/unsw-comp3231-21T1- | 3025d8eb50ac2779adad5dfc14cbf56c2fe3bae5 | [
"MIT"
] | 2 | 2019-03-29T06:19:44.000Z | 2019-05-02T11:37:26.000Z | asst1-Synchronization/kern/thread/thread.c | Chikapro/unsw-comp3231-21T1- | 3025d8eb50ac2779adad5dfc14cbf56c2fe3bae5 | [
"MIT"
] | null | null | null | asst1-Synchronization/kern/thread/thread.c | Chikapro/unsw-comp3231-21T1- | 3025d8eb50ac2779adad5dfc14cbf56c2fe3bae5 | [
"MIT"
] | 4 | 2019-03-26T05:31:38.000Z | 2022-03-31T14:49:11.000Z | /*
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009, 2010
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Core kernel-level thread system.
*/
#define THREADINLINE
#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <array.h>
#include <cpu.h>
#include <spl.h>
#include <spinlock.h>
#include <wchan.h>
#include <thread.h>
#include <threadlist.h>
#include <threadprivate.h>
#include <proc.h>
#include <current.h>
#include <synch.h>
#include <addrspace.h>
#include <mainbus.h>
#include <vnode.h>
#include "opt-synchprobs.h"
/* Magic number used as a guard value on kernel thread stacks. */
#define THREAD_STACK_MAGIC 0xbaadf00d
/* Wait channel. A wchan is protected by an associated, passed-in spinlock. */
struct wchan {
const char *wc_name; /* name for this channel */
struct threadlist wc_threads; /* list of waiting threads */
};
/* Master array of CPUs. */
DECLARRAY(cpu, static __UNUSED inline);
DEFARRAY(cpu, static __UNUSED inline);
static struct cpuarray allcpus;
/* Used to wait for secondary CPUs to come online. */
static struct semaphore *cpu_startup_sem;
////////////////////////////////////////////////////////////
/*
* Stick a magic number on the bottom end of the stack. This will
* (sometimes) catch kernel stack overflows. Use thread_checkstack()
* to test this.
*/
static
void
thread_checkstack_init(struct thread *thread)
{
((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC;
((uint32_t *)thread->t_stack)[3] = THREAD_STACK_MAGIC;
}
/*
* Check the magic number we put on the bottom end of the stack in
* thread_checkstack_init. If these assertions go off, it most likely
* means you overflowed your stack at some point, which can cause all
* kinds of mysterious other things to happen.
*
* Note that when ->t_stack is NULL, which is the case if the stack
* cannot be freed (which in turn is the case if the stack is the boot
* stack, and the thread is the boot thread) this doesn't do anything.
*/
static
void
thread_checkstack(struct thread *thread)
{
if (thread->t_stack != NULL) {
KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC);
KASSERT(((uint32_t*)thread->t_stack)[1] == THREAD_STACK_MAGIC);
KASSERT(((uint32_t*)thread->t_stack)[2] == THREAD_STACK_MAGIC);
KASSERT(((uint32_t*)thread->t_stack)[3] == THREAD_STACK_MAGIC);
}
}
/*
* Create a thread. This is used both to create a first thread
* for each CPU and to create subsequent forked threads.
*/
static
struct thread *
thread_create(const char *name)
{
struct thread *thread;
DEBUGASSERT(name != NULL);
thread = kmalloc(sizeof(*thread));
if (thread == NULL) {
return NULL;
}
thread->t_name = kstrdup(name);
if (thread->t_name == NULL) {
kfree(thread);
return NULL;
}
thread->t_wchan_name = "NEW";
thread->t_state = S_READY;
/* Thread subsystem fields */
thread_machdep_init(&thread->t_machdep);
threadlistnode_init(&thread->t_listnode, thread);
thread->t_stack = NULL;
thread->t_context = NULL;
thread->t_cpu = NULL;
thread->t_proc = NULL;
HANGMAN_ACTORINIT(&thread->t_hangman, thread->t_name);
/* Interrupt state fields */
thread->t_in_interrupt = false;
thread->t_curspl = IPL_HIGH;
thread->t_iplhigh_count = 1; /* corresponding to t_curspl */
/* If you add to struct thread, be sure to initialize here */
return thread;
}
/*
* Create a CPU structure. This is used for the bootup CPU and
* also for secondary CPUs.
*
* The hardware number (the number assigned by firmware or system
* board config or whatnot) is tracked separately because it is not
* necessarily anything sane or meaningful.
*/
struct cpu *
cpu_create(unsigned hardware_number)
{
struct cpu *c;
int result;
char namebuf[16];
c = kmalloc(sizeof(*c));
if (c == NULL) {
panic("cpu_create: Out of memory\n");
}
c->c_self = c;
c->c_hardware_number = hardware_number;
c->c_curthread = NULL;
threadlist_init(&c->c_zombies);
c->c_hardclocks = 0;
c->c_spinlocks = 0;
c->c_isidle = false;
threadlist_init(&c->c_runqueue);
spinlock_init(&c->c_runqueue_lock);
c->c_ipi_pending = 0;
c->c_numshootdown = 0;
spinlock_init(&c->c_ipi_lock);
result = cpuarray_add(&allcpus, c, &c->c_number);
if (result != 0) {
panic("cpu_create: array_add: %s\n", strerror(result));
}
snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
c->c_curthread = thread_create(namebuf);
if (c->c_curthread == NULL) {
panic("cpu_create: thread_create failed\n");
}
c->c_curthread->t_cpu = c;
if (c->c_number == 0) {
/*
* Leave c->c_curthread->t_stack NULL for the boot
* cpu. This means we're using the boot stack, which
* can't be freed. (Exercise: what would it take to
* make it possible to free the boot stack?)
*/
/*c->c_curthread->t_stack = ... */
}
else {
c->c_curthread->t_stack = kmalloc(STACK_SIZE);
if (c->c_curthread->t_stack == NULL) {
panic("cpu_create: couldn't allocate stack");
}
thread_checkstack_init(c->c_curthread);
}
/*
* If there is no curcpu (or curthread) yet, we are creating
* the first (boot) cpu. Initialize curcpu and curthread as
* early as possible so that other code can take locks without
* exploding.
*/
if (!CURCPU_EXISTS()) {
/*
* Initializing curcpu and curthread is
* machine-dependent because either of curcpu and
* curthread might be defined in terms of the other.
*/
INIT_CURCPU(c, c->c_curthread);
/*
* Now make sure both t_cpu and c_curthread are
* set. This might be partially redundant with
* INIT_CURCPU depending on how things are defined.
*/
curthread->t_cpu = curcpu;
curcpu->c_curthread = curthread;
}
HANGMAN_ACTORINIT(&c->c_hangman, "cpu");
result = proc_addthread(kproc, c->c_curthread);
if (result) {
panic("cpu_create: proc_addthread:: %s\n", strerror(result));
}
cpu_machdep_init(c);
return c;
}
/*
* Destroy a thread.
*
* This function cannot be called in the victim thread's own context.
* Nor can it be called on a running thread.
*
* (Freeing the stack you're actually using to run is ... inadvisable.)
*/
static
void
thread_destroy(struct thread *thread)
{
KASSERT(thread != curthread);
KASSERT(thread->t_state != S_RUN);
/*
* If you add things to struct thread, be sure to clean them up
* either here or in thread_exit(). (And not both...)
*/
/* Thread subsystem fields */
KASSERT(thread->t_proc == NULL);
if (thread->t_stack != NULL) {
kfree(thread->t_stack);
}
threadlistnode_cleanup(&thread->t_listnode);
thread_machdep_cleanup(&thread->t_machdep);
/* sheer paranoia */
thread->t_wchan_name = "DESTROYED";
kfree(thread->t_name);
kfree(thread);
}
/*
* Clean up zombies. (Zombies are threads that have exited but still
* need to have thread_destroy called on them.)
*
* The list of zombies is per-cpu.
*/
static
void
exorcise(void)
{
struct thread *z;
while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
KASSERT(z != curthread);
KASSERT(z->t_state == S_ZOMBIE);
thread_destroy(z);
}
}
/*
* On panic, stop the thread system (as much as is reasonably
* possible) to make sure we don't end up letting any other threads
* run.
*/
void
thread_panic(void)
{
/*
* Kill off other CPUs.
*
* We could wait for them to stop, except that they might not.
*/
ipi_broadcast(IPI_PANIC);
/*
* Drop runnable threads on the floor.
*
* Don't try to get the run queue lock; we might not be able
* to. Instead, blat the list structure by hand, and take the
* risk that it might not be quite atomic.
*/
curcpu->c_runqueue.tl_count = 0;
curcpu->c_runqueue.tl_head.tln_next = &curcpu->c_runqueue.tl_tail;
curcpu->c_runqueue.tl_tail.tln_prev = &curcpu->c_runqueue.tl_head;
/*
* Ideally, we want to make sure sleeping threads don't wake
* up and start running. However, there's no good way to track
* down all the wchans floating around the system. Another
* alternative would be to set a global flag to make the wchan
* wakeup operations do nothing; but that would mean we
* ourselves couldn't sleep to wait for an I/O completion
* interrupt, and we'd like to be able to do that if the
* system isn't that badly hosed.
*
* So, do nothing else here.
*
* This may prove inadequate in practice and further steps
* might be needed. It may also be necessary to go through and
* forcibly unlock all locks or the like...
*/
}
/*
* At system shutdown, ask the other CPUs to switch off.
*/
void
thread_shutdown(void)
{
/*
* Stop the other CPUs.
*
* We should probably wait for them to stop and shut them off
* on the system board.
*/
ipi_broadcast(IPI_OFFLINE);
}
/*
* Thread system initialization.
*/
void
thread_bootstrap(void)
{
cpuarray_init(&allcpus);
/*
* Create the cpu structure for the bootup CPU, the one we're
* currently running on. Assume the hardware number is 0; that
* might be updated later by mainbus-type code. This also
* creates a thread structure for the first thread, the one
* that's already implicitly running when the kernel is
* started from the bootloader.
*/
KASSERT(CURCPU_EXISTS() == false);
(void)cpu_create(0);
KASSERT(CURCPU_EXISTS() == true);
/* cpu_create() should also have set t_proc. */
KASSERT(curcpu != NULL);
KASSERT(curthread != NULL);
KASSERT(curthread->t_proc != NULL);
KASSERT(curthread->t_proc == kproc);
/* Done */
}
/*
* New CPUs come here once MD initialization is finished. curthread
* and curcpu should already be initialized.
*
* Other than clearing thread_start_cpus() to continue, we don't need
* to do anything. The startup thread can just exit; we only need it
* to be able to get into thread_switch() properly.
*/
void
cpu_hatch(unsigned software_number)
{
char buf[64];
KASSERT(curcpu != NULL);
KASSERT(curthread != NULL);
KASSERT(curcpu->c_number == software_number);
spl0();
cpu_identify(buf, sizeof(buf));
kprintf("cpu%u: %s\n", software_number, buf);
V(cpu_startup_sem);
thread_exit();
}
/*
* Start up secondary cpus. Called from boot().
*/
void
thread_start_cpus(void)
{
char buf[64];
unsigned i;
cpu_identify(buf, sizeof(buf));
kprintf("cpu0: %s\n", buf);
cpu_startup_sem = sem_create("cpu_hatch", 0);
mainbus_start_cpus();
for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
P(cpu_startup_sem);
}
sem_destroy(cpu_startup_sem);
cpu_startup_sem = NULL;
}
/*
* Make a thread runnable.
*
* targetcpu might be curcpu; it might not be, too.
*/
static
void
thread_make_runnable(struct thread *target, bool already_have_lock)
{
struct cpu *targetcpu;
/* Lock the run queue of the target thread's cpu. */
targetcpu = target->t_cpu;
if (already_have_lock) {
/* The target thread's cpu should be already locked. */
KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
}
else {
spinlock_acquire(&targetcpu->c_runqueue_lock);
}
/* Target thread is now ready to run; put it on the run queue. */
target->t_state = S_READY;
threadlist_addtail(&targetcpu->c_runqueue, target);
if (targetcpu->c_isidle && targetcpu != curcpu->c_self) {
/*
* Other processor is idle; send interrupt to make
* sure it unidles.
*/
ipi_send(targetcpu, IPI_UNIDLE);
}
if (!already_have_lock) {
spinlock_release(&targetcpu->c_runqueue_lock);
}
}
/*
* Create a new thread based on an existing one.
*
* The new thread has name NAME, and starts executing in function
* ENTRYPOINT. DATA1 and DATA2 are passed to ENTRYPOINT.
*
* The new thread is created in the process P. If P is null, the
* process is inherited from the caller. It will start on the same CPU
* as the caller, unless the scheduler intervenes first.
*/
int
thread_fork(const char *name,
struct proc *proc,
void (*entrypoint)(void *data1, unsigned long data2),
void *data1, unsigned long data2)
{
struct thread *newthread;
int result;
newthread = thread_create(name);
if (newthread == NULL) {
return ENOMEM;
}
/* Allocate a stack */
newthread->t_stack = kmalloc(STACK_SIZE);
if (newthread->t_stack == NULL) {
thread_destroy(newthread);
return ENOMEM;
}
thread_checkstack_init(newthread);
/*
* Now we clone various fields from the parent thread.
*/
/* Thread subsystem fields */
newthread->t_cpu = curthread->t_cpu;
/* Attach the new thread to its process */
if (proc == NULL) {
proc = curthread->t_proc;
}
result = proc_addthread(proc, newthread);
if (result) {
/* thread_destroy will clean up the stack */
thread_destroy(newthread);
return result;
}
/*
* Because new threads come out holding the cpu runqueue lock
* (see notes at bottom of thread_switch), we need to account
* for the spllower() that will be done releasing it.
*/
newthread->t_iplhigh_count++;
/* Set up the switchframe so entrypoint() gets called */
switchframe_init(newthread, entrypoint, data1, data2);
/* Lock the current cpu's run queue and make the new thread runnable */
thread_make_runnable(newthread, false);
return 0;
}
/*
* High level, machine-independent context switch code.
*
* The current thread is queued appropriately and its state is changed
* to NEWSTATE; another thread to run is selected and switched to.
*
* If NEWSTATE is S_SLEEP, the thread is queued on the wait channel
* WC, protected by the spinlock LK. Otherwise WC and Lk should be
* NULL.
*/
static
void
thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
{
struct thread *cur, *next;
int spl;
DEBUGASSERT(curcpu->c_curthread == curthread);
DEBUGASSERT(curthread->t_cpu == curcpu->c_self);
/* Explicitly disable interrupts on this processor */
spl = splhigh();
cur = curthread;
/*
* If we're idle, return without doing anything. This happens
* when the timer interrupt interrupts the idle loop.
*/
if (curcpu->c_isidle) {
splx(spl);
return;
}
/* Check the stack guard band. */
thread_checkstack(cur);
/* Lock the run queue. */
spinlock_acquire(&curcpu->c_runqueue_lock);
/* Micro-optimization: if nothing to do, just return */
if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) {
spinlock_release(&curcpu->c_runqueue_lock);
splx(spl);
return;
}
/* Put the thread in the right place. */
switch (newstate) {
case S_RUN:
panic("Illegal S_RUN in thread_switch\n");
case S_READY:
thread_make_runnable(cur, true /*have lock*/);
break;
case S_SLEEP:
cur->t_wchan_name = wc->wc_name;
/*
* Add the thread to the list in the wait channel, and
* unlock same. To avoid a race with someone else
* calling wchan_wake*, we must keep the wchan's
* associated spinlock locked from the point the
* caller of wchan_sleep locked it until the thread is
* on the list.
*/
threadlist_addtail(&wc->wc_threads, cur);
spinlock_release(lk);
break;
case S_ZOMBIE:
cur->t_wchan_name = "ZOMBIE";
threadlist_addtail(&curcpu->c_zombies, cur);
break;
}
cur->t_state = newstate;
/*
* Get the next thread. While there isn't one, call cpu_idle().
* curcpu->c_isidle must be true when cpu_idle is
* called. Unlock the runqueue while idling too, to make sure
* things can be added to it.
*
* Note that we don't need to unlock the runqueue atomically
* with idling; becoming unidle requires receiving an
* interrupt (either a hardware interrupt or an interprocessor
* interrupt from another cpu posting a wakeup) and idling
* *is* atomic with respect to re-enabling interrupts.
*
* Note that c_isidle becomes true briefly even if we don't go
* idle. However, because one is supposed to hold the runqueue
* lock to look at it, this should not be visible or matter.
*/
/* The current cpu is now idle. */
curcpu->c_isidle = true;
do {
next = threadlist_remhead(&curcpu->c_runqueue);
if (next == NULL) {
spinlock_release(&curcpu->c_runqueue_lock);
cpu_idle();
spinlock_acquire(&curcpu->c_runqueue_lock);
}
} while (next == NULL);
curcpu->c_isidle = false;
/*
* Note that curcpu->c_curthread may be the same variable as
* curthread and it may not be, depending on how curthread and
* curcpu are defined by the MD code. We'll assign both and
* assume the compiler will optimize one away if they're the
* same.
*/
curcpu->c_curthread = next;
curthread = next;
/* do the switch (in assembler in switch.S) */
switchframe_switch(&cur->t_context, &next->t_context);
/*
* When we get to this point we are either running in the next
* thread, or have come back to the same thread again,
* depending on how you look at it. That is,
* switchframe_switch returns immediately in another thread
* context, which in general will be executing here with a
* different stack and different values in the local
* variables. (Although new threads go to thread_startup
* instead.) But, later on when the processor, or some
* processor, comes back to the previous thread, it's also
* executing here with the *same* value in the local
* variables.
*
* The upshot, however, is as follows:
*
* - The thread now currently running is "cur", not "next",
* because when we return from switchrame_switch on the
* same stack, we're back to the thread that
* switchframe_switch call switched away from, which is
* "cur".
*
* - "cur" is _not_ the thread that just *called*
* switchframe_switch.
*
* - If newstate is S_ZOMB we never get back here in that
* context at all.
*
* - If the thread just chosen to run ("next") was a new
* thread, we don't get to this code again until
* *another* context switch happens, because when new
* threads return from switchframe_switch they teleport
* to thread_startup.
*
* - At this point the thread whose stack we're now on may
* have been migrated to another cpu since it last ran.
*
* The above is inherently confusing and will probably take a
* while to get used to.
*
* However, the important part is that code placed here, after
* the call to switchframe_switch, does not necessarily run on
* every context switch. Thus any such code must be either
* skippable on some switches or also called from
* thread_startup.
*/
/* Clear the wait channel and set the thread state. */
cur->t_wchan_name = NULL;
cur->t_state = S_RUN;
/* Unlock the run queue. */
spinlock_release(&curcpu->c_runqueue_lock);
/* Activate our address space in the MMU. */
as_activate();
/* Clean up dead threads. */
exorcise();
/* Turn interrupts back on. */
splx(spl);
}
/*
* This function is where new threads start running. The arguments
* ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
*
* Because new code comes here from inside the middle of
* thread_switch, the beginning part of this function must match the
* tail of thread_switch.
*/
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
void *data1, unsigned long data2)
{
struct thread *cur;
cur = curthread;
/* Clear the wait channel and set the thread state. */
cur->t_wchan_name = NULL;
cur->t_state = S_RUN;
/* Release the runqueue lock acquired in thread_switch. */
spinlock_release(&curcpu->c_runqueue_lock);
/* Activate our address space in the MMU. */
as_activate();
/* Clean up dead threads. */
exorcise();
/* Enable interrupts. */
spl0();
#if OPT_SYNCHPROBS
/* Yield a random number of times to get a good mix of threads. */
{
int i, n;
n = random()%161 + random()%161;
for (i=0; i<n; i++) {
thread_yield();
}
}
#endif
/* Call the function. */
entrypoint(data1, data2);
/* Done. */
thread_exit();
}
/*
* Cause the current thread to exit.
*
* The parts of the thread structure we don't actually need to run
* should be cleaned up right away. The rest has to wait until
* thread_destroy is called from exorcise().
*
* Does not return.
*/
void
thread_exit(void)
{
struct thread *cur;
cur = curthread;
/*
* Detach from our process. You might need to move this action
* around, depending on how your wait/exit works.
*/
proc_remthread(cur);
/* Make sure we *are* detached (move this only if you're sure!) */
KASSERT(cur->t_proc == NULL);
/* Check the stack guard band. */
thread_checkstack(cur);
/* Interrupts off on this processor */
splhigh();
thread_switch(S_ZOMBIE, NULL, NULL);
panic("braaaaaaaiiiiiiiiiiinssssss\n");
}
/*
* Yield the cpu to another process, but stay runnable.
*/
void
thread_yield(void)
{
thread_switch(S_READY, NULL, NULL);
}
////////////////////////////////////////////////////////////
/*
* Scheduler.
*
* This is called periodically from hardclock(). It should reshuffle
* the current CPU's run queue by job priority.
*/
void
schedule(void)
{
/*
* You can write this. If we do nothing, threads will run in
* round-robin fashion.
*/
}
/*
* Thread migration.
*
* This is also called periodically from hardclock(). If the current
* CPU is busy and other CPUs are idle, or less busy, it should move
* threads across to those other other CPUs.
*
* Migrating threads isn't free because of cache affinity; a thread's
* working cache set will end up having to be moved to the other CPU,
* which is fairly slow. The tradeoff between this performance loss
* and the performance loss due to underutilization of some CPUs is
* something that needs to be tuned and probably is workload-specific.
*
* For here and now, because we know we're running on System/161 and
* System/161 does not (yet) model such cache effects, we'll be very
* aggressive.
*/
void
thread_consider_migration(void)
{
unsigned my_count, total_count, one_share, to_send;
unsigned i, numcpus;
struct cpu *c;
struct threadlist victims;
struct thread *t;
my_count = total_count = 0;
numcpus = cpuarray_num(&allcpus);
for (i=0; i<numcpus; i++) {
c = cpuarray_get(&allcpus, i);
spinlock_acquire(&c->c_runqueue_lock);
total_count += c->c_runqueue.tl_count;
if (c == curcpu->c_self) {
my_count = c->c_runqueue.tl_count;
}
spinlock_release(&c->c_runqueue_lock);
}
one_share = DIVROUNDUP(total_count, numcpus);
if (my_count < one_share) {
return;
}
to_send = my_count - one_share;
threadlist_init(&victims);
spinlock_acquire(&curcpu->c_runqueue_lock);
for (i=0; i<to_send; i++) {
t = threadlist_remtail(&curcpu->c_runqueue);
threadlist_addhead(&victims, t);
}
spinlock_release(&curcpu->c_runqueue_lock);
for (i=0; i < numcpus && to_send > 0; i++) {
c = cpuarray_get(&allcpus, i);
if (c == curcpu->c_self) {
continue;
}
spinlock_acquire(&c->c_runqueue_lock);
while (c->c_runqueue.tl_count < one_share && to_send > 0) {
t = threadlist_remhead(&victims);
/*
* Ordinarily, curthread will not appear on
* the run queue. However, it can under the
* following circumstances:
* - it went to sleep;
* - the processor became idle, so it
* remained curthread;
* - it was reawakened, so it was put on the
* run queue;
* - and the processor hasn't fully unidled
* yet, so all these things are still true.
*
* If the timer interrupt happens at (almost)
* exactly the proper moment, we can come here
* while things are in this state and see
* curthread. However, *migrating* curthread
* can cause bad things to happen (Exercise:
* Why? And what?) so shuffle it to the end of
* the list and decrement to_send in order to
* skip it. Then it goes back on our own run
* queue below.
*/
if (t == curthread) {
threadlist_addtail(&victims, t);
to_send--;
continue;
}
t->t_cpu = c;
threadlist_addtail(&c->c_runqueue, t);
DEBUG(DB_THREADS,
"Migrated thread %s: cpu %u -> %u",
t->t_name, curcpu->c_number, c->c_number);
to_send--;
if (c->c_isidle) {
/*
* Other processor is idle; send
* interrupt to make sure it unidles.
*/
ipi_send(c, IPI_UNIDLE);
}
}
spinlock_release(&c->c_runqueue_lock);
}
/*
* Because the code above isn't atomic, the thread counts may have
* changed while we were working and we may end up with leftovers.
* Don't panic; just put them back on our own run queue.
*/
if (!threadlist_isempty(&victims)) {
spinlock_acquire(&curcpu->c_runqueue_lock);
while ((t = threadlist_remhead(&victims)) != NULL) {
threadlist_addtail(&curcpu->c_runqueue, t);
}
spinlock_release(&curcpu->c_runqueue_lock);
}
KASSERT(threadlist_isempty(&victims));
threadlist_cleanup(&victims);
}
////////////////////////////////////////////////////////////
/*
* Wait channel functions
*/
/*
* Create a wait channel. NAME is a symbolic string name for it.
* This is what's displayed by ps -alx in Unix.
*
* NAME should generally be a string constant. If it isn't, alternate
* arrangements should be made to free it after the wait channel is
* destroyed.
*/
struct wchan *
wchan_create(const char *name)
{
struct wchan *wc;
wc = kmalloc(sizeof(*wc));
if (wc == NULL) {
return NULL;
}
threadlist_init(&wc->wc_threads);
wc->wc_name = name;
return wc;
}
/*
* Destroy a wait channel. Must be empty and unlocked.
* (The corresponding cleanup functions require this.)
*/
void
wchan_destroy(struct wchan *wc)
{
threadlist_cleanup(&wc->wc_threads);
kfree(wc);
}
/*
* Yield the cpu to another process, and go to sleep, on the specified
* wait channel WC, whose associated spinlock is LK. Calling wakeup on
* the channel will make the thread runnable again. The spinlock must
* be locked. The call to thread_switch unlocks it; we relock it
* before returning.
*/
void
wchan_sleep(struct wchan *wc, struct spinlock *lk)
{
/* may not sleep in an interrupt handler */
KASSERT(!curthread->t_in_interrupt);
/* must hold the spinlock */
KASSERT(spinlock_do_i_hold(lk));
/* must not hold other spinlocks */
KASSERT(curcpu->c_spinlocks == 1);
thread_switch(S_SLEEP, wc, lk);
spinlock_acquire(lk);
}
/*
* Wake up one thread sleeping on a wait channel.
*/
void
wchan_wakeone(struct wchan *wc, struct spinlock *lk)
{
struct thread *target;
KASSERT(spinlock_do_i_hold(lk));
/* Grab a thread from the channel */
target = threadlist_remhead(&wc->wc_threads);
if (target == NULL) {
/* Nobody was sleeping. */
return;
}
/*
* Note that thread_make_runnable acquires a runqueue lock
* while we're holding LK. This is ok; all spinlocks
* associated with wchans must come before the runqueue locks,
* as we also bridge from the wchan lock to the runqueue lock
* in thread_switch.
*/
thread_make_runnable(target, false);
}
/*
* Wake up all threads sleeping on a wait channel.
*/
void
wchan_wakeall(struct wchan *wc, struct spinlock *lk)
{
struct thread *target;
struct threadlist list;
KASSERT(spinlock_do_i_hold(lk));
threadlist_init(&list);
/*
* Grab all the threads from the channel, moving them to a
* private list.
*/
while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) {
threadlist_addtail(&list, target);
}
/*
* We could conceivably sort by cpu first to cause fewer lock
* ops and fewer IPIs, but for now at least don't bother. Just
* make each thread runnable.
*/
while ((target = threadlist_remhead(&list)) != NULL) {
thread_make_runnable(target, false);
}
threadlist_cleanup(&list);
}
/*
 * Return nonzero if there are no threads sleeping on the channel.
 * This is meant to be used only for diagnostic purposes. The
 * associated spinlock LK must be held.
 */
bool
wchan_isempty(struct wchan *wc, struct spinlock *lk)
{
	KASSERT(spinlock_do_i_hold(lk));
	return threadlist_isempty(&wc->wc_threads);
}
////////////////////////////////////////////////////////////
/*
* Machine-independent IPI handling
*/
/*
 * Send an IPI (inter-processor interrupt) to the specified CPU.
 * CODE selects one of up to 32 IPI types (a bit in c_ipi_pending).
 */
void
ipi_send(struct cpu *target, int code)
{
	KASSERT(code >= 0 && code < 32);

	/* set the pending bit under the target's IPI lock, then poke
	   the hardware so the target takes the interrupt */
	spinlock_acquire(&target->c_ipi_lock);
	target->c_ipi_pending |= (uint32_t)1 << code;
	mainbus_send_ipi(target);
	spinlock_release(&target->c_ipi_lock);
}
/*
 * Send an IPI to all CPUs except the one we are currently running on.
 */
void
ipi_broadcast(int code)
{
	unsigned idx;
	struct cpu *target;

	for (idx = 0; idx < cpuarray_num(&allcpus); idx++) {
		target = cpuarray_get(&allcpus, idx);
		if (target == curcpu->c_self) {
			continue;	/* skip ourselves */
		}
		ipi_send(target, code);
	}
}
/*
 * Send a TLB shootdown IPI to the specified CPU.
 * The mapping is copied into the target's fixed-size shootdown queue
 * under the target's IPI lock; the queue is drained by
 * interprocessor_interrupt on the target.
 */
void
ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
{
	unsigned n;
	spinlock_acquire(&target->c_ipi_lock);
	n = target->c_numshootdown;
	if (n == TLBSHOOTDOWN_MAX) {
		/*
		 * If you have problems with this panic going off,
		 * consider: (1) increasing the maximum, (2) putting
		 * logic here to sleep until space appears (may
		 * interact awkwardly with VM system locking), (3)
		 * putting logic here to coalesce requests together,
		 * and/or (4) improving VM system state tracking to
		 * reduce the number of unnecessary shootdowns.
		 */
		panic("ipi_tlbshootdown: Too many shootdowns queued\n");
	}
	else {
		target->c_shootdown[n] = *mapping;
		target->c_numshootdown = n+1;
	}
	/* raise the shootdown IPI bit and interrupt the target */
	target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN;
	mainbus_send_ipi(target);
	spinlock_release(&target->c_ipi_lock);
}
/*
 * Handle an incoming interprocessor interrupt.
 * Reads and clears this cpu's pending-IPI bitmask under c_ipi_lock.
 * Note that the IPI_PANIC and IPI_OFFLINE paths never return: they
 * end in cpu_halt(), so the pending bits are deliberately not cleared
 * on those paths.
 */
void
interprocessor_interrupt(void)
{
	uint32_t bits;
	unsigned i;
	spinlock_acquire(&curcpu->c_ipi_lock);
	bits = curcpu->c_ipi_pending;
	if (bits & (1U << IPI_PANIC)) {
		/* panic on another cpu - just stop dead */
		spinlock_release(&curcpu->c_ipi_lock);
		cpu_halt();
	}
	if (bits & (1U << IPI_OFFLINE)) {
		/* offline request */
		/* drop the IPI lock before taking the runqueue lock to
		   respect the wchan-before-runqueue lock order */
		spinlock_release(&curcpu->c_ipi_lock);
		spinlock_acquire(&curcpu->c_runqueue_lock);
		if (!curcpu->c_isidle) {
			kprintf("cpu%d: offline: warning: not idle\n",
				curcpu->c_number);
		}
		spinlock_release(&curcpu->c_runqueue_lock);
		kprintf("cpu%d: offline.\n", curcpu->c_number);
		cpu_halt();
	}
	if (bits & (1U << IPI_UNIDLE)) {
		/*
		 * The cpu has already unidled itself to take the
		 * interrupt; don't need to do anything else.
		 */
	}
	if (bits & (1U << IPI_TLBSHOOTDOWN)) {
		/*
		 * Note: depending on your VM system locking you might
		 * need to release the ipi lock while calling
		 * vm_tlbshootdown.
		 */
		for (i=0; i<curcpu->c_numshootdown; i++) {
			vm_tlbshootdown(&curcpu->c_shootdown[i]);
		}
		curcpu->c_numshootdown = 0;
	}
	/* acknowledge everything we handled */
	curcpu->c_ipi_pending = 0;
	spinlock_release(&curcpu->c_ipi_lock);
}
| 26.075041 | 78 | 0.691191 | [
"model"
] |
6551c9d3c49828d489e4ff8928c0e9f1e816464f | 4,373 | c | C | model/v1beta1_lease_list.c | ityuhui/client-c | 1d30380d7ba0fe9b5e97626e0f7507be4ce8f96d | [
"curl",
"Apache-2.0"
] | null | null | null | model/v1beta1_lease_list.c | ityuhui/client-c | 1d30380d7ba0fe9b5e97626e0f7507be4ce8f96d | [
"curl",
"Apache-2.0"
] | null | null | null | model/v1beta1_lease_list.c | ityuhui/client-c | 1d30380d7ba0fe9b5e97626e0f7507be4ce8f96d | [
"curl",
"Apache-2.0"
] | null | null | null | #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "v1beta1_lease_list.h"
/*
 * Allocate a v1beta1_lease_list_t and take ownership of the given
 * fields (pointers are stored, not copied). Returns NULL if the
 * allocation fails.
 */
v1beta1_lease_list_t *v1beta1_lease_list_create(
    char *apiVersion,
    list_t *items,
    char *kind,
    v1_list_meta_t *metadata
    ) {
    v1beta1_lease_list_t *result = malloc(sizeof(v1beta1_lease_list_t));
    if (!result) {
        return NULL;
    }
    result->apiVersion = apiVersion;
    result->items = items;
    result->kind = kind;
    result->metadata = metadata;
    return result;
}
/*
 * Release a v1beta1_lease_list_t and everything it owns.
 *
 * Now NULL-safe: the object itself and each individual field may be
 * NULL (e.g. a partially-constructed object from a failed parse), so
 * callers no longer crash when freeing such objects. Previously a
 * NULL object or a NULL items/metadata field caused a NULL
 * dereference inside list_ForEach / v1_list_meta_free.
 */
void v1beta1_lease_list_free(v1beta1_lease_list_t *v1beta1_lease_list) {
    if (v1beta1_lease_list == NULL) {
        return;
    }
    listEntry_t *listEntry;
    if (v1beta1_lease_list->apiVersion) {
        free(v1beta1_lease_list->apiVersion);
        v1beta1_lease_list->apiVersion = NULL;
    }
    if (v1beta1_lease_list->items) {
        list_ForEach(listEntry, v1beta1_lease_list->items) {
            v1beta1_lease_free(listEntry->data);
        }
        list_free(v1beta1_lease_list->items);
        v1beta1_lease_list->items = NULL;
    }
    if (v1beta1_lease_list->kind) {
        free(v1beta1_lease_list->kind);
        v1beta1_lease_list->kind = NULL;
    }
    if (v1beta1_lease_list->metadata) {
        v1_list_meta_free(v1beta1_lease_list->metadata);
        v1beta1_lease_list->metadata = NULL;
    }
    free(v1beta1_lease_list);
}
/*
 * Serialize a v1beta1_lease_list_t to a newly-allocated cJSON object.
 * "items" is required; apiVersion/kind/metadata are emitted only when
 * non-NULL. Returns NULL (after deleting any partial tree) on failure.
 * The caller owns the returned cJSON object.
 */
cJSON *v1beta1_lease_list_convertToJSON(v1beta1_lease_list_t *v1beta1_lease_list) {
    cJSON *item = cJSON_CreateObject();
    // v1beta1_lease_list->apiVersion
    if(v1beta1_lease_list->apiVersion) {
    if(cJSON_AddStringToObject(item, "apiVersion", v1beta1_lease_list->apiVersion) == NULL) {
    goto fail; //String
    }
    }
    // v1beta1_lease_list->items
    // items is mandatory: fail if absent
    if (!v1beta1_lease_list->items) {
        goto fail;
    }
    cJSON *items = cJSON_AddArrayToObject(item, "items");
    if(items == NULL) {
    goto fail; //nonprimitive container
    }
    listEntry_t *itemsListEntry;
    if (v1beta1_lease_list->items) {
    list_ForEach(itemsListEntry, v1beta1_lease_list->items) {
    cJSON *itemLocal = v1beta1_lease_convertToJSON(itemsListEntry->data);
    if(itemLocal == NULL) {
    goto fail;
    }
    cJSON_AddItemToArray(items, itemLocal);
    }
    }
    // v1beta1_lease_list->kind
    if(v1beta1_lease_list->kind) {
    if(cJSON_AddStringToObject(item, "kind", v1beta1_lease_list->kind) == NULL) {
    goto fail; //String
    }
    }
    // v1beta1_lease_list->metadata
    if(v1beta1_lease_list->metadata) {
    cJSON *metadata_local_JSON = v1_list_meta_convertToJSON(v1beta1_lease_list->metadata);
    if(metadata_local_JSON == NULL) {
    goto fail; //model
    }
    cJSON_AddItemToObject(item, "metadata", metadata_local_JSON);
    // NOTE(review): checking item->child only verifies the object is
    // non-empty, not that this particular add succeeded — generated
    // code quirk; confirm against the generator template
    if(item->child == NULL) {
    goto fail;
    }
    }
    return item;
fail:
    // on any error, discard the whole partially-built tree
    if (item) {
    cJSON_Delete(item);
    }
    return NULL;
}
/*
 * Parse a v1beta1_lease_list_t from a cJSON object. "items" is
 * required and must be an array of objects; apiVersion/kind must be
 * strings when present. Returns a newly-allocated object (caller owns
 * it) or NULL on malformed input.
 *
 * Fix: the error paths previously leaked the already-built items list
 * and the parsed metadata object whenever a later field failed
 * validation. All locals that own memory are now declared up front
 * and released at the `end:` label.
 */
v1beta1_lease_list_t *v1beta1_lease_list_parseFromJSON(cJSON *v1beta1_lease_listJSON){

    v1beta1_lease_list_t *v1beta1_lease_list_local_var = NULL;

    // owns parsed v1beta1_lease items; freed at end: on failure
    list_t *itemsList = NULL;

    // owns the parsed metadata; freed at end: on failure
    v1_list_meta_t *metadata_local_nonprim = NULL;

    // v1beta1_lease_list->apiVersion (optional string)
    cJSON *apiVersion = cJSON_GetObjectItemCaseSensitive(v1beta1_lease_listJSON, "apiVersion");
    if (apiVersion) {
    if(!cJSON_IsString(apiVersion))
    {
    goto end; //String
    }
    }

    // v1beta1_lease_list->items (required array of objects)
    cJSON *items = cJSON_GetObjectItemCaseSensitive(v1beta1_lease_listJSON, "items");
    if (!items) {
        goto end;
    }

    cJSON *items_local_nonprimitive;
    if(!cJSON_IsArray(items)){
        goto end; //nonprimitive container
    }

    itemsList = list_create();

    cJSON_ArrayForEach(items_local_nonprimitive,items )
    {
        if(!cJSON_IsObject(items_local_nonprimitive)){
            goto end;
        }
        v1beta1_lease_t *itemsItem = v1beta1_lease_parseFromJSON(items_local_nonprimitive);

        list_addElement(itemsList, itemsItem);
    }

    // v1beta1_lease_list->kind (optional string)
    cJSON *kind = cJSON_GetObjectItemCaseSensitive(v1beta1_lease_listJSON, "kind");
    if (kind) {
    if(!cJSON_IsString(kind))
    {
    goto end; //String
    }
    }

    // v1beta1_lease_list->metadata (optional object)
    cJSON *metadata = cJSON_GetObjectItemCaseSensitive(v1beta1_lease_listJSON, "metadata");
    if (metadata) {
    metadata_local_nonprim = v1_list_meta_parseFromJSON(metadata); //nonprimitive
    }

    v1beta1_lease_list_local_var = v1beta1_lease_list_create (
        apiVersion ? strdup(apiVersion->valuestring) : NULL,
        itemsList,
        kind ? strdup(kind->valuestring) : NULL,
        metadata ? metadata_local_nonprim : NULL
        );
    return v1beta1_lease_list_local_var;
end:
    // release everything parsed so far so error paths don't leak
    if (itemsList) {
        listEntry_t *listEntry = NULL;
        list_ForEach(listEntry, itemsList) {
            v1beta1_lease_free(listEntry->data);
            listEntry->data = NULL;
        }
        list_free(itemsList);
        itemsList = NULL;
    }
    if (metadata_local_nonprim) {
        v1_list_meta_free(metadata_local_nonprim);
        metadata_local_nonprim = NULL;
    }
    return NULL;
}
| 26.185629 | 95 | 0.703407 | [
"model"
] |
655c67c8a8a09195f307188a668ba3babd2cbc88 | 5,229 | c | C | btgesv/base_0/m_knight.c | cr88192/bgbtech_engine | 03869a92fbf3197dd176d311f3b917f4f4e88d1a | [
"MIT"
] | 1 | 2019-07-02T22:53:52.000Z | 2019-07-02T22:53:52.000Z | btgesv/base_0/m_knight.c | cr88192/bgbtech_engine | 03869a92fbf3197dd176d311f3b917f4f4e88d1a | [
"MIT"
] | 5 | 2015-11-17T05:45:50.000Z | 2015-11-26T18:36:51.000Z | btgesv/base_0/m_knight.c | cr88192/bgbtech_engine | 03869a92fbf3197dd176d311f3b917f4f4e88d1a | [
"MIT"
] | null | null | null | #include <btgesv.h>
/* Standing animation table: one entry per frame, each giving the AI
 * action, a per-frame parameter, and an optional callback run on that
 * frame. Frame 0 runs monster_knight_idle; the rest are idle holds. */
BTGE_MoveFrame monster_knight_stand1_mf[]=
{
	{BTAI_Stand, 0, monster_knight_idle},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
	{BTAI_Stand, 0, NULL},
};
/* Move descriptor: name, start frame, frame count, frame table, and
 * the function to run when the move finishes (loops back to stand). */
BTGE_MoveInfo monster_knight_stand1_mfi =
{"stand", 0, 30, monster_knight_stand1_mf, monster_knight_stand};
/* Walking animation: the numeric field is per-frame movement distance.
 * Frame 9 runs monster_knight_walk1_random, which restarts the walk
 * most of the time (see that function); remaining frames are padding. */
BTGE_MoveFrame monster_knight_walk1_mf[]=
{
	{BTAI_Walk, 3, monster_knight_idle},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 3, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 0, monster_knight_walk1_random},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
	{BTAI_Walk, 0, NULL},
};
/* Shorter walk cycle variant; not referenced by any visible
 * MoveInfo in this chunk — possibly used elsewhere, verify before
 * removing. */
BTGE_MoveFrame monster_knight_walk2_mf[]=
{
	{BTAI_Walk, 3, monster_knight_idle},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 3, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 5, NULL},
	{BTAI_Walk, 2, NULL},
	{BTAI_Walk, 0, NULL},
};
/* Walk move: 20 frames of walk1_mf, then monster_knight_walk restarts it. */
BTGE_MoveInfo monster_knight_walk1_mfi =
{"walk", 0, 20, monster_knight_walk1_mf, monster_knight_walk};
/* Running animation: larger per-frame distances than walking. */
BTGE_MoveFrame monster_knight_run_mf[]=
{
	{BTAI_Run, 10, NULL},
	{BTAI_Run, 11, NULL},
	{BTAI_Run, 11, NULL},
	{BTAI_Run, 15, NULL},
	{BTAI_Run, 11, NULL},
	{BTAI_Run, 15, NULL},
	{BTAI_Run, 15, NULL},
};
/* Run move: frames 2..8, looping via monster_knight_run. */
BTGE_MoveInfo monster_knight_run_mfi =
{"run", 2, 8, monster_knight_run_mf, monster_knight_run};
/* Attack animation: frame 0 fires the projectile (monster_knight_fire),
 * the rest of the frames play out the attack pose. */
BTGE_MoveFrame monster_knight_attack1_mf[]=
{
	{BTAI_Charge, 0, monster_knight_fire},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
	{BTAI_Charge, 0, NULL},
};
/* Attack move: returns to running when finished. */
BTGE_MoveInfo monster_knight_attack1_mfi =
{"attak1", 2, 8, monster_knight_attack1_mf, monster_knight_run};
/* Per-frame idle callback referenced by the animation tables above;
 * intentionally a no-op. */
void monster_knight_idle(btEntity self)
{
}
/* Entity-init callback: configure physics, model, sounds, bounding box
 * and health, then hand the entity to the walkmonster AI driver. */
void monster_knight_init(btEntity self)
{
	self->solidtype=BT_SOLID_SLIDEBOX;
	self->movetype=BT_MOVE_STEP;
	btSetModel(self, "models/monsters/berserk/berserk.model");
	self->snd_sight="sound/soldier/solsght1";
	/* collision hull, in entity-local units */
	self->mins=vec3(-16, -16, -24);
	self->maxs=vec3(16, 16, 40);
	self->health=50;
	btWalkmonsterStart(self);
}
/* Entity-interface callbacks; most are deliberately empty stubs. */
void monster_knight_deinit(btEntity self)
{ }
void monster_knight_null(btEntity self)
{ }
void monster_knight_blocked(btEntity self, btEntity other)
{ }
void monster_knight_touch(btEntity self, btEntity other)
{ }
void monster_knight_use(btEntity self, btEntity other)
{ }
void monster_knight_pain(btEntity self, btEntity other, float damage)
{ }
/* Death callback: gib with a blood explosion (other effects were
 * tried and left commented out). */
void monster_knight_die(btEntity self, btEntity other, float damage)
{
//	BT_EntityExplode(self);
	BT_EntityBloodExplode(self);
//	BT_EntityDisintegration(self);
}
void monster_knight_cmdmsg(btEntity self, btEntity other, char *str)
{ }
/* AI behavior entry points: each switches the entity onto one of the
 * move tables defined above. */
void monster_knight_stand(btEntity self)
{
	btSetMove(self, &monster_knight_stand1_mfi);
}
/* Mid-walk callback: with 90% probability restart the walk cycle;
 * otherwise fall through to the padding frames (brief pause). */
void monster_knight_walk1_random(btEntity self)
{
	if(btRandom()>0.1)
		btSetMove(self, &monster_knight_walk1_mfi);
}
void monster_knight_walk(btEntity self)
{
	btSetMove(self, &monster_knight_walk1_mfi);
}
void monster_knight_run(btEntity self)
{
	btSetMove(self, &monster_knight_run_mfi);
}
void monster_knight_missile(btEntity self)
{
	btSetMove(self, &monster_knight_attack1_mfi);
}
/* Attack-frame callback: fire a blaster bolt from our origin toward
 * the current enemy. */
void monster_knight_fire(btEntity self)
{
	vec3 org, dir;
	org=self->origin;
//	dir=btYawVector(btCurrentYaw(self));
	dir=BT_TargetDirection(self, self->enemy);
//	BT_FireRocket(self, org, dir, 10, 600, 25);
	BT_FireBlaster(self, org, dir, 10, 600, 25);
}
/* No melee attack implemented; entry kept for the actor interface. */
void monster_knight_melee(btEntity self)
{
}
/* Entity callback table (init/deinit, think slots, blocked, touch,
 * use, pain, die, cmdmsg) consumed by the engine. */
BTGE_Entity_Iface monster_knight_vt =
{
	monster_knight_init, monster_knight_deinit,
	monster_knight_null, monster_knight_null,
	monster_knight_blocked, monster_knight_touch,
	monster_knight_use, monster_knight_pain,
	monster_knight_die, monster_knight_cmdmsg,
};
/* Actor behavior table: stand/walk/run/missile; melee slot is NULL
 * (no melee attack). */
BTGE_Actor_Iface monster_knight_avt =
{
	monster_knight_stand,
	monster_knight_walk,
	monster_knight_run,
	monster_knight_missile,
//	monster_knight_melee,
	NULL,
};
/* Map-spawn entry point: create the knight actor from map-entity args. */
BTSV_API btEntity BtSp_monster_knight(btSEntity args)
{
	btEntity self;
//	return(NULL);
	self=btNewEntityActorInit(args,
		&monster_knight_vt,
		&monster_knight_avt);
	return(self);
}
| 21.084677 | 69 | 0.731115 | [
"model"
] |
655d4abf40b42f7d1343856db1ca2ef5f3358110 | 14,893 | c | C | crankbase/crank128.c | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | crankbase/crank128.c | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | crankbase/crank128.c | WSID/crank-system | 7c94f829050815392259c227e38b34f97886f7fc | [
"MIT"
] | null | null | null | /* Copyright (C) 2015, WSID */
/* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define _CRANKBASE_INSIDE
#define CRANK_NO_C11_GENERIC_SELECTOR
#include <ctype.h>
#include <string.h>
#include <glib.h>
#include <glib-object.h>
#include "crankbits.h"
#include "crank128.h"
/**
* SECTION: crank128
* @title: 128bit Unsigned Integer.
* @short_description: An 128-bit unsigned integer.
*
* 128-bit integers are not used frequently. As most of operations are done by
* multiple 64-bit operations, they are much slower than 64-bit ones.
*
* Sometimes, 128-bit operations are needed. For this, crank system provides
* simple implementation of these operations.
*/
/**
 * crank_uint128_init_add:
 * @i: (out): Instance to initialize.
 * @a: a 64-bit int.
 * @b: a 64-bit int.
 *
 * Initialize 128 from sum of 64-bit integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_init_add) (CrankUint128 *i,
                          guint64       a,
                          guint64       b)
{
  /* The parenthesized function name suppresses any same-named macro,
   * so this defines the out-of-line version. CRANK_ADD_CARRY64 stores
   * a+b's low word into i->l and evaluates to the carry-out (0 or 1),
   * which becomes the high word. */
  i->h = CRANK_ADD_CARRY64 (a, b, &(i->l));
}
/**
 * crank_uint128_init_mul:
 * @i: (out): Instance to initialize.
 * @a: a 64-bit int.
 * @b: a 64-bit int.
 *
 * Initialize 128 from product of 64-bit integer.
 */
void
crank_uint128_init_mul (CrankUint128 *i,
                        guint64       a,
                        guint64       b)
{
  /* Schoolbook multiplication on 32-bit halves:
   *   a*b = (ah*bh)<<64 + (ah*bl + al*bh)<<32 + al*bl
   * Each 32x32 partial product fits in 64 bits, so only the cross
   * terms need explicit carry propagation into the high word. */
  guint64 ah = a >> 32;
  guint64 al = a & 0xFFFFFFFF;
  guint64 bh = b >> 32;
  guint64 bl = b & 0xFFFFFFFF;

  guint64 m1;
  guint64 m2;

  i->h = ah * bh;
  m1 = ah * bl;
  m2 = al * bh;
  i->l = al * bl;

  /* sum the two cross terms; a carry out of that sum is worth 2^96,
   * i.e. 2^32 in the high word */
  i->h += ((guint64)CRANK_IADD_CARRY64(&m1, m2)) << 32;
  /* upper 32 bits of the cross sum land in the high word... */
  i->h += m1 >> 32;
  /* ...and the lower 32 bits (shifted up) are added into the low
   * word, with the carry propagated to the high word */
  m2 = m1 << 32;
  i->h += CRANK_IADD_CARRY64(&(i->l), m2);
}
/**
 * crank_uint128_copy:
 * @i: A instance.
 * @r: (out): A instance.
 *
 * Copys a 128-bit integer.
 *
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_copy) (CrankUint128 *i,
                      CrankUint128 *r)
{
  /* plain member-wise copy of both 64-bit halves */
  r->h = i->h;
  r->l = i->l;
}
/**
 * crank_uint128_inc:
 * @i: A instance.
 *
 * Increase 128-bit integer by one.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_inc) (CrankUint128 *i)
{
  i->l++;
  /* low word wrapped to 0 exactly when the increment overflowed,
   * so !(i->l) is the carry into the high word */
  i->h += !(i->l);
}

/**
 * crank_uint128_dec:
 * @i: A instance.
 *
 * Decrease 128-bit integer by one.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_dec) (CrankUint128 *i)
{
  /* borrow from the high word when the low word is 0 (about to
   * wrap); test must happen before the decrement */
  i->h -= !(i->l);
  i->l--;
}
/**
 * crank_uint128_add64:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 * @r: (out): A Result.
 *
 * Adds 64-bit integer to 128-bit integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_add64) (CrankUint128 *a,
                       guint64       b,
                       CrankUint128 *r)
{
  /* CRANK_ADD_CARRY64 stores the low-word sum into r->l and yields
   * the carry bit, folded into the high word */
  r->h = a->h + CRANK_ADD_CARRY64(a->l, b, &(r->l));
}

/**
 * crank_uint128_add64_self:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 *
 * Adds 64-bit integer to 128-bit integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_add64_self) (CrankUint128 *a,
                            guint64       b)
{
  /* in-place variant: CRANK_IADD_CARRY64 adds b into a->l and
   * returns the carry */
  a->h += CRANK_IADD_CARRY64(&(a->l), b);
}
/**
 * crank_uint128_add:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 * @r: (out): A Result.
 *
 * Adds 128-bit integer to 128-bit integer.
 *
 * This is [simple function][simple-function] and
 * [representative function][representative-function].
 */
void
(crank_uint128_add) (CrankUint128 *a,
                     CrankUint128 *b,
                     CrankUint128 *r)
{
  /* high words plus the carry out of the low-word addition */
  r->h = a->h + b->h + CRANK_ADD_CARRY64(a->l, b->l, &(r->l));
}

/**
 * crank_uint128_add_self:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 *
 * Adds 128-bit integer to 128-bit integer.
 *
 * This is [simple function][simple-function] and
 * [representative function][representative-function].
 */
void
(crank_uint128_add_self) (CrankUint128 *a,
                          CrankUint128 *b)
{
  a->h += b->h;
  /* add low words in place, then propagate the carry */
  a->h += CRANK_IADD_CARRY64(&(a->l), b->l);
}
/**
 * crank_uint128_sub64:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 * @r: (out): A Result.
 *
 * Subtracts 64-bit integer from 128-bit integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_sub64) (CrankUint128 *a,
                       guint64       b,
                       CrankUint128 *r)
{
  /* CRANK_SUB_CARRY64 stores a->l - b into r->l and yields the
   * borrow bit, subtracted from the high word */
  r->h = a->h - CRANK_SUB_CARRY64(a->l, b, &(r->l));
}

/**
 * crank_uint128_sub64_self:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 *
 * Subtracts 64-bit integer from 128-bit integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_sub64_self) (CrankUint128 *a,
                            guint64       b)
{
  /* borrow is detected before the in-place subtract wraps a->l */
  a->h -= (a->l < b);
  a->l -= b;
}
/**
 * crank_uint128_sub:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 * @r: (out): A Result.
 *
 * Subtracts 128-bit integer from 128-bit integer.
 *
 * This is [simple function][simple-function] and
 * [representative function][representative-function].
 */
void
(crank_uint128_sub) (CrankUint128 *a,
                     CrankUint128 *b,
                     CrankUint128 *r)
{
  /* high-word difference minus the borrow out of the low words */
  r->h = a->h - b->h - CRANK_SUB_CARRY64(a->l, b->l, &(r->l));
}

/**
 * crank_uint128_sub_self:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 *
 * Subtracts 128-bit integer from 128-bit integer.
 *
 * This is [simple function][simple-function] and
 * [representative function][representative-function].
 */
void
(crank_uint128_sub_self) (CrankUint128 *a,
                          CrankUint128 *b)
{
  /* compute borrow from the low words before they are modified */
  a->h -= b->h + ((a->l) < (b->l));
  a->l -= b->l;
}
/**
 * crank_uint128_mul64:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 * @r: (out): A Result.
 *
 * Multiplies 64-bit integer to 128-bit integer.
 */
void
crank_uint128_mul64 (CrankUint128 *a,
                     guint64       b,
                     CrankUint128 *r)
{
  /* full 64x64->128 product of the low word... */
  crank_uint128_init_mul (r, a->l, b);
  /* ...plus a->h * b into the high word; bits above 2^128 are
   * truncated, consistent with 128-bit wraparound */
  r->h += a->h * b;
}

/**
 * crank_uint128_mul64_self:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 *
 * Multiplies 64-bit integer to 128-bit integer.
 */
void
crank_uint128_mul64_self (CrankUint128 *a,
                          guint64       b)
{
  /* work on a copy since crank_uint128_mul64 does not allow its
   * input and output to alias */
  CrankUint128 na;
  crank_uint128_copy (a, &na);
  crank_uint128_mul64 (&na, b, a);
}
/**
 * crank_uint128_div:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 * @r: (out): A Result.
 *
 * Divides 128-bit integer with 128-bit integer.
 *
 * This function is [representative function][representative-function].
 */
void
crank_uint128_div (CrankUint128 *a,
                   CrankUint128 *b,
                   CrankUint128 *r)
{
  /* divisor fits in 64 bits: use the cheaper routine */
  if (b->h == 0)
    crank_uint128_div64 (a, b->l, r);
  else
    {
      /* shift-and-subtract long division; since b >= 2^64 the
       * quotient fits in 64 bits (r->h stays 0) */
      CrankUint128 ac;
      CrankUint128 bc;
      guint64 add = 1;

      r->h = 0;
      r->l = 0;

      crank_uint128_copy (a, &ac);
      crank_uint128_copy (b, &bc);

      // Shifts to left!
      /* normalize: move bc's most significant bit to the top;
       * NOTE(review): if bc.h already has its top bit set, sha is 0
       * and `bc.l >> (64 - sha)` shifts by 64, which is undefined
       * behavior in C — confirm crank_bits_shift_to_left64's
       * contract */
      guint sha = crank_bits_shift_to_left64 (&(bc.h));
      bc.h |= (bc.l >> (64 - sha));
      bc.l <<= sha;
      add <<= sha;

      /* try each shifted multiple of b from high bit down */
      while (add != 0)
        {
          if ((bc.h < ac.h) || ((bc.h == ac.h) && (bc.l <= ac.l)))
            {
              crank_uint128_sub_self (&ac, &bc);
              r->l |= add;
            }
          crank_uint128_rsh_self (&bc, 1);
          add >>= 1;
        }
    }
}
/**
 * crank_uint128_div_self:
 * @a: A 128-bit integer.
 * @b: A 128-bit integer.
 *
 * Divides 128-bit integer with 128-bit integer.
 *
 * This function is [representative function][representative-function].
 */
void
crank_uint128_div_self (CrankUint128 *a,
                        CrankUint128 *b)
{
  /* divisor fits in 64 bits: use the cheaper routine */
  if (b->h == 0)
    crank_uint128_div64_self (a, b->l);
  else
    {
      /* in-place variant of crank_uint128_div: snapshot a and b,
       * then accumulate the quotient directly into a */
      CrankUint128 ac;
      CrankUint128 bc;
      guint64 add = 1;

      crank_uint128_copy (a, &ac);
      crank_uint128_copy (b, &bc);

      a->h = 0;
      a->l = 0;

      // Shifts to left!
      /* NOTE(review): same sha==0 shift-by-64 hazard as in
       * crank_uint128_div — confirm crank_bits_shift_to_left64's
       * contract */
      guint sha = crank_bits_shift_to_left64 (&(bc.h));
      bc.h |= (bc.l >> (64 - sha));
      bc.l <<= sha;
      add <<= sha;

      while (add != 0)
        {
          if ((bc.h < ac.h) || ((bc.h == ac.h) && (bc.l <= ac.l)))
            {
              crank_uint128_sub_self (&ac, &bc);
              a->l |= add;
            }
          crank_uint128_rsh_self (&bc, 1);
          add >>= 1;
        }
    }
}
/**
 * crank_uint128_div64:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 * @r: (out): A Result.
 *
 * Divides 128-bit integer with 64-bit integer.
 */
void
crank_uint128_div64 (CrankUint128 *a,
                     guint64       b,
                     CrankUint128 *r)
{
  CrankUint128 ac;
  guint64 add;

  // Initial calculation.
  /* high word divides directly; its remainder (always < b) seeds the
   * running remainder for the low 64 quotient bits */
  r->h = a->h / b;
  ac.h = a->h % b;
  r->l = 0;
  ac.l = a->l;

  // Shift right and subtract.
  /* restoring division: shift one dividend bit at a time into ac.h
   * and set the corresponding quotient bit when b fits */
  add = 0x8000000000000000LU;
  crank_uint128_lsh_self (&ac, 1);
  while (add != 0)
    {
      if (b <= ac.h)
        {
          ac.h -= b;
          r->l |= add;
        }
      crank_uint128_lsh_self (&ac, 1);
      add >>= 1;
    }
}
/**
 * crank_uint128_div64_self:
 * @a: A 128-bit integer.
 * @b: A 64-bit integer.
 *
 * Divides 128-bit integer with 64-bit integer.
 */
void
crank_uint128_div64_self (CrankUint128 *a,
                          guint64       b)
{
  CrankUint128 ac;
  guint64 add;

  // Initial calculation.
  /* in-place variant of crank_uint128_div64: remainder state moves
   * into ac, quotient accumulates directly in a */
  ac.h = a->h % b;
  a->h = a->h / b;
  ac.l = a->l;
  a->l = 0;

  // Shift right and subtract.
  add = 0x8000000000000000LU;
  crank_uint128_lsh_self (&ac, 1);
  while (add != 0)
    {
      if (b <= ac.h)
        {
          ac.h -= b;
          a->l |= add;
        }
      crank_uint128_lsh_self (&ac, 1);
      add >>= 1;
    }
}
/**
 * crank_uint128_div32:
 * @a: A 128-bit integer.
 * @b: A 32-bit integer.
 * @r: (out): A Result.
 *
 * Divides 128-bit integer by 32-bit integer.
 *
 * As it does not worry about overflowing remainer, it is much faster than
 * 64-bit version.
 */
void
crank_uint128_div32 (CrankUint128 *a,
                     guint32       b,
                     CrankUint128 *r)
{
  // this is about modular.
  /* Decompose a = h*2^64 + l and 2^64 = q64*b + mod64 (presumably
   * what crank_bits_remquo_2_64 returns — confirm), so
   *   a/b = h/b*2^64 + l/b + q64*(h%b) + (mod64*(h%b) + l%b)/b.
   * mod64 < b <= 2^32 and h%b < 2^32, so hrr fits in 64 bits. */
  guint64 mod64;
  guint64 q64;

  guint64 hr;
  guint64 lr;

  guint64 hrr;

  q64 = crank_bits_remquo_2_64 (b, &mod64);

  r->h = a->h / b;
  r->l = a->l / b;

  hr = a->h % b;
  lr = a->l % b;

  hrr = mod64 * hr + lr;

  crank_uint128_add64_self (r, q64 * hr);
  crank_uint128_add64_self (r, hrr / b);
}
/**
 * crank_uint128_div32_self:
 * @a: A 128-bit integer.
 * @b: A 32-bit integer.
 *
 * Divides 128-bit integer by 32-bit integer.
 *
 * As it does not worry about overflowing remainer, it is much faster than
 * 64-bit version.
 */
void
crank_uint128_div32_self (CrankUint128 *a,
                          guint32       b)
{
  // this is about modular.
  /* in-place variant of crank_uint128_div32; see that function for
   * the decomposition. Remainders are taken before a is overwritten. */
  guint64 mod64;
  guint64 q64;

  guint64 hr;
  guint64 lr;

  guint64 hrr;

  q64 = crank_bits_remquo_2_64 (b, &mod64);

  hr = a->h % b;
  lr = a->l % b;

  a->h = a->h / b;
  a->l = a->l / b;

  hrr = mod64 * hr + lr;

  crank_uint128_add64_self (a, q64 * hr);
  crank_uint128_add64_self (a, hrr / b);
}
/**
 * crank_uint128_remquo32:
 * @a: A 128-bit integer.
 * @b: A 32-bit integer.
 * @q: (out): A Quotient.
 * @r: (out): A Reminder.
 *
 * Divides 128-bit integer by 32-bit integer.
 *
 * As it does not worry about overflowing remainer, it is much faster than
 * 64-bit version.
 */
void
crank_uint128_remquo32 (CrankUint128 *a,
                        guint32       b,
                        CrankUint128 *q,
                        guint32      *r)
{
  // this is about modular.
  /* same decomposition as crank_uint128_div32, with the final
   * remainder hrr % b additionally returned through r */
  guint64 mod64;
  guint64 q64;

  guint64 hr;
  guint64 lr;

  guint64 hrr;

  q64 = crank_bits_remquo_2_64 (b, &mod64);

  q->h = a->h / b;
  q->l = a->l / b;

  hr = a->h % b;
  lr = a->l % b;

  hrr = mod64 * hr + lr;

  crank_uint128_add64_self (q, q64 * hr);
  crank_uint128_add64_self (q, hrr / b);

  *r = hrr % b;
}
/**
 * crank_uint128_remquo32_self:
 * @a: A 128-bit integer.
 * @b: A 32-bit integer.
 * @r: (out): A Reminder.
 *
 * Divides 128-bit integer by 32-bit integer.
 *
 * As it does not worry about overflowing remainer, it is much faster than
 * 64-bit version.
 */
void
crank_uint128_remquo32_self (CrankUint128 *a,
                             guint32       b,
                             guint32      *r)
{
  // this is about modular.
  /* in-place variant of crank_uint128_remquo32; remainders are taken
   * before a is overwritten with the per-word quotients */
  guint64 mod64;
  guint64 q64;

  guint64 hr;
  guint64 lr;

  guint64 hrr;

  q64 = crank_bits_remquo_2_64 (b, &mod64);

  hr = a->h % b;
  lr = a->l % b;

  a->h = a->h / b;
  a->l = a->l / b;

  hrr = mod64 * hr + lr;

  crank_uint128_add64_self (a, q64 * hr);
  crank_uint128_add64_self (a, hrr / b);

  *r = hrr % b;
}
/**
 * crank_uint128_lsh:
 * @a: A 128-bit integer.
 * @b: Shift amount.
 * @r: (out): A Result.
 *
 * Shifts a integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_lsh) (CrankUint128 *a,
                     const guint   b,
                     CrankUint128 *r)
{
  /* NOTE(review): valid only for 1 <= b <= 63 — b == 0 makes
   * `a->l >> 64` undefined behavior in C, and b >= 64 over-shifts;
   * callers in this file only pass b in that range */
  r->h = (a->h << b) | (a->l >> (64 - b));
  r->l = (a->l << b);
}

/**
 * crank_uint128_lsh_self:
 * @a: A 128-bit integer.
 * @b: Shift amount.
 *
 * Shifts a integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_lsh_self) (CrankUint128 *a,
                          const guint   b)
{
  /* NOTE(review): same 1 <= b <= 63 restriction as crank_uint128_lsh.
   * The high word must be updated before a->l is shifted. */
  a->h = (a->h << b) | (a->l >> (64 - b));
  a->l <<= b;
}
/**
 * crank_uint128_rsh:
 * @a: A 128-bit integer.
 * @b: Shift amount.
 * @r: (out): A Result.
 *
 * Shifts a integer.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_rsh) (CrankUint128 *a,
                     const guint   b,
                     CrankUint128 *r)
{
  /* NOTE(review): valid only for 1 <= b <= 63 — b == 0 makes
   * `a->h << 64` undefined behavior in C */
  r->h = a->h >> b;
  r->l = (a->h << (64 - b)) | (a->l >> b);
}
/**
 * crank_uint128_rsh_self:
 * @a: A 128-bit integer.
 * @b: Shift amount.
 *
 * Shifts a integer.
 *
 * Valid only for 1 <= @b <= 63 (b == 0 makes the `<< (64 - b)`
 * expression undefined), matching crank_uint128_rsh.
 *
 * This is [simple function][simple-function].
 */
void
(crank_uint128_rsh_self) (CrankUint128 *a,
                          const guint   b)
{
  /* low word must be rebuilt before the high word is shifted */
  a->l = (a->h << (64 - b)) | (a->l >> b);
  /* Fixed: previously shifted by a->h instead of b, so the high word
   * was garbage; now mirrors crank_uint128_rsh. */
  a->h >>= b;
}
| 20.345628 | 80 | 0.563956 | [
"object"
] |
6560bd63dcbb971e63c5a672a526090095125b63 | 9,085 | c | C | platforms/unix/plugins/AsynchFilePlugin/sqUnixAsynchFile.c | bavison/opensmalltalk-vm | d494240736f7c0309e3e819784feb1d53ed0985a | [
"MIT"
] | 445 | 2016-06-30T08:19:11.000Z | 2022-03-28T06:09:49.000Z | platforms/unix/plugins/AsynchFilePlugin/sqUnixAsynchFile.c | bavison/opensmalltalk-vm | d494240736f7c0309e3e819784feb1d53ed0985a | [
"MIT"
] | 439 | 2016-06-29T20:14:36.000Z | 2022-03-17T19:59:58.000Z | platforms/unix/plugins/AsynchFilePlugin/sqUnixAsynchFile.c | bavison/opensmalltalk-vm | d494240736f7c0309e3e819784feb1d53ed0985a | [
"MIT"
] | 137 | 2016-07-02T17:32:07.000Z | 2022-03-20T11:17:25.000Z | /* sqUnixAsynchFile.c -- non-blocking file i/o
*
* Copyright (C) 1996-2004 by Ian Piumarta and other authors/contributors
* listed elsewhere in this file.
* All rights reserved.
*
* This file is part of Unix Squeak.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Ian.Piumarta@INRIA.Fr
*/
/*
Experimental support for asynchronous file reading and writing.
When a read or write operation is initiated, control is returned to Squeak
immediately. A semaphore is signaled when the operation completes, at which
time the client can find out how many bytes were actually read or written
and copy the results of the read operation from the file buffer into a Squeak
buffer. Only one operation may be in progress on a given file at a given time,
but operations on different files may be done in parallel.
The semaphore is signalled once for each transfer operation that is successfully
started, even if that operation later fails. Write operations always write
their entire buffer if they succeed, but read operations may transfer less than
their buffer size if they are started less than a buffer's size from the end
of the file.
The state of a file is kept in the following structure, which is stored directly
in a Squeak ByteArray object:
typedef struct {
int sessionID;
void *state; // private to the implementation
} AsyncFile;
The session ID is used to detect stale files--files that were open
when the image was saved. The state pointer of such files is meaningless.
Async file handles use the same session ID as ordinary file handles.
Note: These primitives are experimental! They need not be implemented on
every platform, and they may be withdrawn or replaced in a future release.
*/
#include "sq.h"
#include "AsynchFilePlugin.h"
#include "sqUnixAsynchFile.h"
/*** module initialisation ***/
#include "sqVirtualMachine.h"
#include "sqaio.h"
#include <sys/types.h>
#include <unistd.h>
#include <time.h>
int sqUnixAsyncFileSessionID= 0;
static struct VirtualMachine *vm= 0;
static fd_set fds;
static int nfd= 0;
#define isValid(f) (f->sessionID == sqUnixAsyncFileSessionID)
#define validate(f) if ((!isValid(f)) || (!(f->state))) return vm->primitiveFail()
/* Module initialization: cache the interpreter proxy, mint a fresh
 * session ID (so handles saved in an image are detectably stale),
 * and reset the descriptor bookkeeping. Always returns 1. */
int asyncFileInit(void)
{
  vm= sqGetInterpreterProxy();
  /* clock()+time() gives a value unlikely to repeat across runs */
  sqUnixAsyncFileSessionID= clock() + time(0);
  FD_ZERO(&fds);
  nfd= 0;
  return 1;
}
/* Module shutdown: detach aio handlers for every descriptor we
 * registered and invalidate the session ID. Always returns 1. */
int asyncFileShutdown(void)
{
  /* protect against calling stale aio handlers */
  int i;
  for (i= 0; i < nfd; ++i)
    if (FD_ISSET(i, &fds))
      aioDisable(i);
  nfd= 0;
  FD_ZERO(&fds);
  /* zero session ID makes all outstanding AsyncFile handles fail
     the isValid() check */
  sqUnixAsyncFileSessionID= 0;
  return 1;
}
/*** module ***/
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#ifdef __GNUC__
# define INLINE inline
#else
# define INLINE
#endif
#define min(a,b) ((a) < (b) ? (a) : (b))
#define max(a,b) ((a) > (b) ? (a) : (b))
static void readHandler(int fd, void *data, int flags);
static void writeHandler(int fd, void *data, int flags);
/* Allocate a zeroed per-file state record for descriptor fd signalling
 * Squeak semaphore sema. Returns NULL if out of memory. */
INLINE static FilePtr newFileRec(int fd, int sema)
{
  FilePtr fp= (FilePtr)calloc(1, sizeof(FileRec));
  if (fp)
    {
      fp->fd= fd;
      fp->sema= sema;
      /* both directions start out "in progress": no result is
         available until a transfer has been started and completed */
      fp->rd.status= Busy;	/* read not ready */
      fp->wr.status= Busy;	/* write not complete */
    }
  return fp;
}
/* Grow buf to hold at least size bytes. Returns 1 on success; on
   allocation failure prints a diagnostic and returns 0 with the
   buffer left empty. An already-large-enough buffer is reused. */
INLINE static int allocateBuffer(struct FileBuf *buf, int size)
{
  if (size <= buf->capacity)
    return 1;			/* current buffer is big enough */
  if (buf->capacity > 0)
    {
      /* contents need not be preserved; just drop the old storage */
      free(buf->bytes);
      buf->capacity= 0;
    }
  buf->bytes= (char *)malloc(size);
  if (buf->bytes == 0)
    {
      fprintf(stderr, "out of memory\n");
      return 0;
    }
  buf->capacity= size;
  return 1;
}
/* Bind descriptor fd to the Squeak-side AsyncFile f, signalling
 * semaIndex on events. Registers fd with the aio layer and the module
 * descriptor set. Returns the new FilePtr, or 0 (with f invalidated)
 * if out of memory. Does not close fd on failure: caller's job. */
FilePtr asyncFileAttach(AsyncFile *f, int fd, int semaIndex)
{
  FilePtr fp= newFileRec(fd, semaIndex);
  if (fp)
    {
      f->sessionID= sqUnixAsyncFileSessionID;
      f->state= (void *)fp;
      aioEnable(fd, (void *)fp, 0);
      FD_SET(fd, &fds);
      nfd= max(nfd, fd + 1);
      return fp;		/* success */
    }
  fprintf(stderr, "out of memory\n");
  f->sessionID= 0;
  f->state= 0;
  return 0;
}
/*** public functions ***/
/* Open the named file for AsyncFile handle f, signalling semaphore
 * semaIndex on i/o events.  The (unterminated) Squeak file name is
 * copied to a stack buffer and NUL-terminated first.  Fails the
 * primitive when the file cannot be opened or attached; always
 * answers 0. */
int asyncFileOpen(AsyncFile *f, char *fileNamePtr, int fileNameSize,
		  int writeFlag, int semaIndex)
{
  int fd= 0;
  char *name= alloca(fileNameSize + 1);
  memcpy((void *)name, (void *)fileNamePtr, fileNameSize);
  name[fileNameSize]= '\0';
  /* if opening for wr then open for rw so that we can use these primitives
     to read bidirectional files (e.g., master ptys for interactive child
     processes) */
  fd= (writeFlag
       ? open(name, O_RDWR | O_CREAT, 0644)
       : open(name, O_RDONLY));
  if (fd >= 0)
    {
      if (asyncFileAttach(f, fd, semaIndex))
	return 0;	/* success */
      close(fd);	/* attach failed: don't leak the descriptor */
    }
  vm->primitiveFail();
  return 0;	/* failure */
}
/* Close the file (if still open), detach the aio handler and release the
 * transfer buffer and FileRec.  Safe on an already-closed handle; fails
 * the primitive on a stale session id (via validate()). */
int asyncFileClose(AsyncFile *f)
{
  FilePtr fp= 0;
  validate(f);
  if ((fp= (FilePtr)f->state))
    {
      if (fp->fd >= 0)
	{
	  aioDisable(fp->fd);
	  FD_CLR(fp->fd, &fds);
	  close(fp->fd);
	}
      if (fp->buf.bytes)
	free((void *)fp->buf.bytes);
      free((void *)fp);
      f->state= 0;	/* mark the handle closed */
    }
  return 0;	/* success */
}
/* this no longer appears to be used */
/* Legacy entry point; nothing is known to call this any more (see the
 * note above).  Logs a diagnostic and fails the primitive. */
int asyncFileRecordSize(void)
{
  fprintf(stderr, "asyncFileRecordSize() called -- why?\n");
  vm->primitiveFail();
  return 0;
}
/* Copy up to bufferSize bytes of available input into bufferPtr.
 * Answers (and records in rd.status) the byte count on success, Busy
 * when the read would block, or Error on end-of-file / failure. */
int asyncFileReadResult(AsyncFile *f, void *bufferPtr, int bufferSize)
{
  FilePtr fp= 0;
  int n= 0;
  validate(f);
  fp= (FilePtr)f->state;
  n= read(fp->fd, bufferPtr, bufferSize);
  if ((n < 0) && (errno == EWOULDBLOCK))
    return fp->rd.status= Busy;
  else if (n <= 0)
    return fp->rd.status= Error;	/* n == 0 means end-of-file */
  else /* (n > 0) */
    fp->rd.pos += n;
  return fp->rd.status= n;
}
/* aio callback: input is available; wake the image via the file's
 * semaphore.  The actual read happens later in asyncFileReadResult(). */
static void readHandler(int fd, void *data, int flags)
{
  signalSemaphoreWithIndex(((FilePtr)data)->sema);
}
/* Arm an asynchronous read: seek to fPosition (a negative value means
 * "current position") and ask the aio layer to signal the semaphore when
 * the descriptor becomes readable.  count is unused here; the transfer
 * size is supplied later to asyncFileReadResult().  Fails the primitive
 * if the seek fails. */
int asyncFileReadStart(AsyncFile *f, int fPosition, int count)
{
  FilePtr fp= 0;
  validate(f);
  fp= (FilePtr)f->state;
  if ((  (fPosition >= 0))	 /* (fPos < 0) => current position */
      && (fp->rd.pos != fPosition))	 /* avoid EPIPE on pty */
    {
      if (lseek(fp->fd, fPosition, SEEK_SET) < 0)
	{
	  perror("lseek");
	  goto fail;
	}
      fp->rd.pos= fPosition;
    }
  fp->rd.status= Busy;	/* result not available until readHandler fires */
  aioHandle(fp->fd, readHandler, AIO_R);
  return 0;
 fail:
  fp->rd.status= Error;
  vm->primitiveFail();
  return 0;
}
/* Answer the status of the last write on f (byte count written, Busy or
   Error) and reset the write channel to Busy for the next transfer. */
int asyncFileWriteResult(AsyncFile *f)
{
  FilePtr fp;
  int result;
  validate(f);	/* fails the primitive on a stale handle */
  fp = (FilePtr)f->state;
  result = fp->wr.status;
  fp->wr.status = Busy;
  return result;
}
/* Drain as much of fp->buf as the descriptor will accept.  On
 * EWOULDBLOCK the aio write handler is re-armed and draining resumes
 * when the descriptor is writable again; completion (total byte count)
 * or Error is recorded in fp->wr.status and signalled on the semaphore.
 * NOTE(review): any errno other than EWOULDBLOCK (including EINTR) is
 * treated as a hard Error — presumably acceptable here; confirm. */
static void writeBuffer(FilePtr fp)
{
  int n= 0;
  while ((n= fp->buf.size - fp->buf.pos) > 0)
    {
      n= write(fp->fd, (void *)(fp->buf.bytes + fp->buf.pos), n);
      if (n < 0)
	switch (errno)
	  {
	  case EWOULDBLOCK:
	    aioHandle(fp->fd, writeHandler, AIO_W);
	    return;
	  default:
	    fp->wr.status= Error;
	    return;
	  }
      fp->buf.pos += n;	/* advance within the staging buffer */
      fp->wr.pos += n;	/* and the logical file position */
    }
  /* completed */
  fp->wr.status= fp->buf.size;
  signalSemaphoreWithIndex(fp->sema);
}
/* aio callback: the descriptor is writable again; continue draining the
 * pending output buffer. */
static void writeHandler(int fd, void *data, int flags)
{
  writeBuffer((FilePtr)data);
}
/* Begin an asynchronous write of count bytes from bufferPtr at file
 * offset fPosition (negative means "current position").  The data is
 * copied into the per-file staging buffer so the caller's memory may be
 * reused immediately; completion is reported via writeBuffer() through
 * the semaphore and asyncFileWriteResult().  Fails the primitive on
 * seek or allocation failure. */
int asyncFileWriteStart(AsyncFile *f, int fPosition, void *bufferPtr, int count)
{
  FilePtr fp= 0;
  validate(f);
  fp= (FilePtr)f->state;
  if ((  (fPosition >= 0))	 /* (fPos < 0) => current position */
      && (fp->wr.pos != fPosition))	 /* avoid EPIPE on tty */
    {
      if (lseek(fp->fd, fPosition, SEEK_SET) < 0)
	{
	  perror("lseek");
	  goto fail;
	}
      fp->wr.pos= fPosition;
    }
  if (count < 1)
    {
      /* nothing to transfer: report immediate zero-byte completion */
      fp->wr.status= 0;
      signalSemaphoreWithIndex(fp->sema);
      return 0;
    }
  if (!allocateBuffer(&fp->buf, count))
    {
      fprintf(stderr, "out of memory\n");
      goto fail;
    }
  memcpy((void *)fp->buf.bytes, bufferPtr, count);
  fp->buf.pos= 0;	 /* current output pointer */
  fp->buf.size= count;	 /* bytes to transfer */
  fp->wr.status= Busy;	 /* transfer in progress */
  writeBuffer(fp);	 /* begin transfer */
  return 0;
 fail:
  fp->wr.status= Error;
  vm->primitiveFail();
  return 0;
}
| 24.162234 | 82 | 0.643588 | [
"object"
] |
65627a01b90b451e60a5fc6f8a8d19ce77d7f611 | 797 | h | C | MobGiAdSDK/AggregationAdThirdSDKs/Common/AdxAdsComponent.framework/Headers/AdxDeviceInfo.h | xlf999/AdsDemo | c9e5ce50fe381fba206be55d4e674a2e859d8c56 | [
"MIT"
] | 1 | 2019-10-12T11:51:11.000Z | 2019-10-12T11:51:11.000Z | MobGiAdSDK/AggregationAdThirdSDKs/Common/AdxAdsComponent.framework/Headers/AdxDeviceInfo.h | xlf999/AdsDemo | c9e5ce50fe381fba206be55d4e674a2e859d8c56 | [
"MIT"
] | null | null | null | MobGiAdSDK/AggregationAdThirdSDKs/Common/AdxAdsComponent.framework/Headers/AdxDeviceInfo.h | xlf999/AdsDemo | c9e5ce50fe381fba206be55d4e674a2e859d8c56 | [
"MIT"
] | null | null | null | //
// Device.h
// AdxAdsComponent
//
// Created by star.liao on 2017/3/30.
// Copyright © 2017年 com.idreamsky. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <SDKCommonModule/JSONModel.h>
@interface AdxDeviceInfo : JSONModel
@property(nonatomic,strong) NSString* ua;
@property(nonatomic,strong) NSString* ip;
@property(nonatomic,strong) NSString* brand;
@property(nonatomic,strong) NSString* model;
@property(nonatomic,assign) int platform;
@property(nonatomic,assign) NSString* version;
@property(nonatomic,strong) NSString* resolution;
@property(nonatomic,assign) int operator;
@property(nonatomic,assign) int net;
@property(nonatomic,strong) NSString* deviceId;
@property(nonatomic,assign) int screenDirection;
@property(nonatomic,assign) double screenSize;
@end
| 24.90625 | 57 | 0.772898 | [
"model"
] |
6563fdfaf54eadc14bbdd5fc826b13d76a903aac | 80,040 | c | C | src/os/solaris/solaris_sigar.c | sbouchex/sigar | 1a6b4fc3ee46d11ec999b9744270c4b65c479739 | [
"Apache-2.0"
] | null | null | null | src/os/solaris/solaris_sigar.c | sbouchex/sigar | 1a6b4fc3ee46d11ec999b9744270c4b65c479739 | [
"Apache-2.0"
] | null | null | null | src/os/solaris/solaris_sigar.c | sbouchex/sigar | 1a6b4fc3ee46d11ec999b9744270c4b65c479739 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright (c) 2004-2008 Hyperic, Inc.
* Copyright (c) 2009 SpringSource, Inc.
* Copyright (c) 2009-2010 VMware, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sigar.h"
#include "sigar_private.h"
#include "sigar_util.h"
#include "sigar_os.h"
#include <inet/ip.h>
#include <inet/tcp.h>
#include <net/if.h>
#include <net/route.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/swap.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/utsname.h>
#include <dlfcn.h>
#include <dirent.h>
#include <sys/vm_usage.h>
#include <zone.h>
#include <sys/statvfs.h>
#include <sys/vnode.h>
#define PROC_ERRNO ((errno == ENOENT) ? ESRCH : errno)
#define SIGAR_USR_UCB_PS "/usr/ucb/ps"
/* like kstat_lookup but start w/ ksp->ks_next instead of kc->kc_chain */
/* Like kstat_lookup() but the scan starts at ksp->ks_next rather than at
 * the head of the chain, so repeated calls walk all matching kstats.
 * NULL ks_module/ks_name and -1 ks_instance act as wildcards.  Sets
 * errno to ENOENT and answers NULL when no further match exists. */
static kstat_t *
kstat_next(kstat_t *ksp, char *ks_module, int ks_instance, char *ks_name)
{
    kstat_t *cur = ksp ? ksp->ks_next : NULL;

    while (cur != NULL) {
        int module_ok   = (ks_module == NULL) || (strcmp(cur->ks_module, ks_module) == 0);
        int instance_ok = (ks_instance == -1) || (cur->ks_instance == ks_instance);
        int name_ok     = (ks_name == NULL) || (strcmp(cur->ks_name, ks_name) == 0);

        if (module_ok && instance_ok && name_ok) {
            return cur;
        }
        cur = cur->ks_next;
    }

    errno = ENOENT;
    return NULL;
}
/* Allocate and initialise the Solaris-specific sigar handle: detect the
 * OS version, probe for /usr/ucb/ps, compute the page-size shift, open
 * the kstat chain and read the boot time.  Returns SIGAR_OK or an errno
 * value.  Fix: the handle allocation is now checked instead of being
 * dereferenced unconditionally; on failure *sig is NULL and ENOMEM is
 * returned. */
int sigar_os_open(sigar_t **sig)
{
    kstat_ctl_t *kc;
    kstat_t *ksp;
    sigar_t *sigar;
    int i, status;
    struct utsname name;
    char *ptr;

    if (!(sigar = malloc(sizeof(*sigar)))) {
        *sig = NULL; /* don't hand the caller a garbage handle */
        return ENOMEM;
    }
    *sig = sigar;

    sigar->log_level = -1; /* log nothing by default */
    sigar->log_impl = NULL;
    sigar->log_data = NULL;

    /* derive the minor version from "5.x" in utsname.release */
    uname(&name);
    if ((ptr = strchr(name.release, '.'))) {
        ptr++;
        sigar->solaris_version = atoi(ptr);
    }
    else {
        sigar->solaris_version = 6;
    }

    /* /usr/ucb/ps is the fallback used to read 64-bit process args from
     * a 32-bit build; honor SIGAR_USE_UCB_PS, else probe for the binary */
    if ((ptr = getenv("SIGAR_USE_UCB_PS"))) {
        sigar->use_ucb_ps = strEQ(ptr, "true");
    }
    else {
        struct stat sb;
        if (stat(SIGAR_USR_UCB_PS, &sb) < 0) {
            sigar->use_ucb_ps = 0;
        }
        else {
            sigar->use_ucb_ps = 1;
        }
    }

    /* pagesize is stored as a shift count: bytes = pages << pagesize */
    sigar->pagesize = 0;
    i = sysconf(_SC_PAGESIZE);
    while ((i >>= 1) > 0) {
        sigar->pagesize++;
    }

    sigar->ticks = sysconf(_SC_CLK_TCK);
    sigar->kc = kc = kstat_open();

    if (!kc) {
        return errno;
    }

    sigar->cpulist.size = 0;
    sigar->ncpu = 0;
    sigar->ks.cpu = NULL;
    sigar->ks.cpu_info = NULL;
    sigar->ks.cpuid = NULL;
    sigar->ks.lcpu = 0;

    /* named-kstat offsets are resolved lazily; -1 == not yet resolved */
    sigar->koffsets.system[0] = -1;
    sigar->koffsets.mempages[0] = -1;
    sigar->koffsets.syspages[0] = -1;

    if ((status = sigar_get_kstats(sigar)) != SIGAR_OK) {
        fprintf(stderr, "status=%d\n", status);
    }

    /* capture boot time once; sigar_uptime_get() derives uptime from it */
    sigar->boot_time = 0;

    if ((ksp = sigar->ks.system) &&
        (kstat_read(kc, ksp, NULL) >= 0))
    {
        sigar_koffsets_init_system(sigar, ksp);

        sigar->boot_time = kSYSTEM(KSTAT_SYSTEM_BOOT_TIME);
    }

    sigar->last_pid = -1;
    sigar->pinfo = NULL;

    /* libproc state, loaded on demand by sigar_init_libproc() */
    sigar->plib = NULL;
    sigar->pgrab = NULL;
    sigar->pfree = NULL;
    sigar->pobjname = NULL;

    sigar->pargs = NULL;

    SIGAR_ZERO(&sigar->mib2);
    sigar->mib2.sd = -1; /* mib2 stream opened lazily */

    return SIGAR_OK;
}
/* Release everything sigar_os_open() (and lazy initialisers) allocated:
 * the kstat handle, the mib2 stream socket, the per-cpu kstat arrays,
 * the cached psinfo buffer, the cpu list, the libproc dlopen handle and
 * the proc-args cache, then the handle itself. */
int sigar_os_close(sigar_t *sigar)
{
    kstat_close(sigar->kc);
    if (sigar->mib2.sd != -1) {
        close_mib2(&sigar->mib2);
    }

    if (sigar->ks.lcpu) {
        free(sigar->ks.cpu);
        free(sigar->ks.cpu_info);
        free(sigar->ks.cpuid);
    }

    if (sigar->pinfo) {
        free(sigar->pinfo);
    }

    if (sigar->cpulist.size != 0) {
        sigar_cpu_list_destroy(sigar, &sigar->cpulist);
    }

    if (sigar->plib) {
        dlclose(sigar->plib);
    }

    if (sigar->pargs) {
        sigar_cache_destroy(sigar->pargs);
    }

    free(sigar);

    return SIGAR_OK;
}
/* Map an OS-specific sigar error code onto its message, or NULL when the
 * generic strerror() text should be used instead. */
char *sigar_os_error_string(sigar_t *sigar, int err)
{
    if (err == SIGAR_EMIB2) {
        return sigar->mib2.errmsg;
    }
    return NULL;
}
/* see usr/src/cmd/zonestat/zonestatd/zonestatd.c */
/* Fixed-layout mirror of the kernel's vmusage_t for getvmusage(2).  The
 * explicit int pad forces the uint64_t members onto 8-byte boundaries so
 * a 32-bit build matches the 64-bit kernel layout. */
typedef struct sigar_vmusage64 {
	id_t vmu_zoneid;	/* zone this usage applies to */
	uint_t vmu_type;	/* entity type (VMUSAGE_* flag) */
	id_t vmu_id;		/* zone/project/user id */
	int vmu_align_next_members_on_8_bytes; /* padding only */
	uint64_t vmu_rss_all;
	uint64_t vmu_rss_private;
	uint64_t vmu_rss_shared;
	uint64_t vmu_swap_all;
	uint64_t vmu_swap_private;
	uint64_t vmu_swap_shared;
} sigar_vmusage64_t;
/* getvmusage() can be CPU expensive; throttle calls to secs: */
#define VMUSAGE_INTERVAL 60
/* Zone-aware memory accounting for non-global zones: prefer the SmartOS
 * memory_cap kstat (rss/physcap); otherwise fall back to getvmusage(2),
 * which is throttled to one real sample per VMUSAGE_INTERVAL seconds.
 * Returns SIGAR_OK or -1 when neither source is usable. */
static int zone_mem_get(sigar_t *sigar, sigar_mem_t *mem)
{
	kstat_ctl_t *kc = sigar->kc;
	kstat_t *ksp;
	char path[MAXPATHLEN];
	sigar_vmusage64_t result;
	size_t nres = 1;
	sigar_uint64_t used = 0, cap = 0;
	int ret;

	/*
	 * The memory_cap kstat is ideal (originally added to SmartOS), but
	 * if that doesn't exist, switch to getvmusage().
	 */
	if ((ksp = kstat_lookup(sigar->kc, "memory_cap", -1, NULL)) &&
	    (kstat_read(sigar->kc, ksp, NULL) != -1))
	{
		kstat_named_t *kn;

		if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "rss"))) {
			used = kn->value.i64;
		}
		if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "physcap"))) {
			cap = kn->value.i64;
		}
	}
	else {
		/* getvmusage() needs the 64-bit layout; probe for it */
		ret = sysinfo(SI_ARCHITECTURE_64, path, sizeof (path));
		if (ret < 0) {
			/* 32-bit kernels not supported via getvmusage() */
			return -1;
		}

		/* since we are not GZ, should only get one result */
		if (getvmusage(VMUSAGE_ZONE, VMUSAGE_INTERVAL, (vmusage_t *)&result,
		    &nres) != 0 || nres != 1) {
			return -1;
		}

		used = result.vmu_rss_all;
		cap = mem->total;
	}

	mem->actual_free = mem->free = cap - used;
	mem->actual_used = mem->used = used;
	sigar_mem_calc_ram(sigar, mem);

	return SIGAR_OK;
}
/* System memory totals.  Inside a non-global zone this delegates to
 * zone_mem_get(); in the global zone free pages come from the syspages
 * kstat, and the reclaimable part of the ZFS ARC (size above c_min) is
 * folded into actual_free/actual_used. */
int sigar_mem_get(sigar_t *sigar, sigar_mem_t *mem)
{
    kstat_ctl_t *kc = sigar->kc;
    kstat_t *ksp;
    sigar_uint64_t kern = 0;

    SIGAR_ZERO(mem);

    /* XXX: is mem hot swappable or can we just do this during open ? */
    mem->total = sysconf(_SC_PHYS_PAGES);
    mem->total <<= sigar->pagesize;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    if (getzoneid() != GLOBAL_ZONEID) {
	    /* zone-aware */
	    return (zone_mem_get(sigar, mem));
    }

    if ((ksp = sigar->ks.syspages) && kstat_read(kc, ksp, NULL) >= 0) {
        sigar_koffsets_init_syspages(sigar, ksp);

        mem->free = kSYSPAGES(KSTAT_SYSPAGES_FREE);
        mem->free <<= sigar->pagesize;

        mem->used = mem->total - mem->free;
    }

    if ((ksp = sigar->ks.mempages) && kstat_read(kc, ksp, NULL) >= 0) {
        sigar_koffsets_init_mempages(sigar, ksp);
    }

    /* XXX mdb ::memstat cachelist/freelist not available to kstat, see: */
    /* http://bugs.opensolaris.org/bugdatabase/view_bug.do?bug_id=6821980 */

    /* ZFS ARC cache. see: http://opensolaris.org/jive/thread.jspa?messageID=393695 */
    if ((ksp = kstat_lookup(sigar->kc, "zfs", 0, "arcstats")) &&
        (kstat_read(sigar->kc, ksp, NULL) != -1))
    {
        kstat_named_t *kn;

        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "size"))) {
            kern = kn->value.i64;
        }
        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "c_min"))) {
            /* c_min cannot be reclaimed they say */
            if (kern > kn->value.i64) {
                kern -= kn->value.i64;
            }
        }
    }

    mem->actual_free = mem->free + kern;
    mem->actual_used = mem->used - kern;

    sigar_mem_calc_ram(sigar, mem);

    return SIGAR_OK;
}
/* Swap totals via swapctl(2) (summing all swap devices, skipping those
 * being deleted) plus cumulative page in/out counters from the per-cpu
 * "vm" kstats.  Returns SIGAR_OK or an errno value.
 * Fix: the swaptbl_t allocation is now checked before use. */
int sigar_swap_get(sigar_t *sigar, sigar_swap_t *swap)
{
    kstat_t *ksp;
    kstat_named_t *kn;
    swaptbl_t *stab;
    int num, i;
    char path[PATH_MAX+1]; /* {un,re}used */

    /* see: man swapctl(2) */
    if ((num = swapctl(SC_GETNSWP, NULL)) == -1) {
        return errno;
    }

    stab = malloc(num * sizeof(stab->swt_ent[0]) + sizeof(*stab));
    if (stab == NULL) {
        return ENOMEM;
    }

    stab->swt_n = num;
    for (i=0; i<num; i++) {
        /* the path contents are never read; share one scratch buffer */
        stab->swt_ent[i].ste_path = path;
    }

    if ((num = swapctl(SC_LIST, stab)) == -1) {
        free(stab);
        return errno;
    }

    num = num < stab->swt_n ? num : stab->swt_n;
    swap->total = swap->free = 0;
    for (i=0; i<num; i++) {
        if (stab->swt_ent[i].ste_flags & ST_INDEL) {
            continue; /* swap file is being deleted */
        }
        swap->total += stab->swt_ent[i].ste_pages;
        swap->free  += stab->swt_ent[i].ste_free;
    }
    free(stab);

    swap->total <<= sigar->pagesize;
    swap->free  <<= sigar->pagesize;
    swap->used  = swap->total - swap->free;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }
    if (!(ksp = kstat_lookup(sigar->kc, "cpu", -1, "vm"))) {
        swap->page_in = swap->page_out = SIGAR_FIELD_NOTIMPL;
        return SIGAR_OK;
    }

    swap->page_in = swap->page_out = 0;

    /* XXX: these stats do not exist in this form on solaris 8 or 9.
     * they are in the raw cpu_stat struct, but thats not
     * binary compatible
     */
    do {
        if (kstat_read(sigar->kc, ksp, NULL) < 0) {
            break;
        }

        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "pgin"))) {
            swap->page_in += kn->value.i64;  /* vmstat -s | grep "page ins" */
        }
        if ((kn = (kstat_named_t *)kstat_data_lookup(ksp, "pgout"))) {
            swap->page_out += kn->value.i64; /* vmstat -s | grep "page outs" */
        }
    } while ((ksp = kstat_next(ksp, "cpu", -1, "vm")));

    return SIGAR_OK;
}
#ifndef KSTAT_NAMED_STR_PTR
/* same offset as KSTAT_NAMED_STR_PTR(brand) */
#define KSTAT_NAMED_STR_PTR(n) (char *)((n)->value.i32)
#endif
/* Fill info->vendor from the cpu_info "brand" kstat (Solaris 10+ only).
 * The brand string is matched against known vendor substrings, defaulting
 * to "Sun".  Returns 1 when a brand was found, 0 otherwise. */
static int get_chip_brand(sigar_t *sigar, int processor,
                          sigar_cpu_info_t *info)
{
    kstat_t *ksp = sigar->ks.cpu_info[processor];
    kstat_named_t *brand;

    if (sigar->solaris_version < 10) {
        /* don't bother; doesn't exist. */
        return 0;
    }

    if (ksp &&
        (kstat_read(sigar->kc, ksp, NULL) != -1) &&
        (brand = (kstat_named_t *)kstat_data_lookup(ksp, "brand")))
    {
        char *name = KSTAT_NAMED_STR_PTR(brand);
        char *vendor = "Sun";
        char *vendors[] = {
            "Intel", "AMD", NULL
        };
        int i;

        if (!name) {
            return 0;
        }

        for (i=0; vendors[i]; i++) {
            if (strstr(name, vendors[i])) {
                vendor = vendors[i];
                break;
            }
        }

        SIGAR_SSTRCPY(info->vendor, vendor);
#if 0
        SIGAR_SSTRCPY(info->model, name);
        sigar_cpu_model_adjust(sigar, info);
#endif
        return 1;
    }
    else {
        return 0;
    }
}
/* Cache value destructor for the chip-id cache in sigar_cpu_list_get():
 * entries store an index encoded in the pointer itself, so nothing needs
 * to be freed. */
static void free_chip_id(void *ptr)
{
    /*noop*/
}
/* Answer the physical chip id of a logical processor via its cpu_info
 * kstat, or -1 when it cannot be determined. */
static int get_chip_id(sigar_t *sigar, int processor)
{
    kstat_t *ksp = sigar->ks.cpu_info[processor];
    kstat_named_t *chipid;

    if (!ksp) {
        return -1;
    }
    if (kstat_read(sigar->kc, ksp, NULL) == -1) {
        return -1;
    }
    chipid = (kstat_named_t *)kstat_data_lookup(ksp, "chip_id");
    return chipid ? chipid->value.i32 : -1;
}
/* Aggregate per-CPU times into one machine-wide total.  Collection is
 * delegated to sigar_cpu_list_get() using the handle's cached list; each
 * field is then summed across all entries. */
int sigar_cpu_get(sigar_t *sigar, sigar_cpu_t *cpu)
{
    int status = sigar_cpu_list_get(sigar, &sigar->cpulist);
    int i;

    if (status != SIGAR_OK) {
        return status;
    }

    SIGAR_ZERO(cpu);

    for (i = 0; i < sigar->cpulist.number; i++) {
        sigar_cpu_t *each = &sigar->cpulist.data[i];

        cpu->user  += each->user;
        cpu->sys   += each->sys;
        cpu->idle  += each->idle;
        cpu->nice  += each->nice;
        cpu->wait  += each->wait;
        cpu->total += each->total;
    }

    return SIGAR_OK;
}
/* Collect per-CPU times from the raw cpu_stat kstats.  Offline CPUs are
 * skipped.  Unless sigar->cpu_list_cores is set, logical processors that
 * share a physical chip_id are merged into one entry (tracked via a
 * temporary chip_id -> list-index cache).  When called with the handle's
 * own cached list, the list storage is created once and reused. */
int sigar_cpu_list_get(sigar_t *sigar, sigar_cpu_list_t *cpulist)
{
    kstat_ctl_t *kc = sigar->kc;
    kstat_t *ksp;
    uint_t cpuinfo[CPU_STATES];
    unsigned int i;
    int is_debug = SIGAR_LOG_IS_DEBUG(sigar);
    sigar_cache_t *chips;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    if (cpulist == &sigar->cpulist) {
        if (sigar->cpulist.size == 0) {
            /* create once */
            sigar_cpu_list_create(cpulist);
        }
        else {
            /* reset, re-using cpulist.data */
            sigar->cpulist.number = 0;
        }
    }
    else {
        sigar_cpu_list_create(cpulist);
    }

    if (is_debug) {
        sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                         "[cpu_list] OS reports %d CPUs",
                         sigar->ncpu);
    }

    /* maps chip_id -> 1-based index into cpulist->data */
    chips = sigar_cache_new(16);
    chips->free_value = free_chip_id;

    for (i=0; i<sigar->ncpu; i++) {
        sigar_cpu_t *cpu;
        char *buf;
        int chip_id;
        sigar_cache_entry_t *ent;

        if (!CPU_ONLINE(sigar->ks.cpuid[i])) {
            sigar_log_printf(sigar, SIGAR_LOG_INFO,
                             "cpu %d (id=%d) is offline",
                             i, sigar->ks.cpuid[i]);
            continue;
        }

        if (!(ksp = sigar->ks.cpu[i])) {
            sigar_log_printf(sigar, SIGAR_LOG_ERROR,
                             "NULL ksp for cpu %d (id=%d)",
                             i, sigar->ks.cpuid[i]);
            continue; /* shouldnot happen */
        }

        if (kstat_read(kc, ksp, NULL) < 0) {
            sigar_log_printf(sigar, SIGAR_LOG_ERROR,
                             "kstat_read failed for cpu %d (id=%d): %s",
                             i, sigar->ks.cpuid[i],
                             sigar_strerror(sigar, errno));
            continue; /* shouldnot happen */
        }

        /*
         * cpu_stat_t is not binary compatible between solaris versions.
         * since cpu_stat is a 'raw' kstat and not 'named' we cannot
         * use name based lookups as we do for others.
         * the start of the cpu_stat_t structure is binary compatible,
         * which looks like so:
         *   typedef struct cpu_stat {
         *     kmutex_t        cpu_stat_lock;
         *     cpu_sysinfo_t   cpu_sysinfo;
         *     ...
         *   typedef struct cpu_sysinfo {
         *     ulong cpu[CPU_STATES];
         *     ...
         * we just copy the piece we need below:
         */
        buf = ksp->ks_data;
        buf += sizeof(kmutex_t);
        memcpy(&cpuinfo[0], buf, sizeof(cpuinfo));
        chip_id = sigar->cpu_list_cores ? -1 : get_chip_id(sigar, i);

        if (chip_id == -1) {
            SIGAR_CPU_LIST_GROW(cpulist);
            cpu = &cpulist->data[cpulist->number++];
            SIGAR_ZERO(cpu);
        }
        else {
            /* merge times of logical processors */
            ent = sigar_cache_get(chips, chip_id);
            if (ent->value) {
                cpu = &cpulist->data[(long)ent->value-1];
            }
            else {
                SIGAR_CPU_LIST_GROW(cpulist);
                cpu = &cpulist->data[cpulist->number++];
                ent->value = (void *)(long)cpulist->number;
                SIGAR_ZERO(cpu);

                if (is_debug) {
                    sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                                     "[cpu_list] Merging times of"
                                     " logical processors for chip_id=%d",
                                     chip_id);
                }
            }
        }

        cpu->user += SIGAR_TICK2MSEC(cpuinfo[CPU_USER]);
        cpu->sys  += SIGAR_TICK2MSEC(cpuinfo[CPU_KERNEL]);
        cpu->idle += SIGAR_TICK2MSEC(cpuinfo[CPU_IDLE]);
        cpu->wait += SIGAR_TICK2MSEC(cpuinfo[CPU_WAIT]);
        cpu->nice += 0; /* no cpu->nice */
        cpu->total = cpu->user + cpu->sys + cpu->idle + cpu->wait;
    }

    sigar_cache_destroy(chips);

    return SIGAR_OK;
}
/* Uptime is simply now minus the boot time captured in sigar_os_open();
 * a missing boot time (which should not happen) yields 0. */
int sigar_uptime_get(sigar_t *sigar,
                     sigar_uptime_t *uptime)
{
    uptime->uptime =
        sigar->boot_time ? (time(NULL) - sigar->boot_time) : 0;
    return SIGAR_OK;
}
/* kstat offsets for the 1/5/15 minute load averages, in output order */
static int loadavg_keys[] = {
    KSTAT_SYSTEM_LOADAVG_1,
    KSTAT_SYSTEM_LOADAVG_2,
    KSTAT_SYSTEM_LOADAVG_3
};
/* Read the 1/5/15 minute load averages from the "system" kstat.  The
 * kernel stores them as fixed-point values scaled by FSCALE. */
int sigar_loadavg_get(sigar_t *sigar,
                      sigar_loadavg_t *loadavg)
{
    kstat_t *ksp;
    int i;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    if (!(ksp = sigar->ks.system)) {
        return -1;
    }

    if (kstat_read(sigar->kc, ksp, NULL) < 0) {
        return -1;
    }

    sigar_koffsets_init_system(sigar, ksp);

    for (i=0; i<3; i++) {
        /* convert from fixed point to floating point */
        loadavg->loadavg[i] = (double)kSYSTEM(loadavg_keys[i]) / FSCALE;
    }

    return SIGAR_OK;
}
#define LIBPROC "/usr/lib/libproc.so"
#define CHECK_PSYM(s) \
if (!sigar->s) { \
sigar_log_printf(sigar, SIGAR_LOG_WARN, \
"[%s] Symbol not found: %s", \
SIGAR_FUNC, #s); \
dlclose(sigar->plib); \
sigar->plib = NULL; \
return SIGAR_ENOTIMPL; \
}
/* readlink(2) wrapper that NUL-terminates the result (readlink itself
 * does not).  Answers buffer on success, NULL on failure (errno set by
 * readlink). */
static char *proc_readlink(const char *name, char *buffer, size_t size)
{
    int len = readlink(name, buffer, size - 1);

    if (len < 0) {
        return NULL;
    }

    buffer[len] = '\0';
    return buffer;
}
/* Lazily dlopen libproc.so and resolve the symbols sigar needs.  The
 * handle and function pointers are cached on the sigar handle; a second
 * call is a no-op.  Returns SIGAR_OK or SIGAR_ENOTIMPL when the library
 * or a required symbol (pgrab/pfree/pobjname) is missing. */
static int sigar_init_libproc(sigar_t *sigar)
{
    if (sigar->plib) {
        return SIGAR_OK;
    }

    /* libproc.so ships with 5.8+ */
    /* interface is undocumented, see libproc.h in the sun jdk sources */
    sigar->plib = dlopen(LIBPROC, RTLD_LAZY);

    if (!sigar->plib) {
        sigar_log_printf(sigar, SIGAR_LOG_WARN,
                         "[%s] dlopen(%s) = %s",
                         SIGAR_FUNC, LIBPROC, dlerror());
        return SIGAR_ENOTIMPL;
    }

    sigar->pgrab    = (proc_grab_func_t)dlsym(sigar->plib, "Pgrab");
    sigar->pfree    = (proc_free_func_t)dlsym(sigar->plib, "Pfree");
    sigar->pcreate_agent = (proc_create_agent_func_t)dlsym(sigar->plib, "Pcreate_agent");
    sigar->pdestroy_agent = (proc_destroy_agent_func_t)dlsym(sigar->plib, "Pdestroy_agent");
    sigar->pobjname = (proc_objname_func_t)dlsym(sigar->plib, "Pobjname");
    sigar->pexename = (proc_exename_func_t)dlsym(sigar->plib, "Pexecname");
    sigar->pdirname = (proc_dirname_func_t)dlsym(sigar->plib, "proc_dirname");
    sigar->pfstat64 = (proc_fstat64_func_t)dlsym(sigar->plib, "pr_fstat64");
    sigar->pgetsockopt = (proc_getsockopt_func_t)dlsym(sigar->plib, "pr_getsockopt");
    sigar->pgetsockname = (proc_getsockname_func_t)dlsym(sigar->plib, "pr_getsockname");

    /* these three are mandatory; the rest may legitimately be NULL */
    CHECK_PSYM(pgrab);
    CHECK_PSYM(pfree);
    CHECK_PSYM(pobjname);

    return SIGAR_OK;
}
/* from libproc.h, not included w/ solaris distro */
/* Error codes from Pgrab(), Pfgrab_core(), and Pgrab_core() */
#define G_STRANGE -1 /* Unanticipated error, errno is meaningful */
#define G_NOPROC 1 /* No such process */
#define G_NOCORE 2 /* No such core file */
#define G_NOPROCORCORE 3 /* No such proc or core (for proc_arg_grab) */
#define G_NOEXEC 4 /* Cannot locate executable file */
#define G_ZOMB 5 /* Zombie process */
#define G_PERM 6 /* No permission */
#define G_BUSY 7 /* Another process has control */
#define G_SYS 8 /* System process */
#define G_SELF 9 /* Process is self */
#define G_INTR 10 /* Interrupt received while grabbing */
#define G_LP64 11 /* Process is _LP64, self is ILP32 */
#define G_FORMAT 12 /* File is not an ELF format core file */
#define G_ELF 13 /* Libelf error, elf_errno() is meaningful */
#define G_NOTE 14 /* Required PT_NOTE Phdr not present in core */
/* Attach to process pid via libproc's Pgrab, mapping its G_* error codes
 * onto errno-style values.  func is the caller's name, used for logging
 * unexpected failures.  On success *phandle holds the grabbed process.
 * NOTE(review): the hard-coded 0x01 flag is presumably one of the
 * PGRAB_* constants from libproc.h — confirm against that header. */
static int sigar_pgrab(sigar_t *sigar, sigar_pid_t pid,
                       const char *func,
                       struct ps_prochandle **phandle)
{
    int pstatus;

    if (!(*phandle = sigar->pgrab(pid, 0x01, &pstatus))) {
        switch (pstatus) {
          case G_NOPROC:
            return ESRCH;
          case G_PERM:
            return EACCES;
          default:
            sigar_log_printf(sigar, SIGAR_LOG_ERROR,
                             "[%s] Pgrab error=%d",
                             func, pstatus);
            return ENOTSUP; /*XXX*/
        }
    }

    return SIGAR_OK;
}
/* Enumerate pids by scanning /proc (shared procfs implementation). */
int sigar_os_proc_list_get(sigar_t *sigar,
                           sigar_proc_list_t *proclist)
{
    return sigar_proc_list_procfs_get(sigar, proclist);
}
/* Per-process memory: virtual size and RSS from psinfo (reported by the
 * kernel in kilobytes, hence the << 10), fault counters from prusage
 * when readable.  share is not available on Solaris. */
int sigar_proc_mem_get(sigar_t *sigar, sigar_pid_t pid,
                       sigar_proc_mem_t *procmem)
{
    int status = sigar_proc_psinfo_get(sigar, pid);
    psinfo_t *pinfo = sigar->pinfo;
    prusage_t usage;

    if (status != SIGAR_OK) {
        return status;
    }

    procmem->size     = pinfo->pr_size << 10;
    procmem->resident = pinfo->pr_rssize << 10;
    procmem->share    = SIGAR_FIELD_NOTIMPL;

    if (sigar_proc_usage_get(sigar, &usage, pid) == SIGAR_OK) {
        procmem->minor_faults = usage.pr_minf;
        procmem->major_faults = usage.pr_majf;
        procmem->page_faults =
            procmem->minor_faults +
            procmem->major_faults;
    }
    else {
        /* /proc/<pid>/usage not readable; leave the counters unknown */
        procmem->minor_faults = SIGAR_FIELD_NOTIMPL;
        procmem->major_faults = SIGAR_FIELD_NOTIMPL;
        procmem->page_faults  = SIGAR_FIELD_NOTIMPL;
    }

    return SIGAR_OK;
}
/* Cumulative process i/o.  Solaris only exposes the combined character
 * count (prusage pr_ioch); separate read/write byte counts are not
 * available. */
int sigar_proc_cumulative_disk_io_get(sigar_t *sigar, sigar_pid_t pid,
                           sigar_proc_cumulative_disk_io_t *proc_cumulative_disk_io)
{
    prusage_t usage;
    int status;

    if ((status = sigar_proc_usage_get(sigar, &usage, pid)) != SIGAR_OK) {
        return status;
    }
    proc_cumulative_disk_io->bytes_read = SIGAR_FIELD_NOTIMPL;
    proc_cumulative_disk_io->bytes_written = SIGAR_FIELD_NOTIMPL;
    proc_cumulative_disk_io->bytes_total = usage.pr_ioch;

    return SIGAR_OK;
}
/* Fill in the real and effective uid/gid of pid from its psinfo. */
int sigar_proc_cred_get(sigar_t *sigar, sigar_pid_t pid,
                        sigar_proc_cred_t *proccred)
{
    psinfo_t *pinfo;
    int status = sigar_proc_psinfo_get(sigar, pid);

    if (status != SIGAR_OK) {
        return status;
    }
    pinfo = sigar->pinfo;

    proccred->uid  = pinfo->pr_uid;
    proccred->gid  = pinfo->pr_gid;
    proccred->euid = pinfo->pr_euid;
    proccred->egid = pinfo->pr_egid;

    return SIGAR_OK;
}
#define TIMESTRUCT_2MSEC(t) \
((t.tv_sec * MILLISEC) + (t.tv_nsec / (NANOSEC/MILLISEC)))
/* Process CPU times in milliseconds plus absolute start time.  The
 * prusage create time is relative to boot, so boot_time is added back.
 * Works around a Solaris 10 quirk where prusage reports a negative
 * pr_utime for the current process by re-reading the times from
 * pstatus. */
int sigar_proc_time_get(sigar_t *sigar, sigar_pid_t pid,
                        sigar_proc_time_t *proctime)
{
    prusage_t usage;
    int status;

    if ((status = sigar_proc_usage_get(sigar, &usage, pid)) != SIGAR_OK) {
        return status;
    }

    proctime->start_time = usage.pr_create.tv_sec + sigar->boot_time;
    proctime->start_time *= MILLISEC;

    if (usage.pr_utime.tv_sec < 0) {
        /* XXX wtf? seen on solaris 10, only for the self process */
        pstatus_t pstatus;

        status = sigar_proc_status_get(sigar, &pstatus, pid);

        if (status != SIGAR_OK) {
            return status;
        }

        usage.pr_utime.tv_sec  = pstatus.pr_utime.tv_sec;
        usage.pr_utime.tv_nsec = pstatus.pr_utime.tv_nsec;
        usage.pr_stime.tv_sec  = pstatus.pr_stime.tv_sec;
        usage.pr_stime.tv_nsec = pstatus.pr_stime.tv_nsec;
    }

    proctime->user = TIMESTRUCT_2MSEC(usage.pr_utime);
    proctime->sys  = TIMESTRUCT_2MSEC(usage.pr_stime);
    proctime->total = proctime->user + proctime->sys;

    return SIGAR_OK;
}
/* Basic process state from psinfo: name, parent, tty, priority, nice
 * (psinfo stores nice biased by NZERO), lwp count, last CPU, and the
 * scheduler state mapped onto sigar's single-letter convention
 * (R=runnable, Z=zombie, S=sleeping, T=stopped, D=idling). */
int sigar_proc_state_get(sigar_t *sigar, sigar_pid_t pid,
                         sigar_proc_state_t *procstate)
{
    int status = sigar_proc_psinfo_get(sigar, pid);
    psinfo_t *pinfo = sigar->pinfo;

    if (status != SIGAR_OK) {
        return status;
    }

    SIGAR_SSTRCPY(procstate->name, pinfo->pr_fname);
    procstate->ppid = pinfo->pr_ppid;
    procstate->tty  = pinfo->pr_ttydev;
    procstate->priority = pinfo->pr_lwp.pr_pri;
    procstate->nice     = pinfo->pr_lwp.pr_nice - NZERO;
    procstate->threads  = pinfo->pr_nlwp;
    procstate->processor = pinfo->pr_lwp.pr_onpro;

    switch (pinfo->pr_lwp.pr_state) {
      case SONPROC:
      case SRUN:
        procstate->state = 'R';
        break;
      case SZOMB:
        procstate->state = 'Z';
        break;
      case SSLEEP:
        procstate->state = 'S';
        break;
      case SSTOP:
        procstate->state = 'T';
        break;
      case SIDL:
        procstate->state = 'D';
        break;
    }

    return SIGAR_OK;
}
/* Cached command line for one pid, keyed by the process start time so a
 * recycled pid invalidates the entry. */
typedef struct {
    int timestamp;    /* process start time (seconds) */
    char *args;       /* heap copy of the ps(1) command line, or NULL */
} pargs_t;

/* sigar_cache value destructor for pargs_t entries. */
static void pargs_free(void *value)
{
    pargs_t *pargs = (pargs_t *)value;
    if (pargs->args != NULL) {
        free(pargs->args);
    }
    free(pargs);
}
/* Fallback argv reader: harvest the command line of a 64-bit process via
 * `/usr/ucb/ps -ww <pid>` (a 32-bit build cannot read a 64-bit /proc
 * address space directly).  Results are cached per pid, keyed by the
 * process start time so a recycled pid drops the stale line.  The cached
 * line is split on spaces into procargs.
 * Fixes: both mallocs are now checked, and the heap copy is explicitly
 * NUL-terminated — previously an empty command line left the copied
 * buffer uninitialised, so a later cache hit iterated over garbage. */
static int ucb_ps_args_get(sigar_t *sigar, sigar_pid_t pid,
                           sigar_proc_args_t *procargs,
                           int timestamp)
{
    char buffer[9086], *args=NULL, *arg;
    sigar_cache_entry_t *ent;
    FILE *fp;
    pargs_t *pargs;

    if (!sigar->pargs) {
        sigar->pargs = sigar_cache_new(15);
        sigar->pargs->free_value = pargs_free;
    }

    ent = sigar_cache_get(sigar->pargs, pid);

    if (ent->value) {
        pargs = (pargs_t *)ent->value;
        if (pargs->timestamp != timestamp) {
            /* pid has been recycled; drop the stale command line */
            if (pargs->args) {
                free(pargs->args);
                pargs->args = NULL;
            }
        }
    }
    else {
        pargs = malloc(sizeof(*pargs));
        if (!pargs) {
            return ENOMEM;
        }
        pargs->args = NULL;
        ent->value = pargs;
    }

    pargs->timestamp = timestamp;

    if (pargs->args) {
        args = pargs->args; /* cache hit */
    }
    else {
        snprintf(buffer, sizeof(buffer),
                 SIGAR_USR_UCB_PS " -ww %ld", (long)pid);

        if (!(fp = popen(buffer, "r"))) {
            return errno;
        }
        /* skip header */
        (void)fgets(buffer, sizeof(buffer), fp);
        if ((args = fgets(buffer, sizeof(buffer), fp))) {
            int len;

            /* skip PID,TT,S,TIME */
            args = sigar_skip_multiple_token(args, 4);
            SIGAR_SKIP_SPACE(args);
            len = strlen(args);
            if (len > 0) {
                args[len-1] = '\0'; /* chop \n */
            }

            pargs->args = malloc(len+1);
            if (pargs->args) {
                memcpy(pargs->args, args, len);
                pargs->args[len] = '\0'; /* terminate even when len == 0 */
            }
            else {
                args = NULL; /* allocation failed; report ESRCH below */
            }
        }

        pclose(fp);

        if (!args) {
            return ESRCH;
        }
    }

    while (*args && (arg = sigar_getword(&args, ' '))) {
        SIGAR_PROC_ARGS_GROW(procargs);
        procargs->data[procargs->number++] = arg;
    }

    return SIGAR_OK;
}
/* Read the argv vector of pid by seeking into /proc/<pid>/as at the
 * pr_argv offset from psinfo: first the array of argument pointers, then
 * each NUL-terminated string.  64-bit targets (when this is a 32-bit
 * build) and EACCES fall back to ucb_ps_args_get() when enabled.  Each
 * argument is heap-allocated into procargs. */
int sigar_os_proc_args_get(sigar_t *sigar, sigar_pid_t pid,
                           sigar_proc_args_t *procargs)
{
    psinfo_t *pinfo;
    int fd, status;
    char buffer[9086];
    char *argvb[56];
    char **argvp = argvb;
    int n;
    size_t nread = 0;
    unsigned int argv_size;

    if ((status = sigar_proc_psinfo_get(sigar, pid)) != SIGAR_OK) {
        return status;
    }
    pinfo = sigar->pinfo;

    if (pinfo->pr_argc == 0) {
        procargs->number = 0;
        return SIGAR_OK;
    }
    else if (pinfo->pr_dmodel != PR_MODEL_NATIVE) {
        /* we are compiled in 32bit mode
         * punt any 64bit native process,
         * sizeof our structures can't handle.
         */
        if (sigar->use_ucb_ps) {
            return ucb_ps_args_get(sigar, pid, procargs,
                                   pinfo->pr_start.tv_sec);
        }
        else {
            return ENOTSUP;
        }
    }

    argv_size = sizeof(*argvp) * pinfo->pr_argc;

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/as");

    if ((fd = open(buffer, O_RDONLY)) < 0) {
        if ((errno == EACCES) && sigar->use_ucb_ps) {
            return ucb_ps_args_get(sigar, pid, procargs,
                                   pinfo->pr_start.tv_sec);
        }
        else {
            return PROC_ERRNO;
        }
    }

    /* spill to the heap when the pointer array outgrows the stack buffer */
    if (argv_size > sizeof(argvb)) {
        argvp = malloc(argv_size);
    }

    /* read the argv pointer array itself */
    if ((nread = pread(fd, argvp, argv_size, pinfo->pr_argv)) <= 0) {
        close(fd);
        if (argvp != argvb) {
            free(argvp);
        }
        return errno;
    }

    /* then each argument string the pointers refer to */
    for (n = 0; n < pinfo->pr_argc; n++) {
        int alen;
        char *arg;

        if ((nread = pread(fd, buffer, sizeof(buffer)-1, (off_t)argvp[n])) <= 0) {
            close(fd);
            if (argvp != argvb) {
                free(argvp);
            }
            return errno;
        }

        buffer[nread] = '\0';
        alen = strlen(buffer)+1;
        arg = malloc(alen);
        memcpy(arg, buffer, alen);

        SIGAR_PROC_ARGS_GROW(procargs);
        procargs->data[procargs->number++] = arg;
    }

    if (argvp != argvb) {
        free(argvp);
    }

    close(fd);

    return SIGAR_OK;
}
/* Walk the environment of pid via /proc/<pid>/as: read the envp pointer
 * array (capped at 512 entries) at pinfo->pr_envp, then each "KEY=value"
 * string, invoking procenv->env_getter for every pair.  Iteration stops
 * early when the callback returns non-SIGAR_OK or a malformed entry
 * (no '=') is seen. */
int sigar_proc_env_get(sigar_t *sigar, sigar_pid_t pid,
                       sigar_proc_env_t *procenv)
{
    psinfo_t *pinfo;
    int fd, status;
    char buffer[BUFSIZ], *offsets[512];
    size_t nread;
    int n=0, max=sizeof(offsets)/sizeof(char *);

    if ((status = sigar_proc_psinfo_get(sigar, pid)) != SIGAR_OK) {
        return status;
    }
    pinfo = sigar->pinfo;

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/as");

    if ((fd = open(buffer, O_RDONLY)) < 0) {
        return PROC_ERRNO;
    }

    if ((nread = pread(fd, offsets, sizeof(offsets),
                       pinfo->pr_envp)) <= 0)
    {
        close(fd);
        return errno;
    }

    while ((n < max) && offsets[n]) {
        char *val;
        int klen, vlen, status;
        char key[128]; /* XXX is there a max key size? */

        if ((nread = pread(fd, buffer, sizeof(buffer),
                           (off_t)offsets[n++])) <= 0)
        {
            close(fd);
            return errno;
        }

        val = strchr(buffer, '=');
        if (val == NULL) {
            break; /*XXX*/
        }
        klen = val - buffer;
        SIGAR_SSTRCPY(key, buffer);
        key[klen] = '\0';  /* truncate at the '=' */
        ++val;

        vlen = strlen(val);

        status = procenv->env_getter(procenv->data,
                                     key, klen, val, vlen);

        if (status != SIGAR_OK) {
            /* not an error; just stop iterating */
            break;
        }
    }

    close(fd);

    return SIGAR_OK;
}
/* Only the open-descriptor count is reported on Solaris; delegate to the
 * shared /proc fd-directory scan. */
int sigar_proc_fd_get(sigar_t *sigar, sigar_pid_t pid,
                      sigar_proc_fd_t *procfd)
{
    return sigar_proc_fd_count(sigar, pid, &procfd->total);
}
/* Solaris 10+: resolve the executable, cwd and root directly from the
 * /proc/<pid>/path symlinks.  A link that cannot be read yields an empty
 * string rather than an error. */
static int sigar_proc_path_exe_get(sigar_t *sigar, sigar_pid_t pid,
                                   sigar_proc_exe_t *procexe)
{
    /* solaris 10+ */
    char buffer[BUFSIZ];

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/path/a.out");
    if (!proc_readlink(buffer, procexe->name, sizeof(procexe->name))) {
        procexe->name[0] = '\0';
    }

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/path/cwd");
    if (!proc_readlink(buffer, procexe->cwd, sizeof(procexe->cwd))) {
        procexe->cwd[0] = '\0';
    }

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/path/root");
    if (!proc_readlink(buffer, procexe->root, sizeof(procexe->root))) {
        procexe->root[0] = '\0';
    }

    return SIGAR_OK;
}
/* sigar_proc_modules_t callback used by sigar_proc_exe_get(): capture
 * the first module name (the executable itself) into procexe->name and
 * stop iteration by returning a non-SIGAR_OK value. */
static int proc_module_get_exe(void *data, char *name, int len)
{
    sigar_proc_exe_t *procexe = (sigar_proc_exe_t *)data;
    SIGAR_STRNCPY(procexe->name, name, sizeof(procexe->name));
    return !SIGAR_OK; /* break loop */
}
/* Search $PATH for procexe->name (a bare program name) and, when found,
 * replace procexe->name with the absolute path.  Returns SIGAR_OK on a
 * match, ENOENT when nothing executable was found, EINVAL when PATH is
 * unset.
 * Fixes: the final PATH segment (after the last ':') is now searched —
 * the old loop broke out before testing it; a successful match returns
 * SIGAR_OK instead of unconditionally answering ENOENT; candidates are
 * built with a bounds check instead of unbounded strcat; empty PATH
 * segments are skipped rather than probed as "/<name>". */
static int sigar_which_exe_get(sigar_t *sigar, sigar_proc_exe_t *procexe)
{
    char *path = getenv("PATH");
    char exe[PATH_MAX];

    if (path == NULL) {
        return EINVAL;
    }

    while (*path) {
        char *sep = strchr(path, ':');
        size_t len = sep ? (size_t)(sep - path) : strlen(path);

        /* build "<dir>/<name>", skipping empty and over-long segments */
        if (len && (len + 1 + strlen(procexe->name) < sizeof(exe))) {
            memcpy(exe, path, len);
            exe[len] = '/';
            strcpy(exe + len + 1, procexe->name);

            if (access(exe, X_OK) == 0) {
                SIGAR_STRNCPY(procexe->name, exe, sizeof(procexe->name));
                return SIGAR_OK;
            }
        }

        if (!sep) {
            break; /* last segment searched */
        }
        path = sep + 1;
    }

    return ENOENT;
}
/* Resolve executable path, cwd and root of pid.  Solaris 10+ uses the
 * /proc path symlinks; older releases go through libproc: the current
 * process enumerates its own modules (Pgrab would fail with G_SELF),
 * any other pid is grabbed and asked via Pexecname.  cwd/root are then
 * resolved with libproc's proc_dirname on the /proc links.  Unresolvable
 * fields are returned as empty strings. */
int sigar_proc_exe_get(sigar_t *sigar, sigar_pid_t pid,
                       sigar_proc_exe_t *procexe)
{
    int status;
    char buffer[BUFSIZ];
    struct ps_prochandle *phandle;

    if (sigar->solaris_version >= 10) {
        return sigar_proc_path_exe_get(sigar, pid, procexe);
    }

    if ((status = sigar_init_libproc(sigar)) != SIGAR_OK) {
        return status;
    }

    procexe->name[0] = '\0';

    /* Pgrab would return G_SELF error */
    if (pid == sigar_pid_get(sigar)) {
        sigar_proc_modules_t procmods;
        procmods.module_getter = proc_module_get_exe;
        procmods.data = procexe;

        status =
            sigar_dlinfo_modules(sigar, &procmods);
        if (status == SIGAR_OK) {
            if (procexe->name[0] != '/') {
                /* relative name: resolve it against $PATH */
                sigar_which_exe_get(sigar, procexe);
            }
        }
    }
    else {
        status = sigar_pgrab(sigar, pid, SIGAR_FUNC, &phandle);

        if (status == SIGAR_OK) {
            sigar->pexename(phandle, procexe->name, sizeof(procexe->name));
            sigar->pfree(phandle);
        }
    }

    if (procexe->name[0] == '\0') {
        /*XXX*/
    }

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/cwd");

    if (!sigar->pdirname(buffer, procexe->cwd, sizeof(procexe->cwd))) {
        procexe->cwd[0] = '\0';
    }

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/root");

    if (!(sigar->pdirname(buffer, procexe->root, sizeof(procexe->root)))) {
        procexe->root[0] = '\0';
    }

    return SIGAR_OK;
}
/* Feed one batch of prxmap_t entries to the procmods callback, resolving
 * each mapping's object name via libproc.  Anonymous mappings and
 * repeated/zero inodes are skipped so each mapped file is reported once.
 * Returns SIGAR_OK, or the callback's status when it asks to stop. */
static int sigar_read_xmaps(sigar_t *sigar,
                            prxmap_t *xmaps, int total,
                            unsigned long *last_inode,
                            struct ps_prochandle *phandle,
                            sigar_proc_modules_t *procmods)
{
    int status, i;
    unsigned long inode;
    char buffer[BUFSIZ];

    for (i=0; i<total; i++) {
        if (xmaps[i].pr_mflags & MA_ANON) {
            continue; /* heap, stack, etc */
        }

        /* consecutive segments of the same file share an inode;
         * *last_inode carries state across batches */
        inode = xmaps[i].pr_ino;
        if ((inode == 0) || (inode == *last_inode)) {
            *last_inode = 0;
            continue;
        }

        *last_inode = inode;

        sigar->pobjname(phandle, xmaps[i].pr_vaddr, buffer, sizeof(buffer));

        status =
            procmods->module_getter(procmods->data, buffer, strlen(buffer));

        if (status != SIGAR_OK) {
            /* not an error; just stop iterating */
            return status;
        }
    }

    return SIGAR_OK;
}
/* Enumerate the mapped objects (modules) of another process by reading
 * /proc/<pid>/xmap in fixed-size batches and translating each mapping
 * with libproc (via sigar_read_xmaps).  Requires Pgrab on the target. */
static int sigar_pgrab_modules(sigar_t *sigar, sigar_pid_t pid,
                               sigar_proc_modules_t *procmods)
{
    int fd, pstatus;
    off_t map_size, nread;
    unsigned long last_inode = 0;
    prxmap_t xmaps[15]; /* ~2K */
    struct ps_prochandle *phandle;
    struct stat statbuf;
    char buffer[BUFSIZ];

    (void)SIGAR_PROC_FILENAME(buffer, pid, "/xmap");

    if ((fd = open(buffer, O_RDONLY)) < 0) {
        return errno;
    }

    if (fstat(fd, &statbuf) < 0) {
        close(fd);
        return errno;
    }

    map_size = statbuf.st_size;

    if (SIGAR_LOG_IS_DEBUG(sigar)) {
        sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                         "[%s] pid=%d, size=%d",
                         SIGAR_FUNC, pid, map_size);
    }

    if ((pstatus = sigar_init_libproc(sigar)) != SIGAR_OK) {
        close(fd);
        return pstatus;
    }

    pstatus = sigar_pgrab(sigar, pid, SIGAR_FUNC, &phandle);

    if (pstatus != SIGAR_OK) {
        close(fd);
        return pstatus;
    }

    /* read the xmap file in xmaps[]-sized chunks; map_size counts
     * down the bytes remaining, nread is the pread offset */
    for (nread=0; nread<statbuf.st_size; ) {
        off_t wanted = map_size > sizeof(xmaps) ? sizeof(xmaps) : map_size;
        int total = wanted / sizeof(prxmap_t);

        if (pread(fd, xmaps, wanted, nread) != wanted) {
            close(fd);
            sigar->pfree(phandle);
            return errno;
        }

        if (SIGAR_LOG_IS_DEBUG(sigar)) {
            sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                             "[%s] nread=%d, map_size=%d, wanted=%d, total=%d",
                             SIGAR_FUNC,
                             nread, map_size, wanted, total);
        }

        /* non-SIGAR_OK here means the callback asked to stop, not error */
        if (sigar_read_xmaps(sigar, xmaps, total,
                             &last_inode,
                             phandle, procmods) != SIGAR_OK)
        {
            break;
        }

        nread += wanted;
        map_size -= wanted;
    }

    close(fd);

    sigar->pfree(phandle);

    return SIGAR_OK;
}
/* Enumerate the loaded modules of a process.  For our own pid the
 * dynamic-linker walk is used: Pgrab would fail with G_SELF and the
 * dlinfo path is faster anyway.  Note one behavioral difference from
 * the Pgrab path: the first reported entry is not the executable name. */
int sigar_proc_modules_get(sigar_t *sigar, sigar_pid_t pid,
                           sigar_proc_modules_t *procmods)
{
    if (pid != sigar_pid_get(sigar)) {
        return sigar_pgrab_modules(sigar, pid, procmods);
    }
    return sigar_dlinfo_modules(sigar, procmods);
}
/* Convert a struct timespec-like value to nanoseconds. */
#define TIME_NSEC(t) \
    (SIGAR_SEC2NANO((t).tv_sec) + (sigar_uint64_t)(t).tv_nsec)

/* CPU time of the calling LWP (thread), in nanoseconds.
 * Only id == 0 ("current thread") is supported; querying another
 * thread returns SIGAR_ENOTIMPL. */
int sigar_thread_cpu_get(sigar_t *sigar,
                         sigar_uint64_t id,
                         sigar_thread_cpu_t *cpu)
{
    struct lwpinfo info;

    if (id != 0) {
        return SIGAR_ENOTIMPL;
    }

    _lwp_info(&info);

    cpu->user  = TIME_NSEC(info.lwp_utime);
    cpu->sys   = TIME_NSEC(info.lwp_stime);
    cpu->total = TIME_NSEC(info.lwp_utime) + TIME_NSEC(info.lwp_stime);

    return SIGAR_OK;
}
#include <sys/mnttab.h>
/* Classify a filesystem by its sys_type_name.  Currently only "ufs" is
 * mapped (to LOCAL_DISK); anything else keeps whatever type the generic
 * code already assigned.  XXX extend with more Solaris fs types. */
int sigar_os_fs_type_get(sigar_file_system_t *fsp)
{
    const char *type = fsp->sys_type_name;

    if ((*type == 'u') && strEQ(type, "ufs")) {
        fsp->type = SIGAR_FSTYPE_LOCAL_DISK;
    }

    return fsp->type;
}
/* Build the mounted-filesystem list from /etc/mnttab.  Entries mounted
 * with the "ignore" option (e.g. vold-managed media) are skipped. */
int sigar_file_system_list_get(sigar_t *sigar,
                               sigar_file_system_list_t *fslist)
{
    struct mnttab ent;
    sigar_file_system_t *fsp;
    FILE *fp = fopen(MNTTAB, "r");

    if (!fp) {
        return errno;
    }

    sigar_file_system_list_create(fslist);

    /* getmntent(3C) returns 0 for each parsed entry */
    while (getmntent(fp, &ent) == 0) {
        if (strstr(ent.mnt_mntopts, "ignore")) {
            continue; /* e.g. vold */
        }

        SIGAR_FILE_SYSTEM_LIST_GROW(fslist);

        fsp = &fslist->data[fslist->number++];

        SIGAR_SSTRCPY(fsp->dir_name, ent.mnt_mountp);
        SIGAR_SSTRCPY(fsp->dev_name, ent.mnt_special);
        SIGAR_SSTRCPY(fsp->sys_type_name, ent.mnt_fstype);
        SIGAR_SSTRCPY(fsp->options, ent.mnt_mntopts);
        /* sets fsp->type via sigar_os_fs_type_get */
        sigar_fs_type_init(fsp);
    }

    fclose(fp);

    return SIGAR_OK;
}
/* One /etc/path_to_inst entry of interest: maps a /devices path to a
 * kstat driver name ("sd", "cmdk", ...) and instance number. */
typedef struct {
    char device[PATH_MAX];
    char name[8];
    int instance;
} fsdev_path_t;

/* Cached per-device I/O state: kstat name (e.g. "sd9,g"), whether it is
 * a partition, and the previous sample used for rate calculations. */
typedef struct {
    char name[256];
    int is_partition;
    sigar_disk_usage_t disk;
} iodev_t;
/* Parse /etc/path_to_inst and build a malloc'd array mapping /devices
 * paths to kstat driver name + instance for each known disk driver.
 * The array is sized for the number of local-disk filesystems plus a
 * zeroed sentinel slot (caller iterates until name[0] == '\0') and must
 * be freed by the caller.  Returns NULL on open/alloc failure.
 *
 * Bug fix vs. original: the malloc result was used unchecked, which
 * also leaked the FILE* on allocation failure. */
static fsdev_path_t *get_fsdev_paths(sigar_t *sigar,
                                     sigar_file_system_list_t *fslist)
{
    int i, ndisk, size;
    char buffer[BUFSIZ], *ptr;
    char *dev, *inst, *drv;
    fsdev_path_t *paths, *mapping;
    FILE *fp = fopen("/etc/path_to_inst", "r");

    if (!fp) {
        return NULL;
    }

    /* count local disks to size the mapping array (+1 sentinel) */
    for (i=0, ndisk=0; i<fslist->number; i++) {
        sigar_file_system_t *fsp = &fslist->data[i];
        if (fsp->type == SIGAR_FSTYPE_LOCAL_DISK) {
            ndisk++;
        }
    }

    size = sizeof(*paths) * (ndisk+1);
    mapping = paths = malloc(size);
    if (mapping == NULL) {
        fclose(fp);
        return NULL;
    }
    memset(mapping, '\0', size);

    /* each line looks like: "device-path" instance "driver" */
    while ((ptr = fgets(buffer, sizeof(buffer), fp))) {
        /* eat dust java */
        char *q;

        SIGAR_SKIP_SPACE(ptr);
        if (*ptr == '#') {
            continue;
        }
        if (*ptr == '"') {
            ptr++;
        }
        dev = ptr;
        if (!(q = strchr(ptr, '"'))) {
            continue;
        }
        ptr = q+1;
        *q = '\0';
        SIGAR_SKIP_SPACE(ptr);
        inst = ptr;
        while (sigar_isdigit(*ptr)) {
            ptr++;
        }
        *ptr = '\0';
        ptr++;
        SIGAR_SKIP_SPACE(ptr);
        if (*ptr == '"') {
            ptr++;
        }
        drv = ptr;
        if (!(q = strchr(ptr, '"'))) {
            continue;
        }
        *q = '\0';

        /* only drivers whose kstats we know how to match */
        if (!(strEQ(drv, "sd") ||
              strEQ(drv, "ssd") ||
              strEQ(drv, "st") ||
              strEQ(drv, "dad") ||
              strEQ(drv, "cmdk")))
        {
            continue;
        }

        paths->instance = atoi(inst);
        /* skip devices that have no kstat data at all */
        if (!kstat_lookup(sigar->kc, drv, paths->instance, NULL)) {
            continue;
        }

        SIGAR_SSTRCPY(paths->device, dev);
        SIGAR_SSTRCPY(paths->name, drv);

        if (--ndisk < 0) {
            /* XXX prevent overflow */
            break;
        }
        paths++;
    }
    fclose(fp);
    return mapping;
}
/* Populate sigar->fsdev: a cache keyed by SIGAR_FSDEV_ID(stat) of each
 * local-disk mount point, mapping to an iodev_t whose name is the kstat
 * identifier "<driver><instance>,<partition>" (e.g. "sd9,g").  The
 * mapping is derived by resolving each /dev/dsk symlink into a /devices
 * path and matching it against /etc/path_to_inst entries. */
static int create_fsdev_cache(sigar_t *sigar)
{
    fsdev_path_t *paths, *mapping;
    sigar_file_system_list_t fslist;
    int i, j;
    int status;
    int debug = SIGAR_LOG_IS_DEBUG(sigar);

    sigar->fsdev = sigar_cache_new(15);

    status = sigar_file_system_list_get(sigar, &fslist);

    if (status != SIGAR_OK) {
        return status;
    }

    if (!(mapping = get_fsdev_paths(sigar, &fslist))) {
        sigar_file_system_list_destroy(sigar, &fslist);
        return ENOENT;
    }

    for (i=0; i<fslist.number; i++) {
        sigar_file_system_t *fsp = &fslist.data[i];

        if (fsp->type == SIGAR_FSTYPE_LOCAL_DISK) {
            char device[PATH_MAX+1], *ptr=device;
            /* dev_name is a symlink into /devices, e.g.
             * ../../devices/pci@.../sd@0,0:a */
            int len = readlink(fsp->dev_name, device, sizeof(device)-1);
            char *s;
            char partition;

            if (len < 0) {
                continue;
            }
            device[len] = '\0';

            if (debug) {
                sigar_log_printf(sigar, SIGAR_LOG_DEBUG, "[fsdev] name=%s, dev=%s",
                                 fsp->dev_name, device);
            }

            /* strip the "../" prefixes and the "devices" component to
             * get the bare /devices-relative path */
            while (strnEQ(ptr, "../", 3)) {
                ptr += 3;
            }

            if (strnEQ(ptr, "devices", 7)) {
                ptr += 7;
            }

            /* the char after ':' is the partition/slice letter */
            if ((s = strchr(ptr, ':'))) {
                partition = *(s+1);
            }
            else {
                continue;
            }

            for (j=0, paths=mapping; paths->name[0]; j++) {
                if (strnEQ(paths->device, ptr, strlen(paths->device))) {
                    sigar_cache_entry_t *ent;
                    struct stat sb;
                    int retval = stat(fsp->dir_name, &sb);
                    iodev_t *iodev;

                    if (retval == 0) {
                        /* NOTE(review): malloc result is used unchecked;
                         * a failure here would crash — confirm and guard */
                        iodev = malloc(sizeof(*iodev));
                        SIGAR_DISK_STATS_INIT(&iodev->disk);

                        /* e.g. sd9,g
                         * module == sd
                         * instance == 9
                         * partition == 8
                         */
                        snprintf(iodev->name, sizeof(iodev->name), "%s%d,%c",
                                 paths->name, paths->instance, partition);

                        ent = sigar_cache_get(sigar->fsdev, SIGAR_FSDEV_ID(sb));
                        ent->value = iodev;

                        if (debug) {
                            sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                                             "[fsdev] map %s -> %s",
                                             fsp->dir_name, iodev->name);
                        }
                    }
                    break;
                }
                paths++;
            }
        }
    }

    free(mapping);

    sigar_file_system_list_destroy(sigar, &fslist);

    return SIGAR_OK;
}
/* Copy one KSTAT_TYPE_IO sample into *disk.
 * Returns SIGAR_OK, or errno when the kstat read fails.
 *
 * Bug fix vs. original: kstat_read(3KSTAT) failure (-1) was ignored,
 * after which ks_data could be dereferenced stale or invalid. */
static int io_kstat_read(sigar_t *sigar,
                         sigar_disk_usage_t *disk,
                         kstat_t *ksp)
{
    kstat_io_t *io;

    if (kstat_read(sigar->kc, ksp, NULL) == -1) {
        return errno;
    }
    io = (kstat_io_t *)ksp->ks_data;

    disk->reads       = io->reads;
    disk->writes      = io->writes;
    disk->read_bytes  = io->nread;
    disk->write_bytes = io->nwritten;
    /* wlentime/rlentime are cumulative wait/run length*time products,
     * used later for queue and service-time derivation */
    disk->qtime       = io->wlentime;
    disk->rtime       = io->rlentime;
    disk->wtime       = io->wlentime;
    disk->time        = disk->rtime + disk->wtime;
    disk->snaptime    = ksp->ks_snaptime;

    return SIGAR_OK;
}
/* Find the KSTAT_TYPE_IO entry whose name matches `name` (e.g. "sd9,g")
 * and read it into *disk; *kio receives the matched kstat for the caller
 * to use snaptime/crtime.  Returns SIGAR_OK, errno on kstat-chain update
 * failure, or ENXIO when no matching entry exists. */
static int sigar_kstat_disk_usage_get(sigar_t *sigar, const char *name,
                                      sigar_disk_usage_t *disk,
                                      kstat_t **kio)
{
    kstat_t *ksp;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    /* linear scan of the whole kstat chain, filtered by type */
    for (ksp = sigar->kc->kc_chain;
         ksp;
         ksp = ksp->ks_next)
    {
        if (ksp->ks_type != KSTAT_TYPE_IO) {
            continue;
        }
        if (strEQ(ksp->ks_name, name)) {
            int status = io_kstat_read(sigar, disk, ksp);
            *kio = ksp;
            return status;
        }
    }

    return ENXIO;
}
/* Java-style 31x string hash; used as a cheap cache key for non-path
 * disk names.  Not collision-resistant — fine for this cache's purpose. */
static int simple_hash(const char *s)
{
    int hash = 0;

    for (; *s != '\0'; ++s) {
        hash = hash * 31 + *s;
    }

    return hash;
}
/* Disk I/O counters plus derived queue length and service time for a
 * device.  `name` is either a mount point ("/...", mapped through the
 * fsdev cache) or a raw kstat name.  Rates are computed against the
 * previous sample cached in the matching iodev_t. */
int sigar_disk_usage_get(sigar_t *sigar, const char *name,
                         sigar_disk_usage_t *disk)
{
    kstat_t *ksp;
    int status;
    iodev_t *iodev = NULL;
    sigar_cache_entry_t *ent;
    sigar_uint64_t id;

    SIGAR_DISK_STATS_INIT(disk);

    if (!sigar->fsdev) {
        if (create_fsdev_cache(sigar) != SIGAR_OK) {
            /* no mapping available: report zeroed stats, not an error */
            return SIGAR_OK;
        }
    }

    if (*name == '/') {
        /* mount point: stat() it and look up the cached device mapping */
        struct stat sb;

        if (stat(name, &sb) < 0) {
            return errno;
        }
        id = SIGAR_FSDEV_ID(sb);
        ent = sigar_cache_get(sigar->fsdev, id);
        if (ent->value == NULL) {
            status = ENXIO;
        }
        else {
            iodev = (iodev_t *)ent->value;
            status = sigar_kstat_disk_usage_get(sigar, iodev->name, disk, &ksp);
        }
    }
    else {
        /* raw kstat name: cache keyed by a string hash of the name */
        status = sigar_kstat_disk_usage_get(sigar, name, disk, &ksp);
        if (status == SIGAR_OK) {
            id = simple_hash(name); /*XXX*/
            ent = sigar_cache_get(sigar->fsdev, id);
            if (ent->value) {
                iodev = (iodev_t *)ent->value;
            }
            else {
                ent->value = iodev = malloc(sizeof(*iodev));
                SIGAR_SSTRCPY(iodev->name, name);
                SIGAR_DISK_STATS_INIT(&iodev->disk);
            }
        }
    }

    /* service_time formula derived from opensolaris.org:iostat.c */
    if ((status == SIGAR_OK) && iodev) {
        sigar_uint64_t delta;
        double avw, avr, tps, mtps;
        double etime, hr_etime;

        /* elapsed hires time since our previous sample (or since the
         * kstat's creation on the first call for this device) */
        if (iodev->disk.snaptime) {
            delta = disk->snaptime - iodev->disk.snaptime;
        }
        else {
            delta = ksp->ks_crtime - ksp->ks_snaptime;
        }

        hr_etime = (double)delta;
        if (hr_etime == 0.0) {
            hr_etime = (double)NANOSEC;
        }
        etime = hr_etime / (double)NANOSEC;

        /* transactions (reads+writes) per second over the interval */
        tps =
            (((double)(disk->reads - iodev->disk.reads)) / etime) +
            (((double)(disk->writes - iodev->disk.writes)) / etime);

        /* average queue lengths: wait (avw) and run (avr) */
        delta = disk->wtime - iodev->disk.wtime;
        if (delta) {
            avw = (double)delta;
            avw /= hr_etime;
        }
        else {
            avw = 0.0;
        }

        delta = disk->rtime - iodev->disk.rtime;
        if (delta) {
            avr = (double)delta;
            avr /= hr_etime;
        }
        else {
            avr = 0.0;
        }

        disk->queue = avw;
        disk->service_time = 0.0;
        disk->total_service_time=SIGAR_NSEC2MSEC(disk->time);

        if (tps && (avw != 0.0 || avr != 0.0)) {
            mtps = 1000.0 / tps;
            if (avw != 0.0) {
                disk->service_time += avw * mtps;
            }
            if (avr != 0.0) {
                disk->service_time += avr * mtps;
            }
        }

        /* remember this sample for the next rate computation */
        memcpy(&iodev->disk, disk, sizeof(iodev->disk));
    }

    if (status == ENXIO) {
        /* Virtual device. This has no physical device mapping. */
        return SIGAR_OK;
    }

    return status;
}
/* Copy a vopstats kstat sample into *disk.  vopstats carry only
 * operation and byte counters, so the time-based fields are zeroed.
 * Returns SIGAR_OK, or errno when the kstat read fails.
 *
 * Bug fix vs. original (consistent with io_kstat_read): a failed
 * kstat_read was ignored and stale ks_data could be dereferenced. */
static int io_kstat_read_vopstat(sigar_t *sigar, sigar_disk_usage_t *disk, kstat_t *ksp)
{
    struct vopstats *io;

    if (kstat_read(sigar->kc, ksp, NULL) == -1) {
        return errno;
    }
    io = (vopstats_t *)ksp->ks_data;

    disk->reads       = io->nread.value.ui64;
    disk->writes      = io->nwrite.value.ui64;
    disk->read_bytes  = io->read_bytes.value.ui64;
    disk->write_bytes = io->write_bytes.value.ui64;

    /* vopstats have no wait/run time accounting */
    disk->qtime       = 0;
    disk->rtime       = 0;
    disk->wtime       = 0;
    disk->time        = disk->rtime + disk->wtime;
    disk->snaptime    = ksp->ks_snaptime;

    return SIGAR_OK;
}
/* Locate the per-filesystem vopstats kstat for fsid `id` and read it
 * into *disk; *kio receives the matched kstat.  Returns SIGAR_OK,
 * errno on kstat-chain update failure, or ENXIO when absent.
 *
 * Hardening vs. original: the kstat name is built with snprintf
 * instead of an unbounded sprintf. */
static int sigar_kstat_disk_usage_get_vopstat(sigar_t *sigar, u_long id, sigar_disk_usage_t *disk, kstat_t **kio)
{
    kstat_t *ksp;
    char name[64];

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    /* vopstats kstats are named "<VOPSTATS_STR><fsid-in-hex>" */
    snprintf(name, sizeof(name), "%s%lx", VOPSTATS_STR, id);

    for (ksp = sigar->kc->kc_chain; ksp; ksp = ksp->ks_next)
    {
        if (strEQ(ksp->ks_name, name)) {
            int status = io_kstat_read_vopstat(sigar, disk, ksp);
            *kio = ksp;
            return status;
        }
    }
    return ENXIO;
}
/* Per-filesystem I/O counters via vopstats, looked up by statvfs fsid.
 *
 * NOTE(review): `iodev` is initialized to NULL and never assigned, so
 * the entire rate/service-time block below is dead code — unlike
 * sigar_disk_usage_get there is no previous-sample cache here.  Only
 * the raw counters from sigar_kstat_disk_usage_get_vopstat are
 * returned.  Confirm whether caching was intended. */
int sigar_disk_usage_get_vopstat(sigar_t *sigar, const char *name, sigar_disk_usage_t *disk)
{
    kstat_t *ksp;
    int status;
    iodev_t *iodev = NULL;

    SIGAR_DISK_STATS_INIT(disk);

    struct statvfs t;
    if (statvfs(name, &t) < 0) {
        return errno;
    }

    status = sigar_kstat_disk_usage_get_vopstat(sigar, t.f_fsid, disk, &ksp);

    /* service_time formula derived from opensolaris.org:iostat.c */
    /* (dead code while iodev stays NULL — see NOTE above) */
    if ((status == SIGAR_OK) && iodev) {
        sigar_uint64_t delta;
        double avw, avr, tps, mtps;
        double etime, hr_etime;

        if (iodev->disk.snaptime) {
            delta = disk->snaptime - iodev->disk.snaptime;
        }
        else {
            delta = ksp->ks_crtime - ksp->ks_snaptime;
        }

        hr_etime = (double)delta;
        if (hr_etime == 0.0) {
            hr_etime = (double)NANOSEC;
        }
        etime = hr_etime / (double)NANOSEC;

        tps =
            (((double)(disk->reads - iodev->disk.reads)) / etime) +
            (((double)(disk->writes - iodev->disk.writes)) / etime);

        delta = disk->wtime - iodev->disk.wtime;
        if (delta) {
            avw = (double)delta;
            avw /= hr_etime;
        }
        else {
            avw = 0.0;
        }

        delta = disk->rtime - iodev->disk.rtime;
        if (delta) {
            avr = (double)delta;
            avr /= hr_etime;
        }
        else {
            avr = 0.0;
        }

        disk->queue = avw;
        disk->service_time = 0.0;
        disk->total_service_time=SIGAR_NSEC2MSEC(disk->time);

        if (tps && (avw != 0.0 || avr != 0.0)) {
            mtps = 1000.0 / tps;
            if (avw != 0.0) {
                disk->service_time += avw * mtps;
            }
            if (avr != 0.0) {
                disk->service_time += avr * mtps;
            }
        }

        memcpy(&iodev->disk, disk, sizeof(iodev->disk));
    }

    if (status == ENXIO) {
        /* Virtual device. This has no physical device mapping. */
        return SIGAR_OK;
    }

    return status;
}
/* Filesystem capacity/usage for `dirname`: statvfs numbers, derived
 * %used, and per-fs I/O counters attached via vopstats. */
int sigar_file_system_usage_get(sigar_t *sigar,
                                const char *dirname,
                                sigar_file_system_usage_t *fsusage)
{
    const int rc = sigar_statvfs(sigar, dirname, fsusage);

    if (rc != SIGAR_OK) {
        return rc;
    }

    fsusage->use_percent = sigar_file_system_usage_calc_used(sigar, fsusage);

    /* vopstat failures are deliberately ignored; usage data is still valid */
    (void)sigar_disk_usage_get_vopstat(sigar, dirname, &fsusage->disk);

    return SIGAR_OK;
}
/* Build the CPU info list.  Logical processors on the same chip are
 * merged into one entry unless sigar->cpu_list_cores is set; socket and
 * core counts are derived from chip ids after the main loop. */
int sigar_cpu_info_list_get(sigar_t *sigar,
                            sigar_cpu_info_list_t *cpu_infos)
{
    processor_info_t stats;
    unsigned int i;
    int status = SIGAR_OK;
    int brand = -1;
    sigar_cache_t *chips;
    int is_debug = SIGAR_LOG_IS_DEBUG(sigar);
    int nsockets = 0;

    if (sigar_kstat_update(sigar) == -1) { /* for sigar->ncpu */
        return errno;
    }

    /*
     * stats we care about will be the same for each
     * online processor, so just grab the first.
     */
    for (i=0; i<sigar->ncpu; i++) {
        processorid_t id = sigar->ks.cpuid[i];

        if ((status = processor_info(id, &stats)) < 0) {
            continue;
        }
        else {
            status = SIGAR_OK;
            break;
        }
    }

    if (status != SIGAR_OK) {
        /* should never happen */
        return ENOENT;
    }

    sigar_cpu_info_list_create(cpu_infos);

    /* seen-chip set: one entry per distinct chip_id */
    chips = sigar_cache_new(16);
    chips->free_value = free_chip_id;

    for (i=0; i<sigar->ncpu; i++) {
        sigar_cpu_info_t *info;
        int chip_id = get_chip_id(sigar, i);

        if (chip_id != -1) {
            sigar_cache_entry_t *ent =
                sigar_cache_get(chips, chip_id);

            if (ent->value) {
                /* chip already reported; skip unless listing every core */
                if (!sigar->cpu_list_cores) {
                    continue;
                }
            }
            else {
                ++nsockets;
                ent->value = chips; /*anything non-NULL*/

                if (is_debug) {
                    sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                                     "[cpu_list] Merging info of"
                                     " logical processors for chip_id=%d",
                                     chip_id);
                }
            }
        }
        else {
            /* no chip id available: count each processor as a socket */
            ++nsockets;
        }

        SIGAR_CPU_INFO_LIST_GROW(cpu_infos);

        info = &cpu_infos->data[cpu_infos->number++];

        SIGAR_SSTRCPY(info->model, stats.pi_processor_type);

        /* brand string is looked up once and reused (see below) */
        if (brand == -1) {
            brand = get_chip_brand(sigar, i, info);
        }

        if (strEQ(info->model, "i386")) {
            if (!brand) {
                /* assume Intel on x86 */
                SIGAR_SSTRCPY(info->vendor, "Intel");
            }
            SIGAR_SSTRCPY(info->model, "x86");
        }
        else {
            if (!brand) {
                /* assume Sun */
                SIGAR_SSTRCPY(info->vendor, "Sun");
            }
            /* s/sparc/Sparc/ */
            info->model[0] = toupper(info->model[0]);
        }

        if (brand) {
            SIGAR_SSTRCPY(info->vendor, cpu_infos->data[0].vendor);
        }

        info->mhz = stats.pi_clock;
        info->cache_size = SIGAR_FIELD_NOTIMPL; /*XXX*/
    }

    sigar_cache_destroy(chips);

    /* backfill topology totals now that nsockets is known */
    for (i=0; i<cpu_infos->number; i++) {
        sigar_cpu_info_t *info = &cpu_infos->data[i];

        info->total_sockets = nsockets;
        info->total_cores = sigar->ncpu;
        info->cores_per_socket = sigar->ncpu / nsockets;
    }

    return SIGAR_OK;
}
/* Routing table via the MIB2 streams interface.  The kernel-reported
 * ipRouteEntrySize is used to step through entries (for binary compat
 * with kernels whose entry size differs from our compiled struct). */
int sigar_net_route_list_get(sigar_t *sigar,
                             sigar_net_route_list_t *routelist)
{
    char *data;
    int len, rc;
    struct opthdr *op;
    size_t nread=0, size=0;
    const char *size_from;

    sigar_net_route_list_create(routelist);

    while ((rc = get_mib2(&sigar->mib2, &op, &data, &len)) == GET_MIB2_OK) {
        mib2_ipRouteEntry_t *entry;
        char *end;

        if (op->level != MIB2_IP) {
            continue;
        }

        if (op->name == 0) {
            /* we want to use this size for bincompat */
            size = ((mib2_ip_t *)data)->ipRouteEntrySize;
            continue;
        }
        else if (op->name != MIB2_IP_21) {
            continue;
        }

        if (size == 0) {
            /* header with the entry size was not seen; fall back */
            size_from = "sizeof";
            size = sizeof(*entry);
        }
        else {
            size_from = "mib2_ip";
        }

        if (SIGAR_LOG_IS_DEBUG(sigar)) {
            sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                             "[route_list] ipRouteEntrySize=%d (from %s)",
                             size, size_from);
        }

        /* step by the reported entry size, not sizeof(*entry) */
        for (entry = (mib2_ipRouteEntry_t *)data, end = data + len;
             (char *)entry < end;
             nread+=size, entry = (mib2_ipRouteEntry_t *)((char *)data+nread))
        {
            sigar_net_route_t *route;
            int type = entry->ipRouteInfo.re_ire_type;

            /* filter same as netstat -r */
            if ((type == IRE_CACHE) ||
                (type == IRE_BROADCAST) ||
                (type == IRE_LOCAL))
            {
                continue;
            }

            SIGAR_NET_ROUTE_LIST_GROW(routelist);
            route = &routelist->data[routelist->number++];

            sigar_net_address_set(route->destination,
                                  entry->ipRouteDest);

            sigar_net_address_set(route->gateway,
                                  entry->ipRouteNextHop);

            sigar_net_address_set(route->mask,
                                  entry->ipRouteMask);

            route->refcnt      = entry->ipRouteInfo.re_ref;
            route->irtt        = entry->ipRouteInfo.re_rtt;
            route->metric      = entry->ipRouteMetric1;

            SIGAR_SSTRCPY(route->ifname, entry->ipRouteIfIndex.o_bytes);

            route->flags = RTF_UP;
            /* dest 0.0.0.0/0 is the default route -> mark as gateway */
            if ((route->destination.addr.in == 0) &&
                (route->mask.addr.in == 0))
            {
                route->flags |= RTF_GATEWAY;
            }

            route->use = route->window = route->mtu =
                SIGAR_FIELD_NOTIMPL; /*XXX*/
        }
    }

    if (rc != GET_MIB2_EOD) {
        close_mib2(&sigar->mib2);
        return SIGAR_EMIB2;
    }

    return SIGAR_OK;
}
/* Translate an interface's kstat_named_t array into interface stats.
 * Dispatches on the first character of each counter name; 64-bit
 * variants ("ipackets64", "obytes64", ...) override their 32-bit
 * counterparts when present (hence the "== 0" guards). */
static void ifstat_kstat_common(sigar_net_interface_stat_t *ifstat,
                                kstat_named_t *data, int ndata)
{
    int i;

    for (i=0; i<ndata; i++) {
        sigar_uint64_t value = data[i].value.KSTAT_UINT;

        char *ptr = data[i].name;

        switch (*ptr) {
          case 'c':
            if (strEQ(ptr, "collisions")) {
                ifstat->tx_collisions = value;
            }
            break;
          case 'd':
            if (strEQ(ptr, "drop")) {
                /* kstat has a single drop counter; mirrored to both sides */
                ifstat->rx_dropped = value;
                ifstat->tx_dropped = value;
            }
            break;
          case 'i':
            if (strEQ(ptr, "ipackets")) {
                if (ifstat->rx_packets == 0) {
                    ifstat->rx_packets = value;
                }
            }
            else if (strEQ(ptr, "ipackets64")) {
                ifstat->rx_packets = data[i].value.ui64;
            }
            else if (strEQ(ptr, "ierrors")) {
                ifstat->rx_errors = value;
            }
            else if (strEQ(ptr, "ifspeed")) {
                ifstat->speed = value;
            }
            break;
          case 'f':
            if (strEQ(ptr, "framing")) {
                ifstat->rx_frame = value;
            }
            break;
          case 'm':
            if (strEQ(ptr, "missed")) {
                ifstat->rx_dropped = value;
                ifstat->tx_dropped = value;
            }
            break;
          case 'n':
            if (strEQ(ptr, "nocarrier")) {
                ifstat->tx_carrier = value;
            }
            break;
          case 'o':
            if (strEQ(ptr, "obytes")) {
                if (ifstat->tx_bytes == 0) {
                    ifstat->tx_bytes = value;
                }
            }
            else if (strEQ(ptr, "obytes64")) {
                ifstat->tx_bytes = data[i].value.ui64;
            }
            else if (strEQ(ptr, "oerrors")) {
                ifstat->tx_errors = value;
            }
            else if (strEQ(ptr, "oflo")) {
                ifstat->tx_overruns = value;
            }
            else if (strEQ(ptr, "opackets")) {
                if (ifstat->tx_packets == 0) {
                    ifstat->tx_packets = value;
                }
            }
            else if (strEQ(ptr, "opackets64")) {
                ifstat->tx_packets = data[i].value.ui64;
            }
            else if (strEQ(ptr, "toolong_errors")) {
                ifstat->tx_overruns = value;
            }
            break;
          case 'r':
            if (strEQ(ptr, "rbytes")) {
                if (ifstat->rx_bytes == 0) {
                    ifstat->rx_bytes = value;
                }
            }
            else if (strEQ(ptr, "rbytes64")) {
                ifstat->rx_bytes = data[i].value.ui64;
            }
            else if (strEQ(ptr, "rx_overflow")) {
                ifstat->rx_overruns = value;
            }
            break;
          default:
            break;
        }
    }
}
/* Read the named interface's kstat (matched by name across all modules)
 * and merge its counters into *ifstat via ifstat_kstat_common.
 * ENXIO: no such kstat; ENOENT: kstat exists but read failed. */
static int sigar_net_ifstat_get_any(sigar_t *sigar, const char *name,
                                    sigar_net_interface_stat_t *ifstat)
{
    kstat_ctl_t *kc = sigar->kc;
    kstat_t *ksp;
    kstat_named_t *data;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    /* NULL module / -1 instance: match by name alone */
    if (!(ksp = kstat_lookup(kc, NULL, -1, (char *)name))) {
        return ENXIO;
    }

    if (kstat_read(kc, ksp, NULL) < 0) {
        return ENOENT;
    }

    data = (kstat_named_t *)ksp->ks_data;

    ifstat_kstat_common(ifstat, data, ksp->ks_ndata);

    return SIGAR_OK;
}
/* Loopback pseudo-interface: its kstat carries only packet counters,
 * so preset everything else to "not implemented" before merging in
 * whatever the kstat does provide. */
static int sigar_net_ifstat_get_lo(sigar_t *sigar, const char *name,
                                   sigar_net_interface_stat_t *ifstat)
{
    /* packet counters come from the kstat; start them at zero */
    ifstat->rx_packets    = 0;
    ifstat->tx_packets    = 0;

    ifstat->rx_bytes      = SIGAR_FIELD_NOTIMPL;
    ifstat->rx_errors     = SIGAR_FIELD_NOTIMPL;
    ifstat->rx_dropped    = SIGAR_FIELD_NOTIMPL;
    ifstat->rx_overruns   = SIGAR_FIELD_NOTIMPL;
    ifstat->rx_frame      = SIGAR_FIELD_NOTIMPL;

    ifstat->tx_bytes      = SIGAR_FIELD_NOTIMPL;
    ifstat->tx_errors     = SIGAR_FIELD_NOTIMPL;
    ifstat->tx_dropped    = SIGAR_FIELD_NOTIMPL;
    ifstat->tx_overruns   = SIGAR_FIELD_NOTIMPL;
    ifstat->tx_collisions = SIGAR_FIELD_NOTIMPL;
    ifstat->tx_carrier    = SIGAR_FIELD_NOTIMPL;

    ifstat->speed         = SIGAR_FIELD_NOTIMPL;

    return sigar_net_ifstat_get_any(sigar, name, ifstat);
}
/* Interface statistics entry point.  "lo*" interfaces get the loopback
 * special case; everything else is zeroed then read from kstat. */
int sigar_net_interface_stat_get(sigar_t *sigar, const char *name,
                                 sigar_net_interface_stat_t *ifstat)
{
    /* NOTE(review): in the non-loopback branch SIGAR_ZERO presumably
     * clears speed as well, making this preset redundant there — confirm */
    ifstat->speed = SIGAR_FIELD_NOTIMPL;

    if (strnEQ(name, "lo", 2)) {
        return sigar_net_ifstat_get_lo(sigar, name, ifstat);
    }
    else {
        SIGAR_ZERO(ifstat);
        return sigar_net_ifstat_get_any(sigar, name, ifstat);
    }
}
/* IPv6 address, scope and prefix length for an interface, queried via
 * SIOCGLIFADDR on a throwaway AF_INET6 socket.  An ioctl failure is not
 * an error: the config fields are simply left untouched. */
int sigar_net_interface_ipv6_config_get(sigar_t *sigar, const char *name,
                                        sigar_net_interface_config_t *ifconfig)
{
    int sock;
    struct lifreq lifr;

    if ((sock = socket(AF_INET6, SOCK_DGRAM, 0)) < 0) {
        return errno;
    }

    SIGAR_SSTRCPY(lifr.lifr_name, name);

    if (ioctl(sock, SIOCGLIFADDR, &lifr) == 0) {
        struct in6_addr *addr = SIGAR_SIN6_ADDR(&lifr.lifr_addr);

        sigar_net_address6_set(ifconfig->address6, addr);
        sigar_net_interface_scope6_set(ifconfig, addr);
        ifconfig->prefix6_length = lifr.lifr_addrlen;
    }

    close(sock);

    return SIGAR_OK;
}
/* Clamp negative queue sizes (possible from counter arithmetic) to 0. */
#define TCPQ_SIZE(s) ((s) >= 0 ? (s) : 0)

/* Walk a MIB2 TCP connection table batch, converting each entry the
 * walker's flags select (SERVER => LISTEN state, CLIENT => everything
 * else) into a sigar_net_connection_t and handing it to the callback. */
static int tcp_connection_get(sigar_net_connection_walker_t *walker,
                              struct mib2_tcpConnEntry *entry,
                              int len)
{
    int flags = walker->flags;
    int status;
    char *end = (char *)entry + len;

    while ((char *)entry < end) {
        int state = entry->tcpConnEntryInfo.ce_state;

        if (((flags & SIGAR_NETCONN_SERVER) && (state == TCPS_LISTEN)) ||
            ((flags & SIGAR_NETCONN_CLIENT) && (state != TCPS_LISTEN)))
        {
            sigar_net_connection_t conn;

            SIGAR_ZERO(&conn);

            sigar_net_address_set(conn.local_address, entry->tcpConnLocalAddress);
            sigar_net_address_set(conn.remote_address, entry->tcpConnRemAddress);

            conn.local_port = entry->tcpConnLocalPort;
            conn.remote_port = entry->tcpConnRemPort;
            conn.type = SIGAR_NETCONN_TCP;

            /* queue sizes derived from sequence-number deltas */
            conn.send_queue =
                TCPQ_SIZE(entry->tcpConnEntryInfo.ce_snxt -
                          entry->tcpConnEntryInfo.ce_suna - 1);

            conn.receive_queue =
                TCPQ_SIZE(entry->tcpConnEntryInfo.ce_rnxt -
                          entry->tcpConnEntryInfo.ce_rack);

            /* map Solaris TCPS_* states to SIGAR_TCP_* */
            switch (state) {
              case TCPS_CLOSED:
                conn.state = SIGAR_TCP_CLOSE;
                break;
              case TCPS_IDLE:
                conn.state = SIGAR_TCP_IDLE;
                break;
              case TCPS_BOUND:
                conn.state = SIGAR_TCP_BOUND;
                break;
              case TCPS_LISTEN:
                conn.state = SIGAR_TCP_LISTEN;
                break;
              case TCPS_SYN_SENT:
                conn.state = SIGAR_TCP_SYN_SENT;
                break;
              case TCPS_SYN_RCVD:
                conn.state = SIGAR_TCP_SYN_RECV;
                break;
              case TCPS_ESTABLISHED:
                conn.state = SIGAR_TCP_ESTABLISHED;
                break;
              case TCPS_CLOSE_WAIT:
                conn.state = SIGAR_TCP_CLOSE_WAIT;
                break;
              case TCPS_FIN_WAIT_1:
                conn.state = SIGAR_TCP_FIN_WAIT1;
                break;
              case TCPS_CLOSING:
                conn.state = SIGAR_TCP_CLOSING;
                break;
              case TCPS_LAST_ACK:
                conn.state = SIGAR_TCP_LAST_ACK;
                break;
              case TCPS_FIN_WAIT_2:
                conn.state = SIGAR_TCP_FIN_WAIT2;
                break;
              case TCPS_TIME_WAIT:
                conn.state = SIGAR_TCP_TIME_WAIT;
                break;
              default:
                conn.state = SIGAR_TCP_UNKNOWN;
                break;
            }

            /* callback may request early termination */
            status = walker->add_connection(walker, &conn);
            if (status != SIGAR_OK) {
                return status;
            }
        }

        entry++;
    }

    return SIGAR_OK;
}
/* Walk a MIB2 UDP table batch; same callback contract as
 * tcp_connection_get.  UDP has no remote endpoint in this table, so
 * remote address/port are reported as 0. */
static int udp_connection_get(sigar_net_connection_walker_t *walker,
                              struct mib2_udpEntry *entry,
                              int len)
{
    int flags = walker->flags;
    int status;
    char *end = (char *)entry + len;

    while ((char *)entry < end) {
        int state = entry->udpEntryInfo.ue_state;

        /* XXX dunno if this state check is right */
        if (((flags & SIGAR_NETCONN_SERVER) && (state == MIB2_UDP_idle)) ||
            ((flags & SIGAR_NETCONN_CLIENT) && (state != MIB2_UDP_idle)))
        {
            sigar_net_connection_t conn;

            SIGAR_ZERO(&conn);

            sigar_net_address_set(conn.local_address, entry->udpLocalAddress);

            sigar_net_address_set(conn.remote_address, 0);

            conn.local_port = entry->udpLocalPort;
            conn.remote_port = 0;

            conn.type = SIGAR_NETCONN_UDP;

            status = walker->add_connection(walker, &conn);
            if (status != SIGAR_OK) {
                return status;
            }
        }

        entry++;
    }

    return SIGAR_OK;
}
/* Stream the MIB2 tables and dispatch TCP/UDP connection tables to the
 * respective per-protocol walkers, as selected by walker->flags. */
int sigar_net_connection_walk(sigar_net_connection_walker_t *walker)
{
    sigar_t *sigar = walker->sigar;
    int flags = walker->flags;
    int status;
    int want_tcp = flags & SIGAR_NETCONN_TCP;
    int want_udp = flags & SIGAR_NETCONN_UDP;
    char *data;
    int len;
    int rc;
    struct opthdr *op;

    while ((rc = get_mib2(&sigar->mib2, &op, &data, &len)) == GET_MIB2_OK) {
        if ((op->level == MIB2_TCP) &&
            (op->name == MIB2_TCP_13) &&
            want_tcp)
        {
            status =
                tcp_connection_get(walker,
                                   (struct mib2_tcpConnEntry *)data,
                                   len);
        }
        else if ((op->level == MIB2_UDP) &&
                 (op->name == MIB2_UDP_5) &&
                 want_udp)
        {
            status =
                udp_connection_get(walker,
                                   (struct mib2_udpEntry *)data,
                                   len);
        }
        else {
            status = SIGAR_OK;
        }

        /* sub-walker propagated an early-stop request from the callback */
        if (status != SIGAR_OK) {
            break;
        }
    }

    if (rc != GET_MIB2_EOD) {
        close_mib2(&sigar->mib2);
        return SIGAR_EMIB2;
    }

    return SIGAR_OK;
}
/* Global TCP protocol counters from the MIB2 header record
 * (level == MIB2_TCP, name == 0). */
SIGAR_DECLARE(int)
sigar_tcp_get(sigar_t *sigar,
              sigar_tcp_t *tcp)
{
    char *data;
    int len;
    int rc;
    struct opthdr *op;
    mib2_tcp_t *mib = NULL;

    /* NOTE(review): breaking out on a match leaves the mib2 stream
     * undrained (other callers read through GET_MIB2_EOD) — confirm
     * get_mib2 tolerates a partially-consumed stream on the next call */
    while ((rc = get_mib2(&sigar->mib2, &op, &data, &len)) == GET_MIB2_OK) {
        if ((op->level == MIB2_TCP) && (op->name == 0)) {
            mib = (mib2_tcp_t *)data;
            break;
        }
    }

    if (mib) {
        tcp->active_opens = mib->tcpActiveOpens;
        tcp->passive_opens = mib->tcpPassiveOpens;
        tcp->attempt_fails = mib->tcpAttemptFails;
        tcp->estab_resets = mib->tcpEstabResets;
        tcp->curr_estab = mib->tcpCurrEstab;
        tcp->in_segs = mib->tcpInSegs;
        tcp->out_segs = mib->tcpOutSegs;
        tcp->retrans_segs = mib->tcpRetransSegs;
        tcp->in_errs = SIGAR_FIELD_NOTIMPL; /* XXX mib2_ip_t.tcpInErrs */
        tcp->out_rsts = mib->tcpOutRsts;
        return SIGAR_OK;
    }
    else {
        return SIGAR_ENOTIMPL;
    }
}
/* Generic NFS counter reader: look up the named nfs kstat (`type`, e.g.
 * "rfsreqcnt_v3") and copy each counter listed in the NULL-terminated
 * `names` array into consecutive sigar_uint64_t slots of the struct
 * behind `nfs`.  Struct layout must match the names array order;
 * missing counters are written as (sigar_uint64_t)-1. */
static int sigar_nfs_get(sigar_t *sigar,
                         char *type,
                         char **names,
                         char *nfs)
{
    size_t offset;
    kstat_t *ksp;
    int i;

    if (sigar_kstat_update(sigar) == -1) {
        return errno;
    }

    if (!(ksp = kstat_lookup(sigar->kc, "nfs", 0, type))) {
        return SIGAR_ENOTIMPL;
    }

    if (kstat_read(sigar->kc, ksp, NULL) < 0) {
        return errno;
    }

    for (i=0, offset=0;
         names[i];
         i++, offset+=sizeof(sigar_uint64_t))
    {
        sigar_uint64_t val;
        kstat_named_t *kv =
            kstat_data_lookup(ksp, names[i]);

        if (kv) {
            val = kv->value.ui64;
        }
        else {
            val = -1;
        }

        *(sigar_uint64_t *)((char *)nfs + offset) = val;
    }

    return SIGAR_OK;
}
/* NFSv2 procedure counter names, in the exact member order of
 * sigar_nfs_client_v2_t / sigar_nfs_server_v2_t (see sigar_nfs_get). */
static char *nfs_v2_names[] = {
    "null",
    "getattr",
    "setattr",
    "root",
    "lookup",
    "readlink",
    "read",
    "wrcache",
    "write",
    "create",
    "remove",
    "rename",
    "link",
    "symlink",
    "mkdir",
    "rmdir",
    "readdir",
    "statfs",
    NULL
};
/* NFSv2 client-side request counters (kstat nfs:rfsreqcnt_v2). */
int sigar_nfs_client_v2_get(sigar_t *sigar,
                            sigar_nfs_client_v2_t *nfs)
{
    return sigar_nfs_get(sigar, "rfsreqcnt_v2", nfs_v2_names, (char *)nfs);
}
/* NFSv2 server-side procedure counters (kstat nfs:rfsproccnt_v2). */
int sigar_nfs_server_v2_get(sigar_t *sigar,
                            sigar_nfs_server_v2_t *nfs)
{
    return sigar_nfs_get(sigar, "rfsproccnt_v2", nfs_v2_names, (char *)nfs);
}
/* NFSv3 procedure counter names, in the exact member order of
 * sigar_nfs_client_v3_t / sigar_nfs_server_v3_t (see sigar_nfs_get). */
static char *nfs_v3_names[] = {
    "null",
    "getattr",
    "setattr",
    "lookup",
    "access",
    "readlink",
    "read",
    "write",
    "create",
    "mkdir",
    "symlink",
    "mknod",
    "remove",
    "rmdir",
    "rename",
    "link",
    "readdir",
    "readdirplus",
    "fsstat",
    "fsinfo",
    "pathconf",
    "commit",
    NULL
};
/* NFSv3 client-side request counters (kstat nfs:rfsreqcnt_v3). */
int sigar_nfs_client_v3_get(sigar_t *sigar,
                            sigar_nfs_client_v3_t *nfs)
{
    return sigar_nfs_get(sigar, "rfsreqcnt_v3", nfs_v3_names, (char *)nfs);
}
/* NFSv3 server-side procedure counters (kstat nfs:rfsproccnt_v3). */
int sigar_nfs_server_v3_get(sigar_t *sigar,
                            sigar_nfs_server_v3_t *nfs)
{
    return sigar_nfs_get(sigar, "rfsproccnt_v3", nfs_v3_names, (char *)nfs);
}
/* ARP table via the MIB2 ipNetToMedia table.  Entry stride uses the
 * kernel-reported ipNetToMediaEntrySize for binary compatibility,
 * mirroring sigar_net_route_list_get. */
int sigar_arp_list_get(sigar_t *sigar,
                       sigar_arp_list_t *arplist)
{
    char *data;
    int len, rc;
    struct opthdr *op;
    size_t nread=0, size=0;
    const char *size_from;

    sigar_arp_list_create(arplist);

    while ((rc = get_mib2(&sigar->mib2, &op, &data, &len)) == GET_MIB2_OK) {
        mib2_ipNetToMediaEntry_t *entry;
        char *end;

        if (op->level != MIB2_IP) {
            continue;
        }

        if (op->name == 0) {
            /* we want to use this size for bincompat */
            size = ((mib2_ip_t *)data)->ipNetToMediaEntrySize;
            continue;
        }
        else if (op->name != MIB2_IP_MEDIA) {
            continue;
        }

        if (size == 0) {
            /* header record not seen; fall back to compiled size */
            size_from = "sizeof";
            size = sizeof(*entry);
        }
        else {
            size_from = "mib2_ip";
        }

        if (SIGAR_LOG_IS_DEBUG(sigar)) {
            sigar_log_printf(sigar, SIGAR_LOG_DEBUG,
                             "[arp_list] ipNetToMediaEntrySize=%d (from %s)",
                             size, size_from);
        }

        /* step by the reported entry size, not sizeof(*entry) */
        for (entry = (mib2_ipNetToMediaEntry_t *)data, end = data + len;
             (char *)entry < end;
             nread+=size, entry = (mib2_ipNetToMediaEntry_t *)((char *)data+nread))
        {
            sigar_arp_t *arp;

            SIGAR_ARP_LIST_GROW(arplist);
            arp = &arplist->data[arplist->number++];

            sigar_net_address_set(arp->address,
                                  entry->ipNetToMediaNetAddress);

            sigar_net_address_mac_set(arp->hwaddr,
                                      entry->ipNetToMediaPhysAddress.o_bytes,
                                      entry->ipNetToMediaPhysAddress.o_length);

            SIGAR_SSTRCPY(arp->ifname, entry->ipNetToMediaIfIndex.o_bytes);

            arp->flags = entry->ipNetToMediaInfo.ntm_flags;

            SIGAR_SSTRCPY(arp->type, "ether"); /*XXX*/
        }
    }

    if (rc != GET_MIB2_EOD) {
        close_mib2(&sigar->mib2);
        return SIGAR_EMIB2;
    }

    return SIGAR_OK;
}
/* Scan /proc/<pid>/fd for a TCP socket listening on `port`, using
 * libproc agent calls (pr_getsockopt/pr_getsockname) against the
 * already-grabbed process.  Returns 1 if found, 0 otherwise. */
static int find_port(sigar_t *sigar, struct ps_prochandle *phandle,
                     sigar_pid_t pid, unsigned long port)
{
    DIR *dirp;
    struct dirent *ent;
    char pname[PATH_MAX];
    struct stat64 statb;
    int found=0;

    /* pname is PATH_MAX; "/proc/%d/fd" cannot overflow it, though
     * snprintf would make that explicit */
    sprintf(pname, "/proc/%d/fd", (int)pid);

    if (!(dirp = opendir(pname))) {
        return 0;
    }

    while ((ent = readdir(dirp))) {
        int fd;

        if (!sigar_isdigit(ent->d_name[0])) {
            continue;
        }
        fd = atoi(ent->d_name);

        if (sigar->pfstat64(phandle, fd, &statb) == -1) {
            continue;
        }

        if ((statb.st_mode & S_IFMT) == S_IFSOCK) {
            struct sockaddr_in sin;
            struct sockaddr *sa = (struct sockaddr *)&sin;
            socklen_t len = sizeof(sin);
            int opt, optsz, rc;

            /* must be a TCP (stream) socket... */
            optsz = sizeof(opt);
            rc = sigar->pgetsockopt(phandle, fd, SOL_SOCKET, SO_TYPE, &opt, &optsz);
            if (rc != 0) {
                continue;
            }
            if (opt != SOCK_STREAM) {
                continue;
            }

            /* ...in the listening state */
            optsz = sizeof(opt);
            rc = sigar->pgetsockopt(phandle, fd, SOL_SOCKET, SO_ACCEPTCONN, &opt, &optsz);
            if (rc != 0) {
                continue;
            }
            /* NOTE(review): SO_ACCEPTCONN conventionally yields a boolean;
             * comparing against the option constant itself looks odd but is
             * derived from pfiles.c — confirm against Solaris behavior */
            if (opt != SO_ACCEPTCONN) {
                continue;
            }

            rc = sigar->pgetsockname(phandle, fd, sa, &len);
            if (rc != 0) {
                continue;
            }

            /* sin_port and sin6_port share the same offset, so reading
             * through sockaddr_in works for both families here */
            if ((sa->sa_family == AF_INET) ||
                (sa->sa_family == AF_INET6))
            {
                if (ntohs(sin.sin_port) == port) {
                    found = 1;
                    break;
                }
            }
        }
    }

    closedir(dirp);

    return found;
}
/* derived from /usr/bin/pfiles.c */
/* Find which process is listening on `port` by Pgrab-ing every process
 * and inspecting its sockets (see find_port).  Solaris 10+ only, since
 * it requires the libproc agent.  `protocol` is currently unused
 * (TCP-only via find_port's SOCK_STREAM check). */
int sigar_proc_port_get(sigar_t *sigar, int protocol,
                        unsigned long port, sigar_pid_t *pid)
{
    sigar_proc_list_t pids;
    int i, status, found=0;

    if (sigar->solaris_version < 10) {
        return SIGAR_ENOTIMPL;
    }

    if ((status = sigar_init_libproc(sigar)) != SIGAR_OK) {
        return SIGAR_ENOTIMPL;
    }

    status = sigar_proc_list_get(sigar, &pids);
    if (status != SIGAR_OK) {
        return status;
    }

    for (i=0; i<pids.number; i++) {
        sigar_pid_t ps_id = pids.data[i];
        struct ps_prochandle *phandle;

        if (ps_id == sigar_pid_get(sigar)) {
            continue; /* XXX */
        }

        /* processes we cannot grab (permissions, exited) are skipped */
        status = sigar_pgrab(sigar, ps_id, SIGAR_FUNC, &phandle);
        if (status != SIGAR_OK) {
            continue;
        }

        if (sigar->pcreate_agent(phandle) == 0) {
            found = find_port(sigar, phandle, ps_id, port);
            sigar->pdestroy_agent(phandle);
        }
        sigar->pfree(phandle);
        if (found) {
            *pid = ps_id;
            break;
        }
    }

    sigar_proc_list_destroy(sigar, &pids);

    return found ? SIGAR_OK : ENOENT;
}
/* Solaris-specific system info fields.  Assumes sys_info->version was
 * already populated (SunOS "5.x") by the generic code — TODO confirm.
 * Maps SunOS 5.6 to marketing version "2.6"; 5.7+ drop the "5." prefix
 * (5.10 -> "10"). */
int sigar_os_sys_info_get(sigar_t *sigar,
                          sigar_sys_info_t *sys_info)
{
    char *vendor_version;

    sysinfo(SI_ARCHITECTURE, sys_info->arch, sizeof(sys_info->arch));

    SIGAR_SSTRCPY(sys_info->name, "Solaris");
    SIGAR_SSTRCPY(sys_info->vendor, "Sun Microsystems");

    if (strEQ(sys_info->version, "5.6")) {
        vendor_version = "2.6";
    }
    else {
        if ((vendor_version = strchr(sys_info->version, '.'))) {
            ++vendor_version;
        }
        else {
            vendor_version = sys_info->version;
        }
    }

    SIGAR_SSTRCPY(sys_info->vendor_version, vendor_version);

    snprintf(sys_info->description,
             sizeof(sys_info->description),
             "%s %s",
             sys_info->name, sys_info->vendor_version);

    return SIGAR_OK;
}
| 27.215233 | 114 | 0.508383 | [
"model"
] |
65649a2692391fc31d8d658252bf6d8976f13d9c | 932 | h | C | include/il2cpp/System/Comparison_UgPokeLottery.PokeRate_.h | martmists-gh/BDSP | d6326c5d3ad9697ea65269ed47aa0b63abac2a0a | [
"MIT"
] | 1 | 2022-01-15T20:20:27.000Z | 2022-01-15T20:20:27.000Z | include/il2cpp/System/Comparison_UgPokeLottery.PokeRate_.h | martmists-gh/BDSP | d6326c5d3ad9697ea65269ed47aa0b63abac2a0a | [
"MIT"
] | null | null | null | include/il2cpp/System/Comparison_UgPokeLottery.PokeRate_.h | martmists-gh/BDSP | d6326c5d3ad9697ea65269ed47aa0b63abac2a0a | [
"MIT"
] | null | null | null | #pragma once
#include "il2cpp.h"
void System_Comparison_UgPokeLottery_PokeRate____ctor (System_Comparison_UgPokeLottery_PokeRate__o* __this, Il2CppObject* object, intptr_t method, const MethodInfo* method_info);
int32_t System_Comparison_UgPokeLottery_PokeRate___Invoke (System_Comparison_UgPokeLottery_PokeRate__o* __this, Dpr_UnderGround_UgPokeLottery_PokeRate_o x, Dpr_UnderGround_UgPokeLottery_PokeRate_o y, const MethodInfo* method_info);
System_IAsyncResult_o* System_Comparison_UgPokeLottery_PokeRate___BeginInvoke (System_Comparison_UgPokeLottery_PokeRate__o* __this, Dpr_UnderGround_UgPokeLottery_PokeRate_o x, Dpr_UnderGround_UgPokeLottery_PokeRate_o y, System_AsyncCallback_o* callback, Il2CppObject* object, const MethodInfo* method_info);
int32_t System_Comparison_UgPokeLottery_PokeRate___EndInvoke (System_Comparison_UgPokeLottery_PokeRate__o* __this, System_IAsyncResult_o* result, const MethodInfo* method_info);
| 103.555556 | 307 | 0.898069 | [
"object"
] |
656849b1cffd7c18b8be1cac3146d0699076af0d | 562 | h | C | YangMeetingServer/src/yanghttp/YangServerletManager.h | yangxinghai/yangrtc | 92cc28ade5af6cbe22c151cd1220ab12816694e7 | [
"MIT"
] | 23 | 2021-09-13T06:24:34.000Z | 2022-03-24T10:05:12.000Z | YangMeetingServer/src/yanghttp/YangServerletManager.h | yangxinghai/yangrtc | 92cc28ade5af6cbe22c151cd1220ab12816694e7 | [
"MIT"
] | null | null | null | YangMeetingServer/src/yanghttp/YangServerletManager.h | yangxinghai/yangrtc | 92cc28ade5af6cbe22c151cd1220ab12816694e7 | [
"MIT"
] | 9 | 2021-09-13T06:27:44.000Z | 2022-03-02T00:23:17.000Z | /*
* YangServerletManager.h
*
* Created on: 2020年10月10日
* Author: yang
*/
#ifndef YANGHTTP_YANGSERVERLETMANAGER_H_
#define YANGHTTP_YANGSERVERLETMANAGER_H_
#include "../yangsql/include/YangUserManager.h"
#include <map>
class YangServerletManager {
public:
YangServerletManager();
virtual ~YangServerletManager();
string handle(char* p);
YangUserManager ym;
void split(const string s,vector<string>* sv,const char flag );
void split1(const string s,map<string,string>* sv,const char flag );
};
#endif /* YANGHTTP_YANGSERVERLETMANAGER_H_ */
| 21.615385 | 69 | 0.752669 | [
"vector"
] |
656c04c8e6411c23022e00d5490f6056a8bc6f20 | 1,634 | h | C | plansys2_pddl_parser/include/plansys2_pddl_parser/Action.h | mjcarroll/ros2_planning_system | 676d0d3a9629446cdc0797df8daa808e75728cf3 | [
"Apache-2.0"
] | 1 | 2020-03-04T07:55:57.000Z | 2020-03-04T07:55:57.000Z | plansys2_pddl_parser/include/plansys2_pddl_parser/Action.h | mjcarroll/ros2_planning_system | 676d0d3a9629446cdc0797df8daa808e75728cf3 | [
"Apache-2.0"
] | 1 | 2020-12-20T18:30:28.000Z | 2020-12-20T18:30:28.000Z | plansys2_pddl_parser/include/plansys2_pddl_parser/Action.h | mjcarroll/ros2_planning_system | 676d0d3a9629446cdc0797df8daa808e75728cf3 | [
"Apache-2.0"
] | null | null | null |
#pragma once
#include "plansys2_pddl_parser/Ground.h"
namespace parser { namespace pddl {
class Action : public ParamCond {
public:
Condition *pre, *eff;
Action( const std::string & s )
: ParamCond( s ), pre( 0 ), eff( 0 ) {}
Action( ParamCond * c )
: ParamCond( c ), pre( 0 ), eff( 0 ) {}
Action( const Action * a, Domain & d )
: ParamCond( a ), pre( 0 ), eff( 0 ) {
if ( a->pre ) pre = a->pre->copy( d );
if ( a->eff ) eff = a->eff->copy( d );
}
virtual ~Action() {
if ( pre ) delete pre;
if ( eff ) delete eff;
}
void print( std::ostream & s ) const {
s << name << params << "\n";
s << "Pre: " << pre;
if ( eff ) s << "Eff: " << eff;
}
virtual double duration() {
return 1;
}
void PDDLPrint( std::ostream & s, unsigned indent, const TokenStruct< std::string > & ts, const Domain & d ) const override;
void parseConditions( Stringreader & f, TokenStruct< std::string > & ts, Domain & d );
void parse( Stringreader & f, TokenStruct< std::string > & ts, Domain & d );
void addParams( int m, unsigned n ) {}
void addParams( const IntVec & v ) {
if ( pre ) pre->addParams( params.size(), v.size() );
if ( eff ) eff->addParams( params.size(), v.size() );
params.insert( params.end(), v.begin(), v.end() );
}
Condition * copy( Domain & d ) {
return new Action( this, d );
}
CondVec precons();
CondVec effects();
GroundVec addEffects();
GroundVec deleteEffects();
protected:
CondVec getSubconditionsFromCondition( Condition * c );
GroundVec getGroundsFromCondition( Condition * c, bool neg );
};
typedef std::vector< Action * > ActionVec;
} } // namespaces
| 21.220779 | 125 | 0.610771 | [
"vector"
] |
656ea578f25f4a855b663242cac37715b31c8dec | 1,030 | c | C | examples/example_FWHT.c | breuderink/epsilon | 82d61f0615049c6b7f74ec4c72d33d3e161670b9 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T13:35:46.000Z | 2021-04-03T13:35:46.000Z | examples/example_FWHT.c | breuderink/epsilon | 82d61f0615049c6b7f74ec4c72d33d3e161670b9 | [
"BSD-3-Clause"
] | 10 | 2021-04-01T07:14:51.000Z | 2021-05-18T07:31:55.000Z | examples/example_FWHT.c | breuderink/epsilon | 82d61f0615049c6b7f74ec4c72d33d3e161670b9 | [
"BSD-3-Clause"
] | null | null | null | #include <epsilon.h>
#include <stdio.h>
#define LOG2_DIMS 3
#define DIMS (1 << LOG2_DIMS)
void display(float x[DIMS]) {
for (int i = 0; i < DIMS; ++i) {
printf("%5.1f ", x[i]);
}
printf("\n");
}
int main() {
// Define and display input vector.
float x[DIMS] = {-3, -3, -3, -3, 3, 3, 3, 3};
printf("x =\n");
display(x);
// Transform input, and display. Note that it is sparse now.
FWHT(x, LOG2_DIMS);
printf("\nFWHT(x) =\n");
display(x);
// Transform again. Note that it is the (scaled) input again.
FWHT(x, LOG2_DIMS);
printf("\nFWHT(FWHT(x)) =\n");
display(x);
// Print Hadamard matrix.
printf("\nHadamard matrix:\n");
for (int d = 0; d < DIMS; ++d) {
// Create unit vector.
float row[DIMS] = {0};
row[d] = 1.0f;
// Perform fast Walsh-Hadamard transform.
FWHT(row, LOG2_DIMS);
// Print result that forms a row of the Hadamard matrix. Note the
// pattern in the sign changes. For more information see
// https://en.wikipedia.org/wiki/Hadamard_transform.
display(row);
}
return 0;
}
| 21.914894 | 67 | 0.62233 | [
"vector",
"transform"
] |
6574577dccf92b02611aed7c009353e897eb68b1 | 5,571 | h | C | include/is2/srvr/subr.h | Marsh407/is2 | e2048b706e06f376fc924a6945c61e559006413e | [
"Apache-2.0"
] | null | null | null | include/is2/srvr/subr.h | Marsh407/is2 | e2048b706e06f376fc924a6945c61e559006413e | [
"Apache-2.0"
] | null | null | null | include/is2/srvr/subr.h | Marsh407/is2 | e2048b706e06f376fc924a6945c61e559006413e | [
"Apache-2.0"
] | null | null | null | //! ----------------------------------------------------------------------------
//! Copyright Verizon.
//!
//! \file: TODO
//! \details: TODO
//!
//! Licensed under the terms of the Apache 2.0 open source license.
//! Please refer to the LICENSE file in the project root for the terms.
//! ----------------------------------------------------------------------------
#ifndef _SUBR_H
#define _SUBR_H
//! ----------------------------------------------------------------------------
//! includes
//! ----------------------------------------------------------------------------
#include "is2/srvr/http_status.h"
#include "is2/srvr/base_u.h"
#include "is2/support/data.h"
#include "is2/nconn/scheme.h"
#include "is2/nconn/host_info.h"
#include <string>
#include <list>
namespace ns_is2 {
//! ----------------------------------------------------------------------------
//! fwd decl's
//! ----------------------------------------------------------------------------
class nconn;
class resp;
class nbq;
class t_srvr;
class session;
class ups_session;
class subr;
typedef std::list <subr *> subr_list_t;
class base_u;
//! ----------------------------------------------------------------------------
//! subr
//! ----------------------------------------------------------------------------
class subr
{
public:
// -------------------------------------------------
// public types
// -------------------------------------------------
// state
typedef enum {
SUBR_STATE_NONE = 0,
SUBR_STATE_QUEUED,
SUBR_STATE_DNS_LOOKUP,
SUBR_STATE_ACTIVE
} subr_state_t;
// -------------------------------------------------
// Callbacks
// -------------------------------------------------
typedef int32_t (*error_cb_t)(subr &, nconn *, http_status_t, const char *);
typedef int32_t (*completion_cb_t)(subr &, nconn &, resp &);
// -------------------------------------------------
// public methods
// -------------------------------------------------
subr(session &a_session);
subr(const subr &a_subr);
~subr();
const std::string &get_label(void);
bool get_expect_resp_body_flag(void);
void set_keepalive(bool a_val);
void set_host(const std::string &a_val);
void set_headers(const kv_map_list_t &a_headers_list);
int set_header(const std::string &a_key, const std::string &a_val);
int del_header(const std::string &a_key);
void clear_headers(void);
void reset_label(void);
int32_t init_with_url(const std::string &a_url);
int32_t create_request(nbq &ao_q);
int32_t cancel(void);
// -------------------------------------------------
// public members
// -------------------------------------------------
subr_state_t m_state;
scheme_t m_scheme;
std::string m_host;
uint16_t m_port;
std::string m_server_label;
int32_t m_timeout_ms;
std::string m_path;
std::string m_query;
std::string m_fragment;
std::string m_userinfo;
std::string m_hostname;
std::string m_verb;
bool m_keepalive;
std::string m_id;
std::string m_where;
kv_map_list_t m_headers;
nbq *m_body_q;
error_cb_t m_error_cb;
completion_cb_t m_completion_cb;
void *m_data;
bool m_detach_resp;
uint64_t m_uid;
session *m_session;
host_info m_host_info;
uint64_t m_start_time_ms;
uint64_t m_end_time_ms;
void *m_lookup_job;
subr_list_t::iterator m_i_q;
bool m_tls_verify;
bool m_tls_sni;
bool m_tls_self_ok;
bool m_tls_no_host_check;
ups_session *m_ups_session;
base_u *m_u;
private:
// -------------------------------------------------
// private methods
// -------------------------------------------------
// Disallow assign
subr& operator=(const subr &);
};
//! ----------------------------------------------------------------------------
//! upstream object
//! ----------------------------------------------------------------------------
class subr_u: public base_u
{
public:
// -------------------------------------------------
// const
// -------------------------------------------------
static const uint32_t S_UPS_TYPE_SUBR = 0xFFFF000F;
// -------------------------------------------------
// public methods
// -------------------------------------------------
subr_u(session &a_session, subr *a_subr);
~subr_u();
// -------------------------------------------------
// upstream methods
// -------------------------------------------------
ssize_t ups_read(size_t a_len);
ssize_t ups_read_ahead(size_t a_len);
int32_t ups_cancel(void);
uint32_t ups_get_type(void) { return S_UPS_TYPE_SUBR;}
private:
// -------------------------------------------------
// private methods
// -------------------------------------------------
// Disallow copy/assign
subr_u& operator=(const subr_u &);
subr_u(const subr_u &);
// -------------------------------------------------
// private methods
// -------------------------------------------------
subr *m_subr;
};
} //namespace ns_is2 {
#endif
| 36.175325 | 84 | 0.392748 | [
"object"
] |
657bde4ad1af1352b4fd1fe3593b509461b7a2c0 | 5,959 | h | C | lib/scipoptsuite-5.0.1/gcg/src/graph/graph.h | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | lib/scipoptsuite-5.0.1/gcg/src/graph/graph.h | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | lib/scipoptsuite-5.0.1/gcg/src/graph/graph.h | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program */
/* GCG --- Generic Column Generation */
/* a Dantzig-Wolfe decomposition based extension */
/* of the branch-cut-and-price framework */
/* SCIP --- Solving Constraint Integer Programs */
/* */
/* Copyright (C) 2010-2018 Operations Research, RWTH Aachen University */
/* Zuse Institute Berlin (ZIB) */
/* */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Lesser General Public License */
/* as published by the Free Software Foundation; either version 3 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Lesser General Public License for more details. */
/* */
/* You should have received a copy of the GNU Lesser General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.*/
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/**@file graph.h
* @brief miscellaneous graph methods for structure detection
* @author Martin Bergner
* @author Annika Thome
*/
/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
#ifndef GCG_GRAPH_H_
#define GCG_GRAPH_H_
#include "objscip/objscip.h"
#include "tclique/tclique.h"
#include "weights.h"
#include "pub_decomp.h"
#include "bridge.h"
#include "graph_interface.h"
#include <exception>
#include <vector>
#include <string>
namespace gcg {
template <class T>
class Graph : public GraphInterface {
public:
std::string name;
protected:
SCIP* scip_;
Bridge* graph;
int nconss;
int nvars;
int nnonzeroes;
int dummynodes;
public:
/** Constructor */
Graph(
SCIP* scip /**< SCIP data structure */
);
void swap(Graph & other) // the swap member function (should never fail!)
{
// swap all the members (and base subobject, if applicable) with other
std::swap(partition, other.partition);
std::swap(scip_ , other.scip_);
std::swap(graph , other.graph);
std::swap(nconss , other.nconss);
std::swap(nvars , other.nvars);
std::swap(nnonzeroes , other.nnonzeroes);
std::swap(dummynodes, other.dummynodes);
}
Graph& operator=(Graph other) // note: argument passed by value!
{
// swap this with other
swap(other);
return *this;
}
/** Destruktor */
virtual ~Graph();
/** adds the node with the given weight to the graph */
SCIP_RETCODE addNode(int i,int weight);
/** adds the edge to the graph */
SCIP_RETCODE addEdge(int i, int j);
/** return the number of nodes */
int getNNodes();
/** return the number of edges (or hyperedges) */
int getNEdges();
/** returns whether there is an edge between nodes i and j */
virtual int edge(int i, int j);
/** return the number of neighbor nodes of given node */
virtual int getNNeighbors(
int i /**< the given node */
);
/** return the neighboring nodes of a given node */
virtual std::vector<int> getNeighbors(
int i /**< the given node */
);
/** assigns partition to a given node*/
virtual void setPartition(int i, int ID);
/** create graph from the matrix, to be overriden by the implementation*/
virtual SCIP_RETCODE createFromMatrix(
SCIP_CONS** conss, /**< constraints for which graph should be created */
SCIP_VAR** vars, /**< variables for which graph should be created */
int nconss_, /**< number of constraints */
int nvars_ /**< number of variables */
) { return SCIP_ERROR; }
/** writes the graph to the given file.
* The format is graph dependent
*/
virtual SCIP_RETCODE writeToFile(
int fd, /**< filename where the graph should be written to */
SCIP_Bool writeweights /**< whether to write weights */
);
/**
* reads the partition from the given file.
* The format is graph dependent. The default is a file with one line for each node a
*/
virtual SCIP_RETCODE readPartition(
const char* filename /**< filename where the partition is stored */
);
int getNNonzeroes() const
{
return nnonzeroes;
}
/** return the weight of given node */
virtual int getWeight(
int i /**< the given node */
);
/** set the number of dummy nodes */
void setDummynodes(int dummynodes_)
{
dummynodes = dummynodes_;
}
int getDummynodes() const
{
return dummynodes;
}
SCIP_RETCODE flush();
};
}
#endif
| 34.445087 | 123 | 0.506964 | [
"vector"
] |
657c043b629019108e14aed7c63c0696dba49caf | 4,118 | h | C | Rendering/vtkMesaProperty.h | cyrush/visit-vtk | 89505234e1880406fa5b8e0c0f5dfb79d56fc06c | [
"BSD-3-Clause"
] | 1 | 2018-05-17T17:09:16.000Z | 2018-05-17T17:09:16.000Z | Rendering/vtkMesaProperty.h | cyrush/visit-vtk | 89505234e1880406fa5b8e0c0f5dfb79d56fc06c | [
"BSD-3-Clause"
] | null | null | null | Rendering/vtkMesaProperty.h | cyrush/visit-vtk | 89505234e1880406fa5b8e0c0f5dfb79d56fc06c | [
"BSD-3-Clause"
] | null | null | null | /*=========================================================================
Program: Visualization Toolkit
Module: vtkMesaProperty.h
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// .NAME vtkMesaProperty - Mesa property
// .SECTION Description
// vtkMesaProperty is a concrete implementation of the abstract class
// vtkProperty. vtkMesaProperty interfaces to the Mesa rendering library.
#ifndef __vtkMesaProperty_h
#define __vtkMesaProperty_h
#include "vtkProperty.h"
class vtkMesaRenderer;
class vtkShader2;
class vtkShader2Collection;
class vtkShaderProgram2;
class vtkGLSLShaderDeviceAdapter2;
class VTK_RENDERING_EXPORT vtkMesaProperty : public vtkProperty
{
public:
static vtkMesaProperty *New();
vtkTypeMacro(vtkMesaProperty,vtkProperty);
virtual void PrintSelf(ostream& os, vtkIndent indent);
// Description:
// Implement base class method.
void Render(vtkActor *a, vtkRenderer *ren);
// Description:
// Implement base class method.
void BackfaceRender(vtkActor *a, vtkRenderer *ren);
//BTX
// Description:
// This method is called after the actor has been rendered.
// Don't call this directly. This method cleans up
// any shaders allocated.
virtual void PostRender(vtkActor *a,
vtkRenderer *r);
// Description:
// Release any graphics resources that are being consumed by this
// property. The parameter window could be used to determine which graphic
// resources to release.
virtual void ReleaseGraphicsResources(vtkWindow *win);
// Description:
// Set/Get the shader program of the vtkProp. It can be set directly or
// by defining a Material.
vtkGetObjectMacro(PropProgram,vtkShaderProgram2);
void SetPropProgram(vtkShaderProgram2 *);
// Description:
// Get the object that can pass vertex attribute to a vtkShaderProgram2.
vtkGetObjectMacro(ShaderDeviceAdapter2,vtkGLSLShaderDeviceAdapter2);
// Description:
// Get the vtkShaderProgram2 object in use.
vtkGetObjectMacro(CurrentShaderProgram2,vtkShaderProgram2);
//ETX
// Description:
// Provide values to initialize shader variables.
// Useful to initialize shader variables that change over time
// (animation, GUI widgets inputs, etc. )
// - \p name - hardware name of the uniform variable
// - \p numVars - number of variables being set
// - \p x - values
virtual void AddShaderVariable(const char *name,int numVars,int *x);
virtual void AddShaderVariable(const char *name,int numVars,float *x);
virtual void AddShaderVariable(const char *name,int numVars,double *x);
protected:
vtkMesaProperty();
~vtkMesaProperty();
// Description:
// Load OpenGL extensions for multi texturing.
void LoadMultiTexturingExtensions(vtkRenderer* ren);
// Description:
// Read this->Material from new style shaders.
virtual void ReadFrameworkMaterial();
// Owned. Result of merging the shader program of the renderer and the
// PropProgram.
vtkShaderProgram2 *CachedShaderProgram2;
vtkShaderProgram2 *LastRendererShaderProgram2; // just a ref
vtkShaderProgram2 *LastPropProgram; // just a ref
vtkShaderProgram2 *PropProgram; // owned
// Point to CachedShaderProgram2 if Shading is on and the context supports
// it.
vtkShaderProgram2 *CurrentShaderProgram2;
vtkShader2 *DefaultMainVS;
vtkShader2 *DefaultMainFS;
vtkShader2 *DefaultPropVS;
vtkShader2 *DefaultPropFS;
bool UseDefaultMainVS;
bool UseDefaultMainFS;
bool UseDefaultPropVS;
bool UseDefaultPropFS;
vtkGLSLShaderDeviceAdapter2 *ShaderDeviceAdapter2;
private:
vtkMesaProperty(const vtkMesaProperty&); // Not implemented.
void operator=(const vtkMesaProperty&); // Not implemented.
};
#endif
| 32.68254 | 76 | 0.724624 | [
"render",
"object"
] |
657c185b223ac08ccdc77670eb251e426dd7135e | 22,009 | h | C | plugins/protein/src/GROLoader.h | voei/megamol | 569b7b58c1f9bc5405b79549b86f84009329f668 | [
"BSD-3-Clause"
] | 2 | 2020-10-16T10:15:37.000Z | 2021-01-21T13:06:00.000Z | plugins/protein/src/GROLoader.h | voei/megamol | 569b7b58c1f9bc5405b79549b86f84009329f668 | [
"BSD-3-Clause"
] | null | null | null | plugins/protein/src/GROLoader.h | voei/megamol | 569b7b58c1f9bc5405b79549b86f84009329f668 | [
"BSD-3-Clause"
] | 1 | 2021-01-28T01:19:54.000Z | 2021-01-28T01:19:54.000Z | /*
* GROLoader.h
*
* Copyright (C) 2011 by University of Stuttgart (VISUS).
* All rights reserved.
*/
#ifndef MMPROTEINPLUGIN_GROLOADER_H_INCLUDED
#define MMPROTEINPLUGIN_GROLOADER_H_INCLUDED
#if (defined(_MSC_VER) && (_MSC_VER > 1000))
#pragma once
#endif /* (defined(_MSC_VER) && (_MSC_VER > 1000)) */
#include "mmcore/param/ParamSlot.h"
#include "mmcore/CalleeSlot.h"
#include "mmcore/CallerSlot.h"
#include "vislib/Array.h"
#include "vislib/math/Vector.h"
#include "vislib/math/Cuboid.h"
#include "vislib/sys/RunnableThread.h"
#include "protein_calls/MolecularDataCall.h"
#include "ForceDataCall.h"
#include "Stride.h"
#include "mmcore/view/AnimDataModule.h"
#include "MDDriverConnector.h"
#include <fstream>
namespace megamol {
namespace protein {
/**
* Data source for GRO files
*/
class GROLoader : public megamol::core::view::AnimDataModule
{
public:
/** Ctor */
GROLoader(void);
/** Dtor */
virtual ~GROLoader(void);
/**
* Answer the name of this module.
*
* @return The name of this module.
*/
static const char *ClassName(void) {
return "GROLoader";
}
/**
* Answer a human readable description of this module.
*
* @return A human readable description of this module.
*/
static const char *Description(void) {
return "Offers protein data.";
}
/**
* Answers whether this module is available on the current system.
*
* @return 'true' if the module is available, 'false' otherwise.
*/
static bool IsAvailable(void) {
return true;
}
protected:
/**
* Implementation of 'Create'.
*
* @return 'true' on success, 'false' otherwise.
*/
virtual bool create(void);
/**
* Call callback to get the data
*
* @param c The calling call
*
* @return True on success
*/
bool getData( core::Call& call);
/**
* Call callback to get the extent of the data
*
* @param c The calling call
*
* @return True on success
*/
bool getExtent( core::Call& call);
/**
* Call callback to check whether data has been changed/needs update
*
* @param c The calling call
*
* @return whether data gas changed
*/
bool dataChanged(core::Call& call) {return false;/*return solventResidues.IsDirty();*/}
/**
* Implementation of 'Release'.
*/
virtual void release(void);
/**
* Creates a frame to be used in the frame cache. This method will be
* called from within 'initFrameCache'.
*
* @return The newly created frame object.
*/
virtual Frame* constructFrame(void) const;
/**
* Loads one frame of the data set into the given 'frame' object. This
* method may be invoked from another thread. You must take
* precausions in case you need synchronised access to shared
* ressources.
*
* @param frame The frame to be loaded.
* @param idx The index of the frame to be loaded.
*/
virtual void loadFrame(Frame *frame, unsigned int idx);
private:
/**
* Storage of frame data
*/
class Frame : public megamol::core::view::AnimDataModule::Frame {
public:
/** Ctor */
Frame(megamol::core::view::AnimDataModule& owner);
/** Dtor */
virtual ~Frame(void);
/**
* Encode a given int to a certain number of bits
* TODO
*/
void encodebits(char *outbuff, int bitsize, int bitoffset,
unsigned int num );
/**
* Encode three integers (representing one coordinate).
*
* @param outbuff buffer for the encoded integers
* @param num_of_bits the bitsize of the encoded integers
* @param sizes the ranges of value
* @param inbuff integers to be encoded
* @param bitoffset the bitoffset in the first byte
*/
bool encodeints(char *outbuff, int num_of_bits,
unsigned int sizes[], int inbuff[],
unsigned int bitoffset);
/**
* Encode the frame and write it to the given XTC-file.
*
* @param outfile The XTC-file.
* @param precision The precision of the encoded float coordinates.
*
* @return 'true' if the frame could be written
*/
bool writeFrame(std::ofstream *outfile, float precision,
float *minFloats, float *maxfloats);
/**
* Reads and decodes one frame of the data set from a given
* xtc-file.
*
* @param file Pointer to the current frame in the xtc-file
*/
void readFrame(std::fstream *file);
/**
* Calculates the number of bits needed to represent a given
* integer value
*
* @param The integer value
*
* @return The number of bits
*/
int sizeofint(int size);
/**
* Calculates the number of bits needed to represent 3 ints.
*
* @param sizes The range of the ints
*
* @return The needed number of bits
*/
unsigned int sizeofints(unsigned int sizes[]);
/**
* Decodes integers from a given byte-array by calculating the
* remainder and doing divisions with the maximum range.
*
* @param buff pointer to the byte buffer
* @param offset the bit-offset within the first byte
* @param num_of_bits the total number of bits to decode
* @param sizes the range of the integers
* @param nums array of the decoded integers
*/
void decodeints(char *buff, int offset, int num_of_bits,
unsigned int sizes[], int nums[]);
/**
* Interprets a given bit array as an integer.
*
* @param buff pointer to the byte buffer
* @param offset the bit-offset within the first byte
* @param bitsize the total number of bits
*
* @return the decoded integer
*/
int decodebits(char *buff, int offset, int bitsize);
/**
* Reverse the order of bytes in a given char-array of 4 elements.
*
* @param num the char-array
*/
void changeByteOrder(char* num);
/**
* Set the frame Index.
*
* @param idx the index
*/
void setFrameIdx(int idx);
/**
* Test for equality
*
* @param rhs The right hand side operand
*
* @return true if this and rhs are equal
*/
bool operator==(const Frame& rhs);
/**
* Set the atom count.
*
* @param atomCnt The atom count
*/
inline void SetAtomCount( unsigned int atomCnt) {
this->atomCount = atomCnt;
this->atomPosition.SetCount( atomCnt*3);
this->bfactor.SetCount( atomCnt);
this->charge.SetCount( atomCnt);
this->occupancy.SetCount( atomCnt);
}
/**
* Get the atom count.
*
* @return The atom count.
*/
inline unsigned int AtomCount() const { return this->atomCount; }
/**
* Assign a position to the array of positions.
*/
bool SetAtomPosition( unsigned int idx, float x, float y, float z);
/**
* Assign a bfactor to the array of bfactors.
*/
bool SetAtomBFactor( unsigned int idx, float val);
/**
* Assign a charge to the array of charges.
*/
bool SetAtomCharge( unsigned int idx, float val);
/**
* Assign a occupancy to the array of occupancies.
*/
bool SetAtomOccupancy( unsigned int idx, float val);
/**
* Set the b-factor range.
*
* @param min The minimum b-factor.
* @param max The maximum b-factor.
*/
void SetBFactorRange( float min, float max) {
this->minBFactor = min; this->maxBFactor = max; }
/**
* Set the minimum b-factor.
*
* @param min The minimum b-factor.
*/
void SetMinBFactor( float min) { this->minBFactor = min; }
/**
* Set the maximum b-factor.
*
* @param max The maximum b-factor.
*/
void SetMaxBFactor( float max) { this->maxBFactor = max; }
/**
* Set the charge range.
*
* @param min The minimum charge.
* @param max The maximum charge.
*/
void SetChargeRange( float min, float max) {
this->minCharge = min; this->maxCharge = max; }
/**
* Set the minimum charge.
*
* @param min The minimum charge.
*/
void SetMinCharge( float min) { this->minCharge = min; }
/**
* Set the maximum charge.
*
* @param max The maximum charge.
*/
void SetMaxCharge( float max) { this->maxCharge = max; }
/**
* Set the occupancy range.
*
* @param min The minimum occupancy.
* @param max The maximum occupancy.
*/
void SetOccupancyRange( float min, float max) {
this->minOccupancy = min; this->maxOccupancy = max; }
/**
* Set the minimum occupancy.
*
* @param min The minimum occupancy.
*/
void SetMinOccupancy( float min) { this->minOccupancy = min; }
/**
* Set the maximum occupancy.
*
* @param max The maximum occupancy.
*/
void SetMaxOccupancy( float max) { this->maxOccupancy = max; }
/**
* Get a reference to the array of atom positions.
*
* @return The atom position array.
*/
const float* AtomPositions() { return this->atomPosition.PeekElements(); }
/**
* Get a reference to the array of atom b-factors.
*
* @return The atom b-factor array.
*/
float* AtomBFactor() { return &this->bfactor[0]; }
/**
* Get a reference to the array of atom charges.
*
* @return The atom charge array.
*/
const float* AtomCharge() { return this->charge.PeekElements(); }
/**
* Get a reference to the array of atom occupancies.
*
* @return The atom occupancy array.
*/
const float* AtomOccupancy() { return this->occupancy.PeekElements(); }
/**
* Get the maximum b-factor of this frame.
*
* @return The maximum b-factor.
*/
float MaxBFactor() const { return this->maxBFactor; }
/**
* Get the minimum b-factor of this frame.
*
* @return The minimum b-factor.
*/
float MinBFactor() const { return this->minBFactor; }
/**
* Get the maximum b-factor of this frame.
*
* @return The maximum b-factor.
*/
float MaxCharge() const { return this->maxCharge; }
/**
* Get the minimum charge of this frame.
*
* @return The minimum charge.
*/
float MinCharge() const { return this->minCharge; }
/**
* Get the maximum occupancy of this frame.
*
* @return The maximum occupancy.
*/
float MaxOccupancy() const { return this->maxOccupancy; }
/**
* Get the minimum occupancy of this frame.
*
* @return The minimum occupancy.
*/
float MinOccupancy() const { return this->minOccupancy; }
private:
/** The atom count */
unsigned int atomCount;
/** The atom positions */
vislib::Array<float> atomPosition;
/** The atom b-factors */
vislib::Array<float> bfactor;
/** The atom charges */
vislib::Array<float> charge;
/** The atom occupancy */
vislib::Array<float> occupancy;
/** The maximum b-factor */
float maxBFactor;
/** The minimum b-factor */
float minBFactor;
/** The maximum carge */
float maxCharge;
/** The minimum charge */
float minCharge;
/** The maximum occupancy */
float maxOccupancy;
/** The minimum occupancy */
float minOccupancy;
};
/**
* Helper class to unlock frame data when 'CallSimpleSphereData' is
* used.
*/
class Unlocker : public megamol::protein_calls::MolecularDataCall::Unlocker {
public:
/**
* Ctor.
*
* @param frame The frame to unlock
*/
Unlocker(Frame& frame) : megamol::protein_calls::MolecularDataCall::Unlocker(),
frame(&frame) {
// intentionally empty
}
/** Dtor. */
virtual ~Unlocker(void) {
this->Unlock();
ASSERT(this->frame == NULL);
}
/** Unlocks the data */
virtual void Unlock(void) {
if (this->frame != NULL) {
this->frame->Unlock();
this->frame = NULL; // DO NOT DELETE!
}
}
private:
/** The frame to unlock */
Frame *frame;
};
/**
* Loads a GRO file.
*
* @param filename The path to the file to load.
*/
void loadFile( const vislib::TString& filename);
/**
* Parse one atom entry.
*
* @param atomEntry The atom entry string.
* @param atom The number of the current atom.
* @param frame The number of the current frame.
*/
void parseAtomEntry( vislib::StringA &atomEntry, unsigned int atom, unsigned int frame, vislib::Array<vislib::TString>& solventResidueNames);
/**
* Get the radius of the element.
*
* @param name The name of the atom type.
* @param scaleFactor The scaling factor for the radius (default is 1)
* @return The radius of the element in nanometer.
*/
float getElementRadius( vislib::StringA name, float scaleFactor = 1.0f);
/**
* Get the color of the element.
*
* @param name The name of the atom type.
* @return The color of the element.
*/
vislib::math::Vector<unsigned char, 3> getElementColor( vislib::StringA name);
/**
* Parse one atom entry and set the position of the current atom entry
* to the frame.
*
* @param atomEntry The atom entry string.
* @param atom The number of the current atom.
* @param frame The number of the current frame.
*/
void setAtomPositionToFrame( vislib::StringA &atomEntry,
unsigned int atom, unsigned int frame);
/**
* Search for connections in the given residue and add them to the
* global connection array.
*
* @param resIdx The index of the residue.
* @param resIdx The index of the reference frame.
*/
void MakeResidueConnections( unsigned int resIdx, unsigned int frame);
/**
* Search for connections between two residues.
*
* @param resIdx0 The index of the first residue.
* @param resIdx1 The index of the second residue.
* @param resIdx The index of the reference frame.
*
* @return 'true' if connections were found, 'false' otherwise.
*/
bool MakeResidueConnections( unsigned int resIdx0, unsigned int resIdx1, unsigned int frame);
/**
* Check if the residue is an amino acid.
*
* @return 'true' if resName specifies an amino acid, 'false' otherwise.
*/
bool IsAminoAcid( vislib::StringA resName );
/**
* Reset all data containers.
*/
void resetAllData();
/**
* Read the number of frames from the XTC file
*
* @return 'true' if the file could be loaded, otherwise 'false'
*/
bool readNumXTCFrames();
/**
* Writes the frames of the current PDB-file (beginning with second
* frame) into a new compressed XTC-file.
*
* The PDB-file has to be fully loaded before because the data-sets
* bounding box is needed.
*
* @param filename The name of the output file.
*/
void writeToXtcFile(const vislib::TString& filename);
// -------------------- variables --------------------
/** The pdb file name slot */
core::param::ParamSlot groFilenameSlot;
/** The xtc file name slot */
core::param::ParamSlot xtcFilenameSlot;
/** The data callee slot */
core::CalleeSlot dataOutSlot;
/** caller slot */
core::CallerSlot forceDataCallerSlot;
/** The maximum frame slot */
core::param::ParamSlot maxFramesSlot;
/** The STRIDE usage flag slot */
core::param::ParamSlot strideFlagSlot;
/** slot to specify a ;-list of residues to be merged into separate chains ... */
core::param::ParamSlot solventResidues;
/** The MDDriver host address */
core::param::ParamSlot mDDHostAddressSlot;
/** The MDDriver port */
core::param::ParamSlot mDDPortSlot;
/** The MDDriver go/pause toggle */
core::param::ParamSlot mDDGoSlot;
/** The MDDriver transfer rate */
core::param::ParamSlot mDDTransferRateSlot;
/** The data */
vislib::Array<Frame*> data;
/** The bounding box */
vislib::math::Cuboid<float> bbox;
/** The data hash */
SIZE_T datahash;
/** Stores for each atom the index of its type */
vislib::Array<unsigned int> atomTypeIdx;
/* Residue index per atom - may be undefined (-1) */
vislib::Array<int> atomResidueIdx;
/** The array of atom types */
vislib::Array<megamol::protein_calls::MolecularDataCall::AtomType> atomType;
/** The array of residues */
vislib::Array<megamol::protein_calls::MolecularDataCall::Residue*> residue;
/** The array of residue type names */
vislib::Array<vislib::StringA> residueTypeName;
/** residue indices marked as solvent */
vislib::Array<unsigned int> solventResidueIdx;
/** The array of molecules */
vislib::Array<megamol::protein_calls::MolecularDataCall::Molecule> molecule;
/** The array of chains */
vislib::Array<megamol::protein_calls::MolecularDataCall::Chain> chain;
/**
* Stores the connectivity information (i.e. subsequent pairs of atom
* indices)
*/
vislib::Array<unsigned int> connectivity;
/** Stores the current residue sequence number while loading */
unsigned int resSeq;
/** Stores the current molecule count while loading */
unsigned int molIdx;
/** Stride secondary structure computation */
Stride *stride;
/** Flag whether secondary structure is available */
bool secStructAvailable;
// Temporary variables for molecular chains
vislib::Array<unsigned int> chainFirstRes;
vislib::Array<unsigned int> chainResCount;
vislib::Array<char> chainName;
vislib::Array<megamol::protein_calls::MolecularDataCall::Chain::ChainType> chainType;
char chainId;
/** the number of frames */
unsigned int numXTCFrames;
/** the byte offset of all frames */
vislib::Array<unsigned int> XTCFrameOffset;
/** Flag whether the current xtc-filename is valid */
bool xtcFileValid;
/** MDDriverLoader object for connecting to MDDriver */
vislib::sys::RunnableThread<MDDriverConnector>* mdd;
/** Per atom filter information to be used by MolecularDataCall */
vislib::Array<int> atomVisibility;
};
} /* end namespace protein */
} /* end namespace megamol */
#endif // MMPROTEINPLUGIN_GROLOADER_H_INCLUDED
| 31.351852 | 149 | 0.51747 | [
"object",
"vector"
] |
6581ad0cc4c7293bc4cb33d0c51cd1f81971147a | 2,155 | h | C | base/include/base/rtti/dynamic_types.h | pretty-wise/link | 16a4241c4978136d8c4bd1caab20bdf37df9caaf | [
"Unlicense"
] | null | null | null | base/include/base/rtti/dynamic_types.h | pretty-wise/link | 16a4241c4978136d8c4bd1caab20bdf37df9caaf | [
"Unlicense"
] | 5 | 2019-12-27T05:51:10.000Z | 2022-02-12T02:24:58.000Z | base/include/base/rtti/dynamic_types.h | pretty-wise/link | 16a4241c4978136d8c4bd1caab20bdf37df9caaf | [
"Unlicense"
] | null | null | null | /*
* Copywrite 2014-2015 Krzysztof Stasik. All rights reserved.
*/
#pragma once
#include "base/core/types.h"
#include "rtti/rtti.h"
#include <vector>
namespace Base {
/** Factory interface for creating instances viewed through BaseType. */
template <class BaseType> class BaseTypeCreator {
public:
  /** Virtual dtor so derived creators can be destroyed through the base. */
  virtual ~BaseTypeCreator() {}
  /** Heap-allocating factory hook; the base implementation creates nothing. */
  virtual BaseType *operator()() { return nullptr; }
  /** Placement-new factory hook; the base implementation creates nothing. */
  virtual BaseType *placement_new(s8 * /*mem*/, size_t /*mem_size*/) {
    return nullptr;
  }
};
/** Concrete factory that creates instances of T through BaseType. */
template <class T, class BaseType>
class TypeCreator : public BaseTypeCreator<BaseType> {
public:
  /** Heap-allocates a default-constructed T, returned via its base type. */
  BaseType *operator()() { return new T(); }
  /**
   * Constructs a T inside the caller-supplied buffer via placement new.
   * The buffer must be exactly sizeof(T) bytes large.
   */
  BaseType *placement_new(s8 *buffer, size_t buffer_size) {
    BASE_ASSERT(buffer_size == sizeof(T));
    return new (buffer) T();
  }
};
template <class BaseType> class DynamicTypes {
public:
~DynamicTypes() {
// todo:
// for( std::vector<class_data>::iterator it = m_indices.begin(); it !=
// m_indices.end(); ++it )
{
// delete (*it).creator;
}
}
template <class T> void RegisterType() {
const Rtti &rtti = T::TYPE;
class_data cdata;
cdata.type_name = rtti.GetTypeName();
cdata.type_size = sizeof(T);
cdata.creator = new TypeCreator<T, BaseType>();
m_indices.push_back(cdata);
}
u32 TypeIndex(StringId type_name) {
for(u32 i = 0; i < m_indices.size(); ++i) {
if(m_indices[i].type_name == type_name)
return i;
}
BASE_ASSERT(false, "unknown message %s", type_name.c_str());
return 0;
}
size_t TypeSize(u32 index) {
BASE_ASSERT(index < m_indices.size(), "index out of scope");
return m_indices[index].type_size;
}
StringId TypeName(u32 index) {
BASE_ASSERT(index < m_indices.size(), "index out of scope");
return m_indices[index].type_name;
}
BaseType *Create(u32 index) { return (*m_indices[index].creator)(); }
BaseType *Create(u32 index, void *mem, size_t mem_size) {
m_indices[index].creator->placement_new(mem, mem_size);
}
u32 Count() const { return m_indices.size(); }
private:
struct class_data {
StringId type_name;
size_t type_size;
int creator_index;
BaseTypeCreator<BaseType> *creator;
};
std::vector<class_data> m_indices;
};
} // namespace Base
| 23.423913 | 79 | 0.659861 | [
"vector"
] |
658234e571d63b361a4a4b869459224d31fada7b | 5,635 | h | C | objc4-818.2/runtime/objc-weak.h | hailong123/Objc4-Debug | 602e1aea2cb58d164fe1cc508393be823c304977 | [
"MIT"
] | null | null | null | objc4-818.2/runtime/objc-weak.h | hailong123/Objc4-Debug | 602e1aea2cb58d164fe1cc508393be823c304977 | [
"MIT"
] | null | null | null | objc4-818.2/runtime/objc-weak.h | hailong123/Objc4-Debug | 602e1aea2cb58d164fe1cc508393be823c304977 | [
"MIT"
] | null | null | null | /*
* Copyright (c) 2010-2011 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#ifndef _OBJC_WEAK_H_
#define _OBJC_WEAK_H_
#include <objc/objc.h>
#include "objc-config.h"
__BEGIN_DECLS
/*
The weak table is a hash table governed by a single spin lock.
An allocated blob of memory, most often an object, but under GC any such
allocation, may have its address stored in a __weak marked storage location
through use of compiler generated write-barriers or hand coded uses of the
register weak primitive. Associated with the registration can be a callback
block for the case when one of the allocated chunks of memory is reclaimed.
The table is hashed on the address of the allocated memory. When __weak
marked memory changes its reference, we count on the fact that we can still
see its previous reference.
So, in the hash table, indexed by the weakly referenced item, is a list of
all locations where this address is currently being stored.
For ARC, we also keep track of whether an arbitrary object is being
deallocated by briefly placing it in the table just prior to invoking
dealloc, and removing it via objc_clear_deallocating just prior to memory
reclamation.
*/
// The address of a __weak variable.
// These pointers are stored disguised so memory analysis tools
// don't see lots of interior pointers from the weak table into objects.
typedef DisguisedPtr<objc_object *> weak_referrer_t;
#if __LP64__
#define PTR_MINUS_2 62
#else
#define PTR_MINUS_2 30
#endif
/**
* The internal structure stored in the weak references table.
* It maintains and stores
* a hash set of weak references pointing to an object.
* If out_of_line_ness != REFERRERS_OUT_OF_LINE then the set
* is instead a small inline array.
*/
#define WEAK_INLINE_COUNT 4
// out_of_line_ness field overlaps with the low two bits of inline_referrers[1].
// inline_referrers[1] is a DisguisedPtr of a pointer-aligned address.
// The low two bits of a pointer-aligned DisguisedPtr will always be 0b00
// (disguised nil or 0x80..00) or 0b11 (any other address).
// Therefore out_of_line_ness == 0b10 is used to mark the out-of-line state.
#define REFERRERS_OUT_OF_LINE 2
// Entry in the weak table. It is itself a small hash structure whose
// elements are the addresses of __weak variables (pointers to the weak
// pointers). By manipulating these pointer-to-pointer slots, every __weak
// reference to the object can be set to nil when the object is destroyed.
struct weak_entry_t {
    DisguisedPtr<objc_object> referent; // the weakly referenced object
    // Union: the set of locations weakly referencing the object. With at
    // most WEAK_INLINE_COUNT (4) referrers the fixed inline_referrers
    // array is used; with more, the dynamically sized hash array
    // 'referrers' takes over.
    union {
        struct {
            weak_referrer_t *referrers; // hash array of addresses of the weak pointers to this object
            uintptr_t out_of_line_ness : 2; // flag: the dynamic hash array is in use
            uintptr_t num_refs : PTR_MINUS_2; // number of elements in the hash array
            uintptr_t mask; // hash array length - 1; participates in the hash computation
            uintptr_t max_hash_displacement; // max hash collisions that can occur; used to detect logic errors
        };
        struct {
            // out_of_line_ness field is low bits of inline_referrers[1]
            weak_referrer_t inline_referrers[WEAK_INLINE_COUNT];
        };
    };
    // True when the out-of-line (dynamically allocated) referrer array is in use.
    bool out_of_line() {
        return (out_of_line_ness == REFERRERS_OUT_OF_LINE);
    }
    // Bitwise assignment via memcpy of the whole entry.
    weak_entry_t& operator=(const weak_entry_t& other) {
        memcpy(this, &other, sizeof(other));
        return *this;
    }
    // Constructor: stores the first referrer and nils out the remaining
    // slots of the inline (fixed-size) array.
    weak_entry_t(objc_object *newReferent, objc_object **newReferrer)
        : referent(newReferent)
    {
        inline_referrers[0] = newReferrer;
        for (int i = 1; i < WEAK_INLINE_COUNT; i++) {
            inline_referrers[i] = nil;
        }
    }
};
/**
 * The global weak references table. Stores object ids as keys,
 * and weak_entry_t structs as their values.
 */
struct weak_table_t {
    weak_entry_t *weak_entries; // hash array holding the weak_entry_t records for weakly referenced objects
    size_t num_entries; // number of elements stored in the array
    uintptr_t mask; // hash array length - 1; participates in the hash computation (note: this is the array's capacity, not the element count - the array may have 64 slots while only 2 entries are stored)
    uintptr_t max_hash_displacement; // maximum possible hash collisions; used to detect logic errors (collisions in the table never exceed this value)
};
// Policy passed to weak_register_no_lock() for handling a referent that is
// currently deallocating (semantics suggested by the enumerator names; the
// implementations are not visible in this header - confirm in objc-weak.mm).
enum WeakRegisterDeallocatingOptions {
    ReturnNilIfDeallocating,
    CrashIfDeallocating,
    DontCheckDeallocating
};
/// Adds an (object, weak pointer) pair to the weak table.
id weak_register_no_lock(weak_table_t *weak_table, id referent,
id *referrer, WeakRegisterDeallocatingOptions deallocatingOptions);
/// Removes an (object, weak pointer) pair from the weak table.
void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);
#if DEBUG
/// Returns true if an object is weakly referenced somewhere.
bool weak_is_registered_no_lock(weak_table_t *weak_table, id referent);
#endif
/// Called on object destruction. Sets all remaining weak pointers to nil.
void weak_clear_no_lock(weak_table_t *weak_table, id referent);
__END_DECLS
#endif /* _OBJC_WEAK_H_ */
| 36.830065 | 100 | 0.738421 | [
"object"
] |
658ebea89403f5f3fe724f9bfffcb10714ac6999 | 521,360 | c | C | splikes/connections/calcium.c | bblais/Plasticnet | e450e56a9b993e361873b6a235fdcc55a5690abb | [
"MIT"
] | null | null | null | splikes/connections/calcium.c | bblais/Plasticnet | e450e56a9b993e361873b6a235fdcc55a5690abb | [
"MIT"
] | null | null | null | splikes/connections/calcium.c | bblais/Plasticnet | e450e56a9b993e361873b6a235fdcc55a5690abb | [
"MIT"
] | 1 | 2020-01-16T18:20:53.000Z | 2020-01-16T18:20:53.000Z | /* Generated by Cython 0.29.25 */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_25"
#define CYTHON_HEX_VERSION 0x001D19F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1)
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#if PY_MAJOR_VERSION < 3
#include "longintrepr.h"
#endif
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if PY_VERSION_HEX >= 0x030B00A1
static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f,
PyObject *code, PyObject *c, PyObject* n, PyObject *v,
PyObject *fv, PyObject *cell, PyObject* fn,
PyObject *name, int fline, PyObject *lnos) {
PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL;
const char *fn_cstr=NULL;
const char *name_cstr=NULL;
PyCodeObject* co=NULL;
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
if (!(kwds=PyDict_New())) goto end;
if (!(argcount=PyLong_FromLong(a))) goto end;
if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
if (!(posonlyargcount=PyLong_FromLong(0))) goto end;
if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
if (!(nlocals=PyLong_FromLong(l))) goto end;
if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
if (!(stacksize=PyLong_FromLong(s))) goto end;
if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
if (!(flags=PyLong_FromLong(f))) goto end;
if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too;
if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here
if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too;
Py_XDECREF((PyObject*)co);
co = (PyCodeObject*)call_result;
call_result = NULL;
if (0) {
cleanup_code_too:
Py_XDECREF((PyObject*)co);
co = NULL;
}
end:
Py_XDECREF(kwds);
Py_XDECREF(argcount);
Py_XDECREF(posonlyargcount);
Py_XDECREF(kwonlyargcount);
Py_XDECREF(nlocals);
Py_XDECREF(stacksize);
Py_XDECREF(replace);
Py_XDECREF(call_result);
Py_XDECREF(empty);
if (type) {
PyErr_Restore(type, value, traceback);
}
return co;
}
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
/* Local stand-in for CPython's PyAsyncMethods slot table (the async
 * protocol entry points), used when the Python headers do not provide
 * one; consumed via __Pyx_PyType_AsAsync above, which maps it onto
 * tp_as_async / tp_reserved depending on the Python version. */
typedef struct {
    unaryfunc am_await;  /* __await__ slot */
    unaryfunc am_aiter;  /* __aiter__ slot */
    unaryfunc am_anext;  /* __anext__ slot */
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
  /* Fallback for toolchains whose <math.h> lacks the NAN macro: filling
   * every byte of a float with 0xFF yields an all-ones bit pattern,
   * which on IEEE-754 targets is a (negative, quiet) NaN.  Byte-for-byte
   * equivalent to the original memset-based implementation. */
  float nan_value;
  unsigned char *raw = (unsigned char *)&nan_value;
  size_t byte_index;
  for (byte_index = 0; byte_index < sizeof(nan_value); ++byte_index) {
    raw[byte_index] = 0xFFu;
  }
  return nan_value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__splikes__connections__calcium
#define __PYX_HAVE_API__splikes__connections__calcium
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ndarrayobject.h"
#include "numpy/ndarraytypes.h"
#include "numpy/arrayscalars.h"
#include "numpy/ufuncobject.h"
/* NumPy API declarations from "numpy/__init__.pxd" */
#include "math.h"
#include "randomkit.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
/* One entry of the module's string-constant table: *p receives the
 * constructed Python string object for the C literal s of length n;
 * the flag fields select bytes vs str vs unicode construction and
 * interning.  NOTE(review): field semantics inferred from names and
 * typical Cython runtime usage -- confirm against __Pyx_InitStrings. */
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
       const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
  /* Bounds check "0 <= i < limit" with a single comparison: casting to
   * the unsigned size_t wraps any negative i to a huge value that then
   * fails the < test.  Deliberately preserves the original's behaviour
   * for negative limits as well (limit wraps the same way). */
  const size_t index_u = (size_t) i;
  const size_t limit_u = (size_t) limit;
  return index_u < limit_u;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
  /* Length of a zero-terminated Py_UNICODE buffer, excluding the
   * terminator -- the wide-character analogue of strlen().  Counts
   * code units, not code points. */
  size_t length = 0;
  while (u[length] != 0) {
    ++length;
  }
  return length;
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Module-init probe of sys.getdefaultencoding() (compiled only for
 * Python 2 builds with c_string_encoding=ascii).  Sets the global
 * __Pyx_sys_getdefaultencoding_not_ascii, and when the default encoding
 * is not literally "ascii", verifies that it round-trips all 128 ASCII
 * code points unchanged.  Returns 0 on success, -1 on failure with a
 * Python exception set by the failing C-API call. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
  PyObject* sys;
  PyObject* default_encoding = NULL;
  PyObject* ascii_chars_u = NULL;
  PyObject* ascii_chars_b = NULL;
  const char* default_encoding_c;
  sys = PyImport_ImportModule("sys");
  if (!sys) goto bad;
  default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
  /* sys is no longer needed; release it before any early exit. */
  Py_DECREF(sys);
  if (!default_encoding) goto bad;
  /* Borrowed pointer into default_encoding -- valid only while the
   * bytes object stays alive (it is DECREF'ed last, below). */
  default_encoding_c = PyBytes_AsString(default_encoding);
  if (!default_encoding_c) goto bad;
  if (strcmp(default_encoding_c, "ascii") == 0) {
    __Pyx_sys_getdefaultencoding_not_ascii = 0;
  } else {
    /* Non-"ascii" default: accept it only if encoding the 128 ASCII
     * code points with it reproduces the identical bytes. */
    char ascii_chars[128];
    int c;
    for (c = 0; c < 128; c++) {
      ascii_chars[c] = c;
    }
    __Pyx_sys_getdefaultencoding_not_ascii = 1;
    ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
    if (!ascii_chars_u) goto bad;
    ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
    if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
      PyErr_Format(
        PyExc_ValueError,
        "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
        default_encoding_c);
      goto bad;
    }
    Py_DECREF(ascii_chars_u);
    Py_DECREF(ascii_chars_b);
  }
  Py_DECREF(default_encoding);
  return 0;
bad:
  /* Error path: release whatever was acquired (XDECREF tolerates NULL);
   * the error indicator was set by the failing call above. */
  Py_XDECREF(default_encoding);
  Py_XDECREF(ascii_chars_u);
  Py_XDECREF(ascii_chars_b);
  return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Capture sys.getdefaultencoding() into the malloc'ed global
 * __PYX_DEFAULT_STRING_ENCODING (intentionally never freed; it lives
 * for the lifetime of the process).  Returns 0 on success, -1 on
 * failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
  PyObject* sys;
  PyObject* default_encoding = NULL;
  char* default_encoding_c;
  sys = PyImport_ImportModule("sys");
  if (!sys) goto bad;
  default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
  /* sys is no longer needed; release it before any early exit. */
  Py_DECREF(sys);
  if (!default_encoding) goto bad;
  default_encoding_c = PyBytes_AsString(default_encoding);
  if (!default_encoding_c) goto bad;
  /* Copy the name out of the bytes object so it survives the DECREF. */
  __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
  if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
  strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
  Py_DECREF(default_encoding);
  return 0;
bad:
  Py_XDECREF(default_encoding);
  return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
/* Source-file-name table for error reporting; indexed by the f_index
 * argument of __PYX_MARK_ERR_POS / __PYX_ERR (defined above), which
 * stores the selected name into __pyx_filename. */
static const char *__pyx_f[] = {
  "splikes/connections/calcium.pyx",
  "stringsource",
  "__init__.pxd",
  "type.pxd",
  "splikes/splikes.pxd",
};
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":690
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":691
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":692
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":693
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":697
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":698
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":699
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":700
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":704
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":705
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":714
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":715
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":716
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":718
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":719
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":720
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":722
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":723
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":725
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":726
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":727
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
struct __pyx_obj_7splikes_7splikes_group;
struct __pyx_obj_7splikes_7splikes_monitor;
struct __pyx_obj_7splikes_7splikes_simulation;
struct __pyx_obj_7splikes_7splikes_neuron;
struct __pyx_obj_7splikes_7splikes_connection;
struct __pyx_obj_7splikes_11connections_7calcium_calcium;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":729
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":730
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":731
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":733
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "splikes/splikes.pxd":29
* cdef double rande()
*
* cdef class group: # <<<<<<<<<<<<<<
* cdef public object save_attrs,save_data
* cdef public object name
*/
/* C layout of the extension type splikes.splikes.group
 * (cdef class group, splikes/splikes.pxd:29).  Field order is part of
 * the generated ABI -- do not reorder or hand-edit. */
struct __pyx_obj_7splikes_7splikes_group {
  PyObject_HEAD
  PyObject *save_attrs;  /* cdef public object save_attrs */
  PyObject *save_data;   /* cdef public object save_data */
  PyObject *name;        /* cdef public object name */
  int verbose;
};
/* "splikes/splikes.pxd":35
*
*
* cdef class monitor(group): # <<<<<<<<<<<<<<
* cdef public double time_to_next_save
* cdef public double save_interval
*/
/* C layout of splikes.splikes.monitor (cdef class monitor(group),
 * splikes/splikes.pxd:35).  Fields mirror the cdef declarations;
 * __pyx_vtab is the cpdef method table (update). */
struct __pyx_obj_7splikes_7splikes_monitor {
  struct __pyx_obj_7splikes_7splikes_group __pyx_base;  /* base-class sub-object, must be first */
  struct __pyx_vtabstruct_7splikes_7splikes_monitor *__pyx_vtab;
  double time_to_next_save;
  double save_interval;
  PyObject *container;
  PyObject *t;
  PyObject *values;
  PyObject *t_tmp;
  PyObject *values_tmp;
};
/* "splikes/splikes.pxd":43
* cpdef update(self,double t)
*
* cdef class simulation(group): # <<<<<<<<<<<<<<
* cdef public double dt
* cdef public double total_time
*/
/* C layout of splikes.splikes.simulation (cdef class simulation(group),
 * splikes/splikes.pxd:43).  Fields mirror the cdef declarations;
 * __pyx_vtab is the cpdef method table (_reset). */
struct __pyx_obj_7splikes_7splikes_simulation {
  struct __pyx_obj_7splikes_7splikes_group __pyx_base;  /* base-class sub-object, must be first */
  struct __pyx_vtabstruct_7splikes_7splikes_simulation *__pyx_vtab;
  double dt;
  double total_time;
  double start_time;
  double time_to_next_save;
  double time_to_next_filter;
  PyObject *monitors;
  PyObject *filters;
  int seed;
};
/* "splikes/splikes.pxd":52
* cpdef _reset(self)
*
* cdef class neuron(group): # <<<<<<<<<<<<<<
* cdef public int is_spike
* cdef public int post_count
*/
/* C layout of splikes.splikes.neuron (cdef class neuron(group),
 * splikes/splikes.pxd:52).  Fields mirror the cdef declarations;
 * __pyx_vtab is the cpdef method table (_reset, update). */
struct __pyx_obj_7splikes_7splikes_neuron {
  struct __pyx_obj_7splikes_7splikes_group __pyx_base;  /* base-class sub-object, must be first */
  struct __pyx_vtabstruct_7splikes_7splikes_neuron *__pyx_vtab;
  int is_spike;
  int post_count;
  PyObject *saved_spikes;
  double save_spikes_begin;
  double save_spikes_end;
  PyArrayObject *spiking;
  PyArrayObject *rate;
  int N;
  PyArrayObject *last_spike_time;
  PyObject *connections_pre;
  PyObject *connections_post;
  int num_pre;
  int num_post;
  PyObject *state_variable;
};
/* "splikes/splikes.pxd":67
*
*
* cdef class connection(group): # <<<<<<<<<<<<<<
* cdef public np.ndarray weights
* cdef public np.ndarray initial_weights
*/
/* C layout of splikes.splikes.connection (cdef class connection(group),
 * splikes/splikes.pxd:67).  Fields mirror the cdef declarations;
 * __pyx_vtab is the cpdef method table (_reset, update,
 * apply_weight_limits).  NOTE(review): W is a bare double* -- presumably
 * a view into the weights array; confirm lifetime in splikes.pyx. */
struct __pyx_obj_7splikes_7splikes_connection {
  struct __pyx_obj_7splikes_7splikes_group __pyx_base;  /* base-class sub-object, must be first */
  struct __pyx_vtabstruct_7splikes_7splikes_connection *__pyx_vtab;
  PyArrayObject *weights;
  PyArrayObject *initial_weights;
  int reset_to_initial;
  PyObject *initial_weight_range;
  double w_max;
  double w_min;
  struct __pyx_obj_7splikes_7splikes_neuron *pre;
  struct __pyx_obj_7splikes_7splikes_neuron *post;
  double *W;
  PyArrayObject *state;
  int use_state;
  PyObject *state_variable;
  double spike_scale;
};
/* "splikes/connections/calcium.pyx":57
* # - k_minus=0
* #
* cdef class calcium(connection): # <<<<<<<<<<<<<<
* cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
*/
/* C layout of splikes.connections.calcium.calcium
 * (cdef class calcium(connection), splikes/connections/calcium.pyx:57).
 * Scalar model parameters (cdef public double ...) followed by
 * per-connection numpy state arrays (cdef public np.ndarray ...);
 * declaration order is part of the generated layout -- do not reorder. */
struct __pyx_obj_7splikes_11connections_7calcium_calcium {
  struct __pyx_obj_7splikes_7splikes_connection __pyx_base;  /* base-class sub-object, must be first */
  /* --- cdef public double parameters --- */
  double g_t;
  double mg2;
  double mg1;
  double v_reversal;
  double tau_ca;
  double alpha2;
  double alpha1;
  double backspike_amplitude;
  double i_nmda_mu;
  double peak_backspike_fast;
  double peak_backspike_slow;
  double _lambda;
  double beta2;
  double beta1;
  double k_plus;
  double g_nmda_o;
  double tau_backspike_fast;
  double tau_backspike_slow;
  double eta_gamma0;
  double i_nmda_s;
  double tau_nmda_s;
  double Vo;
  double Vp;
  double i_nmda_f;
  double tau_nmda_f;
  double k_minus;
  /* --- cdef public np.ndarray state arrays --- */
  PyArrayObject *B;
  PyArrayObject *I_nmda;
  PyArrayObject *h;
  PyArrayObject *Ca;
  PyArrayObject *v_total;
  PyArrayObject *eta;
  PyArrayObject *g_nmda;
  PyArrayObject *v_backspike_fast;
  PyArrayObject *I_nmda_fast;
  PyArrayObject *I_nmda_slow;
  PyArrayObject *omega;
  PyArrayObject *v_backspike_slow;
};
/* "splikes/splikes.pxd":35
*
*
* cdef class monitor(group): # <<<<<<<<<<<<<<
* cdef public double time_to_next_save
* cdef public double save_interval
*/
/* Virtual method table for monitor: C entry point of the cpdef
 * update(self, double t).  __pyx_skip_dispatch is Cython's flag for
 * bypassing the Python-level override lookup when called from C. */
struct __pyx_vtabstruct_7splikes_7splikes_monitor {
  PyObject *(*update)(struct __pyx_obj_7splikes_7splikes_monitor *, double, int __pyx_skip_dispatch);
};
static struct __pyx_vtabstruct_7splikes_7splikes_monitor *__pyx_vtabptr_7splikes_7splikes_monitor;
/* "splikes/splikes.pxd":43
* cpdef update(self,double t)
*
* cdef class simulation(group): # <<<<<<<<<<<<<<
* cdef public double dt
* cdef public double total_time
*/
/* Virtual method table for simulation: C entry point of the cpdef
 * _reset(self). */
struct __pyx_vtabstruct_7splikes_7splikes_simulation {
  PyObject *(*_reset)(struct __pyx_obj_7splikes_7splikes_simulation *, int __pyx_skip_dispatch);
};
static struct __pyx_vtabstruct_7splikes_7splikes_simulation *__pyx_vtabptr_7splikes_7splikes_simulation;
/* "splikes/splikes.pxd":52
* cpdef _reset(self)
*
* cdef class neuron(group): # <<<<<<<<<<<<<<
* cdef public int is_spike
* cdef public int post_count
*/
/* Virtual method table for neuron: C entry points of the cpdef methods
 * _reset(self) and update(self, double t, simulation sim). */
struct __pyx_vtabstruct_7splikes_7splikes_neuron {
  PyObject *(*_reset)(struct __pyx_obj_7splikes_7splikes_neuron *, int __pyx_skip_dispatch);
  PyObject *(*update)(struct __pyx_obj_7splikes_7splikes_neuron *, double, struct __pyx_obj_7splikes_7splikes_simulation *, int __pyx_skip_dispatch);
};
static struct __pyx_vtabstruct_7splikes_7splikes_neuron *__pyx_vtabptr_7splikes_7splikes_neuron;
/* "splikes/splikes.pxd":67
*
*
* cdef class connection(group): # <<<<<<<<<<<<<<
* cdef public np.ndarray weights
* cdef public np.ndarray initial_weights
*/
/* Virtual method table for connection: C entry points of the cpdef
 * methods _reset(self), update(self, double t, simulation sim), and
 * apply_weight_limits(self). */
struct __pyx_vtabstruct_7splikes_7splikes_connection {
  PyObject *(*_reset)(struct __pyx_obj_7splikes_7splikes_connection *, int __pyx_skip_dispatch);
  PyObject *(*update)(struct __pyx_obj_7splikes_7splikes_connection *, double, struct __pyx_obj_7splikes_7splikes_simulation *, int __pyx_skip_dispatch);
  PyObject *(*apply_weight_limits)(struct __pyx_obj_7splikes_7splikes_connection *, int __pyx_skip_dispatch);
};
static struct __pyx_vtabstruct_7splikes_7splikes_connection *__pyx_vtabptr_7splikes_7splikes_connection;
/* "splikes/connections/calcium.pyx":57
* # - k_minus=0
* #
* cdef class calcium(connection): # <<<<<<<<<<<<<<
* cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
*/
/* Virtual method table for calcium: inherits connection's vtable
 * unchanged -- no additional cpdef methods are declared on calcium. */
struct __pyx_vtabstruct_7splikes_11connections_7calcium_calcium {
  struct __pyx_vtabstruct_7splikes_7splikes_connection __pyx_base;  /* base-class vtable, must be first */
};
static struct __pyx_vtabstruct_7splikes_11connections_7calcium_calcium *__pyx_vtabptr_7splikes_11connections_7calcium_calcium;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
/* Function table of the optional refcount-debugging helper ("refnanny"),
 * fetched at runtime by __Pyx_RefNannyImportAPI and stored in
 * __Pyx_RefNanny; the __Pyx_INCREF/DECREF/GOTREF/GIVEREF macros below
 * route through it only when CYTHON_REFNANNY is enabled. */
typedef struct {
  void (*INCREF)(void*, PyObject*, int);       /* ctx, obj, source line */
  void (*DECREF)(void*, PyObject*, int);
  void (*GOTREF)(void*, PyObject*, int);
  void (*GIVEREF)(void*, PyObject*, int);
  void* (*SetupContext)(const char*, int, const char*);  /* name, line, file -> ctx */
  void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
/* Attribute lookup: use the tp_getattro slot directly when type slots are
   available, otherwise fall back to the generic PyObject_GetAttr. */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
/* Look up a name in the builtins module (used when a global is missing). */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyDictVersioning.proto */
/*
 * Dict-version based lookup caching: CPython bumps ma_version_tag on every
 * dict mutation, so a cached lookup result stays valid as long as the
 * recorded version matches.  Without dict versions the cache degrades to a
 * plain lookup on every call.
 */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
    (version_var) = __PYX_GET_DICT_VERSION(dict);\
    (cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
        (VAR) = __pyx_dict_cached_value;\
    } else {\
        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
    }\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict)  (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);
#endif
/* PyFunctionFastCall.proto */
/*
 * Fast-path call helpers: when CYTHON_FAST_PYCALL is enabled, pure-Python
 * functions are invoked by writing arguments directly into a frame's
 * localsplus area instead of building an argument tuple.  The offset of
 * f_localsplus is computed at module init (__Pxy_PyFrame_Initialize_Offsets)
 * from PyFrame_Type.tp_basicsize.
 */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
/* Compile-time assertion expression: a negative array size forces an error
   when cond is false. */
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
    (sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
#if CYTHON_FAST_PYCALL
  static size_t __pyx_pyframe_localsplus_offset = 0;
  #include "frameobject.h"
  #define __Pxy_PyFrame_Initialize_Offsets()\
    ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
     (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
  #define __Pyx_PyFrame_GetLocalsplus(frame)\
    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif // CYTHON_FAST_PYCALL
#endif
/* PyObjectCall.proto */
/* Generic call wrapper; the CPython variant bypasses some argument checks. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
/* Call a METH_O C function with a single argument, no tuple packing. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyCFunctionFastCall.proto */
/* The non-fast fallback must never be reached, hence the assert(0). */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* GetModuleGlobalName.proto */
/*
 * Module-global lookup with per-call-site caching.  Each expansion keeps a
 * static (dict version, cached value) pair for the module dict __pyx_d;
 * while the dict version is unchanged the cached object is returned
 * directly (NewRef), otherwise the slow path re-resolves the name and
 * refreshes the cache.  Without dict versions every lookup goes through
 * __Pyx__GetModuleGlobalName.
 */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name)  {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name)  {\
    PY_UINT64_T __pyx_dict_version;\
    PyObject *__pyx_dict_cached_value;\
    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name)  (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* ExtTypeTest.proto */
/* Exact/subtype check for extension types; raises TypeError on mismatch. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* RaiseArgTupleInvalid.proto */
/* Raise TypeError for a wrong number of positional arguments. */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
/* Raise TypeError when a keyword duplicates a positional argument. */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
/* Match keyword arguments against the expected names; extras go to kwds2. */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
    const char* function_name);
/* ArgTypeTest.proto */
/* Fast inline type check for typed parameters; the slow path raises the
   error.  Bitwise | is intentional to avoid a branch on the common path. */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
    ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
        __Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyThreadStateGet.proto */
/* Cache the current PyThreadState in a local (__pyx_tstate) so repeated
   error-state accesses avoid the thread-state lookup; falls back to the
   public API when fast thread state is unavailable. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred()  __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred()  PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
/* Fetch/restore the pending exception directly on a cached thread state
   (fast builds) or via the public PyErr_Fetch/PyErr_Restore API. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
/* Implements the `raise` statement semantics (type/value/tb/cause). */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* GetTopmostException.proto */
/* Walk the exc_info stack to the innermost frame holding an exception
   (Python 3.7+ exception-state layout). */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
/* Save/restore the *handled* exception state around try/except blocks. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb)  __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb)  __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb)   PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb)  PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
/* Does the currently pending exception match `err`? */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
/* Move the pending exception into (type, value, tb), normalizing it —
   the C-level equivalent of entering an `except` clause. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb)  __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* CallNextTpDealloc.proto */
/* Chain to the next base type's tp_dealloc/tp_traverse/tp_clear slot that
   differs from the current one (used by generated extension-type slots). */
static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc);
/* CallNextTpTraverse.proto */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse);
/* CallNextTpClear.proto */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_dealloc);
/* TypeImport.proto */
/* Import an extension type from another module at init time, optionally
   checking that its struct size matches what this module was compiled
   against (Error / Warn / Ignore policy below). */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
   __Pyx_ImportType_CheckSize_Error = 0,
   __Pyx_ImportType_CheckSize_Warn = 1,
   __Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* GetVTable.proto */
/* Retrieve the C-method vtable pointer stored in a type's __pyx_vtable__. */
static void* __Pyx_GetVtable(PyObject *dict);
/* PyObject_GenericGetAttrNoDict.proto */
/* Pre-3.7 optimized generic getattr variants; 3.7+ CPython already has the
   fast path, so these alias PyObject_GenericGetAttr there. */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
/* Like GetAttrStr but returns NULL without setting AttributeError. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
/* Install __reduce__/__setstate__ pickling support on a generated type. */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* Import.proto */
/* Implements `import` with the given from-list and relative-import level. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
/* Decide whether generated tracebacks report the C source line. */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line)  (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
/*
 * Cache of synthesized PyCodeObjects used when building tracebacks for
 * C-implemented functions.  Entries are kept sorted by code_line so
 * lookups can bisect (__pyx_bisect_code_objects).
 */
typedef struct {
    PyCodeObject* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
};
/* Module-wide cache instance; starts empty and grows on demand. */
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
/* Append a synthetic frame (funcname/filename/line) to the current
   exception's traceback so errors point back into the .pyx source. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename);
/* GCCDiagnostics.proto */
/* GCC >= 4.6 supports the diagnostic push/pop pragmas used elsewhere. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* RealImag.proto */
/*
 * Accessors for the real/imaginary parts of a complex value, abstracting
 * over the three possible representations: C++ std::complex (methods),
 * C99 _Complex (__real__/__imag__), and Cython's own struct fallback
 * (.real/.imag members).
 */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    #define __Pyx_CREAL(z) ((z).real())
    #define __Pyx_CIMAG(z) ((z).imag())
  #else
    #define __Pyx_CREAL(z) (__real__(z))
    #define __Pyx_CIMAG(z) (__imag__(z))
  #endif
#else
    #define __Pyx_CREAL(z) ((z).real)
    #define __Pyx_CIMAG(z) ((z).imag)
#endif
/* Setters: std::complex needs its real(x)/imag(y) member functions on
   compilers where the parts are not assignable lvalues. */
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
        && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
/*
 * float-complex arithmetic.  With native complex support the operators map
 * directly onto the C/C++ complex type (std::conj/std::abs in C++, the
 * C99 conjf/cabsf/cpowf family in C); otherwise Cython supplies struct-based
 * implementations of the same operations, declared below.
 */
#if CYTHON_CCOMPLEX
    #define __Pyx_c_eq_float(a, b)   ((a)==(b))
    #define __Pyx_c_sum_float(a, b)  ((a)+(b))
    #define __Pyx_c_diff_float(a, b) ((a)-(b))
    #define __Pyx_c_prod_float(a, b) ((a)*(b))
    #define __Pyx_c_quot_float(a, b) ((a)/(b))
    #define __Pyx_c_neg_float(a)     (-(a))
  #ifdef __cplusplus
    #define __Pyx_c_is_zero_float(z) ((z)==(float)0)
    #define __Pyx_c_conj_float(z)    (::std::conj(z))
    #if 1
        #define __Pyx_c_abs_float(z)     (::std::abs(z))
        #define __Pyx_c_pow_float(a, b)  (::std::pow(a, b))
    #endif
  #else
    #define __Pyx_c_is_zero_float(z) ((z)==0)
    #define __Pyx_c_conj_float(z)    (conjf(z))
    #if 1
        #define __Pyx_c_abs_float(z)     (cabsf(z))
        #define __Pyx_c_pow_float(a, b)  (cpowf(a, b))
    #endif
  #endif
#else
    static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
    static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
    #if 1
        static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
        static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
    #endif
#endif
/* Arithmetic.proto */
/*
 * double-complex arithmetic — the double-precision twin of the float block
 * above (native complex operators / C99 conj-cabs-cpow / struct fallback).
 */
#if CYTHON_CCOMPLEX
    #define __Pyx_c_eq_double(a, b)   ((a)==(b))
    #define __Pyx_c_sum_double(a, b)  ((a)+(b))
    #define __Pyx_c_diff_double(a, b) ((a)-(b))
    #define __Pyx_c_prod_double(a, b) ((a)*(b))
    #define __Pyx_c_quot_double(a, b) ((a)/(b))
    #define __Pyx_c_neg_double(a)     (-(a))
  #ifdef __cplusplus
    #define __Pyx_c_is_zero_double(z) ((z)==(double)0)
    #define __Pyx_c_conj_double(z)    (::std::conj(z))
    #if 1
        #define __Pyx_c_abs_double(z)     (::std::abs(z))
        #define __Pyx_c_pow_double(a, b)  (::std::pow(a, b))
    #endif
  #else
    #define __Pyx_c_is_zero_double(z) ((z)==0)
    #define __Pyx_c_conj_double(z)    (conj(z))
    #if 1
        #define __Pyx_c_abs_double(z)     (cabs(z))
        #define __Pyx_c_pow_double(a, b)  (cpow(a, b))
    #endif
  #endif
#else
    static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
    static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
    #if 1
        static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
        static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
    #endif
#endif
/* CIntToPy.proto */
/* Overflow-safe conversions between C integers and Python int objects. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
/* Subtype and exception-match checks that walk tp_bases directly on
   CPython; other implementations fall back to the public API. */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
/* Warn at import time if the module was built against a different Python. */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
/* Intern the module's string-constant table at init. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* C-level (vtable) entry points for the 'calcium' cdef class; the Python
   wrappers forward here, and __pyx_skip_dispatch tells the function the
   call already came through the vtable (no need to re-check overrides). */
static PyObject *__pyx_f_7splikes_11connections_7calcium_7calcium__reset(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, int __pyx_skip_dispatch); /* proto*/
static PyObject *__pyx_f_7splikes_11connections_7calcium_7calcium_update(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, CYTHON_UNUSED double __pyx_v_t, struct __pyx_obj_7splikes_7splikes_simulation *__pyx_v_sim, int __pyx_skip_dispatch); /* proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
/* Type objects imported from numpy at module init (filled by TypeImport). */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_generic = 0;
static PyTypeObject *__pyx_ptype_5numpy_number = 0;
static PyTypeObject *__pyx_ptype_5numpy_integer = 0;
static PyTypeObject *__pyx_ptype_5numpy_signedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_unsignedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_inexact = 0;
static PyTypeObject *__pyx_ptype_5numpy_floating = 0;
static PyTypeObject *__pyx_ptype_5numpy_complexfloating = 0;
static PyTypeObject *__pyx_ptype_5numpy_flexible = 0;
static PyTypeObject *__pyx_ptype_5numpy_character = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
/* Module declarations from 'splikes.splikes' */
/* Extension types imported from the sibling splikes.splikes module. */
static PyTypeObject *__pyx_ptype_7splikes_7splikes_group = 0;
static PyTypeObject *__pyx_ptype_7splikes_7splikes_monitor = 0;
static PyTypeObject *__pyx_ptype_7splikes_7splikes_simulation = 0;
static PyTypeObject *__pyx_ptype_7splikes_7splikes_neuron = 0;
static PyTypeObject *__pyx_ptype_7splikes_7splikes_connection = 0;
/* Module declarations from 'cython' */
/* Module declarations from 'splikes.connections.calcium' */
static PyTypeObject *__pyx_ptype_7splikes_11connections_7calcium_calcium = 0;
/* Module-private helper sig(x, beta) defined later in this file. */
static PyObject *__pyx_f_7splikes_11connections_7calcium_sig(double, double); /*proto*/
#define __Pyx_MODULE_NAME "splikes.connections.calcium"
extern int __pyx_module_is_main_splikes__connections__calcium;
int __pyx_module_is_main_splikes__connections__calcium = 0;
/* Implementation of 'splikes.connections.calcium' */
/* Builtins resolved once at module init. */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_ImportError;
/*
 * String-constant table: the __pyx_k_* byte arrays hold the literal text
 * of every identifier and message the module uses; the __pyx_n_s_* /
 * __pyx_kp_s_* PyObject pointers below are the corresponding interned
 * Python strings, created from this table by __Pyx_InitStrings at init.
 */
static const char __pyx_k_t[] = "t";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_pre[] = "pre";
static const char __pyx_k_sim[] = "sim";
static const char __pyx_k_init[] = "__init__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_post[] = "post";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_float[] = "float";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_pylab[] = "pylab";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_reset[] = "_reset";
static const char __pyx_k_state[] = "state";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_calcium[] = "calcium";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_initial_weight_range[] = "initial_weight_range";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_self_W_cannot_be_converted_to_a[] = "self.W cannot be converted to a Python object for pickling";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
/* Interned Python string objects (populated at module init). */
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_n_s_calcium;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_float;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_init;
static PyObject *__pyx_n_s_initial_weight_range;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_post;
static PyObject *__pyx_n_s_pre;
static PyObject *__pyx_n_s_pylab;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_reset;
static PyObject *__pyx_kp_s_self_W_cannot_be_converted_to_a;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_sim;
static PyObject *__pyx_n_s_state;
static PyObject *__pyx_n_s_t;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_zeros;
/*
 * Forward declarations of the Python-level wrapper implementations
 * (__pyx_pf_*) for the 'calcium' cdef class: _reset/__init__/update plus
 * one __get__/__set__ pair per public attribute of the class.  Bodies
 * appear later in this generated file.
 */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium__reset(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2__init__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_pre, struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_post, PyObject *__pyx_v_initial_weight_range, PyObject *__pyx_v_state); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_4update(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, double __pyx_v_t, struct __pyx_obj_7splikes_7splikes_simulation *__pyx_v_sim); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_1B___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_1h___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3eta___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5omega___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value); /* proto */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_7splikes_11connections_7calcium_calcium(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_float_0_5;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
/* Late includes */
/* "splikes/connections/calcium.pyx":9
* cimport numpy as np
*
* cdef sig(double x,double beta): # <<<<<<<<<<<<<<
* return ((tanh(beta*x/2.0)+1.0)/2.0)
*
*/
/*
 * Cython-generated C implementation of the module-level cdef helper
 * `sig(x, beta)` (splikes/connections/calcium.pyx, line 9).
 *
 * Computes the logistic-style sigmoid (tanh(beta*x/2) + 1) / 2 in C
 * `double` arithmetic and boxes the result as a new Python float.
 *
 * Returns: a new reference to a PyFloat on success, or 0 (NULL) with a
 * Python exception set if PyFloat_FromDouble fails (allocation failure).
 * NOTE(review): this is machine-generated code — do not hand-edit the
 * logic; regenerate from the .pyx source instead.
 */
static PyObject *__pyx_f_7splikes_11connections_7calcium_sig(double __pyx_v_x, double __pyx_v_beta) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
/* Error-location bookkeeping used by __PYX_ERR for tracebacks. */
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("sig", 0);
/* "splikes/connections/calcium.pyx":10
 *
 * cdef sig(double x,double beta):
 *         return ((tanh(beta*x/2.0)+1.0)/2.0)             # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* Evaluate entirely in double precision, then box; the only failure
 * mode is the float allocation itself. */
__pyx_t_1 = PyFloat_FromDouble(((tanh(((__pyx_v_beta * __pyx_v_x) / 2.0)) + 1.0) / 2.0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* Transfer ownership of the new float into the return slot. */
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "splikes/connections/calcium.pyx":9
 * cimport numpy as np
 *
 * cdef sig(double x,double beta):             # <<<<<<<<<<<<<<
 *         return ((tanh(beta*x/2.0)+1.0)/2.0)
 *
 */
/* function exit code */
__pyx_L1_error:;
/* Error path: drop any temporary, record a traceback frame, return NULL. */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.sig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "splikes/connections/calcium.pyx":60
* cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
* cpdef _reset(self): # <<<<<<<<<<<<<<
* self.B=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1_reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_f_7splikes_11connections_7calcium_7calcium__reset(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, int __pyx_skip_dispatch) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_reset", 0);
/* Check if called by wrapper */
if (unlikely(__pyx_skip_dispatch)) ;
/* Check if overridden in Python */
else if (unlikely((Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0) || (Py_TYPE(((PyObject *)__pyx_v_self))->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
static PY_UINT64_T __pyx_tp_dict_version = __PYX_DICT_VERSION_INIT, __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
if (unlikely(!__Pyx_object_dict_version_matches(((PyObject *)__pyx_v_self), __pyx_tp_dict_version, __pyx_obj_dict_version))) {
PY_UINT64_T __pyx_type_dict_guard = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
#endif
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_reset); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (PyCFunction)(void*)__pyx_pw_7splikes_11connections_7calcium_7calcium_1_reset)) {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_t_1);
__pyx_t_3 = __pyx_t_1; __pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L0;
}
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
__pyx_tp_dict_version = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
__pyx_obj_dict_version = __Pyx_get_object_dict_version(((PyObject *)__pyx_v_self));
if (unlikely(__pyx_type_dict_guard != __pyx_tp_dict_version)) {
__pyx_tp_dict_version = __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
}
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
}
#endif
}
/* "splikes/connections/calcium.pyx":61
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
* cpdef _reset(self):
* self.B=np.zeros(self.post.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.h=np.zeros(self.post.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->B);
__Pyx_DECREF(((PyObject *)__pyx_v_self->B));
__pyx_v_self->B = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "splikes/connections/calcium.pyx":62
* cpdef _reset(self):
* self.B=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float) # <<<<<<<<<<<<<<
* self.h=np.zeros(self.post.N,dtype=np.float)
* self.Ca=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_5 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_4);
__Pyx_GOTREF(__pyx_v_self->I_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda));
__pyx_v_self->I_nmda = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "splikes/connections/calcium.pyx":63
* self.B=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.h=np.zeros(self.post.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.Ca=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_total=np.zeros(self.post.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->h);
__Pyx_DECREF(((PyObject *)__pyx_v_self->h));
__pyx_v_self->h = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "splikes/connections/calcium.pyx":64
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.h=np.zeros(self.post.N,dtype=np.float)
* self.Ca=np.zeros( (self.post.N,self.pre.N),dtype=np.float) # <<<<<<<<<<<<<<
* self.v_total=np.zeros(self.post.N,dtype=np.float)
* self.eta=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_5 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->Ca);
__Pyx_DECREF(((PyObject *)__pyx_v_self->Ca));
__pyx_v_self->Ca = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":65
* self.h=np.zeros(self.post.N,dtype=np.float)
* self.Ca=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_total=np.zeros(self.post.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.eta=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.g_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 65, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->v_total);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_total));
__pyx_v_self->v_total = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "splikes/connections/calcium.pyx":66
* self.Ca=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_total=np.zeros(self.post.N,dtype=np.float)
* self.eta=np.zeros( (self.post.N,self.pre.N),dtype=np.float) # <<<<<<<<<<<<<<
* self.g_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_fast=np.zeros(self.post.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_5 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 66, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_4);
__Pyx_GOTREF(__pyx_v_self->eta);
__Pyx_DECREF(((PyObject *)__pyx_v_self->eta));
__pyx_v_self->eta = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "splikes/connections/calcium.pyx":67
* self.v_total=np.zeros(self.post.N,dtype=np.float)
* self.eta=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.g_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float) # <<<<<<<<<<<<<<
* self.v_backspike_fast=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda_fast=np.zeros(self.pre.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->g_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->g_nmda));
__pyx_v_self->g_nmda = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "splikes/connections/calcium.pyx":68
* self.eta=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.g_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_fast=np.zeros(self.post.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.I_nmda_fast=np.zeros(self.pre.N,dtype=np.float)
* self.I_nmda_slow=np.zeros(self.pre.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_4);
__Pyx_GOTREF(__pyx_v_self->v_backspike_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_fast));
__pyx_v_self->v_backspike_fast = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "splikes/connections/calcium.pyx":69
* self.g_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_fast=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda_fast=np.zeros(self.pre.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.I_nmda_slow=np.zeros(self.pre.N,dtype=np.float)
* self.omega=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_self->I_nmda_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_fast));
__pyx_v_self->I_nmda_fast = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "splikes/connections/calcium.pyx":70
* self.v_backspike_fast=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda_fast=np.zeros(self.pre.N,dtype=np.float)
* self.I_nmda_slow=np.zeros(self.pre.N,dtype=np.float) # <<<<<<<<<<<<<<
* self.omega=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_slow=np.zeros(self.post.N,dtype=np.float)
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->I_nmda_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_slow));
__pyx_v_self->I_nmda_slow = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":71
* self.I_nmda_fast=np.zeros(self.pre.N,dtype=np.float)
* self.I_nmda_slow=np.zeros(self.pre.N,dtype=np.float)
* self.omega=np.zeros( (self.post.N,self.pre.N),dtype=np.float) # <<<<<<<<<<<<<<
* self.v_backspike_slow=np.zeros(self.post.N,dtype=np.float)
* connection._reset(self)
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.pre->N); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_v_self->omega);
__Pyx_DECREF(((PyObject *)__pyx_v_self->omega));
__pyx_v_self->omega = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "splikes/connections/calcium.pyx":72
* self.I_nmda_slow=np.zeros(self.pre.N,dtype=np.float)
* self.omega=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_slow=np.zeros(self.post.N,dtype=np.float) # <<<<<<<<<<<<<<
* connection._reset(self)
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_self->__pyx_base.post->N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 72, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->v_backspike_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_slow));
__pyx_v_self->v_backspike_slow = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":73
* self.omega=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
* self.v_backspike_slow=np.zeros(self.post.N,dtype=np.float)
* connection._reset(self) # <<<<<<<<<<<<<<
*
* def __init__(self,neuron pre,neuron post,initial_weight_range=None,state=None):
*/
__pyx_t_1 = __pyx_vtabptr_7splikes_7splikes_connection->_reset(((struct __pyx_obj_7splikes_7splikes_connection *)__pyx_v_self), 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":60
* cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
* cpdef _reset(self): # <<<<<<<<<<<<<<
* self.B=np.zeros(self.post.N,dtype=np.float)
* self.I_nmda=np.zeros( (self.post.N,self.pre.N),dtype=np.float)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("splikes.connections.calcium.calcium._reset", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1_reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1_reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("_reset (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium__reset(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium__reset(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_reset", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_f_7splikes_11connections_7calcium_7calcium__reset(__pyx_v_self, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium._reset", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "splikes/connections/calcium.pyx":75
* connection._reset(self)
*
* def __init__(self,neuron pre,neuron post,initial_weight_range=None,state=None): # <<<<<<<<<<<<<<
* connection.__init__(self,pre,post,initial_weight_range,state)
*
*/
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_pre = 0;
struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_post = 0;
PyObject *__pyx_v_initial_weight_range = 0;
PyObject *__pyx_v_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pre,&__pyx_n_s_post,&__pyx_n_s_initial_weight_range,&__pyx_n_s_state,0};
PyObject* values[4] = {0,0,0,0};
values[2] = ((PyObject *)Py_None);
values[3] = ((PyObject *)Py_None);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pre)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_post)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__init__", 0, 2, 4, 1); __PYX_ERR(0, 75, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_weight_range);
if (value) { values[2] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_state);
if (value) { values[3] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 75, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_pre = ((struct __pyx_obj_7splikes_7splikes_neuron *)values[0]);
__pyx_v_post = ((struct __pyx_obj_7splikes_7splikes_neuron *)values[1]);
__pyx_v_initial_weight_range = values[2];
__pyx_v_state = values[3];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 75, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pre), __pyx_ptype_7splikes_7splikes_neuron, 1, "pre", 0))) __PYX_ERR(0, 75, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_post), __pyx_ptype_7splikes_7splikes_neuron, 1, "post", 0))) __PYX_ERR(0, 75, __pyx_L1_error)
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2__init__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), __pyx_v_pre, __pyx_v_post, __pyx_v_initial_weight_range, __pyx_v_state);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2__init__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_pre, struct __pyx_obj_7splikes_7splikes_neuron *__pyx_v_post, PyObject *__pyx_v_initial_weight_range, PyObject *__pyx_v_state) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
/* "splikes/connections/calcium.pyx":76
*
* def __init__(self,neuron pre,neuron post,initial_weight_range=None,state=None):
* connection.__init__(self,pre,post,initial_weight_range,state) # <<<<<<<<<<<<<<
*
* self.g_t=-0.0045
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_7splikes_7splikes_connection), __pyx_n_s_init); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
__pyx_t_4 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
__pyx_t_4 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[6] = {__pyx_t_3, ((PyObject *)__pyx_v_self), ((PyObject *)__pyx_v_pre), ((PyObject *)__pyx_v_post), __pyx_v_initial_weight_range, __pyx_v_state};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 5+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[6] = {__pyx_t_3, ((PyObject *)__pyx_v_self), ((PyObject *)__pyx_v_pre), ((PyObject *)__pyx_v_post), __pyx_v_initial_weight_range, __pyx_v_state};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_4, 5+__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
{
__pyx_t_5 = PyTuple_New(5+__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (__pyx_t_3) {
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL;
}
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_4, ((PyObject *)__pyx_v_self));
__Pyx_INCREF(((PyObject *)__pyx_v_pre));
__Pyx_GIVEREF(((PyObject *)__pyx_v_pre));
PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_4, ((PyObject *)__pyx_v_pre));
__Pyx_INCREF(((PyObject *)__pyx_v_post));
__Pyx_GIVEREF(((PyObject *)__pyx_v_post));
PyTuple_SET_ITEM(__pyx_t_5, 2+__pyx_t_4, ((PyObject *)__pyx_v_post));
__Pyx_INCREF(__pyx_v_initial_weight_range);
__Pyx_GIVEREF(__pyx_v_initial_weight_range);
PyTuple_SET_ITEM(__pyx_t_5, 3+__pyx_t_4, __pyx_v_initial_weight_range);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 4+__pyx_t_4, __pyx_v_state);
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":78
* connection.__init__(self,pre,post,initial_weight_range,state)
*
* self.g_t=-0.0045 # <<<<<<<<<<<<<<
* self.mg2=3.57
* self.mg1=-0.062
*/
__pyx_v_self->g_t = -0.0045;
/* "splikes/connections/calcium.pyx":79
*
* self.g_t=-0.0045
* self.mg2=3.57 # <<<<<<<<<<<<<<
* self.mg1=-0.062
* self.v_reversal=130
*/
__pyx_v_self->mg2 = 3.57;
/* "splikes/connections/calcium.pyx":80
* self.g_t=-0.0045
* self.mg2=3.57
* self.mg1=-0.062 # <<<<<<<<<<<<<<
* self.v_reversal=130
* self.tau_ca=20
*/
__pyx_v_self->mg1 = -0.062;
/* "splikes/connections/calcium.pyx":81
* self.mg2=3.57
* self.mg1=-0.062
* self.v_reversal=130 # <<<<<<<<<<<<<<
* self.tau_ca=20
* self.alpha2=0.4
*/
__pyx_v_self->v_reversal = 130.0;
/* "splikes/connections/calcium.pyx":82
* self.mg1=-0.062
* self.v_reversal=130
* self.tau_ca=20 # <<<<<<<<<<<<<<
* self.alpha2=0.4
* self.alpha1=0.25
*/
__pyx_v_self->tau_ca = 20.0;
/* "splikes/connections/calcium.pyx":83
* self.v_reversal=130
* self.tau_ca=20
* self.alpha2=0.4 # <<<<<<<<<<<<<<
* self.alpha1=0.25
* self.backspike_amplitude=60
*/
__pyx_v_self->alpha2 = 0.4;
/* "splikes/connections/calcium.pyx":84
* self.tau_ca=20
* self.alpha2=0.4
* self.alpha1=0.25 # <<<<<<<<<<<<<<
* self.backspike_amplitude=60
* self.i_nmda_mu=0.7
*/
__pyx_v_self->alpha1 = 0.25;
/* "splikes/connections/calcium.pyx":85
* self.alpha2=0.4
* self.alpha1=0.25
* self.backspike_amplitude=60 # <<<<<<<<<<<<<<
* self.i_nmda_mu=0.7
* self.peak_backspike_fast=0.75
*/
__pyx_v_self->backspike_amplitude = 60.0;
/* "splikes/connections/calcium.pyx":86
* self.alpha1=0.25
* self.backspike_amplitude=60
* self.i_nmda_mu=0.7 # <<<<<<<<<<<<<<
* self.peak_backspike_fast=0.75
* self.peak_backspike_slow=0.25
*/
__pyx_v_self->i_nmda_mu = 0.7;
/* "splikes/connections/calcium.pyx":87
* self.backspike_amplitude=60
* self.i_nmda_mu=0.7
* self.peak_backspike_fast=0.75 # <<<<<<<<<<<<<<
* self.peak_backspike_slow=0.25
* self._lambda=0
*/
__pyx_v_self->peak_backspike_fast = 0.75;
/* "splikes/connections/calcium.pyx":88
* self.i_nmda_mu=0.7
* self.peak_backspike_fast=0.75
* self.peak_backspike_slow=0.25 # <<<<<<<<<<<<<<
* self._lambda=0
* self.beta2=20
*/
__pyx_v_self->peak_backspike_slow = 0.25;
/* "splikes/connections/calcium.pyx":89
* self.peak_backspike_fast=0.75
* self.peak_backspike_slow=0.25
* self._lambda=0 # <<<<<<<<<<<<<<
* self.beta2=20
* self.beta1=60
*/
__pyx_v_self->_lambda = 0.0;
/* "splikes/connections/calcium.pyx":90
* self.peak_backspike_slow=0.25
* self._lambda=0
* self.beta2=20 # <<<<<<<<<<<<<<
* self.beta1=60
* self.k_plus=0
*/
__pyx_v_self->beta2 = 20.0;
/* "splikes/connections/calcium.pyx":91
* self._lambda=0
* self.beta2=20
* self.beta1=60 # <<<<<<<<<<<<<<
* self.k_plus=0
* self.g_nmda_o=-0.0025
*/
__pyx_v_self->beta1 = 60.0;
/* "splikes/connections/calcium.pyx":92
* self.beta2=20
* self.beta1=60
* self.k_plus=0 # <<<<<<<<<<<<<<
* self.g_nmda_o=-0.0025
* self.tau_backspike_fast=3
*/
__pyx_v_self->k_plus = 0.0;
/* "splikes/connections/calcium.pyx":93
* self.beta1=60
* self.k_plus=0
* self.g_nmda_o=-0.0025 # <<<<<<<<<<<<<<
* self.tau_backspike_fast=3
* self.tau_backspike_slow=30
*/
__pyx_v_self->g_nmda_o = -0.0025;
/* "splikes/connections/calcium.pyx":94
* self.k_plus=0
* self.g_nmda_o=-0.0025
* self.tau_backspike_fast=3 # <<<<<<<<<<<<<<
* self.tau_backspike_slow=30
* self.eta_gamma0=0.02
*/
__pyx_v_self->tau_backspike_fast = 3.0;
/* "splikes/connections/calcium.pyx":95
* self.g_nmda_o=-0.0025
* self.tau_backspike_fast=3
* self.tau_backspike_slow=30 # <<<<<<<<<<<<<<
* self.eta_gamma0=0.02
* self.i_nmda_s=0.25
*/
__pyx_v_self->tau_backspike_slow = 30.0;
/* "splikes/connections/calcium.pyx":96
* self.tau_backspike_fast=3
* self.tau_backspike_slow=30
* self.eta_gamma0=0.02 # <<<<<<<<<<<<<<
* self.i_nmda_s=0.25
* self.tau_nmda_s=200
*/
__pyx_v_self->eta_gamma0 = 0.02;
/* "splikes/connections/calcium.pyx":97
* self.tau_backspike_slow=30
* self.eta_gamma0=0.02
* self.i_nmda_s=0.25 # <<<<<<<<<<<<<<
* self.tau_nmda_s=200
* self.Vo=-65
*/
__pyx_v_self->i_nmda_s = 0.25;
/* "splikes/connections/calcium.pyx":98
* self.eta_gamma0=0.02
* self.i_nmda_s=0.25
* self.tau_nmda_s=200 # <<<<<<<<<<<<<<
* self.Vo=-65
* self.Vp=2
*/
__pyx_v_self->tau_nmda_s = 200.0;
/* "splikes/connections/calcium.pyx":99
* self.i_nmda_s=0.25
* self.tau_nmda_s=200
* self.Vo=-65 # <<<<<<<<<<<<<<
* self.Vp=2
* self.i_nmda_f=0.75
*/
__pyx_v_self->Vo = -65.0;
/* "splikes/connections/calcium.pyx":100
* self.tau_nmda_s=200
* self.Vo=-65
* self.Vp=2 # <<<<<<<<<<<<<<
* self.i_nmda_f=0.75
* self.tau_nmda_f=50
*/
__pyx_v_self->Vp = 2.0;
/* "splikes/connections/calcium.pyx":101
* self.Vo=-65
* self.Vp=2
* self.i_nmda_f=0.75 # <<<<<<<<<<<<<<
* self.tau_nmda_f=50
* self.k_minus=0
*/
__pyx_v_self->i_nmda_f = 0.75;
/* "splikes/connections/calcium.pyx":102
* self.Vp=2
* self.i_nmda_f=0.75
* self.tau_nmda_f=50 # <<<<<<<<<<<<<<
* self.k_minus=0
* self._reset()
*/
__pyx_v_self->tau_nmda_f = 50.0;
/* "splikes/connections/calcium.pyx":103
* self.i_nmda_f=0.75
* self.tau_nmda_f=50
* self.k_minus=0 # <<<<<<<<<<<<<<
* self._reset()
*
*/
__pyx_v_self->k_minus = 0.0;
/* "splikes/connections/calcium.pyx":104
* self.tau_nmda_f=50
* self.k_minus=0
* self._reset() # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_7splikes_11connections_7calcium_calcium *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._reset(((struct __pyx_obj_7splikes_7splikes_connection *)__pyx_v_self), 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":75
* connection._reset(self)
*
* def __init__(self,neuron pre,neuron post,initial_weight_range=None,state=None): # <<<<<<<<<<<<<<
* connection.__init__(self,pre,post,initial_weight_range,state)
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "splikes/connections/calcium.pyx":108
* @cython.cdivision(True)
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* cpdef update(self,double t,simulation sim): # <<<<<<<<<<<<<<
* cdef int __i,__j
*
*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5update(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_f_7splikes_11connections_7calcium_7calcium_update(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, CYTHON_UNUSED double __pyx_v_t, struct __pyx_obj_7splikes_7splikes_simulation *__pyx_v_sim, int __pyx_skip_dispatch) {
int __pyx_v___i;
int __pyx_v___j;
double *__pyx_v_B;
double *__pyx_v_I_nmda;
double *__pyx_v_h;
double *__pyx_v_Ca;
double *__pyx_v_v_total;
double *__pyx_v_eta;
double *__pyx_v_g_nmda;
double *__pyx_v_v_backspike_fast;
double *__pyx_v_I_nmda_fast;
double *__pyx_v_I_nmda_slow;
double *__pyx_v_omega;
double *__pyx_v_v_backspike_slow;
double __pyx_v_g_t;
double __pyx_v_mg2;
double __pyx_v_mg1;
double __pyx_v_v_reversal;
double __pyx_v_tau_ca;
double __pyx_v_alpha2;
double __pyx_v_alpha1;
double __pyx_v_backspike_amplitude;
double __pyx_v_i_nmda_mu;
double __pyx_v_peak_backspike_fast;
double __pyx_v_peak_backspike_slow;
double __pyx_v__lambda;
double __pyx_v_beta2;
double __pyx_v_beta1;
double __pyx_v_k_plus;
CYTHON_UNUSED double __pyx_v_g_nmda_o;
double __pyx_v_tau_backspike_fast;
double __pyx_v_tau_backspike_slow;
double __pyx_v_eta_gamma0;
double __pyx_v_i_nmda_s;
double __pyx_v_tau_nmda_s;
double __pyx_v_Vo;
double __pyx_v_Vp;
double __pyx_v_i_nmda_f;
double __pyx_v_tau_nmda_f;
double __pyx_v_k_minus;
double *__pyx_v_W;
CYTHON_UNUSED double *__pyx_v_post_rate;
CYTHON_UNUSED double *__pyx_v_pre_rate;
int *__pyx_v_pre;
int *__pyx_v_post;
int __pyx_v___wi;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
double __pyx_t_8;
double *__pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("update", 0);
/* Check if called by wrapper */
if (unlikely(__pyx_skip_dispatch)) ;
/* Check if overridden in Python */
else if (unlikely((Py_TYPE(((PyObject *)__pyx_v_self))->tp_dictoffset != 0) || (Py_TYPE(((PyObject *)__pyx_v_self))->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
static PY_UINT64_T __pyx_tp_dict_version = __PYX_DICT_VERSION_INIT, __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
if (unlikely(!__Pyx_object_dict_version_matches(((PyObject *)__pyx_v_self), __pyx_tp_dict_version, __pyx_obj_dict_version))) {
PY_UINT64_T __pyx_type_dict_guard = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
#endif
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_update); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (PyCFunction)(void*)__pyx_pw_7splikes_11connections_7calcium_7calcium_5update)) {
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_1);
__pyx_t_4 = __pyx_t_1; __pyx_t_5 = NULL;
__pyx_t_6 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
__pyx_t_6 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, ((PyObject *)__pyx_v_sim)};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, ((PyObject *)__pyx_v_sim)};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
#endif
{
__pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_sim));
__Pyx_GIVEREF(((PyObject *)__pyx_v_sim));
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, ((PyObject *)__pyx_v_sim));
__pyx_t_3 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L0;
}
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
__pyx_tp_dict_version = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self));
__pyx_obj_dict_version = __Pyx_get_object_dict_version(((PyObject *)__pyx_v_self));
if (unlikely(__pyx_type_dict_guard != __pyx_tp_dict_version)) {
__pyx_tp_dict_version = __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT;
}
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
}
#endif
}
/* "splikes/connections/calcium.pyx":111
* cdef int __i,__j
*
* cdef double *B=<double *>self.B.data # <<<<<<<<<<<<<<
* cdef double *I_nmda=<double *>self.I_nmda.data
* cdef double *h=<double *>self.h.data
*/
__pyx_v_B = ((double *)__pyx_v_self->B->data);
/* "splikes/connections/calcium.pyx":112
*
* cdef double *B=<double *>self.B.data
* cdef double *I_nmda=<double *>self.I_nmda.data # <<<<<<<<<<<<<<
* cdef double *h=<double *>self.h.data
* cdef double *Ca=<double *>self.Ca.data
*/
__pyx_v_I_nmda = ((double *)__pyx_v_self->I_nmda->data);
/* "splikes/connections/calcium.pyx":113
* cdef double *B=<double *>self.B.data
* cdef double *I_nmda=<double *>self.I_nmda.data
* cdef double *h=<double *>self.h.data # <<<<<<<<<<<<<<
* cdef double *Ca=<double *>self.Ca.data
* cdef double *v_total=<double *>self.v_total.data
*/
__pyx_v_h = ((double *)__pyx_v_self->h->data);
/* "splikes/connections/calcium.pyx":114
* cdef double *I_nmda=<double *>self.I_nmda.data
* cdef double *h=<double *>self.h.data
* cdef double *Ca=<double *>self.Ca.data # <<<<<<<<<<<<<<
* cdef double *v_total=<double *>self.v_total.data
* cdef double *eta=<double *>self.eta.data
*/
__pyx_v_Ca = ((double *)__pyx_v_self->Ca->data);
/* "splikes/connections/calcium.pyx":115
* cdef double *h=<double *>self.h.data
* cdef double *Ca=<double *>self.Ca.data
* cdef double *v_total=<double *>self.v_total.data # <<<<<<<<<<<<<<
* cdef double *eta=<double *>self.eta.data
* cdef double *g_nmda=<double *>self.g_nmda.data
*/
__pyx_v_v_total = ((double *)__pyx_v_self->v_total->data);
/* "splikes/connections/calcium.pyx":116
* cdef double *Ca=<double *>self.Ca.data
* cdef double *v_total=<double *>self.v_total.data
* cdef double *eta=<double *>self.eta.data # <<<<<<<<<<<<<<
* cdef double *g_nmda=<double *>self.g_nmda.data
* cdef double *v_backspike_fast=<double *>self.v_backspike_fast.data
*/
__pyx_v_eta = ((double *)__pyx_v_self->eta->data);
/* "splikes/connections/calcium.pyx":117
* cdef double *v_total=<double *>self.v_total.data
* cdef double *eta=<double *>self.eta.data
* cdef double *g_nmda=<double *>self.g_nmda.data # <<<<<<<<<<<<<<
* cdef double *v_backspike_fast=<double *>self.v_backspike_fast.data
* cdef double *I_nmda_fast=<double *>self.I_nmda_fast.data
*/
__pyx_v_g_nmda = ((double *)__pyx_v_self->g_nmda->data);
/* "splikes/connections/calcium.pyx":118
* cdef double *eta=<double *>self.eta.data
* cdef double *g_nmda=<double *>self.g_nmda.data
* cdef double *v_backspike_fast=<double *>self.v_backspike_fast.data # <<<<<<<<<<<<<<
* cdef double *I_nmda_fast=<double *>self.I_nmda_fast.data
* cdef double *I_nmda_slow=<double *>self.I_nmda_slow.data
*/
__pyx_v_v_backspike_fast = ((double *)__pyx_v_self->v_backspike_fast->data);
/* "splikes/connections/calcium.pyx":119
* cdef double *g_nmda=<double *>self.g_nmda.data
* cdef double *v_backspike_fast=<double *>self.v_backspike_fast.data
* cdef double *I_nmda_fast=<double *>self.I_nmda_fast.data # <<<<<<<<<<<<<<
* cdef double *I_nmda_slow=<double *>self.I_nmda_slow.data
* cdef double *omega=<double *>self.omega.data
*/
__pyx_v_I_nmda_fast = ((double *)__pyx_v_self->I_nmda_fast->data);
/* "splikes/connections/calcium.pyx":120
* cdef double *v_backspike_fast=<double *>self.v_backspike_fast.data
* cdef double *I_nmda_fast=<double *>self.I_nmda_fast.data
* cdef double *I_nmda_slow=<double *>self.I_nmda_slow.data # <<<<<<<<<<<<<<
* cdef double *omega=<double *>self.omega.data
* cdef double *v_backspike_slow=<double *>self.v_backspike_slow.data
*/
__pyx_v_I_nmda_slow = ((double *)__pyx_v_self->I_nmda_slow->data);
/* "splikes/connections/calcium.pyx":121
* cdef double *I_nmda_fast=<double *>self.I_nmda_fast.data
* cdef double *I_nmda_slow=<double *>self.I_nmda_slow.data
* cdef double *omega=<double *>self.omega.data # <<<<<<<<<<<<<<
* cdef double *v_backspike_slow=<double *>self.v_backspike_slow.data
* cdef double g_t=self.g_t
*/
__pyx_v_omega = ((double *)__pyx_v_self->omega->data);
/* "splikes/connections/calcium.pyx":122
* cdef double *I_nmda_slow=<double *>self.I_nmda_slow.data
* cdef double *omega=<double *>self.omega.data
* cdef double *v_backspike_slow=<double *>self.v_backspike_slow.data # <<<<<<<<<<<<<<
* cdef double g_t=self.g_t
* cdef double mg2=self.mg2
*/
__pyx_v_v_backspike_slow = ((double *)__pyx_v_self->v_backspike_slow->data);
/* "splikes/connections/calcium.pyx":123
* cdef double *omega=<double *>self.omega.data
* cdef double *v_backspike_slow=<double *>self.v_backspike_slow.data
* cdef double g_t=self.g_t # <<<<<<<<<<<<<<
* cdef double mg2=self.mg2
* cdef double mg1=self.mg1
*/
__pyx_t_8 = __pyx_v_self->g_t;
__pyx_v_g_t = __pyx_t_8;
/* "splikes/connections/calcium.pyx":124
* cdef double *v_backspike_slow=<double *>self.v_backspike_slow.data
* cdef double g_t=self.g_t
* cdef double mg2=self.mg2 # <<<<<<<<<<<<<<
* cdef double mg1=self.mg1
* cdef double v_reversal=self.v_reversal
*/
__pyx_t_8 = __pyx_v_self->mg2;
__pyx_v_mg2 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":125
* cdef double g_t=self.g_t
* cdef double mg2=self.mg2
* cdef double mg1=self.mg1 # <<<<<<<<<<<<<<
* cdef double v_reversal=self.v_reversal
* cdef double tau_ca=self.tau_ca
*/
__pyx_t_8 = __pyx_v_self->mg1;
__pyx_v_mg1 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":126
* cdef double mg2=self.mg2
* cdef double mg1=self.mg1
* cdef double v_reversal=self.v_reversal # <<<<<<<<<<<<<<
* cdef double tau_ca=self.tau_ca
* cdef double alpha2=self.alpha2
*/
__pyx_t_8 = __pyx_v_self->v_reversal;
__pyx_v_v_reversal = __pyx_t_8;
/* "splikes/connections/calcium.pyx":127
* cdef double mg1=self.mg1
* cdef double v_reversal=self.v_reversal
* cdef double tau_ca=self.tau_ca # <<<<<<<<<<<<<<
* cdef double alpha2=self.alpha2
* cdef double alpha1=self.alpha1
*/
__pyx_t_8 = __pyx_v_self->tau_ca;
__pyx_v_tau_ca = __pyx_t_8;
/* "splikes/connections/calcium.pyx":128
* cdef double v_reversal=self.v_reversal
* cdef double tau_ca=self.tau_ca
* cdef double alpha2=self.alpha2 # <<<<<<<<<<<<<<
* cdef double alpha1=self.alpha1
* cdef double backspike_amplitude=self.backspike_amplitude
*/
__pyx_t_8 = __pyx_v_self->alpha2;
__pyx_v_alpha2 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":129
* cdef double tau_ca=self.tau_ca
* cdef double alpha2=self.alpha2
* cdef double alpha1=self.alpha1 # <<<<<<<<<<<<<<
* cdef double backspike_amplitude=self.backspike_amplitude
* cdef double i_nmda_mu=self.i_nmda_mu
*/
__pyx_t_8 = __pyx_v_self->alpha1;
__pyx_v_alpha1 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":130
* cdef double alpha2=self.alpha2
* cdef double alpha1=self.alpha1
* cdef double backspike_amplitude=self.backspike_amplitude # <<<<<<<<<<<<<<
* cdef double i_nmda_mu=self.i_nmda_mu
* cdef double peak_backspike_fast=self.peak_backspike_fast
*/
__pyx_t_8 = __pyx_v_self->backspike_amplitude;
__pyx_v_backspike_amplitude = __pyx_t_8;
/* "splikes/connections/calcium.pyx":131
* cdef double alpha1=self.alpha1
* cdef double backspike_amplitude=self.backspike_amplitude
* cdef double i_nmda_mu=self.i_nmda_mu # <<<<<<<<<<<<<<
* cdef double peak_backspike_fast=self.peak_backspike_fast
* cdef double peak_backspike_slow=self.peak_backspike_slow
*/
__pyx_t_8 = __pyx_v_self->i_nmda_mu;
__pyx_v_i_nmda_mu = __pyx_t_8;
/* "splikes/connections/calcium.pyx":132
* cdef double backspike_amplitude=self.backspike_amplitude
* cdef double i_nmda_mu=self.i_nmda_mu
* cdef double peak_backspike_fast=self.peak_backspike_fast # <<<<<<<<<<<<<<
* cdef double peak_backspike_slow=self.peak_backspike_slow
* cdef double _lambda=self._lambda
*/
__pyx_t_8 = __pyx_v_self->peak_backspike_fast;
__pyx_v_peak_backspike_fast = __pyx_t_8;
/* "splikes/connections/calcium.pyx":133
* cdef double i_nmda_mu=self.i_nmda_mu
* cdef double peak_backspike_fast=self.peak_backspike_fast
* cdef double peak_backspike_slow=self.peak_backspike_slow # <<<<<<<<<<<<<<
* cdef double _lambda=self._lambda
* cdef double beta2=self.beta2
*/
__pyx_t_8 = __pyx_v_self->peak_backspike_slow;
__pyx_v_peak_backspike_slow = __pyx_t_8;
/* "splikes/connections/calcium.pyx":134
* cdef double peak_backspike_fast=self.peak_backspike_fast
* cdef double peak_backspike_slow=self.peak_backspike_slow
* cdef double _lambda=self._lambda # <<<<<<<<<<<<<<
* cdef double beta2=self.beta2
* cdef double beta1=self.beta1
*/
__pyx_t_8 = __pyx_v_self->_lambda;
__pyx_v__lambda = __pyx_t_8;
/* "splikes/connections/calcium.pyx":135
* cdef double peak_backspike_slow=self.peak_backspike_slow
* cdef double _lambda=self._lambda
* cdef double beta2=self.beta2 # <<<<<<<<<<<<<<
* cdef double beta1=self.beta1
* cdef double k_plus=self.k_plus
*/
__pyx_t_8 = __pyx_v_self->beta2;
__pyx_v_beta2 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":136
* cdef double _lambda=self._lambda
* cdef double beta2=self.beta2
* cdef double beta1=self.beta1 # <<<<<<<<<<<<<<
* cdef double k_plus=self.k_plus
* cdef double g_nmda_o=self.g_nmda_o
*/
__pyx_t_8 = __pyx_v_self->beta1;
__pyx_v_beta1 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":137
* cdef double beta2=self.beta2
* cdef double beta1=self.beta1
* cdef double k_plus=self.k_plus # <<<<<<<<<<<<<<
* cdef double g_nmda_o=self.g_nmda_o
* cdef double tau_backspike_fast=self.tau_backspike_fast
*/
__pyx_t_8 = __pyx_v_self->k_plus;
__pyx_v_k_plus = __pyx_t_8;
/* "splikes/connections/calcium.pyx":138
* cdef double beta1=self.beta1
* cdef double k_plus=self.k_plus
* cdef double g_nmda_o=self.g_nmda_o # <<<<<<<<<<<<<<
* cdef double tau_backspike_fast=self.tau_backspike_fast
* cdef double tau_backspike_slow=self.tau_backspike_slow
*/
__pyx_t_8 = __pyx_v_self->g_nmda_o;
__pyx_v_g_nmda_o = __pyx_t_8;
/* "splikes/connections/calcium.pyx":139
* cdef double k_plus=self.k_plus
* cdef double g_nmda_o=self.g_nmda_o
* cdef double tau_backspike_fast=self.tau_backspike_fast # <<<<<<<<<<<<<<
* cdef double tau_backspike_slow=self.tau_backspike_slow
* cdef double eta_gamma0=self.eta_gamma0
*/
__pyx_t_8 = __pyx_v_self->tau_backspike_fast;
__pyx_v_tau_backspike_fast = __pyx_t_8;
/* "splikes/connections/calcium.pyx":140
* cdef double g_nmda_o=self.g_nmda_o
* cdef double tau_backspike_fast=self.tau_backspike_fast
* cdef double tau_backspike_slow=self.tau_backspike_slow # <<<<<<<<<<<<<<
* cdef double eta_gamma0=self.eta_gamma0
* cdef double i_nmda_s=self.i_nmda_s
*/
__pyx_t_8 = __pyx_v_self->tau_backspike_slow;
__pyx_v_tau_backspike_slow = __pyx_t_8;
/* "splikes/connections/calcium.pyx":141
* cdef double tau_backspike_fast=self.tau_backspike_fast
* cdef double tau_backspike_slow=self.tau_backspike_slow
* cdef double eta_gamma0=self.eta_gamma0 # <<<<<<<<<<<<<<
* cdef double i_nmda_s=self.i_nmda_s
* cdef double tau_nmda_s=self.tau_nmda_s
*/
__pyx_t_8 = __pyx_v_self->eta_gamma0;
__pyx_v_eta_gamma0 = __pyx_t_8;
/* "splikes/connections/calcium.pyx":142
* cdef double tau_backspike_slow=self.tau_backspike_slow
* cdef double eta_gamma0=self.eta_gamma0
* cdef double i_nmda_s=self.i_nmda_s # <<<<<<<<<<<<<<
* cdef double tau_nmda_s=self.tau_nmda_s
* cdef double Vo=self.Vo
*/
__pyx_t_8 = __pyx_v_self->i_nmda_s;
__pyx_v_i_nmda_s = __pyx_t_8;
/* "splikes/connections/calcium.pyx":143
* cdef double eta_gamma0=self.eta_gamma0
* cdef double i_nmda_s=self.i_nmda_s
* cdef double tau_nmda_s=self.tau_nmda_s # <<<<<<<<<<<<<<
* cdef double Vo=self.Vo
* cdef double Vp=self.Vp
*/
__pyx_t_8 = __pyx_v_self->tau_nmda_s;
__pyx_v_tau_nmda_s = __pyx_t_8;
/* "splikes/connections/calcium.pyx":144
* cdef double i_nmda_s=self.i_nmda_s
* cdef double tau_nmda_s=self.tau_nmda_s
* cdef double Vo=self.Vo # <<<<<<<<<<<<<<
* cdef double Vp=self.Vp
* cdef double i_nmda_f=self.i_nmda_f
*/
__pyx_t_8 = __pyx_v_self->Vo;
__pyx_v_Vo = __pyx_t_8;
/* "splikes/connections/calcium.pyx":145
* cdef double tau_nmda_s=self.tau_nmda_s
* cdef double Vo=self.Vo
* cdef double Vp=self.Vp # <<<<<<<<<<<<<<
* cdef double i_nmda_f=self.i_nmda_f
* cdef double tau_nmda_f=self.tau_nmda_f
*/
__pyx_t_8 = __pyx_v_self->Vp;
__pyx_v_Vp = __pyx_t_8;
/* "splikes/connections/calcium.pyx":146
* cdef double Vo=self.Vo
* cdef double Vp=self.Vp
* cdef double i_nmda_f=self.i_nmda_f # <<<<<<<<<<<<<<
* cdef double tau_nmda_f=self.tau_nmda_f
* cdef double k_minus=self.k_minus
*/
__pyx_t_8 = __pyx_v_self->i_nmda_f;
__pyx_v_i_nmda_f = __pyx_t_8;
/* "splikes/connections/calcium.pyx":147
* cdef double Vp=self.Vp
* cdef double i_nmda_f=self.i_nmda_f
* cdef double tau_nmda_f=self.tau_nmda_f # <<<<<<<<<<<<<<
* cdef double k_minus=self.k_minus
*
*/
__pyx_t_8 = __pyx_v_self->tau_nmda_f;
__pyx_v_tau_nmda_f = __pyx_t_8;
/* "splikes/connections/calcium.pyx":148
* cdef double i_nmda_f=self.i_nmda_f
* cdef double tau_nmda_f=self.tau_nmda_f
* cdef double k_minus=self.k_minus # <<<<<<<<<<<<<<
*
* cdef double *W=self.W
*/
__pyx_t_8 = __pyx_v_self->k_minus;
__pyx_v_k_minus = __pyx_t_8;
/* "splikes/connections/calcium.pyx":150
* cdef double k_minus=self.k_minus
*
* cdef double *W=self.W # <<<<<<<<<<<<<<
* cdef double *post_rate=<double *>self.post.rate.data
* cdef double *pre_rate=<double *>self.pre.rate.data
*/
__pyx_t_9 = __pyx_v_self->__pyx_base.W;
__pyx_v_W = __pyx_t_9;
/* "splikes/connections/calcium.pyx":151
*
* cdef double *W=self.W
* cdef double *post_rate=<double *>self.post.rate.data # <<<<<<<<<<<<<<
* cdef double *pre_rate=<double *>self.pre.rate.data
* cdef int *pre,*post # spikes for pre and post
*/
__pyx_v_post_rate = ((double *)__pyx_v_self->__pyx_base.post->rate->data);
/* "splikes/connections/calcium.pyx":152
* cdef double *W=self.W
* cdef double *post_rate=<double *>self.post.rate.data
* cdef double *pre_rate=<double *>self.pre.rate.data # <<<<<<<<<<<<<<
* cdef int *pre,*post # spikes for pre and post
* cdef int __wi
*/
__pyx_v_pre_rate = ((double *)__pyx_v_self->__pyx_base.pre->rate->data);
/* "splikes/connections/calcium.pyx":157
*
*
* pre=<int *>self.pre.spiking.data # <<<<<<<<<<<<<<
* post=<int *>self.post.spiking.data
*
*/
__pyx_v_pre = ((int *)__pyx_v_self->__pyx_base.pre->spiking->data);
/* "splikes/connections/calcium.pyx":158
*
* pre=<int *>self.pre.spiking.data
* post=<int *>self.post.spiking.data # <<<<<<<<<<<<<<
*
* for __i in range(self.post.N):
*/
__pyx_v_post = ((int *)__pyx_v_self->__pyx_base.post->spiking->data);
/* "splikes/connections/calcium.pyx":160
* post=<int *>self.post.spiking.data
*
* for __i in range(self.post.N): # <<<<<<<<<<<<<<
* v_backspike_slow[__i]+=sim.dt*(peak_backspike_slow*post[__i]/sim.dt-v_backspike_slow[__i]/tau_backspike_slow)
* v_backspike_fast[__i]+=sim.dt*(peak_backspike_fast*post[__i]/sim.dt-v_backspike_fast[__i]/tau_backspike_fast)
*/
__pyx_t_6 = __pyx_v_self->__pyx_base.post->N;
__pyx_t_10 = __pyx_t_6;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v___i = __pyx_t_11;
/* "splikes/connections/calcium.pyx":161
*
* for __i in range(self.post.N):
* v_backspike_slow[__i]+=sim.dt*(peak_backspike_slow*post[__i]/sim.dt-v_backspike_slow[__i]/tau_backspike_slow) # <<<<<<<<<<<<<<
* v_backspike_fast[__i]+=sim.dt*(peak_backspike_fast*post[__i]/sim.dt-v_backspike_fast[__i]/tau_backspike_fast)
* v_total[__i]=(Vo+backspike_amplitude*(v_backspike_fast[__i]+v_backspike_slow[__i]))
*/
__pyx_t_12 = __pyx_v___i;
(__pyx_v_v_backspike_slow[__pyx_t_12]) = ((__pyx_v_v_backspike_slow[__pyx_t_12]) + (__pyx_v_sim->dt * (((__pyx_v_peak_backspike_slow * (__pyx_v_post[__pyx_v___i])) / __pyx_v_sim->dt) - ((__pyx_v_v_backspike_slow[__pyx_v___i]) / __pyx_v_tau_backspike_slow))));
/* "splikes/connections/calcium.pyx":162
* for __i in range(self.post.N):
* v_backspike_slow[__i]+=sim.dt*(peak_backspike_slow*post[__i]/sim.dt-v_backspike_slow[__i]/tau_backspike_slow)
* v_backspike_fast[__i]+=sim.dt*(peak_backspike_fast*post[__i]/sim.dt-v_backspike_fast[__i]/tau_backspike_fast) # <<<<<<<<<<<<<<
* v_total[__i]=(Vo+backspike_amplitude*(v_backspike_fast[__i]+v_backspike_slow[__i]))
* B[__i]=(1.0/(1.0+(exp(mg1*v_total[__i])/mg2)))
*/
__pyx_t_12 = __pyx_v___i;
(__pyx_v_v_backspike_fast[__pyx_t_12]) = ((__pyx_v_v_backspike_fast[__pyx_t_12]) + (__pyx_v_sim->dt * (((__pyx_v_peak_backspike_fast * (__pyx_v_post[__pyx_v___i])) / __pyx_v_sim->dt) - ((__pyx_v_v_backspike_fast[__pyx_v___i]) / __pyx_v_tau_backspike_fast))));
/* "splikes/connections/calcium.pyx":163
* v_backspike_slow[__i]+=sim.dt*(peak_backspike_slow*post[__i]/sim.dt-v_backspike_slow[__i]/tau_backspike_slow)
* v_backspike_fast[__i]+=sim.dt*(peak_backspike_fast*post[__i]/sim.dt-v_backspike_fast[__i]/tau_backspike_fast)
* v_total[__i]=(Vo+backspike_amplitude*(v_backspike_fast[__i]+v_backspike_slow[__i])) # <<<<<<<<<<<<<<
* B[__i]=(1.0/(1.0+(exp(mg1*v_total[__i])/mg2)))
* h[__i]=(B[__i]*(v_total[__i]-v_reversal))
*/
(__pyx_v_v_total[__pyx_v___i]) = (__pyx_v_Vo + (__pyx_v_backspike_amplitude * ((__pyx_v_v_backspike_fast[__pyx_v___i]) + (__pyx_v_v_backspike_slow[__pyx_v___i]))));
/* "splikes/connections/calcium.pyx":164
* v_backspike_fast[__i]+=sim.dt*(peak_backspike_fast*post[__i]/sim.dt-v_backspike_fast[__i]/tau_backspike_fast)
* v_total[__i]=(Vo+backspike_amplitude*(v_backspike_fast[__i]+v_backspike_slow[__i]))
* B[__i]=(1.0/(1.0+(exp(mg1*v_total[__i])/mg2))) # <<<<<<<<<<<<<<
* h[__i]=(B[__i]*(v_total[__i]-v_reversal))
* for __j in range(self.pre.N):
*/
(__pyx_v_B[__pyx_v___i]) = (1.0 / (1.0 + (exp((__pyx_v_mg1 * (__pyx_v_v_total[__pyx_v___i]))) / __pyx_v_mg2)));
/* "splikes/connections/calcium.pyx":165
* v_total[__i]=(Vo+backspike_amplitude*(v_backspike_fast[__i]+v_backspike_slow[__i]))
* B[__i]=(1.0/(1.0+(exp(mg1*v_total[__i])/mg2)))
* h[__i]=(B[__i]*(v_total[__i]-v_reversal)) # <<<<<<<<<<<<<<
* for __j in range(self.pre.N):
* I_nmda_slow[__j]+=sim.dt*(i_nmda_mu*(i_nmda_s-I_nmda_slow[__j])*pre[__j]/sim.dt-I_nmda_slow[__j]/tau_nmda_s)
*/
(__pyx_v_h[__pyx_v___i]) = ((__pyx_v_B[__pyx_v___i]) * ((__pyx_v_v_total[__pyx_v___i]) - __pyx_v_v_reversal));
}
/* "splikes/connections/calcium.pyx":166
* B[__i]=(1.0/(1.0+(exp(mg1*v_total[__i])/mg2)))
* h[__i]=(B[__i]*(v_total[__i]-v_reversal))
* for __j in range(self.pre.N): # <<<<<<<<<<<<<<
* I_nmda_slow[__j]+=sim.dt*(i_nmda_mu*(i_nmda_s-I_nmda_slow[__j])*pre[__j]/sim.dt-I_nmda_slow[__j]/tau_nmda_s)
* I_nmda_fast[__j]+=sim.dt*(i_nmda_mu*(i_nmda_f-I_nmda_fast[__j])*pre[__j]/sim.dt-I_nmda_fast[__j]/tau_nmda_f)
*/
__pyx_t_6 = __pyx_v_self->__pyx_base.pre->N;
__pyx_t_10 = __pyx_t_6;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v___j = __pyx_t_11;
/* "splikes/connections/calcium.pyx":167
* h[__i]=(B[__i]*(v_total[__i]-v_reversal))
* for __j in range(self.pre.N):
* I_nmda_slow[__j]+=sim.dt*(i_nmda_mu*(i_nmda_s-I_nmda_slow[__j])*pre[__j]/sim.dt-I_nmda_slow[__j]/tau_nmda_s) # <<<<<<<<<<<<<<
* I_nmda_fast[__j]+=sim.dt*(i_nmda_mu*(i_nmda_f-I_nmda_fast[__j])*pre[__j]/sim.dt-I_nmda_fast[__j]/tau_nmda_f)
* for __i in range(self.post.N):
*/
__pyx_t_12 = __pyx_v___j;
(__pyx_v_I_nmda_slow[__pyx_t_12]) = ((__pyx_v_I_nmda_slow[__pyx_t_12]) + (__pyx_v_sim->dt * ((((__pyx_v_i_nmda_mu * (__pyx_v_i_nmda_s - (__pyx_v_I_nmda_slow[__pyx_v___j]))) * (__pyx_v_pre[__pyx_v___j])) / __pyx_v_sim->dt) - ((__pyx_v_I_nmda_slow[__pyx_v___j]) / __pyx_v_tau_nmda_s))));
/* "splikes/connections/calcium.pyx":168
* for __j in range(self.pre.N):
* I_nmda_slow[__j]+=sim.dt*(i_nmda_mu*(i_nmda_s-I_nmda_slow[__j])*pre[__j]/sim.dt-I_nmda_slow[__j]/tau_nmda_s)
* I_nmda_fast[__j]+=sim.dt*(i_nmda_mu*(i_nmda_f-I_nmda_fast[__j])*pre[__j]/sim.dt-I_nmda_fast[__j]/tau_nmda_f) # <<<<<<<<<<<<<<
* for __i in range(self.post.N):
* for __j in range(self.pre.N):
*/
__pyx_t_12 = __pyx_v___j;
(__pyx_v_I_nmda_fast[__pyx_t_12]) = ((__pyx_v_I_nmda_fast[__pyx_t_12]) + (__pyx_v_sim->dt * ((((__pyx_v_i_nmda_mu * (__pyx_v_i_nmda_f - (__pyx_v_I_nmda_fast[__pyx_v___j]))) * (__pyx_v_pre[__pyx_v___j])) / __pyx_v_sim->dt) - ((__pyx_v_I_nmda_fast[__pyx_v___j]) / __pyx_v_tau_nmda_f))));
}
/* "splikes/connections/calcium.pyx":169
* I_nmda_slow[__j]+=sim.dt*(i_nmda_mu*(i_nmda_s-I_nmda_slow[__j])*pre[__j]/sim.dt-I_nmda_slow[__j]/tau_nmda_s)
* I_nmda_fast[__j]+=sim.dt*(i_nmda_mu*(i_nmda_f-I_nmda_fast[__j])*pre[__j]/sim.dt-I_nmda_fast[__j]/tau_nmda_f)
* for __i in range(self.post.N): # <<<<<<<<<<<<<<
* for __j in range(self.pre.N):
* __wi=__i*self.pre.N+__j
*/
__pyx_t_6 = __pyx_v_self->__pyx_base.post->N;
__pyx_t_10 = __pyx_t_6;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v___i = __pyx_t_11;
/* "splikes/connections/calcium.pyx":170
* I_nmda_fast[__j]+=sim.dt*(i_nmda_mu*(i_nmda_f-I_nmda_fast[__j])*pre[__j]/sim.dt-I_nmda_fast[__j]/tau_nmda_f)
* for __i in range(self.post.N):
* for __j in range(self.pre.N): # <<<<<<<<<<<<<<
* __wi=__i*self.pre.N+__j
* g_nmda[__wi]+=sim.dt*(k_plus*g_t-(k_plus+k_minus*(v_total[__i]-Vo)**Vp)*g_nmda[__wi])
*/
__pyx_t_12 = __pyx_v_self->__pyx_base.pre->N;
__pyx_t_13 = __pyx_t_12;
for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
__pyx_v___j = __pyx_t_14;
/* "splikes/connections/calcium.pyx":171
* for __i in range(self.post.N):
* for __j in range(self.pre.N):
* __wi=__i*self.pre.N+__j # <<<<<<<<<<<<<<
* g_nmda[__wi]+=sim.dt*(k_plus*g_t-(k_plus+k_minus*(v_total[__i]-Vo)**Vp)*g_nmda[__wi])
* I_nmda[__wi]=(g_nmda[__wi]*(I_nmda_fast[__j]+I_nmda_slow[__j])*h[__i])
*/
__pyx_v___wi = ((__pyx_v___i * __pyx_v_self->__pyx_base.pre->N) + __pyx_v___j);
/* "splikes/connections/calcium.pyx":172
* for __j in range(self.pre.N):
* __wi=__i*self.pre.N+__j
* g_nmda[__wi]+=sim.dt*(k_plus*g_t-(k_plus+k_minus*(v_total[__i]-Vo)**Vp)*g_nmda[__wi]) # <<<<<<<<<<<<<<
* I_nmda[__wi]=(g_nmda[__wi]*(I_nmda_fast[__j]+I_nmda_slow[__j])*h[__i])
* Ca[__wi]+=sim.dt*((I_nmda[__wi]-Ca[__wi]/tau_ca))
*/
__pyx_t_15 = __pyx_v___wi;
(__pyx_v_g_nmda[__pyx_t_15]) = ((__pyx_v_g_nmda[__pyx_t_15]) + (__pyx_v_sim->dt * ((__pyx_v_k_plus * __pyx_v_g_t) - ((__pyx_v_k_plus + (__pyx_v_k_minus * pow(((__pyx_v_v_total[__pyx_v___i]) - __pyx_v_Vo), __pyx_v_Vp))) * (__pyx_v_g_nmda[__pyx_v___wi])))));
/* "splikes/connections/calcium.pyx":173
* __wi=__i*self.pre.N+__j
* g_nmda[__wi]+=sim.dt*(k_plus*g_t-(k_plus+k_minus*(v_total[__i]-Vo)**Vp)*g_nmda[__wi])
* I_nmda[__wi]=(g_nmda[__wi]*(I_nmda_fast[__j]+I_nmda_slow[__j])*h[__i]) # <<<<<<<<<<<<<<
* Ca[__wi]+=sim.dt*((I_nmda[__wi]-Ca[__wi]/tau_ca))
* omega[__wi]=(sig(Ca[__wi]-alpha2,beta2)-0.5*sig(Ca[__wi]-alpha1,beta1))
*/
(__pyx_v_I_nmda[__pyx_v___wi]) = (((__pyx_v_g_nmda[__pyx_v___wi]) * ((__pyx_v_I_nmda_fast[__pyx_v___j]) + (__pyx_v_I_nmda_slow[__pyx_v___j]))) * (__pyx_v_h[__pyx_v___i]));
/* "splikes/connections/calcium.pyx":174
* g_nmda[__wi]+=sim.dt*(k_plus*g_t-(k_plus+k_minus*(v_total[__i]-Vo)**Vp)*g_nmda[__wi])
* I_nmda[__wi]=(g_nmda[__wi]*(I_nmda_fast[__j]+I_nmda_slow[__j])*h[__i])
* Ca[__wi]+=sim.dt*((I_nmda[__wi]-Ca[__wi]/tau_ca)) # <<<<<<<<<<<<<<
* omega[__wi]=(sig(Ca[__wi]-alpha2,beta2)-0.5*sig(Ca[__wi]-alpha1,beta1))
* eta[__wi]=(eta_gamma0*Ca[__wi])
*/
__pyx_t_15 = __pyx_v___wi;
(__pyx_v_Ca[__pyx_t_15]) = ((__pyx_v_Ca[__pyx_t_15]) + (__pyx_v_sim->dt * ((__pyx_v_I_nmda[__pyx_v___wi]) - ((__pyx_v_Ca[__pyx_v___wi]) / __pyx_v_tau_ca))));
/* "splikes/connections/calcium.pyx":175
* I_nmda[__wi]=(g_nmda[__wi]*(I_nmda_fast[__j]+I_nmda_slow[__j])*h[__i])
* Ca[__wi]+=sim.dt*((I_nmda[__wi]-Ca[__wi]/tau_ca))
* omega[__wi]=(sig(Ca[__wi]-alpha2,beta2)-0.5*sig(Ca[__wi]-alpha1,beta1)) # <<<<<<<<<<<<<<
* eta[__wi]=(eta_gamma0*Ca[__wi])
* W[__wi]+=sim.dt*(eta[__wi]*(omega[__wi]-_lambda*W[__wi]))
*/
__pyx_t_1 = __pyx_f_7splikes_11connections_7calcium_sig(((__pyx_v_Ca[__pyx_v___wi]) - __pyx_v_alpha2), __pyx_v_beta2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_f_7splikes_11connections_7calcium_sig(((__pyx_v_Ca[__pyx_v___wi]) - __pyx_v_alpha1), __pyx_v_beta1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyNumber_Multiply(__pyx_float_0_5, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Subtract(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_8 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_8 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 175, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
(__pyx_v_omega[__pyx_v___wi]) = __pyx_t_8;
/* "splikes/connections/calcium.pyx":176
* Ca[__wi]+=sim.dt*((I_nmda[__wi]-Ca[__wi]/tau_ca))
* omega[__wi]=(sig(Ca[__wi]-alpha2,beta2)-0.5*sig(Ca[__wi]-alpha1,beta1))
* eta[__wi]=(eta_gamma0*Ca[__wi]) # <<<<<<<<<<<<<<
* W[__wi]+=sim.dt*(eta[__wi]*(omega[__wi]-_lambda*W[__wi]))
*
*/
(__pyx_v_eta[__pyx_v___wi]) = (__pyx_v_eta_gamma0 * (__pyx_v_Ca[__pyx_v___wi]));
/* "splikes/connections/calcium.pyx":177
* omega[__wi]=(sig(Ca[__wi]-alpha2,beta2)-0.5*sig(Ca[__wi]-alpha1,beta1))
* eta[__wi]=(eta_gamma0*Ca[__wi])
* W[__wi]+=sim.dt*(eta[__wi]*(omega[__wi]-_lambda*W[__wi])) # <<<<<<<<<<<<<<
*
* self.apply_weight_limits()
*/
__pyx_t_15 = __pyx_v___wi;
(__pyx_v_W[__pyx_t_15]) = ((__pyx_v_W[__pyx_t_15]) + (__pyx_v_sim->dt * ((__pyx_v_eta[__pyx_v___wi]) * ((__pyx_v_omega[__pyx_v___wi]) - (__pyx_v__lambda * (__pyx_v_W[__pyx_v___wi]))))));
}
}
/* "splikes/connections/calcium.pyx":179
* W[__wi]+=sim.dt*(eta[__wi]*(omega[__wi]-_lambda*W[__wi]))
*
* self.apply_weight_limits() # <<<<<<<<<<<<<<
*
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_7splikes_11connections_7calcium_calcium *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base.apply_weight_limits(((struct __pyx_obj_7splikes_7splikes_connection *)__pyx_v_self), 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "splikes/connections/calcium.pyx":108
* @cython.cdivision(True)
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* cpdef update(self,double t,simulation sim): # <<<<<<<<<<<<<<
* cdef int __i,__j
*
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.update", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/* Python-level entry point for calcium.update(t, sim).
 * Unpacks the positional/keyword arguments, converts t to a C double,
 * type-checks sim against the simulation extension type, then forwards
 * to __pyx_pf_..._4update. Cython-generated; do not edit by hand. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5update(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5update(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
double __pyx_v_t;
struct __pyx_obj_7splikes_7splikes_simulation *__pyx_v_sim = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("update (wrapper)", 0);
{
/* Accepted keyword names, in positional order; NULL-terminated. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_t,&__pyx_n_s_sim,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Collect positional arguments first; deliberate fallthrough fills
 * values[] from the end so remaining slots come from keywords. */
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill any slot not supplied positionally from the keyword dict;
 * a missing required argument raises the "argtuple invalid" error. */
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sim)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("update", 1, 2, 2, 1); __PYX_ERR(0, 108, __pyx_L3_error)
}
}
/* Leftover keywords are unexpected or duplicate -> TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "update") < 0)) __PYX_ERR(0, 108, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly two positional arguments and no keywords. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
/* Convert t to a C double. -1 can be a legitimate value, so a
 * conversion failure is confirmed via PyErr_Occurred(). */
__pyx_v_t = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 108, __pyx_L3_error)
__pyx_v_sim = ((struct __pyx_obj_7splikes_7splikes_simulation *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong number of positional arguments: raise TypeError with counts. */
__Pyx_RaiseArgtupleInvalid("update", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 108, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.update", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Verify sim is an instance of the simulation extension type
 * (flag arguments are Cython-internal; exact None-handling is
 * determined by __Pyx_ArgTypeTest's semantics). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sim), __pyx_ptype_7splikes_7splikes_simulation, 1, "sim", 0))) __PYX_ERR(0, 108, __pyx_L1_error)
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_4update(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), __pyx_v_t, __pyx_v_sim);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation half of the Python wrapper for calcium.update():
 * forwards the already-unpacked (self, t, sim) to the cpdef C function
 * with skip_dispatch=1 (last argument) so the Python-override check is
 * bypassed when entered through the wrapper. Returns a new reference,
 * or NULL with an exception set. Cython-generated. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_4update(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, double __pyx_v_t, struct __pyx_obj_7splikes_7splikes_simulation *__pyx_v_sim) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("update", 0);
__Pyx_XDECREF(__pyx_r);
/* Call the cpdef C implementation directly; 1 = skip Python dispatch. */
__pyx_t_1 = __pyx_f_7splikes_11connections_7calcium_7calcium_update(__pyx_v_self, __pyx_v_t, __pyx_v_sim, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.update", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "splikes/connections/calcium.pyx":58
* #
* cdef class calcium(connection):
* cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus # <<<<<<<<<<<<<<
* cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow
* cpdef _reset(self):
*/
/* Cython-generated property accessors for the `cdef public double g_t`
 * attribute of splikes.connections.calcium.calcium (calcium.pyx:58).
 * __get__ boxes the C double as a new Python float; __set__ coerces the
 * incoming Python object to a C double and stores it.  In the setter,
 * (double)-1 is the conversion-failure sentinel and PyErr_Occurred()
 * disambiguates it from a legitimate -1.0 value.
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->g_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.g_t.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3g_t_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  /* Coerce to C double; on failure a Python error is already set. */
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->g_t = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.g_t.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double mg2` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->mg2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.mg2.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->mg2 = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.mg2.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double mg1` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->mg1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.mg1.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3mg1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->mg1 = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.mg1.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double v_reversal` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->v_reversal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.v_reversal.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10v_reversal_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->v_reversal = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.v_reversal.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double tau_ca` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->tau_ca); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_ca.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6tau_ca_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->tau_ca = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_ca.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double alpha2` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->alpha2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.alpha2.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->alpha2 = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.alpha2.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double alpha1` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->alpha1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.alpha1.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6alpha1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->alpha1 = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.alpha1.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double
 * backspike_amplitude` on splikes.connections.calcium.calcium
 * (calcium.pyx:58).  Getter boxes the C double as a Python float; setter
 * coerces a Python object to C double ((double)-1 + PyErr_Occurred() is the
 * failure protocol).  Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->backspike_amplitude); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.backspike_amplitude.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->backspike_amplitude = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.backspike_amplitude.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double i_nmda_mu` on
 * splikes.connections.calcium.calcium (calcium.pyx:58).  Getter boxes the
 * C double as a Python float; setter coerces a Python object to C double
 * ((double)-1 + PyErr_Occurred() is the failure protocol).
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->i_nmda_mu); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_mu.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->i_nmda_mu = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_mu.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double
 * peak_backspike_fast` on splikes.connections.calcium.calcium
 * (calcium.pyx:58).  Getter boxes the C double as a Python float; setter
 * coerces a Python object to C double ((double)-1 + PyErr_Occurred() is the
 * failure protocol).  Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->peak_backspike_fast); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.peak_backspike_fast.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->peak_backspike_fast = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.peak_backspike_fast.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated property accessors for `cdef public double
 * peak_backspike_slow` on splikes.connections.calcium.calcium
 * (calcium.pyx:58).  Getter boxes the C double as a Python float; setter
 * coerces a Python object to C double ((double)-1 + PyErr_Occurred() is the
 * failure protocol).  Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->peak_backspike_slow); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.peak_backspike_slow.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  double __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
  __pyx_v_self->peak_backspike_slow = __pyx_t_1;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("splikes.connections.calcium.calcium.peak_backspike_slow.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated __get__ accessor for `cdef public double _lambda` on
 * splikes.connections.calcium.calcium (calcium.pyx:58); the attribute is
 * named `_lambda` in the .pyx because `lambda` is a Python keyword.
 * Boxes the C double field as a new Python float.
 * Machine-generated — edit the .pyx, not this file. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->_lambda); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("splikes.connections.calcium.calcium._lambda.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7_lambda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->_lambda = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium._lambda.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.beta2`: casts the receiver
 * and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `beta2` into a new Python
 * float (new reference); NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->beta2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.beta2.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.beta2`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `beta2`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta2_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->beta2 = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.beta2.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.beta1`: casts the receiver
 * and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `beta1` into a new Python
 * float (new reference); NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->beta1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.beta1.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.beta1`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `beta1`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5beta1_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->beta1 = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.beta1.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.k_plus`: casts the receiver
 * and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `k_plus` into a new Python
 * float (new reference); NULL with an exception set on allocation failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->k_plus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.k_plus.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.k_plus`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `k_plus`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6k_plus_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->k_plus = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.k_plus.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.g_nmda_o`: casts the receiver
 * and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `g_nmda_o` into a new
 * Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->g_nmda_o); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.g_nmda_o.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.g_nmda_o`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `g_nmda_o`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8g_nmda_o_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->g_nmda_o = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.g_nmda_o.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.tau_backspike_fast`: casts
 * the receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `tau_backspike_fast` into
 * a new Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->tau_backspike_fast); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_backspike_fast.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.tau_backspike_fast`: casts the receiver and
 * forwards to the typed setter. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `tau_backspike_fast`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->tau_backspike_fast = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_backspike_fast.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.tau_backspike_slow`: casts
 * the receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `tau_backspike_slow` into
 * a new Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->tau_backspike_slow); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_backspike_slow.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.tau_backspike_slow`: casts the receiver and
 * forwards to the typed setter. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `tau_backspike_slow`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->tau_backspike_slow = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_backspike_slow.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.eta_gamma0`: casts the
 * receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `eta_gamma0` into a new
 * Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->eta_gamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.eta_gamma0.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.eta_gamma0`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `eta_gamma0`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10eta_gamma0_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->eta_gamma0 = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.eta_gamma0.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.i_nmda_s`: casts the receiver
 * and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `i_nmda_s` into a new
 * Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->i_nmda_s); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_s.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.i_nmda_s`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `i_nmda_s`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_s_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->i_nmda_s = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_s.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.tau_nmda_s`: casts the
 * receiver and delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `tau_nmda_s` into a new
 * Python float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->tau_nmda_s); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_nmda_s.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.tau_nmda_s`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `tau_nmda_s`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->tau_nmda_s = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_nmda_s.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.Vo`: casts the receiver and
 * delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `Vo` into a new Python
 * float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->Vo); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.Vo.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* __set__ wrapper for `calcium.Vo`: casts the receiver and forwards. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __set__ implementation: coerces the Python value to a C double and stores
 * it in `Vo`; returns 0 on success, -1 with an exception set. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vo_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->Vo = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.Vo.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Cython-generated __get__ wrapper for `calcium.Vp`: casts the receiver and
 * delegates to the typed implementation. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __get__ implementation: boxes the C double field `Vp` into a new Python
 * float (new reference); NULL with an exception set on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->Vp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.Vp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Vp_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->Vp = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.Vp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter/setter pair for `calcium.i_nmda_f` (cdef public double).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: box self->i_nmda_f as a new Python float; NULL on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->i_nmda_f); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_f.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: coerce value to a C double and store in self->i_nmda_f; 0/-1 result. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_8i_nmda_f_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
/* -1 is the conversion error sentinel; PyErr_Occurred() disambiguates a real -1.0 */
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->i_nmda_f = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.i_nmda_f.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter/setter pair for `calcium.tau_nmda_f` (cdef public double).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: box self->tau_nmda_f as a new Python float; NULL on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->tau_nmda_f); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_nmda_f.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: coerce value to a C double and store in self->tau_nmda_f; 0/-1 result. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
/* -1 is the conversion error sentinel; PyErr_Occurred() disambiguates a real -1.0 */
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->tau_nmda_f = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.tau_nmda_f.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated getter/setter pair for `calcium.k_minus` (cdef public double).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: box self->k_minus as a new Python float; NULL on failure. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->k_minus); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.k_minus.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: coerce value to a C double and store in self->k_minus; 0/-1 result. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7k_minus_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
double __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
/* -1 is the conversion error sentinel; PyErr_Occurred() disambiguates a real -1.0 */
__pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_v_value); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 58, __pyx_L1_error)
__pyx_v_self->k_minus = __pyx_t_1;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("splikes.connections.calcium.calcium.k_minus.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "splikes/connections/calcium.pyx":59
 * cdef class calcium(connection):
 * cdef public double g_t,mg2,mg1,v_reversal,tau_ca,alpha2,alpha1,backspike_amplitude,i_nmda_mu,peak_backspike_fast,peak_backspike_slow,_lambda,beta2,beta1,k_plus,g_nmda_o,tau_backspike_fast,tau_backspike_slow,eta_gamma0,i_nmda_s,tau_nmda_s,Vo,Vp,i_nmda_f,tau_nmda_f,k_minus
 * cdef public np.ndarray B,I_nmda,h,Ca,v_total,eta,g_nmda,v_backspike_fast,I_nmda_fast,I_nmda_slow,omega,v_backspike_slow # <<<<<<<<<<<<<<
 * cpdef _reset(self):
 * self.B=np.zeros(self.post.N,dtype=np.float)
 */
/* Cython-generated get/set/del accessors for `calcium.B` (cdef public np.ndarray).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1B_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1B_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1B___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->B. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_1B___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->B));
__pyx_r = ((PyObject *)__pyx_v_self->B);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->B — incref new value before decref of the old (safe replacement order). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->B);
__Pyx_DECREF(((PyObject *)__pyx_v_self->B));
__pyx_v_self->B = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.B.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_5__del__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible deleter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: attribute deletion resets self->B to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1B_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->B);
__Pyx_DECREF(((PyObject *)__pyx_v_self->B));
__pyx_v_self->B = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated get/set/del accessors for `calcium.I_nmda` (cdef public np.ndarray).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->I_nmda. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->I_nmda));
__pyx_r = ((PyObject *)__pyx_v_self->I_nmda);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->I_nmda — incref new value before decref of the old. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->I_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda));
__pyx_v_self->I_nmda = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.I_nmda.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_5__del__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible deleter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: attribute deletion resets self->I_nmda to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6I_nmda_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->I_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda));
__pyx_v_self->I_nmda = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated get/set/del accessors for `calcium.h` (cdef public np.ndarray).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1h_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_1h_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1h___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->h. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_1h___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->h));
__pyx_r = ((PyObject *)__pyx_v_self->h);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->h — incref new value before decref of the old. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->h);
__Pyx_DECREF(((PyObject *)__pyx_v_self->h));
__pyx_v_self->h = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.h.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_5__del__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible deleter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: attribute deletion resets self->h to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_1h_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->h);
__Pyx_DECREF(((PyObject *)__pyx_v_self->h));
__pyx_v_self->h = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated get/set/del accessors for `calcium.Ca` (cdef public np.ndarray).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->Ca. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->Ca));
__pyx_r = ((PyObject *)__pyx_v_self->Ca);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->Ca — incref new value before decref of the old. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->Ca);
__Pyx_DECREF(((PyObject *)__pyx_v_self->Ca));
__pyx_v_self->Ca = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.Ca.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_5__del__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible deleter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: attribute deletion resets self->Ca to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_2Ca_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->Ca);
__Pyx_DECREF(((PyObject *)__pyx_v_self->Ca));
__pyx_v_self->Ca = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated get/set/del accessors for `calcium.v_total` (cdef public np.ndarray).
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->v_total. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->v_total));
__pyx_r = ((PyObject *)__pyx_v_self->v_total);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->v_total — incref new value before decref of the old. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->v_total);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_total));
__pyx_v_self->v_total = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.v_total.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_5__del__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible deleter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: attribute deletion resets self->v_total to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_7v_total_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->v_total);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_total));
__pyx_v_self->v_total = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated get/set accessors for `calcium.eta` (cdef public np.ndarray);
 * the matching __del__ pair follows below this block.
 * Generated from splikes/connections/calcium.pyx — do not edit by hand. */
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_1__get__(PyObject *__pyx_v_self); /*proto*/
/* Python-visible getter wrapper: casts self to the extension struct and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: return a new reference to the stored ndarray self->eta. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_3eta___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->eta));
__pyx_r = ((PyObject *)__pyx_v_self->eta);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* Python-visible setter wrapper: casts self to the extension struct and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Typed impl: type-check value (numpy.ndarray or None), then swap it into
 * self->eta — incref new value before decref of the old. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->eta);
__Pyx_DECREF(((PyObject *)__pyx_v_self->eta));
__pyx_v_self->eta = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.eta.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.eta`: casts self and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->eta to Py_None (never NULL). Always returns 0. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_3eta_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->eta);
__Pyx_DECREF(((PyObject *)__pyx_v_self->eta));
__pyx_v_self->eta = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.g_nmda: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->g_nmda with a fresh strong reference. Cannot fail. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->g_nmda));
__pyx_r = ((PyObject *)__pyx_v_self->g_nmda);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.g_nmda assignment: casts args and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->g_nmda, dropping the old
 * reference. Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->g_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->g_nmda));
__pyx_v_self->g_nmda = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.g_nmda.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.g_nmda`: casts self and delegates. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->g_nmda to Py_None (never NULL). Always returns 0. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_6g_nmda_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->g_nmda);
__Pyx_DECREF(((PyObject *)__pyx_v_self->g_nmda));
__pyx_v_self->g_nmda = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.v_backspike_fast: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->v_backspike_fast with a fresh strong reference. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->v_backspike_fast));
__pyx_r = ((PyObject *)__pyx_v_self->v_backspike_fast);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.v_backspike_fast assignment. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->v_backspike_fast.
 * Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->v_backspike_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_fast));
__pyx_v_self->v_backspike_fast = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.v_backspike_fast.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.v_backspike_fast`. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->v_backspike_fast to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->v_backspike_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_fast));
__pyx_v_self->v_backspike_fast = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.I_nmda_fast: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->I_nmda_fast with a fresh strong reference. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->I_nmda_fast));
__pyx_r = ((PyObject *)__pyx_v_self->I_nmda_fast);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.I_nmda_fast assignment. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->I_nmda_fast.
 * Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->I_nmda_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_fast));
__pyx_v_self->I_nmda_fast = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.I_nmda_fast.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.I_nmda_fast`. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->I_nmda_fast to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->I_nmda_fast);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_fast));
__pyx_v_self->I_nmda_fast = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.I_nmda_slow: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->I_nmda_slow with a fresh strong reference. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->I_nmda_slow));
__pyx_r = ((PyObject *)__pyx_v_self->I_nmda_slow);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.I_nmda_slow assignment. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->I_nmda_slow.
 * Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->I_nmda_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_slow));
__pyx_v_self->I_nmda_slow = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.I_nmda_slow.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.I_nmda_slow`. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->I_nmda_slow to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->I_nmda_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->I_nmda_slow));
__pyx_v_self->I_nmda_slow = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.omega: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->omega with a fresh strong reference. Cannot fail. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_5omega___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->omega));
__pyx_r = ((PyObject *)__pyx_v_self->omega);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.omega assignment. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->omega, dropping the old
 * reference. Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->omega);
__Pyx_DECREF(((PyObject *)__pyx_v_self->omega));
__pyx_v_self->omega = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.omega.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.omega`. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->omega to Py_None (never NULL). Always returns 0. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_5omega_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->omega);
__Pyx_DECREF(((PyObject *)__pyx_v_self->omega));
__pyx_v_self->omega = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_1__get__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for calcium.v_backspike_slow: casts self and delegates. */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow___get__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter body: return self->v_backspike_slow with a fresh strong reference. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow___get__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self->v_backspike_slow));
__pyx_r = ((PyObject *)__pyx_v_self->v_backspike_slow);
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
/* getset slot wrapper for calcium.v_backspike_slow assignment. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_2__set__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter body: accept only None or numpy.ndarray (__Pyx_TypeTest raises
 * otherwise), then swap the new value into self->v_backspike_slow.
 * Returns 0 on success, -1 after recording a traceback. */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_2__set__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 59, __pyx_L1_error)
__pyx_t_1 = __pyx_v_value;
__Pyx_INCREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v_self->v_backspike_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_slow));
__pyx_v_self->v_backspike_slow = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.v_backspike_slow.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_5__del__(PyObject *__pyx_v_self); /*proto*/
/* getset slot wrapper for `del calcium.v_backspike_slow`. */
static int __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_5__del__(PyObject *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_4__del__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Deleter body: reset self->v_backspike_slow to Py_None (never NULL). */
static int __pyx_pf_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_4__del__(struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__del__", 0);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_self->v_backspike_slow);
__Pyx_DECREF(((PyObject *)__pyx_v_self->v_backspike_slow));
__pyx_v_self->v_backspike_slow = ((PyArrayObject *)Py_None);
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("self.W cannot be converted to a Python object for pickling")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Pickle support stub: the calcium type is unpicklable, so this method
 * unconditionally raises TypeError (built from the prepared tuple). */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_6__reduce_cython__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation: always raises TypeError and returns NULL. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_6__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("self.W cannot be converted to a Python object for pickling") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("self.W cannot be converted to a Python object for pickling")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("self.W cannot be converted to a Python object for pickling")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("self.W cannot be converted to a Python object for pickling")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("self.W cannot be converted to a Python object for pickling")
*/
/* Python wrapper */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
/* Unpickle support stub: the calcium type is unpicklable, so this method
 * unconditionally raises TypeError (built from the prepared tuple). */
static PyObject *__pyx_pw_7splikes_11connections_7calcium_7calcium_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf_7splikes_11connections_7calcium_7calcium_8__setstate_cython__(((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation: always raises TypeError and returns NULL; state arg unused. */
static PyObject *__pyx_pf_7splikes_11connections_7calcium_7calcium_8__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7splikes_11connections_7calcium_calcium *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("self.W cannot be converted to a Python object for pickling")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("self.W cannot be converted to a Python object for pickling") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("self.W cannot be converted to a Python object for pickling")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("self.W cannot be converted to a Python object for pickling")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("splikes.connections.calcium.calcium.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* Inline helper generated from numpy/__init__.pxd: single-operand form of
 * PyArray_MultiIterNew. Returns a new broadcast-iterator object, or 0 (NULL)
 * after recording a traceback if the numpy C-API call fails. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":736
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 736, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* Inlined helper generated from numpy/__init__.pxd: wraps
 * PyArray_MultiIterNew(2, a, b) to create a broadcast iterator over two
 * operands. Returns a new reference, or 0 (NULL) with an exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":739
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 739, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* Inlined helper generated from numpy/__init__.pxd: wraps
 * PyArray_MultiIterNew(3, a, b, c) to create a broadcast iterator over three
 * operands. Returns a new reference, or 0 (NULL) with an exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":742
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* Inlined helper generated from numpy/__init__.pxd: wraps
 * PyArray_MultiIterNew(4, a, b, c, d) to create a broadcast iterator over four
 * operands. Returns a new reference, or 0 (NULL) with an exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":745
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 745, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* Inlined helper generated from numpy/__init__.pxd: wraps
 * PyArray_MultiIterNew(5, a, b, c, d, e) to create a broadcast iterator over
 * five operands. Returns a new reference, or 0 (NULL) with an exception set. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":748
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 748, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* Inlined helper generated from numpy/__init__.pxd: returns the shape tuple of
 * a dtype's subarray when PyDataType_HASSUBARRAY(d) is true, otherwise the
 * empty tuple. Always returns a new reference (INCREF of an existing tuple);
 * this function cannot fail. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":752
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
/* Borrowed shape tuple from the descriptor — INCREF before returning it. */
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":754
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":931
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* Inlined helper generated from numpy/__init__.pxd: installs `base` as the
 * base object of `arr`. Py_INCREF is done first because
 * PyArray_SetBaseObject steals a reference to its argument; its return value
 * is deliberately discarded (cast to void). */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":932
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":933
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":931
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":935
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* Inlined helper generated from numpy/__init__.pxd: returns the base object of
 * `arr`, or None when PyArray_BASE is NULL. PyArray_BASE returns a borrowed
 * pointer, so the result is INCREF'd before being returned as a new
 * reference. This function cannot fail. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":936
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":937
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":938
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":937
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":939
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":935
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":943
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* Inlined helper generated from numpy/__init__.pxd: Cython-friendly wrapper
 * around NumPy's _import_array(). On failure (-1) the original exception is
 * caught and re-raised as ImportError("numpy.core.multiarray failed to
 * import"). Returns 0 on success, -1 with an exception set on failure.
 * __pyx_t_1..3 hold the saved exception state across the try/except. */
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":945
* cdef inline int import_array() except -1:
* try:
* __pyx_import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 945, __pyx_L3_error)
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":946
* try:
* __pyx_import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 946, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":947
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 947, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 947, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":943
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":949
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Inlined helper generated from numpy/__init__.pxd: Cython-friendly wrapper
 * around NumPy's _import_umath(). On failure (-1) the original exception is
 * caught and re-raised as ImportError("numpy.core.umath failed to import").
 * Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":951
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 951, __pyx_L3_error)
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":952
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 952, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":953
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 953, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 953, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":949
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":955
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Inlined helper generated from numpy/__init__.pxd: same machinery as
 * import_umath above — the .pxd intentionally calls _import_umath() here too,
 * since the ufunc API is provided by the umath module. Failure is re-raised
 * as ImportError("numpy.core.umath failed to import"). Returns 0 on success,
 * -1 with an exception set on failure. */
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":957
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L3_error)
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":958
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 958, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":959
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef extern from *:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 959, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(2, 959, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":955
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":969
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
/* Inlined helper generated from numpy/__init__.pxd: C-level equivalent of
 * isinstance(obj, np.timedelta64) — a PyObject_TypeCheck against
 * PyTimedeltaArrType_Type. Returns nonzero for a match; cannot fail. */
static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_timedelta64_object", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":981
* bool
* """
* return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type));
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":969
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":984
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
/* Inlined helper generated from numpy/__init__.pxd: C-level equivalent of
 * isinstance(obj, np.datetime64) — a PyObject_TypeCheck against
 * PyDatetimeArrType_Type. Returns nonzero for a match; cannot fail. */
static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_datetime64_object", 0);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":996
* bool
* """
* return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type));
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":984
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":999
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
/* Inlined helper generated from numpy/__init__.pxd (declared `nogil`, hence no
 * refnanny bookkeeping): reads the raw int64 payload (obval) of a numpy
 * datetime64 scalar. Caller must ensure obj is a datetime64 scalar — the cast
 * is unchecked. The unit lives separately; see get_datetime64_unit below. */
static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) {
npy_datetime __pyx_r;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1006
* also needed. That can be found using `get_datetime64_unit`.
* """
* return (<PyDatetimeScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":999
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1009
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
/* Inlined helper generated from numpy/__init__.pxd (declared `nogil`): reads
 * the raw int64 payload (obval) of a numpy timedelta64 scalar. Caller must
 * ensure obj is a timedelta64 scalar — the cast is unchecked. */
static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) {
npy_timedelta __pyx_r;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1013
* returns the int64 value underlying scalar numpy timedelta64 object
* """
* return (<PyTimedeltaScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1009
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1016
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
/* Inlined helper generated from numpy/__init__.pxd (declared `nogil`): reads
 * the time-unit enum (obmeta.base, e.g. seconds vs. nanoseconds) of a numpy
 * datetime64 scalar's dtype. Caller must ensure obj is a datetime64 scalar —
 * the cast is unchecked. */
static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) {
NPY_DATETIMEUNIT __pyx_r;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1020
* returns the unit part of the dtype for a numpy datetime64 object.
* """
* return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # <<<<<<<<<<<<<<
*/
__pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base);
goto __pyx_L0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1016
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
static struct __pyx_vtabstruct_7splikes_11connections_7calcium_calcium __pyx_vtable_7splikes_11connections_7calcium_calcium;
/* tp_new slot for the `calcium` extension type: delegates allocation to the
 * base type's (splikes.connection) tp_new, installs the derived vtable, and
 * initializes every ndarray-typed attribute slot to None (each with its own
 * INCREF, matching the Py_CLEAR calls in tp_dealloc below). Returns the new
 * object, or 0 (NULL) on allocation failure. */
static PyObject *__pyx_tp_new_7splikes_11connections_7calcium_calcium(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_obj_7splikes_11connections_7calcium_calcium *p;
PyObject *o = __pyx_ptype_7splikes_7splikes_connection->tp_new(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_obj_7splikes_11connections_7calcium_calcium *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_7splikes_7splikes_connection*)__pyx_vtabptr_7splikes_11connections_7calcium_calcium;
p->B = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->I_nmda = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->h = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->Ca = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->v_total = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->eta = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->g_nmda = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->v_backspike_fast = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->I_nmda_fast = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->I_nmda_slow = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->omega = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
p->v_backspike_slow = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
return o;
}
/* tp_dealloc for the `calcium` extension type.
 *
 * Order matters (standard Cython pattern):
 *  1. give tp_finalize (PEP 442 __del__) a chance to resurrect the object;
 *  2. untrack from the GC so the Py_CLEAR calls cannot race a collection;
 *  3. drop all owned array references;
 *  4. re-track the object before chaining to the base type's tp_dealloc —
 *     the base dealloc is expected to untrack again itself, so retracking
 *     here keeps the GC bookkeeping balanced (generated idiom; do not
 *     "simplify" by removing the PyObject_GC_Track call). */
static void __pyx_tp_dealloc_7splikes_11connections_7calcium_calcium(PyObject *o) {
  struct __pyx_obj_7splikes_11connections_7calcium_calcium *p = (struct __pyx_obj_7splikes_11connections_7calcium_calcium *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->B);
  Py_CLEAR(p->I_nmda);
  Py_CLEAR(p->h);
  Py_CLEAR(p->Ca);
  Py_CLEAR(p->v_total);
  Py_CLEAR(p->eta);
  Py_CLEAR(p->g_nmda);
  Py_CLEAR(p->v_backspike_fast);
  Py_CLEAR(p->I_nmda_fast);
  Py_CLEAR(p->I_nmda_slow);
  Py_CLEAR(p->omega);
  Py_CLEAR(p->v_backspike_slow);
  PyObject_GC_Track(o);
  if (likely(__pyx_ptype_7splikes_7splikes_connection)) __pyx_ptype_7splikes_7splikes_connection->tp_dealloc(o); else __Pyx_call_next_tp_dealloc(o, __pyx_tp_dealloc_7splikes_11connections_7calcium_calcium);
}
/* tp_traverse for the `calcium` extension type (cyclic-GC support).
 *
 * First chains to the base `connection` type's traverse, then visits each
 * owned PyArrayObject* slot.  The NULL checks are defensive: slots are
 * normally Py_None (never NULL) after tp_new, but the generated code does
 * not rely on that invariant here.  Returns the first nonzero visitor
 * result, per the tp_traverse contract. */
static int __pyx_tp_traverse_7splikes_11connections_7calcium_calcium(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_obj_7splikes_11connections_7calcium_calcium *p = (struct __pyx_obj_7splikes_11connections_7calcium_calcium *)o;
  e = ((likely(__pyx_ptype_7splikes_7splikes_connection)) ? ((__pyx_ptype_7splikes_7splikes_connection->tp_traverse) ? __pyx_ptype_7splikes_7splikes_connection->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, __pyx_tp_traverse_7splikes_11connections_7calcium_calcium)); if (e) return e;
  if (p->B) {
    e = (*v)(((PyObject *)p->B), a); if (e) return e;
  }
  if (p->I_nmda) {
    e = (*v)(((PyObject *)p->I_nmda), a); if (e) return e;
  }
  if (p->h) {
    e = (*v)(((PyObject *)p->h), a); if (e) return e;
  }
  if (p->Ca) {
    e = (*v)(((PyObject *)p->Ca), a); if (e) return e;
  }
  if (p->v_total) {
    e = (*v)(((PyObject *)p->v_total), a); if (e) return e;
  }
  if (p->eta) {
    e = (*v)(((PyObject *)p->eta), a); if (e) return e;
  }
  if (p->g_nmda) {
    e = (*v)(((PyObject *)p->g_nmda), a); if (e) return e;
  }
  if (p->v_backspike_fast) {
    e = (*v)(((PyObject *)p->v_backspike_fast), a); if (e) return e;
  }
  if (p->I_nmda_fast) {
    e = (*v)(((PyObject *)p->I_nmda_fast), a); if (e) return e;
  }
  if (p->I_nmda_slow) {
    e = (*v)(((PyObject *)p->I_nmda_slow), a); if (e) return e;
  }
  if (p->omega) {
    e = (*v)(((PyObject *)p->omega), a); if (e) return e;
  }
  if (p->v_backspike_slow) {
    e = (*v)(((PyObject *)p->v_backspike_slow), a); if (e) return e;
  }
  return 0;
}
/* tp_clear for the `calcium` extension type (cycle breaking).
 *
 * Chains to the base type's tp_clear, then resets every owned array slot
 * back to Py_None instead of NULL.  The reset-then-decref ordering (store
 * the old pointer in `tmp`, overwrite the slot, Py_XDECREF the old value)
 * guarantees the slot never dangles if the decref triggers arbitrary code
 * — the same reason Py_CLEAR exists; None is used so later attribute reads
 * keep working after a GC clear. */
static int __pyx_tp_clear_7splikes_11connections_7calcium_calcium(PyObject *o) {
  PyObject* tmp;
  struct __pyx_obj_7splikes_11connections_7calcium_calcium *p = (struct __pyx_obj_7splikes_11connections_7calcium_calcium *)o;
  if (likely(__pyx_ptype_7splikes_7splikes_connection)) { if (__pyx_ptype_7splikes_7splikes_connection->tp_clear) __pyx_ptype_7splikes_7splikes_connection->tp_clear(o); } else __Pyx_call_next_tp_clear(o, __pyx_tp_clear_7splikes_11connections_7calcium_calcium);
  tmp = ((PyObject*)p->B);
  p->B = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->I_nmda);
  p->I_nmda = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->h);
  p->h = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->Ca);
  p->Ca = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->v_total);
  p->v_total = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->eta);
  p->eta = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->g_nmda);
  p->g_nmda = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->v_backspike_fast);
  p->v_backspike_fast = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->I_nmda_fast);
  p->I_nmda_fast = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->I_nmda_slow);
  p->I_nmda_slow = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->omega);
  p->omega = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->v_backspike_slow);
  p->v_backspike_slow = ((PyArrayObject *)Py_None); Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}
/* --------------------------------------------------------------------------
 * Property accessors for the `calcium` type's scalar attributes
 * (g_t, mg2, mg1, v_reversal, tau_ca, alpha2, alpha1, backspike_amplitude,
 *  i_nmda_mu, peak_backspike_fast, peak_backspike_slow, _lambda, beta2,
 *  beta1, k_plus, g_nmda_o, tau_backspike_fast, tau_backspike_slow,
 *  eta_gamma0, i_nmda_s, tau_nmda_s, Vo, Vp, i_nmda_f, tau_nmda_f, k_minus).
 *
 * Every pair follows the identical generated pattern: the getter forwards
 * to the Cython __get__ wrapper, and the setter forwards to the __set__
 * wrapper when a value is supplied, but rejects `del obj.attr` (v == NULL)
 * with NotImplementedError because these cdef attributes do not support
 * deletion.  All accessors are wired into the tp_getset table below.
 * -------------------------------------------------------------------------- */
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_g_t(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_t(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_3g_t_3__set__(o, v);
  }
  else {
    /* v == NULL means attribute deletion, which is not supported. */
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_mg2(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_mg2(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg2_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_mg1(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_mg1(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_3mg1_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_v_reversal(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_reversal(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_10v_reversal_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_ca(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_ca(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6tau_ca_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_alpha2(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_alpha2(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha2_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_alpha1(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_alpha1(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6alpha1_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_backspike_amplitude(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_backspike_amplitude(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_19backspike_amplitude_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_mu(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_mu(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_9i_nmda_mu_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_peak_backspike_fast(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_peak_backspike_fast(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_fast_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_peak_backspike_slow(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_peak_backspike_slow(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_19peak_backspike_slow_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium__lambda(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium__lambda(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_7_lambda_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_beta2(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_beta2(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta2_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_beta1(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_beta1(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_5beta1_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_k_plus(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_k_plus(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6k_plus_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_g_nmda_o(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_nmda_o(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_8g_nmda_o_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_backspike_fast(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_backspike_fast(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_fast_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_backspike_slow(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_backspike_slow(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_18tau_backspike_slow_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_eta_gamma0(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_eta_gamma0(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_10eta_gamma0_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_s(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_s_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_nmda_s(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_nmda_s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_s_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_Vo(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_Vo(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vo_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_Vp(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_Vp(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Vp_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_f(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_f(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_8i_nmda_f_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_nmda_f(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_nmda_f(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_10tau_nmda_f_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_k_minus(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_k_minus(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_7k_minus_3__set__(o, v);
  }
  else {
    PyErr_SetString(PyExc_NotImplementedError, "__del__");
    return -1;
  }
}
/* --------------------------------------------------------------------------
 * Property accessors for the `calcium` type's array-valued attributes
 * (B, I_nmda, h, Ca, v_total, eta, g_nmda, v_backspike_fast, I_nmda_fast,
 *  I_nmda_slow, omega, v_backspike_slow).
 *
 * Unlike the scalar accessors above, these attributes DO support deletion:
 * when the setter receives v == NULL (i.e. `del obj.attr`), it forwards to
 * the Cython-generated __del__ wrapper instead of raising.  Getter and
 * __set__ forwarding follow the same pattern as the scalar accessors. */
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_B(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_B(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_1B_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6I_nmda_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_h(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_h(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_1h_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_Ca(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_Ca(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_2Ca_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_v_total(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_total(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_7v_total_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_eta(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_eta(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_3eta_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_g_nmda(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_nmda(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_6g_nmda_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_v_backspike_fast(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_backspike_fast(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_fast_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda_fast(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda_fast(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_fast_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda_slow(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda_slow(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_11I_nmda_slow_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_omega(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_omega(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_5omega_5__del__(o);
  }
}
static PyObject *__pyx_getprop_7splikes_11connections_7calcium_7calcium_v_backspike_slow(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_1__get__(o);
}
static int __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_backspike_slow(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {
  if (v) {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_3__set__(o, v);
  }
  else {
    return __pyx_pw_7splikes_11connections_7calcium_7calcium_16v_backspike_slow_5__del__(o);
  }
}
/* tp_methods table for the `calcium` type: _reset, update (accepts
 * keywords, hence the PyCFunctionWithKeywords cast chain), and the
 * Cython pickling helpers.  NULL-terminated per the C-API convention. */
static PyMethodDef __pyx_methods_7splikes_11connections_7calcium_calcium[] = {
  {"_reset", (PyCFunction)__pyx_pw_7splikes_11connections_7calcium_7calcium_1_reset, METH_NOARGS, 0},
  {"update", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7splikes_11connections_7calcium_7calcium_5update, METH_VARARGS|METH_KEYWORDS, 0},
  {"__reduce_cython__", (PyCFunction)__pyx_pw_7splikes_11connections_7calcium_7calcium_7__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw_7splikes_11connections_7calcium_7calcium_9__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* tp_getset table wiring each exposed attribute name to the getprop/setprop
 * pair defined above.  Doc slot and closure are unused (0).  The final
 * all-zero entry is the required sentinel. */
static struct PyGetSetDef __pyx_getsets_7splikes_11connections_7calcium_calcium[] = {
  {(char *)"g_t", __pyx_getprop_7splikes_11connections_7calcium_7calcium_g_t, __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_t, (char *)0, 0},
  {(char *)"mg2", __pyx_getprop_7splikes_11connections_7calcium_7calcium_mg2, __pyx_setprop_7splikes_11connections_7calcium_7calcium_mg2, (char *)0, 0},
  {(char *)"mg1", __pyx_getprop_7splikes_11connections_7calcium_7calcium_mg1, __pyx_setprop_7splikes_11connections_7calcium_7calcium_mg1, (char *)0, 0},
  {(char *)"v_reversal", __pyx_getprop_7splikes_11connections_7calcium_7calcium_v_reversal, __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_reversal, (char *)0, 0},
  {(char *)"tau_ca", __pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_ca, __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_ca, (char *)0, 0},
  {(char *)"alpha2", __pyx_getprop_7splikes_11connections_7calcium_7calcium_alpha2, __pyx_setprop_7splikes_11connections_7calcium_7calcium_alpha2, (char *)0, 0},
  {(char *)"alpha1", __pyx_getprop_7splikes_11connections_7calcium_7calcium_alpha1, __pyx_setprop_7splikes_11connections_7calcium_7calcium_alpha1, (char *)0, 0},
  {(char *)"backspike_amplitude", __pyx_getprop_7splikes_11connections_7calcium_7calcium_backspike_amplitude, __pyx_setprop_7splikes_11connections_7calcium_7calcium_backspike_amplitude, (char *)0, 0},
  {(char *)"i_nmda_mu", __pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_mu, __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_mu, (char *)0, 0},
  {(char *)"peak_backspike_fast", __pyx_getprop_7splikes_11connections_7calcium_7calcium_peak_backspike_fast, __pyx_setprop_7splikes_11connections_7calcium_7calcium_peak_backspike_fast, (char *)0, 0},
  {(char *)"peak_backspike_slow", __pyx_getprop_7splikes_11connections_7calcium_7calcium_peak_backspike_slow, __pyx_setprop_7splikes_11connections_7calcium_7calcium_peak_backspike_slow, (char *)0, 0},
  {(char *)"_lambda", __pyx_getprop_7splikes_11connections_7calcium_7calcium__lambda, __pyx_setprop_7splikes_11connections_7calcium_7calcium__lambda, (char *)0, 0},
  {(char *)"beta2", __pyx_getprop_7splikes_11connections_7calcium_7calcium_beta2, __pyx_setprop_7splikes_11connections_7calcium_7calcium_beta2, (char *)0, 0},
  {(char *)"beta1", __pyx_getprop_7splikes_11connections_7calcium_7calcium_beta1, __pyx_setprop_7splikes_11connections_7calcium_7calcium_beta1, (char *)0, 0},
  {(char *)"k_plus", __pyx_getprop_7splikes_11connections_7calcium_7calcium_k_plus, __pyx_setprop_7splikes_11connections_7calcium_7calcium_k_plus, (char *)0, 0},
  {(char *)"g_nmda_o", __pyx_getprop_7splikes_11connections_7calcium_7calcium_g_nmda_o, __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_nmda_o, (char *)0, 0},
  {(char *)"tau_backspike_fast", __pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_backspike_fast, __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_backspike_fast, (char *)0, 0},
  {(char *)"tau_backspike_slow", __pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_backspike_slow, __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_backspike_slow, (char *)0, 0},
  {(char *)"eta_gamma0", __pyx_getprop_7splikes_11connections_7calcium_7calcium_eta_gamma0, __pyx_setprop_7splikes_11connections_7calcium_7calcium_eta_gamma0, (char *)0, 0},
  {(char *)"i_nmda_s", __pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_s, __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_s, (char *)0, 0},
  {(char *)"tau_nmda_s", __pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_nmda_s, __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_nmda_s, (char *)0, 0},
  {(char *)"Vo", __pyx_getprop_7splikes_11connections_7calcium_7calcium_Vo, __pyx_setprop_7splikes_11connections_7calcium_7calcium_Vo, (char *)0, 0},
  {(char *)"Vp", __pyx_getprop_7splikes_11connections_7calcium_7calcium_Vp, __pyx_setprop_7splikes_11connections_7calcium_7calcium_Vp, (char *)0, 0},
  {(char *)"i_nmda_f", __pyx_getprop_7splikes_11connections_7calcium_7calcium_i_nmda_f, __pyx_setprop_7splikes_11connections_7calcium_7calcium_i_nmda_f, (char *)0, 0},
  {(char *)"tau_nmda_f", __pyx_getprop_7splikes_11connections_7calcium_7calcium_tau_nmda_f, __pyx_setprop_7splikes_11connections_7calcium_7calcium_tau_nmda_f, (char *)0, 0},
  {(char *)"k_minus", __pyx_getprop_7splikes_11connections_7calcium_7calcium_k_minus, __pyx_setprop_7splikes_11connections_7calcium_7calcium_k_minus, (char *)0, 0},
  {(char *)"B", __pyx_getprop_7splikes_11connections_7calcium_7calcium_B, __pyx_setprop_7splikes_11connections_7calcium_7calcium_B, (char *)0, 0},
  {(char *)"I_nmda", __pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda, __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda, (char *)0, 0},
  {(char *)"h", __pyx_getprop_7splikes_11connections_7calcium_7calcium_h, __pyx_setprop_7splikes_11connections_7calcium_7calcium_h, (char *)0, 0},
  {(char *)"Ca", __pyx_getprop_7splikes_11connections_7calcium_7calcium_Ca, __pyx_setprop_7splikes_11connections_7calcium_7calcium_Ca, (char *)0, 0},
  {(char *)"v_total", __pyx_getprop_7splikes_11connections_7calcium_7calcium_v_total, __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_total, (char *)0, 0},
  {(char *)"eta", __pyx_getprop_7splikes_11connections_7calcium_7calcium_eta, __pyx_setprop_7splikes_11connections_7calcium_7calcium_eta, (char *)0, 0},
  {(char *)"g_nmda", __pyx_getprop_7splikes_11connections_7calcium_7calcium_g_nmda, __pyx_setprop_7splikes_11connections_7calcium_7calcium_g_nmda, (char *)0, 0},
  {(char *)"v_backspike_fast", __pyx_getprop_7splikes_11connections_7calcium_7calcium_v_backspike_fast, __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_backspike_fast, (char *)0, 0},
  {(char *)"I_nmda_fast", __pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda_fast, __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda_fast, (char *)0, 0},
  {(char *)"I_nmda_slow", __pyx_getprop_7splikes_11connections_7calcium_7calcium_I_nmda_slow, __pyx_setprop_7splikes_11connections_7calcium_7calcium_I_nmda_slow, (char *)0, 0},
  {(char *)"omega", __pyx_getprop_7splikes_11connections_7calcium_7calcium_omega, __pyx_setprop_7splikes_11connections_7calcium_7calcium_omega, (char *)0, 0},
  {(char *)"v_backspike_slow", __pyx_getprop_7splikes_11connections_7calcium_7calcium_v_backspike_slow, __pyx_setprop_7splikes_11connections_7calcium_7calcium_v_backspike_slow, (char *)0, 0},
  {0, 0, 0, 0, 0}
};
/* Static PyTypeObject for splikes.connections.calcium.calcium.
 *
 * Only the slots Cython needs are populated: dealloc/traverse/clear for GC
 * support, the method and getset tables defined above, tp_init mapped to
 * the Cython __init__ wrapper, and the custom tp_new.  The tp_base slot is
 * left 0 here; presumably the base type is patched in during module init —
 * TODO(review): confirm against the module-exec code (outside this chunk).
 * NOTE(review): Py_TPFLAGS_CHECKTYPES / Py_TPFLAGS_HAVE_NEWBUFFER look like
 * Python-2-era flags; Cython normally defines them away on Python 3 —
 * confirm against the header section of this generated file.
 * The #if ladders keep the brace-initializer aligned with the slot layout
 * of each supported CPython version; do not reorder them. */
static PyTypeObject __pyx_type_7splikes_11connections_7calcium_calcium = {
  PyVarObject_HEAD_INIT(0, 0)
  "splikes.connections.calcium.calcium", /*tp_name*/
  sizeof(struct __pyx_obj_7splikes_11connections_7calcium_calcium), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_7splikes_11connections_7calcium_calcium, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_7splikes_11connections_7calcium_calcium, /*tp_traverse*/
  __pyx_tp_clear_7splikes_11connections_7calcium_calcium, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_7splikes_11connections_7calcium_calcium, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_7splikes_11connections_7calcium_calcium, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_pw_7splikes_11connections_7calcium_7calcium_3__init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_7splikes_11connections_7calcium_calcium, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1
  0, /*tp_vectorcall*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030B00A2
  0, /*tp_inline_values_offset*/
  #endif
};
/* Module-level method table: empty apart from the terminating sentinel
 * (this module defines no top-level C functions). */
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_calcium(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_calcium},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"calcium",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* CYTHON_SMALL_CODE marks rarely-executed init/helper functions as cold on
 * GCC >= 4.3 so the optimizer keeps them out of hot code paths; clang and
 * older compilers get an empty definition. */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
/* Table of interned string constants used by the module.  Each entry pairs
 * the global PyObject* slot with its C literal; __Pyx_InitStrings fills
 * the slots at module init.  Entry fields after the literal length encode
 * encoding/intern/identifier flags for the string. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_n_s_calcium, __pyx_k_calcium, sizeof(__pyx_k_calcium), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1},
{&__pyx_n_s_initial_weight_range, __pyx_k_initial_weight_range, sizeof(__pyx_k_initial_weight_range), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_post, __pyx_k_post, sizeof(__pyx_k_post), 0, 0, 1, 1},
{&__pyx_n_s_pre, __pyx_k_pre, sizeof(__pyx_k_pre), 0, 0, 1, 1},
{&__pyx_n_s_pylab, __pyx_k_pylab, sizeof(__pyx_k_pylab), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_reset, __pyx_k_reset, sizeof(__pyx_k_reset), 0, 0, 1, 1},
{&__pyx_kp_s_self_W_cannot_be_converted_to_a, __pyx_k_self_W_cannot_be_converted_to_a, sizeof(__pyx_k_self_W_cannot_be_converted_to_a), 0, 0, 1, 0},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_sim, __pyx_k_sim, sizeof(__pyx_k_sim), 0, 0, 1, 1},
{&__pyx_n_s_state, __pyx_k_state, sizeof(__pyx_k_state), 0, 0, 1, 1},
{&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Resolve and cache the builtins (range, TypeError, ImportError) referenced
 * by the generated code.  Returns 0 on success, -1 with an exception set;
 * the __PYX_ERR arguments record (source-file index, line) for tracebacks. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 160, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(2, 947, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
/* Build the constant tuples used as pre-packed arguments for the TypeError
 * and ImportError raises in the generated code.  Returns 0 on success,
 * -1 with an exception set. */
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "(tree fragment)":2
 * def __reduce_cython__(self):
 * raise TypeError("self.W cannot be converted to a Python object for pickling") # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("self.W cannot be converted to a Python object for pickling")
 */
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_self_W_cannot_be_converted_to_a); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "(tree fragment)":4
 * raise TypeError("self.W cannot be converted to a Python object for pickling")
 * def __setstate_cython__(self, __pyx_state):
 * raise TypeError("self.W cannot be converted to a Python object for pickling") # <<<<<<<<<<<<<<
 */
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_self_W_cannot_be_converted_to_a); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":947
 * __pyx_import_array()
 * except Exception:
 * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
 *
 * cdef inline int import_umath() except -1:
 */
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(2, 947, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":953
 * _import_umath()
 * except Exception:
 * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
 *
 * cdef inline int import_ufunc() except -1:
 */
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 953, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Intern the string table and create the float constant 0.5 used by the
 * module.  Returns 0 on success, -1 with an exception set. */
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 2, __pyx_L1_error);
__pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 2, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
/* Forward declarations for the per-phase module-init helpers below. */
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
/* Global init phase: nothing to do for this module (kept for the fixed
 * init sequence); always returns 0. */
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Variable export phase: this module exports no C variables; always 0. */
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Function export phase: this module exports no C functions; always 0. */
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Type init phase: import the base class (splikes.splikes.connection),
 * build the calcium vtable on top of the base vtable (overriding _reset
 * and update), finalize the type with PyType_Ready, and publish it as
 * module attribute "calcium".  Returns 0 on success, -1 with an
 * exception set. */
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_t_1 = PyImport_ImportModule("splikes.splikes"); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7splikes_7splikes_connection = __Pyx_ImportType(__pyx_t_1, "splikes.splikes", "connection", sizeof(struct __pyx_obj_7splikes_7splikes_connection), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7splikes_7splikes_connection) __PYX_ERR(0, 2, __pyx_L1_error)
__pyx_vtabptr_7splikes_7splikes_connection = (struct __pyx_vtabstruct_7splikes_7splikes_connection*)__Pyx_GetVtable(__pyx_ptype_7splikes_7splikes_connection->tp_dict); if (unlikely(!__pyx_vtabptr_7splikes_7splikes_connection)) __PYX_ERR(0, 2, __pyx_L1_error)
__pyx_vtabptr_7splikes_11connections_7calcium_calcium = &__pyx_vtable_7splikes_11connections_7calcium_calcium;
/* Start from a copy of the base vtable, then override the two slots
 * implemented by this subclass. */
__pyx_vtable_7splikes_11connections_7calcium_calcium.__pyx_base = *__pyx_vtabptr_7splikes_7splikes_connection;
__pyx_vtable_7splikes_11connections_7calcium_calcium.__pyx_base._reset = (PyObject *(*)(struct __pyx_obj_7splikes_7splikes_connection *, int __pyx_skip_dispatch))__pyx_f_7splikes_11connections_7calcium_7calcium__reset;
__pyx_vtable_7splikes_11connections_7calcium_calcium.__pyx_base.update = (PyObject *(*)(struct __pyx_obj_7splikes_7splikes_connection *, double, struct __pyx_obj_7splikes_7splikes_simulation *, int __pyx_skip_dispatch))__pyx_f_7splikes_11connections_7calcium_7calcium_update;
__pyx_type_7splikes_11connections_7calcium_calcium.tp_base = __pyx_ptype_7splikes_7splikes_connection;
if (PyType_Ready(&__pyx_type_7splikes_11connections_7calcium_calcium) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type_7splikes_11connections_7calcium_calcium.tp_print = 0;
#endif
/* If the type gained no instance dict and uses the generic getattr,
 * swap in Cython's faster attribute lookup. */
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7splikes_11connections_7calcium_calcium.tp_dictoffset && __pyx_type_7splikes_11connections_7calcium_calcium.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type_7splikes_11connections_7calcium_calcium.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type_7splikes_11connections_7calcium_calcium.tp_dict, __pyx_vtabptr_7splikes_11connections_7calcium_calcium) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_calcium, (PyObject *)&__pyx_type_7splikes_11connections_7calcium_calcium) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7splikes_11connections_7calcium_calcium) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
__pyx_ptype_7splikes_11connections_7calcium_calcium = &__pyx_type_7splikes_11connections_7calcium_calcium;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
/* Type import phase: resolve the extension types this module uses from
 * builtins (type), numpy (dtype, ndarray, scalar hierarchy, ufunc) and
 * splikes.splikes (group, monitor, simulation, neuron), including the
 * vtables of the splikes types.  Size-check policy: Warn for types whose
 * struct layout must match, Ignore for opaque numpy types.  Returns 0 on
 * success, -1 with an exception set. */
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 200, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(2, 200, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(2, 223, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(2, 227, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(2, 239, __pyx_L1_error)
__pyx_ptype_5numpy_generic = __Pyx_ImportType(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_generic) __PYX_ERR(2, 771, __pyx_L1_error)
__pyx_ptype_5numpy_number = __Pyx_ImportType(__pyx_t_1, "numpy", "number", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_number) __PYX_ERR(2, 773, __pyx_L1_error)
__pyx_ptype_5numpy_integer = __Pyx_ImportType(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_integer) __PYX_ERR(2, 775, __pyx_L1_error)
__pyx_ptype_5numpy_signedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(2, 777, __pyx_L1_error)
__pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(2, 779, __pyx_L1_error)
__pyx_ptype_5numpy_inexact = __Pyx_ImportType(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(2, 781, __pyx_L1_error)
__pyx_ptype_5numpy_floating = __Pyx_ImportType(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_floating) __PYX_ERR(2, 783, __pyx_L1_error)
__pyx_ptype_5numpy_complexfloating = __Pyx_ImportType(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(2, 785, __pyx_L1_error)
__pyx_ptype_5numpy_flexible = __Pyx_ImportType(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(2, 787, __pyx_L1_error)
__pyx_ptype_5numpy_character = __Pyx_ImportType(__pyx_t_1, "numpy", "character", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_character) __PYX_ERR(2, 789, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(2, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("splikes.splikes"); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7splikes_7splikes_group = __Pyx_ImportType(__pyx_t_1, "splikes.splikes", "group", sizeof(struct __pyx_obj_7splikes_7splikes_group), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7splikes_7splikes_group) __PYX_ERR(4, 29, __pyx_L1_error)
__pyx_ptype_7splikes_7splikes_monitor = __Pyx_ImportType(__pyx_t_1, "splikes.splikes", "monitor", sizeof(struct __pyx_obj_7splikes_7splikes_monitor), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7splikes_7splikes_monitor) __PYX_ERR(4, 35, __pyx_L1_error)
__pyx_vtabptr_7splikes_7splikes_monitor = (struct __pyx_vtabstruct_7splikes_7splikes_monitor*)__Pyx_GetVtable(__pyx_ptype_7splikes_7splikes_monitor->tp_dict); if (unlikely(!__pyx_vtabptr_7splikes_7splikes_monitor)) __PYX_ERR(4, 35, __pyx_L1_error)
__pyx_ptype_7splikes_7splikes_simulation = __Pyx_ImportType(__pyx_t_1, "splikes.splikes", "simulation", sizeof(struct __pyx_obj_7splikes_7splikes_simulation), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7splikes_7splikes_simulation) __PYX_ERR(4, 43, __pyx_L1_error)
__pyx_vtabptr_7splikes_7splikes_simulation = (struct __pyx_vtabstruct_7splikes_7splikes_simulation*)__Pyx_GetVtable(__pyx_ptype_7splikes_7splikes_simulation->tp_dict); if (unlikely(!__pyx_vtabptr_7splikes_7splikes_simulation)) __PYX_ERR(4, 43, __pyx_L1_error)
__pyx_ptype_7splikes_7splikes_neuron = __Pyx_ImportType(__pyx_t_1, "splikes.splikes", "neuron", sizeof(struct __pyx_obj_7splikes_7splikes_neuron), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7splikes_7splikes_neuron) __PYX_ERR(4, 52, __pyx_L1_error)
__pyx_vtabptr_7splikes_7splikes_neuron = (struct __pyx_vtabstruct_7splikes_7splikes_neuron*)__Pyx_GetVtable(__pyx_ptype_7splikes_7splikes_neuron->tp_dict); if (unlikely(!__pyx_vtabptr_7splikes_7splikes_neuron)) __PYX_ERR(4, 52, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
/* Variable import phase: this module imports no C variables; always 0. */
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* Function import phase: this module imports no C functions; always 0. */
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
/* __Pyx_PyMODINIT_FUNC: correct linkage/return type for the module init
 * entry point — PyMODINIT_FUNC when exported normally, otherwise a plain
 * void (Python 2) or PyObject* (Python 3) with C linkage under C++. */
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
/* Module init entry point (initcalcium on Python 2, PyInit_calcium on
 * Python 3).  Under PEP 489 multi-phase init the body below only returns
 * the module definition; the real work runs in __pyx_pymod_exec_calcium.
 * In single-phase builds the #else branches splice the exec body directly
 * into this function. */
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcalcium(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcalcium(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_calcium(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_calcium(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
/* Refuse to load this module into more than one (sub)interpreter per
 * process: the generated code relies on process-global state.  Remembers
 * the first interpreter seen and returns -1 with ImportError set for any
 * other.  Uses interpreter IDs on 3.7+, interpreter pointers before. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
/* GetID can itself fail and return -1 with an exception set. */
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
/* Copy attribute `from_name` from a PEP 489 ModuleSpec into the module
 * dict under `to_name`.  A missing attribute is treated as success;
 * Py_None is skipped unless `allow_none` is set.  Returns 0 on success
 * (or benign absence), -1 with an exception set on failure. */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
    int status = 0;
    PyObject *attr = PyObject_GetAttrString(spec, from_name);
    if (!attr) {
        /* Only AttributeError is benign; anything else propagates. */
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            return -1;
        PyErr_Clear();
        return 0;
    }
    if (attr != Py_None || allow_none)
        status = PyDict_SetItemString(moddict, to_name, attr);
    Py_DECREF(attr);
    return status;
}
/* PEP 489 Py_mod_create hook: build the module object from its spec,
 * copying __loader__, __file__, __package__ and __path__ over from the
 * spec.  Re-imports return the already-created module; loading into a
 * second interpreter is rejected. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
/* moddict is a borrowed reference owned by the module. */
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
/* Module execution body (PEP 489 Py_mod_exec hook; also the tail of the
 * single-phase init functions above, via the preprocessor weaving).
 * Sequence: sanity checks -> constant/string setup -> module object ->
 * cached builtins/constants -> type init/import -> the module-level
 * statements of splikes/connections/calcium.pyx (import pylab, numpy). */
static CYTHON_SMALL_CODE int __pyx_pymod_exec_calcium(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'calcium' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_calcium(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 2, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 2, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 2, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("calcium", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 2, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 2, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 2, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 2, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
/* When run as a script, present the module as __main__. */
if (__pyx_module_is_main_splikes__connections__calcium) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 2, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 2, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "splikes.connections.calcium")) {
if (unlikely(PyDict_SetItemString(modules, "splikes.connections.calcium", __pyx_m) < 0)) __PYX_ERR(0, 2, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 2, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 2, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 2, __pyx_L1_error)
#endif
/* "splikes/connections/calcium.pyx":4
 * from splikes.splikes cimport *
 * cimport cython
 * import pylab # <<<<<<<<<<<<<<
 *
 * import numpy as np
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_pylab, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pylab, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":6
 * import pylab
 *
 * import numpy as np # <<<<<<<<<<<<<<
 * cimport numpy as np
 *
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "splikes/connections/calcium.pyx":2
 *
 * from splikes.splikes cimport * # <<<<<<<<<<<<<<
 * cimport cython
 * import pylab
 */
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../opt/anaconda3/envs/py3/lib/python3.8/site-packages/numpy/__init__.pxd":1016
 *
 *
 * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
 * """
 * returns the unit part of the dtype for a numpy datetime64 object.
 */
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
/* On failure: attach a traceback if the module dict exists, then tear
 * down the half-initialized module. */
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init splikes.connections.calcium", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init splikes.connections.calcium");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import `modname` and read its "RefNannyAPI" attribute — an integer
 * holding a pointer to the refnanny API table.  Returns NULL (possibly
 * with an exception pending) when the module or attribute is missing. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    void *api = NULL;
    PyObject *mod = PyImport_ImportModule(modname);
    if (mod) {
        PyObject *cap = PyObject_GetAttrString(mod, "RefNannyAPI");
        if (cap) {
            api = PyLong_AsVoidPtr(cap);
            Py_DECREF(cap);
        }
        Py_DECREF(mod);
    }
    return (__Pyx_RefNannyAPIStruct *)api;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that dispatches straight through the type's
 * tp_getattro (or, on Python 2, tp_getattr) slot when available,
 * falling back to the generic PyObject_GetAttr otherwise. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* type = Py_TYPE(obj);
    if (likely(type->tp_getattro != NULL))
        return type->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    if (likely(type->tp_getattr != NULL))
        return type->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
/* Look `name` up on the builtins module; on failure replace the pending
 * error with a NameError matching CPython's message format. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (likely(result))
        return result;
#if PY_MAJOR_VERSION >= 3
    PyErr_Format(PyExc_NameError,
                 "name '%U' is not defined", name);
#else
    PyErr_Format(PyExc_NameError,
                 "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    return result;
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Version tag of a type's tp_dict, or 0 when absent; used to validate
 * cached attribute lookups cheaply. */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
/* Version tag of an instance's __dict__, or 0 when the object has none. */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
/* Negative tp_dictoffset means a variable-size layout; resolve via
 * the slow helper in that case. */
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
/* True when both the type dict and instance dict versions still match
 * the cached tags (i.e. a cached lookup result is still valid). */
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
/* Evaluate a Python function's code object directly with positional args
 * already unpacked, bypassing argument-tuple creation (mirror of
 * CPython's internal fast call). */
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
/* NOTE(review): the depth bump around the frame DECREF appears to
 * compensate for recursion-depth bookkeeping in frame deallocation
 * (as in CPython's own fast-call path) — confirm against the CPython
 * version this was generated for before touching. */
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
/* Fast call with optional defaults and keyword arguments.  Takes the
 * no-frills NoKw path when the code object is a simple optimized
 * function and no kwargs are given; otherwise flattens kwargs into a
 * (key, value, ...) tuple and falls back to PyEval_EvalCodeEx. */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments .*/
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
/* Flatten the kwargs dict into an interleaved key/value tuple as
 * expected by PyEval_EvalCodeEx. */
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
/* Invoke a METH_O / METH_NOARGS PyCFunction directly with a single `arg`
 * (may be NULL for METH_NOARGS), bypassing tuple packing.  Same recursion
 * guard and NULL-result check as __Pyx_PyObject_Call. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call `func` with zero arguments, short-cutting plain Python functions and
 * METH_NOARGS C functions before falling back to the generic call path with
 * the shared empty tuple. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, NULL, 0);
    }
#endif
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
    if (likely(PyCFunction_Check(func)))
#endif
    {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
/* Call a METH_FASTCALL C function with a C argument array (no kwargs).
 * Dispatches on METH_KEYWORDS because the fastcall-with-keywords signature
 * takes an extra kwnames argument. */
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
    PyCFunction meth = PyCFunction_GET_FUNCTION(func);
    PyObject *self = PyCFunction_GET_SELF(func);
    int flags = PyCFunction_GET_FLAGS(func);
    assert(PyCFunction_Check(func));
    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
    assert(nargs >= 0);
    assert(nargs == 0 || args != NULL);
    /* _PyCFunction_FastCallDict() must not be called with an exception set,
       because it may clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
    } else {
        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
    }
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
/* Slow helper: pack `arg` into a 1-tuple and go through the generic call. */
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);  /* PyTuple_SET_ITEM steals a reference */
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
/* Call `func` with exactly one positional argument, using the fastest call
 * protocol available for the callable's type. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if (likely(PyCFunction_Check(func))) {
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
        } else if (__Pyx_PyFastCFunction_Check(func)) {
            return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* Non-CPython fallback: always pack into a 1-tuple. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* GetModuleGlobalName */
/* Look up `name` in the module dict (__pyx_d), falling back to the builtins
 * via __Pyx_GetBuiltinName().  Returns a new reference, or NULL with an
 * exception set.  With CYTHON_USE_DICT_VERSIONS the caller's cache slot is
 * refreshed through __PYX_UPDATE_DICT_CACHE (presumably a no-op otherwise —
 * macro defined elsewhere). */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
    /* interned unicode names carry a precomputed hash; reuse it */
    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    } else if (unlikely(PyErr_Occurred())) {
        return NULL;
    }
#else
    result = PyDict_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
#endif
#else
    result = PyObject_GetItem(__pyx_d, name);
    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
    if (likely(result)) {
        return __Pyx_NewRef(result);
    }
    PyErr_Clear();  /* KeyError from the module dict is not an error: try builtins */
#endif
    return __Pyx_GetBuiltinName(name);
}
/* ExtTypeTest */
/* Verify that `obj` is an instance of `type`.  Returns 1 on success;
 * otherwise sets a TypeError (or SystemError when `type` is missing)
 * and returns 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (likely(type)) {
        if (likely(__Pyx_TypeCheck(obj, type)))
            return 1;
        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                     Py_TYPE(obj)->tp_name, type->tp_name);
        return 0;
    }
    PyErr_SetString(PyExc_SystemError, "Missing type object");
    return 0;
}
/* RaiseArgTupleInvalid */
/* Raise the standard TypeError for a call made with the wrong number of
 * positional arguments; wording matches CPython's own message. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    /* default to the "too many" wording, then correct for the other cases */
    const char *more_or_less = "at most";
    Py_ssize_t num_expected = num_max;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    }
    if (exact)
        more_or_less = "exactly";
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise the TypeError used when a keyword argument duplicates a value that
 * was already supplied (positionally or by keyword). */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
/* Match the keyword dict `kwds` against the NULL-terminated `argnames`
 * table, storing matched values (borrowed refs) into `values`.  Unknown
 * keywords go into `kwds2` when the function accepts **kwargs, otherwise
 * they raise TypeError.  Entries before `num_pos_args` were already filled
 * positionally, so a keyword hit there means a duplicate.
 * Returns 0 on success, -1 with an exception set on error. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        name = first_kw_arg;
        /* fast path: interned keyword strings compare by pointer identity */
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        /* slow path: compare by string value */
        name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
        if (likely(PyString_Check(key))) {
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* not a keyword-capable slot: check the positional-only part
                 * to distinguish "duplicate" from "unexpected" */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                             && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
#endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
#else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
#endif
bad:
    return -1;
}
/* ArgTypeTest */
/* Type-check a function argument.  With `exact` set only an exact type match
 * is acceptable (the caller has already tested Py_TYPE(obj) == type before
 * invoking this helper — only the Py2 basestring special case is handled
 * here); otherwise subclasses pass too.  Returns 1 on success, 0 with a
 * TypeError (or SystemError for a missing type) set on failure. */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    else if (exact) {
#if PY_MAJOR_VERSION == 2
        /* basestring accepts both str and unicode as "exact" matches */
        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
    }
    else {
        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
    }
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* Inline equivalents of PyErr_Restore()/PyErr_Fetch() that poke the given
 * thread state directly, avoiding the PyThreadState_GET call. */
/* Restore: steals the references to type/value/tb, releasing whatever was
 * previously set as the current exception. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Fetch: transfers ownership of the current exception to the caller and
 * clears the thread-state slots (any of the outputs may be NULL). */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
/* Python 2 `raise type, value, tb`: normalize the three components and set
 * them as the current exception.  Consumes nothing on entry; takes its own
 * references and releases them on the error path. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's NormalizeException cannot handle a NULL value */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* `raise instance`: the class is taken from the instance itself */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);  /* steals all three references */
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 `raise type(value) from cause`: instantiate the exception if
 * needed, attach the cause and traceback, and set the current exception.
 * All inputs are borrowed; only `owned_instance` is owned locally. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* `raise instance` */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* `raise Class` or `raise Class(value)`: may need to instantiate */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    /* value is already an instance of a subclass: keep it */
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        PyObject *fixed_cause;
        if (cause == Py_None) {
            /* `raise ... from None`: suppress context chaining */
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);  /* steals fixed_cause */
    }
    PyErr_SetObject(type, value);
    if (tb) {
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        /* attach tb directly to the thread state to avoid a fetch/restore */
        PyThreadState *tstate = __Pyx_PyThreadState_Current;
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread state's exc_info stack past frames whose exception slot is
 * empty, returning the innermost frame that actually holds an exception
 * (or the outermost frame if none does).  Returns a borrowed pointer. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Save the currently *handled* exception (sys.exc_info, not the pending
 * one) into type/value/tb as new references; any slot may come back NULL. */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
#else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
#endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install type/value/tb as the handled exception (steals all three
 * references) and release whatever was there before. */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
#else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
#endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Return 1 if `exc_type` matches any entry of `tuple`.  The first (Py3-only)
 * loop is a pointer-identity fast path; the second applies the full
 * given-exception-matches semantics (subclass checks etc.). */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* Fast equivalent of PyErr_ExceptionMatches() that reads the pending
 * exception straight from the given thread state. */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;  /* no exception pending */
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
/* Catch the pending exception: fetch it, normalize it, hand new references
 * to the caller, and install it as the currently handled exception (what
 * sys.exc_info() reports) — the C-level equivalent of entering an `except`
 * block.  Returns 0 on success, -1 (with outputs zeroed) on failure. */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    /* inline PyErr_Fetch: take over the pending exception */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;  /* normalization itself raised */
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* caller gets its own references; the handled-exception slots below
     * take over the original ones */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
    {
        _PyErr_StackItem *exc_info = tstate->exc_info;
        tmp_type = exc_info->exc_type;
        tmp_value = exc_info->exc_value;
        tmp_tb = exc_info->exc_traceback;
        exc_info->exc_type = local_type;
        exc_info->exc_value = local_value;
        exc_info->exc_traceback = local_tb;
    }
#else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
#endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* CallNextTpDealloc */
/* Walk up the base-class chain past the run of types sharing
 * `current_tp_dealloc` and invoke the first differing tp_dealloc slot
 * (i.e. the "next" dealloc in the inheritance chain), if any. */
static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc) {
    PyTypeObject* base;
    /* skip bases until we reach those that use current_tp_dealloc ... */
    for (base = Py_TYPE(obj); base && base->tp_dealloc != current_tp_dealloc; base = base->tp_base) {}
    /* ... then skip the run of bases that share it */
    for (; base && base->tp_dealloc == current_tp_dealloc; base = base->tp_base) {}
    if (base)
        base->tp_dealloc(obj);
}
/* CallNextTpTraverse */
/* Forward GC traversal to the next base class below the run of types
 * sharing `current_tp_traverse`; returns 0 when no such slot exists. */
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) {
    PyTypeObject* base;
    for (base = Py_TYPE(obj); base && base->tp_traverse != current_tp_traverse; base = base->tp_base) {}
    for (; base && base->tp_traverse == current_tp_traverse; base = base->tp_base) {}
    return (base && base->tp_traverse) ? base->tp_traverse(obj, v, a) : 0;
}
/* CallNextTpClear */
/* Invoke the tp_clear slot of the next base class below the run of types
 * sharing `current_tp_clear`, if that base defines one. */
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) {
    PyTypeObject* base;
    for (base = Py_TYPE(obj); base && base->tp_clear != current_tp_clear; base = base->tp_base) {}
    for (; base && base->tp_clear == current_tp_clear; base = base->tp_base) {}
    if (base && base->tp_clear)
        base->tp_clear(obj);
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* GetVTable */
/* Fetch the C vtable pointer stored under the '__pyx_vtable__' key of an
 * extension type's dict (a PyCapsule on 2.7+, PyCObject before that).
 * Returns the raw pointer, or NULL with an exception set. */
static void* __Pyx_GetVtable(PyObject *dict) {
    void* ptr;
    PyObject *ob = PyObject_GetItem(dict, __pyx_n_s_pyx_vtable);
    if (!ob)
        goto bad;
#if PY_VERSION_HEX >= 0x02070000
    ptr = PyCapsule_GetPointer(ob, 0);
#else
    ptr = PyCObject_AsVoidPtr(ob);
#endif
    if (!ptr && !PyErr_Occurred())
        PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type");
    Py_DECREF(ob);
    return ptr;
bad:
    Py_XDECREF(ob);  /* ob is NULL here; XDECREF is a safe no-op */
    return NULL;
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Raise the standard AttributeError for a failed generic lookup. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
/* Specialized PyObject_GenericGetAttr() for types with no instance dict
 * (tp_dictoffset == 0): only the type's MRO needs to be searched.  Returns
 * a new reference, or NULL with AttributeError set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);  /* borrowed reference */
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
    {
        /* honor the descriptor protocol (methods, properties, ...) */
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Drop-in replacement for PyObject_GenericGetAttr() that takes the faster
 * dict-less path when the instance has no __dict__ slot. */
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
/* Store the C vtable pointer under '__pyx_vtable__' in an extension type's
 * dict, wrapped in a PyCapsule (PyCObject before 2.7).  Returns 0 on
 * success, -1 with an exception set on failure. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
/* PyObjectGetAttrStrNoError */
/* Clear the current exception only if it is an AttributeError; anything
 * else is left pending for the caller. */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        __Pyx_PyErr_Clear();
}
/* getattr() that returns NULL *without* an AttributeError when the attribute
 * is missing (other exceptions propagate).  Returns a new reference. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
    /* 3.7+: the suppress_missing flag avoids raising/clearing entirely */
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
    }
#endif
    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
    if (unlikely(!result)) {
        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
    }
    return result;
}
/* SetupReduce */
/* Return 1 if the callable `meth` has a __name__ equal to `name`, else 0.
 * Lookup/comparison failures are swallowed and reported as 0 so that
 * __Pyx_setup_reduce() can treat them as "not our method". */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;
    name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }
    if (unlikely(ret < 0)) {
        PyErr_Clear();
        ret = 0;
    }
    Py_XDECREF(name_attr);
    return ret;
}
/* Wire up pickling for an extension type: if the type still inherits
 * object.__reduce_ex__ (and defines no __getstate__ of its own), install the
 * Cython-generated __reduce_cython__/__setstate_cython__ helpers as
 * __reduce__/__setstate__ in the type dict.  Returns 0 on success, -1 with
 * a RuntimeError (or the original exception) set on failure.
 * NOTE: _PyType_Lookup returns borrowed refs, __Pyx_PyObject_GetAttrStr new
 * ones — hence the conditional decrefs at the end. */
static int __Pyx_setup_reduce(PyObject* type_obj) {
    int ret = 0;
    PyObject *object_reduce = NULL;
    PyObject *object_reduce_ex = NULL;
    PyObject *reduce = NULL;
    PyObject *reduce_ex = NULL;
    PyObject *reduce_cython = NULL;
    PyObject *setstate = NULL;
    PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
    if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
    if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
    /* only patch types that did not override __reduce_ex__ themselves */
    if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
        object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
        reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
            if (likely(reduce_cython)) {
                /* move __reduce_cython__ into the __reduce__ slot */
                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
            } else if (reduce == object_reduce || PyErr_Occurred()) {
                goto __PYX_BAD;
            }
            setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
            if (!setstate) PyErr_Clear();
            if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
                if (likely(setstate_cython)) {
                    ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                    ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
                } else if (!setstate || PyErr_Occurred()) {
                    goto __PYX_BAD;
                }
            }
            PyType_Modified((PyTypeObject*)type_obj);  /* invalidate method caches */
        }
    }
    goto __PYX_GOOD;
__PYX_BAD:
    if (!PyErr_Occurred())
        PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
    ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
    Py_XDECREF(object_reduce);
    Py_XDECREF(object_reduce_ex);
#endif
    Py_XDECREF(reduce);
    Py_XDECREF(reduce_ex);
    Py_XDECREF(reduce_cython);
    Py_XDECREF(setstate);
    Py_XDECREF(setstate_cython);
    return ret;
}
/* Import */
/* Perform an import of `name` with the given `from_list` and relative-import
 * `level` on behalf of this module, mirroring the bytecode IMPORT_NAME
 * semantics.  level == -1 means "try relative first, then absolute" (only
 * meaningful when this module lives in a package).  Returns a new reference
 * to the module, or NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
#if PY_MAJOR_VERSION < 3
    /* Py2 goes through __builtin__.__import__ so user overrides are honored */
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
#endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
#if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            /* only attempt a relative import when we are inside a package */
            if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();  /* fall back to an absolute import */
                }
            }
            level = 0;
        }
#endif
        if (!module) {
#if PY_MAJOR_VERSION < 3
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
            Py_DECREF(py_level);
#else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
#endif
        }
    }
bad:
#if PY_MAJOR_VERSION < 3
    Py_XDECREF(py_import);
#endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
/* Decide whether the C source line should appear in generated tracebacks,
 * honoring the user-settable `cython_runtime.cline_in_traceback` flag.
 * Returns `c_line` to include it or 0 to suppress it.  The pending exception
 * (if any) is preserved across the attribute lookups. */
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
    PyObject *use_cline;
    PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict;
#endif
    if (unlikely(!__pyx_cython_runtime)) {
        return c_line;
    }
    /* stash the in-flight exception: the lookups below must not clobber it */
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, *cython_runtime_dict,
            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
    } else
#endif
    {
      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        PyErr_Clear();
        use_cline = NULL;
      }
    }
    if (!use_cline) {
        c_line = 0;
        /* default the flag to False so subsequent lookups are cheap */
        (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif
/* CodeObjectCache */
/* Binary search over the sorted cache entries; returns the index of
 * `code_line` if present, otherwise the insertion position that keeps the
 * array sorted.  Callers guarantee a non-empty `entries` array (the final
 * comparison reads entries[probe] unconditionally). */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int lo = 0, probe = 0, hi = count - 1;
    if (hi >= 0 && code_line > entries[hi].code_line)
        return count;  /* beyond the last entry: append position */
    while (lo < hi) {
        probe = lo + (hi - lo) / 2;  /* overflow-safe midpoint */
        if (code_line < entries[probe].code_line)
            hi = probe;
        else if (code_line > entries[probe].code_line)
            lo = probe + 1;
        else
            return probe;  /* exact hit */
    }
    /* lo == hi; `probe` is adjacent, so one final comparison settles it */
    return (code_line <= entries[probe].code_line) ? probe : probe + 1;
}
/* Look up the cached PyCodeObject for `code_line`.  Returns a new reference,
 * or NULL (with no exception set) when the line is not cached. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    int idx;
    PyCodeObject* cached;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries))
        return NULL;
    idx = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(idx >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[idx].code_line != code_line))
        return NULL;
    cached = __pyx_code_cache.entries[idx].code_object;
    Py_INCREF(cached);
    return cached;
}
/* Insert (or replace) the cache entry mapping `code_line` to `code_object`,
 * keeping the entry array sorted.  Takes its own reference to the code
 * object.  Allocation failures are silently ignored — the cache is only an
 * optimization. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* first insertion: allocate the initial 64-entry array */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* line already cached: swap in the new code object */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        /* grow by 64 entries at a time */
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* shift the tail right to open a slot at `pos` */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal (empty) PyCodeObject whose filename/name/line metadata make
 * a synthetic traceback frame point at the right source location.  When a C
 * line is given, it is appended to the function name so tracebacks show the
 * position inside the generated C file as well.  Returns a new reference or
 * NULL with an exception set. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = NULL;
    PyObject *py_funcname = NULL;
    #if PY_MAJOR_VERSION < 3
    PyObject *py_srcfile = NULL;
    py_srcfile = PyString_FromString(filename);
    if (!py_srcfile) goto bad;
    #endif
    if (c_line) {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        if (!py_funcname) goto bad;
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        if (!py_funcname) goto bad;
        /* Borrow the UTF-8 buffer; it stays valid while py_funcname lives. */
        funcname = PyUnicode_AsUTF8(py_funcname);
        if (!funcname) goto bad;
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        if (!py_funcname) goto bad;
        #endif
    }
    #if PY_MAJOR_VERSION < 3
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,    /*PyObject *filename,*/
        py_funcname,   /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    #else
    /* Py3 has a dedicated helper for empty code objects. */
    py_code = PyCode_NewEmpty(filename, funcname, py_line);
    #endif
    Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline
    return py_code;
bad:
    Py_XDECREF(py_funcname);
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(py_srcfile);
    #endif
    return NULL;
}
/* Append a synthetic frame for (funcname, filename, py_line) to the current
 * exception's traceback, so errors raised from generated C code still show a
 * meaningful Python-level location.  Code objects are memoized in
 * __pyx_code_cache; C lines are keyed as -c_line to keep them distinct from
 * Python line numbers in the same cache. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;   /* best effort: drop the frame on failure */
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_d,    /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
/* CIntFromPyVerify */
/* Helper macros for the CIntFromPy converters below: each wraps a CPython
 * conversion call, range-checks the value against the (possibly narrower)
 * target C type, and returns it from the enclosing function. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
/* Variant for converters that report errors via -1 + PyErr_Occurred(). */
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
/* Expansion relies on an `is_unsigned` local and `raise_overflow` /
 * `raise_neg_overflow` labels being defined in the calling function. */
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* Declarations */
/* Construct a float complex from real/imaginary parts.  Three variants:
 * C++ std::complex, C99 _Complex, or a plain two-field struct fallback. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return ::std::complex< float >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return x + y*(__pyx_t_float_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      __pyx_t_float_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* Arithmetic */
/* Software float-complex arithmetic, compiled only when no native complex
 * type (C99 _Complex / C++ std::complex) is available. */
#if CYTHON_CCOMPLEX
#else
    static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    #if 1
    /* Division using scaling by the larger of |b.real|, |b.imag| to avoid
     * premature overflow/underflow in the denominator (Smith's algorithm). */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        if (b.imag == 0) {
            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else if (fabsf(b.real) >= fabsf(b.imag)) {
            if (b.real == 0 && b.imag == 0) {
                /* 0/0: deliberately divide to produce IEEE NaN/inf results */
                return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
            } else {
                float r = b.imag / b.real;
                float s = (float)(1.0) / (b.real + b.imag * r);
                return __pyx_t_float_complex_from_parts(
                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
            }
        } else {
            float r = b.real / b.imag;
            float s = (float)(1.0) / (b.imag + b.real * r);
            return __pyx_t_float_complex_from_parts(
                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
        }
    }
    #else
    /* Textbook division: simpler but overflow-prone for large |b|. */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        if (b.imag == 0) {
            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else {
            float denom = b.real * b.real + b.imag * b.imag;
            return __pyx_t_float_complex_from_parts(
                (a.real * b.real + a.imag * b.imag) / denom,
                (a.imag * b.real - a.real * b.imag) / denom);
        }
    }
    #endif
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
    #if 1
    /* |z|: prefer hypotf (no intermediate overflow) when available. */
    static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrtf(z.real*z.real + z.imag*z.imag);
      #else
        return hypotf(z.real, z.imag);
      #endif
    }
    /* a**b for complex operands.  Small non-negative integer exponents are
     * expanded via repeated multiplication (after inverting `a` for negative
     * exponents); everything else goes through the polar form
     * r*e^(i*theta) => exp(b*log(a)). */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                float denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    return __Pyx_c_prod_float(a, a);
                case 3:
                    z = __Pyx_c_prod_float(a, a);
                    return __Pyx_c_prod_float(z, a);
                case 4:
                    z = __Pyx_c_prod_float(a, a);
                    return __Pyx_c_prod_float(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            } else if (b.imag == 0) {
                z.real = powf(a.real, b.real);
                z.imag = 0;
                return z;
            } else if (a.real > 0) {
                r = a.real;
                theta = 0;
            } else {
                r = -a.real;
                theta = atan2f(0.0, -1.0);
            }
        } else {
            r = __Pyx_c_abs_float(a);
            theta = atan2f(a.imag, a.real);
        }
        lnr = logf(r);
        z_r = expf(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cosf(z_theta);
        z.imag = z_r * sinf(z_theta);
        return z;
    }
    #endif
#endif
/* Declarations */
/* Construct a double complex from real/imaginary parts — same three
 * variants as the float version above. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* Arithmetic */
/* Software double-complex arithmetic; mirrors the float family above but
 * uses the double-precision libm entry points. */
#if CYTHON_CCOMPLEX
#else
    static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    #if 1
    /* Scaled (Smith's) division to avoid overflow in the denominator. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        if (b.imag == 0) {
            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else if (fabs(b.real) >= fabs(b.imag)) {
            if (b.real == 0 && b.imag == 0) {
                /* 0/0: deliberately divide to produce IEEE NaN/inf results */
                return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
            } else {
                double r = b.imag / b.real;
                double s = (double)(1.0) / (b.real + b.imag * r);
                return __pyx_t_double_complex_from_parts(
                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
            }
        } else {
            double r = b.real / b.imag;
            double s = (double)(1.0) / (b.imag + b.real * r);
            return __pyx_t_double_complex_from_parts(
                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
        }
    }
    #else
    /* Textbook division: simpler but overflow-prone for large |b|. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        if (b.imag == 0) {
            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else {
            double denom = b.real * b.real + b.imag * b.imag;
            return __pyx_t_double_complex_from_parts(
                (a.real * b.real + a.imag * b.imag) / denom,
                (a.imag * b.real - a.real * b.imag) / denom);
        }
    }
    #endif
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
    #if 1
    /* |z|: prefer hypot (no intermediate overflow) when available. */
    static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrt(z.real*z.real + z.imag*z.imag);
      #else
        return hypot(z.real, z.imag);
      #endif
    }
    /* a**b: small integer exponents via repeated multiplication, general
     * case via polar form exp(b*log(a)). */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        double r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                double denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    return __Pyx_c_prod_double(a, a);
                case 3:
                    z = __Pyx_c_prod_double(a, a);
                    return __Pyx_c_prod_double(z, a);
                case 4:
                    z = __Pyx_c_prod_double(a, a);
                    return __Pyx_c_prod_double(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            } else if (b.imag == 0) {
                z.real = pow(a.real, b.real);
                z.imag = 0;
                return z;
            } else if (a.real > 0) {
                r = a.real;
                theta = 0;
            } else {
                r = -a.real;
                theta = atan2(0.0, -1.0);
            }
        } else {
            r = __Pyx_c_abs_double(a);
            theta = atan2(a.imag, a.real);
        }
        lnr = log(r);
        z_r = exp(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cos(z_theta);
        z.imag = z_r * sin(z_theta);
        return z;
    }
    #endif
#endif
/* CIntToPy */
/* Convert a C `int` to a Python integer, choosing the cheapest CPython
 * constructor whose range covers the value.  Falls back to byte-array
 * conversion for exotic widths.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe: comparing (-1) against 0 in the
     * target type tells us whether `int` is unsigned on this platform. */
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Last resort: hand the raw bytes to _PyLong_FromByteArray with the
         * detected host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert a Python object to a C `int` with exact overflow checking.
 * Fast paths read small PyLong values digit-by-digit from the internal
 * representation; other numbers go through the C-API conversion functions,
 * and non-integers are coerced via __Pyx_PyNumber_IntOrLong first.
 * Returns (int)-1 with an exception set on error. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe for the target type. */
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Assemble the value directly from the PyLong's 15/30-bit
             * digits when it provably fits; Py_SIZE is the signed digit
             * count (negative for negative numbers). */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* No Py_SIZE access: detect negativity via comparison. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed target: also handle the negative digit counts. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            /* Exotic `int` width: copy the raw bytes with overflow check. */
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int/long: coerce via __int__/__index__, then recurse. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* CIntToPy */
/* Convert a C `long` to a Python integer — same strategy as
 * __Pyx_PyInt_From_int above (the generated `sizeof(long) < sizeof(long)`
 * comparisons are folded away by the compiler). */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe for the source type. */
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback: raw-byte conversion with detected host endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert a Python object to a C `long` with exact overflow checking.
 * Structure mirrors __Pyx_PyInt_As_int above: fast digit-level paths for
 * small PyLongs, C-API conversion otherwise, coercion for non-ints.
 * Returns (long)-1 with an exception set on error. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    /* Compile-time signedness probe for the target type. */
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Assemble from the PyLong's internal digits when it fits. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* No Py_SIZE access: detect negativity via comparison. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed target: also handle negative digit counts. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            /* Exotic `long` width: copy the raw bytes with overflow check. */
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int/long: coerce via __int__/__index__, then recurse. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Walk the tp_base chain of `a` looking for `b`.  Note that `a` is
 * advanced BEFORE the first comparison, so a == b alone does not match
 * here — callers handle identity themselves.  Since every type
 * ultimately derives from `object`, `b == &PyBaseObject_Type` always
 * counts as a match. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    for (;;) {
        if (a == NULL)
            break;
        a = a->tp_base;
        if (a == b)
            return 1;
    }
    return b == &PyBaseObject_Type;
}
/* Is type `a` a subtype of type `b`?  Scans the precomputed MRO tuple
 * when present; falls back to walking tp_base for (rare) types whose
 * MRO has not been set up. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b)
        return 1;
    mro = a->tp_mro;
    if (unlikely(!mro))
        return __Pyx_InBases(a, b);
    {
        Py_ssize_t idx;
        const Py_ssize_t len = PyTuple_GET_SIZE(mro);
        for (idx = 0; idx < len; idx++) {
            if (PyTuple_GET_ITEM(mro, idx) == (PyObject *)b)
                return 1;
        }
    }
    return 0;
}
/* Does exception type `err` match `exc_type1` (optional, may be NULL) or
 * `exc_type2`?  The Py2 variant must call PyObject_IsSubclass, which can
 * itself raise, so it saves/restores the live exception state around the
 * checks; the Py3 variant compares type structs directly. */
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
    PyObject *exception, *value, *tb;
    int res;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&exception, &value, &tb);
    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
    if (unlikely(res == -1)) {
        /* Subclass check itself failed: report but treat as no-match. */
        PyErr_WriteUnraisable(err);
        res = 0;
    }
    if (!res) {
        res = PyObject_IsSubclass(err, exc_type2);
        if (unlikely(res == -1)) {
            PyErr_WriteUnraisable(err);
            res = 0;
        }
    }
    __Pyx_ErrRestore(exception, value, tb);
    return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
    if (!res) {
        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
    return res;
}
#endif
/* Does `exc_type` match any entry of the exception tuple `tuple`?
 * On Py3 a full identity pass runs first (cheap, catches the common case)
 * before the subclass checks; non-class tuple entries are ignored. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
        #if PY_MAJOR_VERSION < 3
        if (likely(exc_type == t)) return 1;
        #endif
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
            /* non-class entries are silently skipped */
        }
    }
    return 0;
}
/* Fast replacement for PyErr_GivenExceptionMatches: identity first, then
 * direct subtype/tuple checks; anything unusual (non-class `err`, odd
 * exc_type) is delegated to the stock CPython implementation. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (likely(PyExceptionClass_Check(exc_type))) {
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        } else if (likely(PyTuple_Check(exc_type))) {
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        } else {
            /* fall through to CPython for instances / odd inputs */
        }
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Two-target variant used for `except (A, B)` style checks: both targets
 * must be exception classes (asserted); non-class `err` falls back to two
 * stock CPython calls. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
    }
    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
/* Warn (not fail) when the Python that loads this module differs from the
 * one it was compiled against.  With the 4-byte "X.Y" buffers only the major
 * version and the FIRST digit of the minor version are compared (e.g. "3.1"
 * for both 3.1 and 3.10) — presumably an accepted coarseness of this
 * generator version; verify against newer Cython if that matters here. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
#if PY_MAJOR_VERSION < 3
} else if (likely(PyInt_CheckExact(o))) {
return PyInt_AS_LONG(o);
#endif
} else {
Py_ssize_t ival;
PyObject *x;
x = PyNumber_Index(o);
if (!x) return -1;
ival = PyInt_AsLong(x);
Py_DECREF(x);
return ival;
}
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
| 41.903231 | 333 | 0.722102 | [
"object",
"shape"
] |
6596fae17130fc3986e809bfa7a0cae7dd6206e6 | 10,430 | h | C | gpac-0.7.1/include/gpac/modules/raster2d.h | xu5343/ffmpegtoolkit_CentOS7 | 974496c709a1c8c69034e46ae5ce7101cf03716f | [
"Apache-2.0"
] | null | null | null | gpac-0.7.1/include/gpac/modules/raster2d.h | xu5343/ffmpegtoolkit_CentOS7 | 974496c709a1c8c69034e46ae5ce7101cf03716f | [
"Apache-2.0"
] | null | null | null | gpac-0.7.1/include/gpac/modules/raster2d.h | xu5343/ffmpegtoolkit_CentOS7 | 974496c709a1c8c69034e46ae5ce7101cf03716f | [
"Apache-2.0"
] | 1 | 2021-04-15T18:27:37.000Z | 2021-04-15T18:27:37.000Z | /*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2000-2012
* All rights reserved
*
* This file is part of GPAC / modules interfaces
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifndef _GF_MODULE_RASTER2D_H_
#define _GF_MODULE_RASTER2D_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <gpac/path2d.h>
#include <gpac/module.h>
#include <gpac/color.h>
/*stencil types*/
typedef enum
{
/*solid color stencil*/
GF_STENCIL_SOLID = 0,
/*linear color gradient stencil*/
GF_STENCIL_LINEAR_GRADIENT,
/*radial color gradient stencil*/
GF_STENCIL_RADIAL_GRADIENT,
/*texture stencil*/
GF_STENCIL_VERTEX_GRADIENT,
/*texture stencil*/
GF_STENCIL_TEXTURE,
} GF_StencilType;
/*gradient filling modes*/
typedef enum
{
/*edge colors are repeated until path is filled*/
GF_GRADIENT_MODE_PAD,
/*pattern is inversed each time it's repeated*/
GF_GRADIENT_MODE_SPREAD,
/*pattern is repeated to fill path*/
GF_GRADIENT_MODE_REPEAT
} GF_GradientMode;
/*texture tiling flags*/
typedef enum
{
/*texture is repeated in its horizontal direction*/
GF_TEXTURE_REPEAT_S = (1<<1),
/*texture is repeated in its horizontal direction*/
GF_TEXTURE_REPEAT_T = (1<<2),
/*texture is fliped vertically*/
GF_TEXTURE_FLIP = (1<<3),
} GF_TextureTiling;
/*filter levels for texturing - up to the graphics engine but the following levels are used by
the client*/
typedef enum
{
/*high speed mapping (ex, no filtering applied)*/
GF_TEXTURE_FILTER_HIGH_SPEED,
/*compromise between speed and quality (ex, filter to nearest pixel)*/
GF_TEXTURE_FILTER_MID,
/*high quality mapping (ex, bi-linear/bi-cubic interpolation)*/
GF_TEXTURE_FILTER_HIGH_QUALITY
} GF_TextureFilter;
/* rasterizer antialiasing depending on the graphics engine*/
typedef enum
{
/*raster shall use no antialiasing */
GF_RASTER_HIGH_SPEED,
/*raster should use fast mode and good quality if possible*/
GF_RASTER_MID,
/*raster should use full antialiasing*/
GF_RASTER_HIGH_QUALITY
} GF_RasterLevel;
/*user routines for raserizer. common syntaxes:
@cbk: user defined callback
@x, y: first pixel position of the run, in device memory (top-left) coordinates
@run_h_len: number of pixels to fill on line
@color: color to fill pixel with. USER MUST IGNORE THE ALPHA COMPONENT OF THIS COLOR, the final
alpha is computed by the lib
@alpha: blending amount (0->0xFF) for the pixels
*/
typedef void (*raster_cbk_fill_run_alpha) (void *, u32, u32, u32, GF_Color, u8);
typedef void (*raster_cbk_fill_run_no_alpha) (void *, u32, u32, u32, GF_Color);
typedef void (*raster_cbk_fill_rect)(void *cbk, u32 x, u32 y, u32 width, u32 height, GF_Color color);
typedef struct
{
void *cbk;
/*fills line pixels without any blending operation*/
raster_cbk_fill_run_no_alpha fill_run_no_alpha;
/* fills line pixels with blending operation - alpha combines both fill color and anti-aliasing blending */
raster_cbk_fill_run_alpha fill_run_alpha;
/*fills rectangle*/
raster_cbk_fill_rect fill_rect;
} GF_RasterCallback;
/*opaque handler for all stencils*/
typedef void *GF_STENCIL;
/*visual surface handler*/
typedef void *GF_SURFACE;
/*interface name and version for raster2D*/
#define GF_RASTER_2D_INTERFACE GF_4CC('G','R','2', '3')
/*graphics driver*/
typedef struct _raster2d_interface
{
/* interface declaration*/
GF_DECL_MODULE_INTERFACE
GF_STENCIL (*stencil_new) (struct _raster2d_interface *, GF_StencilType type);
/*common destructor for all stencils*/
void (*stencil_delete) (GF_STENCIL _this);
/*set stencil transformation matrix*/
GF_Err (*stencil_set_matrix) (GF_STENCIL _this, GF_Matrix2D *mat);
/*solid brush - set brush color*/
GF_Err (*stencil_set_brush_color) (GF_STENCIL _this, GF_Color c);
/*gradient brushes*/
/*sets gradient repeat mode - return GF_NOT_SUPPORTED if driver doesn't support this to let the app compute repeat patterns
this may be called before the gradient is setup*/
GF_Err (*stencil_set_gradient_mode) (GF_STENCIL _this, GF_GradientMode mode);
/*set linear gradient. line is defined by start and end, and you can give interpolation colors at specified positions*/
GF_Err (*stencil_set_linear_gradient) (GF_STENCIL _this, Fixed start_x, Fixed start_y, Fixed end_x, Fixed end_y);
/*radial gradient brush center point, focal point and radius - colors can only be set through set_interpolation */
GF_Err (*stencil_set_radial_gradient) (GF_STENCIL _this, Fixed cx, Fixed cy, Fixed fx, Fixed fy, Fixed x_radius, Fixed y_radius);
/*radial and linear gradient (not used with vertex) - set color interpolation at given points,
@pos[i]: distance from (center for radial, start for linear) expressed between 0 and 1 (1 being the gradient bounds)
@col[i]: associated color
NOTE 1: the colors at 0 and 1.0 MUST be provided
NOTE 2: colors shall be fed in order from 0 to 1
NOTE 3: this overrides the colors provided for linear gradient
*/
GF_Err (*stencil_set_gradient_interpolation) (GF_STENCIL _this, Fixed *pos, GF_Color *col, u32 count);
/*vertex gradient : set limit path */
GF_Err (*stencil_set_vertex_path) (GF_STENCIL _this, GF_Path *path);
/*set the center of the gradient*/
GF_Err (*stencil_set_vertex_center) (GF_STENCIL _this, Fixed cx, Fixed cy, u32 color);
/*set the center of the gradient*/
GF_Err (*stencil_set_vertex_colors) (GF_STENCIL _this, u32 *colors, u32 nbCol);
/*sets global alpha blending level for stencil (texture and gradients)
the alpha channel shall be combined with the color matrix if any*/
GF_Err (*stencil_set_alpha) (GF_STENCIL _this, u8 alpha);
/*set stencil texture
@pixels: texture data, from top to bottom
@width, @height: texture size
@stride: texture horizontal pitch (bytes to skip to get to next row)
@pixelFormat: texture pixel format as defined in file constants.h
@destination_format_hint: this is the current pixel format of the destination surface, and is given
as a hint in case the texture needs to be converted by the stencil
@no_copy: if set, specifies the texture data shall not be cached by the module (eg it must be able
to directly modify the given memory
NOTE: this stencil acts as a data wrapper, the pixel data is not required to be locally copied
data is not required to be available for texturing until the stencil is used in a draw operation
*/
GF_Err (*stencil_set_texture) (GF_STENCIL _this, char *pixels, u32 width, u32 height, u32 stride, GF_PixelFormat pixelFormat, GF_PixelFormat destination_format_hint, Bool no_copy);
/*signals the texture has been modified (internal texture only)*/
void (*stencil_texture_modified) (GF_STENCIL _this);
/*sets texture tile mode*/
GF_Err (*stencil_set_tiling) (GF_STENCIL _this, GF_TextureTiling mode);
/*sets texture filtering mode*/
GF_Err (*stencil_set_filter) (GF_STENCIL _this, GF_TextureFilter filter_mode);
/*set stencil color matrix - texture stencils only. If matrix is NULL, resets current color matrix*/
GF_Err (*stencil_set_color_matrix) (GF_STENCIL _this, GF_ColorMatrix *cmat);
/*creates surface object*/
/* @center_coords: true indicates mathematical-like coord system,
false indicates computer-like coord system */
GF_SURFACE (*surface_new) (struct _raster2d_interface *, Bool center_coords);
/* delete surface object */
void (*surface_delete) (GF_SURFACE _this);
/* attach surface object to device object (Win32: HDC) width and height are target surface size*/
GF_Err (*surface_attach_to_device) (GF_SURFACE _this, void *os_handle, u32 width, u32 height);
/* attach surface object to stencil object*/
GF_Err (*surface_attach_to_texture) (GF_SURFACE _this, GF_STENCIL sten);
/* attach surface object to memory buffer if supported
@pixels: texture data
@width, @height: texture size
@pitch_x: texture horizontal pitch (bytes to skip to get to next pixel). O means linear frame buffer (eg pitch_x==bytes per pixel)
@pitch_y: texture vertical pitch (bytes to skip to get to next line)
@pixelFormat: texture pixel format
*/
GF_Err (*surface_attach_to_buffer) (GF_SURFACE _this, char *pixels, u32 width, u32 height, s32 pitch_x, s32 pitch_y, GF_PixelFormat pixelFormat);
GF_Err (*surface_attach_to_callbacks) (GF_SURFACE _this, GF_RasterCallback *callbacks, u32 width, u32 height);
/* detach surface object */
void (*surface_detach) (GF_SURFACE _this);
/*sets rasterizer precision */
GF_Err (*surface_set_raster_level) (GF_SURFACE _this, GF_RasterLevel RasterSetting);
/* set the given matrix as the current transformations for all drawn paths
if NULL reset the current transformation */
GF_Err (*surface_set_matrix) (GF_SURFACE _this, GF_Matrix2D *mat);
/* set the given rectangle as a clipper - nothing will be drawn outside this clipper
if the clipper is NULL then no clipper is set
NB: the clipper is not affected by the surface matrix and is given in pixels
CF ABOVE NOTE ON CLIPPERS*/
GF_Err (*surface_set_clipper) (GF_SURFACE _this, GF_IRect *rc);
/*sets the given path as the current one for drawing - the surface transform is NEVER changed between
setting the path and filling, only the clipper may change*/
GF_Err (*surface_set_path) (GF_SURFACE _this, GF_Path *path);
/*fills the current path using the given stencil - can be called several times with the same current path*/
GF_Err (*surface_fill) (GF_SURFACE _this, GF_STENCIL stencil);
/*flushes to surface*/
GF_Err (*surface_flush) (GF_SURFACE _this);
/*clears given pixel rect on the surface with the given color - REQUIRED
the given rect is formatted as a clipper - CF ABOVE NOTE ON CLIPPERS*/
GF_Err (*surface_clear)(GF_SURFACE _this, GF_IRect *rc, GF_Color col);
/*private:*/
void *internal;
} GF_Raster2D;
#ifdef __cplusplus
}
#endif
#endif /*_GF_MODULE_RASTER2D_H_*/
| 39.657795 | 181 | 0.762895 | [
"object",
"transform",
"solid"
] |
659ca82f6b8cf8aba26db8ad1cf41ce174ae2fbf | 1,805 | h | C | core/fpdfapi/page/cpdf_psengine.h | ADVAN-ELAA-8QM-PRC1/platform-external-pdfium | e67ae11a46c7b9f48ebc2efab8ca58cc9982cb38 | [
"BSD-3-Clause"
] | 18 | 2015-01-07T21:02:47.000Z | 2021-01-19T02:14:58.000Z | core/fpdfapi/page/cpdf_psengine.h | ADVAN-ELAA-8QM-PRC1/platform-external-pdfium | e67ae11a46c7b9f48ebc2efab8ca58cc9982cb38 | [
"BSD-3-Clause"
] | 1 | 2017-02-14T01:38:56.000Z | 2017-02-15T06:01:13.000Z | core/fpdfapi/page/cpdf_psengine.h | ADVAN-ELAA-8QM-PRC1/platform-external-pdfium | e67ae11a46c7b9f48ebc2efab8ca58cc9982cb38 | [
"BSD-3-Clause"
] | 10 | 2015-07-04T06:37:40.000Z | 2021-04-08T09:31:20.000Z | // Copyright 2016 PDFium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com
#ifndef CORE_FPDFAPI_PAGE_CPDF_PSENGINE_H_
#define CORE_FPDFAPI_PAGE_CPDF_PSENGINE_H_
#include <memory>
#include <vector>
#include "core/fxcrt/fx_system.h"
class CPDF_PSEngine;
class CPDF_PSOP;
class CPDF_SimpleParser;
enum PDF_PSOP {
PSOP_ADD,
PSOP_SUB,
PSOP_MUL,
PSOP_DIV,
PSOP_IDIV,
PSOP_MOD,
PSOP_NEG,
PSOP_ABS,
PSOP_CEILING,
PSOP_FLOOR,
PSOP_ROUND,
PSOP_TRUNCATE,
PSOP_SQRT,
PSOP_SIN,
PSOP_COS,
PSOP_ATAN,
PSOP_EXP,
PSOP_LN,
PSOP_LOG,
PSOP_CVI,
PSOP_CVR,
PSOP_EQ,
PSOP_NE,
PSOP_GT,
PSOP_GE,
PSOP_LT,
PSOP_LE,
PSOP_AND,
PSOP_OR,
PSOP_XOR,
PSOP_NOT,
PSOP_BITSHIFT,
PSOP_TRUE,
PSOP_FALSE,
PSOP_IF,
PSOP_IFELSE,
PSOP_POP,
PSOP_EXCH,
PSOP_DUP,
PSOP_COPY,
PSOP_INDEX,
PSOP_ROLL,
PSOP_PROC,
PSOP_CONST
};
constexpr uint32_t PSENGINE_STACKSIZE = 100;
class CPDF_PSProc {
public:
CPDF_PSProc();
~CPDF_PSProc();
bool Parse(CPDF_SimpleParser* parser, int depth);
bool Execute(CPDF_PSEngine* pEngine);
private:
static const int kMaxDepth = 128;
std::vector<std::unique_ptr<CPDF_PSOP>> m_Operators;
};
class CPDF_PSEngine {
public:
CPDF_PSEngine();
~CPDF_PSEngine();
bool Parse(const FX_CHAR* str, int size);
bool Execute();
bool DoOperator(PDF_PSOP op);
void Reset() { m_StackCount = 0; }
void Push(FX_FLOAT value);
FX_FLOAT Pop();
uint32_t GetStackSize() const { return m_StackCount; }
private:
FX_FLOAT m_Stack[PSENGINE_STACKSIZE];
uint32_t m_StackCount;
CPDF_PSProc m_MainProc;
};
#endif // CORE_FPDFAPI_PAGE_CPDF_PSENGINE_H_
| 17.871287 | 80 | 0.731856 | [
"vector"
] |
4559eab0a7aeaf0f91995e7b3314b030bd32a156 | 44,568 | h | C | Source/Headers/inmost_variable.h | INM-RAS/INMOST | 2846aa63c1fc11c406cb2d558646237223183201 | [
"BSD-3-Clause"
] | 25 | 2016-03-14T19:34:00.000Z | 2022-03-17T13:34:59.000Z | Source/Headers/inmost_variable.h | INMOST-DEV/INMOST | c2209a6378b0d2ecc2f3ec9a12e0217cca011ca8 | [
"BSD-3-Clause"
] | 22 | 2015-01-17T18:43:16.000Z | 2021-03-26T08:09:43.000Z | Source/Headers/inmost_variable.h | INM-RAS/INMOST | 2846aa63c1fc11c406cb2d558646237223183201 | [
"BSD-3-Clause"
] | 13 | 2015-04-22T16:04:21.000Z | 2021-03-31T10:51:48.000Z |
#ifndef INMOST_AUTODIFF_ETVAR_H_INCLUDED
#define INMOST_AUTODIFF_ETVAR_H_INCLUDED
#include "inmost_common.h"
#include "inmost_expression.h"
#include "inmost_mesh.h"
#include "inmost_autodiff.h"
#include "inmost_solver.h"
#include <sstream> //for debug
#include <new>
#if defined(USE_AUTODIFF) && defined(USE_MESH)
//TODO:
// 1. Incorporate tables
// 2. (ok, test) implement condition
// 3. (ok, test) implement stencil
// 4. (???) copying of basic_dynamic_variable
// 5. Consider optimization by checking zero variation multipliers, check that assembly do not degrade.
// 6. Document everything
// 7. change stencil_variable with foreach_variable and introduce function foreach(iterator beg, iterator end, arg)
// 8. enclose in namespace
// 9. maybe should not use const A & in classes, since some class may be destroyed prior use - investigate
//This should stop Visual Studio from complaining of very long auto-generated class types
#ifdef _MSC_VER
#pragma warning(disable : 4503)
#endif
namespace INMOST
{
template<class Op, class A>
class unary_pool
{
A arg;
Op operand;
unary_pool & operator = (unary_pool const & other) {arg = other.arg; operand.assign(other.operand,arg); return * this;}
public:
unary_pool(const A & parg) : arg(parg), operand(arg) {}
unary_pool(const A & parg, INMOST_DATA_REAL_TYPE pmult) : arg(parg), operand(arg,pmult) {}
unary_pool(const unary_pool & other) : arg(other.arg), operand(other.operand,arg) {}
const shell_expression<A> & get_arg() {return arg;}
Op & get_op() {return operand;}
const Op & get_op() const {return operand;}
};
template<class Op, class A, class B>
class binary_pool
{
A left;
B right;
Op operand;
binary_pool & operator = (binary_pool const & other) {left = other.left; right = other.right; operand.assign(other.operand,left,right); return * this;}
public:
binary_pool(const A & pleft, const B & pright) : left(pleft), right(pright), operand(left,right) {}
binary_pool(const binary_pool & other) : left(other.left), right(other.right), operand(other.operand,left,right) {}
const shell_expression<A> & get_left() {return left;}
const shell_expression<B> & get_right() {return right;}
Op & get_op() {return operand;}
const Op & get_op() const {return operand;}
~binary_pool() {}
};
template<class Op, class A, class B, class C>
class ternary_pool
{
A cond;
B left;
C right;
Op operand;
ternary_pool & operator =(ternary_pool const & other) {cond = other.cond; left = other.left; right = other.right; operand.assign(other.operand,cond,left,right); return * this;}
public:
ternary_pool(const A & pcond, const B & pleft, const C & pright) : cond(pcond), left(pleft), right(pright), operand(cond,left,right) {}
ternary_pool(const ternary_pool & other) : cond(other.cond), left(other.left), right(other.right), operand(other.operand,cond,left,right) {}
const shell_expression<A> & get_cond() {return cond;}
const shell_expression<B> & get_left() {return left;}
const shell_expression<C> & get_right() {return right;}
Op & get_op() {return operand;}
const Op & get_op() const {return operand;}
~ternary_pool() {}
};
template<class A, class ArgA>
class unary_pool_expression : public shell_expression<unary_pool_expression<A,ArgA> >
{
unary_pool<A,ArgA> pool;
public:
unary_pool_expression(const unary_pool<A,ArgA> & ppool) : pool(ppool) {}
unary_pool_expression(const unary_pool_expression & other) : pool(other.pool) {}
unary_pool_expression & operator = (unary_pool_expression const & other) {pool = other.pool; return * this;}
__INLINE INMOST_DATA_REAL_TYPE GetValue() const { return pool.get_op().GetValue(); }
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::RowMerger & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::Row & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetHessian(INMOST_DATA_REAL_TYPE multJ, Sparse::Row & J, INMOST_DATA_REAL_TYPE multH, Sparse::HessianRow & H) const {pool.get_op().GetHessian(multJ,J,multH,H);}
};
template<class A, class ArgA, class ArgB>
class binary_pool_expression : public shell_expression<binary_pool_expression<A,ArgA,ArgB> >
{
binary_pool<A,ArgA,ArgB> pool;
public:
binary_pool_expression(const binary_pool<A,ArgA,ArgB> & ppool) : pool(ppool) {}
binary_pool_expression(const binary_pool_expression & other) : pool(other.pool) {}
binary_pool_expression & operator = (binary_pool_expression const & other) {pool = other.pool; return * this;}
__INLINE INMOST_DATA_REAL_TYPE GetValue() const { return pool.get_op().GetValue(); }
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::RowMerger & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::Row & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetHessian(INMOST_DATA_REAL_TYPE multJ, Sparse::Row & J, INMOST_DATA_REAL_TYPE multH, Sparse::HessianRow & H) const {pool.get_op().GetHessian(multJ,J,multH,H);}
};
template<class A, class ArgA, class ArgB, class ArgC>
class ternary_pool_expression : public shell_expression<ternary_pool_expression<A,ArgA,ArgB,ArgC> >
{
ternary_pool<A,ArgA,ArgB,ArgC> pool;
public:
ternary_pool_expression(const ternary_pool<A,ArgA,ArgB,ArgC> & ppool) : pool(ppool) {}
ternary_pool_expression(const ternary_pool_expression & other) : pool(other.pool) {}
ternary_pool_expression & operator = (ternary_pool_expression const & other) {pool = other.pool; return * this;}
__INLINE INMOST_DATA_REAL_TYPE GetValue() const { return pool.get_op().GetValue(); }
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::RowMerger & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::Row & r) const {pool.get_op().GetJacobian(mult,r);}
__INLINE void GetHessian(INMOST_DATA_REAL_TYPE multJ, Sparse::Row & J, INMOST_DATA_REAL_TYPE multH, Sparse::HessianRow & H) const {pool.get_op().GetHessian(multJ,J,multH,H);}
};
class abstract_dynamic_variable
{
public:
virtual INMOST_DATA_REAL_TYPE Value (const Storage & e) const = 0;
virtual multivar_expression Variable(const Storage & e) const = 0;
virtual void GetVariation(const Storage & e, Sparse::Row & r) const = 0;
virtual void GetVariation(const Storage & e, Sparse::RowMerger & r) const = 0;
virtual abstract_dynamic_variable * Copy() const = 0;
virtual ~abstract_dynamic_variable() {}
};
template<typename RetType>
class get_variable
{
public:
virtual RetType operator()(const Storage & e) const = 0;
};
template<>
class get_variable<multivar_expression>
{
const abstract_dynamic_variable & var;
public:
typedef multivar_expression type;
get_variable(const abstract_dynamic_variable & var) : var(var) {}
multivar_expression operator()(const Storage & e) const {return var.Variable(e);}
};
template<>
class get_variable<INMOST_DATA_REAL_TYPE>
{
const abstract_dynamic_variable & var;
public:
typedef INMOST_DATA_REAL_TYPE type;
get_variable(const abstract_dynamic_variable & var) : var(var) {}
INMOST_DATA_REAL_TYPE operator()(const Storage & e) const {return var.Value(e);}
};
template<class VariableType>
class basic_dynamic_variable : public abstract_dynamic_variable
{
public:
typedef VariableType Var;
virtual INMOST_DATA_REAL_TYPE Value(const Storage & e) const = 0;
virtual multivar_expression Variable(const Storage & e) const = 0;
virtual VariableType operator[](const Storage & e) const = 0;
virtual void GetVariation(const Storage & e, Sparse::Row & r) const = 0;
virtual void GetVariation(const Storage & e, Sparse::RowMerger & r) const = 0;
virtual abstract_dynamic_variable * Copy() const = 0;
virtual ~basic_dynamic_variable() {}
};
template<class VariableType, class Derived>
class shell_dynamic_variable : virtual public basic_dynamic_variable<VariableType>
{
public:
typedef VariableType Var;
virtual INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return static_cast<const Derived *>(this)->Value(e);}
virtual multivar_expression operator ()(const Storage & e) const {return static_cast<const Derived *>(this)->Variable(e);}
virtual VariableType operator[](const Storage & e) const {return (*static_cast<const Derived *>(this))[e];}
virtual void GetVariation(const Storage & e, Sparse::Row & r) const {static_cast<const Derived *>(this)->GetVariation(e,r);}
virtual void GetVariation(const Storage & e, Sparse::RowMerger & r) const {static_cast<const Derived *>(this)->GetVariation(e,r);}
operator Derived & () {return *static_cast<Derived *>(this);}
operator const Derived & () const {return *static_cast<const Derived *>(this);}
virtual abstract_dynamic_variable * Copy() const { return static_cast<const Derived *>(this)->Copy(); }
};
class stored_variable_expression : public shell_dynamic_variable<multivar_expression,stored_variable_expression>
{
abstract_dynamic_variable * var;
public:
stored_variable_expression() : var(NULL) {}
stored_variable_expression(const abstract_dynamic_variable & pvar) : var(pvar.Copy()) {}
stored_variable_expression(const stored_variable_expression & other) : var(other.var->Copy()) {}
~stored_variable_expression() {delete var; var = NULL;}
stored_variable_expression operator =(stored_variable_expression const & other) {var = other.var->Copy(); return *this;}
stored_variable_expression operator =(const abstract_dynamic_variable & pvar) {var = pvar.Copy(); return *this;}
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return var->Value(e);}
multivar_expression Variable(const Storage & e) const {return var->Variable(e);}
multivar_expression operator [](const Storage & e) const {return var->Variable(e);}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
template<typename T>
get_variable<T> get_variable() {return get_variable<T>(*var);}
abstract_dynamic_variable & retrive_expression() {return *var;}
const abstract_dynamic_variable & retrive_expression() const {return *var;}
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new stored_variable_expression(*this));}
};
/// Dynamic variable bound to an unknown registered in an Automatizator entry.
/// Reads both the value and the matrix index of the unknown from the entry.
class dynamic_variable : public shell_dynamic_variable<var_expression,dynamic_variable >
{
private:
const AbstractEntry * entry; //< Registered entry providing value/index; NULL when default-constructed.
INMOST_DATA_ENUM_TYPE comp; //< Component of the unknown within the entry.
public:
/// Default constructor: unbound variable (NOTE(review): most accessors dereference entry, so using an unbound instance looks unsafe — confirm callers always bind first).
dynamic_variable() :entry(NULL), comp(ENUMUNDEF) {}
/// Bind to the entry registered in aut under reg_index (or leave unbound when reg_index is ENUMUNDEF).
dynamic_variable(Automatizator & aut, INMOST_DATA_ENUM_TYPE reg_index, INMOST_DATA_ENUM_TYPE comp = 0) : entry(reg_index==ENUMUNDEF?NULL:&aut.GetEntry(reg_index)), comp(comp) {}
/// Bind directly to a known entry.
dynamic_variable(const AbstractEntry * re, INMOST_DATA_ENUM_TYPE comp = 0) : entry(re), comp(comp) {}
dynamic_variable(const dynamic_variable & other) : entry(other.entry), comp(other.comp) {}
dynamic_variable & operator =(const dynamic_variable & other)
{
entry = other.entry;
comp = other.comp;
return * this;
}
/// Current value of the unknown on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return entry->Value(e,comp);}
/// Matrix index of the unknown on e, or ENUMUNDEF when the entry is not valid on e.
INMOST_DATA_ENUM_TYPE Index(const Storage & e) const {return entry->isValid(e) ? entry->Index(e,comp):ENUMUNDEF;}
/// Value with derivative when the entry is valid on e, plain value otherwise.
multivar_expression Variable(const Storage & e) const
{
if( entry->isValid(e) )
return entry->Unknown(e,comp);
else
return entry->Value(e,comp);
}
/// Lightweight single-unknown expression for element e (index ENUMUNDEF when invalid on e).
var_expression operator [](const Storage & e) const {return var_expression(entry->Value(e,comp),entry->isValid(e)?entry->Index(e,comp):ENUMUNDEF);}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
/// True when the entry carries an unknown on element e.
bool isUnknown(const Storage & e) const {return entry->isValid(e)?true:false;}
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new dynamic_variable(*this));}
};
/// Dynamic variable that evaluates to a fixed scalar on every element and
/// contributes no derivatives.
class const_variable : public shell_dynamic_variable<const_expression,const_variable>
{
private:
	INMOST_DATA_REAL_TYPE val; //< The stored constant.
public:
	const_variable(INMOST_DATA_REAL_TYPE v) : val(v) {}
	const_variable(const const_variable & o) : val(o.val) {}
	const_variable & operator =(const const_variable & o) { val = o.val; return *this; }
	/// Constant value; the element argument is ignored.
	INMOST_DATA_REAL_TYPE Value(const Storage & e) const { (void)e; return val; }
	/// Constant wrapped as a multivar_expression with empty derivatives.
	multivar_expression Variable(const Storage & e) const { (void)e; return multivar_expression(val); }
	const_expression operator [](const Storage & e) const { (void)e; return const_expression(val); }
	void GetVariation(const Storage & e, Sparse::Row & r) const { operator[](e).GetJacobian(1.0,r); }
	void GetVariation(const Storage & e, Sparse::RowMerger & r) const { operator[](e).GetJacobian(1.0,r); }
	abstract_dynamic_variable * Copy() const { return static_cast<abstract_dynamic_variable *>(new const_variable(*this)); }
};
/// Dynamic variable mirroring an externally owned scalar through a pointer;
/// always reflects the current value of the linked memory, no derivatives.
class const_link_variable : public shell_dynamic_variable<const_expression,const_link_variable>
{
private:
	const INMOST_DATA_REAL_TYPE * ref; //< Non-owning pointer to the linked scalar.
public:
	const_link_variable(const INMOST_DATA_REAL_TYPE * p) : ref(p) {}
	const_link_variable(const const_link_variable & o) : ref(o.ref) {}
	const_link_variable & operator =(const const_link_variable & o) { ref = o.ref; return *this; }
	/// Current value of the linked scalar; element argument is ignored.
	INMOST_DATA_REAL_TYPE Value(const Storage & e) const { (void)e; return *ref; }
	/// Linked value wrapped as a multivar_expression with empty derivatives.
	multivar_expression Variable(const Storage & e) const { (void)e; return multivar_expression(*ref); }
	const_expression operator [](const Storage & e) const { (void)e; return const_expression(*ref); }
	void GetVariation(const Storage & e, Sparse::Row & r) const { operator[](e).GetJacobian(1.0,r); }
	void GetVariation(const Storage & e, Sparse::RowMerger & r) const { operator[](e).GetJacobian(1.0,r); }
	abstract_dynamic_variable * Copy() const { return static_cast<abstract_dynamic_variable *>(new const_link_variable(*this)); }
};
/// Dynamic variable backed by a real-valued mesh tag; treated as data (no
/// derivatives), unlike stored_variable which may carry variations.
class static_variable : public shell_dynamic_variable<const_expression,static_variable>
{
private:
Tag value_tag; //< Tag holding the real data on elements.
INMOST_DATA_ENUM_TYPE comp; //< Component within the tag's array.
public:
static_variable(Tag t, INMOST_DATA_ENUM_TYPE pcomp = 0) : value_tag(t), comp(pcomp) {}
static_variable(const static_variable & other) : value_tag(other.value_tag), comp(other.comp) {}
static_variable & operator =(const static_variable & other)
{
value_tag = other.value_tag;
comp = other.comp;
return * this;
}
/// Read the tag value on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return e->RealArray(value_tag)[comp];}
/// Tag value wrapped as a multivar_expression with empty derivatives.
multivar_expression Variable(const Storage & e) const
{
return multivar_expression(e->RealArray(value_tag)[comp]);
}
const_expression operator [](const Storage & e) const {return const_expression(e->RealArray(value_tag)[comp]);}
/// Access the underlying tag.
Tag ValueTag() {return value_tag;}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
/// Never an unknown: this variable represents plain data.
bool isUnknown(const Storage & e) const {(void)e; return false;}
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new static_variable(*this));}
};
/// Dynamic variable backed by a mesh tag of either DATA_REAL or DATA_VARIABLE
/// type. DATA_VARIABLE entries carry derivatives; DATA_REAL entries behave as
/// plain data.
class stored_variable : public shell_dynamic_variable<multivar_expression_reference,stored_variable>
{
private:
Tag variable_tag; //< Tag holding the data (DATA_REAL or DATA_VARIABLE).
INMOST_DATA_ENUM_TYPE comp; //< Component within the tag's array.
public:
stored_variable() : variable_tag(), comp(ENUMUNDEF) {}
stored_variable(Tag t, INMOST_DATA_ENUM_TYPE pcomp = 0) : variable_tag(t), comp(pcomp)
{
// Only real or variable tags are meaningful here; other types throw at use sites.
assert(t.GetDataType() == DATA_REAL || t.GetDataType() == DATA_VARIABLE);
}
stored_variable(const stored_variable & other) : variable_tag(other.variable_tag), comp(other.comp) {}
stored_variable & operator =(const stored_variable & other)
{
variable_tag = other.variable_tag;
comp = other.comp;
return * this;
}
/// Scalar value stored on element e, regardless of tag data type.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const
{
if( variable_tag.GetDataType() == DATA_VARIABLE )
return e->VariableArray(variable_tag)[comp].GetValue();
else if( variable_tag.GetDataType() == DATA_REAL )
return e->RealArray(variable_tag)[comp];
else throw NotImplemented;
}
/// Value with derivatives (DATA_VARIABLE) or value promoted to a derivative-free
/// expression (DATA_REAL).
multivar_expression Variable(const Storage & e) const
{
if( variable_tag.GetDataType() == DATA_VARIABLE )
return e->VariableArray(variable_tag)[comp];
else if( variable_tag.GetDataType() == DATA_REAL )
return variable(e->RealArray(variable_tag)[comp]);
else throw NotImplemented;
}
/// Writable reference into the tag storage on element e.
/// NOTE(review): for DATA_REAL the reference is built with a NULL derivative row — presumably writes through it must not touch derivatives; confirm.
multivar_expression_reference operator [](const Storage & e) const
{
if( variable_tag.GetDataType() == DATA_VARIABLE )
return e->VariableArray(variable_tag)[comp];
else if( variable_tag.GetDataType() == DATA_REAL )
return multivar_expression_reference(e->RealArray(variable_tag)[comp],NULL);
else throw NotImplemented;
}
/// Access the underlying tag.
Tag VariableTag() {return variable_tag;}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
/// Treated as data for unknown-detection purposes even when derivatives are stored.
bool isUnknown(const Storage & e) const {(void)e; return false;}
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new stored_variable(*this));}
};
/// Expression representing a weighted sum (stencil) of argument expressions of
/// type A. The total value is cached at construction; derivatives are gathered
/// by iterating the stored weighted arguments.
template<class A>
class stencil_expression : public shell_expression<stencil_expression<A> >
{
public:
	typedef const_multiplication_expression<A> argument;
	typedef unary_pool_expression< argument, A> pool;
	typedef std::vector< argument > container;
private:
	container arg; //< Weighted argument expressions of the stencil.
	INMOST_DATA_REAL_TYPE value; //< Cached sum of the argument values.
public:
	/// Construct from a prepared set of weighted arguments and cache their sum.
	stencil_expression(const container & parg) : arg(parg)
	{
		value = 0.0;
		for(typename container::const_iterator it = arg.begin(); it != arg.end(); ++it)
			value += it->GetValue();
	}
	stencil_expression(const stencil_expression & other) : arg(other.arg), value(other.value) {}
	__INLINE INMOST_DATA_REAL_TYPE GetValue() const { return value; }
	/// Accumulate first derivatives of all arguments into the merger.
	/// Fixed: these member functions are const, so arg.begin() yields a
	/// const_iterator; the original 'container::iterator' failed to compile
	/// once instantiated.
	__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::RowMerger & r) const
	{
		for(typename container::const_iterator it = arg.begin(); it != arg.end(); ++it)
			it->GetJacobian(mult,r);
	}
	/// Accumulate first derivatives of all arguments into the row.
	__INLINE void GetJacobian(INMOST_DATA_REAL_TYPE mult, Sparse::Row & r) const
	{
		for(typename container::const_iterator it = arg.begin(); it != arg.end(); ++it)
			it->GetJacobian(mult,r);
	}
	/// Accumulate first (J) and second (H) derivatives by merging each
	/// argument's sorted rows into the running totals.
	__INLINE void GetHessian(INMOST_DATA_REAL_TYPE multJ, Sparse::Row & J, INMOST_DATA_REAL_TYPE multH, Sparse::HessianRow & H) const
	{
		Sparse::Row tmpJ, curJ;
		Sparse::HessianRow tmpH, curH;
		for(typename container::const_iterator it = arg.begin(); it != arg.end(); ++it)
		{
			curJ.Clear();
			curH.Clear();
			it->GetHessian(multJ,curJ,multH,curH);
			Sparse::Row::MergeSortedRows(1.0,curJ,1.0,J,tmpJ);
			Sparse::HessianRow::MergeSortedRows(1.0,curH,1.0,H,tmpH);
			J.Swap(tmpJ);
			H.Swap(tmpH);
		}
	}
};
/// Dynamic variable evaluating a stencil: a weighted sum of the argument
/// variable over neighbor elements listed in a reference-array tag with
/// coefficients in a matching real-array tag.
template<class A>
class stencil_variable : public shell_dynamic_variable< stencil_expression<typename A::Var>, stencil_variable<A> >
{
private:
TagReferenceArray tag_elems; //< Per-element list of stencil neighbors.
TagRealArray tag_coefs; //< Per-element list of stencil coefficients (same length as tag_elems).
A Arg; //< Variable evaluated on each neighbor.
public:
stencil_variable(Tag tag_elems, Tag tag_coefs, const shell_dynamic_variable<typename A::Var,A> & parg) : tag_elems(tag_elems), tag_coefs(tag_coefs), Arg(parg) {}
stencil_variable(const stencil_variable & other) : tag_elems(other.tag_elems), tag_coefs(other.tag_coefs), Arg(other.Arg) {}
stencil_variable & operator =(const stencil_variable & other) {tag_elems = other.tag_elems; tag_coefs = other.tag_coefs; Arg = other.Arg; return * this;}
/// Value of the stencil sum on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
/// Collapse the stencil expression into a multivar_expression.
multivar_expression Variable(const Storage & e) const
{
multivar_expression ret = (*this)[e];
return ret;
}
/// Build the stencil expression for element e from its neighbor/coefficient tags.
stencil_expression<typename A::Var> operator [](const Storage & e) const
{
typename stencil_expression<typename A::Var>::container tmp;
Storage::real_array coefs = tag_coefs[e];
Storage::reference_array elems = tag_elems[e];
// Neighbor list and coefficient list must be kept in sync by the caller.
assert(coefs.size() == elems.size());
tmp.resize(elems.size());
for(INMOST_DATA_ENUM_TYPE k = 0; k < elems.size(); ++k)
{
// NOTE(review): the pool expression is assigned into the argument
// (const_multiplication_expression) slot — presumably that conversion
// copies value and derivative payload; confirm against expression classes.
typename stencil_expression<typename A::Var>::argument arg(Arg[elems[k]],coefs[k]);
typename stencil_expression<typename A::Var>::pool pool(arg);
tmp[k] = pool;
}
return stencil_expression<typename A::Var>(tmp);
}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new stencil_variable(*this));}
};
/// Dynamic variable applying a tabulated function (keyval_table) to the value
/// of the argument variable; the table supplies both function value and
/// derivative at the argument's current value.
template<class A>
class table_variable : public shell_dynamic_variable< unary_pool_expression< function_expression<typename A::Var>,typename A::Var > , table_variable<A> >
{
	A Arg; //< Argument variable fed into the table.
	// Stored as a pointer (not a reference) so that operator= can rebind it:
	// the original 'const keyval_table &' member made the assignment operator
	// ill-formed (assignment through a const reference cannot rebind).
	const keyval_table * Table;
public:
	table_variable(const shell_dynamic_variable<typename A::Var,A> & parg, const keyval_table & ptable) : Arg(parg), Table(&ptable) {}
	table_variable(const table_variable & other) : Arg(other.Arg), Table(other.Table) {}
	table_variable & operator = (table_variable const & other) {Arg = other.Arg; Table = other.Table; return * this;}
	/// Table value at the argument's value on element e.
	INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
	/// Collapse the tabulated expression into a multivar_expression.
	multivar_expression Variable(const Storage & e) const
	{
		multivar_expression ret = (*this)[e];
		return ret;
	}
	/// Build the function expression: look up value and derivative in one call.
	unary_pool_expression< function_expression<typename A::Var> ,typename A::Var > operator [](const Storage & e) const
	{
		typename A::Var arg = Arg[e];
		unary_pool< function_expression<typename A::Var>, typename A::Var> pool(arg);
		std::pair<INMOST_DATA_REAL_TYPE, INMOST_DATA_REAL_TYPE> both = Table->GetBoth(arg.GetValue());
		pool.get_op().SetFunctionValue(both.first);
		pool.get_op().SetFunctionDerivative(both.second);
		return unary_pool_expression< function_expression<typename A::Var>, typename A::Var >(pool);
	}
	void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
	void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
	abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new table_variable(*this));}
};
/// This class makes possible to evaluate different expressions on different element types.
/// See etype_branch function.
template<class A, class B>
class etype_branch_variable : public shell_dynamic_variable< multivar_expression, etype_branch_variable<A,B> >
{
private:
	A ArgA; //< Variable expression to be evaluated when type of provided element matches selected types.
	B ArgB; //< Variable expression to be evaluated when type of provided element does not match selected types.
	ElementType types_true; //< Selected types of elements.
public:
	/// Constructor. Used by etype_branch function.
	/// Initializer lists follow member declaration order (ArgA, ArgB, types_true),
	/// which is the order members are actually initialized in (fixes -Wreorder).
	etype_branch_variable(ElementType _types_true, const A & _ArgA, const B & _ArgB) : ArgA(_ArgA), ArgB(_ArgB), types_true(_types_true) {}
	/// Copy constructor.
	etype_branch_variable(const etype_branch_variable & other) : ArgA(other.ArgA), ArgB(other.ArgB), types_true(other.types_true) {}
	/// Assignment operator.
	etype_branch_variable & operator =(etype_branch_variable const & other)
	{
		types_true = other.types_true;
		ArgA = other.ArgA;
		ArgB = other.ArgB;
		return *this;
	}
	/// Get value of variable expression on provided element e.
	INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
	/// Get value with derivatives of variable expression on provided element e.
	/// This function collapses associated expression tree into multivar_expression.
	multivar_expression Variable(const Storage & e) const { return (*this)[e]; }
	/// Build an expression associated with variable expression on provided element e.
	multivar_expression operator [](const Storage & e) const
	{
		if( e->GetElementType() & types_true )
			return ArgA[e];
		else return ArgB[e];
	}
	/// Retrieve first derivatives of variable expression on provided element e, default approach.
	void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
	/// Retrieve first derivatives of variable expression on provided element e, with supplementary structure Sparse::RowMerger.
	void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
	/// Make a copy of this class, used to reproduce and store a tree of variable expressions.
	abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new etype_branch_variable(*this));}
};
/// This class makes possible to evaluate different expressions depending on the markers.
/// Works similarly for shared and private markers.
/// See marker_branch function.
template<class A, class B>
class marker_branch_variable : public shell_dynamic_variable< multivar_expression, marker_branch_variable<A,B> >
{
private:
	A ArgA; //< Variable expression to be evaluated when marker is set on the element.
	B ArgB; //< Variable expression to be evaluated when marker is not set on the element.
	MarkerType marker; //< Marker.
public:
	/// Constructor. Used by marker_branch function.
	/// Initializer lists follow member declaration order (ArgA, ArgB, marker),
	/// which is the order members are actually initialized in (fixes -Wreorder).
	marker_branch_variable(MarkerType _marker, const A & _ArgA, const B & _ArgB) : ArgA(_ArgA), ArgB(_ArgB), marker(_marker) {}
	/// Copy constructor.
	marker_branch_variable(const marker_branch_variable & other) : ArgA(other.ArgA), ArgB(other.ArgB), marker(other.marker) {}
	/// Assignment operator.
	marker_branch_variable & operator =(marker_branch_variable const & other)
	{
		marker = other.marker;
		ArgA = other.ArgA;
		ArgB = other.ArgB;
		return *this;
	}
	/// Get value of variable expression on provided element e.
	INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
	/// Get value with derivatives of variable expression on provided element e.
	/// This function collapses associated expression tree into multivar_expression.
	multivar_expression Variable(const Storage & e) const { return (*this)[e]; }
	/// Build an expression associated with variable expression on provided element e.
	multivar_expression operator [](const Storage & e) const
	{
		if( isPrivate(marker) ? e->GetPrivateMarker(marker) : e->GetMarker(marker) )
			return ArgA[e];
		else return ArgB[e];
	}
	/// Retrieve first derivatives of variable expression on provided element e, default approach.
	void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
	/// Retrieve first derivatives of variable expression on provided element e, with supplementary structure Sparse::RowMerger.
	void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
	/// Make a copy of this class, used to reproduce and store a tree of variable expressions.
	abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new marker_branch_variable(*this));}
};
/// Dynamic variable applying a unary expression template Expr (e.g. negation,
/// exp, log) to the argument variable A on each element.
template<class Expr, class A>
class unary_custom_variable : public shell_dynamic_variable< unary_pool_expression<Expr, typename A::Var >,unary_custom_variable<Expr,A> >
{
private:
A Arg; //< Argument variable.
public:
unary_custom_variable(const shell_dynamic_variable<typename A::Var,A> & parg) : Arg(parg) {}
unary_custom_variable(const unary_custom_variable & other) : Arg(other.Arg) {}
unary_custom_variable & operator =(unary_custom_variable const & other) {Arg = other.Arg; return * this;}
/// Value of Expr applied to the argument on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
/// Collapse the expression tree into a multivar_expression.
multivar_expression Variable(const Storage & e) const
{
multivar_expression ret = (*this)[e];
return ret;
}
/// Build the unary expression for element e; the pool keeps the argument alive.
unary_pool_expression<Expr, typename A::Var > operator [](const Storage & e) const
{
unary_pool<Expr,typename A::Var> pool(Arg[e]);
return unary_pool_expression<Expr, typename A::Var >(pool);
}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new unary_custom_variable(*this));}
};
/// Dynamic variable applying a unary expression template Expr parameterized by
/// a real constant (e.g. multiplication or power by a scalar) to variable A.
template<class Expr, class A>
class unary_const_custom_variable : public shell_dynamic_variable< unary_pool_expression<Expr, typename A::Var >,unary_const_custom_variable<Expr,A> >
{
private:
A Left; //< Argument variable.
INMOST_DATA_REAL_TYPE Right; //< Constant passed to Expr.
public:
unary_const_custom_variable(const shell_dynamic_variable<typename A::Var,A> & pleft, INMOST_DATA_REAL_TYPE pright)
: Left(pleft), Right(pright) {}
unary_const_custom_variable(const unary_const_custom_variable & other) : Left(other.Left), Right(other.Right) {}
unary_const_custom_variable & operator =(unary_const_custom_variable const & other) {Left = other.Left; Right = other.Right; return * this;}
/// Value of Expr(Left, Right) on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
/// Collapse the expression tree into a multivar_expression.
multivar_expression Variable(const Storage & e) const
{
multivar_expression ret = (*this)[e];
return ret;
}
/// Build the expression for element e; the pool keeps the argument alive.
unary_pool_expression<Expr, typename A::Var > operator [](const Storage & e) const
{
unary_pool<Expr,typename A::Var> pool(Left[e],Right);
return unary_pool_expression<Expr, typename A::Var >(pool);
}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new unary_const_custom_variable(*this));}
};
/// Dynamic variable applying a binary expression template Expr (e.g. addition,
/// multiplication) to two argument variables A and B on each element.
template<class Expr, class A, class B>
class binary_custom_variable : public shell_dynamic_variable< binary_pool_expression<Expr, typename A::Var, typename B::Var >,binary_custom_variable<Expr,A,B> >
{
private:
A Left; //< Left argument variable.
B Right; //< Right argument variable.
public:
binary_custom_variable(const shell_dynamic_variable<typename A::Var,A> & pleft, const shell_dynamic_variable<typename B::Var,B> & pright)
: Left(pleft), Right(pright) {}
binary_custom_variable(const binary_custom_variable & other) : Left(other.Left), Right(other.Right) {}
binary_custom_variable & operator =(binary_custom_variable const & other) {Left = other.Left; Right = other.Right; return * this;}
/// Value of Expr(Left, Right) on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
/// Collapse the expression tree into a multivar_expression.
multivar_expression Variable(const Storage & e) const
{
multivar_expression ret = (*this)[e];
return ret;
}
/// Build the binary expression for element e; the pool keeps both arguments alive.
binary_pool_expression<Expr, typename A::Var, typename B::Var > operator [](const Storage & e) const
{
binary_pool<Expr,typename A::Var,typename B::Var> pool(Left[e],Right[e]);
return binary_pool_expression<Expr, typename A::Var, typename B::Var >(pool);
}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new binary_custom_variable(*this));}
};
/// Dynamic variable applying a ternary expression template Expr (e.g. the
/// condition expression) to three argument variables on each element.
template<class Expr, class A, class B, class C>
class ternary_custom_variable : public shell_dynamic_variable< ternary_pool_expression<Expr, typename A::Var, typename B::Var, typename C::Var >,ternary_custom_variable<Expr,A,B,C> >
{
private:
A Cond; //< First argument (condition/control) variable.
B Left; //< Second argument variable.
C Right; //< Third argument variable.
public:
ternary_custom_variable(const shell_dynamic_variable<typename A::Var,A> & pcond, const shell_dynamic_variable<typename B::Var,B> & pleft, const shell_dynamic_variable<typename C::Var,C> & pright)
: Cond(pcond), Left(pleft), Right(pright) {}
ternary_custom_variable(const ternary_custom_variable & other) : Cond(other.Cond), Left(other.Left), Right(other.Right) {}
ternary_custom_variable & operator =(ternary_custom_variable const & other) {Cond = other.Cond; Left = other.Left; Right = other.Right; return * this;}
/// Value of Expr(Cond, Left, Right) on element e.
INMOST_DATA_REAL_TYPE Value(const Storage & e) const {return (*this)[e].GetValue();}
/// Collapse the expression tree into a multivar_expression.
multivar_expression Variable(const Storage & e) const
{
multivar_expression ret = (*this)[e];
return ret;
}
/// Build the ternary expression for element e; the pool keeps all arguments alive.
ternary_pool_expression<Expr, typename A::Var, typename B::Var, typename C::Var > operator [](const Storage & e) const
{
ternary_pool<Expr,typename A::Var,typename B::Var, typename C::Var> pool(Cond[e],Left[e],Right[e]);
return ternary_pool_expression<Expr, typename A::Var, typename B::Var, typename C::Var>(pool);
}
void GetVariation(const Storage & e, Sparse::Row & r) const { (*this)[e].GetJacobian(1.0,r); }
void GetVariation(const Storage & e, Sparse::RowMerger & r) const { (*this)[e].GetJacobian(1.0,r); }
abstract_dynamic_variable * Copy() const {return static_cast<abstract_dynamic_variable *>(new ternary_custom_variable(*this));}
};
typedef abstract_dynamic_variable abstract_variable;
}
// ---------------------------------------------------------------------------
// Free-function API lifting arithmetic and elementary functions to dynamic
// variables. Each function wraps its arguments into the matching *_custom_
// variable so the expression is re-evaluated per element.
// ---------------------------------------------------------------------------
/// Conditional selection: yields if_ge_zero or if_lt_zero depending on the sign
/// of control (see condition_expression for the exact rule).
template<class A, class B, class C>
__INLINE
INMOST::ternary_custom_variable<INMOST::condition_expression<typename A::Var, typename B::Var, typename C::Var>,A,B,C> condition(INMOST::shell_dynamic_variable<typename A::Var, A> const & control, INMOST::shell_dynamic_variable<typename B::Var, B> const & if_ge_zero, INMOST::shell_dynamic_variable<typename C::Var, C> const & if_lt_zero) { return INMOST::ternary_custom_variable<INMOST::condition_expression<typename A::Var, typename B::Var, typename C::Var>,A,B,C>(control,if_ge_zero,if_lt_zero); }
// Unary operators and elementary functions on dynamic variables.
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::unary_minus_expression<typename A::Var>,A> operator-(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::unary_minus_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::unary_plus_expression<typename A::Var>,A> operator+(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::unary_plus_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::abs_expression<typename A::Var>,A> fabs(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::abs_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::exp_expression<typename A::Var>,A> exp(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::exp_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::log_expression<typename A::Var>,A> log(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::log_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::sin_expression<typename A::Var>,A> sin(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::sin_expression<typename A::Var>,A>(Arg ); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::cos_expression<typename A::Var>,A> cos(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::cos_expression<typename A::Var>,A>(Arg); }
template<class A> __INLINE INMOST::unary_custom_variable<INMOST::sqrt_expression<typename A::Var>,A> sqrt(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg) { return INMOST::unary_custom_variable<INMOST::sqrt_expression<typename A::Var>,A>(Arg); }
/// Scale only the derivative part of Arg by Mult (see variation_multiplication_expression).
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::variation_multiplication_expression<typename A::Var>,A> variation(INMOST::shell_dynamic_variable<typename A::Var, A> const & Arg, INMOST_DATA_REAL_TYPE Mult) {return INMOST::unary_const_custom_variable<INMOST::variation_multiplication_expression<typename A::Var>,A>(Arg,Mult);}
// Binary operators: variable (op) variable.
template<class A, class B> __INLINE INMOST::binary_custom_variable<INMOST::addition_expression<typename A::Var,typename B::Var>,A, B> operator+(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::binary_custom_variable<INMOST::addition_expression<typename A::Var,typename B::Var>,A, B> (Left, Right); }
template<class A, class B> __INLINE INMOST::binary_custom_variable<INMOST::subtraction_expression<typename A::Var,typename B::Var>,A, B> operator-(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::binary_custom_variable<INMOST::subtraction_expression<typename A::Var,typename B::Var>, A, B> (Left, Right); }
template<class A, class B> __INLINE INMOST::binary_custom_variable<INMOST::multiplication_expression<typename A::Var,typename B::Var>,A, B> operator*(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::binary_custom_variable<INMOST::multiplication_expression<typename A::Var,typename B::Var>, A, B> (Left, Right); }
template<class A, class B> __INLINE INMOST::binary_custom_variable<INMOST::division_expression<typename A::Var,typename B::Var>,A, B> operator/(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::binary_custom_variable<INMOST::division_expression<typename A::Var,typename B::Var>, A, B> (Left, Right); }
template<class A, class B> __INLINE INMOST::binary_custom_variable<INMOST::pow_expression<typename A::Var,typename B::Var>,A, B> pow(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::binary_custom_variable<INMOST::pow_expression<typename A::Var,typename B::Var>,A, B>(Left, Right); }
// Mixed operators: constant (op) variable and variable (op) constant. Note the
// argument order in the returned wrapper encodes which side the constant is on.
template<class B> __INLINE INMOST::unary_const_custom_variable<INMOST::const_pow_expression<typename B::Var>,B> pow(INMOST_DATA_REAL_TYPE Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::unary_const_custom_variable<INMOST::const_pow_expression<typename B::Var>,B>(Left, Right); }
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::pow_const_expression<typename A::Var>,A> pow(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST_DATA_REAL_TYPE Right) { return INMOST::unary_const_custom_variable<INMOST::pow_const_expression<typename A::Var>,A>(Left, Right); }
template<class B> __INLINE INMOST::unary_const_custom_variable<INMOST::const_multiplication_expression<typename B::Var>,B> operator*(INMOST_DATA_REAL_TYPE Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::unary_const_custom_variable<INMOST::const_multiplication_expression<typename B::Var>,B>(Right,Left); }
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::const_multiplication_expression<typename A::Var>,A> operator*(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST_DATA_REAL_TYPE Right) { return INMOST::unary_const_custom_variable<INMOST::const_multiplication_expression<typename A::Var>,A>(Left,Right); }
template<class B> __INLINE INMOST::unary_const_custom_variable<INMOST::reciprocal_expression<typename B::Var>,B> operator/(INMOST_DATA_REAL_TYPE Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::unary_const_custom_variable<INMOST::reciprocal_expression<typename B::Var>,B>(Right,Left); }
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::const_division_expression<typename A::Var>,A> operator/(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST_DATA_REAL_TYPE Right) { return INMOST::unary_const_custom_variable<INMOST::const_division_expression<typename A::Var>,A>(Left, Right); }
template<class B> __INLINE INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename B::Var>,B> operator+(INMOST_DATA_REAL_TYPE Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename B::Var>,B>(Right,Left); }
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename A::Var>,A> operator+(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST_DATA_REAL_TYPE Right) { return INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename A::Var>,A>(Left,Right); }
template<class B> __INLINE INMOST::unary_const_custom_variable<INMOST::const_subtraction_expression<typename B::Var>,B> operator-(INMOST_DATA_REAL_TYPE Left, INMOST::shell_dynamic_variable<typename B::Var,B> const & Right) { return INMOST::unary_const_custom_variable<INMOST::const_subtraction_expression<typename B::Var>,B>(Right, Left); }
// Variable minus constant is expressed as variable plus negated constant.
template<class A> __INLINE INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename A::Var>,A> operator-(INMOST::shell_dynamic_variable<typename A::Var,A> const & Left, INMOST_DATA_REAL_TYPE Right) { return INMOST::unary_const_custom_variable<INMOST::const_addition_expression<typename A::Var>,A>(Left, -Right); }
/// Tag-driven stencil: neighbors and coefficients read from mesh tags per element.
template<class A> __INLINE INMOST::stencil_variable<A> stencil(INMOST::Tag tag_elems, INMOST::Tag tag_coefs, INMOST::shell_dynamic_variable<typename A::Var,A> const & Arg) { return INMOST::stencil_variable<A>(tag_elems,tag_coefs,Arg); }
/// Apply a tabulated function (with derivative) to a variable.
template<class A> __INLINE INMOST::table_variable<A> get_table(INMOST::shell_dynamic_variable<typename A::Var,A> const & Arg, const INMOST::keyval_table & Table) {return INMOST::table_variable<A>(Arg,Table);}
/// Immediate stencil evaluation: weighted sum of Arg over an explicit array of
/// element handles and coefficients (entries with handle 0 are skipped).
/// Fixed: the declared return type was stencil_expression<A>, but the element
/// type and the returned object are built over typename A::Var (matching
/// stencil_variable); the mismatch made the template ill-formed on instantiation.
template<class A> __INLINE INMOST::stencil_expression<typename A::Var> stencil(INMOST::HandleType * elems, INMOST_DATA_REAL_TYPE * coefs, INMOST_DATA_ENUM_TYPE num, INMOST::shell_dynamic_variable<typename A::Var,A> const & Arg)
{
	std::vector< INMOST::const_multiplication_expression<typename A::Var> > tmp;
	tmp.reserve(num); // at most num entries; avoids repeated reallocation
	for( INMOST_DATA_ENUM_TYPE k = 0; k < num; ++k) if( elems[k] != 0 )
		tmp.push_back(INMOST::const_multiplication_expression<typename A::Var>(Arg[elems[k]],coefs[k]));
	return INMOST::stencil_expression<typename A::Var>(tmp);
}
/// Branch on element type: evaluates iftrue on elements whose type matches true_type, iffalse otherwise.
template<class A, class B> __INLINE INMOST::etype_branch_variable<A,B> etype_branch(INMOST::ElementType true_type, INMOST::shell_dynamic_variable<typename A::Var,A> const & iftrue, INMOST::shell_dynamic_variable<typename B::Var,B> const & iffalse) {return INMOST::etype_branch_variable<A,B>(true_type,iftrue,iffalse);}
/// Branch on a marker: evaluates iftrue on marked elements, iffalse otherwise (works for shared and private markers).
template<class A, class B> __INLINE INMOST::marker_branch_variable<A,B> marker_branch(INMOST::MarkerType marker, INMOST::shell_dynamic_variable<typename A::Var,A> const & iftrue, INMOST::shell_dynamic_variable<typename B::Var,B> const & iffalse) {return INMOST::marker_branch_variable<A,B>(marker,iftrue,iffalse);}
/// Link an external scalar as a derivative-free variable; pvar must outlive the returned object.
__INLINE INMOST::const_link_variable extval(const INMOST_DATA_REAL_TYPE & pvar) {return INMOST::const_link_variable(&pvar);}
#endif //defined(USE_AUTODIFF) && defined(USE_MESH)
#endif //INMOST_AUTODIFF_ETVAR_H_INCLUDED
| 59.983849 | 500 | 0.727697 | [
"vector"
] |
455d21eaa8605804772a68b6f4735291e88a8ee4 | 8,538 | c | C | CM_GDP_CU/src/IO.c | CU-2021AM-GDP-T1/rollstiffnesscontrol-enhancehandling-matlabsimulink | 64162ebb0e0dc51f23e545f886bbcfce1e9e5ab6 | [
"MIT"
] | null | null | null | CM_GDP_CU/src/IO.c | CU-2021AM-GDP-T1/rollstiffnesscontrol-enhancehandling-matlabsimulink | 64162ebb0e0dc51f23e545f886bbcfce1e9e5ab6 | [
"MIT"
] | null | null | null | CM_GDP_CU/src/IO.c | CU-2021AM-GDP-T1/rollstiffnesscontrol-enhancehandling-matlabsimulink | 64162ebb0e0dc51f23e545f886bbcfce1e9e5ab6 | [
"MIT"
] | null | null | null | /*
******************************************************************************
** CarMaker - Version 10.2.1
** Vehicle Dynamics Simulation Toolkit
**
** Copyright (C) IPG Automotive GmbH
** Bannwaldallee 60 Phone +49.721.98520.0
** 76185 Karlsruhe Fax +49.721.98520.99
** Germany WWW www.ipg-automotive.com
******************************************************************************
**
** Connection to I/O hardware of the CarMaker/HIL test stand
**
** Connected test rig: ???
**
******************************************************************************
**
** Functions
** ---------
**
** - iGetCal ()
** - CalIn ()
** - CalInF ()
** - CalOut ()
** - CalOutF ()
** - LimitInt ()
** - IO_Init_First ()
** - IO_Init_Finalize ()
** - IO_Init ()
** - IO_Param_Get ()
** - IO_BeginCycle ()
** - IO_In ()
** - IO_Out ()
** - IO_Cleanup ()
**
******************************************************************************
*/
#include <Global.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <CarMaker.h>
#include <mio.h>
#include <ioconf.h>
#include <can_interface.h>
#include <FailSafeTester.h>
#include <flex.h>
#include <rbs.h>
#include <CM_XCP.h>
#include <CM_CCP.h>
#include "IOVec.h"
/*** I/O vector */
tIOVec IO;
/*** I/O configuration */
/* int IO_None; DON'T - Variable is predefined by CarMaker! */
int IO_CAN_IF;
int IO_FlexRay;
static struct tIOConfig IOConfiguration[] = {
/* This table should contain one line for each IO_xyz-flag in IOVec.h */
/* { <Flagvar>, <Name for -io>, <Description for -help> }, */
{ &IO_None, "none", "No I/O" }, /* Always keep this first line! */
{ &IO_CAN_IF, "can", "CAN communication" },
{ &IO_FlexRay, "flexray", "FlexRay communication" },
{ NULL, NULL, NULL } /* End of table */
};
/**** Additional useful functions *********************************************/
/*
** iGetCal()
**
** Read calibration parameters.
*/
void
iGetCal (tInfos *Inf, const char *key, tCal *cal, int optional)
{
cal->Min = 1e37;
cal->Max = -1e37;
cal->LimitLow = 1e37;
cal->LimitHigh = -1e37;
cal->Factor = 1.0;
cal->Offset = 0.0;
cal->Rezip = 0;
const char *item = iGetStrOpt(Inf, key, NULL);
if (item != NULL) {
int n = sscanf(item, "%g %g %g %g %d",
&cal->LimitLow, &cal->LimitHigh,
&cal->Factor, &cal->Offset, &cal->Rezip);
if (n != 5)
LogErrF(EC_Init, "Invalid calibration parameter entry '%s'", key);
} else {
if (!optional) {
LogErrF(EC_Init, "Missing calibration parameter entry '%s'", key);
return;
}
cal->LimitLow = -1e37;
cal->LimitHigh = 1e37;
}
cal->Min = cal->LimitHigh;
cal->Max = cal->LimitLow;
}
/*
** CalInF() / CalIn()
**
** Analog input -> calibration infos -> physical quantity
** Converts an I/O value (e.g. the voltage from an analog input module) to
** the corresponding physical value, delimited by LimitLow and LimitHigh.
*/
float
CalInF (tCal *cal, float Value)
{
float Result = (Value - cal->Offset) * cal->Factor;
if (cal->Rezip)
Result = 1.0 / Result;
if (Result < cal->Min) cal->Min = Result;
else if (Result > cal->Max) cal->Max = Result;
if (Result < cal->LimitLow) Result = cal->LimitLow;
else if (Result > cal->LimitHigh) Result = cal->LimitHigh;
return Result;
}
float
CalIn (tCal *cal, int Value)
{
return CalInF(cal, Value);
}
/*
** CalOutF() / CalOut()
**
** Physical quantity -> calibration infos -> analog output
** The physical value is delimited by LimitLow and LimitHigh and then converted
** to the corresponding I/O value (e.g. voltage for an analog output module).
*/
float
CalOutF (tCal *cal, float Value)
{
if (Value < cal->Min) cal->Min = Value;
else if (Value > cal->Max) cal->Max = Value;
if (Value < cal->LimitLow) Value = cal->LimitLow;
else if (Value > cal->LimitHigh) Value = cal->LimitHigh;
if (cal->Rezip) {
return 1.0 / (Value*cal->Factor) + cal->Offset;
} else {
return Value/cal->Factor + cal->Offset;
}
}
int
CalOut (tCal *cal, float Value)
{
return (int)CalOutF(cal, Value);
}
int
LimitInt (float fValue, int Min, int Max)
{
int Value = (int)fValue;
if (Value < Min) return Min;
else if (Value > Max) return Max;
return Value;
}
/*****************************************************************************/
/*
** IO_Init_First ()
**
** First, low level initialization of the IO module
**
** Call:
** - one times at start of program
** - no realtime conditions
*/
int
IO_Init_First (void)
{
memset(&IO, 0, sizeof(IO));
IO_SetConfigurations(IOConfiguration);
CANIf_Init_First();
FC_Init_First();
RBS_Init_First();
return 0;
}
/*
** IO_Init ()
**
** initialization
** - i/o hardware
** - add variables to data dictionary
**
** call:
** - single call at program start
*/
int
IO_Init (void)
{
Log("I/O Configuration: %s\n", IO_ListNames(NULL, 1));
if (IOConf_Init() < 0)
return -1;
/* hardware configuration "none" */
if (IO_None)
return 0;
int nErrors = Log_nError;
/*** MIO initialization */
if (MIO_Init(NULL) < 0) {
LogErrF(EC_General, "MIO initialization failed. I/O disabled (1)");
IO_SelectNone();
return -1;
}
MIO_SetAppState(0.0, MIO_SimState_AppInit);
// MIO_ModuleShow();
/* check for MIO errors */
if (nErrors != Log_nError) {
LogErrF(EC_General, "MIO initization failed. I/O disabled (2)");
IO_SelectNone();
return -1;
}
/*** FailSafeTester */
FST_ConfigureCAN();
if (IO_CAN_IF) {
if (CANIf_Init() < 0)
return -1;
}
if (IO_FlexRay) {
if (FC_Init())
return -1;
}
if (RBS_Init())
return -1;
return 0;
}
/*
** IO_Init_Finalize ()
**
** last (deferred) I/O initialization step
**
** call:
** - single call at program start in CarMaker_FinishStartup()
*/
int
IO_Init_Finalize (void)
{
RBS_MapQuants();
if (IO_FlexRay) {
if (FC_Start())
return -1;
}
if (IO_CAN_IF) {
if (CANIf_Init_Finalize() < 0)
return -1;
}
if (!IO_None) {
if (RBS_Start())
return -1;
}
return 0;
}
/*
** IO_Param_Get ()
**
** Get i/o configuration parameters
** - calibration
** - constant values
** - ids
*/
int
IO_Param_Get (tInfos *inf)
{
unsigned nError = GetInfoErrorCount ();
/* ignition off */
SetKl15 (0);
IOConf_Param_Get();
if (IO_None)
return 0;
if (IO_CAN_IF)
CANIf_Param_Get(inf, NULL);
if (IO_FlexRay)
FC_Param_Get(inf, NULL);
RBS_Param_Get(inf, NULL);
return nError != GetInfoErrorCount() ? -1 : 0;
}
void
IO_BeginCycle (void)
{
MIO_SetAppState(TimeGlobal, (tMIO_SimState)SimCore_State2MIO_SimState(SimCore.State));
}
/*
** IO_In ()
**
** reading signals from hardware / ECU
**
** CycleNo: simulation cycle counter, incremented every loop/millisecond
**
** call:
** - in the main loop
** - first function call in main loop, after waiting for next loop
** - just before User_In()
** - pay attention to realtime condition
*/
void
IO_In (unsigned CycleNo)
{
CAN_Msg Msg;
IO.DeltaT = SimCore.DeltaT;
IO.T = TimeGlobal;
IOConf_In(CycleNo);
if (IO_None)
return;
/*** FailSafeTester messages */
if (FST_IsActive()) {
while (MIO_M51_Recv(FST_CAN_Slot, FST_CAN_Ch, &Msg) == 0)
FST_MsgIn (CycleNo, &Msg);
}
if (IO_CAN_IF)
CANIf_In(CycleNo);
if (IO_FlexRay)
FC_In(CycleNo);
RBS_In(CycleNo);
}
/*
** IO_Out ()
**
** writing signals to hardware / ECU
**
** CycleNo: simulation cycle counter, incremented every loop/millisecond
**
** call:
** - in the main loop
** - last function call in main loop
** - just after User_Out()
** - pay attention to realtime condition
*/
void
IO_Out (unsigned CycleNo)
{
IOConf_Out(CycleNo);
if (IO_None)
return;
/*** Messages to the FailSafeTester */
FST_MsgOut(CycleNo);
RBS_Out(CycleNo);
}
/*
** IO_Cleanup ()
**
** Uninits all MIO hardware:
** - puts M-Modules into reset state
** - frees unneeded memory
*/
void
IO_Cleanup (void)
{
if (IO_None)
goto EndReturn;
RBS_Cleanup();
if (IO_FlexRay)
FC_Cleanup();
if (IO_CAN_IF)
CANIf_Cleanup();
MIO_SetAppState(TimeGlobal, MIO_SimState_AppExit);
MIO_ResetModules();
MIO_DeleteAll();
EndReturn:
return;
}
| 18.931264 | 90 | 0.574139 | [
"vector"
] |
4564670a788a53ddbaa8662575c0a146bc43874e | 57,379 | h | C | mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h | AK391/mindspore | f5aeaa9172dcd647885774e7f657593c81b79fc6 | [
"Apache-2.0"
] | 1 | 2022-03-05T02:59:21.000Z | 2022-03-05T02:59:21.000Z | mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h | AK391/mindspore | f5aeaa9172dcd647885774e7f657593c81b79fc6 | [
"Apache-2.0"
] | null | null | null | mindspore/ccsrc/frontend/parallel/auto_parallel/operator_costmodel.h | AK391/mindspore | f5aeaa9172dcd647885774e7f657593c81b79fc6 | [
"Apache-2.0"
] | null | null | null | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_
#define PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_
#include <memory>
#include <vector>
#include <map>
#include "frontend/parallel/device_manager.h"
#include "frontend/parallel/tensor_layout/tensor_info.h"
namespace mindspore {
namespace parallel {
#define MAXIMUM_INPUT_NUMBER 100
#define DEFAULT_DATA_TYPE_LENGTH 4
#define DROPOUT_COST_RATE 1.125 // the DropoutGenMask need 12.5% memory
#define GATHERV2_COST_WEIGHT0 3
#define GATHERV2_COST_WEIGHT1 7
#define GATHERV2_COST_WEIGHT2 2
#define GATHERV2_COST_WEIGHT3 6
class OperatorCost;
using OperatorCostPtr = std::shared_ptr<OperatorCost>;
template <typename T>
double ListProduct(std::vector<T> vec) {
double result = 1;
for (size_t i = 0; i < vec.size(); ++i) {
result *= vec[i];
}
return result;
}
// NOTE: Currently, the returned value in each method is bytes of memory size, which is calculated by the number of
// entries timing the length of each entry's data type
class OperatorCost {
public:
OperatorCost() {
// this is only for the case when set_is_parameter() and SetInputAndOutputTypeLength() are not invoked
for (size_t i = 0; i < MAXIMUM_INPUT_NUMBER; ++i) {
is_parameter_.push_back(false);
is_parameter_involve_.push_back(false);
inputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH);
outputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH);
}
}
virtual ~OperatorCost() = default;
void set_is_parameter(const std::vector<bool> &is_parameter);
void set_is_parameter_involve(const std::vector<bool> &);
void set_output_parameter_involve(int64_t);
void set_output_critical(int64_t);
void SetInputAndOutputTypeLength(const std::vector<size_t> &input_lengths, const std::vector<size_t> &output_lengths);
std::vector<size_t> inputs_type_lengths() const { return inputs_type_lengths_; }
std::vector<size_t> outputs_type_lengths() const { return outputs_type_lengths_; }
// per device communication cost
virtual double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const = 0;
virtual double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const = 0;
virtual double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const = 0;
// per device computation cost
virtual double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const = 0;
virtual double GetForwardComputationCost(const std::vector<TensorInfo> &inputs,
const std::vector<TensorInfo> &outputs, int64_t stage_id) const = 0;
virtual double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs,
const std::vector<TensorInfo> &outputs, int64_t stage_id) const = 0;
virtual void CalculateOutputInMemory() = 0;
virtual void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) = 0;
bool is_output_in_memory() const { return is_output_should_in_memory_; }
// per device PEAK memory cost in a training iteration
// Typically, the PEAK memory cost contributed by an operator is its output (if the output is parameter-involved),
// plus necessary inputs.
virtual double GetMemoryCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs) const;
// Contributing the input part for 'GetMemoryCost'
double GetInputMemoryCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs) const;
// Contributing the output part for 'GetMemoryCost'
double GetOutputMemoryCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs) const;
// per device memory cost in a inference phase
double GetMemoryCostForInference(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &) const;
protected:
// For each input in 'inputs_', a bool variable is true if the corresponding one is a parameter or a output of
// pre-operator that has parameters as input.
std::vector<bool> is_parameter_involve_;
int64_t output_parameter_involve_ = -1; // -1: unset; 0: not parameter_involved; 1: parameter_involved
// For each input in 'inputs_', there is a bool variable indicating whether that the corresponding input is parameter
std::vector<bool> is_parameter_;
// Whether the input should keep in memory in training phase. It depends on the operator and the operator's
// previous operators.
std::vector<bool> is_inputs_should_in_memory_;
// Whether the output should keep in memory in training phase. It depends on 'is_parameter_involve_' and the operator.
bool is_output_should_in_memory_ = false;
// For each input and output, the followings record the number of bytes of each element
std::vector<size_t> inputs_type_lengths_;
std::vector<size_t> outputs_type_lengths_;
// Whether the output is critical, which means that this output is included in calculating peak memory cost
// in the inference phase.
int64_t is_outputs_critical_ = -1;
};
using OperatorCostPtr = std::shared_ptr<OperatorCost>;
class MatMulCost : public OperatorCost {
public:
MatMulCost() : OperatorCost() {}
~MatMulCost() override = default;
// per device communication cost
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// per device computation cost
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
void CalculateOutputInMemory() override;
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using TensorDotCost = MatMulCost;
class CastCost : public OperatorCost {
public:
CastCost() : OperatorCost() {}
~CastCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// Not taking account of output
void CalculateOutputInMemory() override;
// Not Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using RepeatElementsCost = CastCost;
using NegCost = CastCost;
using ExpandDimsCost = CastCost;
using SqueezeCost = CastCost;
using ConcatCost = CastCost;
using LogicalNotCost = CastCost;
using SignCost = CastCost;
using FloorCost = CastCost;
using RoundCost = CastCost;
using CeilCost = CastCost;
using ZerosLikeCost = CastCost;
using OnesLikeCost = CastCost;
using RangeCost = CastCost;
using SplitCost = CastCost;
using ScatterUpdateCost = CastCost;
using UniformRealCost = CastCost;
using ResizeBilinearCost = CastCost;
using BoundingBoxEncodeCost = CastCost;
using IOUCost = CastCost;
using RandomChoicWithMaskCost = CastCost;
class SqrtCost : public CastCost {
public:
SqrtCost() : CastCost() {}
~SqrtCost() override = default;
// Taking account of output, not taking accounting of input
void CalculateOutputInMemory() override;
};
using TanhCost = SqrtCost;
using EluCost = SqrtCost;
using ReLUCost = SqrtCost;
using SigmoidCost = SqrtCost;
using ReciprocalCost =
SqrtCost; // The derivative of 'Reciprocal' is different on 'Ascend' and 'GPU'. Here, 'Ascend' is chosen
using InvCost = SqrtCost;
using RsqrtCost = SqrtCost;
using AsinhCost = SqrtCost;
using AcoshCost = SqrtCost;
using ReLUV2Cost = SqrtCost;
using TopKCost = SqrtCost;
class ReLU6Cost : public CastCost {
public:
ReLU6Cost() : CastCost() {}
~ReLU6Cost() override = default;
// Taking account of input, not taking account of output
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using SoftsignCost = ReLU6Cost;
using SoftplusCost = ReLU6Cost;
using SquareCost = ReLU6Cost;
using ExpCost = ReLU6Cost;
using LogCost = ReLU6Cost;
using CosCost = ReLU6Cost;
using ACosCost = ReLU6Cost;
using AbsCost = ReLU6Cost;
using TanCost = ReLU6Cost;
using SinCost = ReLU6Cost;
using SinhCost = ReLU6Cost;
using Log1pCost = ReLU6Cost;
using Expm1Cost = ReLU6Cost;
using CoshCost = ReLU6Cost;
using AtanhCost = ReLU6Cost;
using AtanCost = ReLU6Cost;
using AsinCost = ReLU6Cost;
using ErfCost = ReLU6Cost;
using ErfcCost = ReLU6Cost;
using ActivationInfoCost = ReLU6Cost;
using SelectCost = ReLU6Cost;
class TransposeCost : public CastCost {
public:
TransposeCost() : CastCost() {}
~TransposeCost() override = default;
// Taking account of input, not taking account of output
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class GeLUCost : public SqrtCost {
public:
GeLUCost() : SqrtCost() {}
~GeLUCost() override = default;
// Taking account of input and output
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using FastGeLUCost = GeLUCost;
using BesselI0eCost = GeLUCost;
using BesselI1eCost = GeLUCost;
using L2NormalizeCost = GeLUCost;
using MaxPoolCost = GeLUCost;
class SoftmaxCost : public OperatorCost {
public:
SoftmaxCost() : OperatorCost() {}
~SoftmaxCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t) const override;
// Taking account of output
void CalculateOutputInMemory() override;
// Not Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class TileCost : public SoftmaxCost {
public:
TileCost() : SoftmaxCost() {}
~TileCost() override = default;
// Not taking account of output
void CalculateOutputInMemory() override;
// Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class PackCost : public SoftmaxCost {
public:
PackCost() : SoftmaxCost() {}
~PackCost() override = default;
// Not taking account of output
void CalculateOutputInMemory() override;
// Not taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class BroadcastToCost : public SoftmaxCost {
public:
BroadcastToCost() : SoftmaxCost() {}
~BroadcastToCost() override = default;
// Not taking account of output
void CalculateOutputInMemory() override;
// Not Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class TmpIdentityCost : public OperatorCost {
public:
TmpIdentityCost() : OperatorCost() {}
~TmpIdentityCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// Not taking account of output
void CalculateOutputInMemory() override;
// Not taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using TmpIdentityCostPtr = std::shared_ptr<TmpIdentityCost>;
class BatchParallelCost : public OperatorCost {
public:
BatchParallelCost() : OperatorCost() {}
~BatchParallelCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// Not taking account of output
void CalculateOutputInMemory() override;
// Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class SparseSoftmaxCrossEntropyWithLogitsCost : public BatchParallelCost {
public:
SparseSoftmaxCrossEntropyWithLogitsCost() : BatchParallelCost() {}
~SparseSoftmaxCrossEntropyWithLogitsCost() override = default;
// Taking account of output
void CalculateOutputInMemory() override;
// Not taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class VirtualDatasetCost : public OperatorCost {
public:
VirtualDatasetCost() : OperatorCost() {}
~VirtualDatasetCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
// Not taking account of output
void CalculateOutputInMemory() override;
// Not taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
class GeneratorBaseCost : public OperatorCost {
public:
GeneratorBaseCost() : OperatorCost() {}
~GeneratorBaseCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
// Inputs vector is empty for generator ops.
double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
// Generator ops don't have backward steps.
double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
};
using GeneratorBaseCostPtr = std::shared_ptr<GeneratorBaseCost>;
class PReLUCost : public OperatorCost {
public:
PReLUCost() : OperatorCost() {}
~PReLUCost() override = default;
// per device communication cost
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// per device computation cost
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// Not taking account of output
void CalculateOutputInMemory() override;
// Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using PReLUCostPtr = std::shared_ptr<PReLUCost>;
class OneHotCost : public OperatorCost {
public:
OneHotCost() : OperatorCost() {}
~OneHotCost() override = default;
// per device communication cost
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// per device computation cost
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
// Not taking account of output
void CalculateOutputInMemory() override;
// Not taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using OneHotCostPtr = std::shared_ptr<OneHotCost>;
// Cost model for the SoftmaxCrossEntropyWithLogits operator.
class SoftmaxCrossEntropyWithLogitsCost : public OperatorCost {
 public:
  SoftmaxCrossEntropyWithLogitsCost() : OperatorCost() {}
  ~SoftmaxCrossEntropyWithLogitsCost() override = default;
  // Per-device communication cost: sum of the forward and backward parts.
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    const double forward_comm = GetForwardCommCost(inputs, outputs, stage_id);
    const double backward_comm = GetBackwardCommCost(inputs, outputs, stage_id);
    return forward_comm + backward_comm;
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // Per-device computation cost: sum of the forward and backward parts.
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    const double forward_comp = GetForwardComputationCost(inputs, outputs, stage_id);
    const double backward_comp = GetBackwardComputationCost(inputs, outputs, stage_id);
    return forward_comp + backward_comp;
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t stage_id) const override;
  // Memory estimation: the output IS counted, the inputs are NOT.
  void CalculateOutputInMemory() override;
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for the Reshape operator (redistribution may trigger communication).
class ReshapeCost : public OperatorCost {
 public:
  ReshapeCost() : OperatorCost() {}
  ~ReshapeCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t stage_id) const override;
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Not taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using ReshapeCostPtr = std::shared_ptr<ReshapeCost>;
// Cost model shared by simple element-wise / comparison ops (see aliases below):
// no forward communication; backward communication only for parameter gradients.
class SubCost : public OperatorCost {
 public:
  SubCost() : OperatorCost() {}
  ~SubCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  // Element-wise forward pass needs no inter-device communication.
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t stage_id) const override;
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Not taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Operators whose cost behaves exactly like Sub.
using TensorAddCost = SubCost;
using FloorDivCost = SubCost;
using AssignSubCost = SubCost;
using AssignAddCost = SubCost;
using LogicalAndCost = SubCost;
using LogicalOrCost = SubCost;
using BiasAddCost = SubCost;
using EqualCost = SubCost;
using ApproximateEqualCost = SubCost;
using NotEqualCost = SubCost;
using GreaterCost = SubCost;
using GreaterEqualCost = SubCost;
using LessCost = SubCost;
using LessEqualCost = SubCost;
using GatherNdCost = SubCost;
// Like SubCost, but the backward pass needs the inputs kept in memory.
class MulCost : public SubCost {
 public:
  MulCost() : SubCost() {}
  ~MulCost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using GatherDCost = MulCost;
// Like SubCost, but the backward pass needs both the inputs and the output.
class DivCost : public SubCost {
 public:
  DivCost() : SubCost() {}
  ~DivCost() override = default;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// NOTE(review): "ReadDivCost" looks like a typo for RealDivCost (RealDiv op);
// renaming would break existing callers, so the alias is kept as-is — confirm.
using ReadDivCost = DivCost;
// Like SubCost, but the backward pass needs the inputs kept in memory.
class ModCost : public SubCost {
 public:
  ModCost() : SubCost() {}
  ~ModCost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using FloorModCost = ModCost;
// Like SubCost, but the backward pass needs both the inputs and the output.
class PowCost : public SubCost {
 public:
  PowCost() : SubCost() {}
  ~PowCost() override = default;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like SubCost, but the backward pass needs the inputs kept in memory.
class AssignCost : public SubCost {
 public:
  AssignCost() : SubCost() {}
  ~AssignCost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like SubCost, but the backward pass needs the inputs kept in memory.
class SigmoidCrossEntropyWithLogitsCost : public SubCost {
 public:
  SigmoidCrossEntropyWithLogitsCost() : SubCost() {}
  ~SigmoidCrossEntropyWithLogitsCost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like SubCost, but the backward pass needs the inputs kept in memory.
class Atan2Cost : public SubCost {
 public:
  Atan2Cost() : SubCost() {}
  ~Atan2Cost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like SubCost, but the backward pass needs both the inputs and the output.
class DivNoNanCost : public SubCost {
 public:
  DivNoNanCost() : SubCost() {}
  ~DivNoNanCost() override = default;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like SubCost, but the backward pass needs the inputs kept in memory.
class MaximumCost : public SubCost {
 public:
  MaximumCost() : SubCost() {}
  ~MaximumCost() override = default;
  // Taking account of input, not taking account of output
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using MinimumCost = MaximumCost;
// Like CastCost, but the backward pass needs the inputs kept in memory.
class SliceCost : public CastCost {
 public:
  SliceCost() : CastCost() {}
  ~SliceCost() override = default;
  // Not taking account of output, taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Like CastCost, but the backward pass needs the inputs kept in memory.
class StridedSliceCost : public CastCost {
 public:
  StridedSliceCost() : CastCost() {}
  ~StridedSliceCost() override = default;
  // Not taking account of output, taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for reduction operators (ReduceSum and friends, see alias below).
class ReduceSumCost : public OperatorCost {
 public:
  ReduceSumCost() : OperatorCost() {}
  ~ReduceSumCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  // BUGFIX: this previously added GetBackwardCommCost(), which double-counted
  // communication cost (already reported by GetCommCost) inside the computation
  // cost; use the backward *computation* cost, consistent with every other
  // OperatorCost subclass in this file.
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  // Backward reduction adds no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // When true, the reduction is performed across the batch dimension.
  void set_cross_batch(bool cb) { cross_batch_ = cb; }
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;

 protected:
  bool cross_batch_ = false;
};
using ReduceMethodCost = ReduceSumCost;
// ReduceMean: same as ReduceSum, but with its own forward computation cost
// (the extra division by the element count).
class ReduceMeanCost : public ReduceSumCost {
 public:
  ReduceMeanCost() : ReduceSumCost() {}
  ~ReduceMeanCost() override = default;
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
};
// ReduceMin/ReduceMax: backward pass needs both the inputs and the output.
class ReduceMinCost : public ReduceSumCost {
 public:
  ReduceMinCost() : ReduceSumCost() {}
  ~ReduceMinCost() override = default;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using ReduceMaxCost = ReduceMinCost;
// ArgMaxWithValue/ArgMinWithValue: backward pass needs inputs and output.
class ArgMaxWithValueCost : public ReduceSumCost {
 public:
  ArgMaxWithValueCost() : ReduceSumCost() {}
  ~ArgMaxWithValueCost() override = default;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using ArgMinWithValueCost = ArgMaxWithValueCost;
class GetNextCost : public OperatorCost {
public:
GetNextCost() : OperatorCost() {}
~GetNextCost() override = default;
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
return 0.0;
}
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
// Inputs vector is empty for generator ops.
double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
// Generator ops don't have backward steps.
double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
int64_t) const override {
return 0.0;
}
// Not taking account of output
void CalculateOutputInMemory() override;
// Not Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using GetNextCostPtr = std::shared_ptr<GetNextCost>;
// Cost model for the DSDMatmul (dense-sparse-dense matmul) operator:
// no communication cost; only the forward computation cost is modelled.
class DSDMatmulCost : public OperatorCost {
 public:
  DSDMatmulCost() : OperatorCost() {}
  ~DSDMatmulCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                   int64_t) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Not taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using DSDMatmulCostPtr = std::shared_ptr<DSDMatmulCost>;
// For memory cost, taking account of output, not taking account of input
// (inherited from SqrtCost). No communication; forward computation cost only.
class DropOutCost : public SqrtCost {
 public:
  DropOutCost() : SqrtCost() {}
  ~DropOutCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                   int64_t) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
};
// DropOutDoMask: unlike DropOutCost, memory counts the inputs, not the output.
class DropOutDoMaskCost : public DropOutCost {
 public:
  DropOutDoMaskCost() : DropOutCost() {}
  ~DropOutDoMaskCost() override = default;
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for the UnsortedSegmentSum operator.
class UnsortedSegmentSumCost : public OperatorCost {
 public:
  UnsortedSegmentSumCost() : OperatorCost() {}
  ~UnsortedSegmentSumCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                   int64_t) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for UnsortedSegmentMin/Max: like UnsortedSegmentSum, but the
// backward pass needs both inputs and output kept in memory.
class UnsortedSegmentMinCost : public OperatorCost {
 public:
  UnsortedSegmentMinCost() : OperatorCost() {}
  ~UnsortedSegmentMinCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                   int64_t) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using UnsortedSegmentMaxCost = UnsortedSegmentMinCost;
// Cost model for the LayerNorm operator.
class LayerNormCost : public OperatorCost {
 public:
  LayerNormCost() : OperatorCost() {}
  ~LayerNormCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  // Forward pass needs no inter-device communication.
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                   int64_t) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for the Unique operator.
class UniqueCost : public OperatorCost {
 public:
  UniqueCost() : OperatorCost() {}
  ~UniqueCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t) const override;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Not taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for the UniformCandidateSampler operator: no communication cost,
// no backward computation cost.
class UniformCandidateSamplerCost : public OperatorCost {
 public:
  UniformCandidateSamplerCost() : OperatorCost() {}
  ~UniformCandidateSamplerCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  // Parameters unnamed (unused) to avoid -Wunused-parameter, matching the other
  // zero-returning overrides in this file; return 0.0 (double) for consistency.
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  double GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const override {
    return 0.0;
  }
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  // Backward step contributes no computation cost.
  double GetBackwardComputationCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                                    int64_t) const override {
    return 0.0;
  }
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Not taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// Cost model for the GatherV2 operator.
class GatherV2Cost : public OperatorCost {
 public:
  GatherV2Cost() : OperatorCost() {}
  ~GatherV2Cost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t) const override;
  // Not taking account of output
  void CalculateOutputInMemory() override;
  // Taking account of input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
// GatherV2 variant whose cost depends on the gather axis and the sharding
// strategy (set by the operator before cost evaluation).
class GatherV2PCost : public GatherV2Cost {
 public:
  GatherV2PCost() : GatherV2Cost(), axis_(0) {}
  ~GatherV2PCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t) const override;
  void set_axis(int64_t axis) { axis_ = axis; }
  void set_strategy(const Shape &strategy) { strategy_ = strategy; }

 protected:
  int64_t axis_;      // gather axis
  Shape strategy_;    // sharding strategy used when evaluating the cost
};
class MatmulDDSCost : public OperatorCost {
public:
MatmulDDSCost() : OperatorCost() {}
~MatmulDDSCost() override = default;
// per device communication cost
double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
}
double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return 0.0;
};
double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return 0.0;
};
// per device computation cost
double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
}
double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override;
double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
int64_t stage_id) const override {
return 0.0;
};
// Not taking account of output
void CalculateOutputInMemory() override;
// Taking account of input
void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
};
using MatmulDDSCostPtr = std::shared_ptr<MatmulDDSCost>;
// Cost model for the CropAndResize operator; the cost depends on the sharding
// strategy and the requested crop size.
class CropAndResizeCost : public OperatorCost {
 public:
  CropAndResizeCost() : OperatorCost() {}
  ~CropAndResizeCost() override = default;
  // per device communication cost: forward + backward
  double GetCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                     int64_t stage_id) const override {
    return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id);
  }
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                             int64_t stage_id) const override;
  // per device computation cost: forward + backward
  double GetComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override {
    return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id);
  }
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t stage_id) const override;
  // Taking account for input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
  // Not taking account of output
  void CalculateOutputInMemory() override;
  void set_strategy(const Shape &strategy) { strategy_ = strategy; }
  void set_crop_size(const std::vector<int64_t> &crop_size) { crop_size_ = crop_size; }

 protected:
  Shape strategy_;                  // sharding strategy used in the cost formulas
  std::vector<int64_t> crop_size_;  // target crop size [height, width]

 private:
  // Relative weights of the cost-formula terms (empirical constants).
  static const size_t CROP_AND_RESIZE_COST_WEIGHT0 = 1;
  static const size_t CROP_AND_RESIZE_COST_WEIGHT1 = 1;
  static const size_t CROP_AND_RESIZE_COST_WEIGHT2 = 8;
  static const size_t CROP_AND_RESIZE_COST_WEIGHT3 = 2;
};
// Cost model for the ROIAlign operator: CropAndResize with its own cost
// formulas and a pooled output shape set by the operator.
class ROIAlignCost : public CropAndResizeCost {
 public:
  ROIAlignCost() : CropAndResizeCost() {}
  ~ROIAlignCost() override = default;
  double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                            int64_t stage_id) const override;
  double GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                   int64_t stage_id) const override;
  double GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int64_t stage_id) const override;
  // Taking account for input
  void CalculateInputsInMemory(const std::map<size_t, bool> &prev_output_in_mem) override;
  // Taking account of output
  void CalculateOutputInMemory() override;
  // Pass by const reference: the previous by-value parameter was copied twice
  // (argument -> parameter -> member); source-compatible for all callers and
  // consistent with set_strategy()/set_crop_size() above.
  void set_pooled_shape(const Shape &pooled_shape) { pooled_shape_ = pooled_shape; }

 protected:
  Shape pooled_shape_;  // pooled output shape used in the cost formulas

 private:
  // Relative weights of the cost-formula terms (empirical constants).
  static const size_t ROI_ALIGN_COST_WEIGHT0 = 1;
  static const size_t ROI_ALIGN_COST_WEIGHT1 = 4;
  static const size_t ROI_ALIGN_COST_WEIGHT2 = 2;
};
} // namespace parallel
} // namespace mindspore
#endif // PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_
| 47.77602 | 120 | 0.714495 | [
"shape",
"vector"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.