code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#!/usr/bin/env python
import os, re
def lineify_fileobjs(ifo, ofo, strip=False):
from pyutil.strutil import pop_trailing_newlines, split_on_newlines
for l in ifo:
for sl in split_on_newlines(pop_trailing_newlines(l)):
if strip:
sl = sl.strip()
ofo.write(pop_trailing_newlines(sl) + '\n')
def lineify_file(fname, strip=False, nobak=True):
f = open(fname, "rU")
from pyutil.fileutil import ReopenableNamedTemporaryFile
rntf = ReopenableNamedTemporaryFile()
fo = open(rntf.name, "wb")
for l in f:
if strip:
l = l.strip() + '\n'
fo.write(l)
fo.close()
import shutil
if not nobak:
shutil.copyfile(fname, fname + ".lines.py-bak")
import shutil
try:
shutil.move(rntf.name, fname)
except EnvironmentError:
# Couldn't atomically overwrite, so just hope that this process doesn't die
# and the target file doesn't get recreated in between the following two
# operations:
if nobak:
os.remove(fname)
else:
shutil.move(fname, fname + ".lines.py-bak-2")
shutil.move(rntf.name, fname)
def darcs_metadir_dirpruner(dirs):
if "_darcs" in dirs:
dirs.remove("_darcs")
SCRE=re.compile("\\.(py|php|c|h|cpp|hpp|txt|sh|pyx|pxi|html|htm)$|makefile$", re.IGNORECASE)
def source_code_filepruner(fname):
return SCRE.search(fname)
def all_filepruner(fname):
return True
def all_dirpruner(dirs):
return
def lineify_all_files(dirname, strip=False, nobak=True, dirpruner=all_dirpruner, filepruner=all_filepruner):
for (root, dirs, files,) in os.walk(dirname):
dirpruner(dirs)
for fname in files:
fullfname = os.path.join(root, fname)
if filepruner(fullfname):
lineify_file(fullfname, strip=strip, nobak=nobak) | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* int8.c
* Internal 64-bit integer operations
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/adt/int8.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <ctype.h>
#include <limits.h>
#include <math.h>
#include "common/int.h"
#include "funcapi.h"
#include "libpq/pqformat.h"
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/optimizer.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
typedef struct
{
int64 current;
int64 finish;
int64 step;
} generate_series_fctx;
/***********************************************************************
**
** Routines for 64-bit integers.
**
***********************************************************************/
/*----------------------------------------------------------
* Formatting and conversion routines.
*---------------------------------------------------------*/
/* int8in()
*/
Datum
int8in(PG_FUNCTION_ARGS)
{
char *num = PG_GETARG_CSTRING(0);
PG_RETURN_INT64(pg_strtoint64_safe(num, fcinfo->context));
}
/* int8out()
*/
Datum
int8out(PG_FUNCTION_ARGS)
{
int64 val = PG_GETARG_INT64(0);
char buf[MAXINT8LEN + 1];
char *result;
int len;
len = pg_lltoa(val, buf) + 1;
/*
* Since the length is already known, we do a manual palloc() and memcpy()
* to avoid the strlen() call that would otherwise be done in pstrdup().
*/
result = palloc(len);
memcpy(result, buf, len);
PG_RETURN_CSTRING(result);
}
/*
* int8recv - converts external binary format to int8
*/
Datum
int8recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
PG_RETURN_INT64(pq_getmsgint64(buf));
}
/*
* int8send - converts int8 to binary format
*/
Datum
int8send(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendint64(&buf, arg1);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Relational operators for int8s, including cross-data-type comparisons.
*---------------------------------------------------------*/
/* int8relop()
* Is val1 relop val2?
*/
Datum
int8eq(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 == val2);
}
Datum
int8ne(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 != val2);
}
Datum
int8lt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 < val2);
}
Datum
int8gt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 > val2);
}
Datum
int8le(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 <= val2);
}
Datum
int8ge(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 >= val2);
}
/* int84relop()
* Is 64-bit val1 relop 32-bit val2?
*/
Datum
int84eq(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 == val2);
}
Datum
int84ne(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 != val2);
}
Datum
int84lt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 < val2);
}
Datum
int84gt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 > val2);
}
Datum
int84le(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 <= val2);
}
Datum
int84ge(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int32 val2 = PG_GETARG_INT32(1);
PG_RETURN_BOOL(val1 >= val2);
}
/* int48relop()
* Is 32-bit val1 relop 64-bit val2?
*/
Datum
int48eq(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 == val2);
}
Datum
int48ne(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 != val2);
}
Datum
int48lt(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 < val2);
}
Datum
int48gt(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 > val2);
}
Datum
int48le(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 <= val2);
}
Datum
int48ge(PG_FUNCTION_ARGS)
{
int32 val1 = PG_GETARG_INT32(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 >= val2);
}
/* int82relop()
* Is 64-bit val1 relop 16-bit val2?
*/
Datum
int82eq(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 == val2);
}
Datum
int82ne(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 != val2);
}
Datum
int82lt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 < val2);
}
Datum
int82gt(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 > val2);
}
Datum
int82le(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 <= val2);
}
Datum
int82ge(PG_FUNCTION_ARGS)
{
int64 val1 = PG_GETARG_INT64(0);
int16 val2 = PG_GETARG_INT16(1);
PG_RETURN_BOOL(val1 >= val2);
}
/* int28relop()
* Is 16-bit val1 relop 64-bit val2?
*/
Datum
int28eq(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 == val2);
}
Datum
int28ne(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 != val2);
}
Datum
int28lt(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 < val2);
}
Datum
int28gt(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 > val2);
}
Datum
int28le(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 <= val2);
}
Datum
int28ge(PG_FUNCTION_ARGS)
{
int16 val1 = PG_GETARG_INT16(0);
int64 val2 = PG_GETARG_INT64(1);
PG_RETURN_BOOL(val1 >= val2);
}
/*
* in_range support function for int8.
*
* Note: we needn't supply int8_int4 or int8_int2 variants, as implicit
* coercion of the offset value takes care of those scenarios just as well.
*/
Datum
in_range_int8_int8(PG_FUNCTION_ARGS)
{
int64 val = PG_GETARG_INT64(0);
int64 base = PG_GETARG_INT64(1);
int64 offset = PG_GETARG_INT64(2);
bool sub = PG_GETARG_BOOL(3);
bool less = PG_GETARG_BOOL(4);
int64 sum;
if (offset < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE),
errmsg("invalid preceding or following size in window function")));
if (sub)
offset = -offset; /* cannot overflow */
if (unlikely(pg_add_s64_overflow(base, offset, &sum)))
{
/*
* If sub is false, the true sum is surely more than val, so correct
* answer is the same as "less". If sub is true, the true sum is
* surely less than val, so the answer is "!less".
*/
PG_RETURN_BOOL(sub ? !less : less);
}
if (less)
PG_RETURN_BOOL(val <= sum);
else
PG_RETURN_BOOL(val >= sum);
}
/*----------------------------------------------------------
* Arithmetic operators on 64-bit integers.
*---------------------------------------------------------*/
Datum
int8um(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
int64 result;
if (unlikely(arg == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
result = -arg;
PG_RETURN_INT64(result);
}
Datum
int8up(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
PG_RETURN_INT64(arg);
}
Datum
int8pl(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_add_s64_overflow(arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int8mi(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_sub_s64_overflow(arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int8mul(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_mul_s64_overflow(arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int8div(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (arg2 == 0)
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/*
* INT64_MIN / -1 is problematic, since the result can't be represented on
* a two's-complement machine. Some machines produce INT64_MIN, some
* produce zero, some throw an exception. We can dodge the problem by
* recognizing that division by -1 is the same as negation.
*/
if (arg2 == -1)
{
if (unlikely(arg1 == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
result = -arg1;
PG_RETURN_INT64(result);
}
/* No overflow is possible */
result = arg1 / arg2;
PG_RETURN_INT64(result);
}
/* int8abs()
* Absolute value
*/
Datum
int8abs(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 result;
if (unlikely(arg1 == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
result = (arg1 < 0) ? -arg1 : arg1;
PG_RETURN_INT64(result);
}
/* int8mod()
* Modulo operation.
*/
Datum
int8mod(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
if (unlikely(arg2 == 0))
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/*
* Some machines throw a floating-point exception for INT64_MIN % -1,
* which is a bit silly since the correct answer is perfectly
* well-defined, namely zero.
*/
if (arg2 == -1)
PG_RETURN_INT64(0);
/* No overflow is possible */
PG_RETURN_INT64(arg1 % arg2);
}
/*
* Greatest Common Divisor
*
* Returns the largest positive integer that exactly divides both inputs.
* Special cases:
* - gcd(x, 0) = gcd(0, x) = abs(x)
* because 0 is divisible by anything
* - gcd(0, 0) = 0
* complies with the previous definition and is a common convention
*
* Special care must be taken if either input is INT64_MIN ---
* gcd(0, INT64_MIN), gcd(INT64_MIN, 0) and gcd(INT64_MIN, INT64_MIN) are
* all equal to abs(INT64_MIN), which cannot be represented as a 64-bit signed
* integer.
*/
static int64
int8gcd_internal(int64 arg1, int64 arg2)
{
int64 swap;
int64 a1,
a2;
/*
* Put the greater absolute value in arg1.
*
* This would happen automatically in the loop below, but avoids an
* expensive modulo operation, and simplifies the special-case handling
* for INT64_MIN below.
*
* We do this in negative space in order to handle INT64_MIN.
*/
a1 = (arg1 < 0) ? arg1 : -arg1;
a2 = (arg2 < 0) ? arg2 : -arg2;
if (a1 > a2)
{
swap = arg1;
arg1 = arg2;
arg2 = swap;
}
/* Special care needs to be taken with INT64_MIN. See comments above. */
if (arg1 == PG_INT64_MIN)
{
if (arg2 == 0 || arg2 == PG_INT64_MIN)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
/*
* Some machines throw a floating-point exception for INT64_MIN % -1,
* which is a bit silly since the correct answer is perfectly
* well-defined, namely zero. Guard against this and just return the
* result, gcd(INT64_MIN, -1) = 1.
*/
if (arg2 == -1)
return 1;
}
/* Use the Euclidean algorithm to find the GCD */
while (arg2 != 0)
{
swap = arg2;
arg2 = arg1 % arg2;
arg1 = swap;
}
/*
* Make sure the result is positive. (We know we don't have INT64_MIN
* anymore).
*/
if (arg1 < 0)
arg1 = -arg1;
return arg1;
}
Datum
int8gcd(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
result = int8gcd_internal(arg1, arg2);
PG_RETURN_INT64(result);
}
/*
* Least Common Multiple
*/
Datum
int8lcm(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 gcd;
int64 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a
* division-by-zero error below when x is zero, and an overflow error from
* the GCD computation when x = INT64_MIN.
*/
if (arg1 == 0 || arg2 == 0)
PG_RETURN_INT64(0);
/* lcm(x, y) = abs(x / gcd(x, y) * y) */
gcd = int8gcd_internal(arg1, arg2);
arg1 = arg1 / gcd;
if (unlikely(pg_mul_s64_overflow(arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
/* If the result is INT64_MIN, it cannot be represented. */
if (unlikely(result == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
if (result < 0)
result = -result;
PG_RETURN_INT64(result);
}
Datum
int8inc(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
int64 result;
if (unlikely(pg_add_s64_overflow(arg, 1, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int8dec(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
int64 result;
if (unlikely(pg_sub_s64_overflow(arg, 1, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
/*
* These functions are exactly like int8inc/int8dec but are used for
* aggregates that count only non-null values. Since the functions are
* declared strict, the null checks happen before we ever get here, and all we
* need do is increment the state value. We could actually make these pg_proc
* entries point right at int8inc/int8dec, but then the opr_sanity regression
* test would complain about mismatched entries for a built-in function.
*/
Datum
int8inc_any(PG_FUNCTION_ARGS)
{
return int8inc(fcinfo);
}
Datum
int8inc_float8_float8(PG_FUNCTION_ARGS)
{
return int8inc(fcinfo);
}
Datum
int8dec_any(PG_FUNCTION_ARGS)
{
return int8dec(fcinfo);
}
/*
* int8inc_support
* prosupport function for int8inc() and int8inc_any()
*/
Datum
int8inc_support(PG_FUNCTION_ARGS)
{
Node *rawreq = (Node *) PG_GETARG_POINTER(0);
if (IsA(rawreq, SupportRequestWFuncMonotonic))
{
SupportRequestWFuncMonotonic *req = (SupportRequestWFuncMonotonic *) rawreq;
MonotonicFunction monotonic = MONOTONICFUNC_NONE;
int frameOptions = req->window_clause->frameOptions;
/* No ORDER BY clause then all rows are peers */
if (req->window_clause->orderClause == NIL)
monotonic = MONOTONICFUNC_BOTH;
else
{
/*
* Otherwise take into account the frame options. When the frame
* bound is the start of the window then the resulting value can
* never decrease, therefore is monotonically increasing
*/
if (frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
monotonic |= MONOTONICFUNC_INCREASING;
/*
* Likewise, if the frame bound is the end of the window then the
* resulting value can never decrease.
*/
if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
monotonic |= MONOTONICFUNC_DECREASING;
}
req->monotonic = monotonic;
PG_RETURN_POINTER(req);
}
if (IsA(rawreq, SupportRequestSimplifyAggref))
{
SupportRequestSimplifyAggref *req = (SupportRequestSimplifyAggref *) rawreq;
Aggref *agg = req->aggref;
/*
* Check for COUNT(ANY) and try to convert to COUNT(*). The input
* argument cannot be NULL, we can't have an ORDER BY / DISTINCT in
* the aggregate, and agglevelsup must be 0.
*
* Technically COUNT(ANY) must have 1 arg, but be paranoid and check.
*/
if (agg->aggfnoid == F_COUNT_ANY && list_length(agg->args) == 1)
{
TargetEntry *tle = (TargetEntry *) linitial(agg->args);
Expr *arg = tle->expr;
/* Check for unsupported cases */
if (agg->aggdistinct != NIL || agg->aggorder != NIL ||
agg->agglevelsup != 0)
PG_RETURN_POINTER(NULL);
/* If the arg isn't NULLable, do the conversion */
if (expr_is_nonnullable(req->root, arg, false))
{
Aggref *newagg;
/* We don't expect these to have been set yet */
Assert(agg->aggtransno == -1);
Assert(agg->aggtranstype == InvalidOid);
/* Convert COUNT(ANY) to COUNT(*) by making a new Aggref */
newagg = makeNode(Aggref);
memcpy(newagg, agg, sizeof(Aggref));
newagg->aggfnoid = F_COUNT_;
/* count(*) has no args */
newagg->aggargtypes = NULL;
newagg->args = NULL;
newagg->aggstar = true;
newagg->location = -1;
PG_RETURN_POINTER(newagg);
}
}
}
PG_RETURN_POINTER(NULL);
}
Datum
int8larger(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
result = ((arg1 > arg2) ? arg1 : arg2);
PG_RETURN_INT64(result);
}
Datum
int8smaller(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
result = ((arg1 < arg2) ? arg1 : arg2);
PG_RETURN_INT64(result);
}
Datum
int84pl(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
int64 result;
if (unlikely(pg_add_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int84mi(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
int64 result;
if (unlikely(pg_sub_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int84mul(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
int64 result;
if (unlikely(pg_mul_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int84div(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
int64 result;
if (arg2 == 0)
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/*
* INT64_MIN / -1 is problematic, since the result can't be represented on
* a two's-complement machine. Some machines produce INT64_MIN, some
* produce zero, some throw an exception. We can dodge the problem by
* recognizing that division by -1 is the same as negation.
*/
if (arg2 == -1)
{
if (unlikely(arg1 == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
result = -arg1;
PG_RETURN_INT64(result);
}
/* No overflow is possible */
result = arg1 / arg2;
PG_RETURN_INT64(result);
}
Datum
int48pl(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_add_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int48mi(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_sub_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int48mul(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_mul_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int48div(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int64 arg2 = PG_GETARG_INT64(1);
if (unlikely(arg2 == 0))
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/* No overflow is possible */
PG_RETURN_INT64((int64) arg1 / arg2);
}
Datum
int82pl(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int16 arg2 = PG_GETARG_INT16(1);
int64 result;
if (unlikely(pg_add_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int82mi(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int16 arg2 = PG_GETARG_INT16(1);
int64 result;
if (unlikely(pg_sub_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int82mul(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int16 arg2 = PG_GETARG_INT16(1);
int64 result;
if (unlikely(pg_mul_s64_overflow(arg1, (int64) arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int82div(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int16 arg2 = PG_GETARG_INT16(1);
int64 result;
if (unlikely(arg2 == 0))
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/*
* INT64_MIN / -1 is problematic, since the result can't be represented on
* a two's-complement machine. Some machines produce INT64_MIN, some
* produce zero, some throw an exception. We can dodge the problem by
* recognizing that division by -1 is the same as negation.
*/
if (arg2 == -1)
{
if (unlikely(arg1 == PG_INT64_MIN))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
result = -arg1;
PG_RETURN_INT64(result);
}
/* No overflow is possible */
result = arg1 / arg2;
PG_RETURN_INT64(result);
}
Datum
int28pl(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_add_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int28mi(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_sub_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int28mul(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
if (unlikely(pg_mul_s64_overflow((int64) arg1, arg2, &result)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64(result);
}
Datum
int28div(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
int64 arg2 = PG_GETARG_INT64(1);
if (unlikely(arg2 == 0))
{
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
/* ensure compiler realizes we mustn't reach the division (gcc bug) */
PG_RETURN_NULL();
}
/* No overflow is possible */
PG_RETURN_INT64((int64) arg1 / arg2);
}
/* Binary arithmetics
*
* int8and - returns arg1 & arg2
* int8or - returns arg1 | arg2
* int8xor - returns arg1 # arg2
* int8not - returns ~arg1
* int8shl - returns arg1 << arg2
* int8shr - returns arg1 >> arg2
*/
Datum
int8and(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
PG_RETURN_INT64(arg1 & arg2);
}
Datum
int8or(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
PG_RETURN_INT64(arg1 | arg2);
}
Datum
int8xor(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
PG_RETURN_INT64(arg1 ^ arg2);
}
Datum
int8not(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
PG_RETURN_INT64(~arg1);
}
Datum
int8shl(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
PG_RETURN_INT64(arg1 << arg2);
}
Datum
int8shr(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int32 arg2 = PG_GETARG_INT32(1);
PG_RETURN_INT64(arg1 >> arg2);
}
/*----------------------------------------------------------
* Conversion operators.
*---------------------------------------------------------*/
Datum
int48(PG_FUNCTION_ARGS)
{
int32 arg = PG_GETARG_INT32(0);
PG_RETURN_INT64((int64) arg);
}
Datum
int84(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
if (unlikely(arg < PG_INT32_MIN) || unlikely(arg > PG_INT32_MAX))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
PG_RETURN_INT32((int32) arg);
}
Datum
int28(PG_FUNCTION_ARGS)
{
int16 arg = PG_GETARG_INT16(0);
PG_RETURN_INT64((int64) arg);
}
Datum
int82(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
if (unlikely(arg < PG_INT16_MIN) || unlikely(arg > PG_INT16_MAX))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("smallint out of range")));
PG_RETURN_INT16((int16) arg);
}
Datum
i8tod(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
float8 result;
result = arg;
PG_RETURN_FLOAT8(result);
}
/* dtoi8()
* Convert float8 to 8-byte integer.
*/
Datum
dtoi8(PG_FUNCTION_ARGS)
{
float8 num = PG_GETARG_FLOAT8(0);
/*
* Get rid of any fractional part in the input. This is so we don't fail
* on just-out-of-range values that would round into range. Note
* assumption that rint() will pass through a NaN or Inf unchanged.
*/
num = rint(num);
/* Range check */
if (unlikely(isnan(num) || !FLOAT8_FITS_IN_INT64(num)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64((int64) num);
}
Datum
i8tof(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
float4 result;
result = arg;
PG_RETURN_FLOAT4(result);
}
/* ftoi8()
* Convert float4 to 8-byte integer.
*/
Datum
ftoi8(PG_FUNCTION_ARGS)
{
float4 num = PG_GETARG_FLOAT4(0);
/*
* Get rid of any fractional part in the input. This is so we don't fail
* on just-out-of-range values that would round into range. Note
* assumption that rint() will pass through a NaN or Inf unchanged.
*/
num = rint(num);
/* Range check */
if (unlikely(isnan(num) || !FLOAT4_FITS_IN_INT64(num)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
PG_RETURN_INT64((int64) num);
}
Datum
i8tooid(PG_FUNCTION_ARGS)
{
int64 arg = PG_GETARG_INT64(0);
if (unlikely(arg < 0) || unlikely(arg > PG_UINT32_MAX))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("OID out of range")));
PG_RETURN_OID((Oid) arg);
}
Datum
oidtoi8(PG_FUNCTION_ARGS)
{
Oid arg = PG_GETARG_OID(0);
PG_RETURN_INT64((int64) arg);
}
Datum
oidtooid8(PG_FUNCTION_ARGS)
{
Oid arg = PG_GETARG_OID(0);
PG_RETURN_OID8((Oid8) arg);
}
/*
* non-persistent numeric series generator
*/
Datum
generate_series_int8(PG_FUNCTION_ARGS)
{
return generate_series_step_int8(fcinfo);
}
Datum
generate_series_step_int8(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
generate_series_fctx *fctx;
int64 result;
MemoryContext oldcontext;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
int64 start = PG_GETARG_INT64(0);
int64 finish = PG_GETARG_INT64(1);
int64 step = 1;
/* see if we were given an explicit step size */
if (PG_NARGS() == 3)
step = PG_GETARG_INT64(2);
if (step == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("step size cannot equal zero")));
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
* switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* allocate memory for user context */
fctx = palloc_object(generate_series_fctx);
/*
* Use fctx to keep state from call to call. Seed current with the
* original start value
*/
fctx->current = start;
fctx->finish = finish;
fctx->step = step;
funcctx->user_fctx = fctx;
MemoryContextSwitchTo(oldcontext);
}
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
/*
* get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
if ((fctx->step > 0 && fctx->current <= fctx->finish) ||
(fctx->step < 0 && fctx->current >= fctx->finish))
{
/*
* Increment current in preparation for next iteration. If next-value
* computation overflows, this is the final result.
*/
if (pg_add_s64_overflow(fctx->current, fctx->step, &fctx->current))
fctx->step = 0;
/* do when there is more left to send */
SRF_RETURN_NEXT(funcctx, Int64GetDatum(result));
}
else
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
/*
* Planner support function for generate_series(int8, int8 [, int8])
*/
Datum
generate_series_int8_support(PG_FUNCTION_ARGS)
{
    Node *rawreq = (Node *) PG_GETARG_POINTER(0);
    Node *ret = NULL;
    if (IsA(rawreq, SupportRequestRows))
    {
        /* Try to estimate the number of rows returned */
        SupportRequestRows *req = (SupportRequestRows *) rawreq;
        if (is_funcclause(req->node)) /* be paranoid */
        {
            List *args = ((FuncExpr *) req->node)->args;
            Node *arg1,
            *arg2,
            *arg3;
            /* We can use estimated argument values here */
            arg1 = estimate_expression_value(req->root, linitial(args));
            arg2 = estimate_expression_value(req->root, lsecond(args));
            if (list_length(args) >= 3)
                arg3 = estimate_expression_value(req->root, lthird(args));
            else
                arg3 = NULL;    /* two-argument form; step defaults to 1 below */
            /*
             * If any argument is constant NULL, we can safely assume that
             * zero rows are returned. Otherwise, if they're all non-NULL
             * constants, we can calculate the number of rows that will be
             * returned. Use double arithmetic to avoid overflow hazards.
             */
            if ((IsA(arg1, Const) &&
                 ((Const *) arg1)->constisnull) ||
                (IsA(arg2, Const) &&
                 ((Const *) arg2)->constisnull) ||
                (arg3 != NULL && IsA(arg3, Const) &&
                 ((Const *) arg3)->constisnull))
            {
                req->rows = 0;
                ret = (Node *) req;
            }
            else if (IsA(arg1, Const) &&
                     IsA(arg2, Const) &&
                     (arg3 == NULL || IsA(arg3, Const)))
            {
                double start,
                finish,
                step;
                start = DatumGetInt64(((Const *) arg1)->constvalue);
                finish = DatumGetInt64(((Const *) arg2)->constvalue);
                step = arg3 ? DatumGetInt64(((Const *) arg3)->constvalue) : 1;
                /* This equation works for either sign of step */
                if (step != 0)  /* guard against division by zero; make no estimate */
                {
                    req->rows = floor((finish - start + step) / step);
                    ret = (Node *) req;
                }
            }
        }
    }
    PG_RETURN_POINTER(ret);     /* ret stays NULL when no estimate was possible */
}
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Abstract
# (<http://abstract.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP module manifest for the Italian "Ateco codes" localisation.
    "name": "Ateco codes",
    "version": "1.0",
    "category": "Localisation/Italy",
    # Fix: "Funcionalities" -> "Functionalities" (typo in the user-visible
    # module description).
    "description": """Italian Localisation module - Ateco codes
Functionalities:
- Add Ateco codes model
- Reference Ateco codes to partner model
""",
    "author": "Abstract",
    "website": "http://abstract.it",
    "depends": [
        "base"
    ],
    "data": [
        "security/ir.model.access.csv",
        "view/ateco_view.xml",
        "view/partner_view.xml",
        "data/ateco_data.xml"
    ],
    "active": False,
    "installable": True
}
/*-------------------------------------------------------------------------
*
* pg_description.h
* definition of the "description" system catalog (pg_description)
*
* Because the contents of this table are taken from the *.dat files
* of other catalogs, there is no pg_description.dat file. The initial
* contents are assembled by genbki.pl and loaded during initdb.
*
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a function is identified by the OID of its pg_proc row
* plus the pg_class OID of table pg_proc. This allows unique identification
* of objects without assuming that OIDs are unique across tables.
*
* Since attributes don't have OIDs of their own, we identify an attribute
* comment by the objoid+classoid of its parent table, plus an "objsubid"
* giving the attribute column number. "objsubid" must be zero in a comment
* for a table itself, so that it is distinct from any column comment.
* Currently, objsubid is unused and zero for all other kinds of objects,
* but perhaps it might be useful someday to associate comments with
* constituent elements of other kinds of objects (arguments of a function,
* for example).
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_description.h
*
* NOTES
* The Catalog.pm module reads this file and derives schema
* information.
*
*-------------------------------------------------------------------------
*/
#ifndef PG_DESCRIPTION_H
#define PG_DESCRIPTION_H
#include "catalog/genbki.h"
#include "catalog/pg_description_d.h" /* IWYU pragma: export */
/* ----------------
* pg_description definition. cpp turns this into
* typedef struct FormData_pg_description
* ----------------
*/
CATALOG(pg_description,2609,DescriptionRelationId)
{
    Oid objoid; /* OID of object itself */
    Oid classoid; /* OID of table containing object */
    int32 objsubid; /* column number, or 0 if not used */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
    text description BKI_FORCE_NOT_NULL; /* description of object */
#endif
} FormData_pg_description;
/* ----------------
 * Form_pg_description corresponds to a pointer to a tuple with
 * the format of pg_description relation.
 * ----------------
 */
typedef FormData_pg_description * Form_pg_description;
/* TOAST relation so long description strings can be stored out of line */
DECLARE_TOAST(pg_description, 2834, 2835);
/* (objoid, classoid, objsubid) uniquely identifies a comment */
DECLARE_UNIQUE_INDEX_PKEY(pg_description_o_c_o_index, 2675, DescriptionObjIndexId, pg_description, btree(objoid oid_ops, classoid oid_ops, objsubid int4_ops));
/* We do not use BKI_LOOKUP here because it causes problems for genbki.pl */
DECLARE_FOREIGN_KEY((classoid), pg_class, (oid));
#endif /* PG_DESCRIPTION_H */ | c | github | https://github.com/postgres/postgres | src/include/catalog/pg_description.h |
from contextlib import contextmanager
from bok_choy.javascript import wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from common.test.acceptance.pages.common.utils import hover
from common.test.acceptance.pages.lms.course_page import CoursePage
from common.test.acceptance.tests.helpers import is_focused_on_element
class DiscussionPageMixin(object):
    """Helpers shared by the discussion page objects below."""

    def is_ajax_finished(self):
        """Return True once jQuery reports no outstanding AJAX requests."""
        active_count = self.browser.execute_script("return jQuery.active")
        return active_count == 0

    def find_visible_element(self, selector):
        """
        Find the first *visible* element matching ``selector`` (scoped to
        ``self.root_selector`` when one is set), or None if there is none.
        """
        scoped = self.root_selector + " " + selector if self.root_selector else selector
        for candidate in self.q(css=scoped):
            if candidate.is_displayed():
                return candidate
        return None

    @property
    def new_post_button(self):
        """
        Returns the new post button if visible, else it returns None.
        """
        return self.find_visible_element(".new-post-btn")

    @property
    def new_post_form(self):
        """
        Returns the new post form if visible, else it returns None.
        """
        return self.find_visible_element(".forum-new-post-form")

    def click_new_post_button(self):
        """Open the new-post form by clicking the 'New Post' button."""
        self.wait_for(
            lambda: self.new_post_button,
            description="Waiting for new post button"
        )
        self.new_post_button.click()
        self.wait_for(
            lambda: self.new_post_form,
            description="Waiting for new post form"
        )

    def click_cancel_new_post(self):
        """Dismiss the new-post form via its 'Cancel' button."""
        self.click_element(".cancel")
        self.wait_for(
            lambda: not self.new_post_form,
            "Waiting for new post form to close"
        )
class DiscussionThreadPage(PageObject, DiscussionPageMixin):
    """
    Page object scoped to a single discussion thread; every query is rooted at
    the CSS selector supplied to the constructor.

    Fix: the EmptyPromise description in ``endorse_response`` said
    "Response edit started" (copy-paste from ``start_response_edit``); it now
    reports the condition actually being awaited.
    """
    url = None

    def __init__(self, browser, thread_selector):
        super(DiscussionThreadPage, self).__init__(browser)
        self.thread_selector = thread_selector

    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this thread page
        """
        return self.q(css=self.thread_selector + " " + selector)

    def is_browser_on_page(self):
        # The page is "loaded" once the thread's root element is visible.
        return self.q(css=self.thread_selector).visible

    def _get_element_text(self, selector):
        """
        Returns the text of the first element matching the given selector, or
        None if no such element exists
        """
        text_list = self._find_within(selector).text
        return text_list[0] if text_list else None

    def is_element_visible(self, selector):
        """
        Returns true if the element matching the specified selector is visible.
        Args:
            selector (str): The CSS selector that matches the desired element.
        Returns:
            bool: True if the element is visible.
        """
        query = self._find_within(selector)
        return query.present and query.visible

    @contextmanager
    def secondary_action_menu_open(self, ancestor_selector):
        """
        Given the selector for an ancestor of a secondary menu, return a context
        manager that will open and close the menu
        """
        self.wait_for_ajax()
        self._find_within(ancestor_selector + " .action-more").click()
        EmptyPromise(
            lambda: self.is_element_visible(ancestor_selector + " .actions-dropdown"),
            "Secondary action menu opened"
        ).fulfill()
        yield
        # The menu may have been closed by the action performed inside the
        # `with` body; only toggle it if it is still open.
        if self.is_element_visible(ancestor_selector + " .actions-dropdown"):
            self._find_within(ancestor_selector + " .action-more").click()
            EmptyPromise(
                lambda: not self.is_element_visible(ancestor_selector + " .actions-dropdown"),
                "Secondary action menu closed"
            ).fulfill()

    def get_group_visibility_label(self):
        """
        Returns the group visibility label shown for the thread.
        """
        return self._get_element_text(".group-visibility-label")

    def get_response_total_text(self):
        """Returns the response count text, or None if not present"""
        self.wait_for_ajax()
        return self._get_element_text(".response-count")

    def get_num_displayed_responses(self):
        """Returns the number of responses actually rendered"""
        return len(self._find_within(".discussion-response"))

    def get_shown_responses_text(self):
        """Returns the shown response count text, or None if not present"""
        return self._get_element_text(".response-display-count")

    def get_load_responses_button_text(self):
        """Returns the load more responses button text, or None if not present"""
        return self._get_element_text(".load-response-button")

    def load_more_responses(self):
        """Clicks the load more responses button and waits for responses to load"""
        self._find_within(".load-response-button").click()
        EmptyPromise(
            self.is_ajax_finished,
            "Loading more Responses"
        ).fulfill()

    def has_add_response_button(self):
        """Returns true if the add response button is visible, false otherwise"""
        return self.is_element_visible(".add-response-btn")

    def has_discussion_reply_editor(self):
        """
        Returns true if the discussion reply editor is is visible
        """
        return self.is_element_visible(".discussion-reply-new")

    def click_add_response_button(self):
        """
        Clicks the add response button and ensures that the response text
        field receives focus
        """
        self._find_within(".add-response-btn").first.click()
        EmptyPromise(
            lambda: self._find_within(".discussion-reply-new textarea:focus").present,
            "Response field received focus"
        ).fulfill()

    @wait_for_js
    def is_response_editor_visible(self, response_id):
        """Returns true if the response editor is present, false otherwise"""
        return self.is_element_visible(".response_{} .edit-post-body".format(response_id))

    @wait_for_js
    def is_discussion_body_visible(self):
        """Returns true if the thread's post body is visible."""
        return self.is_element_visible(".post-body")

    def verify_mathjax_preview_available(self):
        """ Checks that MathJax Preview css class is present """
        self.wait_for(
            lambda: len(self.q(css=".MathJax_Preview").text) > 0 and self.q(css=".MathJax_Preview").text[0] == "",
            description="MathJax Preview is rendered"
        )

    def verify_mathjax_rendered(self):
        """ Checks that MathJax css class is present """
        self.wait_for(
            lambda: self.is_element_visible(".MathJax_SVG"),
            description="MathJax Preview is rendered"
        )

    def is_response_visible(self, comment_id):
        """Returns true if the response is viewable onscreen"""
        self.wait_for_ajax()
        return self.is_element_visible(".response_{} .response-body".format(comment_id))

    def is_response_editable(self, response_id):
        """Returns true if the edit response button is present, false otherwise"""
        with self.secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            return self.is_element_visible(".response_{} .discussion-response .action-edit".format(response_id))

    def is_response_deletable(self, response_id):
        """
        Returns true if the delete response button is present, false otherwise
        """
        with self.secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            return self.is_element_visible(".response_{} .discussion-response .action-delete".format(response_id))

    def get_response_body(self, response_id):
        """Returns the body text of the given response."""
        return self._get_element_text(".response_{} .response-body".format(response_id))

    def start_response_edit(self, response_id):
        """Click the edit button for the response, loading the editing view"""
        with self.secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-edit".format(response_id)).first.click()
            EmptyPromise(
                lambda: self.is_response_editor_visible(response_id),
                "Response edit started"
            ).fulfill()

    def get_link_href(self):
        """Extracts href attribute of the referenced link"""
        link_href = self._find_within(".post-body p a").attrs('href')
        return link_href[0] if link_href else None

    def get_response_vote_count(self, response_id):
        """Returns the displayed vote count text for the given response."""
        vote_count_css = '.response_{} .discussion-response .action-vote'.format(response_id)
        vote_count_element = self.browser.find_element_by_css_selector(vote_count_css)
        # To get the vote count, one must hover over the element first.
        hover(self.browser, vote_count_element)
        return self._get_element_text(".response_{} .discussion-response .action-vote .vote-count".format(response_id))

    def vote_response(self, response_id):
        """Vote for the response and wait for the displayed count to change."""
        current_count = self.get_response_vote_count(response_id)
        self._find_within(".response_{} .discussion-response .action-vote".format(response_id)).first.click()
        self.wait_for(
            lambda: current_count != self.get_response_vote_count(response_id),
            description="Vote updated for {response_id}".format(response_id=response_id)
        )

    def cannot_vote_response(self, response_id):
        """Assert that the voting button is not visible on this response"""
        return not self.is_element_visible(".response_{} .discussion-response .action-vote".format(response_id))

    def is_response_reported(self, response_id):
        """Returns true if the response carries the 'reported' label."""
        return self.is_element_visible(".response_{} .discussion-response .post-label-reported".format(response_id))

    def report_response(self, response_id):
        """Report the response via its secondary menu and wait for the label."""
        with self.secondary_action_menu_open(".response_{} .discussion-response".format(response_id)):
            self._find_within(".response_{} .discussion-response .action-report".format(response_id)).first.click()
            self.wait_for_ajax()
            EmptyPromise(
                lambda: self.is_response_reported(response_id),
                "Response is reported"
            ).fulfill()

    def cannot_report_response(self, response_id):
        """Assert that the reporting button is not visible on this response"""
        return not self.is_element_visible(".response_{} .discussion-response .action-report".format(response_id))

    def is_response_endorsed(self, response_id):
        """Returns true if the response's posted details mention endorsement."""
        return "endorsed" in self._get_element_text(".response_{} .discussion-response .posted-details".format(response_id))

    def endorse_response(self, response_id):
        """Endorse the response and wait for the endorsement to be shown."""
        self._find_within(".response_{} .discussion-response .action-endorse".format(response_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: self.is_response_endorsed(response_id),
            # Was "Response edit started" — a copy-paste error that produced a
            # misleading failure message; describe the awaited condition.
            "Response is endorsed"
        ).fulfill()

    def set_response_editor_value(self, response_id, new_body):
        """Replace the contents of the response editor"""
        self._find_within(".response_{} .discussion-response .wmd-input".format(response_id)).fill(new_body)

    def verify_link_editor_error_messages_shown(self):
        """
        Confirm that the error messages are displayed in the editor.
        """
        def errors_visible():
            """
            Returns True if both errors are visible, False otherwise.
            """
            return (
                self.q(css="#new-url-input-field-message.has-error").visible and
                self.q(css="#new-url-desc-input-field-message.has-error").visible
            )
        self.wait_for(errors_visible, "Form errors should be visible.")

    def add_content_via_editor_button(self, content_type, response_id, url, description, is_decorative=False):
        """Replace the contents of the response editor"""
        self._find_within(
            "#wmd-{}-button-edit-post-body-{}".format(
                content_type,
                response_id,
            )
        ).click()
        self.q(css='#new-url-input').fill(url)
        self.q(css='#new-url-desc-input').fill(description)
        if is_decorative:
            self.q(css='#img-is-decorative').click()
        self.q(css='input[value="OK"]').click()

    def submit_response_edit(self, response_id, new_response_body):
        """Click the submit button on the response editor"""
        def submit_response_check_func():
            """
            Tries to click "Update post" and returns True if the post
            was successfully updated, False otherwise.
            """
            self._find_within(
                ".response_{} .discussion-response .post-update".format(
                    response_id
                )
            ).first.click()
            return (
                not self.is_response_editor_visible(response_id) and
                self.is_response_visible(response_id) and
                self.get_response_body(response_id) == new_response_body
            )
        self.wait_for(submit_response_check_func, "Comment edit succeeded")

    def is_show_comments_visible(self, response_id):
        """Returns true if the "show comments" link is visible for a response"""
        return self.is_element_visible(".response_{} .action-show-comments".format(response_id))

    def show_comments(self, response_id):
        """Click the "show comments" link for a response"""
        self._find_within(".response_{} .action-show-comments".format(response_id)).first.click()
        EmptyPromise(
            lambda: self.is_element_visible(".response_{} .comments".format(response_id)),
            "Comments shown"
        ).fulfill()

    def is_add_comment_visible(self, response_id):
        """Returns true if the "add comment" form is visible for a response"""
        return self.is_element_visible("#wmd-input-comment-body-{}".format(response_id))

    def is_comment_visible(self, comment_id):
        """Returns true if the comment is viewable onscreen"""
        return self.is_element_visible("#comment_{} .response-body".format(comment_id))

    def get_comment_body(self, comment_id):
        """Returns the body text of the given comment."""
        return self._get_element_text("#comment_{} .response-body".format(comment_id))

    def is_comment_deletable(self, comment_id):
        """Returns true if the delete comment button is present, false otherwise"""
        with self.secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self.is_element_visible("#comment_{} .action-delete".format(comment_id))

    def delete_comment(self, comment_id):
        """Delete the comment, accepting the confirmation alert."""
        with self.handle_alert():
            with self.secondary_action_menu_open("#comment_{}".format(comment_id)):
                self._find_within("#comment_{} .action-delete".format(comment_id)).first.click()
        EmptyPromise(
            lambda: not self.is_comment_visible(comment_id),
            "Deleted comment was removed"
        ).fulfill()

    def is_comment_editable(self, comment_id):
        """Returns true if the edit comment button is present, false otherwise"""
        with self.secondary_action_menu_open("#comment_{}".format(comment_id)):
            return self.is_element_visible("#comment_{} .action-edit".format(comment_id))

    def is_comment_editor_visible(self, comment_id):
        """Returns true if the comment editor is present, false otherwise"""
        return self.is_element_visible(".edit-comment-body[data-id='{}']".format(comment_id))

    def _get_comment_editor_value(self, comment_id):
        """Returns the current text inside the comment editor."""
        return self._find_within("#wmd-input-edit-comment-body-{}".format(comment_id)).text[0]

    def start_comment_edit(self, comment_id):
        """Click the edit button for the comment, loading the editing view"""
        old_body = self.get_comment_body(comment_id)
        with self.secondary_action_menu_open("#comment_{}".format(comment_id)):
            self._find_within("#comment_{} .action-edit".format(comment_id)).first.click()
            EmptyPromise(
                lambda: (
                    self.is_comment_editor_visible(comment_id) and
                    not self.is_comment_visible(comment_id) and
                    self._get_comment_editor_value(comment_id) == old_body
                ),
                "Comment edit started"
            ).fulfill()

    def set_comment_editor_value(self, comment_id, new_body):
        """Replace the contents of the comment editor"""
        self._find_within("#comment_{} .wmd-input".format(comment_id)).fill(new_body)

    def submit_comment_edit(self, comment_id, new_comment_body):
        """Click the submit button on the comment editor"""
        self._find_within("#comment_{} .post-update".format(comment_id)).first.click()
        self.wait_for_ajax()
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == new_comment_body
            ),
            "Comment edit succeeded"
        ).fulfill()

    def cancel_comment_edit(self, comment_id, original_body):
        """Click the cancel button on the comment editor"""
        self._find_within("#comment_{} .post-cancel".format(comment_id)).first.click()
        EmptyPromise(
            lambda: (
                not self.is_comment_editor_visible(comment_id) and
                self.is_comment_visible(comment_id) and
                self.get_comment_body(comment_id) == original_body
            ),
            "Comment edit was canceled"
        ).fulfill()
class DiscussionSortPreferencePage(CoursePage):
    """
    Discussion board page with the thread-sorting control.
    """

    def __init__(self, browser, course_id):
        super(DiscussionSortPreferencePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum"

    def is_browser_on_page(self):
        """Loaded once the sort control is present on the discussion body."""
        return self.q(css="body.discussion .forum-nav-sort-control").present

    def show_all_discussions(self):
        """ Show the list of all discussions. """
        self.q(css=".all-topics").click()

    def get_selected_sort_preference(self):
        """
        Return the currently-selected sort option's value.
        """
        # Using this workaround (execute script) to make this test work with Chrome browser
        return self.browser.execute_script(
            'var selected_value = $(".forum-nav-sort-control").val(); return selected_value')

    def change_sort_preference(self, sort_by):
        """
        Select the sort option whose value is ``sort_by``.
        """
        option_css = ".forum-nav-sort-control option[value='{}']".format(sort_by)
        self.q(css=option_css).click()
        # Click initiates an ajax call, waiting for it to complete
        self.wait_for_ajax()

    def refresh_page(self):
        """
        Reload the page.
        """
        self.browser.refresh()
class DiscussionTabSingleThreadPage(CoursePage):
    """Single-thread view on the course discussion tab; delegates most
    behavior to a wrapped DiscussionThreadPage."""

    def __init__(self, browser, course_id, discussion_id, thread_id):
        super(DiscussionTabSingleThreadPage, self).__init__(browser, course_id)
        article_selector = "body.discussion .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
        self.thread_page = DiscussionThreadPage(browser, article_selector)
        self.url_path = "discussion/forum/{discussion_id}/threads/{thread_id}".format(
            discussion_id=discussion_id, thread_id=thread_id
        )

    def is_browser_on_page(self):
        return self.thread_page.is_browser_on_page()

    def __getattr__(self, name):
        # Fall back to the wrapped thread page for anything not defined here.
        return getattr(self.thread_page, name)

    def show_all_discussions(self):
        """ Show the list of all discussions. """
        self.q(css=".all-topics").click()

    def close_open_thread(self):
        """Close the currently-open thread via its secondary action menu."""
        with self.thread_page.secondary_action_menu_open(".thread-main-wrapper"):
            self._find_within(".thread-main-wrapper .action-close").first.click()

    def _thread_is_rendered_successfully(self, thread_id):
        return self.q(css=".discussion-article[data-id='{}']".format(thread_id)).visible

    def click_and_open_thread(self, thread_id):
        """
        Click specific thread on the list.
        """
        self.show_all_discussions()
        self.q(css="li[data-id='{}']".format(thread_id)).first.click()
        EmptyPromise(
            lambda: self._thread_is_rendered_successfully(thread_id),
            "Thread has been rendered"
        ).fulfill()

    def check_threads_rendered_successfully(self, thread_count):
        """
        Count the number of threads available on page.
        """
        return thread_count == len(self.q(css=".forum-nav-thread").results)
class InlineDiscussionPage(PageObject, DiscussionPageMixin):
    """
    Page object for a discussion module embedded inline in courseware.
    """
    url = None

    def __init__(self, browser, discussion_id):
        super(InlineDiscussionPage, self).__init__(browser)
        self.root_selector = (
            ".discussion-module[data-discussion-id='{discussion_id}'] ".format(
                discussion_id=discussion_id
            )
        )

    def _find_within(self, selector):
        """
        Returns a query corresponding to the given CSS selector within the scope
        of this discussion page
        """
        return self.q(css="{} {}".format(self.root_selector, selector))

    def is_browser_on_page(self):
        self.wait_for_ajax()
        return self.q(css=self.root_selector).present

    def is_discussion_expanded(self):
        """True once the discussion body has been expanded."""
        return self._find_within(".discussion").present

    def expand_discussion(self):
        """Click the link to expand the discussion"""
        self._find_within(".discussion-show").first.click()
        EmptyPromise(
            self.is_discussion_expanded,
            "Discussion expanded"
        ).fulfill()

    def get_num_displayed_threads(self):
        """Number of threads rendered in the thread list."""
        return len(self._find_within(".forum-nav-thread"))

    def element_exists(self, selector):
        """True when ``selector`` matches anything inside this module."""
        return self.q(css=self.root_selector + " " + selector).present

    def click_element(self, selector):
        """Wait for ``selector`` inside the module, then click it."""
        self.wait_for_element_presence(
            "{discussion} {selector}".format(discussion=self.root_selector, selector=selector),
            "{selector} is visible".format(selector=selector)
        )
        self._find_within(selector).click()

    def is_new_post_button_visible(self):
        """
        Check if new post button present and visible
        """
        return self._is_element_visible('.new-post-btn')

    @wait_for_js
    def _is_element_visible(self, selector):
        matches = self._find_within(selector)
        return matches.present and matches.visible

    def show_thread(self, thread_id):
        """
        Clicks the link for the specified thread to show the detailed view.
        """
        self.wait_for_element_presence('.forum-nav-thread-link', 'Thread list has loaded')
        link_css = ".forum-nav-thread[data-id='{thread_id}'] .forum-nav-thread-link".format(thread_id=thread_id)
        self._find_within(link_css).first.click()
        self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.wait_for_page()
class InlineDiscussionThreadPage(DiscussionThreadPage):
    """
    Thread view inside an inline (courseware) discussion module.
    """

    def __init__(self, browser, thread_id):
        article_selector = ".discussion-module .discussion-article[data-id='{thread_id}']".format(thread_id=thread_id)
        super(InlineDiscussionThreadPage, self).__init__(browser, article_selector)

    def is_thread_anonymous(self):
        """True when the posted details show no username."""
        return not self.q(css=".posted-details > .username").present

    @wait_for_js
    def check_if_selector_is_focused(self, selector):
        """
        Check if selector is focused
        """
        return is_focused_on_element(self.browser, selector)
class DiscussionUserProfilePage(CoursePage):
    """Forum profile page listing a single user's threads."""

    TEXT_NEXT = u'Next >'
    TEXT_PREV = u'< Previous'
    PAGING_SELECTOR = ".discussion-pagination[data-page-number]"

    def __init__(self, browser, course_id, user_id, username, page=1):
        super(DiscussionUserProfilePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/dummy/users/{}?page={}".format(user_id, page)
        self.username = username

    def is_browser_on_page(self):
        """Loaded once the user's thread list and matching username render."""
        threads_css = '.discussion-user-threads[data-course-id="{}"]'.format(self.course_id)
        return (
            self.q(css=threads_css).present
            and
            self.q(css='.user-name').present
            and
            self.q(css='.user-name').text[0] == self.username
        )

    @wait_for_js
    def is_window_on_top(self):
        """True when the window is scrolled to the very top."""
        return self.browser.execute_script("return $('html, body').offset().top") == 0

    def get_shown_thread_ids(self):
        """Return the data-id attribute of every displayed thread."""
        return [elem.get_attribute("data-id") for elem in self.q(css="li.forum-nav-thread")]

    def click_on_sidebar_username(self):
        """Click the username link in the sidebar once the page is ready."""
        self.wait_for_page()
        self.q(css='.user-name').first.click()

    def get_user_roles(self):
        """Get user roles"""
        return self.q(css='.user-roles').text[0]
class DiscussionTabHomePage(CoursePage, DiscussionPageMixin):
    """Landing page of the course discussion tab (forum home)."""

    ALERT_SELECTOR = ".discussion-body .forum-nav .search-alert"

    def __init__(self, browser, course_id):
        super(DiscussionTabHomePage, self).__init__(browser, course_id)
        self.url_path = "discussion/forum/"
        self.root_selector = None

    def is_browser_on_page(self):
        """Loaded once the forum home header is present."""
        return self.q(css=".discussion-body section.home-header").present

    def perform_search(self, text="dummy"):
        """Type ``text`` into the search box, submit, and wait for results."""
        self.q(css=".search-input").fill(text + "\n")
        EmptyPromise(
            self.is_ajax_finished,
            "waiting for server to return result"
        ).fulfill()

    def is_element_visible(self, selector):
        """
        Returns true if the element matching the specified selector is visible.
        """
        matches = self.q(css=selector)
        return matches.present and matches.visible

    def is_checkbox_selected(self, selector):
        """
        Returns true or false depending upon the matching checkbox is checked.
        """
        return self.q(css=selector).selected

    def refresh_and_wait_for_load(self):
        """
        Refresh the page and wait for all resources to load.
        """
        self.browser.refresh()
        self.wait_for_page()

    def get_search_alert_messages(self):
        """Text of every visible search alert message."""
        return self.q(css=self.ALERT_SELECTOR + " .message").text

    def get_search_alert_links(self):
        """Query matching the 'jump' links inside search alerts."""
        return self.q(css=self.ALERT_SELECTOR + " .link-jump")

    def dismiss_alert_message(self, text):
        """
        dismiss any search alert message containing the specified text.
        """
        def _matching_alerts():
            # Alerts are matched by substring of their rendered text.
            return self.q(css=".search-alert").filter(lambda elem: text in elem.text)

        for alert_id in _matching_alerts().attrs("id"):
            self.q(css="{}#{} .dismiss".format(self.ALERT_SELECTOR, alert_id)).click()
        EmptyPromise(
            lambda: _matching_alerts().results == [],
            "waiting for dismissed alerts to disappear"
        ).fulfill()

    def click_element(self, selector):
        """
        Clicks the element specified by selector
        """
        return self.q(css=selector).click()

    def set_new_post_editor_value(self, new_body):
        """
        Set the Discussions new post editor (wmd) with the content in new_body
        """
        self.q(css=".wmd-input").fill(new_body)

    def get_new_post_preview_value(self, selector=".wmd-preview > *"):
        """
        Get the rendered preview of the contents of the Discussions new post editor
        Waits for content to appear, as the preview is triggered on debounced/delayed onchange
        """
        self.wait_for_element_visibility(selector, "WMD preview pane has contents", timeout=10)
        return self.q(css=".wmd-preview").html[0]

    def get_new_post_preview_text(self):
        """
        Get the rendered preview of the contents of the Discussions new post editor
        Waits for content to appear, as the preview is triggered on debounced/delayed onchange
        """
        self.wait_for_element_visibility(".wmd-preview > div", "WMD preview pane has contents", timeout=10)
        return self.q(css=".wmd-preview").text[0]
# =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Demonstration of using rotation limits on a revolute joint. Note that this
# capability is only available for ChLinkLockRevolute. It is not available
# for ChLinkRevolute (which uses a different formulation).
#
# Recall that Irrlicht uses a left-hand frame, so everything is rendered with
# left and right flipped.
#
# =============================================================================
import pychrono as chrono
import pychrono.irrlicht as irr
import math as m
print("Copyright (c) 2017 projectchrono.org")
# Physical system using the non-smooth-contact (NSC) formulation.
system = chrono.ChSystemNSC()
# Create ground body
ground = chrono.ChBody()
system.AddBody(ground)
ground.SetIdentifier(-1)
ground.SetBodyFixed(True)
ground.SetCollide(False)
# Visualization for revolute joint
cyl_rev = chrono.ChCylinderShape()
cyl_rev.GetCylinderGeometry().p1 = chrono.ChVectorD(0, 0, 0.2)
cyl_rev.GetCylinderGeometry().p2 = chrono.ChVectorD(0, 0, -0.2)
cyl_rev.GetCylinderGeometry().rad = 0.04
ground.AddAsset(cyl_rev)
# Create a pendulum body
pend = chrono.ChBody()
system.AddBody(pend)
pend.SetIdentifier(1)
pend.SetBodyFixed(False)
pend.SetCollide(False)
pend.SetMass(1)
pend.SetInertiaXX(chrono.ChVectorD(0.2, 1, 1))
# Initial position of the pendulum (horizontal, pointing towards positive X).
pend.SetPos(chrono.ChVectorD(1.5, 0, 0))
# Attach visualization assets.
cyl_p = chrono.ChCylinderShape()
cyl_p.GetCylinderGeometry().p1 = chrono.ChVectorD(-1.46, 0, 0)
cyl_p.GetCylinderGeometry().p2 = chrono.ChVectorD(1.46, 0, 0)
cyl_p.GetCylinderGeometry().rad = 0.2
pend.AddAsset(cyl_p)
col_p = chrono.ChColorAsset()
col_p.SetColor(chrono.ChColor(0.6, 0, 0))
pend.AddAsset(col_p)
# Create a revolute joint to connect pendulum to ground
rev = chrono.ChLinkLockRevolute()
system.AddLink(rev)
# Add limits to the Z rotation of the revolute joint (angles in radians)
min_angle = 0
max_angle = 0.75 * m.pi
rev.GetLimit_Rz().SetActive(True)
rev.GetLimit_Rz().SetMin(min_angle)
rev.GetLimit_Rz().SetMax(max_angle)
# Initialize the joint specifying a coordinate system (expressed in the absolute frame).
rev.Initialize(ground, pend, chrono.ChCoordsysD(chrono.VNULL, chrono.QUNIT))
# Create the Irrlicht application
application = irr.ChIrrApp(system, "Limits on LinkLockRevolute demo", irr.dimension2du(800, 600))
application.AddTypicalLogo()
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(irr.vector3df(-2, 1.5, 5))
application.AssetBindAll()
application.AssetUpdateAll()
# Points for drawing line segments: rays from the origin along the min/max
# rotation limits (Y negated for the left-handed Irrlicht rendering frame —
# see the header note about flipped rendering).
p0 = chrono.ChVectorD(0, 0, 0)
p1 = chrono.ChVectorD(m.cos(min_angle), -m.sin(min_angle), 0)
p2 = chrono.ChVectorD(m.cos(max_angle), -m.sin(max_angle), 0)
# Simulation loop
application.SetTimestep(0.01)
application.SetTryRealtime(True)
while (application.GetDevice().run()) :
    application.BeginScene()
    application.DrawAll()
    irr.drawSegment(application.GetVideoDriver(), p0, p0 + p1 * 4, irr.SColor(255, 255, 150, 0), True);
    irr.drawSegment(application.GetVideoDriver(), p0, p0 + p2 * 4, irr.SColor(255, 255, 150, 0), True);
    application.DoStep()
    application.EndScene()
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# more portable than they had been thought to be.
import os
import unittest
import warnings
from test import test_support
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
class TemporaryFileTests(unittest.TestCase):
def setUp(self):
self.files = []
os.mkdir(test_support.TESTFN)
def tearDown(self):
for name in self.files:
os.unlink(name)
os.rmdir(test_support.TESTFN)
def check_tempfile(self, name):
# make sure it doesn't already exist:
self.failIf(os.path.exists(name),
"file already exists for temporary file")
# make sure we can create the file
open(name, "w")
self.files.append(name)
def test_tempnam(self):
if not hasattr(os, "tempnam"):
return
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
self.check_tempfile(os.tempnam())
name = os.tempnam(test_support.TESTFN)
self.check_tempfile(name)
name = os.tempnam(test_support.TESTFN, "pfx")
self.assert_(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
def test_tmpfile(self):
if not hasattr(os, "tmpfile"):
return
fp = os.tmpfile()
fp.write("foobar")
fp.seek(0,0)
s = fp.read()
fp.close()
self.assert_(s == "foobar")
def test_tmpnam(self):
import sys
if not hasattr(os, "tmpnam"):
return
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
name = os.tmpnam()
if sys.platform in ("win32",):
# The Windows tmpnam() seems useless. From the MS docs:
#
# The character string that tmpnam creates consists of
# the path prefix, defined by the entry P_tmpdir in the
# file STDIO.H, followed by a sequence consisting of the
# digit characters '0' through '9'; the numerical value
# of this string is in the range 1 - 65,535. Changing the
# definitions of L_tmpnam or P_tmpdir in STDIO.H does not
# change the operation of tmpnam.
#
# The really bizarre part is that, at least under MSVC6,
# P_tmpdir is "\\". That is, the path returned refers to
# the root of the current drive. That's a terrible place to
# put temp files, and, depending on privileges, the user
# may not even be able to open a file in the root directory.
self.failIf(os.path.exists(name),
"file already exists for temporary file")
else:
self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEquals(result[stat.ST_SIZE], 3)
self.assertEquals(result.st_size, 3)
import sys
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
self.assertEquals(getattr(result, attr),
result[getattr(stat, name)])
self.assert_(attr in members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructr with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
import statvfs
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
import errno
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEquals(result.f_bfree, result[statvfs.F_BFREE])
# Make sure all the attributes are there
members = dir(result)
for name in dir(statvfs):
if name[:2] == 'F_':
attr = name.lower()
self.assertEquals(getattr(result, attr),
result[getattr(statvfs, name)])
self.assert_(attr in members)
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructr with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
from test_userdict import TestMappingProtocol
class EnvironTests(TestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
_tested_class = None
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
def setUp(self):
self.__save = dict(os.environ)
os.environ.clear()
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ just a file kid
# tmp3
sub1_path = join(test_support.TESTFN, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(test_support.TESTFN, "SUB2")
tmp1_path = join(test_support.TESTFN, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
for path in tmp1_path, tmp2_path, tmp3_path:
f = file(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
# Walk top-down.
all = list(os.walk(test_support.TESTFN))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (test_support.TESTFN, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], (sub2_path, [], ["tmp3"]))
# Prune the search.
all = []
for root, dirs, files in os.walk(test_support.TESTFN):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (test_support.TESTFN, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], (sub2_path, [], ["tmp3"]))
# Walk bottom-up.
all = list(os.walk(test_support.TESTFN, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (test_support.TESTFN, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], (sub2_path, [], ["tmp3"]))
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
for name in files:
os.remove(join(root, name))
for name in dirs:
os.rmdir(join(root, name))
os.rmdir(test_support.TESTFN)
def test_main():
test_support.run_unittest(
TemporaryFileTests,
StatAttributeTests,
EnvironTests,
WalkTests
)
if __name__ == "__main__":
test_main() | unknown | codeparrot/codeparrot-clean | ||
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='pdf2image',
version='1.5.1',
description='A wrapper around the pdftoppm and pdftocairo command line tools to convert PDF to a PIL Image list.',
url='https://github.com/Belval/pdf2image',
author='Edouard Belval',
author_email='edouard@belval.org',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='pdf image png jpeg jpg convert',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'pillow',
]
) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from datetime import date
from ..parsers import TagParser
from .base import BaseCinemaSpider
class Spider(BaseCinemaSpider):
name = 'praha-bioponrepo'
calendar_url = 'http://www.bio-ponrepo.cz/program.html'
calendar_next = ".//*[@class='programme']/form"
def iter_calendar_next_formdata(self):
year = date.today().year
formdata = {
'filterFor': 'all',
'filterAction': '2', # -- film --
'filterDay': '0', # -- vše --
}
yield dict(filterYear=str(year), **formdata)
yield dict(filterYear=str(year + 1), **formdata)
calendar_showtime_element = ".//*[@class='programme']//tr[th]"
calendar_showtime = [
('title', ".//a/@title|.//a/text()"),
('film_url', ".//a/@href"),
('showtime_time', ".//td[1]//text()"),
('showtime_date', ".//th[2]//text()"),
('tags', "./following-sibling::tr[1]//td[3]//a", TagParser()),
('info', "./following-sibling::tr[1]//td[3]//text()"),
]
film = [
('title', "//*[@class='film-detail']/text()"),
('info',
"//*[@class='film-detail']/ancestor::*[@class='container']"
"//text()[following::img[@class='film-picture'] and "
"preceding::*[@class='film-detail']]"),
('description',
"//*[@class='film-detail']/ancestor::*[@class='container']"
"//text()[preceding::img[@class='film-picture']]"),
('poster_urls', "//img[@class='film-picture']/@src"),
] | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"go.uber.org/zap"
"go.etcd.io/etcd/server/v3/storage/backend"
)
type action interface {
// unsafeDo executes the action and returns revert action, when executed
// should restore the state from before.
unsafeDo(tx backend.UnsafeReadWriter) (revert action, err error)
}
type setKeyAction struct {
Bucket backend.Bucket
FieldName []byte
FieldValue []byte
}
func (a setKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue)
return revert, nil
}
type deleteKeyAction struct {
Bucket backend.Bucket
FieldName []byte
}
func (a deleteKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
tx.UnsafeDelete(a.Bucket, a.FieldName)
return revert, nil
}
func restoreFieldValueAction(tx backend.UnsafeReader, bucket backend.Bucket, fieldName []byte) action {
_, vs := tx.UnsafeRange(bucket, fieldName, nil, 1)
if len(vs) == 1 {
return &setKeyAction{
Bucket: bucket,
FieldName: fieldName,
FieldValue: vs[0],
}
}
return &deleteKeyAction{
Bucket: bucket,
FieldName: fieldName,
}
}
type ActionList []action
// unsafeExecute executes actions one by one. If one of actions returns error,
// it will revert them.
func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) error {
revertActions := make(ActionList, 0, len(as))
for _, a := range as {
revert, err := a.unsafeDo(tx)
if err != nil {
revertActions.unsafeExecuteInReversedOrder(lg, tx)
return err
}
revertActions = append(revertActions, revert)
}
return nil
}
// unsafeExecuteInReversedOrder executes actions in revered order. Will panic on
// action error. Should be used when reverting.
func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.UnsafeReadWriter) {
for j := len(as) - 1; j >= 0; j-- {
_, err := as[j].unsafeDo(tx)
if err != nil {
lg.Panic("Cannot recover from revert error", zap.Error(err))
}
}
} | go | github | https://github.com/etcd-io/etcd | server/storage/schema/actions.go |
# -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush() | unknown | codeparrot/codeparrot-clean | ||
import simplejson as json
from mongrel2 import handler
sender_id = "82209006-86FF-4982-B5EA-D1E29E55D481"
conn = handler.Connection(sender_id, "tcp://127.0.0.1:9999",
"tcp://127.0.0.1:9998")
users = {}
user_list = []
while True:
try:
req = conn.recv_json()
except:
print "FAILED RECV JSON"
continue
data = req.data
print "DATA", data, req.conn_id
if data["type"] == "join":
conn.deliver_json(req.sender, users.keys(), data)
users[req.conn_id] = data['user']
user_list = [u[1] for u in users.items()]
conn.reply_json(req, {'type': 'userList', 'users': user_list})
elif data["type"] == "disconnect":
print "DISCONNECTED", req.conn_id
if req.conn_id in users:
data['user'] = users[req.conn_id]
del users[req.conn_id]
if len(users.keys()) > 0:
conn.deliver_json(req.sender, users.keys(), data)
user_list = [u[1] for u in users.items()]
elif req.conn_id not in users:
users[req.conn_id] = data['user']
elif data['type'] == "msg":
conn.deliver_json(req.sender, users.keys(), data)
print "REGISTERED USERS:", len(users) | unknown | codeparrot/codeparrot-clean | ||
import doctest
import pytest
from insights.parsers import abrt_ccpp
from insights.parsers.abrt_ccpp import AbrtCCppConf
from insights.tests import context_wrap
from insights.parsers import SkipException
ABRT_CONF_CONTENT = """
# Configuration file for CCpp hook
# CCpp hook writes its template to the "/proc/sys/kernel/core_pattern" file
# and stores the original template in the "/var/run/abrt/saved_core_pattern"
# file. If you want CCpp hook to create a core dump file named according to
# the original template as well, set 'MakeCompatCore' to 'yes'.
# If the original template string starts with "|", the string "core" is used
# instead of the template.
# For more information about naming core dump files see 'man 5 core'.
MakeCompatCore = yes
# The option allows you to set limit for the core file size in MiB.
#
# This value is compared to value of the MaxCrashReportSize configuration
# option from (/etc/abrt.conf) and the lower value is used as the limit.
#
# If MaxCoreFileSize is 0 then the value of MaxCrashReportSize is the limit.
# If MaxCrashReportSize is 0 then the value of MaxCoreFileSize is the limit.
# If both values are 0 then the core file size is unlimited.
MaxCoreFileSize = 0
# Do you want a copy of crashed binary be saved?
# (useful, for example, when _deleted binary_ segfaults)
SaveBinaryImage = no
# When this option is set to 'yes', core backtrace is generated
# from the memory image of the crashing process. Only the crash
# thread is present in the backtrace.
CreateCoreBacktrace = yes
# Save full coredump? If set to 'no', coredump won't be saved
# and you won't be able to report the crash to Bugzilla. Only
# useful with CreateCoreBacktrace set to 'yes'. Please
# note that if this option is set to 'no' and MakeCompatCore
# is set to 'yes', the core is still written to the current
# directory.
SaveFullCore = yes
# Used for debugging the hook
#VerboseLog = 2
# Specify where you want to store debuginfos (default: /var/cache/abrt-di)
#
DebuginfoLocation = /var/cache/abrt-di
# ABRT will ignore crashes in executables whose absolute path matches one of
# specified patterns.
#
#IgnoredPaths =
# ABRT will process only crashes of either allowed users or users who are
# members of allowed group. If no allowed users nor allowed group are specified
# ABRT will process crashes of all users.
#
#AllowedUsers =
#AllowedGroups =
""".strip()
ABRT_CONF_CONTENT_NO = """
""".strip()
def test_empty_content():
with pytest.raises(SkipException):
AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT_NO))
def test_abrt_class():
abrt_obj = AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
assert abrt_obj.get('CreateCoreBacktrace', '').lower() == 'yes'
assert abrt_obj.get('DebuginfoLocation', '').lower() == '/var/cache/abrt-di'
assert abrt_obj.get('Debuginfo', '').lower() == ''
def test_docs():
env = {
'abrt_conf': AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
}
failed, total = doctest.testmod(abrt_ccpp, globs=env)
assert failed == 0 | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utility to convert Gemma models from Orbax to HF Transformers checkpoint.
python src/transformers/models/gemma3n/convert_gemma3n_weights.py \
--variant='gemma3n_e4b' \
--tokenizer_path="$HOME/tokenizers/gemma-3n-tokenizer.model" \
--checkpoint_path="$HOME/checkpoints/gemma-3n-orbax/" \
--output_path="$HOME/checkpoints/gemma-3n-safetensors/"
"""
import json
import os
import re
from collections.abc import Iterable, Mapping
from typing import Any
import accelerate
import numpy as np
import torch
import tree
from absl import app, flags, logging
from orbax import checkpoint as obc
from transformers import (
Gemma3nAudioConfig,
Gemma3nAudioFeatureExtractor,
Gemma3nConfig,
Gemma3nForConditionalGeneration,
Gemma3nProcessor,
Gemma3nTextConfig,
Gemma3nVisionConfig,
GemmaTokenizerFast,
GenerationConfig,
SiglipImageProcessorFast,
)
from transformers.image_utils import PILImageResampling
# ==== Internal Constants and Classes ====
_CHAT_TEMPLATE = """{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif -%}
{%- if (message['role'] == 'assistant') -%}
{%- set role = "model" -%}
{%- else -%}
{%- set role = message['role'] -%}
{%- endif -%}
{{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else "") }}
{%- if message['content'] is string -%}
{{ message['content'] | trim }}
{%- elif message['content'] is iterable -%}
{%- for item in message['content'] -%}
{%- if item['type'] == 'audio' -%}
{{ '<audio_soft_token>' }}
{%- elif item['type'] == 'image' -%}
{{ '<image_soft_token>' }}
{%- elif item['type'] == 'text' -%}
{{ item['text'] | trim }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ raise_exception("Invalid content type") }}
{%- endif -%}
{{ '<end_of_turn>\n' }}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{'<start_of_turn>model\n'}}
{%- endif -%}
"""
# Floating-point precisions accepted by the per-tower dtype flags below.
_DTYPES = {"float32", "bfloat16", "float16"}

# The Orbax checkpoint stacks decoder layers by attention type in groups of 5;
# convert_transformer_weights recovers the flat layer index as
# `_SLIDING_WINDOW_PATTERN * i + attention_type_index`.
_SLIDING_WINDOW_PATTERN = 5

# Orbax tree-path prefixes for the audio-encoder tower.
_AUDIO_ENCODER_PARAMETER = "AudioEncoder/encoder"
_AUDIO_ENCODER_CONFORMER = f"{_AUDIO_ENCODER_PARAMETER}/conformer/stacked_layers"
# The sub-sample convolution projection lives under the "feature" sub-tree.
_AUDIO_ENCODER_SSCP = f"{_AUDIO_ENCODER_PARAMETER}/feature"

# Orbax tree-path prefixes for the language-model (decoder) tower.
_TRANSFORMER_PARAMETER = "transformer"
_TRANSFORMER_ALTUP_PROJ = f"{_TRANSFORMER_PARAMETER}/altup_projection_"
_TRANSFORMER_ALTUP_UNEMB = f"{_TRANSFORMER_PARAMETER}/altup_unembed_projection_"
_TRANSFORMER_DECODER_BLOCK = f"{_TRANSFORMER_PARAMETER}/stacked_layers/attention_type_"
_TRANSFORMER_DECODER_BLOCK_LEN = len(_TRANSFORMER_DECODER_BLOCK)
_TRANSFORMER_EMBEDDER = f"{_TRANSFORMER_PARAMETER}/embedder"
_TRANSFORMER_FINAL_NORM = "transformer/final_norm"
# Post-trained (RL) checkpoints nest the transformer under this extra prefix;
# convert_transformer_weights strips it before path matching.
_TRANSFORMER_POST_TRAINING_PREFIX = "rlx_networks/policy_network/"
_TRANSFORMER_POST_TRAINING_PREFIX_LEN = len(_TRANSFORMER_POST_TRAINING_PREFIX)

# Constants for the MobileNet-based vision tower (consumed by conversion code
# outside this section).
# _MOBILE_NET_CONFIG = Gemma3nVisionConfig.from_pretrained("")
_MOBILE_NET_PREFIX = "mobilenet"
# NOTE(review): presumably the cumulative block counts at the end of each timm
# stage, used to map (stage, block) pairs onto timm's flat block numbering --
# confirm against the vision-tower converter.
_MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES = [3, 8, 45, 84]
# Orbax block-group name prefixes for the MobileNet block types (fused_ib =
# fused inverted bottleneck; mmqa / uib presumably mobile multi-query attention
# and universal inverted bottleneck -- confirm).
_MOBILE_NET_CONV = "block_group_conv2d_"
_MOBILE_NET_FIB = "block_group_fused_ib_"
_MOBILE_NET_MQA = "block_group_mmqa_"
_MOBILE_NET_MSFA = "block_adapter_"
_MOBILE_NET_UIB = "block_group_uib_"
# Index pairs (appear to be (stage, block) -- confirm) of UIB blocks that have
# a depthwise conv at the start of the block...
_MOBILE_NET_UIB_HAS_DW_START = {
    (1, 0),
    (1, 1),
    (1, 2),
    (1, 3),
    (1, 4),
    (2, 0),
    (2, 1),
    (2, 2),
    (2, 3),
    (2, 4),
    (2, 5),
    (2, 6),
    (2, 7),
    (3, 0),
}
# ...and in the middle of the block.
_MOBILE_NET_UIB_HAS_DW_MID = {
    (1, 0),
    (2, 0),
    (3, 0),
}

_VARIANT_GEMMA_3_2B = "gemma3n_e2b"
_VARIANT_GEMMA_3_4B = "gemma3n_e4b"

# Model variants this script can convert, keyed by the --variant flag value.
# E4B uses the Gemma3nTextConfig defaults; E2B overrides the decoder: 30
# layers, 8192 intermediate size, activation sparsity of 0.95 on the first 10
# layers (dense elsewhere), and 10 KV-shared layers.
_VARIANTS: Mapping[str, Gemma3nConfig] = {
    _VARIANT_GEMMA_3_2B: Gemma3nConfig(
        text_config=Gemma3nTextConfig(
            intermediate_size=2048 * 4,
            num_hidden_layers=30,
            activation_sparsity_pattern=(0.95,) * 10 + (0.0,) * 20,
            num_kv_shared_layers=10,
        ),
        vision_config=Gemma3nVisionConfig(),
        audio_config=Gemma3nAudioConfig(),
    ),
    _VARIANT_GEMMA_3_4B: Gemma3nConfig(
        text_config=Gemma3nTextConfig(),
        vision_config=Gemma3nVisionConfig(),
        audio_config=Gemma3nAudioConfig(),
    ),
}
# ==== Flags ====

# Each tower can be exported at its own precision: --audio_dtype, --text_dtype
# (note the flag is named "text_dtype" even though the variable is
# _TRANSFORMER_DTYPE), and --vision_dtype all accept the values in _DTYPES.
_AUDIO_DTYPE = flags.DEFINE_enum(
    name="audio_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
_CHECKPOINT_PATH = flags.DEFINE_string(
    name="checkpoint_path",
    default=None,
    help="Path to the Orbax checkpoint.",
    required=True,
)
_INCLUDE_CHAT_TEMPLATE = flags.DEFINE_bool(
    name="include_chat_template", default=False, help="If true, will save the default chat template with the tokenizer"
)
_OUTPUT_PATH = flags.DEFINE_string(
    name="output_path",
    default=None,
    help="Path to store the HF checkpoint.",
    required=True,
)
_TRANSFORMER_DTYPE = flags.DEFINE_enum(
    name="text_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
_TOKENIZER_PATH = flags.DEFINE_string(
    name="tokenizer_path",
    default=None,
    help="Path to the SentencePiece model file.",
    required=True,
)
# Valid variant values are the keys of _VARIANTS (gemma3n_e2b / gemma3n_e4b).
_VARIANT = flags.DEFINE_enum(
    name="variant",
    default=_VARIANT_GEMMA_3_4B,
    help="The model variant to convert.",
    enum_values=set(_VARIANTS.keys()),
)
_VERBOSE = flags.DEFINE_bool(
    name="verbose",
    default=False,
    help="If true, log the path, shape, and dtype of every converted layer.",
)
_VISION_DTYPE = flags.DEFINE_enum(
    name="vision_dtype",
    default="bfloat16",
    help="The floating point precision (aka dtype) of the model.",
    enum_values=_DTYPES,
)
def convert_audio_encoder_weights(
    config: Gemma3nAudioConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Converts one audio-encoder tensor from Orbax naming/layout to HF.

    Args:
        config: Audio-encoder config; supplies the conformer layer count used
            to validate stacked tensors and the hidden size used for reshapes.
        param: Name of the leaf parameter at ``path`` (e.g. ``per_dim_scale``).
        path: Orbax tree path of the tensor, e.g.
            ``AudioEncoder/encoder/conformer/stacked_layers/...``.
        weights: The tensor stored at ``path``/``param``. For conformer paths,
            axis 0 stacks all ``config.conf_num_hidden_layers`` layers.

    Returns:
        An iterator of ``(hf_parameter_path, array)`` pairs — one per HF
        parameter derived from this Orbax tensor (possibly empty when the path
        matches no known parameter).

    Raises:
        ValueError: If the numbers of converted paths and weights diverge,
            which would indicate a bug in the branch logic below.
    """
    converted_paths: list[str] = []
    converted_weights: list[Any] = []
    if path.startswith(_AUDIO_ENCODER_CONFORMER):
        # Conformer weights are stacked across layers on axis 0; unstack and
        # emit one HF parameter per layer.
        assert weights.shape[0] == config.conf_num_hidden_layers
        for i, matrix in enumerate(weights):
            if "fflayer_end" in path:
                # Feed-forward module at the end of the conformer block.
                base = f"conformer.{i}.ffw_layer_end"
                if path.endswith("ffn_layer1"):
                    converted_paths.append(f"{base}.ffw_layer_1.weight")
                    # Dense kernels are transposed to torch's (out, in) layout.
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ffn_layer2"):
                    converted_paths.append(f"{base}.ffw_layer_2.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("post_layer_norm"):
                    converted_paths.append(f"{base}.post_layer_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_layer_norm"):
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif "fflayer_start" in path:
                # Feed-forward module at the start of the conformer block
                # (same sub-structure as fflayer_end).
                base = f"conformer.{i}.ffw_layer_start"
                if path.endswith("ffn_layer1"):
                    converted_paths.append(f"{base}.ffw_layer_1.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ffn_layer2"):
                    converted_paths.append(f"{base}.ffw_layer_2.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("post_layer_norm"):
                    converted_paths.append(f"{base}.post_layer_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_layer_norm"):
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif path.endswith("final_ln"):
                converted_paths.append(f"conformer.{i}.norm.weight")
                converted_weights.append(matrix)
            elif "lconv" in path:
                # Lightweight 1-D convolution module.
                base = f"conformer.{i}.lconv1d"
                if path.endswith("conv_norm"):
                    converted_paths.append(f"{base}.conv_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("depthwise_conv1d"):
                    converted_paths.append(f"{base}.depthwise_conv1d.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("linear_end"):
                    converted_paths.append(f"{base}.linear_end.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("linear_start"):
                    converted_paths.append(f"{base}.linear_start.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("ln"):
                    # Checked after the more specific suffixes above so that
                    # only the bare layer norm lands here.
                    converted_paths.append(f"{base}.pre_layer_norm.weight")
                    converted_weights.append(matrix)
            elif "trans_atten" in path:
                # Self-attention module.
                base = f"conformer.{i}.attention"
                # NOTE: `param` and the `path` suffixes below are distinct
                # conditions, hence `if` rather than `elif` after this branch.
                if param == "per_dim_scale":
                    converted_paths.append(f"{base}.attn.per_dim_scale")
                    converted_weights.append(matrix)
                if path.endswith("query_key_value_projection"):
                    # The fused QKV tensor carries the q/k/v index on axis 1;
                    # transposing it to the front and iterating yields the
                    # three projections in q, k, v order. Each is flattened to
                    # (hidden, hidden) and transposed for torch Linear.
                    converted_paths.extend(
                        [f"{base}.attn.q_proj.weight", f"{base}.attn.k_proj.weight", f"{base}.attn.v_proj.weight"]
                    )
                    converted_weights.extend(
                        [
                            m.reshape(config.hidden_size, config.hidden_size).transpose()
                            for m in matrix.transpose(1, 0, 2, 3)
                        ]
                    )
                elif path.endswith("pos_proj"):
                    # Relative-position projection: flattened and transposed
                    # like the q/k/v projections.
                    converted_paths.append(f"{base}.attn.relative_position_embedding.pos_proj.weight")
                    converted_weights.append(matrix.reshape(config.hidden_size, config.hidden_size).transpose())
                elif path.endswith("post"):
                    # Attention output projection: axis 2 is moved to the front
                    # before flattening to (hidden, hidden). Note there is no
                    # final transpose here, unlike the q/k/v case.
                    converted_paths.append(f"{base}.post.weight")
                    converted_weights.append(matrix.transpose(2, 0, 1).reshape(config.hidden_size, config.hidden_size))
                elif path.endswith("post_norm"):
                    converted_paths.append(f"{base}.post_norm.weight")
                    converted_weights.append(matrix)
                elif path.endswith("pre_norm"):
                    converted_paths.append(f"{base}.pre_attn_norm.weight")
                    converted_weights.append(matrix)
    elif path.startswith(_AUDIO_ENCODER_SSCP):
        # Sub-sample convolution projection ("feature" sub-tree). These
        # tensors are not layer-stacked, so `weights` is used directly.
        if path.endswith("input_proj"):
            converted_paths.append("subsample_conv_projection.input_proj_linear.weight")
            # Move axis 2 to the front, then flatten into a 2-D Linear weight
            # of shape (hidden, conv_channels**2).
            converted_weights.append(
                weights.transpose(2, 0, 1).reshape(config.hidden_size, config.sscp_conv_channel_size[1] ** 2)
            )
        elif "norm_" in path:
            # The trailing digit of the path selects which conv this norm
            # belongs to; assumes single-digit conv indices.
            index = int(path[-1])
            converted_paths.append(f"subsample_conv_projection.conv_{index}.norm.weight")
            converted_weights.append(weights)
        elif "subsampling_" in path:
            index = int(path[-1])
            converted_paths.append(f"subsample_conv_projection.conv_{index}.conv.weight")
            # Reorder conv kernel axes to torch's (out_ch, in_ch, kH, kW) --
            # presumably from a (kH, kW, in_ch, out_ch) source layout; confirm
            # against the checkpoint.
            converted_weights.append(weights.transpose(3, 2, 0, 1))
    # Sanity check: the two lists are appended in lockstep above, so a length
    # mismatch means a branch appended a path without a weight (or vice versa).
    if (cpl := len(converted_paths)) != (cwl := len(converted_weights)):
        raise ValueError(
            "The `converted_paths` and `converted_weights` should be the same "
            f"length. Got {cpl} and {cwl}, respectively, for {path}."
        )
    return zip(converted_paths, converted_weights)
def convert_transformer_weights(
    config: Gemma3nTextConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Map one (path, param) entry of the Orbax transformer tree to HF weights.

    Returns an iterable of ``(hf_path, array)`` pairs. A single source tensor
    may fan out into several HF tensors (fused QKV / gate-up projections) or
    into one tensor per decoder layer (stacked layer weights).
    """
    # Post-trained checkpoints nest everything under an extra prefix; strip it
    # so the branch matching below is identical for both checkpoint flavors.
    if path.startswith(_TRANSFORMER_POST_TRAINING_PREFIX):
        path = path[_TRANSFORMER_POST_TRAINING_PREFIX_LEN:]

    converted_paths: list[str] = []
    converted_weights: list[Any] = []

    if path.startswith(_TRANSFORMER_ALTUP_PROJ):
        index = int(path[-1])
        converted_paths.append(f"altup_projections.{index}.weight")
        converted_weights.append(weights.transpose())
    elif path.startswith(_TRANSFORMER_ALTUP_UNEMB):
        index = int(path[-1])
        converted_paths.append(f"altup_unembed_projections.{index}.weight")
        converted_weights.append(weights.transpose())
    elif path.startswith(_TRANSFORMER_DECODER_BLOCK):
        # Layer weights are stored stacked: axis 0 holds every layer that
        # occupies the same slot in the sliding-window attention pattern.
        attention_type_index = int(path[_TRANSFORMER_DECODER_BLOCK_LEN])
        assert weights.shape[0] == config.num_hidden_layers / _SLIDING_WINDOW_PATTERN
        for i, matrix in enumerate(weights):
            layer_idx = _SLIDING_WINDOW_PATTERN * i + attention_type_index
            base_path = f"layers.{layer_idx}"
            if "altup" in path:
                altup_path = f"{base_path}.altup"
                if param == "correct_output_scale":
                    converted_paths.append(f"{altup_path}.correct_output_scale")
                    converted_weights.append(matrix)
                elif param == "correction_coefs":
                    converted_paths.append(f"{altup_path}.correction_coefs.weight")
                    converted_weights.append(matrix.transpose())
                elif param == "prediction_coefs":
                    converted_paths.append(f"{altup_path}.prediction_coefs.weight")
                    # Prediction coefficients are clipped to
                    # +/- altup_coef_clip at conversion time.
                    converted_weights.append(
                        np.clip(
                            matrix.reshape(config.altup_num_inputs, config.altup_num_inputs**2).transpose(),
                            -config.altup_coef_clip,
                            config.altup_coef_clip,
                        )
                    )
                # NOTE: deliberately `if`, not `elif` -- the chain above keys
                # on `param`, the one below keys on the `path` suffix.
                if path.endswith("modality_router"):
                    converted_paths.append(f"{altup_path}.modality_router.weight")
                    converted_weights.append(matrix.transpose())
                elif path.endswith("router_norm_layer"):
                    converted_paths.append(f"{altup_path}.router_norm.weight")
                    converted_weights.append(matrix)
            elif path.endswith("attn/attn_vec_einsum"):
                converted_paths.append(f"{base_path}.self_attn.o_proj.weight")
                converted_weights.append(
                    matrix.transpose(2, 0, 1).reshape(config.hidden_size, config.num_attention_heads * config.head_dim)
                )
            elif path.endswith("attn/kv_einsum"):
                # Fused KV projection: split into separate k_proj / v_proj.
                converted_paths.extend(
                    [
                        f"{base_path}.self_attn.k_proj.weight",
                        f"{base_path}.self_attn.v_proj.weight",
                    ]
                )
                k_proj_weights, v_proj_weights = matrix.transpose(0, 2, 1, 3)
                kv_proj_shape = (config.hidden_size, config.num_key_value_heads * config.head_dim)
                converted_weights.extend(
                    [
                        k_proj_weights.reshape(kv_proj_shape).transpose(),
                        v_proj_weights.reshape(kv_proj_shape).transpose(),
                    ]
                )
            elif path.endswith("attn/q_einsum"):
                converted_paths.append(f"{base_path}.self_attn.q_proj.weight")
                converted_weights.append(
                    matrix.transpose(1, 0, 2)
                    .reshape(config.hidden_size, config.num_attention_heads * config.head_dim)
                    .transpose()
                )
            elif path.endswith("attn/query_norm"):
                converted_paths.append(f"{base_path}.self_attn.q_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("attn/key_norm"):
                converted_paths.append(f"{base_path}.self_attn.k_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("laurel_block/linear_left"):
                converted_paths.append(f"{base_path}.laurel.linear_left.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("laurel_block/linear_right"):
                converted_paths.append(f"{base_path}.laurel.linear_right.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("mlp/gating_einsum"):
                # Fused MLP input projection: slice 0 is gate, slice 1 is up.
                converted_paths.extend([f"{base_path}.mlp.gate_proj.weight", f"{base_path}.mlp.up_proj.weight"])
                gate_proj_weight, up_proj_weight = matrix
                converted_weights.extend([gate_proj_weight, up_proj_weight])
            elif path.endswith("mlp/linear"):
                converted_paths.append(f"{base_path}.mlp.down_proj.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("per_layer_input_gate"):
                converted_paths.append(f"{base_path}.per_layer_input_gate.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("per_layer_projection"):
                converted_paths.append(f"{base_path}.per_layer_projection.weight")
                converted_weights.append(matrix.transpose())
            elif path.endswith("post_attention_norm"):
                converted_paths.append(f"{base_path}.post_attention_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_ffw_norm"):
                converted_paths.append(f"{base_path}.post_feedforward_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_laurel_norm"):
                converted_paths.append(f"{base_path}.laurel.post_laurel_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("post_per_layer_input_norm"):
                converted_paths.append(f"{base_path}.post_per_layer_input_norm.weight")
                converted_weights.append(matrix)
            elif path.endswith("pre_attention_norm"):
                converted_paths.append(f"{base_path}.input_layernorm.weight")
                converted_weights.append(matrix)
            elif path.endswith("pre_ffw_norm"):
                converted_paths.append(f"{base_path}.pre_feedforward_layernorm.weight")
                converted_weights.append(matrix)
    elif path == _TRANSFORMER_EMBEDDER:
        if param == "input_embedding":
            converted_paths.append("embed_tokens.weight")
            # Gemma 3n model doesn't have soft tokens or "end of" tokens for images and audio in its input and output
            # embeddings, so we resize to avoid bugs observed with Mllama
            pre_expansion_embeddings = weights
            pad_token_slice = slice(config.pad_token_id, config.pad_token_id + 1)
            new_embeddings = np.repeat(pre_expansion_embeddings[pad_token_slice], 256, axis=0)
            weights = np.vstack([pre_expansion_embeddings, new_embeddings])
            converted_weights.append(weights)
        elif param == "per_layer_embeddings":
            converted_paths.append("embed_tokens_per_layer.weight")
            converted_weights.append(
                weights.reshape(
                    config.vocab_size_per_layer_input, config.num_hidden_layers * config.hidden_size_per_layer_input
                )
            )
    elif path.startswith(_TRANSFORMER_EMBEDDER):
        # TODO: ryanmullins - support multimodal norms and projections
        if path.endswith("per_layer_model_projection"):
            converted_paths.append("per_layer_model_projection.weight")
            converted_weights.append(
                weights.reshape(
                    config.hidden_size, config.num_hidden_layers * config.hidden_size_per_layer_input
                ).transpose()
            )
        elif path.endswith("per_layer_projection_norm"):
            converted_paths.append("per_layer_projection_norm.weight")
            converted_weights.append(weights)
    elif path == _TRANSFORMER_FINAL_NORM:
        converted_paths = ["norm.weight"]
        converted_weights = [weights]

    # Unmatched paths fall through with empty lists (zip() then yields
    # nothing); a length mismatch, however, is a conversion bug.
    if (cpl := len(converted_paths)) != (cwl := len(converted_weights)):
        raise ValueError(
            "The `converted_paths` and `converted_weights` should be the same "
            f"length. Got {cpl} and {cwl}, respectively, for {path}."
        )

    return zip(converted_paths, converted_weights)
def convert_vision_weights(
    config: Gemma3nVisionConfig,
    path: str,
    param: str,
    weights: np.ndarray,
) -> Iterable[tuple[str, np.ndarray]]:
    """Map one MobileNet vision-tower weight to its timm-style HF path.

    4-D conv kernels are permuted with ``transpose(3, 2, 0, 1)`` (presumably
    HWIO -> OIHW -- TODO confirm against the checkpoint layout) and 2-D dense
    kernels become 1x1 convs via ``transpose()[:, :, None, None]``.
    """

    def generate_base_path(path: str, block_type: str) -> tuple[str, tuple[int, int]]:
        """Resolve `<block_type><n>` in *path* to `blocks.{stage}.{layer}`.

        The flat 1-based index n is located within the cumulative stage sizes
        in _MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES; returns the dotted base path
        plus the (stage, layer) pair.
        """
        re_str = rf"{block_type}(\d+)/"
        re_pattern = re.compile(re_str)
        match = re.search(re_pattern, path).group(1)
        idx = abs(int(match)) - 1

        for block_idx, v in enumerate(_MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES):
            if v > idx:
                offset = _MOBILE_NET_TIMM_SUMMED_BLOCK_SIZES[block_idx - 1] if block_idx > 0 else 0
                layer_idx = idx - offset
                return f"blocks.{block_idx}.{layer_idx}", (block_idx, layer_idx)

        raise ValueError(f"could not extract a base path from {path}")

    if _MOBILE_NET_MSFA in path:
        converted_path = "msfa"
        if "ffn/Normalize_0" in path:
            converted_path += ".ffn.pw_exp.bn.weight"
            converted_weight = weights
        elif "ffn/Normalize_1" in path:
            converted_path += ".ffn.pw_proj.bn.weight"
            converted_weight = weights
        elif "ffn/expand" in path:
            converted_path += ".ffn.pw_exp.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "ffn/project" in path:
            converted_path += ".ffn.pw_proj.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "Normalize_0" in path:
            converted_path += ".norm.weight"
            converted_weight = weights
    elif _MOBILE_NET_CONV in path:
        if "Conv_0" in path:
            # The stem conv has no bias in the checkpoint; timm expects one,
            # so a zero bias is synthesized alongside the kernel.
            converted_path = ("conv_stem.conv.weight", "conv_stem.conv.bias")
            converted_weight = weights.transpose(3, 2, 0, 1)
            converted_weight = (converted_weight, np.zeros(converted_weight.shape[0]))
        elif "Normalize_0" in path:
            converted_path = "conv_stem.bn.weight"
            converted_weight = weights
    elif _MOBILE_NET_FIB in path:
        converted_path, _ = generate_base_path(path, _MOBILE_NET_FIB)
        if "Normalize_0" in path:
            converted_path += ".bn1.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".bn2.weight"
            converted_weight = weights
        elif "expand_conv" in path:
            converted_path += ".conv_exp.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        else:
            converted_path += ".conv_pwl.weight"
            converted_weight = weights.transpose()[:, :, None, None]
    elif _MOBILE_NET_MQA in path:
        converted_path, _ = generate_base_path(path, _MOBILE_NET_MQA)
        if "LayerScale_0" in path:
            converted_path += ".layer_scale.gamma"
            converted_weight = weights
        elif "Normalize_0" in path:
            converted_path += ".norm.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".attn.key.norm.weight"
            converted_weight = weights
        elif "Normalize_2" in path:
            converted_path += ".attn.value.norm.weight"
            converted_weight = weights
        elif "key_dwconv" in path:
            converted_path += ".attn.key.down_conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "key_proj" in path:
            converted_path += ".attn.key.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "output_proj" in path:
            converted_path += ".attn.output.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "query_proj" in path:
            converted_path += ".attn.query.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "value_dwconv" in path:
            converted_path += ".attn.value.down_conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "value_proj" in path:
            converted_path += ".attn.value.proj.weight"
            converted_weight = weights.transpose()[:, :, None, None]
    elif _MOBILE_NET_UIB in path:
        converted_path, idx_key = generate_base_path(path, _MOBILE_NET_UIB)
        # UIB blocks vary in layout: the meaning of Normalize_N depends on
        # which optional depthwise convs this particular block contains.
        has_dw_start = idx_key in _MOBILE_NET_UIB_HAS_DW_START
        has_dw_mid = idx_key in _MOBILE_NET_UIB_HAS_DW_MID
        if "LayerScale_0" in path:
            converted_path += ".layer_scale.gamma"
            converted_weight = weights
        elif "Normalize_0" in path:
            converted_path += ".dw_start.bn.weight" if has_dw_start else ".pw_exp.bn.weight"
            converted_weight = weights
        elif "Normalize_1" in path:
            converted_path += ".pw_exp.bn.weight" if has_dw_start else ".pw_proj.bn.weight"
            converted_weight = weights
        elif "Normalize_2" in path:
            converted_path += ".dw_mid.bn.weight" if has_dw_mid else ".pw_proj.bn.weight"
            converted_weight = weights
        elif "Normalize_3" in path:
            converted_path += ".pw_proj.bn.weight"
            converted_weight = weights
        elif "expand" in path:
            converted_path += ".pw_exp.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "middle_dwconv" in path:
            converted_path += ".dw_mid.conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)
        elif "project" in path:
            converted_path += ".pw_proj.conv.weight"
            converted_weight = weights.transpose()[:, :, None, None]
        elif "start_dwconv" in path:
            converted_path += ".dw_start.conv.weight"
            converted_weight = weights.transpose(3, 2, 0, 1)

    # NOTE(review): if no branch above matched, converted_path and
    # converted_weight are unbound and this raises NameError -- apparently
    # relied upon to surface unexpected checkpoint paths; confirm.
    if isinstance(converted_path, (tuple, list)):
        return zip(converted_path, converted_weight)
    else:
        return [(converted_path, converted_weight)]
def convert(checkpoint_path: str, config: Gemma3nConfig) -> dict[str, torch.Tensor]:
    """Loads Orbax checkpoint from `input_path` and converts it to HF tree."""
    checkpointer = obc.PyTreeCheckpointer()
    ckpt = checkpointer.restore(checkpoint_path)
    hf_tree: dict[str, torch.Tensor] = {}

    def update_tree(path: str, weights: np.ndarray, target_dtype: torch.dtype) -> None:
        # Round-trip through float32 before casting to the per-tower dtype.
        hf_tree[path] = torch.from_numpy(weights.astype("float32")).type(target_dtype)
        if _VERBOSE.value:
            logging.info(
                "%s converted shape=%s with dtype=%s",
                path,
                weights.shape,
                target_dtype,
            )

    for (path, param), value in tree.flatten_with_path(ckpt):
        # Multimodal embedder weights are converted inline; everything else is
        # dispatched to the per-tower converter functions.
        if param == "audio_input_embedding_extra":
            update_tree("model.embed_audio.embedding.weight", value, config.audio_config.dtype)
        elif path.endswith("audio_embedding_norm"):
            update_tree("model.embed_audio.hard_embedding_norm.weight", value, config.audio_config.dtype)
        elif path.endswith("audio_input_projection"):
            update_tree("model.embed_audio.embedding_projection.weight", value.transpose(), config.audio_config.dtype)
        elif path.endswith("audio_soft_embedding_norm"):
            update_tree("model.embed_audio.soft_embedding_norm.weight", value, config.audio_config.dtype)
        elif param == "mm_input_embedding_extra":
            update_tree("model.embed_vision.embedding.weight", value, config.vision_config.dtype)
        elif path.endswith("mm_hard_embedding_norm"):
            update_tree("model.embed_vision.hard_embedding_norm.weight", value, config.vision_config.dtype)
        elif path.endswith("mm_input_projection"):
            update_tree(
                "model.embed_vision.embedding_projection.weight", value.transpose(), config.vision_config.dtype
            )
        elif path.endswith("mm_soft_embedding_norm"):
            update_tree("model.embed_vision.soft_embedding_norm.weight", value, config.vision_config.dtype)
        elif path.startswith(_TRANSFORMER_PARAMETER):
            # NOTE(review): the outer loop variable `path` is shadowed inside
            # these inner loops; harmless today because it is re-bound on the
            # next outer iteration, but fragile if code is added below.
            for path, weights in convert_transformer_weights(config.text_config, path, param, value):
                update_tree(f"model.language_model.{path}", weights, config.text_config.dtype)
        elif _MOBILE_NET_PREFIX in path:
            mobilenet_prefix_idx = path.index(_MOBILE_NET_PREFIX)
            path = path[mobilenet_prefix_idx:]
            for path, weights in convert_vision_weights(config.vision_config, path, param, value):
                update_tree(f"model.vision_tower.timm_model.{path}", weights, config.vision_config.dtype)
        elif path.startswith(_AUDIO_ENCODER_PARAMETER):
            for path, weights in convert_audio_encoder_weights(config.audio_config, path, param, value):
                update_tree(f"model.audio_tower.{path}", weights, config.audio_config.dtype)

    # The LM head shares (is tied to) the input embedding weights.
    hf_tree["lm_head.weight"] = hf_tree["model.language_model.embed_tokens.weight"]

    return hf_tree
def main(*args):
    """Convert the checkpoint and save model, tokenizer, processor,
    preprocessor config and generation config under --output_path."""
    del args

    output_path = _OUTPUT_PATH.value
    variant = _VARIANT.value

    config = _VARIANTS[variant]
    config.audio_config.dtype = getattr(torch, _AUDIO_DTYPE.value)
    config.text_config.dtype = getattr(torch, _TRANSFORMER_DTYPE.value)
    config.vision_config.dtype = getattr(torch, _VISION_DTYPE.value)

    if _INCLUDE_CHAT_TEMPLATE.value:
        # Chat template is included for instruction tuned models, which treat
        # both "<eos>" and "<end_of_turn>" as generation stoppers.
        config.eos_token_id = [1, 106]

    logging.info(
        "Converting Gemma 3 (%s) @ %s (language) and %s (vision)",
        variant,
        _TRANSFORMER_DTYPE.value,
        _VISION_DTYPE.value,
    )
    state_tree = convert(_CHECKPOINT_PATH.value, config)
    logging.info("Converted Gemma 3 (%s) state tree from Orbax to Hugging Face.", variant)

    # Build the module structure without allocating real weights, then attach
    # the converted tensors directly (assign=True avoids copying).
    with accelerate.init_empty_weights():
        model = Gemma3nForConditionalGeneration(config=config)

    model.load_state_dict(state_tree, assign=True, strict=True)
    logging.info(
        "Loaded Gemma 3 (%s) in Hugging Face Transformers as a %s instance.",
        variant,
        type(model).__name__,
    )

    model.save_pretrained(output_path, state_dict=state_tree)
    logging.info(
        "Saved Gemma 3 (%s) to SafeTensors in %s using %s",
        variant,
        output_path,
        type(model).__name__,
    )
    # Free the (large) model and state tree before building the tokenizer.
    del model
    del state_tree

    chat_template_kwargs = {"chat_template": _CHAT_TEMPLATE} if _INCLUDE_CHAT_TEMPLATE.value else {}

    tokenizer = GemmaTokenizerFast(
        _TOKENIZER_PATH.value,
        add_bos_token=True,
        extra_special_tokens={
            "image_token": "<image_soft_token>",  # Should be ID=262_145
            "boi_token": "<start_of_image>",  # Should be ID=255_999
            "eoi_token": "<end_of_image>",  # Should be ID=262_144
            "audio_token": "<audio_soft_token>",  # Should be ID=262_273
            "boa_token": "<start_of_audio>",  # Should be ID=256_000
            "eoa_token": "<end_of_audio>",  # Should be ID=262_272
        },
        **chat_template_kwargs,
    )
    tokenizer.save_pretrained(output_path)
    logging.info("Saved GemmaTokenizer for %s to %s", variant, output_path)

    feature_extractor = Gemma3nAudioFeatureExtractor()
    image_processor = SiglipImageProcessorFast(
        image_seq_length=256,
        image_mean=(0.5,) * 3,
        image_std=(0.5,) * 3,
        size={"height": 768, "width": 768},
        resample=PILImageResampling.BILINEAR,
        do_normalize=False,
    )
    processor = Gemma3nProcessor(
        feature_extractor=feature_extractor,
        image_processor=image_processor,
        tokenizer=tokenizer,
        **chat_template_kwargs,
    )
    processor.save_pretrained(output_path)
    logging.info("Saved Gemma3nProcessor for %s to %s", variant, output_path)

    # NOTE: feature_extractor and image_processor both use the same filename, preprocessor_config.json, when saved to
    # disk, but the files are overwritten by processor.save_pretrained(). However, the configs can be unioned, saved,
    # and loaded from the same preprocessor_config.json file, so we do that explicitly here.
    feature_extractor_config = json.loads(feature_extractor.to_json_string())
    image_processor_config = json.loads(image_processor.to_json_string())
    preprocessor_config = {**feature_extractor_config, **image_processor_config}
    with open(os.path.join(output_path, "preprocessor_config.json"), "w", encoding="utf-8") as writer:
        writer.write(json.dumps(preprocessor_config, indent=2, sort_keys=True) + "\n")
    logging.info("Saved joint preprocessor_config.json for %s to %s", variant, output_path)

    del feature_extractor, image_processor, processor, tokenizer

    generation_config = GenerationConfig(
        pad_token_id=config.text_config.pad_token_id,
        bos_token_id=config.text_config.bos_token_id,
        eos_token_id=(
            [config.text_config.eos_token_id, 106] if _INCLUDE_CHAT_TEMPLATE.value else config.text_config.eos_token_id
        ),
        cache_implementation="hybrid",
        temperature=1.0,
        do_sample=True,
        top_k=64,
        top_p=0.95,
    )
    generation_config.save_pretrained(output_path)
# absl entry point.
if __name__ == "__main__":
    app.run(main)
"""
Provide the namespace doxygen comment.
It checks if there is a doxygen-style comment in front of each 'namespace' keyword
== Violation ==
namespace AA <== Violation. No comment on the namespace AA
{
}
/* <== Violation. There is comment but not a doxygen comment.
* blar blar
*/
namespace BB
{
}
== Good ==
/** <== OK!
* blar blar
*/
namespace AA
{
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext):
    """Report a violation when a namespace definition lacks a Doxygen comment.

    Runs only for namespace definitions (not declarations). The namespace is
    accepted when the closest preceding comment token is a Doxygen comment
    and no SEMI / PREPROCESSOR / LBRACE token appears between that comment
    and the namespace keyword.
    """
    # Idiom fix: compare to None with `is` / `is not` (PEP 8), not ==/!=.
    if not decl and currentType == "NAMESPACE" and typeContext is not None:
        t = lexer.GetCurToken()
        # Closest preceding comment token, if any.
        lexer.PushTokenIndex()
        t2 = lexer.GetPrevTokenInType("COMMENT")
        lexer.PopTokenIndex()
        # Closest preceding structural token; a comment appearing before it
        # belongs to earlier code, not to this namespace.
        lexer.PushTokenIndex()
        t3 = lexer.GetPrevTokenInTypeList(["SEMI", "PREPROCESSOR", "LBRACE"], False, True)
        lexer.PopTokenIndex()
        if t2 is not None and t2.additional == "DOXYGEN":
            if t3 is None or t2.lexpos > t3.lexpos:
                return
        nsiqcppstyle_reporter.Error(t, __name__,
                                    "Doxygen Comment should be provided in front of namespace def(%s)."
                                    % fullName)
ruleManager.AddTypeNameRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    """Unit tests: the rule must fire only for namespace definitions that
    lack a Doxygen comment (plain C comments do not count)."""

    def setUpRule(self):
        ruleManager.AddTypeNameRule(RunRule)

    def test1(self):
        # A namespace declaration (no body) must not be flagged.
        self.Analyze("thisfile.c",
"""
namespace K;
""")
        assert not CheckErrorContent(__name__)

    def test2(self):
        # A plain (non-Doxygen) comment does not satisfy the rule.
        self.Analyze("thisfile.c",
"""
/*
*/
namespace K {
}
""")
        assert CheckErrorContent(__name__)

    def test3(self):
        # `using namespace` with a preceding Doxygen comment is accepted.
        self.Analyze("thisfile.c",
"""
/**
*/
using namespace A;
""")
        assert not CheckErrorContent(__name__)
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from datetime import datetime
from operator import itemgetter
import netsvc
from osv import fields, osv
from tools.translate import _
import decimal_precision as dp
import tools
class account_move(osv.osv):
    _inherit = 'account.move'

    def apply_changes(self, cr, uid, ids, context=None):
        """Propagate the move's journal, period and date onto all its lines.

        Only the first record in *ids* is processed.
        """
        res = {}    # NOTE(review): unused
        lines = []  # NOTE(review): unused
        account_move_line_obj = self.pool.get('account.move.line')
        account_move = self.browse(cr, uid, ids)[0]
        if account_move:
            line_ids = account_move_line_obj.search(cr,uid,[('move_id', '=', account_move.id)])
            account_move_line_obj.write(cr, uid, line_ids, {'journal_id': account_move.journal_id.id, 'period_id':account_move.period_id.id, 'date':account_move.date}, context, False, True)
        return True

    def write(self, cr, uid, ids, vals, context=None):
        """Write with ORM validation disabled via the `novalidate` flag.

        NOTE(review): `super(osv.osv, self)` skips every class between this
        model and `osv.osv` in the MRO -- i.e. it bypasses the standard
        account.move.write() entirely, not just this override. Confirm this
        is intentional rather than a typo for `super(account_move, self)`.
        """
        if context is None:
            context = {}
        c = context.copy()
        c['novalidate'] = True
        result = super(osv.osv, self).write(cr, uid, ids, vals, c)
        return result

    def _check_period_journal(self, cursor, user, ids, context=None):
        # Constraint deliberately neutralized: always passes.
        return True

    _constraints = [
        (_check_period_journal,
         'You cannot create entries on different periods/journals in the same move',
         ['line_id']),
    ]
account_move() | unknown | codeparrot/codeparrot-clean | ||
import unittest
from pulsar import async, Future, new_event_loop
__benchmark__ = True
DELAY = 0
def async_func(loop, value):
    """Return a Future on *loop* that is resolved with *value* after DELAY."""
    future = Future(loop=loop)
    loop.call_later(DELAY, future.set_result, value)
    return future
def sub_sub(loop, num):
    """Await two async results of *num* and return their sum (2 * num)."""
    left = yield from async_func(loop, num)
    right = yield from async_func(loop, num)
    return left + right
def sub(loop, num):
    """Await two direct results plus one sub_sub() chain (4 * num total)."""
    first = yield from async_func(loop, num)
    second = yield from async_func(loop, num)
    nested = yield from sub_sub(loop, num)
    return first + second + nested
def main(loop, num):
    """Top-level coroutine: one direct await plus two sub() chains (9 * num)."""
    direct = yield from async_func(loop, num)
    left = yield from sub(loop, num)
    right = yield from sub(loop, num)
    return direct + left + right
class TestCoroutine33(unittest.TestCase):
    """Benchmark: nested `yield from` coroutines on a fresh pulsar event loop."""
    __benchmark__ = True
    __number__ = 100  # presumably the benchmark repetition count -- confirm

    def setUp(self):
        self.loop = new_event_loop()

    def test_coroutine(self):
        future = async(main(self.loop, 1), loop=self.loop)
        self.loop.run_until_complete(future)
        # main() awaits nine async_func(loop, 1) futures in total (1 direct
        # + 2 * 4 via sub()), so the result is 9.
        self.assertEqual(future.result(), 9)

    def getTime(self, dt):
        # Subtract the nine scheduled DELAYs so the reported time reflects
        # coroutine machinery overhead only.
        return dt - 9*DELAY
import {
Stringify,
makeObject_Primitives,
mutate,
mutateAndReturn,
} from 'shared-runtime';
// Fixture: mutation of a locally-constructed value captured inside JSX,
// combined with a labeled `break` out of the block. When both conditions
// hold, `obj` is mutated via mutateAndReturn() while already referenced by
// the JSX element, and the trailing mutate(obj) is skipped by `break label`.
function useFoo({data}) {
  let obj = null;
  let myDiv = null;
  label: {
    if (data.cond) {
      obj = makeObject_Primitives();
      if (data.cond1) {
        myDiv = <Stringify value={mutateAndReturn(obj)} />;
        break label; // skips the mutate(obj) below
      }
      mutate(obj);
    }
  }
  return myDiv;
}
// Harness entry: render twice with identical props -- presumably to verify
// the compiled output is stable across sequential renders.
export const FIXTURE_ENTRYPOINT = {
  fn: useFoo,
  params: [{data: {cond: true, cond1: true}}],
  sequentialRenders: [
    {data: {cond: true, cond1: true}},
    {data: {cond: true, cond1: true}},
  ],
};
/* Checks that BOLT correctly handles instrumentation of indirect calls
* including case with indirect calls in signals handlers.
*/
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
int foo(int x) { return x + 1; }
int bar(int (*fn)(int), int val) { return fn(val); }
void sigHandler(int signum) { bar(foo, 3); }
/* Forks a child that performs 100000 indirect calls while the parent keeps
 * signalling it with SIGUSR1 (each delivery triggers another indirect call
 * inside the handler) until the child exits. */
int main(int argc, char **argv) {
  long long i;
  pid_t pid, wpid;
  int wstatus;
  signal(SIGUSR1, sigHandler);
  pid = fork();
  if (pid) {
    /* Parent: signal the child and poll for its exit (WNOHANG). */
    do {
      kill(pid, SIGUSR1);
      usleep(0);
      wpid = waitpid(pid, &wstatus, WNOHANG);
    } while (wpid == 0);
    printf("[parent]\n");
  } else {
    /* Child: hammer the indirect-call path. */
    for (i = 0; i < 100000; i++) {
      bar(foo, i % 10);
    }
    printf("[child]\n");
  }
  return 0;
}
/*
REQUIRES: system-linux,bolt-runtime,lit-max-individual-test-time
RUN: %clang %cflags -D_GNU_SOURCE %s -o %t.exe -Wl,-q -pie -fpie
RUN: llvm-bolt %t.exe --instrument --instrumentation-file=%t.fdata \
RUN: --instrumentation-wait-forks=1 --conservative-instrumentation \
RUN: -o %t.instrumented_conservative
# Instrumented program needs to finish returning zero
RUN: %t.instrumented_conservative | FileCheck %s -check-prefix=CHECK-OUTPUT
RUN: llvm-bolt %t.exe --instrument --instrumentation-file=%t.fdata \
RUN: --instrumentation-wait-forks=1 \
RUN: -o %t.instrumented
# Instrumented program needs to finish returning zero
RUN: %t.instrumented | FileCheck %s -check-prefix=CHECK-OUTPUT
# Test that the instrumented data makes sense
RUN: llvm-bolt %t.exe -o %t.bolted --data %t.fdata \
RUN: --reorder-blocks=ext-tsp --reorder-functions=hfsort+ \
RUN: --print-only=interp --print-finalized
RUN: %t.bolted | FileCheck %s -check-prefix=CHECK-OUTPUT
CHECK-OUTPUT: [child]
CHECK-OUTPUT: [parent]
*/ | c | github | https://github.com/llvm/llvm-project | bolt/test/runtime/X86/instrumentation-indirect.c |
from setuptools import setup, find_packages
# Long description for PyPI; fall back to a pointer when README.rst is not
# present (e.g. when installing from an sdist without it).
# Fixes: bare `except:` (which also swallowed KeyboardInterrupt/SystemExit)
# narrowed to file errors, and the file handle is now closed deterministically.
try:
    with open('README.rst') as readme:
        desc = readme.read()
except (IOError, OSError):
    desc = 'see README.rst'
# Package metadata; the plugin is exposed to nose via the `nose.plugins`
# entry point below.
setup(
    name='nosexcover',
    version='1.0.11',
    description='Extends nose.plugins.cover to add Cobertura-style XML reports',
    long_description=desc,
    author='Chris Heisel',
    author_email='chris@heisel.org',
    url='http://github.com/cmheisel/nose-xcover/',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=['nose', 'coverage>=3.4'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    entry_points={
        'nose.plugins': ['xcover = nosexcover.nosexcover:XCoverage']
    },
)
"""Internals of array-api-extra.""" | python | github | https://github.com/scikit-learn/scikit-learn | sklearn/externals/array_api_extra/_lib/__init__.py |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import datetime
import errno
import graphviz
import json
import logging
import os
import pprint
import re
import requests
import shutil
import sys
import tarfile
import tempfile
import threading
import time
import docker
import git
import jinja2
from oslo_config import cfg
from requests import exceptions as requests_exc
import six
# Repository root: two directories above this file.
PROJECT_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), '../..'))

# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools/build.py
# instead of pip installed kolla-build tool
if PROJECT_ROOT not in sys.path:
    sys.path.insert(0, PROJECT_ROOT)
from kolla.common import config as common_config
from kolla.common import task
from kolla.template import filters as jinja_filters
from kolla.template import methods as jinja_methods
from kolla import version
def make_a_logger(conf=None, image_name=None):
    """Build a logger for the tool itself or for one image build.

    Without a usable ``conf.logs_dir`` (or without an image name), records go
    to stdout and propagation is disabled; otherwise they are appended to
    ``<logs_dir>/<image_name>.log`` (file opened lazily). Level is DEBUG when
    ``conf.debug`` is set, INFO otherwise.
    """
    if image_name:
        logger = logging.getLogger(".".join([__name__, image_name]))
    else:
        logger = logging.getLogger(__name__)
    # Only attach a handler the first time this named logger is requested.
    if not logger.handlers:
        if conf is None or not conf.logs_dir or not image_name:
            handler = logging.StreamHandler(sys.stdout)
            logger.propagate = False
        else:
            filename = os.path.join(conf.logs_dir, "%s.log" % image_name)
            handler = logging.FileHandler(filename, delay=True)
        handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        logger.addHandler(handler)
    if conf is not None and conf.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    return logger


LOG = make_a_logger()
class KollaDirNotFoundException(Exception):
    """A required Kolla directory could not be found."""
    pass
class KollaUnknownBuildTypeException(Exception):
    """The requested build/install type is not recognized."""
    pass
class KollaMismatchBaseTypeException(Exception):
    """The base distro does not match the requested build type."""
    pass
class KollaRpmSetupUnknownConfig(Exception):
    """An rpm_setup configuration element is not understood."""
    pass
# Image status constants.
#
# Used as Image.status values throughout the build/push tasks below.
#
# TODO(harlowja): use enum lib in the future??
STATUS_CONNECTION_ERROR = 'connection_error'
STATUS_PUSH_ERROR = 'push_error'
STATUS_ERROR = 'error'
STATUS_PARENT_ERROR = 'parent_error'
STATUS_BUILT = 'built'
STATUS_BUILDING = 'building'
STATUS_UNMATCHED = 'unmatched'
STATUS_MATCHED = 'matched'
STATUS_UNPROCESSED = 'unprocessed'

# All error status constants.
STATUS_ERRORS = (STATUS_CONNECTION_ERROR, STATUS_PUSH_ERROR,
                 STATUS_ERROR, STATUS_PARENT_ERROR)
@contextlib.contextmanager
def join_many(threads):
    """Context manager that joins *threads* after the body completes.

    On a first KeyboardInterrupt, live threads are still joined (with a
    timeout, see note below); a second KeyboardInterrupt abandons the wait.
    """
    try:
        yield
        for t in threads:
            t.join()
    except KeyboardInterrupt:
        try:
            LOG.info('Waiting for daemon threads exit. Push Ctrl + c again to'
                     ' force exit')
            for t in threads:
                if t.is_alive():
                    LOG.debug('Waiting thread %s to exit', t.name)
                    # NOTE(Jeffrey4l): Python Bug: When join without timeout,
                    # KeyboardInterrupt is never sent.
                    t.join(0xffff)
                    LOG.debug('Thread %s exits', t.name)
        except KeyboardInterrupt:
            LOG.warning('Force exits')
class DockerTask(task.Task):
    """Base task that lazily builds a Docker client from the environment."""

    # Connection settings resolved once from the environment, shared by all
    # docker-backed tasks.
    docker_kwargs = docker.utils.kwargs_from_env()

    def __init__(self):
        super(DockerTask, self).__init__()
        self._dc = None

    @property
    def dc(self):
        """Docker client, created on first access and cached thereafter."""
        if self._dc is None:
            kwargs = self.docker_kwargs.copy()
            self._dc = docker.Client(version='auto', **kwargs)
        return self._dc
class Image(object):
    """Node in the image dependency tree built by the tool."""

    def __init__(self, name, canonical_name, path, parent_name='',
                 status=STATUS_UNPROCESSED, parent=None,
                 source=None, logger=None):
        self.name = name
        self.canonical_name = canonical_name
        self.path = path
        self.status = status
        self.parent = parent
        self.source = source
        self.parent_name = parent_name
        if logger is None:
            logger = make_a_logger(image_name=name)
        self.logger = logger
        self.children = []
        self.plugins = []

    def copy(self):
        """Return a duplicate; child/plugin lists are copied, not shared."""
        duplicate = Image(self.name, self.canonical_name, self.path,
                          logger=self.logger, parent_name=self.parent_name,
                          status=self.status, parent=self.parent)
        if self.source:
            duplicate.source = self.source.copy()
        if self.children:
            duplicate.children = list(self.children)
        if self.plugins:
            duplicate.plugins = list(self.plugins)
        return duplicate

    def __repr__(self):
        template = ("Image(%s, %s, %s, parent_name=%s,"
                    " status=%s, parent=%s, source=%s)")
        return template % (self.name, self.canonical_name, self.path,
                           self.parent_name, self.status, self.parent,
                           self.source)
class PushIntoQueueTask(task.Task):
    """Task that pushes some other task into a queue."""

    def __init__(self, push_task, push_queue):
        super(PushIntoQueueTask, self).__init__()
        self.push_queue = push_queue
        self.push_task = push_task

    @property
    def name(self):
        return 'PushIntoQueueTask(%s=>%s)' % (self.push_task.name,
                                              self.push_queue)

    def run(self):
        # Enqueue the wrapped task; this task itself always succeeds.
        self.push_queue.put(self.push_task)
        self.success = True
class PushTask(DockerTask):
    """Task that pushes an image to a docker repository."""

    def __init__(self, conf, image):
        super(PushTask, self).__init__()
        self.conf = conf
        self.image = image
        self.logger = image.logger

    @property
    def name(self):
        return 'PushTask(%s)' % self.image.name

    def run(self):
        """Push the image, translating failures into image status codes."""
        image = self.image
        self.logger.info('Trying to push the image')
        try:
            self.push_image(image)
        except requests_exc.ConnectionError:
            self.logger.exception('Make sure Docker is running and that you'
                                  ' have the correct privileges to run Docker'
                                  ' (root)')
            image.status = STATUS_CONNECTION_ERROR
        except Exception:
            self.logger.exception('Unknown error when pushing')
            image.status = STATUS_PUSH_ERROR
        finally:
            # push_image() may also have set STATUS_ERROR itself on a failed
            # push, so success is derived from the final image status.
            if (image.status not in STATUS_ERRORS
                    and image.status != STATUS_UNPROCESSED):
                self.logger.info('Pushed successfully')
                self.success = True
            else:
                self.success = False

    def push_image(self, image):
        # Stream the docker push, mirroring its output into our log; an
        # 'errorDetail' entry marks the push as failed.
        for response in self.dc.push(image.canonical_name,
                                     stream=True,
                                     insecure_registry=True):
            stream = json.loads(response)
            if 'stream' in stream:
                self.logger.info(stream['stream'])
            elif 'errorDetail' in stream:
                image.status = STATUS_ERROR
                self.logger.error(stream['errorDetail']['message'])
class BuildTask(DockerTask):
    """Task that builds out an image.

    A successful build queues follow-up work: an optional push task and
    one ``BuildTask`` per child image.
    """

    def __init__(self, conf, image, push_queue):
        super(BuildTask, self).__init__()
        self.conf = conf
        self.image = image
        self.push_queue = push_queue
        self.nocache = not conf.cache
        self.forcerm = not conf.keep
        self.logger = image.logger

    @property
    def name(self):
        return 'BuildTask(%s)' % self.image.name

    def run(self):
        self.builder(self.image)
        if self.image.status == STATUS_BUILT:
            self.success = True

    @property
    def followups(self):
        """Return the tasks to enqueue once this build succeeded."""
        followups = []
        if self.conf.push and self.success:
            followups.extend([
                # If we are supposed to push the image into a docker
                # repository, then make sure we do that...
                PushIntoQueueTask(
                    PushTask(self.conf, self.image),
                    self.push_queue),
            ])
        if self.image.children and self.success:
            for image in self.image.children:
                followups.append(BuildTask(self.conf, image, self.push_queue))
        return followups

    def process_source(self, image, source):
        """Fetch one source ('url', 'git' or 'local') into an archive.

        :param image: the Image being built; its status is set to an
            error status on failure
        :param source: dict with at least 'name' and 'type' keys
        :return: path of the archive on success, otherwise ``None``
        """
        dest_archive = os.path.join(image.path, source['name'] + '-archive')
        source_type = source.get('type')
        if source_type == 'url':
            self.logger.debug("Getting archive from %s", source['source'])
            try:
                r = requests.get(source['source'], timeout=self.conf.timeout)
            except requests_exc.Timeout:
                self.logger.exception(
                    'Request timed out while getting archive from %s',
                    source['source'])
                image.status = STATUS_ERROR
                return
            if r.status_code == 200:
                with open(dest_archive, 'wb') as f:
                    f.write(r.content)
            else:
                self.logger.error(
                    'Failed to download archive: status_code %s',
                    r.status_code)
                image.status = STATUS_ERROR
                return
        elif source_type == 'git':
            clone_dir = '{}-{}'.format(dest_archive,
                                       source['reference'].replace('/', '-'))
            try:
                self.logger.debug("Cloning from %s", source['source'])
                git.Git().clone(source['source'], clone_dir)
                git.Git(clone_dir).checkout(source['reference'])
                reference_sha = git.Git(clone_dir).rev_parse('HEAD')
                self.logger.debug("Git checkout by reference %s (%s)",
                                  source['reference'], reference_sha)
            except Exception as e:
                # FIX: the original call passed image.name without a
                # matching %-placeholder, which made the logging call
                # itself error out instead of reporting the failure.
                self.logger.error("Failed to get source from git for %s",
                                  image.name)
                self.logger.error("Error: %s", e)
                # clean-up clone folder to retry; ignore_errors tolerates
                # a clone that never created the directory
                shutil.rmtree(clone_dir, ignore_errors=True)
                image.status = STATUS_ERROR
                return
            with tarfile.open(dest_archive, 'w') as tar:
                tar.add(clone_dir, arcname=os.path.basename(clone_dir))
        elif source_type == 'local':
            self.logger.debug("Getting local archive from %s",
                              source['source'])
            if os.path.isdir(source['source']):
                with tarfile.open(dest_archive, 'w') as tar:
                    tar.add(source['source'],
                            arcname=os.path.basename(source['source']))
            else:
                shutil.copyfile(source['source'], dest_archive)
        else:
            self.logger.error("Wrong source type '%s'", source_type)
            image.status = STATUS_ERROR
            return
        # Set time on destination archive to epoch 0 so the Docker build
        # cache keys on content only, never on timestamps.
        os.utime(dest_archive, (0, 0))
        return dest_archive

    def update_buildargs(self):
        """Merge configured build args with proxy environment variables.

        Explicitly configured args take precedence over the environment.
        :return: dict of build args, or ``None`` when there are none
        """
        buildargs = dict()
        if self.conf.build_args:
            buildargs = dict(self.conf.build_args)
        proxy_vars = ('HTTP_PROXY', 'http_proxy', 'HTTPS_PROXY',
                      'https_proxy', 'FTP_PROXY', 'ftp_proxy',
                      'NO_PROXY', 'no_proxy')
        for proxy_var in proxy_vars:
            if proxy_var in os.environ and proxy_var not in buildargs:
                buildargs[proxy_var] = os.environ.get(proxy_var)
        if not buildargs:
            return None
        return buildargs

    def builder(self, image):
        """Assemble sources/plugins and run ``docker build`` for *image*."""
        self.logger.debug('Processing')
        if image.status == STATUS_UNMATCHED:
            return
        if (image.parent is not None and
                image.parent.status in STATUS_ERRORS):
            self.logger.error('Parent image error\'d with message "%s"',
                              image.parent.status)
            image.status = STATUS_PARENT_ERROR
            return
        image.status = STATUS_BUILDING
        self.logger.info('Building')
        if image.source and 'source' in image.source:
            self.process_source(image, image.source)
        if image.status in STATUS_ERRORS:
            return
        plugin_archives = list()
        plugins_path = os.path.join(image.path, 'plugins')
        for plugin in image.plugins:
            archive_path = self.process_source(image, plugin)
            if image.status in STATUS_ERRORS:
                return
            plugin_archives.append(archive_path)
        if plugin_archives:
            for plugin_archive in plugin_archives:
                # NOTE(review): extractall is used on archives built from
                # configured plugin sources without path sanitisation, so
                # plugin sources must be trusted.
                with tarfile.open(plugin_archive, 'r') as plugin_archive_tar:
                    plugin_archive_tar.extractall(path=plugins_path)
        else:
            try:
                os.mkdir(plugins_path)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    self.logger.info('Directory %s already exist. Skipping.',
                                     plugins_path)
                else:
                    self.logger.error('Failed to create directory %s: %s',
                                      plugins_path, e)
                    # FIX: a local mkdir failure is not a connection
                    # problem; report it as a plain build error.
                    image.status = STATUS_ERROR
                    return
        with tarfile.open(os.path.join(image.path, 'plugins-archive'),
                          'w') as tar:
            tar.add(plugins_path, arcname='plugins')
        # Pull the latest image for the base distro only.
        pull = image.parent is None
        buildargs = self.update_buildargs()
        try:
            for response in self.dc.build(path=image.path,
                                          tag=image.canonical_name,
                                          nocache=self.nocache,
                                          rm=True,
                                          pull=pull,
                                          forcerm=self.forcerm,
                                          buildargs=buildargs):
                stream = json.loads(response.decode('utf-8'))
                if 'stream' in stream:
                    for line in stream['stream'].split('\n'):
                        if line:
                            self.logger.info('%s', line)
                if 'errorDetail' in stream:
                    image.status = STATUS_ERROR
                    self.logger.error('Error\'d with the following message')
                    for line in stream['errorDetail']['message'].split('\n'):
                        if line:
                            self.logger.error('%s', line)
                    return
        except docker.errors.DockerException:
            image.status = STATUS_ERROR
            self.logger.exception('Unknown docker error when building')
        except Exception:
            image.status = STATUS_ERROR
            self.logger.exception('Unknown error when building')
        else:
            image.status = STATUS_BUILT
            self.logger.info('Built')
class WorkerThread(threading.Thread):
    """Thread that executes tasks until the queue provides a tombstone."""

    #: Object to be put on worker queues to get them to die.
    tombstone = object()

    def __init__(self, conf, queue):
        super(WorkerThread, self).__init__()
        self.queue = queue
        self.conf = conf
        # Cooperative stop flag, set externally (e.g. on KeyboardInterrupt).
        self.should_stop = False

    def run(self):
        """Consume and run tasks, retrying failures up to conf.retries times."""
        while not self.should_stop:
            task = self.queue.get()
            if task is self.tombstone:
                # Ensure any other threads also get the tombstone.
                self.queue.put(task)
                break
            try:
                # One initial attempt plus conf.retries retries.
                for attempt in six.moves.range(self.conf.retries + 1):
                    if self.should_stop:
                        break
                    if attempt > 0:
                        LOG.info("Attempting to run task %s for the %s time",
                                 task.name, attempt + 1)
                    else:
                        LOG.info("Attempting to run task %s for the first"
                                 " time", task.name)
                    try:
                        task.run()
                        if task.success:
                            break
                    except Exception:
                        LOG.exception('Unhandled error when running %s',
                                      task.name)
                    # try again...
                    task.reset()
                # Only successful tasks get their follow-up work scheduled.
                if task.success and not self.should_stop:
                    for next_task in task.followups:
                        LOG.info('Added next task %s to queue',
                                 next_task.name)
                        self.queue.put(next_task)
            finally:
                self.queue.task_done()
class KollaWorker(object):
    """Coordinates an image build run.

    Owns the temporary working directory, Dockerfile rendering, the list
    of Image objects, dependency resolution and status bookkeeping.
    """

    def __init__(self, conf):
        self.conf = conf
        self.images_dir = self._get_images_dir()
        self.registry = conf.registry
        if self.registry:
            self.namespace = self.registry + '/' + conf.namespace
        else:
            self.namespace = conf.namespace
        self.base = conf.base
        self.base_tag = conf.base_tag
        self.install_type = conf.install_type
        self.tag = conf.tag
        self.images = list()
        if conf.rpm_setup_config:
            rpm_setup_config = filter(None, conf.rpm_setup_config)
        else:
            rpm_setup_config = list()
        self.rpm_setup = self.build_rpm_setup(rpm_setup_config)
        # Reject base-distro / install-type combinations we cannot build.
        rh_base = ['fedora', 'centos', 'oraclelinux', 'rhel']
        rh_type = ['source', 'binary', 'rdo', 'rhos']
        deb_base = ['ubuntu', 'debian']
        deb_type = ['source', 'binary']
        if not ((self.base in rh_base and self.install_type in rh_type) or
                (self.base in deb_base and self.install_type in deb_type)):
            raise KollaMismatchBaseTypeException(
                '{} is unavailable for {}'.format(self.install_type, self.base)
            )
        if self.base == 'fedora':
            LOG.warning('Fedora images are deprecated since Newton and will '
                        'be removed in the future')
        # 'rdo'/'rhos' are binary install types with a distinct metatype.
        if self.install_type == 'binary':
            self.install_metatype = 'rdo'
        elif self.install_type == 'source':
            self.install_metatype = 'mixed'
        elif self.install_type == 'rdo':
            self.install_type = 'binary'
            self.install_metatype = 'rdo'
        elif self.install_type == 'rhos':
            self.install_type = 'binary'
            self.install_metatype = 'rhos'
        else:
            raise KollaUnknownBuildTypeException(
                'Unknown install type'
            )
        self.image_prefix = self.base + '-' + self.install_type + '-'
        self.include_header = conf.include_header
        self.include_footer = conf.include_footer
        self.regex = conf.regex
        self.image_statuses_bad = dict()
        self.image_statuses_good = dict()
        self.image_statuses_unmatched = dict()
        self.maintainer = conf.maintainer

    def _get_images_dir(self):
        """Locate the 'docker' image-definition directory on disk."""
        possible_paths = (
            PROJECT_ROOT,
            os.path.join(sys.prefix, 'share/kolla'),
            os.path.join(sys.prefix, 'local/share/kolla'))
        for path in possible_paths:
            image_path = os.path.join(path, 'docker')
            # NOTE(SamYaple): We explicty check for the base folder to ensure
            # this is the correct path
            # TODO(SamYaple): Improve this to make this safer
            if os.path.exists(os.path.join(image_path, 'base')):
                LOG.info('Found the docker image folder at %s', image_path)
                return image_path
        else:
            raise KollaDirNotFoundException('Image dir can not be found')

    def build_rpm_setup(self, rpm_setup_config):
        """Generates a list of docker commands based on provided configuration.

        :param rpm_setup_config: A list of .rpm or .repo paths or URLs
        :return: A list of docker commands
        """
        rpm_setup = list()
        for config in rpm_setup_config:
            if config.endswith('.rpm'):
                # RPM files can be installed with yum from file path or url
                cmd = "RUN yum -y install {}".format(config)
            elif config.endswith('.repo'):
                if config.startswith('http'):
                    # Curl http://url/etc.repo to /etc/yum.repos.d/etc.repo
                    name = config.split('/')[-1]
                    cmd = "RUN curl -L {} -o /etc/yum.repos.d/{}".format(
                        config, name)
                else:
                    # Copy .repo file from filesystem
                    cmd = "COPY {} /etc/yum.repos.d/".format(config)
            else:
                raise KollaRpmSetupUnknownConfig(
                    'RPM setup must be provided as .rpm or .repo files.'
                    ' Attempted configuration was {}'.format(config)
                )
            rpm_setup.append(cmd)
        return rpm_setup

    def copy_apt_files(self):
        """Copy user-supplied apt sources/preferences into the base image dir."""
        if self.conf.apt_sources_list:
            shutil.copyfile(
                self.conf.apt_sources_list,
                os.path.join(self.working_dir, "base", "sources.list")
            )
        if self.conf.apt_preferences:
            shutil.copyfile(
                self.conf.apt_preferences,
                os.path.join(self.working_dir, "base", "apt_preferences")
            )

    def setup_working_dir(self):
        """Creates a working directory for use while building"""
        ts = time.time()
        ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S_')
        self.temp_dir = tempfile.mkdtemp(prefix='kolla-' + ts)
        self.working_dir = os.path.join(self.temp_dir, 'docker')
        shutil.copytree(self.images_dir, self.working_dir)
        self.copy_apt_files()
        LOG.debug('Created working dir: %s', self.working_dir)

    def set_time(self):
        """Zero atime/mtime of every file so Docker's cache keys on content."""
        for root, dirs, files in os.walk(self.working_dir):
            for file_ in files:
                os.utime(os.path.join(root, file_), (0, 0))
            for dir_ in dirs:
                os.utime(os.path.join(root, dir_), (0, 0))
        LOG.debug('Set atime and mtime to 0 for all content in working dir')

    def _get_filters(self):
        """Custom Jinja2 filters available to Dockerfile templates."""
        filters = {
            'customizable': jinja_filters.customizable,
        }
        return filters

    def _get_methods(self):
        """Mapping of available Jinja methods

        return a dictionary that maps available function names and their
        corresponding python methods to make them available in jinja templates
        """
        return {
            'debian_package_install': jinja_methods.debian_package_install,
        }

    def create_dockerfiles(self):
        """Render Dockerfile.j2 templates into concrete Dockerfiles."""
        kolla_version = version.version_info.cached_version_string()
        supported_distro_release = common_config.DISTRO_RELEASE.get(
            self.base)
        for path in self.docker_build_paths:
            template_name = "Dockerfile.j2"
            image_name = path.split("/")[-1]
            values = {'base_distro': self.base,
                      'base_image': self.conf.base_image,
                      'base_distro_tag': self.base_tag,
                      'supported_distro_release': supported_distro_release,
                      'install_metatype': self.install_metatype,
                      'image_prefix': self.image_prefix,
                      'install_type': self.install_type,
                      'namespace': self.namespace,
                      'tag': self.tag,
                      'maintainer': self.maintainer,
                      'kolla_version': kolla_version,
                      'image_name': image_name,
                      'rpm_setup': self.rpm_setup}
            env = jinja2.Environment(  # nosec: not used to render HTML
                loader=jinja2.FileSystemLoader(self.working_dir))
            env.filters.update(self._get_filters())
            env.globals.update(self._get_methods())
            tpl_path = os.path.join(
                os.path.relpath(path, self.working_dir),
                template_name)
            template = env.get_template(tpl_path)
            if self.conf.template_override:
                # The override template wraps the stock one, which it can
                # reach through the 'parent_template' variable.
                template_path = os.path.dirname(self.conf.template_override)
                template_name = os.path.basename(self.conf.template_override)
                values['parent_template'] = template
                env = jinja2.Environment(  # nosec: not used to render HTML
                    loader=jinja2.FileSystemLoader(template_path))
                env.filters.update(self._get_filters())
                env.globals.update(self._get_methods())
                template = env.get_template(template_name)
            if self.include_header:
                with open(self.include_header, 'r') as f:
                    values['include_header'] = f.read()
            if self.include_footer:
                with open(self.include_footer, 'r') as f:
                    values['include_footer'] = f.read()
            content = template.render(values)
            with open(os.path.join(path, 'Dockerfile'), 'w') as f:
                f.write(content)

    def find_dockerfiles(self):
        """Recursive search for Dockerfiles in the working directory"""
        self.docker_build_paths = list()
        path = self.working_dir
        filename = 'Dockerfile.j2'
        for root, dirs, names in os.walk(path):
            if filename in names:
                self.docker_build_paths.append(root)
                LOG.debug('Found %s', root.split(self.working_dir)[1])
        LOG.debug('Found %d Dockerfiles', len(self.docker_build_paths))

    def cleanup(self):
        """Remove temp files"""
        shutil.rmtree(self.temp_dir)

    def filter_images(self):
        """Filter which images to build"""
        filter_ = list()
        if self.regex:
            filter_ += self.regex
        elif self.conf.profile:
            for profile in self.conf.profile:
                if profile not in self.conf.profiles:
                    self.conf.register_opt(cfg.ListOpt(profile,
                                                       default=[]),
                                           'profiles')
                if len(self.conf.profiles[profile]) == 0:
                    msg = 'Profile: {} does not exist'.format(profile)
                    raise ValueError(msg)
                else:
                    filter_ += self.conf.profiles[profile]
        if filter_:
            # join('()') wraps the alternation in parentheses, i.e.
            # "(" + "a|b|c" + ")".
            patterns = re.compile(r"|".join(filter_).join('()'))
            for image in self.images:
                if image.status == STATUS_MATCHED:
                    continue
                if re.search(patterns, image.name):
                    image.status = STATUS_MATCHED
                    # Mark the whole ancestor chain so parents get built too.
                    while (image.parent is not None and
                           image.parent.status != STATUS_MATCHED):
                        image = image.parent
                        image.status = STATUS_MATCHED
                        LOG.debug('Image %s matched regex', image.name)
                else:
                    image.status = STATUS_UNMATCHED
        else:
            for image in self.images:
                image.status = STATUS_MATCHED

    def summary(self):
        """Walk the dictionary of images statuses and print results"""
        # For debug we print the logs again if the image error'd. This is to
        # help us debug and it will be extra helpful in the gate.
        for image in self.images:
            if image.status in STATUS_ERRORS:
                LOG.debug("Image %s failed", image.name)
        self.get_image_statuses()
        if self.image_statuses_good:
            LOG.info("=========================")
            LOG.info("Successfully built images")
            LOG.info("=========================")
            for name in self.image_statuses_good.keys():
                LOG.info(name)
        if self.image_statuses_bad:
            LOG.info("===========================")
            LOG.info("Images that failed to build")
            LOG.info("===========================")
            for name, status in self.image_statuses_bad.items():
                LOG.error('%s Failed with status: %s', name, status)
        if self.image_statuses_unmatched:
            LOG.debug("=====================================")
            LOG.debug("Images not matched for build by regex")
            LOG.debug("=====================================")
            for name in self.image_statuses_unmatched.keys():
                LOG.debug(name)

    def get_image_statuses(self):
        """Partition images into bad/good/unmatched status dicts (cached)."""
        if any([self.image_statuses_bad,
                self.image_statuses_good,
                self.image_statuses_unmatched]):
            return (self.image_statuses_bad,
                    self.image_statuses_good,
                    self.image_statuses_unmatched)
        for image in self.images:
            if image.status == STATUS_BUILT:
                self.image_statuses_good[image.name] = image.status
            elif image.status == STATUS_UNMATCHED:
                self.image_statuses_unmatched[image.name] = image.status
            else:
                self.image_statuses_bad[image.name] = image.status
        return (self.image_statuses_bad,
                self.image_statuses_good,
                self.image_statuses_unmatched)

    def build_image_list(self):
        """Create Image objects from the rendered Dockerfiles."""
        def process_source_installation(image, section):
            # Build a source-descriptor dict from the config section.
            installation = dict()
            # NOTE(jeffrey4l): source is not needed when the type is None
            if self.conf._get('type', self.conf._get_group(section)) is None:
                if image.parent_name is None:
                    LOG.debug('No source location found in section %s',
                              section)
            else:
                installation['type'] = self.conf[section]['type']
                installation['source'] = self.conf[section]['location']
                installation['name'] = section
                if installation['type'] == 'git':
                    installation['reference'] = self.conf[section]['reference']
            return installation

        for path in self.docker_build_paths:
            # Reading parent image name
            with open(os.path.join(path, 'Dockerfile')) as f:
                content = f.read()
            image_name = os.path.basename(path)
            canonical_name = (self.namespace + '/' + self.image_prefix +
                              image_name + ':' + self.tag)
            # parent_name parses the "FROM <image>" first line of the
            # rendered Dockerfile.
            image = Image(image_name, canonical_name, path,
                          parent_name=content.split(' ')[1].split('\n')[0],
                          logger=make_a_logger(self.conf, image_name))
            if self.install_type == 'source':
                # NOTE(jeffrey4l): register the opts if the section didn't
                # register in the kolla/common/config.py file
                if image.name not in self.conf._groups:
                    self.conf.register_opts(common_config.get_source_opts(),
                                            image.name)
                image.source = process_source_installation(image, image.name)
                for plugin in [match.group(0) for match in
                               (re.search('{}-plugin-.+'.format(image.name),
                                          section) for section in
                                self.conf.list_all_sections()) if match]:
                    try:
                        self.conf.register_opts(
                            common_config.get_source_opts(),
                            plugin
                        )
                    except cfg.DuplicateOptError:
                        LOG.debug('Plugin %s already registered in config',
                                  plugin)
                    image.plugins.append(
                        process_source_installation(image, plugin))
            self.images.append(image)

    def save_dependency(self, to_file):
        """Write the matched-image dependency graph as Graphviz dot."""
        dot = graphviz.Digraph(comment='Docker Images Dependency')
        dot.body.extend(['rankdir=LR'])
        for image in self.images:
            if image.status not in [STATUS_MATCHED]:
                continue
            dot.node(image.name)
            if image.parent is not None:
                dot.edge(image.parent.name, image.name)
        with open(to_file, 'w') as f:
            f.write(dot.source)

    def list_images(self):
        """Print a 1-based numbered list of all known images."""
        for count, image in enumerate(self.images):
            print(count + 1, ':', image.name)

    def list_dependencies(self):
        """Pretty-print the matched images as a nested ancestry tree."""
        match = False
        for image in self.images:
            if image.status in [STATUS_MATCHED]:
                match = True
            if image.parent is None:
                # The root of the tree (base image).
                base = image
        if not match:
            print('Nothing matched!')
            return

        def list_children(images, ancestry):
            children = six.next(iter(ancestry.values()))
            for image in images:
                if image.status not in [STATUS_MATCHED]:
                    continue
                if not image.children:
                    children.append(image.name)
                else:
                    newparent = {image.name: []}
                    children.append(newparent)
                    list_children(image.children, newparent)

        ancestry = {base.name: []}
        list_children(base.children, ancestry)
        pprint.pprint(ancestry)

    def find_parents(self):
        """Associate all images with parents and children"""
        sort_images = dict()
        for image in self.images:
            sort_images[image.canonical_name] = image
        for parent_name, parent in sort_images.items():
            for image in sort_images.values():
                if image.parent_name == parent_name:
                    parent.children.append(image)
                    image.parent = parent

    def build_queue(self, push_queue):
        """Organizes Queue list

        Return a list of Queues that have been organized into a hierarchy
        based on dependencies
        """
        self.build_image_list()
        self.find_parents()
        self.filter_images()
        queue = six.moves.queue.Queue()
        # Only root images are queued; children are enqueued as followups
        # of their parent's BuildTask.
        for image in self.images:
            if image.parent is None:
                queue.put(BuildTask(self.conf, image, push_queue))
                LOG.info('Added image %s to queue', image.name)
        return queue
def run_build():
    """Build container images.

    :return: A 3-tuple containing bad, good, and unmatched container image
    status dicts, or None if no images were built.
    """
    conf = cfg.ConfigOpts()
    common_config.parse(conf, sys.argv[1:], prog='kolla-build')
    if conf.debug:
        LOG.setLevel(logging.DEBUG)
    kolla = KollaWorker(conf)
    kolla.setup_working_dir()
    kolla.find_dockerfiles()
    kolla.create_dockerfiles()
    if conf.template_only:
        LOG.info('Dockerfiles are generated in %s', kolla.working_dir)
        return
    # We set the atime and mtime to 0 epoch to preserve allow the Docker cache
    # to work like we want. A different size or hash will still force a rebuild
    kolla.set_time()
    if conf.save_dependency:
        kolla.build_image_list()
        kolla.find_parents()
        kolla.filter_images()
        kolla.save_dependency(conf.save_dependency)
        LOG.info('Docker images dependency are saved in %s',
                 conf.save_dependency)
        return
    if conf.list_images:
        kolla.build_image_list()
        kolla.list_images()
        return
    if conf.list_dependencies:
        kolla.build_image_list()
        kolla.find_parents()
        kolla.filter_images()
        kolla.list_dependencies()
        return
    push_queue = six.moves.queue.Queue()
    queue = kolla.build_queue(push_queue)
    workers = []
    # join_many presumably joins every worker on exit -- it is defined
    # elsewhere in this file; verify before relying on its semantics.
    with join_many(workers):
        try:
            for x in six.moves.range(conf.threads):
                worker = WorkerThread(conf, queue)
                worker.setDaemon(True)
                worker.start()
                workers.append(worker)
            for x in six.moves.range(conf.push_threads):
                worker = WorkerThread(conf, push_queue)
                worker.setDaemon(True)
                worker.start()
                workers.append(worker)
            # sleep until queue is empty
            while queue.unfinished_tasks or push_queue.unfinished_tasks:
                time.sleep(3)
            # ensure all threads exited happily
            push_queue.put(WorkerThread.tombstone)
            queue.put(WorkerThread.tombstone)
        except KeyboardInterrupt:
            # Ask workers to stop, then unblock them with tombstones before
            # re-raising so the interrupt propagates to the caller.
            for w in workers:
                w.should_stop = True
            push_queue.put(WorkerThread.tombstone)
            queue.put(WorkerThread.tombstone)
            raise
    kolla.summary()
    kolla.cleanup()
    return kolla.get_image_statuses()
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class Migration(SchemaMigration):
    """South migration creating the DRF authtoken Token table."""

    def forwards(self, orm):
        """Apply: create the authtoken_token table."""
        # Adding model 'Token'
        db.create_table('authtoken_token', (
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='auth_token', unique=True, to=orm['%s.%s' % (User._meta.app_label, User._meta.object_name)])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('authtoken', ['Token'])

    def backwards(self, orm):
        """Revert: drop the authtoken_token table."""
        # Deleting model 'Token'
        db.delete_table('authtoken_token')

    # Frozen ORM definitions used by South; the user model entry is built
    # dynamically so custom user models (Django >= 1.5) are supported.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        "%s.%s" % (User._meta.app_label, User._meta.module_name): {
            'Meta': {'object_name': User._meta.module_name, 'db_table': repr(User._meta.db_table)},
        },
        'authtoken.token': {
            'Meta': {'object_name': 'Token'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'auth_token'", 'unique': 'True', 'to': "orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['authtoken']
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
    """Infinite progress indicator that prints the raw iteration count."""
    message = ''
    hide_cursor = True

    def update(self):
        self.write(str(self.index))
class Countdown(WriteMixin, Progress):
    """Bounded progress indicator that prints how many steps remain."""
    hide_cursor = True

    def update(self):
        self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
    """Progress indicator drawn as a single growing block character."""

    phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
    hide_cursor = True

    def update(self):
        # Map progress in [0, 1] onto a phase index, clamping at the top
        # so progress == 1.0 selects the final (full) glyph.
        last = len(self.phases) - 1
        idx = int(self.progress * len(self.phases))
        self.write(self.phases[min(idx, last)])
class Pie(Stack):
    """Stack variant that renders progress as pie-chart glyphs."""
    phases = ('○', '◔', '◑', '◕', '●')
#!/bin/sh
#
# Copyright (c) 2006 Catalin Marinas
#
test_description='git apply trying to add an ending line.
'
. ./test-lib.sh
# setup
# NOTE(review): the diff context lines inside the here-documents appear to
# have lost their conventional leading space; verify against the upstream
# t4113 script before relying on these fixtures.
cat >test-patch <<\EOF
diff --git a/file b/file
--- a/file
+++ b/file
@@ -1,2 +1,3 @@
a
b
+c
EOF
# The indexed file already ends with "c", so re-adding it must be rejected.
echo 'a' >file
echo 'b' >>file
echo 'c' >>file
test_expect_success setup \
'git update-index --add file'
# test
test_expect_success 'apply at the end' \
'test_must_fail git apply --index test-patch'
cat >test-patch <<\EOF
diff a/file b/file
--- a/file
+++ b/file
@@ -1,2 +1,3 @@
+a
b
c
EOF
# Same idea at the start of the file: "a" is already present, so the
# patch must fail to apply with --index.
echo >file 'a
b
c'
git update-index file
test_expect_success 'apply at the beginning' \
'test_must_fail git apply --index test-patch'
test_done
# Regression play: a UTF-8 string stored as an Ansible Vault value must
# decrypt back to the original text ("aöffü").
- name: "test that the vaulted_utf8_value decrypts correctly"
  gather_facts: false
  hosts: testhost
  vars:
    expected: "aöffü"
  tasks:
    - name: decrypt vaulted_utf8_value and show it in debug
      debug:
        var: vaulted_utf8_value

    - name: assert decrypted vaulted_utf8_value matches expected
      assert:
        that:
          - "vaulted_utf8_value == expected"
          - "vaulted_utf8_value == 'aöffü'"
"""
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import os, string
import unittest
from test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
    """Exercise the legacy bsddb-style open functions (bt/hash/rn)."""

    def setUp(self):
        self.filename = get_new_database_path()

    def tearDown(self):
        try:
            os.remove(self.filename)
        except os.error:
            pass

    def test01_btopen(self):
        self.do_bthash_test(btopen, 'btopen')

    def test02_hashopen(self):
        self.do_bthash_test(hashopen, 'hashopen')

    def test03_rnopen(self):
        """Recno database: numeric keys, cursor movement, deletion."""
        data = "The quick brown fox jumped over the lazy dog.".split()
        if verbose:
            print "\nTesting: rnopen"
        f = rnopen(self.filename, 'c')
        # Recno keys are 1-based record numbers.
        for x in range(len(data)):
            f[x+1] = data[x]
        getTest = (f[1], f[2], f[3])
        if verbose:
            print '%s %s %s' % getTest
        self.assertEqual(getTest[1], 'quick', 'data mismatch!')
        rv = f.set_location(3)
        if rv != (3, 'brown'):
            self.fail('recno database set_location failed: '+repr(rv))
        f[25] = 'twenty-five'
        f.close()
        del f
        f = rnopen(self.filename, 'w')
        f[20] = 'twenty'
        def noRec(f):
            rec = f[15]
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f['a string']
        self.assertRaises(TypeError, badKey, f)
        del f[3]
        # Iterate from the first record until next() runs off the end.
        rec = f.first()
        while rec:
            if verbose:
                print rec
            try:
                rec = f.next()
            except KeyError:
                break
        f.close()

    def test04_n_flag(self):
        # 'n' truncates/creates a fresh database.
        f = hashopen(self.filename, 'n')
        f.close()

    def do_bthash_test(self, factory, what):
        """Shared btree/hash scenario: create, iterate, reopen, modify."""
        if verbose:
            print '\nTesting: ', what
        f = factory(self.filename, 'c')
        if verbose:
            print 'creation...'
        # truth test
        if f:
            if verbose: print "truth test: true"
        else:
            if verbose: print "truth test: false"
        f['0'] = ''
        f['a'] = 'Guido'
        f['b'] = 'van'
        f['c'] = 'Rossum'
        f['d'] = 'invented'
        # 'e' intentionally left out
        f['f'] = 'Python'
        if verbose:
            print '%s %s %s' % (f['a'], f['b'], f['c'])
        if verbose:
            print 'key ordering...'
        start = f.set_location(f.first()[0])
        if start != ('0', ''):
            self.fail("incorrect first() result: "+repr(start))
        while 1:
            try:
                rec = f.next()
            except KeyError:
                self.assertEqual(rec, f.last(), 'Error, last <> last!')
                f.previous()
                break
            if verbose:
                print rec
        self.assertTrue(f.has_key('f'), 'Error, missing key!')
        # test that set_location() returns the next nearest key, value
        # on btree databases and raises KeyError on others.
        if factory == btopen:
            e = f.set_location('e')
            if e != ('f', 'Python'):
                self.fail('wrong key,value returned: '+repr(e))
        else:
            try:
                e = f.set_location('e')
            except KeyError:
                pass
            else:
                self.fail("set_location on non-existent key did not raise KeyError")
        f.sync()
        f.close()
        # truth test on a closed handle must raise DBError
        try:
            if f:
                if verbose: print "truth test: true"
            else:
                if verbose: print "truth test: false"
        except db.DBError:
            pass
        else:
            self.fail("Exception expected")
        del f
        if verbose:
            print 'modification...'
        f = factory(self.filename, 'w')
        f['d'] = 'discovered'
        if verbose:
            print 'access...'
        for key in f.keys():
            word = f[key]
            if verbose:
                print word
        def noRec(f):
            rec = f['no such key']
        self.assertRaises(KeyError, noRec, f)
        def badKey(f):
            rec = f[15]
        self.assertRaises(TypeError, badKey, f)
        f.close()
#----------------------------------------------------------------------
def test_suite():
    """Build the unittest suite containing all compatibility tests."""
    suite = unittest.makeSuite(CompatibilityTestCase)
    return suite
# Standard entry point so the module can be run directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
<?php
namespace Illuminate\Tests\Support;
use Illuminate\Support\Traits\Conditionable;
use PHPUnit\Framework\TestCase;
class SupportConditionableTest extends TestCase
{
    /**
     * when() with a truthy condition runs the primary callback, passing
     * the evaluated condition value through to it.
     */
    public function testWhenConditionCallback()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->when(2, function ($logger, $condition) {
                $logger->log('when', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['when', 2], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())->log('init')
            ->when(function ($logger) {
                return $logger->has('init');
            }, function ($logger, $condition) {
                $logger->log('when', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['init', 'when', true], $logger->values);
    }

    /**
     * when() with a falsy condition runs the default (third) callback,
     * again passing the evaluated condition value through.
     */
    public function testWhenDefaultCallback()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->when(null, function ($logger, $condition) {
                $logger->log('when', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['default', null], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())
            ->when(function ($logger) {
                return $logger->has('missing');
            }, function ($logger, $condition) {
                $logger->log('when', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['default', false], $logger->values);
    }

    /**
     * unless() with a falsy condition runs the primary callback.
     */
    public function testUnlessConditionCallback()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->unless(null, function ($logger, $condition) {
                $logger->log('unless', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['unless', null], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())
            ->unless(function ($logger) {
                return $logger->has('missing');
            }, function ($logger, $condition) {
                $logger->log('unless', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['unless', false], $logger->values);
    }

    /**
     * unless() with a truthy condition runs the default (third) callback.
     */
    public function testUnlessDefaultCallback()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->unless(2, function ($logger, $condition) {
                $logger->log('unless', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['default', 2], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())->log('init')
            ->unless(function ($logger) {
                return $logger->has('init');
            }, function ($logger, $condition) {
                $logger->log('unless', $condition);
            }, function ($logger, $condition) {
                $logger->log('default', $condition);
            });

        $this->assertSame(['init', 'default', true], $logger->values);
    }

    /**
     * when() with no callbacks returns a higher-order proxy: chained calls
     * are forwarded to the object only while the condition holds.
     */
    public function testWhenProxy()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->when(true)->log('one')
            ->when(false)->log('two');

        $this->assertSame(['one'], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())->log('init')
            ->when(function ($logger) {
                return $logger->has('init');
            })
            ->log('one')
            ->when(function ($logger) {
                return $logger->has('missing');
            })
            ->log('two')
            ->when()->has('init')->log('three')
            ->when()->has('missing')->log('four')
            ->when()->toggle->log('five')
            ->toggle()
            ->when()->toggle->log('six');

        $this->assertSame(['init', 'one', 'three', 'six'], $logger->values);
    }

    /**
     * unless() with no callbacks returns the inverse proxy: chained calls
     * are forwarded only while the condition does NOT hold.
     */
    public function testUnlessProxy()
    {
        // With static condition
        $logger = (new ConditionableLogger())
            ->unless(true)->log('one')
            ->unless(false)->log('two');

        $this->assertSame(['two'], $logger->values);

        // With callback condition
        $logger = (new ConditionableLogger())->log('init')
            ->unless(function ($logger) {
                return $logger->has('init');
            })
            ->log('one')
            ->unless(function ($logger) {
                return $logger->has('missing');
            })
            ->log('two')
            ->unless()->has('init')->log('three')
            ->unless()->has('missing')->log('four')
            ->unless()->toggle->log('five')
            ->toggle()
            ->unless()->toggle->log('six');

        $this->assertSame(['init', 'two', 'four', 'five'], $logger->values);
    }
}
/**
 * Minimal fluent logger used as the test double for the Conditionable trait.
 */
class ConditionableLogger
{
    use Conditionable;

    /** @var array Values recorded so far, in call order. */
    public $values = [];

    /** @var bool Simple flag flipped by toggle(). */
    public $toggle = false;

    /**
     * Record one or more values and return $this for chaining.
     */
    public function log(...$values)
    {
        foreach ($values as $value) {
            $this->values[] = $value;
        }

        return $this;
    }

    /**
     * Determine whether a value has been recorded. Uses loose (==)
     * comparison, matching in_array()'s default behavior.
     */
    public function has($value)
    {
        foreach ($this->values as $recorded) {
            if ($recorded == $value) {
                return true;
            }
        }

        return false;
    }

    /**
     * Flip the boolean flag and return $this for chaining.
     */
    public function toggle()
    {
        $this->toggle = $this->toggle ? false : true;

        return $this;
    }
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package legacyscheme
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
)
var (
	// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
	// NOTE: If you are copying this file to start a new api group, STOP! Copy the
	// extensions group instead. This Scheme is special and should appear ONLY in
	// the api group, unless you really know what you're doing.
	// TODO(lavalamp): make the above error impossible.
	Scheme = runtime.NewScheme()

	// Codecs provides access to encoding and decoding for the scheme.
	// It is constructed from Scheme so it stays in sync with the registered types.
	Codecs = serializer.NewCodecFactory(Scheme)

	// ParameterCodec handles versioning of objects that are converted to query parameters.
	ParameterCodec = runtime.NewParameterCodec(Scheme)
)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``MyUser`` auth model.

    NOTE(review): auto-generated Django migration; only comments were added
    here -- the schema operations themselves must stay unchanged.
    """

    # First migration of the app, so there is nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                # Both username and email are unique login identifiers.
                ('username', models.CharField(unique=True, max_length=255)),
                ('email', models.EmailField(unique=True, max_length=255, verbose_name=b'email address')),
                ('first_name', models.CharField(max_length=120, null=True, blank=True)),
                ('last_name', models.CharField(max_length=120, null=True, blank=True)),
                ('is_member', models.BooleanField(default=False, verbose_name=b'Is Paid Member')),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from ..packages.six.moves.urllib.parse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    """Warning category for App Engine URLFetch limitations.

    Issued by :class:`AppEngineManager` when URLFetch is used instead of
    sockets, and when timeout/retry options cannot be fully honored.
    """
    pass
class AppEnginePlatformError(HTTPError):
    """Raised when URLFetch is unavailable or a request exceeds its limits.

    Used for: URLFetch missing, running on Managed VMs, oversized
    requests/responses, and unsupported HTTP methods.
    """
    pass
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation `here
    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.

    Notably it will raise an :class:`AppEnginePlatformError` if:

    * URLFetch is not available.
    * If you attempt to use this on App Engine Flexible, as full socket
      support is available.
    * If a request size is more than 10 megabytes.
    * If a response size is more than 32 megabytes.
    * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """

    def __init__(self, headers=None, retries=None, validate_certificate=True,
                 urlfetch_retries=True):
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")
        if is_prod_appengine_mvms():
            # Managed VMs have full socket support, so URLFetch is not needed.
            # FIX: added the missing space after "AppEngineManager"; the two
            # adjacent literals previously concatenated into
            # "AppEngineManageron Managed VMs".
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager "
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")
        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
            AppEnginePlatformWarning)
        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.urlfetch_retries = urlfetch_retries
        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """Issue a request via URLFetch, translating URLFetch exceptions into
        urllib3 exceptions and applying urllib3 redirect/retry semantics."""
        retries = self._get_retries(retries, redirect)
        try:
            # Only let URLFetch follow redirects itself when redirects are
            # both requested and permitted by the retry configuration.
            follow_redirects = (
                redirect and
                retries.redirect != 0 and
                retries.total)
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=self.urlfetch_retries and follow_redirects,
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)
        except urlfetch.InvalidURLError as e:
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)
        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)
        except urlfetch.ResponseTooLargeError as e:
            # FIX: added the missing space after "supports"; the two adjacent
            # literals previously concatenated into "supportsresponses".
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports "
                "responses up to 32mb in size.", e)
        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)
        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)

        http_response = self._urlfetch_response_to_http_response(
            response, retries=retries, **response_kw)

        # Handle redirect?
        redirect_location = redirect and http_response.get_redirect_location()
        if redirect_location:
            # Check for redirect response
            if (self.urlfetch_retries and retries.raise_on_redirect):
                raise MaxRetryError(self, url, "too many redirects")
            else:
                if http_response.status == 303:
                    # 303 See Other: the follow-up request must be a GET.
                    method = 'GET'
                try:
                    retries = retries.increment(method, url, response=http_response, _pool=self)
                except MaxRetryError:
                    if retries.raise_on_redirect:
                        raise MaxRetryError(self, url, "too many redirects")
                    return http_response
                retries.sleep_for_retry(http_response)
                log.debug("Redirecting %s -> %s", url, redirect_location)
                redirect_url = urljoin(url, redirect_location)
                return self.urlopen(
                    method, redirect_url, body, headers,
                    retries=retries, redirect=redirect,
                    timeout=timeout, **response_kw)

        # Check if we should retry the HTTP response.
        has_retry_after = bool(http_response.getheader('Retry-After'))
        if retries.is_retry(method, http_response.status, has_retry_after):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.debug("Retry: %s", url)
            retries.sleep(http_response)
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)

        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Convert a URLFetch response object into a urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')
            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']
        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )

    def _get_absolute_timeout(self, timeout):
        """Collapse a urllib3 Timeout into URLFetch's single deadline value."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return None  # Defer to URLFetch's default.
        if isinstance(timeout, Timeout):
            if timeout._read is not None or timeout._connect is not None:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total or default URLFetch timeout.",
                    AppEnginePlatformWarning)
            return timeout.total
        return timeout

    def _get_retries(self, retries, redirect):
        """Normalize *retries* to a Retry instance, warning about retry
        options URLFetch cannot honor (connect/read/redirect counts)."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)
        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)
        return retries
def is_appengine():
    """Return True when running in any recognized App Engine environment."""
    return (
        is_local_appengine()
        or is_prod_appengine()
        or is_prod_appengine_mvms()
    )
def is_appengine_sandbox():
    """Return True for sandboxed (non-Managed-VM) App Engine environments."""
    if not is_appengine():
        return False
    return not is_prod_appengine_mvms()
def is_local_appengine():
    """Return True when running under the local App Engine dev server.

    Uses ``os.environ.get`` for SERVER_SOFTWARE so the check cannot raise
    KeyError when APPENGINE_RUNTIME is set but SERVER_SOFTWARE is not
    (previously ``os.environ['SERVER_SOFTWARE']`` could raise).
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
def is_prod_appengine():
    """Return True when running on production App Engine (non-Managed-VM).

    Uses ``os.environ.get`` for SERVER_SOFTWARE so the check cannot raise
    KeyError when APPENGINE_RUNTIME is set but SERVER_SOFTWARE is not
    (previously ``os.environ['SERVER_SOFTWARE']`` could raise).
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Google App Engine/' in os.environ.get('SERVER_SOFTWARE', '') and
            not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
    """Return True when running on an App Engine Managed VM."""
    gae_vm = os.environ.get('GAE_VM', False)
    return gae_vm == 'true'
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax; see comment above).
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
# 0 = Sunday. NOTE(review): many European locales start the week on
# Monday (1) -- confirm this value is intentional for this locale.
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',     # '25.10.2006', '25.10.06'
    '%d-%m-%Y',                 # '25-10-2006'
    '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',        # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',     # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',           # '25.10.2006 14:30'
    '%d.%m.%Y',                 # '25.10.2006'
    '%d.%m.%y %H:%M:%S',        # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',     # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',           # '25.10.06 14:30'
    '%d.%m.%y',                 # '25.10.06'
    '%d-%m-%Y %H:%M:%S',        # '25-10-2006 14:30:59'
    '%d-%m-%Y %H:%M:%S.%f',     # '25-10-2006 14:30:59.000200'
    '%d-%m-%Y %H:%M',           # '25-10-2006 14:30'
    '%d-%m-%Y',                 # '25-10-2006'
    '%d. %m. %Y %H:%M:%S',      # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f',   # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M',         # '25. 10. 2006 14:30'
    '%d. %m. %Y',               # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S',      # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f',   # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M',         # '25. 10. 06 14:30'
    '%d. %m. %y',               # '25. 10. 06'
)
# Number formatting: with these settings 1234.56 renders as "1.234,56".
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Text classification
[[open-in-colab]]
<Youtube id="leNG9fN9FQU"/>
Text classification is a common NLP task that assigns a label or class to text. Some of the largest companies run text classification in production for a wide range of practical applications. One of the most popular forms of text classification is sentiment analysis, which assigns a label like 🙂 positive, 🙁 negative, or 😐 neutral to a sequence of text.
This guide will show you how to:
1. Finetune [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) on the [IMDb](https://huggingface.co/datasets/imdb) dataset to determine whether a movie review is positive or negative.
2. Use your finetuned model for inference.
<Tip>
To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/text-classification).
</Tip>
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers datasets evaluate accelerate
```
We encourage you to login to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to login:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Load IMDb dataset
Start by loading the IMDb dataset from the 🤗 Datasets library:
```py
>>> from datasets import load_dataset
>>> imdb = load_dataset("imdb")
```
Then take a look at an example:
```py
>>> imdb["test"][0]
{
"label": 0,
"text": "I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi' setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV. It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It's really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it's rubbish as they have to always say \"Gene Roddenberry's Earth...\" otherwise people would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.",
}
```
There are two fields in this dataset:
- `text`: the movie review text.
- `label`: a value that is either `0` for a negative review or `1` for a positive review.
## Preprocess
The next step is to load a DistilBERT tokenizer to preprocess the `text` field:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```
Create a preprocessing function to tokenize `text` and truncate sequences to be no longer than DistilBERT's maximum input length:
```py
>>> def preprocess_function(examples):
... return tokenizer(examples["text"], truncation=True)
```
To apply the preprocessing function over the entire dataset, use 🤗 Datasets [`~datasets.Dataset.map`] function. You can speed up `map` by setting `batched=True` to process multiple elements of the dataset at once:
```py
tokenized_imdb = imdb.map(preprocess_function, batched=True)
```
Now create a batch of examples using [`DataCollatorWithPadding`]. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.
```py
>>> from transformers import DataCollatorWithPadding
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
```
## Evaluate
Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
```
Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the accuracy:
```py
>>> import numpy as np
>>> def compute_metrics(eval_pred):
... predictions, labels = eval_pred
... predictions = np.argmax(predictions, axis=1)
... return accuracy.compute(predictions=predictions, references=labels)
```
Your `compute_metrics` function is ready to go now, and you'll return to it when you setup your training.
## Train
Before you start training your model, create a map of the expected ids to their labels with `id2label` and `label2id`:
```py
>>> id2label = {0: "NEGATIVE", 1: "POSITIVE"}
>>> label2id = {"NEGATIVE": 0, "POSITIVE": 1}
```
<Tip>
If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)!
</Tip>
You're ready to start training your model now! Load DistilBERT with [`AutoModelForSequenceClassification`] along with the number of expected labels, and the label mappings:
```py
>>> from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
>>> model = AutoModelForSequenceClassification.from_pretrained(
... "distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
... )
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the accuracy and save the training checkpoint.
2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> training_args = TrainingArguments(
... output_dir="my_awesome_model",
... learning_rate=2e-5,
... per_device_train_batch_size=16,
... per_device_eval_batch_size=16,
... num_train_epochs=2,
... weight_decay=0.01,
... eval_strategy="epoch",
... save_strategy="epoch",
... load_best_model_at_end=True,
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=tokenized_imdb["train"],
... eval_dataset=tokenized_imdb["test"],
... processing_class=tokenizer,
... data_collator=data_collator,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
<Tip>
[`Trainer`] applies dynamic padding by default when you pass a tokenizer to it (here via `processing_class`). In this case, you don't need to specify a data collator explicitly.
</Tip>
Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:
```py
>>> trainer.push_to_hub()
```
<Tip>
For a more in-depth example of how to finetune a model for text classification, take a look at the corresponding
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb).
</Tip>
## Inference
Great, now that you've finetuned a model, you can use it for inference!
Grab some text you'd like to run inference on:
```py
>>> text = "This was a masterpiece. Not completely faithful to the books, but enthralling from beginning to end. Might be my favorite of the three."
```
The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for sentiment analysis with your model, and pass your text to it:
```py
>>> from transformers import pipeline
>>> classifier = pipeline("sentiment-analysis", model="stevhliu/my_awesome_model")
>>> classifier(text)
[{'label': 'POSITIVE', 'score': 0.9994940757751465}]
```
You can also manually replicate the results of the `pipeline` if you'd like:
Tokenize the text and return PyTorch tensors:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
>>> inputs = tokenizer(text, return_tensors="pt")
```
Pass your inputs to the model and return the `logits`:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
>>> with torch.no_grad():
... logits = model(**inputs).logits
```
Get the class with the highest probability, and use the model's `id2label` mapping to convert it to a text label:
```py
>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
'POSITIVE'
``` | unknown | github | https://github.com/huggingface/transformers | docs/source/en/tasks/sequence_classification.md |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"fmt"
"path/filepath"
"strings"
svchost "github.com/hashicorp/terraform-svchost"
"github.com/hashicorp/terraform/internal/command/cliconfig"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// LogoutCommand is a Command implementation which removes stored credentials
// for a remote service host.
type LogoutCommand struct {
Meta
}
// Run implements cli.Command.
func (c *LogoutCommand) Run(args []string) int {
	args = c.Meta.process(args)
	cmdFlags := c.Meta.defaultFlagSet("logout")
	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	// At most one positional argument (the hostname) is accepted.
	args = cmdFlags.Args()
	if len(args) > 1 {
		c.Ui.Error(
			"The logout command expects at most one argument: the host to log out of.")
		cmdFlags.Usage()
		return 1
	}

	var diags tfdiags.Diagnostics

	// Default to the public Terraform Cloud hostname when none is given.
	givenHostname := "app.terraform.io"
	if len(args) != 0 {
		givenHostname = args[0]
	}

	hostname, err := svchost.ForComparison(givenHostname)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid hostname",
			fmt.Sprintf("The given hostname %q is not valid: %s.", givenHostname, err.Error()),
		))
		c.showDiagnostics(diags)
		return 1
	}

	// From now on, since we've validated the given hostname, we should use
	// dispHostname in the UI to ensure we're presenting it in the canonical
	// form, in case that helps users with debugging when things aren't
	// working as expected. (Perhaps the normalization is part of the cause.)
	dispHostname := hostname.ForDisplay()

	creds := c.Services.CredentialsSource().(*cliconfig.CredentialsSource)
	filename, _ := creds.CredentialsFilePath()
	credsCtx := &loginCredentialsContext{
		Location:      creds.HostCredentialsLocation(hostname),
		LocalFilename: filename, // empty in the very unlikely event that we can't select a config directory for this user
		HelperType:    creds.CredentialsHelperType(),
	}

	// Credentials configured directly in a CLI config file can't be removed
	// by this command, so report that as an error instead of doing nothing.
	if credsCtx.Location == cliconfig.CredentialsInOtherFile {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			fmt.Sprintf("Credentials for %s are manually configured", dispHostname),
			"The \"terraform logout\" command cannot log out because credentials for this host are manually configured in a CLI configuration file.\n\nTo log out, revoke the existing credentials and remove that block from the CLI configuration.",
		))
	}

	if diags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}

	// Tell the user where the credentials are being removed from, or return
	// early if there is nothing stored for this host.
	switch credsCtx.Location {
	case cliconfig.CredentialsNotAvailable:
		c.Ui.Output(fmt.Sprintf("No credentials for %s are stored.\n", dispHostname))
		return 0
	case cliconfig.CredentialsViaHelper:
		c.Ui.Output(fmt.Sprintf("Removing the stored credentials for %s from the configured\n%q credentials helper.\n", dispHostname, credsCtx.HelperType))
	case cliconfig.CredentialsInPrimaryFile:
		c.Ui.Output(fmt.Sprintf("Removing the stored credentials for %s from the following file:\n %s\n", dispHostname, credsCtx.LocalFilename))
	}

	// NOTE: this only forgets the token locally; it is not revoked remotely.
	err = creds.ForgetForHost(hostname)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to remove API token",
			fmt.Sprintf("Unable to remove stored API token: %s", err),
		))
	}

	c.showDiagnostics(diags)
	if diags.HasErrors() {
		return 1
	}

	c.Ui.Output(
		fmt.Sprintf(
			c.Colorize().Color(strings.TrimSpace(`
[green][bold]Success![reset] [bold]Terraform has removed the stored API token for %s.[reset]
`)),
			dispHostname,
		) + "\n",
	)

	return 0
}
// Help implements cli.Command.
func (c *LogoutCommand) Help() string {
	defaultFile := c.defaultOutputFile()
	if defaultFile == "" {
		// Because this is just for the help message and it's very unlikely
		// that a user wouldn't have a functioning home directory anyway,
		// we'll just use a placeholder here. The real command has some
		// more complex behavior for this case. This result is not correct
		// on all platforms, but given how unlikely we are to hit this case
		// that seems okay.
		defaultFile = "~/.terraform/credentials.tfrc.json"
	}

	helpText := `
Usage: terraform [global options] logout [hostname]

  Removes locally-stored credentials for specified hostname.

  Note: the API token is only removed from local storage, not destroyed on the
  remote server, so it will remain valid until manually revoked.

  If no hostname is provided, the default hostname is app.terraform.io.

  The credentials are stored at:
    %s
`
	// FIX: defaultFile was computed but never interpolated into helpText, so
	// the help output previously contained a literal "%s" placeholder.
	return strings.TrimSpace(fmt.Sprintf(helpText, defaultFile))
}
// Synopsis implements cli.Command.
func (c *LogoutCommand) Synopsis() string {
	// One-line summary shown in the top-level terraform command listing.
	return "Remove locally-stored credentials for a remote host"
}
func (c *LogoutCommand) defaultOutputFile() string {
if c.CLIConfigDir == "" {
return "" // no default available
}
return filepath.Join(c.CLIConfigDir, "credentials.tfrc.json")
} | go | github | https://github.com/hashicorp/terraform | internal/command/logout.go |
import os, sys
from ConfigParser import SafeConfigParser
# This dequote() business is required for some older versions
# of mysql_config
# This dequote() business is required for some older versions
# of mysql_config
def dequote(s):
    """Strip one layer of matching single or double quotes from *s*.

    Handles the empty string safely: previously ``s[0]`` raised an
    IndexError when *s* was empty. Any other string is returned unchanged
    unless its first and last characters are the same quote character.
    """
    if s and s[0] in "\"'" and s[0] == s[-1]:
        s = s[1:-1]
    return s
def compiler_flag(f):
    """Prefix *f* with a dash, e.g. ``compiler_flag("L")`` -> ``"-L"``."""
    return "-{0}".format(f)
def mysql_config(what):
    """Run ``mysql_config --<what>`` and return its output as a token list.

    If the command exits with a nonzero status, its output is discarded and
    an empty list is returned; an exit status that suggests the command
    could not be run at all raises EnvironmentError.
    """
    from os import popen
    f = popen("%s --%s" % (mysql_config.path, what))
    data = f.read().strip().split()
    ret = f.close()
    if ret:
        # On POSIX, popen().close() returns the child's packed wait status
        # (exit code << 8), so ret/256 recovers the actual exit code.
        if ret/256:
            data = []
        if ret/256 > 1:
            # Exit codes above 1 conventionally mean the shell could not
            # find or execute the command itself.
            raise EnvironmentError("%s not found" % (mysql_config.path,))
    return data
# Default executable name; overridden later (see get_config) when site.cfg
# supplies an explicit mysql_config path.
mysql_config.path = "mysql_config"
def get_config():
    """Collect distutils metadata and build options for the _mysql extension.

    Reads site.cfg (via setup_common.get_metadata_and_options) and queries
    mysql_config to determine the client library, library/include search
    paths and compiler flags. Returns a ``(metadata, ext_options)`` pair for
    setup.py. As a side effect, writes the release file and removes the
    ``version_info`` key from the returned metadata.
    """
    from setup_common import get_metadata_and_options, enabled, create_release_file

    metadata, options = get_metadata_and_options()

    # site.cfg may point at a non-default mysql_config executable.
    if 'mysql_config' in options:
        mysql_config.path = options['mysql_config']

    extra_objects = []
    static = enabled(options, 'static')

    # Choose the client library: embedded server, threadsafe client (with a
    # fallback for installations that ship no _r variant), or plain client.
    if enabled(options, 'embedded'):
        libs = mysql_config("libmysqld-libs")
        client = "mysqld"
    elif enabled(options, 'threadsafe'):
        libs = mysql_config("libs_r")
        client = "mysqlclient_r"
        if not libs:
            libs = mysql_config("libs")
            client = "mysqlclient"
    else:
        libs = mysql_config("libs")
        client = "mysqlclient"

    # Split -L/-l entries out of the linker flags.
    library_dirs = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("L")) ]
    libraries = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("l")) ]

    # Keep cflags except include/library switches; escape % for distutils.
    removable_compile_args = [ compiler_flag(f) for f in "ILl" ]
    extra_compile_args = [ i.replace("%", "%%") for i in mysql_config("cflags")
                           if i[:2] not in removable_compile_args ]

    # Copy the arch flags for linking as well
    extra_link_args = list()
    for i in range(len(extra_compile_args)):
        if extra_compile_args[i] == '-arch':
            extra_link_args += ['-arch', extra_compile_args[i + 1]]

    include_dirs = [ dequote(i[2:])
                     for i in mysql_config('include')
                     if i.startswith(compiler_flag('I')) ]
    if not include_dirs: # fix for MySQL-3.23
        include_dirs = [ dequote(i[2:])
                         for i in mysql_config('cflags')
                         if i.startswith(compiler_flag('I')) ]

    # For a static build, link the archive directly instead of -l<client>.
    if static:
        extra_objects.append(os.path.join(library_dirs[0],'lib%s.a' % client))
        if client in libraries:
            libraries.remove(client)

    name = "MySQL-python"
    if enabled(options, 'embedded'):
        name = name + "-embedded"
    metadata['name'] = name

    # Exposed to the C extension as preprocessor macros.
    define_macros = [
        ('version_info', metadata['version_info']),
        ('__version__', metadata['version']),
    ]
    create_release_file(metadata)
    del metadata['version_info']
    ext_options = dict(
        name = "_mysql",
        library_dirs = library_dirs,
        libraries = libraries,
        extra_compile_args = extra_compile_args,
        extra_link_args = extra_link_args,
        include_dirs = include_dirs,
        extra_objects = extra_objects,
        define_macros = define_macros,
    )
    return metadata, ext_options
if __name__ == "__main__":
    # This module only provides helpers for setup.py; running it directly
    # is a user error, so just warn on stderr.
    warning = """You shouldn't be running this directly; it is used by setup.py."""
    sys.stderr.write(warning)
from __future__ import absolute_import
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import tzinfo
from .models import (Worker, Article, Party, Event, Department,
BrokenUnicodeMethod, NonAutoPK)
class ModelTests(TestCase):
    """Assorted ORM regression tests: related-field lookups, unicode
    handling, date/datetime edge cases and non-default primary keys."""

    # The bug is that the following queries would raise:
    # "TypeError: Related Field has invalid lookup: gte"
    def test_related_gte_lookup(self):
        """
        Regression test for #10153: foreign key __gte lookups.
        """
        Worker.objects.filter(department__gte=0)

    def test_related_lte_lookup(self):
        """
        Regression test for #10153: foreign key __lte lookups.
        """
        Worker.objects.filter(department__lte=0)

    def test_empty_choice(self):
        # NOTE: Part of the regression test here is merely parsing the model
        # declaration. The verbose_name, in particular, did not always work.
        a = Article.objects.create(
            headline="Look at me!", pub_date=datetime.datetime.now()
        )
        # An empty choice field should return None for the display name.
        self.assertIs(a.get_status_display(), None)

        # Empty strings should be returned as Unicode
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(a.misc_data, u'')
        self.assertIs(type(a.misc_data), unicode)

    def test_long_textfield(self):
        # TextFields can hold more than 4000 characters (this was broken in
        # Oracle).
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text = "ABCDE" * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 5000)

    def test_date_lookup(self):
        # Regression test for #659
        Party.objects.create(when=datetime.datetime(1999, 12, 31))
        Party.objects.create(when=datetime.datetime(1998, 12, 31))
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=2), []
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=1), [
                datetime.date(1999, 1, 1)
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=12), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1998), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #8510: date lookup values given as strings
        # must work the same as integers.
        self.assertQuerysetEqual(
            Party.objects.filter(when__day="31"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month="12"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year="1998"), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )

    def test_date_filter_null(self):
        # Date filtering was failing with NULL date values in SQLite
        # (regression test for #3501, amongst other things).
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create()
        p = Party.objects.filter(when__month=1)[0]
        self.assertEqual(p.when, datetime.date(1999, 1, 1))
        self.assertQuerysetEqual(
            Party.objects.filter(pk=p.pk).dates("when", "month"), [
                1
            ],
            attrgetter("month")
        )

    def test_get_next_prev_by_field(self):
        # Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored on the database
        #
        # It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeFields supports usecs, but DateTimeField.to_python
        # didn't recognize them. (Note that
        # Model._get_next_or_previous_by_FIELD coerces values to strings)
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
        e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
        self.assertEqual(
            e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
        )
        self.assertEqual(
            e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
        )

    def test_primary_key_foreign_key_types(self):
        # Check Department and Worker (non-default PK type)
        d = Department.objects.create(id=10, name="IT")
        w = Worker.objects.create(department=d, name="Full-time")
        self.assertEqual(unicode(w), "Full-time")

    def test_broken_unicode(self):
        # Models with broken unicode methods should still have a printable repr
        b = BrokenUnicodeMethod.objects.create(name="Jerry")
        self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")

    @skipUnlessDBFeature("supports_timezones")
    def test_timezones(self):
        # Saving an updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saving should work without
        # crashing. It's not rocket science.
        dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(600))
        dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=tzinfo.FixedOffset(600))
        obj = Article.objects.create(
            headline="A headline", pub_date=dt1, article_text="foo"
        )
        obj.pub_date = dt2
        obj.save()
        self.assertEqual(
            Article.objects.filter(headline="A headline").update(pub_date=dt1),
            1
        )
class ModelValidationTest(TestCase):
    """Uniqueness validation for models with a manually-assigned primary key."""

    def test_pk_validation(self):
        # Persist one instance, then build (without saving) a second instance
        # that reuses the same primary-key value; validate_unique must reject it.
        NonAutoPK.objects.create(name="one")
        duplicate = NonAutoPK(name="one")
        self.assertRaises(ValidationError, duplicate.validate_unique)
class EvaluateMethodTest(TestCase):
    """
    Regression test for #13640: cannot filter by objects with 'evaluate' attr
    """

    def test_model_with_evaluate_method(self):
        """
        Ensures that you can filter by objects that have an 'evaluate' attr
        """
        # Give the instance an 'evaluate' attribute, which historically
        # confused the ORM's query-expression detection, then filter by it.
        department = Department.objects.create(pk=1, name='abc')
        department.evaluate = 'abc'
        Worker.objects.filter(department=department)
/* We need this macro to access core_apply_sparse_checkout */
#define USE_THE_REPOSITORY_VARIABLE
#include "builtin.h"
#include "git-compat-util.h"
#include "config.h"
#include "parse-options.h"
#include "repository.h"
#include "commit.h"
#include "dir.h"
#include "environment.h"
#include "hex.h"
#include "tree.h"
#include "tree-walk.h"
#include "object.h"
#include "odb.h"
#include "oid-array.h"
#include "oidset.h"
#include "promisor-remote.h"
#include "strmap.h"
#include "string-list.h"
#include "revision.h"
#include "trace2.h"
#include "progress.h"
#include "packfile.h"
#include "path-walk.h"
/* Usage strings shown by "git backfill -h". */
static const char * const builtin_backfill_usage[] = {
	N_("git backfill [--min-batch-size=<n>] [--[no-]sparse]"),
	NULL
};
/* State threaded through the path-walk callbacks below. */
struct backfill_context {
	struct repository *repo;
	/* Missing blob OIDs collected so far; flushed in batches. */
	struct oid_array current_batch;
	/* Trigger a download once at least this many OIDs are collected. */
	size_t min_batch_size;
	/* When non-zero, restrict to paths within the sparse-checkout. */
	int sparse;
};
/* Release any OIDs still held in the context. */
static void backfill_context_clear(struct backfill_context *ctx)
{
	oid_array_clear(&ctx->current_batch);
}
/*
 * Fetch all OIDs in the current batch from the promisor remote(s),
 * then reset the batch for reuse.
 */
static void download_batch(struct backfill_context *ctx)
{
	promisor_remote_get_direct(ctx->repo,
				   ctx->current_batch.oid,
				   ctx->current_batch.nr);
	oid_array_clear(&ctx->current_batch);

	/*
	 * We likely have a new packfile. Add it to the packed list to
	 * avoid possible duplicate downloads of the same objects.
	 */
	odb_reprepare(ctx->repo->objects);
}
/*
 * Path-walk callback: collect blob OIDs that are missing locally into the
 * current batch, flushing a download whenever the batch fills up.
 * Always returns 0 so the walk continues.
 */
static int fill_missing_blobs(const char *path UNUSED,
			      struct oid_array *list,
			      enum object_type type,
			      void *data)
{
	struct backfill_context *ctx = data;

	if (type != OBJ_BLOB)
		return 0;

	for (size_t i = 0; i < list->nr; i++) {
		/* FOR_PREFETCH avoids lazily fetching during the existence check. */
		if (!odb_has_object(ctx->repo->objects, &list->oid[i],
				    OBJECT_INFO_FOR_PREFETCH))
			oid_array_append(&ctx->current_batch, &list->oid[i]);
	}

	if (ctx->current_batch.nr >= ctx->min_batch_size)
		download_batch(ctx);

	return 0;
}
/*
 * Walk blobs reachable from HEAD (optionally restricted to the current
 * sparse-checkout) and download any that are missing from the local
 * object database. Returns 0 on success, non-zero on error.
 */
static int do_backfill(struct backfill_context *ctx)
{
	struct rev_info revs;
	struct path_walk_info info = PATH_WALK_INFO_INIT;
	int ret;

	if (ctx->sparse) {
		CALLOC_ARRAY(info.pl, 1);
		if (get_sparse_checkout_patterns(info.pl)) {
			path_walk_info_clear(&info);
			return error(_("problem loading sparse-checkout"));
		}
	}

	repo_init_revisions(ctx->repo, &revs, "");
	handle_revision_arg("HEAD", &revs, 0, 0);

	/* Only blobs are reported to the callback. */
	info.blobs = 1;
	info.tags = info.commits = info.trees = 0;

	info.revs = &revs;
	info.path_fn = fill_missing_blobs;
	info.path_fn_data = ctx;

	ret = walk_objects_by_path(&info);

	/* Download the objects that did not fill a batch. */
	if (!ret)
		download_batch(ctx);

	path_walk_info_clear(&info);
	release_revisions(&revs);
	return ret;
}
/*
 * Entry point for "git backfill": download missing blobs reachable from
 * HEAD from the promisor remote, batching requests for efficiency.
 */
int cmd_backfill(int argc, const char **argv, const char *prefix, struct repository *repo)
{
	int result;
	struct backfill_context ctx = {
		.repo = repo,
		.current_batch = OID_ARRAY_INIT,
		.min_batch_size = 50000,
		/*
		 * -1 means "not set on the command line". Previously this was
		 * initialized to 0, which made the "ctx.sparse < 0" fallback
		 * below dead code and core.sparseCheckout was never honored
		 * as the default (the whole point of defining
		 * USE_THE_REPOSITORY_VARIABLE at the top of this file).
		 */
		.sparse = -1,
	};
	struct option options[] = {
		OPT_UNSIGNED(0, "min-batch-size", &ctx.min_batch_size,
			     N_("Minimum number of objects to request at a time")),
		OPT_BOOL(0, "sparse", &ctx.sparse,
			 N_("Restrict the missing objects to the current sparse-checkout")),
		OPT_END(),
	};

	show_usage_with_options_if_asked(argc, argv,
					 builtin_backfill_usage, options);

	argc = parse_options(argc, argv, prefix, options, builtin_backfill_usage,
			     0);

	repo_config(repo, git_default_config, NULL);

	/* Neither --sparse nor --no-sparse given: follow core.sparseCheckout. */
	if (ctx.sparse < 0)
		ctx.sparse = core_apply_sparse_checkout;

	result = do_backfill(&ctx);
	backfill_context_clear(&ctx);
	return result;
}
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import errno
import hashlib
import json
import os
import shutil
import tempfile as sys_tempfile
import unittest
from io import BytesIO
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from django.utils.http import urlquote
from django.utils.six import PY2, StringIO
from . import uploadhandler
from .models import FileModel
# Filename containing Chinese characters and an accented char, used by the
# unicode-filename regression tests below.
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
# All uploads during these tests land in a throwaway temp directory.
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE_CLASSES=[])
class FileUploadTests(TestCase):
    """End-to-end tests of multipart file uploads through the test client:
    encodings, dangerous filenames, custom upload handlers and file-descriptor
    lifecycle."""

    @classmethod
    def setUpClass(cls):
        super(FileUploadTests, cls).setUpClass()
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(MEDIA_ROOT)
        super(FileUploadTests, cls).tearDownClass()

    def test_simple_upload(self):
        with open(__file__, 'rb') as fp:
            post_data = {
                'name': 'Ringo',
                'file_field': fp,
            }
            response = self.client.post('/upload/', post_data)
        self.assertEqual(response.status_code, 200)

    def test_large_upload(self):
        file = tempfile.NamedTemporaryFile
        with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
            file1.write(b'a' * (2 ** 21))
            file1.seek(0)

            file2.write(b'a' * (10 * 2 ** 20))
            file2.seek(0)

            post_data = {
                'name': 'Ringo',
                'file_field1': file1,
                'file_field2': file2,
            }

            # Attach a sha1 digest of every value so the /verify/ view can
            # confirm nothing was corrupted in transit.
            for key in list(post_data):
                try:
                    post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
                    post_data[key].seek(0)
                except AttributeError:
                    post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()

            response = self.client.post('/verify/', post_data)

            self.assertEqual(response.status_code, 200)

    def _test_base64_upload(self, content, encode=base64.b64encode):
        # Build a raw multipart body whose file part is base64-encoded, then
        # check the /echo_content/ view decodes it back to *content*.
        payload = client.FakePayload("\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="test.txt"',
            'Content-Type: application/octet-stream',
            'Content-Transfer-Encoding: base64',
            '']))
        payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
        payload.write('--' + client.BOUNDARY + '--\r\n')
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo_content/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        received = json.loads(response.content.decode('utf-8'))

        self.assertEqual(received['file'], content)

    def test_base64_upload(self):
        self._test_base64_upload("This data will be transmitted base64-encoded.")

    def test_big_base64_upload(self):
        self._test_base64_upload("Big data" * 68000)  # > 512Kb

    def test_big_base64_newlines_upload(self):
        self._test_base64_upload(
            # encodestring is a deprecated alias on Python 3
            "Big data" * 68000, encode=base64.encodestring if PY2 else base64.encodebytes)

    def test_unicode_file_name(self):
        tdir = sys_tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tdir, True)

        # This file contains chinese symbols and an accented char in the name.
        with open(os.path.join(tdir, UNICODE_FILENAME), 'w+b') as file1:
            file1.write(b'b' * (2 ** 10))
            file1.seek(0)

            post_data = {
                'file_unicode': file1,
            }

            response = self.client.post('/unicode_name/', post_data)

        self.assertEqual(response.status_code, 200)

    def test_unicode_file_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        payload = client.FakePayload()
        payload.write('\r\n'.join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % urlquote(UNICODE_FILENAME),
            'Content-Type: application/octet-stream',
            '',
            'You got pwnd.\r\n',
            '\r\n--' + client.BOUNDARY + '--\r\n'
        ]))

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

    def test_unicode_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        payload = client.FakePayload()
        payload.write(
            '\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % urlquote(
                    UNICODE_FILENAME
                ),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n',
                '\r\n--' + client.BOUNDARY + '--\r\n'
            ])
        )

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

    def test_dangerous_file_names(self):
        """Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This similar to what an attacker would need to do when
        # trying such an attack.
        scary_file_names = [
            "/tmp/hax0rd.txt",  # Absolute path, *nix-style.
            "C:\\Windows\\hax0rd.txt",  # Absolute path, win-style.
            "C:/Windows/hax0rd.txt",  # Absolute path, broken-style.
            "\\tmp\\hax0rd.txt",  # Absolute path, broken in a different way.
            "/tmp\\hax0rd.txt",  # Absolute path, broken by mixing.
            "subdir/hax0rd.txt",  # Descendant path, *nix-style.
            "subdir\\hax0rd.txt",  # Descendant path, win-style.
            "sub/dir\\hax0rd.txt",  # Descendant path, mixed.
            "../../hax0rd.txt",  # Relative path, *nix-style.
            "..\\..\\hax0rd.txt",  # Relative path, win-style.
            "../..\\hax0rd.txt"  # Relative path, mixed.
        ]

        payload = client.FakePayload()
        for i, name in enumerate(scary_file_names):
            payload.write('\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n'
            ]))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)

        # The filenames should have been sanitized by the time it got to the view.
        received = json.loads(response.content.decode('utf-8'))
        for i, name in enumerate(scary_file_names):
            got = received["file%s" % i]
            self.assertEqual(got, "hax0rd.txt")

    def test_filename_overflow(self):
        """File names over 256 characters (dangerous on some platforms) get fixed up."""
        long_str = 'f' * 300
        cases = [
            # field name, filename, expected
            ('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
            ('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
            ('no_extension', long_str, long_str[:255]),
            ('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
            ('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
        ]
        payload = client.FakePayload()
        for name, filename, _ in cases:
            payload.write("\r\n".join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="{}"; filename="{}"',
                'Content-Type: application/octet-stream',
                '',
                'Oops.',
                ''
            ]).format(name, filename))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        result = json.loads(response.content.decode('utf-8'))
        for name, _, expected in cases:
            got = result[name]
            self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
            self.assertLess(len(got), 256,
                            "Got a long file name (%s characters)." % len(got))

    def test_file_content(self):
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'no content')
            no_content_type.seek(0)

            simple_file.write(b'text content')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain'

            string_io = StringIO('string content')
            bytes_io = BytesIO(b'binary content')

            response = self.client.post('/echo_content/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
                'string': string_io,
                'binary': bytes_io,
            })
            received = json.loads(response.content.decode('utf-8'))
            self.assertEqual(received['no_content_type'], 'no content')
            self.assertEqual(received['simple_file'], 'text content')
            self.assertEqual(received['string'], 'string content')
            self.assertEqual(received['binary'], 'binary content')

    def test_content_type_extra(self):
        """Uploaded files may have content type parameters available."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'something')
            no_content_type.seek(0)

            simple_file.write(b'something')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain; test-key=test_value'

            response = self.client.post('/echo_content_type_extra/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
            })
            received = json.loads(response.content.decode('utf-8'))
            self.assertEqual(received['no_content_type'], {})
            self.assertEqual(received['simple_file'], {'test-key': 'test_value'})

    def test_truncated_multipart_handled_gracefully(self):
        """
        If passed an incomplete multipart message, MultiPartParser does not
        attempt to read beyond the end of the stream, and simply will handle
        the part that can be parsed gracefully.
        """
        payload_str = "\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="foo.txt"',
            'Content-Type: application/octet-stream',
            '',
            'file contents'
            '--' + client.BOUNDARY + '--',
            '',
        ])
        # Drop the last 10 bytes to simulate a truncated request body.
        payload = client.FakePayload(payload_str[:-10])
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        got = json.loads(self.client.request(**r).content.decode('utf-8'))
        self.assertEqual(got, {})

    def test_empty_multipart_handled_gracefully(self):
        """
        If passed an empty multipart message, MultiPartParser will return
        an empty QueryDict.
        """
        r = {
            'CONTENT_LENGTH': 0,
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': client.FakePayload(b''),
        }
        got = json.loads(self.client.request(**r).content.decode('utf-8'))
        self.assertEqual(got, {})

    def test_custom_upload_handler(self):
        file = tempfile.NamedTemporaryFile
        with file() as smallfile, file() as bigfile:
            # A small file (under the 5M quota)
            smallfile.write(b'a' * (2 ** 21))
            smallfile.seek(0)

            # A big file (over the quota)
            bigfile.write(b'a' * (10 * 2 ** 20))
            bigfile.seek(0)

            # Small file posting should work.
            response = self.client.post('/quota/', {'f': smallfile})
            got = json.loads(response.content.decode('utf-8'))
            self.assertIn('f', got)

            # Large files don't go through.
            response = self.client.post("/quota/", {'f': bigfile})
            got = json.loads(response.content.decode('utf-8'))
            self.assertNotIn('f', got)

    def test_broken_custom_upload_handler(self):
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'a' * (2 ** 21))
            file.seek(0)

            # AttributeError: You cannot alter upload handlers after the upload has been processed.
            self.assertRaises(
                AttributeError,
                self.client.post,
                '/quota/broken/',
                {'f': file}
            )

    def test_fileupload_getlist(self):
        file = tempfile.NamedTemporaryFile
        with file() as file1, file() as file2, file() as file2a:
            file1.write(b'a' * (2 ** 23))
            file1.seek(0)

            file2.write(b'a' * (2 * 2 ** 18))
            file2.seek(0)

            file2a.write(b'a' * (5 * 2 ** 20))
            file2a.seek(0)

            response = self.client.post('/getlist_count/', {
                'file1': file1,
                'field1': 'test',
                'field2': 'test3',
                'field3': 'test5',
                'field4': 'test6',
                'field5': 'test7',
                'file2': (file2, file2a)
            })
            got = json.loads(response.content.decode('utf-8'))
            self.assertEqual(got.get('file1'), 1)
            self.assertEqual(got.get('file2'), 2)

    def test_fileuploads_closed_at_request_end(self):
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/t/', {
                'file': f1,
                'file2': (f2a, f2b),
            })

        request = response.wsgi_request
        # Check that the files got actually parsed.
        self.assertTrue(hasattr(request, '_files'))

        file = request._files['file']
        self.assertTrue(file.closed)

        files = request._files.getlist('file2')
        self.assertTrue(files[0].closed)
        self.assertTrue(files[1].closed)

    def test_no_parsing_triggered_by_fd_closing(self):
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/f/', {
                'file': f1,
                'file2': (f2a, f2b),
            })

        request = response.wsgi_request
        # Check that the fd closing logic doesn't trigger parsing of the stream
        self.assertFalse(hasattr(request, '_files'))

    def test_file_error_blocking(self):
        """
        The server should not block when there are upload errors (bug #8622).
        This can happen if something -- i.e. an exception handler -- tries to
        access POST while handling an error in parsing POST. This shouldn't
        cause an infinite loop!
        """
        class POSTAccessingHandler(client.ClientHandler):
            """A handler that'll access POST during an exception."""
            def handle_uncaught_exception(self, request, resolver, exc_info):
                ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info)
                request.POST  # evaluate
                return ret

        # Maybe this is a little more complicated that it needs to be; but if
        # the django.test.client.FakePayload.read() implementation changes then
        # this test would fail. So we need to know exactly what kind of error
        # it raises when there is an attempt to read more than the available bytes:
        try:
            client.FakePayload(b'a').read(2)
        except Exception as err:
            reference_error = err

        # install the custom handler that tries to access request.POST
        self.client.handler = POSTAccessingHandler()

        with open(__file__, 'rb') as fp:
            post_data = {
                'name': 'Ringo',
                'file_field': fp,
            }
            try:
                self.client.post('/upload_errors/', post_data)
            except reference_error.__class__ as err:
                self.assertFalse(
                    str(err) == str(reference_error),
                    "Caught a repeated exception that'll cause an infinite loop in file uploads."
                )
            except Exception as err:
                # CustomUploadError is the error that should have been raised
                self.assertEqual(err.__class__, uploadhandler.CustomUploadError)

    def test_filename_case_preservation(self):
        """
        The storage backend shouldn't mess with the case of the filenames
        uploaded.
        """
        # Synthesize the contents of a file upload with a mixed case filename
        # so we don't have to carry such a file in the Django tests source code
        # tree.
        vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
        post_data = [
            '--%(boundary)s',
            'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
            'Content-Type: application/octet-stream',
            '',
            'file contents\n'
            '',
            '--%(boundary)s--\r\n',
        ]
        response = self.client.post(
            '/filename_case/',
            '\r\n'.join(post_data) % vars,
            'multipart/form-data; boundary=%(boundary)s' % vars
        )
        self.assertEqual(response.status_code, 200)
        id = int(response.content)
        obj = FileModel.objects.get(pk=id)
        # The name of the file uploaded and the file stored in the server-side
        # shouldn't differ.
        self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
    """
    Tests for error handling during directory creation
    via _save_FIELD_file (ticket #6450)
    """
    @classmethod
    def setUpClass(cls):
        super(DirectoryCreationTests, cls).setUpClass()
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(MEDIA_ROOT)
        super(DirectoryCreationTests, cls).tearDownClass()

    def setUp(self):
        # An unsaved model instance; each test saves a file onto its field.
        self.obj = FileModel()

    def test_readonly_root(self):
        """Permission errors are not swallowed"""
        # Make MEDIA_ROOT read-only so directory creation fails with EACCES.
        os.chmod(MEDIA_ROOT, 0o500)
        self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
        try:
            self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)
        except OSError as err:
            self.assertEqual(err.errno, errno.EACCES)
        except Exception:
            self.fail("OSError [Errno %s] not raised." % errno.EACCES)

    def test_not_a_directory(self):
        """The correct IOError is raised when the upload directory name exists but isn't a directory"""
        # Create a file with the upload directory name
        open(UPLOAD_TO, 'wb').close()
        self.addCleanup(os.remove, UPLOAD_TO)
        with self.assertRaises(IOError) as exc_info:
            with SimpleUploadedFile('foo.txt', b'x') as file:
                self.obj.testfile.save('foo.txt', file, save=False)
        # The test needs to be done on a specific string as IOError
        # is raised even without the patch (just not early enough)
        self.assertEqual(exc_info.exception.args[0],
                         "%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
    """Unit tests for MultiPartParser construction and RFC 2231 header parsing."""

    def test_empty_upload_handlers(self):
        # We're not actually parsing here; just checking if the parser properly
        # instantiates with empty upload handlers.
        MultiPartParser({
            'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
            'CONTENT_LENGTH': '1'
        }, StringIO('x'), [], 'utf-8')

    def test_rfc2231_parsing(self):
        # (raw header line, expected decoded title) pairs covering us-ascii,
        # UTF-8 and latin-1 extended-value encodings.
        test_data = (
            (b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
             "This is ***fun***"),
            (b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
             "foo-ä.html"),
            (b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
             "foo-ä.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)

    def test_rfc2231_wrong_title(self):
        """
        Test wrongly formatted RFC 2231 headers (missing double single quotes).
        Parsing should not crash (#24209).
        """
        test_data = (
            (b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
             b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
            (b"Content-Type: application/x-stuff; title*='foo.html",
             b"'foo.html"),
            (b"Content-Type: application/x-stuff; title*=bar.html",
             b"bar.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package consul
import (
"context"
"fmt"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
const (
	// SecretTokenType is the logical.Secret type name under which
	// generated Consul ACL tokens are leased by this backend.
	SecretTokenType = "token"
)
// secretToken returns the framework.Secret definition for Consul ACL tokens
// generated by this backend, wiring up the renew and revoke handlers below.
func secretToken(b *backend) *framework.Secret {
	return &framework.Secret{
		Type: SecretTokenType,
		Fields: map[string]*framework.FieldSchema{
			"token": {
				Type:        framework.TypeString,
				Description: "Request token",
			},
		},

		Renew:  b.secretTokenRenew,
		Revoke: b.secretTokenRevoke,
	}
}
// secretTokenRenew extends the lease on a generated Consul token. The TTLs
// are re-read from the backing role so role changes take effect at renewal
// time; if the role cannot be identified on the secret the lease is renewed
// with its existing settings.
func (b *backend) secretTokenRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	resp := &logical.Response{Secret: req.Secret}

	roleRaw, ok := req.Secret.InternalData["role"]
	if !ok {
		// Older leases may not record the role name; renew as-is.
		return resp, nil
	}

	role, ok := roleRaw.(string)
	if !ok {
		return resp, nil
	}

	entry, err := req.Storage.Get(ctx, "policy/"+role)
	if err != nil {
		return nil, fmt.Errorf("error retrieving role: %w", err)
	}
	if entry == nil {
		return logical.ErrorResponse(fmt.Sprintf("issuing role %q not found", role)), nil
	}

	var result roleConfig
	if err := entry.DecodeJSON(&result); err != nil {
		return nil, err
	}

	// Apply the role's current TTLs to the renewed lease.
	resp.Secret.TTL = result.TTL
	resp.Secret.MaxTTL = result.MaxTTL

	b.TryRecordObservationWithRequest(ctx, req, ObservationTypeConsulCredentialRenew, map[string]interface{}{
		"role_name":  role,
		"ttl":        result.TTL.String(),
		"max_ttl":    result.MaxTTL.String(),
		"token_type": result.TokenType,
		"local":      result.Local,
	})

	return resp, nil
}
// secretTokenRevoke deletes a leased Consul ACL token. It distinguishes
// legacy (pre Consul 1.4) tokens from policy-based tokens via the "version"
// recorded in the secret's internal data, and passes along any Consul
// namespace/partition supplied in the revocation request data.
func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	c, userErr, intErr := b.client(ctx, req.Storage)
	if intErr != nil {
		return nil, intErr
	}
	if userErr != nil {
		// Returning logical.ErrorResponse from revocation function is risky
		return nil, userErr
	}

	tokenRaw, ok := req.Secret.InternalData["token"]
	if !ok {
		// We return nil here because this is a pre-0.5.3 problem and there is
		// nothing we can do about it. We already can't revoke the lease
		// properly if it has been renewed and this is documented pre-0.5.3
		// behavior with a security bulletin about it.
		return nil, nil
	}

	var version string
	versionRaw, ok := req.Secret.InternalData["version"]
	if ok {
		version = versionRaw.(string)
	}

	// Extract Consul Namespace and Partition info from secret
	var revokeWriteOptions *api.WriteOptions
	var namespace, partition string

	namespaceRaw, ok := req.Data["consul_namespace"]
	if ok {
		namespace = namespaceRaw.(string)
	}
	partitionRaw, ok := req.Data["partition"]
	if ok {
		partition = partitionRaw.(string)
	}

	revokeWriteOptions = &api.WriteOptions{
		Namespace: namespace,
		Partition: partition,
	}

	switch version {
	case "":
		// Pre 1.4 tokens
		token := tokenRaw.(string)
		_, err := c.ACL().Destroy(token, nil)
		if err != nil {
			return nil, err
		}
		b.TryRecordObservationWithRequest(ctx, req, ObservationTypeConsulCredentialRevoke, map[string]interface{}{
			"role_name": req.Secret.InternalData["role"],
		})
	case tokenPolicyType:
		token := tokenRaw.(string)
		_, err := c.ACL().TokenDelete(token, revokeWriteOptions)
		if err != nil {
			return nil, err
		}
		b.TryRecordObservationWithRequest(ctx, req, ObservationTypeConsulCredentialRevoke, map[string]interface{}{
			"role_name": req.Secret.InternalData["role"],
		})
	default:
		return nil, fmt.Errorf("Invalid version string in data: %s", version)
	}

	return nil, nil
}
/* Styles for the menubar example: a centered horizontal menu bar with
   bordered popup menus. Material Symbols supplies the menu-item icons. */
@import url('https://fonts.googleapis.com/icon?family=Material+Symbols+Outlined');

/* Component host: centers the menubar and derives a soft border color
   from the page's contrast variables. */
:host {
  display: flex;
  justify-content: center;
  font-family: var(--inter-font);
  --border-color: color-mix(in srgb, var(--full-contrast) 20%, var(--page-background));
}

/* The horizontal menu bar container. */
[ngMenuBar] {
  display: flex;
  gap: 0.25rem;
  padding: 0.25rem;
  border-radius: 0.5rem;
  border: 1px solid var(--border-color);
  background-color: var(--page-background);
}

/* A popup menu panel; shares the bar's bordered look. */
[ngMenu] {
  margin: 0;
  width: 15rem;
  padding: 0.25rem;
  border-radius: 0.5rem;
  border: 1px solid var(--border-color);
  background-color: var(--page-background);
}

/* Menus stay in the DOM and are toggled via the data-visible attribute. */
[ngMenu][data-visible='false'] {
  display: none;
}

/* A single menu entry: icon, label, optional submenu arrow in a row. */
[ngMenuItem] {
  outline: none;
  display: flex;
  cursor: pointer;
  align-items: center;
  gap: 0.5rem;
  padding: 0.5rem;
  font-size: 0.875rem;
  border-radius: 0.25rem;
}

/* Highlight for the currently active item. */
[ngMenuItem][data-active='true'] {
  background: color-mix(in srgb, var(--border-color) 10%, transparent);
}

/* Visible focus ring for keyboard navigation (overrides outline: none). */
[ngMenuItem]:focus {
  outline: 2px solid var(--vivid-pink);
}

[ngMenuItem] .icon {
  opacity: 0.875;
  font-size: 1.25rem;
}

/* The text label fills the remaining row width. */
[ngMenuItem] .label {
  flex: 1;
  opacity: 0.875;
  font-size: 0.875rem;
}

/* Dim the submenu arrow unless that submenu is expanded. */
[ngMenuItem]:not([aria-expanded='true']) .arrow {
  opacity: 0.5;
}

/* Thin divider between menu sections. */
[ngMenu] .separator {
  border-top: 1px solid var(--border-color);
  margin: 0.25rem 0;
  opacity: 0.25;
}
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import re
import sys
import getopt
import xml.dom.minidom
import logging
import requests
logger = logging.getLogger(__name__)
base_url = 'http://trips.ihmc.us/parser/cgi/'
def send_query(text, service_endpoint='drum', query_args=None):
    """Send *text* to a TRIPS web-service endpoint and return the raw HTML.

    Parameters
    ----------
    text : str
        The text to be processed.
    service_endpoint : Optional[str]
        One of "drum" (default), "drum-dev" (nightly build), "cwms" or
        "cwmsreader". Anything else is rejected with an empty result.
    query_args : Optional[dict]
        Extra query parameters sent with the request. NOTE: this dict is
        updated in place with the 'input' key (preserved behavior).

    Returns
    -------
    str
        The HTML returned by the web service, or '' on any failure.
    """
    if service_endpoint not in ('drum', 'drum-dev', 'cwms', 'cwmsreader'):
        logger.error('Invalid service endpoint: %s' % service_endpoint)
        return ''
    url = base_url + service_endpoint

    if query_args is None:
        query_args = {}
    query_args.update({'input': text})

    # Parsing can be slow for long inputs, hence the generous timeout.
    res = requests.get(url, query_args, timeout=3600)
    if res.status_code != 200:
        logger.error('Problem with TRIPS query: status code %s' %
                     res.status_code)
        return ''
    # Gets unicode content
    return res.text
def get_xml(html, content_tag='ekb', fail_if_empty=False):
    """Extract the content XML from the HTML output of the TRIPS web service.

    Parameters
    ----------
    html : str
        The HTML output from the TRIPS web service.
    content_tag : str
        The xml tag used to label the content. Default is 'ekb'.
    fail_if_empty : bool
        If True, and if the xml content found is an empty string, raise an
        exception. Default is False.

    Returns
    -------
    str
        The extraction knowledge base (e.g. EKB) XML that contains the event
        and term extractions, wrapped in an XML declaration and the content
        tag (with any xmlns attributes preserved).
    """
    # Find the first <content_tag ...>...</content_tag> span; DOTALL lets
    # the body match across line breaks.
    cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag},
                      html, re.MULTILINE | re.DOTALL)
    if cont:
        # Collapse the tag body onto a single line.
        events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()])
        if 'xmlns' in cont[0][0]:
            # Keep the namespace attributes. Re-add the separating space
            # between the tag name and the attributes: the per-line strip()
            # below removes it, which previously produced a malformed
            # opening tag like <ekbxmlns="...">.
            meta = ' ' + ' '.join([l.strip() for l in cont[0][0].splitlines()])
        else:
            meta = ''
    else:
        events_terms = ''
        meta = ''
    if fail_if_empty and events_terms == '':
        # Raise explicitly rather than via `assert`, which is stripped when
        # Python runs with -O. AssertionError is kept for backward
        # compatibility with existing callers.
        raise AssertionError(
            "Got empty string for events content from html:\n%s" % html)
    header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>'
              % (content_tag, meta))
    footer = '</%s>' % content_tag
    return header + events_terms.replace('\n', '') + footer
def save_xml(xml_str, file_name, pretty=True):
    """Save the TRIPS EKB XML in a file.

    Parameters
    ----------
    xml_str : str
        The TRIPS EKB XML string to be saved.
    file_name : str
        The name of the file to save the result in. If the file cannot be
        opened, an error is logged and the function returns silently.
    pretty : Optional[bool]
        If True, the XML is pretty printed.
    """
    try:
        fh = open(file_name, 'wt')
    except IOError:
        logger.error('Could not open %s for writing.' % file_name)
        return
    # Use the handle as a context manager so the file is closed even if
    # parsing or writing below raises (the original leaked it on error).
    with fh:
        if pretty:
            xmld = xml.dom.minidom.parseString(xml_str)
            fh.write(xmld.toprettyxml())
        else:
            fh.write(xml_str)
if __name__ == '__main__':
    # Command-line driver: parse either a literal string or a text file
    # through TRIPS and write the resulting EKB XML to an output file.
    filemode = False
    text = 'Active BRAF phosphorylates MEK1 at Ser222.'
    outfile_name = 'braf_test.xml'
    opts, extraparams = getopt.getopt(sys.argv[1:], 's:f:o:h',
                                      ['string=', 'file=', 'output=', 'help'])
    for o, p in opts:
        if o in ['-h', '--help']:
            print('String mode: python -m indra.sources.trips.client.py '
                  '--string "RAS binds GTP" --output text.xml')
            print('File mode: python -m indra.sources.trips.client.py '
                  '--file test.txt --output text.xml')
            sys.exit()
        elif o in ['-s', '--string']:
            text = p
        elif o in ['-f', '--file']:
            filemode = True
            infile_name = p
        elif o in ['-o', '--output']:
            outfile_name = p
    if filemode:
        try:
            fh = open(infile_name, 'rt')
        except IOError:
            print('Could not open %s.' % infile_name)
            # Use sys.exit rather than the site-provided exit(), which is
            # absent when Python runs with -S.
            sys.exit()
        text = fh.read()
        fh.close()
        print('Parsing contents of %s...' % infile_name)
    else:
        print('Parsing string: %s' % text)
    html = send_query(text)
    # Bind the result to `ekb_xml`, NOT `xml`: assigning to `xml` at module
    # level shadowed the imported `xml` package and made the default
    # pretty-printing path of save_xml() crash with an AttributeError.
    ekb_xml = get_xml(html)
    save_xml(ekb_xml, outfile_name)
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import math
from cryptography import utils
from cryptography.exceptions import (
InvalidSignature, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.openssl.utils import (
_calculate_digest_and_algorithm, _check_not_prehashed,
_warn_sign_verify_deprecated
)
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext, AsymmetricVerificationContext, rsa
)
from cryptography.hazmat.primitives.asymmetric.padding import (
AsymmetricPadding, MGF1, OAEP, PKCS1v15, PSS, calculate_max_pss_salt_length
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKeyWithSerialization, RSAPublicKeyWithSerialization
)
def _get_rsa_pss_salt_length(pss, key, hash_algorithm):
    """Resolve the effective PSS salt length in bytes.

    The sentinels MGF1.MAX_LENGTH / PSS.MAX_LENGTH request the largest salt
    that fits the key/digest combination; any other value is an explicit
    byte count and is returned unchanged.
    """
    requested = pss._salt_length
    if requested is not MGF1.MAX_LENGTH and requested is not PSS.MAX_LENGTH:
        return requested
    return calculate_max_pss_salt_length(key, hash_algorithm)
def _enc_dec_rsa(backend, key, data, padding):
    """Validate *padding* and dispatch an RSA encrypt/decrypt operation.

    Maps the high-level padding object onto the corresponding OpenSSL
    padding constant, raising UnsupportedAlgorithm for anything this
    backend cannot handle, then delegates to _enc_dec_rsa_pkey_ctx.
    """
    if not isinstance(padding, AsymmetricPadding):
        raise TypeError("Padding must be an instance of AsymmetricPadding.")

    if isinstance(padding, PKCS1v15):
        padding_enum = backend._lib.RSA_PKCS1_PADDING
    elif isinstance(padding, OAEP):
        # OAEP is only supported with MGF1, and only for digest
        # combinations the linked OpenSSL build understands.
        if not isinstance(padding._mgf, MGF1):
            raise UnsupportedAlgorithm(
                "Only MGF1 is supported by this backend.",
                _Reasons.UNSUPPORTED_MGF
            )
        if not backend.rsa_padding_supported(padding):
            raise UnsupportedAlgorithm(
                "This combination of padding and hash algorithm is not "
                "supported by this backend.",
                _Reasons.UNSUPPORTED_PADDING
            )
        padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING
    else:
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend.".format(
                padding.name
            ),
            _Reasons.UNSUPPORTED_PADDING
        )

    return _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding)
def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding):
    """Perform the actual RSA encrypt/decrypt through an EVP_PKEY_CTX.

    The direction is chosen by the key type: public keys encrypt, private
    keys decrypt. *padding_enum* is the OpenSSL padding constant already
    validated by _enc_dec_rsa. Returns the output as bytes.
    """
    if isinstance(key, _RSAPublicKey):
        init = backend._lib.EVP_PKEY_encrypt_init
        crypt = backend._lib.EVP_PKEY_encrypt
    else:
        init = backend._lib.EVP_PKEY_decrypt_init
        crypt = backend._lib.EVP_PKEY_decrypt

    pkey_ctx = backend._lib.EVP_PKEY_CTX_new(
        key._evp_pkey, backend._ffi.NULL
    )
    backend.openssl_assert(pkey_ctx != backend._ffi.NULL)
    # Tie the context's lifetime to the cffi garbage collector so it is
    # freed even if an assertion below raises.
    pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free)
    res = init(pkey_ctx)
    backend.openssl_assert(res == 1)
    res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(
        pkey_ctx, padding_enum)
    backend.openssl_assert(res > 0)
    # EVP_PKEY_size bounds the output length for both directions.
    buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey)
    backend.openssl_assert(buf_size > 0)
    if (
        isinstance(padding, OAEP) and
        backend._lib.Cryptography_HAS_RSA_OAEP_MD
    ):
        # Configure the MGF1 and OAEP digests when the OpenSSL build
        # supports per-context OAEP digests.
        mgf1_md = backend._evp_md_non_null_from_algorithm(
            padding._mgf._algorithm)
        res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md)
        backend.openssl_assert(res > 0)
        oaep_md = backend._evp_md_non_null_from_algorithm(padding._algorithm)
        res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md)
        backend.openssl_assert(res > 0)

    if (
        isinstance(padding, OAEP) and
        padding._label is not None and
        len(padding._label) > 0
    ):
        # set0_rsa_oaep_label takes ownership of the char * so we need to
        # copy it into some new memory
        labelptr = backend._lib.OPENSSL_malloc(len(padding._label))
        backend.openssl_assert(labelptr != backend._ffi.NULL)
        backend._ffi.memmove(labelptr, padding._label, len(padding._label))
        res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label(
            pkey_ctx, labelptr, len(padding._label)
        )
        backend.openssl_assert(res == 1)

    outlen = backend._ffi.new("size_t *", buf_size)
    buf = backend._ffi.new("unsigned char[]", buf_size)
    res = crypt(pkey_ctx, buf, outlen, data, len(data))
    if res <= 0:
        # Translate the OpenSSL error queue into a Python ValueError.
        _handle_rsa_enc_dec_error(backend, key)

    return backend._ffi.buffer(buf)[:outlen[0]]
def _handle_rsa_enc_dec_error(backend, key):
    """Convert an OpenSSL RSA encrypt/decrypt failure into a ValueError.

    The expected error reason codes differ by direction: encryption
    (public key) only fails when the plaintext is too large, while
    decryption can fail for several padding-decoding reasons.
    """
    errors = backend._consume_errors()
    backend.openssl_assert(errors)
    backend.openssl_assert(errors[0].lib == backend._lib.ERR_LIB_RSA)
    if isinstance(key, _RSAPublicKey):
        backend.openssl_assert(
            errors[0].reason == backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE
        )
        raise ValueError(
            "Data too long for key size. Encrypt less data or use a "
            "larger key size."
        )
    else:
        decoding_errors = [
            backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_01,
            backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_02,
            backend._lib.RSA_R_OAEP_DECODING_ERROR,
            # Though this error looks similar to the
            # RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE, this occurs on decrypts,
            # rather than on encrypts
            backend._lib.RSA_R_DATA_TOO_LARGE_FOR_MODULUS,
        ]
        if backend._lib.Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR:
            decoding_errors.append(backend._lib.RSA_R_PKCS_DECODING_ERROR)
        backend.openssl_assert(errors[0].reason in decoding_errors)
        # NOTE(review): the message is deliberately generic for all decode
        # failures -- presumably to avoid leaking padding details; confirm
        # before making it more specific.
        raise ValueError("Decryption failed.")
def _rsa_sig_determine_padding(backend, key, padding, algorithm):
    """Map a signature padding object to the OpenSSL padding constant.

    Also validates that the padding/digest combination is usable with
    *key* (e.g. that a PSS signature with this digest can fit in the key).
    Raises TypeError, UnsupportedAlgorithm or ValueError on bad input.
    """
    if not isinstance(padding, AsymmetricPadding):
        raise TypeError("Expected provider of AsymmetricPadding.")

    pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey)
    backend.openssl_assert(pkey_size > 0)

    if isinstance(padding, PKCS1v15):
        padding_enum = backend._lib.RSA_PKCS1_PADDING
    elif isinstance(padding, PSS):
        if not isinstance(padding._mgf, MGF1):
            raise UnsupportedAlgorithm(
                "Only MGF1 is supported by this backend.",
                _Reasons.UNSUPPORTED_MGF
            )
        # Size of key in bytes - 2 is the maximum
        # PSS signature length (salt length is checked later)
        if pkey_size - algorithm.digest_size - 2 < 0:
            raise ValueError("Digest too large for key size. Use a larger "
                             "key or different digest.")
        padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING
    else:
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend.".format(padding.name),
            _Reasons.UNSUPPORTED_PADDING
        )
    return padding_enum
def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func):
    """Create and configure an EVP_PKEY_CTX for signing or verification.

    *init_func* selects the direction (EVP_PKEY_sign_init or
    EVP_PKEY_verify_init). NOTE(review): the *data* parameter is not used
    here; the digest bytes are supplied by the caller afterwards.
    """
    padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm)
    evp_md = backend._evp_md_non_null_from_algorithm(algorithm)
    pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL)
    backend.openssl_assert(pkey_ctx != backend._ffi.NULL)
    # Tie the context's lifetime to the cffi garbage collector.
    pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free)
    res = init_func(pkey_ctx)
    backend.openssl_assert(res == 1)
    res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md)
    if res == 0:
        # OpenSSL rejected the digest for this key type (e.g. not FIPS
        # approved or unknown to the linked build).
        backend._consume_errors()
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend for RSA signing.".format(
                algorithm.name
            ),
            _Reasons.UNSUPPORTED_HASH
        )
    res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum)
    backend.openssl_assert(res > 0)
    if isinstance(padding, PSS):
        res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen(
            pkey_ctx, _get_rsa_pss_salt_length(padding, key, algorithm)
        )
        backend.openssl_assert(res > 0)

        mgf1_md = backend._evp_md_non_null_from_algorithm(
            padding._mgf._algorithm)
        res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md)
        backend.openssl_assert(res > 0)

    return pkey_ctx
def _rsa_sig_sign(backend, padding, algorithm, private_key, data):
    """Sign the pre-hashed *data* with *private_key* and return the bytes.

    Uses the standard two-call OpenSSL pattern: first query the required
    buffer size, then produce the signature into the allocated buffer.
    """
    pkey_ctx = _rsa_sig_setup(
        backend, padding, algorithm, private_key, data,
        backend._lib.EVP_PKEY_sign_init
    )
    buflen = backend._ffi.new("size_t *")
    # First call with a NULL output buffer only reports the needed length.
    res = backend._lib.EVP_PKEY_sign(
        pkey_ctx,
        backend._ffi.NULL,
        buflen,
        data,
        len(data)
    )
    backend.openssl_assert(res == 1)
    buf = backend._ffi.new("unsigned char[]", buflen[0])
    res = backend._lib.EVP_PKEY_sign(
        pkey_ctx, buf, buflen, data, len(data))
    if res != 1:
        # Distinguish the two known failure reasons to give callers an
        # actionable message.
        errors = backend._consume_errors()
        backend.openssl_assert(errors[0].lib == backend._lib.ERR_LIB_RSA)
        if (
            errors[0].reason ==
            backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE
        ):
            reason = ("Salt length too long for key size. Try using "
                      "MAX_LENGTH instead.")
        else:
            backend.openssl_assert(
                errors[0].reason ==
                backend._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY
            )
            reason = "Digest too large for key size. Use a larger key."
        raise ValueError(reason)
    return backend._ffi.buffer(buf)[:]
def _rsa_sig_verify(backend, padding, algorithm, public_key, signature, data):
    """Verify *signature* over the pre-hashed *data*.

    Raises InvalidSignature when the signature does not match; returns
    None on success.
    """
    pkey_ctx = _rsa_sig_setup(
        backend, padding, algorithm, public_key, data,
        backend._lib.EVP_PKEY_verify_init
    )
    res = backend._lib.EVP_PKEY_verify(
        pkey_ctx, signature, len(signature), data, len(data)
    )
    # The previous call can return negative numbers in the event of an
    # error. This is not a signature failure but we need to fail if it
    # occurs.
    backend.openssl_assert(res >= 0)
    if res == 0:
        # Drain the error queue so a stale entry cannot surface later.
        backend._consume_errors()
        raise InvalidSignature
@utils.register_interface(AsymmetricSignatureContext)
class _RSASignatureContext(object):
    """Streaming RSA signing context backing the deprecated signer() API.

    Data is hashed incrementally via update(); finalize() signs the digest.
    """

    def __init__(self, backend, private_key, padding, algorithm):
        self._backend = backend
        self._private_key = private_key

        # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However
        # we need to make a pointless call to it here so we maintain the
        # API of erroring on init with this context if the values are invalid.
        _rsa_sig_determine_padding(backend, private_key, padding, algorithm)
        self._padding = padding
        self._algorithm = algorithm
        self._hash_ctx = hashes.Hash(self._algorithm, self._backend)

    def update(self, data):
        # Feed more bytes into the running hash.
        self._hash_ctx.update(data)

    def finalize(self):
        # Sign the accumulated digest.
        return _rsa_sig_sign(
            self._backend,
            self._padding,
            self._algorithm,
            self._private_key,
            self._hash_ctx.finalize()
        )
@utils.register_interface(AsymmetricVerificationContext)
class _RSAVerificationContext(object):
    """Streaming RSA verification context backing the deprecated verifier()
    API.

    Data is hashed incrementally via update(); verify() checks the stored
    signature against the digest and raises InvalidSignature on mismatch.
    """

    def __init__(self, backend, public_key, signature, padding, algorithm):
        self._backend = backend
        self._public_key = public_key
        self._signature = signature
        self._padding = padding
        # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However
        # we need to make a pointless call to it here so we maintain the
        # API of erroring on init with this context if the values are invalid.
        _rsa_sig_determine_padding(backend, public_key, padding, algorithm)
        # (A dead self-assignment `padding = padding` that followed the call
        # above has been removed; it had no effect.)
        self._algorithm = algorithm
        self._hash_ctx = hashes.Hash(self._algorithm, self._backend)

    def update(self, data):
        # Feed more bytes into the running hash.
        self._hash_ctx.update(data)

    def verify(self):
        # Finalize the hash and verify the stored signature against it.
        return _rsa_sig_verify(
            self._backend,
            self._padding,
            self._algorithm,
            self._public_key,
            self._signature,
            self._hash_ctx.finalize()
        )
@utils.register_interface(RSAPrivateKeyWithSerialization)
class _RSAPrivateKey(object):
    """OpenSSL-backed RSA private key.

    Wraps both the low-level RSA struct (_rsa_cdata) and its EVP_PKEY
    envelope (_evp_pkey), which different OpenSSL entry points require.
    """

    def __init__(self, backend, rsa_cdata, evp_pkey):
        self._backend = backend
        self._rsa_cdata = rsa_cdata
        self._evp_pkey = evp_pkey

        # Cache the modulus bit length; it backs the key_size property.
        n = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata, n, self._backend._ffi.NULL,
            self._backend._ffi.NULL
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._key_size = self._backend._lib.BN_num_bits(n[0])

    key_size = utils.read_only_property("_key_size")

    def signer(self, padding, algorithm):
        """Deprecated streaming-signature API; emits a warning."""
        _warn_sign_verify_deprecated()
        _check_not_prehashed(algorithm)
        return _RSASignatureContext(self._backend, self, padding, algorithm)

    def decrypt(self, ciphertext, padding):
        """Decrypt *ciphertext*; its length must equal the key size."""
        key_size_bytes = int(math.ceil(self.key_size / 8.0))
        if key_size_bytes != len(ciphertext):
            raise ValueError("Ciphertext length must be equal to key size.")

        return _enc_dec_rsa(self._backend, self, ciphertext, padding)

    def public_key(self):
        """Return the corresponding _RSAPublicKey."""
        ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata)
        self._backend.openssl_assert(ctx != self._backend._ffi.NULL)
        ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free)
        # Enable blinding on the duplicated struct.
        res = self._backend._lib.RSA_blinding_on(ctx, self._backend._ffi.NULL)
        self._backend.openssl_assert(res == 1)
        evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx)
        return _RSAPublicKey(self._backend, ctx, evp_pkey)

    def private_numbers(self):
        """Return RSAPrivateNumbers (p, q, d, CRT params and public e, n)."""
        n = self._backend._ffi.new("BIGNUM **")
        e = self._backend._ffi.new("BIGNUM **")
        d = self._backend._ffi.new("BIGNUM **")
        p = self._backend._ffi.new("BIGNUM **")
        q = self._backend._ffi.new("BIGNUM **")
        dmp1 = self._backend._ffi.new("BIGNUM **")
        dmq1 = self._backend._ffi.new("BIGNUM **")
        iqmp = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d)
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(e[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(d[0] != self._backend._ffi.NULL)
        self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q)
        self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(q[0] != self._backend._ffi.NULL)
        self._backend._lib.RSA_get0_crt_params(
            self._rsa_cdata, dmp1, dmq1, iqmp
        )
        self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL)
        return rsa.RSAPrivateNumbers(
            p=self._backend._bn_to_int(p[0]),
            q=self._backend._bn_to_int(q[0]),
            d=self._backend._bn_to_int(d[0]),
            dmp1=self._backend._bn_to_int(dmp1[0]),
            dmq1=self._backend._bn_to_int(dmq1[0]),
            iqmp=self._backend._bn_to_int(iqmp[0]),
            public_numbers=rsa.RSAPublicNumbers(
                e=self._backend._bn_to_int(e[0]),
                n=self._backend._bn_to_int(n[0]),
            )
        )

    def private_bytes(self, encoding, format, encryption_algorithm):
        """Serialize the private key in the requested encoding/format."""
        return self._backend._private_key_bytes(
            encoding,
            format,
            encryption_algorithm,
            self._evp_pkey,
            self._rsa_cdata
        )

    def sign(self, data, padding, algorithm):
        """Sign *data* (or an already-hashed Prehashed digest)."""
        data, algorithm = _calculate_digest_and_algorithm(
            self._backend, data, algorithm
        )
        return _rsa_sig_sign(self._backend, padding, algorithm, self, data)
@utils.register_interface(RSAPublicKeyWithSerialization)
class _RSAPublicKey(object):
    """OpenSSL-backed RSA public key.

    Wraps the low-level RSA struct (_rsa_cdata) and its EVP_PKEY envelope
    (_evp_pkey).
    """

    def __init__(self, backend, rsa_cdata, evp_pkey):
        self._backend = backend
        self._rsa_cdata = rsa_cdata
        self._evp_pkey = evp_pkey

        # Cache the modulus bit length; it backs the key_size property.
        n = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata, n, self._backend._ffi.NULL,
            self._backend._ffi.NULL
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._key_size = self._backend._lib.BN_num_bits(n[0])

    key_size = utils.read_only_property("_key_size")

    def verifier(self, signature, padding, algorithm):
        """Deprecated streaming-verification API; emits a warning."""
        _warn_sign_verify_deprecated()
        utils._check_bytes("signature", signature)
        _check_not_prehashed(algorithm)
        return _RSAVerificationContext(
            self._backend, self, signature, padding, algorithm
        )

    def encrypt(self, plaintext, padding):
        """Encrypt *plaintext* with the given padding."""
        return _enc_dec_rsa(self._backend, self, plaintext, padding)

    def public_numbers(self):
        """Return RSAPublicNumbers (e, n)."""
        n = self._backend._ffi.new("BIGNUM **")
        e = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata, n, e, self._backend._ffi.NULL
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(e[0] != self._backend._ffi.NULL)
        return rsa.RSAPublicNumbers(
            e=self._backend._bn_to_int(e[0]),
            n=self._backend._bn_to_int(n[0]),
        )

    def public_bytes(self, encoding, format):
        """Serialize the public key in the requested encoding/format."""
        return self._backend._public_key_bytes(
            encoding,
            format,
            self,
            self._evp_pkey,
            self._rsa_cdata
        )

    def verify(self, signature, data, padding, algorithm):
        """Verify *signature* over *data* (or a Prehashed digest)."""
        data, algorithm = _calculate_digest_and_algorithm(
            self._backend, data, algorithm
        )
        return _rsa_sig_verify(
            self._backend, padding, algorithm, self, signature, data
        )
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"internal/platform"
"testing"
)
// TestSupported tests that dist and the main tools agree on
// which build modes are supported for a given target. We do things
// this way because the dist tool needs to be buildable directly by
// the bootstrap compiler, and as such can't import internal packages.
func TestSupported(t *testing.T) {
defer func(a, o string) {
goarch = a
goos = o
}(goarch, goos)
var modes = []string{
// we assume that "exe" and "archive" always work
"pie",
"c-archive",
"c-shared",
"shared",
"plugin",
}
for _, a := range okgoarch {
goarch = a
for _, o := range okgoos {
if _, ok := cgoEnabled[o+"/"+a]; !ok {
continue
}
goos = o
for _, mode := range modes {
var dt tester
dist := dt.supportedBuildmode(mode)
std := platform.BuildModeSupported("gc", mode, o, a)
if dist != std {
t.Errorf("discrepancy for %s-%s %s: dist says %t, standard library says %t", o, a, mode, dist, std)
}
}
}
}
} | go | github | https://github.com/golang/go | src/cmd/dist/supported_test.go |
<?php
namespace Illuminate\View;
use ArrayIterator;
use Closure;
use Illuminate\Contracts\Support\DeferringDisplayableValue;
use Illuminate\Support\Enumerable;
use IteratorAggregate;
use Stringable;
use Traversable;
class InvokableComponentVariable implements DeferringDisplayableValue, IteratorAggregate, Stringable
{
    /**
     * The deferred callback that produces the variable's value.
     *
     * @var \Closure
     */
    protected $callable;

    /**
     * Create a new variable instance.
     *
     * @param  \Closure  $callable
     */
    public function __construct(Closure $callable)
    {
        $this->callable = $callable;
    }

    /**
     * Resolve the displayable value that the class is deferring.
     *
     * @return \Illuminate\Contracts\Support\Htmlable|string
     */
    public function resolveDisplayableValue()
    {
        return $this->__invoke();
    }

    /**
     * Get an iterator instance for the variable.
     *
     * @return \ArrayIterator
     */
    public function getIterator(): Traversable
    {
        $resolved = $this->__invoke();

        if ($resolved instanceof Enumerable) {
            $resolved = $resolved->all();
        }

        return new ArrayIterator($resolved);
    }

    /**
     * Dynamically proxy attribute access to the variable.
     *
     * @param  string  $key
     * @return mixed
     */
    public function __get($key)
    {
        $resolved = $this->__invoke();

        return $resolved->{$key};
    }

    /**
     * Dynamically proxy method access to the variable.
     *
     * @param  string  $method
     * @param  array  $parameters
     * @return mixed
     */
    public function __call($method, $parameters)
    {
        $resolved = $this->__invoke();

        return $resolved->{$method}(...$parameters);
    }

    /**
     * Resolve the variable by invoking the stored callback.
     *
     * @return mixed
     */
    public function __invoke()
    {
        return ($this->callable)();
    }

    /**
     * Resolve the variable as a string.
     *
     * @return string
     */
    public function __toString()
    {
        return (string) $this->__invoke();
    }
}
import cgi
import errno
import io
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
from os import path
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive, six
from django.utils.six.moves.urllib.request import urlretrieve
from django.utils.version import get_docs_version
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
requires_system_checks = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Can't perform any active locale changes during this command, because
# setting might not be available at all.
leave_locale_alone = True
# Rewrite the following suffixes when determining the target filename.
rewrite_template_suffixes = (
# Allow shipping invalid .py files without byte-compilation.
('.py-tpl', '.py'),
)
    def add_arguments(self, parser):
        """Register the CLI options shared by startapp/startproject."""
        parser.add_argument('name', help='Name of the application or project.')
        parser.add_argument('directory', nargs='?', help='Optional destination directory')
        parser.add_argument('--template', help='The path or URL to load the template from.')
        # Both --extension and --name accumulate: they may be repeated, and
        # each occurrence may itself hold a comma-separated list (split in
        # handle()).
        parser.add_argument(
            '--extension', '-e', dest='extensions',
            action='append', default=['py'],
            help='The file extension(s) to render (default: "py"). '
                 'Separate multiple extensions with commas, or use '
                 '-e multiple times.'
        )
        parser.add_argument(
            '--name', '-n', dest='files',
            action='append', default=[],
            help='The file name(s) to render. Separate multiple extensions '
                 'with commas, or use -n multiple times.'
        )
    def handle(self, app_or_project, name, target=None, **options):
        """Validate name/target, then render the template tree into place.

        Walks the resolved template directory, copying files across while
        rendering those whose extension (or explicit filename) was
        requested, and finally removes any temporary download/extraction
        directories recorded in self.paths_to_remove.
        """
        self.app_or_project = app_or_project
        self.paths_to_remove = []
        self.verbosity = options['verbosity']

        self.validate_name(name, app_or_project)

        # if some directory is given, make sure it's nicely expanded
        if target is None:
            top_dir = path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    message = "'%s' already exists" % top_dir
                else:
                    message = e
                raise CommandError(message)
        else:
            top_dir = os.path.abspath(path.expanduser(target))
            if not os.path.exists(top_dir):
                raise CommandError("Destination directory '%s' does not "
                                   "exist, please create it first." % top_dir)

        extensions = tuple(handle_extensions(options['extensions']))
        extra_files = []
        for file in options['files']:
            extra_files.extend(map(lambda x: x.strip(), file.split(',')))
        if self.verbosity >= 2:
            self.stdout.write("Rendering %s template files with "
                              "extensions: %s\n" %
                              (app_or_project, ', '.join(extensions)))
            self.stdout.write("Rendering %s template files with "
                              "filenames: %s\n" %
                              (app_or_project, ', '.join(extra_files)))

        base_name = '%s_name' % app_or_project
        base_subdir = '%s_template' % app_or_project
        base_directory = '%s_directory' % app_or_project
        camel_case_name = 'camel_case_%s_name' % app_or_project
        camel_case_value = ''.join(x for x in name.title() if x != '_')

        # Template context exposed to the rendered .py-tpl files.
        context = Context(dict(options, **{
            base_name: name,
            base_directory: top_dir,
            camel_case_name: camel_case_value,
            'docs_version': get_docs_version(),
            'django_version': django.__version__,
            'unicode_literals': '' if six.PY3 else '# -*- coding: utf-8 -*-\n'
                                'from __future__ import unicode_literals\n\n',
        }), autoescape=False)

        # Setup a stub settings environment for template rendering
        if not settings.configured:
            settings.configure()
            django.setup()

        template_dir = self.handle_template(options['template'],
                                            base_subdir)
        prefix_length = len(template_dir) + 1

        for root, dirs, files in os.walk(template_dir):

            # Mirror the source tree, substituting the placeholder name.
            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = path.join(top_dir, relative_dir)
                if not path.exists(target_dir):
                    os.mkdir(target_dir)

            # Prune hidden and bytecode-cache directories in place so
            # os.walk does not descend into them.
            for dirname in dirs[:]:
                if dirname.startswith('.') or dirname == '__pycache__':
                    dirs.remove(dirname)

            for filename in files:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = path.join(root, filename)
                new_path = path.join(top_dir, relative_dir,
                                     filename.replace(base_name, name))
                for old_suffix, new_suffix in self.rewrite_template_suffixes:
                    if new_path.endswith(old_suffix):
                        new_path = new_path[:-len(old_suffix)] + new_suffix
                        break  # Only rewrite once

                if path.exists(new_path):
                    raise CommandError("%s already exists, overlaying a "
                                       "project or app into an existing "
                                       "directory won't replace conflicting "
                                       "files" % new_path)

                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                if new_path.endswith(extensions) or filename in extra_files:
                    with io.open(old_path, 'r', encoding='utf-8') as template_file:
                        content = template_file.read()
                    template = Engine().from_string(content)
                    content = template.render(context)
                    with io.open(new_path, 'w', encoding='utf-8') as new_file:
                        new_file.write(content)
                else:
                    shutil.copyfile(old_path, new_path)

                if self.verbosity >= 2:
                    self.stdout.write("Creating %s\n" % new_path)
                try:
                    shutil.copymode(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path, self.style.NOTICE)

        # Remove any temp dirs created by download()/extract().
        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.\n")
            for path_to_remove in self.paths_to_remove:
                if path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if six.PY2:
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
else:
if not name.isidentifier():
raise CommandError(
"%r is not a valid %s name. Please make sure the name is "
"a valid identifier." % (name, app_or_project)
)
    def download(self, url):
        """
        Downloads the given URL and returns the file name.

        The file lands in a fresh temp directory which is queued on
        self.paths_to_remove so handle() deletes it afterwards.
        """
        def cleanup_url(url):
            # Split the URL into a candidate file name (last path segment)
            # and a display form that keeps a trailing slash.
            tmp = url.rstrip('/')
            filename = tmp.split('/')[-1]
            if url.endswith('/'):
                display_url = tmp + '/'
            else:
                display_url = url
            return filename, display_url

        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)

        if self.verbosity >= 2:
            self.stdout.write("Downloading %s\n" % display_url)
        try:
            the_path, info = urlretrieve(url, path.join(tempdir, filename))
        except IOError as e:
            raise CommandError("couldn't download URL %s to %s: %s" %
                               (url, filename, e))

        used_name = the_path.split('/')[-1]

        # Trying to get better name from response headers
        content_disposition = info.get('content-disposition')
        if content_disposition:
            _, params = cgi.parse_header(content_disposition)
            guessed_filename = params.get('filename') or used_name
        else:
            guessed_filename = used_name

        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = info.get('content-type')
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext

        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path

        # Giving up
        return the_path
def splitext(self, the_path):
    """
    Split *the_path* into (root, extension) like ``os.path.splitext``,
    but treat a trailing ``.tar`` as part of the extension, so
    ``a.tar.gz`` -> (``a``, ``.tar.gz``).
    """
    root, extension = posixpath.splitext(the_path)
    if root.lower().endswith('.tar'):
        root, tar_part = root[:-4], root[-4:]
        extension = tar_part + extension
    return root, extension
def extract(self, filename):
    """
    Extracts the given file to a temporary directory and returns
    the path of the directory with the extracted content.
    """
    prefix = 'django_%s_template_' % self.app_or_project
    tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
    # Registered so the command's cleanup pass removes it afterwards.
    self.paths_to_remove.append(tempdir)
    if self.verbosity >= 2:
        self.stdout.write("Extracting %s\n" % filename)
    try:
        archive.extract(filename, tempdir)
        return tempdir
    except (archive.ArchiveException, IOError) as e:
        raise CommandError("couldn't extract file %s to %s: %s" %
                           (filename, tempdir, e))
def is_url(self, template):
    """
    Return True when *template* looks like a URL, i.e. it has a scheme
    prefix that appears in ``self.url_schemes``.
    """
    scheme, sep, _rest = template.partition(':')
    if not sep:
        return False
    return scheme.lower() in self.url_schemes
def make_writeable(self, filename):
    """
    Make sure that the file is writeable.
    Useful if our source is read-only.
    """
    if sys.platform.startswith('java'):
        # On Jython there is no os.access()
        return

    if not os.access(filename, os.W_OK):
        st = os.stat(filename)
        # Keep the existing permission bits, just add owner-write.
        new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
        os.chmod(filename, new_permissions)
import re
from .base import FIELD_TYPE
from django.utils.datastructures import OrderedSet
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django.utils.encoding import force_text
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """MySQL implementation of Django's database-introspection API."""

    # MySQLdb FIELD_TYPE codes -> Django model field class names.
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'IntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }

    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        cursor.execute("SHOW TABLES")
        return [row[0] for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Returns a description of the table, with the DB-API
        cursor.description interface.
        """
        # varchar length returned by cursor.description is an internal length,
        # not visible length (#5725), use information_schema database to fix this
        cursor.execute("""
            SELECT column_name, character_maximum_length FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()
                AND character_maximum_length IS NOT NULL""", [table_name])
        length_map = dict(cursor.fetchall())

        # Also getting precision and scale from information_schema (see #5014)
        cursor.execute("""
            SELECT column_name, numeric_precision, numeric_scale FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()
                AND data_type='decimal'""", [table_name])
        numeric_map = dict((line[0], tuple(int(n) for n in line[1:])) for line in cursor.fetchall())

        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        # Rebuild each cursor.description row as a FieldInfo, substituting
        # the corrected length / precision / scale collected above.
        return [FieldInfo(*((force_text(line[0]),)
                            + line[1:3]
                            + (length_map.get(line[0], line[3]),)
                            + numeric_map.get(line[0], line[4:6])
                            + (line[6],)))
                for line in cursor.description]

    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict((d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name)))

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        my_field_dict = self._name_to_index(cursor, table_name)
        constraints = self.get_key_columns(cursor, table_name)
        relations = {}
        for my_fieldname, other_table, other_field in constraints:
            other_field_index = self._name_to_index(cursor, other_table)[other_field]
            my_field_index = my_field_dict[my_fieldname]
            relations[my_field_index] = (other_field_index, other_table)
        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []
        cursor.execute("""
            SELECT column_name, referenced_table_name, referenced_column_name
            FROM information_schema.key_column_usage
            WHERE table_name = %s
                AND table_schema = DATABASE()
                AND referenced_table_name IS NOT NULL
                AND referenced_column_name IS NOT NULL""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns

    def get_indexes(self, cursor, table_name):
        # Returns single-column indexes only, as
        # {column_name: {'primary_key': bool, 'unique': bool}}.
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        # Do a two-pass search for indexes: on first pass check which indexes
        # are multicolumn, on second pass check which single-column indexes
        # are present.
        rows = list(cursor.fetchall())
        multicol_indexes = set()
        for row in rows:
            # row[3] is Seq_in_index: > 1 means the index spans several columns.
            if row[3] > 1:
                multicol_indexes.add(row[2])
        indexes = {}
        for row in rows:
            if row[2] in multicol_indexes:
                continue
            if row[4] not in indexes:
                indexes[row[4]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[2] == 'PRIMARY':
                indexes[row[4]]['primary_key'] = True
            if not bool(row[1]):
                indexes[row[4]]['unique'] = True
        return indexes

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the actual constraint names and columns
        name_query = """
            SELECT kc.`constraint_name`, kc.`column_name`,
                kc.`referenced_table_name`, kc.`referenced_column_name`
            FROM information_schema.key_column_usage AS kc
            WHERE
                kc.table_schema = %s AND
                kc.table_name = %s
        """
        cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
        for constraint, column, ref_table, ref_column in cursor.fetchall():
            if constraint not in constraints:
                constraints[constraint] = {
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'index': False,
                    'check': False,
                    'foreign_key': (ref_table, ref_column) if ref_column else None,
                }
            constraints[constraint]['columns'].add(column)
        # Now get the constraint types
        type_query = """
            SELECT c.constraint_name, c.constraint_type
            FROM information_schema.table_constraints AS c
            WHERE
                c.table_schema = %s AND
                c.table_name = %s
        """
        cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
        for constraint, kind in cursor.fetchall():
            if kind.lower() == "primary key":
                constraints[constraint]['primary_key'] = True
                constraints[constraint]['unique'] = True
            elif kind.lower() == "unique":
                constraints[constraint]['unique'] = True
        # Now add in the indexes
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
            if index not in constraints:
                constraints[index] = {
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'index': True,
                    'check': False,
                    'foreign_key': None,
                }
            constraints[index]['index'] = True
            constraints[index]['columns'].add(column)
        # Convert the sorted sets to lists
        for constraint in constraints.values():
            constraint['columns'] = list(constraint['columns'])
        return constraints
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib import addrconv
from ryu.lib import igmplib
from ryu.lib.dpid import str_to_dpid
class SimpleSwitchIgmp(app_manager.RyuApp):
    """
    OpenFlow 1.0 learning switch wired through the IGMP snooping library
    (ryu.lib.igmplib): packet-ins arrive as igmplib events, and multicast
    group state changes are logged.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
    _CONTEXTS = {'igmplib': igmplib.IgmpLib}

    def __init__(self, *args, **kwargs):
        super(SimpleSwitchIgmp, self).__init__(*args, **kwargs)
        # dpid -> {mac: port} learned-address table.
        self.mac_to_port = {}
        self._snoop = kwargs['igmplib']
        # if you want a switch to operate as a querier,
        # set up as follows:
        self._snoop.set_querier_mode(
            dpid=str_to_dpid('0000000000000001'), server_port=2)
        # dpid         the datapath id that will operate as a querier.
        # server_port  a port number which connect to the multicast
        #              server.
        #
        # NOTE: you can set up only the one querier.
        #       when you called this method several times,
        #       only the last one becomes effective.

    def add_flow(self, datapath, in_port, dst, actions):
        # Install a flow matching (in_port, dl_dst=dst) with *actions*.
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        match = parser.OFPMatch(in_port=in_port,
                                dl_dst=addrconv.mac.text_to_bin(dst))
        mod = parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, actions=actions)
        datapath.send_msg(mod)

    @set_ev_cls(igmplib.EventPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # Learn the source MAC, then forward to the known port or flood.
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto

        # Unpack dst/src MACs and ethertype straight from the raw Ethernet
        # header (buffer() is a Python 2 builtin).
        (dst_, src_, _eth_type) = struct.unpack_from(
            '!6s6sH', buffer(msg.data), 0)
        src = addrconv.mac.bin_to_text(src_)
        dst = addrconv.mac.bin_to_text(dst_)

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s",
                         dpid, src, dst, msg.in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = msg.in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, msg.in_port, dst, actions)

        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions)
        datapath.send_msg(out)

    @set_ev_cls(igmplib.EventMulticastGroupStateChanged,
                MAIN_DISPATCHER)
    def _status_changed(self, ev):
        # Log multicast group add / member-change / remove notifications.
        msg = {
            igmplib.MG_GROUP_ADDED: 'Multicast Group Added',
            igmplib.MG_MEMBER_CHANGED: 'Multicast Group Member Changed',
            igmplib.MG_GROUP_REMOVED: 'Multicast Group Removed',
        }
        self.logger.info("%s: [%s] querier:[%s] hosts:%s",
                         msg.get(ev.reason), ev.address, ev.src,
                         ev.dsts)
import sys
from django.conf import settings
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.template import Template
from django.test import TestCase
class TestException(Exception):
    """Marker exception raised by the deliberately broken middleware below."""
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
    """Middleware base class that records which of its hooks were invoked."""

    def __init__(self):
        # One boolean flag per middleware hook, all initially False.
        for hook in ('request', 'view', 'response',
                     'template_response', 'exception'):
            setattr(self, 'process_%s_called' % hook, False)

    def process_request(self, request):
        self.process_request_called = True

    def process_view(self, request, view_func, view_args, view_kwargs):
        self.process_view_called = True

    def process_template_response(self, request, response):
        self.process_template_response_called = True
        return response

    def process_response(self, request, response):
        self.process_response_called = True
        return response

    def process_exception(self, request, exception):
        self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
    """Records the call, then short-circuits the request phase with a response."""
    def process_request(self, request):
        super(RequestMiddleware, self).process_request(request)
        return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
    """Records the call, then short-circuits the view phase with a response."""
    def process_view(self, request, view_func, view_args, view_kwargs):
        super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
        return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
    """Records the call, then replaces the outgoing response entirely."""
    def process_response(self, request, response):
        super(ResponseMiddleware, self).process_response(request, response)
        return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
    """Records the call, then substitutes its own TemplateResponse."""
    def process_template_response(self, request, response):
        super(TemplateResponseMiddleware, self).process_template_response(request, response)
        return TemplateResponse(request, Template('Template Response Middleware'))
class ExceptionMiddleware(TestMiddleware):
    """Records the call, then converts the exception into a response."""
    def process_exception(self, request, exception):
        super(ExceptionMiddleware, self).process_exception(request, exception)
        return HttpResponse('Exception Middleware')
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
    """Records the call, then raises TestException from process_request."""
    def process_request(self, request):
        super(BadRequestMiddleware, self).process_request(request)
        raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
    """Records the call, then raises TestException from process_view."""
    def process_view(self, request, view_func, view_args, view_kwargs):
        super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
        raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
    """Records the call, then raises TestException from process_template_response."""
    def process_template_response(self, request, response):
        super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
        raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
    """Records the call, then raises TestException from process_response."""
    def process_response(self, request, response):
        super(BadResponseMiddleware, self).process_response(request, response)
        raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
    """Records the call, then raises TestException from process_exception."""
    def process_exception(self, request, exception):
        super(BadExceptionMiddleware, self).process_exception(request, exception)
        raise TestException('Test Exception Exception')
class BaseMiddlewareExceptionTest(TestCase):
    """
    Shared harness: installs middleware instances directly into the test
    client's handler and records every exception reported through the
    ``got_request_exception`` signal for later inspection.
    """

    def setUp(self):
        self.exceptions = []
        got_request_exception.connect(self._on_request_exception)
        self.client.handler.load_middleware()

    def tearDown(self):
        got_request_exception.disconnect(self._on_request_exception)
        self.exceptions = []

    def _on_request_exception(self, sender, request, **kwargs):
        # Capture the full exc_info triple so tests can inspect the value.
        self.exceptions.append(sys.exc_info())

    def _add_middleware(self, middleware):
        # New middleware goes to the front of the request/view lists and
        # to the back of the template-response/response/exception lists.
        self.client.handler._request_middleware.insert(0, middleware.process_request)
        self.client.handler._view_middleware.insert(0, middleware.process_view)
        self.client.handler._template_response_middleware.append(middleware.process_template_response)
        self.client.handler._response_middleware.append(middleware.process_response)
        self.client.handler._exception_middleware.append(middleware.process_exception)

    def assert_exceptions_handled(self, url, errors, extra_error=None):
        """
        GET *url* and verify that exactly *errors* (a list of exception
        message strings) were reported via got_request_exception.
        An exception of the same type as *extra_error* is tolerated.
        """
        # NOTE: the Python-2-only ``except X, e`` syntax was replaced with
        # ``except X as e`` (valid since Python 2.6); unused bindings dropped.
        try:
            self.client.get(url)
        except TestException:
            # Test client intentionally re-raises any exceptions being raised
            # during request handling. Hence actual testing that exception was
            # properly handled is done by relying on got_request_exception
            # signal being sent.
            pass
        except Exception as e:
            if type(extra_error) != type(e):
                self.fail("Unexpected exception: %s" % e)
        self.assertEqual(len(self.exceptions), len(errors))
        for i, error in enumerate(errors):
            exception, value, tb = self.exceptions[i]
            self.assertEqual(value.args, (error, ))

    def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
        # One expected boolean per middleware hook, in pipeline order.
        self.assertEqual(middleware.process_request_called, request)
        self.assertEqual(middleware.process_view_called, view)
        self.assertEqual(middleware.process_template_response_called, template_response)
        self.assertEqual(middleware.process_response_called, response)
        self.assertEqual(middleware.process_exception_called, exception)
class MiddlewareTests(BaseMiddlewareExceptionTest):
    """
    Exercise the well-behaved TestMiddleware subclasses against views that
    succeed, 404, raise, return None, or deny permission, and verify
    exactly which middleware hooks fire in each case.
    """

    def test_process_request_middleware(self):
        pre_middleware = TestMiddleware()
        middleware = RequestMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
        self.assert_middleware_usage(middleware, True, False, False, True, False)
        self.assert_middleware_usage(post_middleware, False, False, False, True, False)

    def test_process_view_middleware(self):
        pre_middleware = TestMiddleware()
        middleware = ViewMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, False, False, True, False)

    def test_process_response_middleware(self):
        pre_middleware = TestMiddleware()
        middleware = ResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, True, False, True, False)

    def test_process_template_response_middleware(self):
        pre_middleware = TestMiddleware()
        middleware = TemplateResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
        self.assert_middleware_usage(middleware, True, True, True, True, False)
        self.assert_middleware_usage(post_middleware, True, True, True, True, False)

    def test_process_exception_middleware(self):
        pre_middleware = TestMiddleware()
        middleware = ExceptionMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, True, False, True, False)

    def test_process_request_middleware_not_found(self):
        pre_middleware = TestMiddleware()
        middleware = RequestMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
        self.assert_middleware_usage(middleware, True, False, False, True, False)
        self.assert_middleware_usage(post_middleware, False, False, False, True, False)

    def test_process_view_middleware_not_found(self):
        pre_middleware = TestMiddleware()
        middleware = ViewMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, False, False, True, False)

    def test_process_template_response_middleware_not_found(self):
        pre_middleware = TestMiddleware()
        middleware = TemplateResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_response_middleware_not_found(self):
        pre_middleware = TestMiddleware()
        middleware = ResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_exception_middleware_not_found(self):
        pre_middleware = TestMiddleware()
        middleware = ExceptionMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_request_middleware_exception(self):
        pre_middleware = TestMiddleware()
        middleware = RequestMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/error/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
        self.assert_middleware_usage(middleware, True, False, False, True, False)
        self.assert_middleware_usage(post_middleware, False, False, False, True, False)

    def test_process_view_middleware_exception(self):
        pre_middleware = TestMiddleware()
        middleware = ViewMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/error/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, False, False, True, False)

    def test_process_response_middleware_exception(self):
        pre_middleware = TestMiddleware()
        middleware = ResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_exception_middleware_exception(self):
        pre_middleware = TestMiddleware()
        middleware = ExceptionMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/error/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_request_middleware_null_view(self):
        pre_middleware = TestMiddleware()
        middleware = RequestMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
        self.assert_middleware_usage(middleware, True, False, False, True, False)
        self.assert_middleware_usage(post_middleware, False, False, False, True, False)

    def test_process_view_middleware_null_view(self):
        pre_middleware = TestMiddleware()
        middleware = ViewMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, False, False, True, False)

    def test_process_response_middleware_null_view(self):
        pre_middleware = TestMiddleware()
        middleware = ResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled(
            '/middleware_exceptions/null_view/',
            [
                "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object.",
            ],
            ValueError())

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, True, False, True, False)

    def test_process_exception_middleware_null_view(self):
        pre_middleware = TestMiddleware()
        middleware = ExceptionMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled(
            '/middleware_exceptions/null_view/',
            [
                "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object."
            ],
            ValueError())

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, True, False, True, False)

    def test_process_request_middleware_permission_denied(self):
        pre_middleware = TestMiddleware()
        middleware = RequestMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
        self.assert_middleware_usage(middleware, True, False, False, True, False)
        self.assert_middleware_usage(post_middleware, False, False, False, True, False)

    def test_process_view_middleware_permission_denied(self):
        pre_middleware = TestMiddleware()
        middleware = ViewMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, False)
        self.assert_middleware_usage(post_middleware, True, False, False, True, False)

    def test_process_response_middleware_permission_denied(self):
        pre_middleware = TestMiddleware()
        middleware = ResponseMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_exception_middleware_permission_denied(self):
        pre_middleware = TestMiddleware()
        middleware = ExceptionMiddleware()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(middleware)
        self._add_middleware(pre_middleware)
        self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
        self.assert_middleware_usage(middleware, True, True, False, True, True)
        self.assert_middleware_usage(post_middleware, True, True, False, True, True)

    def test_process_template_response_error(self):
        middleware = TestMiddleware()
        self._add_middleware(middleware)
        self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])

        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(middleware, True, True, True, True, False)
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
    """Tests for middlewares whose own hooks raise.

    Every test sandwiches one deliberately broken middleware between two
    well-behaved TestMiddleware instances, issues a request, and checks
    (a) the error messages surfaced to the client and (b) exactly which
    hooks ran on each of the three middlewares.
    """

    def _check_bad_middleware(self, bad_factory, url, errors,
                              pre_usage, bad_usage, post_usage,
                              exception=None):
        """Run one scenario.

        Creates TestMiddleware / bad_factory() / TestMiddleware (in that
        order), installs them via _add_middleware in the post, bad, pre
        order used throughout this file, requests *url*, asserts *errors*
        (plus *exception*, when given, as the extra positional argument to
        assert_exceptions_handled), then checks each middleware's usage
        flags — each *_usage is the 5-tuple assert_middleware_usage takes.
        """
        pre_middleware = TestMiddleware()
        bad_middleware = bad_factory()
        post_middleware = TestMiddleware()
        self._add_middleware(post_middleware)
        self._add_middleware(bad_middleware)
        self._add_middleware(pre_middleware)
        if exception is None:
            self.assert_exceptions_handled(url, errors)
        else:
            self.assert_exceptions_handled(url, errors, exception)
        # Check that the right middleware methods have been invoked
        self.assert_middleware_usage(pre_middleware, *pre_usage)
        self.assert_middleware_usage(bad_middleware, *bad_usage)
        self.assert_middleware_usage(post_middleware, *post_usage)

    def test_process_request_bad_middleware(self):
        self._check_bad_middleware(
            BadRequestMiddleware, '/middleware_exceptions/view/',
            ['Test Request Exception'],
            (True, False, False, True, False),
            (True, False, False, True, False),
            (False, False, False, True, False))

    def test_process_view_bad_middleware(self):
        self._check_bad_middleware(
            BadViewMiddleware, '/middleware_exceptions/view/',
            ['Test View Exception'],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, False, False, True, False))

    def test_process_template_response_bad_middleware(self):
        self._check_bad_middleware(
            BadTemplateResponseMiddleware,
            '/middleware_exceptions/template_response/',
            ['Test Template Response Exception'],
            (True, True, False, True, False),
            (True, True, True, True, False),
            (True, True, True, True, False))

    def test_process_response_bad_middleware(self):
        self._check_bad_middleware(
            BadResponseMiddleware, '/middleware_exceptions/view/',
            ['Test Response Exception'],
            (True, True, False, False, False),
            (True, True, False, True, False),
            (True, True, False, True, False))

    def test_process_exception_bad_middleware(self):
        self._check_bad_middleware(
            BadExceptionMiddleware, '/middleware_exceptions/view/',
            [],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, True, False, True, False))

    def test_process_request_bad_middleware_not_found(self):
        self._check_bad_middleware(
            BadRequestMiddleware, '/middleware_exceptions/not_found/',
            ['Test Request Exception'],
            (True, False, False, True, False),
            (True, False, False, True, False),
            (False, False, False, True, False))

    def test_process_view_bad_middleware_not_found(self):
        self._check_bad_middleware(
            BadViewMiddleware, '/middleware_exceptions/not_found/',
            ['Test View Exception'],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, False, False, True, False))

    def test_process_response_bad_middleware_not_found(self):
        self._check_bad_middleware(
            BadResponseMiddleware, '/middleware_exceptions/not_found/',
            ['Test Response Exception'],
            (True, True, False, False, True),
            (True, True, False, True, True),
            (True, True, False, True, True))

    def test_process_exception_bad_middleware_not_found(self):
        self._check_bad_middleware(
            BadExceptionMiddleware, '/middleware_exceptions/not_found/',
            ['Test Exception Exception'],
            (True, True, False, True, False),
            (True, True, False, True, True),
            (True, True, False, True, True))

    def test_process_request_bad_middleware_exception(self):
        self._check_bad_middleware(
            BadRequestMiddleware, '/middleware_exceptions/error/',
            ['Test Request Exception'],
            (True, False, False, True, False),
            (True, False, False, True, False),
            (False, False, False, True, False))

    def test_process_view_bad_middleware_exception(self):
        self._check_bad_middleware(
            BadViewMiddleware, '/middleware_exceptions/error/',
            ['Test View Exception'],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, False, False, True, False))

    def test_process_response_bad_middleware_exception(self):
        self._check_bad_middleware(
            BadResponseMiddleware, '/middleware_exceptions/error/',
            ['Error in view', 'Test Response Exception'],
            (True, True, False, False, True),
            (True, True, False, True, True),
            (True, True, False, True, True))

    def test_process_exception_bad_middleware_exception(self):
        self._check_bad_middleware(
            BadExceptionMiddleware, '/middleware_exceptions/error/',
            ['Test Exception Exception'],
            (True, True, False, True, False),
            (True, True, False, True, True),
            (True, True, False, True, True))

    def test_process_request_bad_middleware_null_view(self):
        self._check_bad_middleware(
            BadRequestMiddleware, '/middleware_exceptions/null_view/',
            ['Test Request Exception'],
            (True, False, False, True, False),
            (True, False, False, True, False),
            (False, False, False, True, False))

    def test_process_view_bad_middleware_null_view(self):
        self._check_bad_middleware(
            BadViewMiddleware, '/middleware_exceptions/null_view/',
            ['Test View Exception'],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, False, False, True, False))

    def test_process_response_bad_middleware_null_view(self):
        self._check_bad_middleware(
            BadResponseMiddleware, '/middleware_exceptions/null_view/',
            ["The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object.",
             'Test Response Exception'],
            (True, True, False, False, False),
            (True, True, False, True, False),
            (True, True, False, True, False))

    def test_process_exception_bad_middleware_null_view(self):
        self._check_bad_middleware(
            BadExceptionMiddleware, '/middleware_exceptions/null_view/',
            ["The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object."],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, True, False, True, False),
            exception=ValueError())

    def test_process_request_bad_middleware_permission_denied(self):
        self._check_bad_middleware(
            BadRequestMiddleware, '/middleware_exceptions/permission_denied/',
            ['Test Request Exception'],
            (True, False, False, True, False),
            (True, False, False, True, False),
            (False, False, False, True, False))

    def test_process_view_bad_middleware_permission_denied(self):
        self._check_bad_middleware(
            BadViewMiddleware, '/middleware_exceptions/permission_denied/',
            ['Test View Exception'],
            (True, True, False, True, False),
            (True, True, False, True, False),
            (True, False, False, True, False))

    def test_process_response_bad_middleware_permission_denied(self):
        self._check_bad_middleware(
            BadResponseMiddleware, '/middleware_exceptions/permission_denied/',
            ['Test Response Exception'],
            (True, True, False, False, True),
            (True, True, False, True, True),
            (True, True, False, True, True))

    def test_process_exception_bad_middleware_permission_denied(self):
        self._check_bad_middleware(
            BadExceptionMiddleware, '/middleware_exceptions/permission_denied/',
            ['Test Exception Exception'],
            (True, True, False, True, False),
            (True, True, False, True, True),
            (True, True, False, True, True))
# Sentinel distinguishing "setting was absent" from any real value
# (including None) when saving/restoring settings.ROOT_URLCONF below.
_missing = object()
class RootUrlconfTests(TestCase):
    def test_missing_root_urlconf(self):
        """Requests raise AttributeError when settings.ROOT_URLCONF is absent.

        The setting is removed for the duration of the request and restored
        afterwards.  Restoration happens in a ``finally`` block so that a
        failing assertion cannot leak the deleted setting into later tests
        (the original code only restored on the success path).
        """
        try:
            original_ROOT_URLCONF = settings.ROOT_URLCONF
            del settings.ROOT_URLCONF
        except AttributeError:
            # Setting already absent: remember that via the module-level
            # sentinel so we don't "restore" a value that never existed.
            original_ROOT_URLCONF = _missing
        try:
            self.assertRaises(AttributeError,
                self.client.get, "/middleware_exceptions/view/"
            )
        finally:
            if original_ROOT_URLCONF is not _missing:
                settings.ROOT_URLCONF = original_ROOT_URLCONF
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.models import GroupBookmark, GroupStatus, GroupTagValue
from sentry.search.django.backend import DjangoSearchBackend
from sentry.testutils import TestCase
class DjangoSearchBackendTest(TestCase):
    """Integration tests for DjangoSearchBackend.

    setUp indexes two groups in project1 — group1 ("foo", unresolved,
    seen 5 times, most recently seen) and group2 ("bar", resolved, seen
    10 times, bookmarked by self.user) — while project2 stays empty.
    The tests then exercise each query axis: free text, sort order,
    status, tags, bookmarks, project scoping, pagination, and
    first/last-seen date windows.

    Fix relative to the original: four tests created an extra local
    ``backend = self.create_backend()`` that was never used (assertions
    always go through ``self.backend``); those dead locals are removed.
    """

    def create_backend(self):
        return DjangoSearchBackend()

    def setUp(self):
        self.backend = self.create_backend()
        self.project1 = self.create_project(name='foo')
        self.project2 = self.create_project(name='bar')
        self.group1 = self.create_group(
            project=self.project1,
            checksum='a' * 32,
            message='foo',
            times_seen=5,
            status=GroupStatus.UNRESOLVED,
            last_seen=datetime(2013, 8, 13, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 13, 3, 8, 24, 880386),
        )
        self.event1 = self.create_event(
            event_id='a' * 32,
            group=self.group1,
            tags={
                'server': 'example.com',
                'env': 'production',
            }
        )
        self.group2 = self.create_group(
            project=self.project1,
            checksum='b' * 32,
            message='bar',
            times_seen=10,
            status=GroupStatus.RESOLVED,
            last_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
        )
        self.event2 = self.create_event(
            event_id='b' * 32,
            group=self.group2,
            tags={
                'server': 'example.com',
                'env': 'staging',
                'url': 'http://example.com',
            }
        )
        # Mirror each event's tags into GroupTagValue rows so the
        # tag-based queries below have database rows to match against.
        for key, value in self.event1.data['tags']:
            GroupTagValue.objects.create(
                group=self.group1,
                key=key,
                value=value,
            )
        for key, value in self.event2.data['tags']:
            GroupTagValue.objects.create(
                group=self.group2,
                key=key,
                value=value,
            )
        GroupBookmark.objects.create(
            user=self.user,
            group=self.group2,
            project=self.group2.project,
        )
        self.backend.index(self.event1)
        self.backend.index(self.event2)

    def test_query(self):
        """Free-text query matches on the group message."""
        results = self.backend.query(self.project1, query='foo')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, query='bar')
        assert len(results) == 1
        assert results[0] == self.group2

    def test_sort(self):
        """'date' orders by recency, 'new' by first_seen, 'freq' by times_seen."""
        results = self.backend.query(self.project1, sort_by='date')
        assert len(results) == 2
        assert results[0] == self.group1
        assert results[1] == self.group2

        results = self.backend.query(self.project1, sort_by='new')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1

        results = self.backend.query(self.project1, sort_by='freq')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1

    def test_status(self):
        results = self.backend.query(self.project1, status=GroupStatus.UNRESOLVED)
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, status=GroupStatus.RESOLVED)
        assert len(results) == 1
        assert results[0] == self.group2

    def test_tags(self):
        results = self.backend.query(self.project1, tags={'env': 'staging'})
        assert len(results) == 1
        assert results[0] == self.group2

        # 'example.com' exists only under the 'server' key, so querying it
        # under 'env' must match nothing.
        results = self.backend.query(self.project1, tags={'env': 'example.com'})
        assert len(results) == 0

    def test_bookmarked_by(self):
        results = self.backend.query(self.project1, bookmarked_by=self.user)
        assert len(results) == 1
        assert results[0] == self.group2

    def test_project(self):
        # project2 had nothing indexed, so it yields no results.
        results = self.backend.query(self.project2)
        assert len(results) == 0

    def test_pagination(self):
        results = self.backend.query(self.project1, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group2

        # Paging past the last result yields an empty page.
        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 0

    def test_first_seen_date_filter(self):
        results = self.backend.query(
            self.project1, date_from=self.group2.first_seen,
            date_filter='first_seen')
        assert len(results) == 1
        assert results[0] == self.group2

        results = self.backend.query(
            self.project1, date_to=self.group1.first_seen + timedelta(minutes=1),
            date_filter='first_seen')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(
            self.project1,
            date_from=self.group1.first_seen,
            date_to=self.group1.first_seen + timedelta(minutes=1),
            date_filter='first_seen',
        )
        assert len(results) == 1
        assert results[0] == self.group1

    def test_last_seen_date_filter(self):
        results = self.backend.query(
            self.project1, date_from=self.group1.last_seen,
            date_filter='last_seen')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(
            self.project1,
            date_to=self.group1.last_seen - timedelta(minutes=1),
            date_filter='last_seen')
        assert len(results) == 1
        assert results[0] == self.group2

        results = self.backend.query(
            self.project1,
            date_from=self.group2.last_seen,
            date_to=self.group1.last_seen - timedelta(minutes=1),
            date_filter='last_seen',
        )
        assert len(results) == 1
        assert results[0] == self.group2
import mock
import pytest
from datarobot_batch_scoring.main import (
main, UI, main_standalone, parse_args, main_deployment_aware
)
def test_lower_case_for_user(monkeypatch):
    """The --user value is lower-cased before it reaches
    run_batch_predictions ('DataRobot@...' arrives as 'datarobot@...')."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '--user', 'DataRobot@datarobot.com',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--n_samples',
                 '10',
                 '--n_concurrent', '1']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user='datarobot@datarobot.com',  # lower-cased by main()
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=1,
            resume=None,
            n_samples=10,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            auto_sample=False,
            fast_mode=False,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
def test_without_passed_user_and_passwd(monkeypatch):
    """Without --user/--password on the command line, main() still calls
    run_batch_predictions; the credentials are not pinned (mock.ANY).
    (The '--no' flag is passed here — presumably to suppress a
    confirmation prompt; confirm against parse_args.)"""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--n_samples',
                 '10',
                 '--n_concurrent', '1', '--no']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=1,
            resume=None,
            n_samples=10,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            auto_sample=False,
            fast_mode=False,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
def test_keep_cols(monkeypatch):
    """--keep_cols 'a, b, c' is split on commas and whitespace-stripped
    into the list ['a', 'b', 'c'] before being forwarded."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--keep_cols', 'a, b, c']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=['a', 'b', 'c'],  # parsed from 'a, b, c'
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            auto_sample=True,
            fast_mode=False,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
def test_input_dataset_doesnt_exist(monkeypatch):
    """A missing input file aborts through ui.fatal (which we make raise
    SystemExit) before any scoring work starts."""
    argv = ['--host', 'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'file-not-exists.csv']
    fake_ui_class = mock.Mock(spec=UI)
    fake_ui = fake_ui_class.return_value
    fake_ui.fatal.side_effect = SystemExit
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', fake_ui_class)
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        with pytest.raises(SystemExit):
            main(argv=argv)
    assert not run_mock.called
    fake_ui.fatal.assert_called_with('file file-not-exists.csv does not exist.')
def test_bad_objectid(monkeypatch):
    """If verify_objectid rejects an id, main() aborts through ui.fatal
    with the validation message and never starts scoring."""
    argv = ['--host', 'http://localhost:53646/api',
            '56dd9570018e213242dfa93caa',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv']
    fake_ui_class = mock.Mock(spec=UI)
    fake_ui = fake_ui_class.return_value
    fake_ui.fatal.side_effect = SystemExit
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', fake_ui_class)
    # Force the id check itself to fail with a known message.
    monkeypatch.setattr('datarobot_batch_scoring.main.verify_objectid',
                        mock.Mock(side_effect=ValueError('bad objectid')))
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        with pytest.raises(SystemExit):
            main(argv=argv)
    assert not run_mock.called
    fake_ui.fatal.assert_called_with('bad objectid')
def test_datarobot_key(monkeypatch):
    """--datarobot_key is forwarded as the 'datarobot-key' request header."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--datarobot_key', 'the_key']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={'datarobot-key': 'the_key'},  # from --datarobot_key
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0
        )
def test_encoding_options(monkeypatch):
    """'--delimiter=tab' is translated to a literal '\\t'; --encoding and
    --skip_dialect are forwarded unchanged."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--delimiter=tab',
                 '--encoding=utf-8', '--skip_dialect']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter='\t',  # 'tab' spelled out on the CLI
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
def test_unicode_decode_error_message_fast(monkeypatch):
    """With --fast, a UnicodeDecodeError from scoring is reported along
    with a hint about sampling and the --encoding flag, and main()
    returns 1."""
    argv = ['--host', 'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--encoding=ascii', '--fast', '--dry_run']
    fake_ui_class = mock.Mock(spec=UI)
    fake_ui = fake_ui_class.return_value
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', fake_ui_class)
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        run_mock.side_effect = UnicodeDecodeError('test', b'', 1, 1, 'test')
        assert main(argv=argv) == 1
    fake_ui.error.assert_has_calls([
        mock.call("'test' codec can't decode bytes in position 1-0: test"),
        mock.call("You are using --fast option, which uses a small sample "
                  "of data to figuring out the encoding of your file. You "
                  "can try to specify the encoding directly for this file "
                  "by using the encoding flag (e.g. --encoding utf-8). "
                  "You could also try to remove the --fast mode to auto-"
                  "detect the encoding with a larger sample size")
    ])
def test_unicode_decode_error_message_slow(monkeypatch):
    """Without --fast only the bare codec error is reported — no hint —
    and main() still returns 1."""
    argv = ['--host', 'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--dry_run']
    fake_ui_class = mock.Mock(spec=UI)
    fake_ui = fake_ui_class.return_value
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', fake_ui_class)
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        run_mock.side_effect = UnicodeDecodeError('test', b'', 1, 1, 'test')
        assert main(argv=argv) == 1
    # Without fast flag, we don't show the verbose error message
    fake_ui.error.assert_called_with(
        "'test' codec can't decode bytes in position 1-0: test"
    )
def test_invalid_delimiter(monkeypatch):
    """An unrecognised --delimiter value aborts through ui.fatal before
    any scoring starts."""
    argv = ['--host', 'http://localhost:53646/api',
            '--delimiter', 'INVALID',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv']
    fake_ui_class = mock.Mock(spec=UI)
    fake_ui = fake_ui_class.return_value
    fake_ui.fatal.side_effect = SystemExit
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', fake_ui_class)
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        with pytest.raises(SystemExit):
            main(argv=argv)
    assert not run_mock.called
    fake_ui.fatal.assert_called_with(
        'Delimiter "INVALID" is not a valid delimiter.')
def test_no_required_params(monkeypatch):
    """Leaving out the required positional arguments makes argument
    parsing exit without run_batch_predictions ever being called."""
    argv = ['--host', 'http://localhost:53646/api',
            '--n_samples', '10',
            '--n_concurrent', '1', '--no']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch('datarobot_batch_scoring.main'
                    '.run_batch_predictions') as run_mock:
        with pytest.raises(SystemExit):
            main(argv=argv)
    assert not run_mock.called
def test_output_delimiter(monkeypatch):
    """'--output_delimiter=tab' is translated to a literal '\\t' while the
    input delimiter stays at its default (None)."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--output_delimiter=tab',
                 '--encoding=utf-8', '--skip_dialect']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=False,
            output_delimiter='\t',  # 'tab' spelled out on the CLI
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0
        )
def test_skip_row_id(monkeypatch):
    """The --skip_row_id flag is forwarded as skip_row_id=True."""
    main_args = ['--host',
                 'http://localhost:53646/api',
                 '56dd9570018e213242dfa93c',
                 '56dd9570018e213242dfa93d',
                 'tests/fixtures/temperatura_predict.csv',
                 '--skip_row_id',
                 '--encoding=utf-8', '--skip_dialect']
    # Replace the interactive UI so the test never prompts.
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    with mock.patch(
            'datarobot_batch_scoring.main'
            '.run_batch_predictions') as mock_method:
        main(argv=main_args)
        mock_method.assert_called_once_with(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=True,  # from --skip_row_id
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0
        )
def test_pred_threshold(monkeypatch):
    """--pred_threshold must map onto the pred_threshold_name keyword."""
    argv = ['--host',
            'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--pred_threshold=threshold',
            '--encoding=utf-8', '--skip_dialect']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main(argv=argv)
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_decision_name=None,
            pred_threshold_name='threshold',
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
def test_pred_decision(monkeypatch):
    """--pred_decision must map onto the pred_decision_name keyword."""
    argv = ['--host',
            'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--pred_decision=label',
            '--encoding=utf-8', '--skip_dialect']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main(argv=argv)
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_decision_name='label',
            pred_threshold_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
def test_batch_scoring_deployment_aware_call(monkeypatch):
    """With a single id the deployment-aware entry point sets deployment_id."""
    argv = ['--host',
            'http://localhost:53646/api',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--encoding=utf-8', '--skip_dialect']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main_deployment_aware(argv=argv)
        # pid/lid stay unset: the single positional id is the deployment id.
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid=None,
            lid=None,
            deployment_id='56dd9570018e213242dfa93d',
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='utf-8',
            skip_dialect=True,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
def test_datarobot_transferable_call(monkeypatch):
    """The standalone entry point passes the positional id as import_id."""
    argv = ['--host',
            'http://localhost:53646/api',
            '0ec5bcea7f0f45918fa88257bfe42c09',
            'tests/fixtures/temperatura_predict.csv']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main_standalone(argv=argv)
        # NOTE: no deployment_id keyword is expected on this code path.
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid=None,
            lid=None,
            import_id='0ec5bcea7f0f45918fa88257bfe42c09',
            n_retry=3,
            concurrent=4,
            resume=None,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
def test_resume(monkeypatch):
    """--resume must be forwarded as resume=True."""
    argv = ['--host',
            'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--resume']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main(argv=argv)
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=True,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
def test_resume_no(monkeypatch):
    """--no-resume must be forwarded as resume=False."""
    argv = ['--host',
            'http://localhost:53646/api',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--no-resume']
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', mock.Mock(spec=UI))
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        main(argv=argv)
        expected = dict(
            base_url='http://localhost:53646/predApi/v1.0/',
            base_headers={},
            user=mock.ANY,
            pwd=mock.ANY,
            api_token=None,
            create_api_token=False,
            pid='56dd9570018e213242dfa93c',
            lid='56dd9570018e213242dfa93d',
            deployment_id=None,
            import_id=None,
            n_retry=3,
            concurrent=4,
            resume=False,
            n_samples=False,
            out_file='out.csv',
            keep_cols=None,
            delimiter=None,
            dataset='tests/fixtures/temperatura_predict.csv',
            pred_name=None,
            pred_threshold_name=None,
            pred_decision_name=None,
            timeout=None,
            ui=mock.ANY,
            fast_mode=False,
            auto_sample=True,
            dry_run=False,
            encoding='',
            skip_dialect=False,
            skip_row_id=False,
            output_delimiter=None,
            compression=False,
            field_size_limit=None,
            verify_ssl=True,
            max_prediction_explanations=0,
        )
        run_mock.assert_called_once_with(**expected)
@pytest.mark.parametrize('ssl_argvs, verify_ssl_value', [
    ('', True),
    ('--no_verify_ssl', False),
    ('--ca_bundle /path/to/cert', '/path/to/cert'),
    ('--ca_bundle /path/to/cert --no_verify_ssl', False)
])
def test_verify_ssl_parameter(ssl_argvs, verify_ssl_value):
    """SSL-related flags must map onto the parsed 'verify_ssl' value."""
    base_cmd = ('--host http://localhost:53646/api '
                '56dd9570018e213242dfa93c 56dd9570018e213242dfa93d '
                'tests/fixtures/temperatura_predict.csv ')
    argv = (base_cmd + ssl_argvs).strip().split(' ')
    parsed = parse_args(argv)
    assert parsed['verify_ssl'] == verify_ssl_value
def test_reason_codes_not_compatible_wit_old_api(monkeypatch):
    """Requesting prediction explanations on api/v1 must abort via ui.fatal."""
    argv = ['--host',
            'http://localhost:53646/',
            '56dd9570018e213242dfa93c',
            '56dd9570018e213242dfa93d',
            'tests/fixtures/temperatura_predict.csv',
            '--api_version', 'api/v1',
            '--max_prediction_explanations', '3']
    ui_class = mock.Mock(spec=UI)
    ui = ui_class.return_value
    # Make the fatal() stub abort like the real UI would.
    ui.fatal.side_effect = SystemExit
    monkeypatch.setattr('datarobot_batch_scoring.main.UI', ui_class)
    target = 'datarobot_batch_scoring.main.run_batch_predictions'
    with mock.patch(target) as run_mock:
        with pytest.raises(SystemExit):
            main(argv=argv)
        assert not run_mock.called
        ui.fatal.assert_called_with(
            'Prediction explanation is not available for '
            'api_version `api/v1` please use the '
            '`predApi/v1.0` or deployments endpoint')
"""
System users
============
"""
from fabtools.files import is_file
from fabtools.user import *
import fabtools.require
def user(name, comment=None, home=None, group=None, extra_groups=None,
         create_home=False, skeleton_dir=None, password=None, system=False,
         shell=None, uid=None):
    """
    Require that a system user exists with the given attributes.

    Creates the account if it is missing, otherwise updates the
    modifiable attributes of the existing account. When *home* is
    given, the home directory is also required to exist and to be
    owned by the user.

    ::

        from fabtools import require

        # This will also create a home directory for alice
        require.user('alice')

        # Sometimes we don't need a home directory
        require.user('mydaemon', create_home=False)

    .. note:: This function can be accessed directly from the
              ``fabtools.require`` module for convenience.
    """
    if exists(name):
        # Account already present: align its modifiable attributes.
        modify(name, comment=comment, home=home, group=group,
               extra_groups=extra_groups, password=password,
               shell=shell, uid=uid)
    else:
        # Account missing: create it with the full set of attributes.
        create(name, comment=comment, home=home, group=group,
               extra_groups=extra_groups, create_home=create_home,
               skeleton_dir=skeleton_dir, password=password, system=system,
               shell=shell, uid=uid)
    # Require the home directory itself, owned by the user.
    if home:
        fabtools.require.directory(home, owner=name, use_sudo=True)
def sudoer(username, hosts="ALL", operators="ALL", passwd=False, commands="ALL"):
    """
    Require a sudoers entry for the given user.

    Writes a drop-in file under ``/etc/sudoers.d`` granting *username*
    the given *commands* on *hosts* as *operators*, prompting for a
    password only when *passwd* is true.

    .. note:: This function can be accessed directly from the
              ``fabtools.require`` module for convenience.
    """
    tag = "PASSWD:" if passwd else "NOPASSWD:"
    spec = "%s %s=(%s) %s %s" % (username, hosts, operators, tag, commands)
    filename = '/etc/sudoers.d/fabtools-%s' % username
    # Remove any previous drop-in for this user before rewriting it,
    # restoring the mandatory 0440 mode afterwards.
    if is_file(filename):
        sudo('chmod 0640 %s && rm -f %s' % (filename, filename))
    sudo('echo "%s" >%s && chmod 0440 %s' % (spec, filename, filename), shell=True)
import json
import requests
import math
def write_status_code_error(job, exception):
    """Persist a client-error (4xx) HTTP exception onto the job's model.

    For status codes in the 400-499 range the message and code are
    JSON-encoded into ``job.model.dbobj.result`` and the service state
    is saved; any other code leaves the job untouched.
    """
    service = job.service
    if not (400 <= exception.code <= 499):
        return
    payload = {'message': exception.message, 'code': exception.code}
    job.model.dbobj.result = json.dumps(payload).encode()
    service.saveAll()
def find_disks(disk_type, nodes, partition_name):
    """
    Return, per node, the disks usable for a new storage pool: disks of
    the requested type that carry no whole-device filesystem.

    Disks already partitioned with a filesystem label starting with
    ``partition_name`` are put at the front of each node's list so they
    are preferred; partitioned disks without such a label are dropped.
    """
    def _has_labeled_partition(disk):
        # True when any partition carries a filesystem label that marks
        # it as belonging to this storage pool.
        for part in disk.partitions:
            for fs in part.filesystems:
                if fs['label'].startswith(partition_name):
                    return True
        return False

    result = {}
    for node in nodes:
        candidates = result.setdefault(node.name, [])
        for disk in node.disks.list():
            if disk.type.name != disk_type:
                # wrong media type for this cluster
                continue
            if disk.filesystems:
                # whole-device filesystem present: disk is in use
                continue
            if not disk.partitions:
                candidates.append(disk)
            elif _has_labeled_partition(disk):
                # already labeled for this pool: prefer it
                candidates.insert(0, disk)
    return result
def send_event(event_type, data, aysrepo):
    """
    Post data to all webhooks that are registered for event_type.

    :param event_type: the event type for which the webhooks are triggered
    :param data: payload POSTed to each matching webhook
    :param aysrepo: ays repo searched for 'webhook' services
    :return: None
    """
    for hook in aysrepo.servicesFind(role='webhook'):
        # Only notify hooks subscribed to this particular event type.
        if event_type in hook.model.data.eventtypes:
            requests.post(hook.model.data.url, data=data)
#!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation/support tooling;
# it is not read by the module at runtime.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
service_account_email:
version_added: "1.5.1"
description:
- service account email
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
type: bool
default: 'no'
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
version_added: "1.7"
state:
description:
- desired state of the resource
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to C(yes) if the instance can forward ip packets (useful for
gateways)
type: bool
default: 'no'
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
type: bool
default: 'yes'
preemptible:
version_added: "2.1"
description:
- if set to C(yes), instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
type: bool
default: 'no'
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author:
- Eric Johnson (@erjohnso) <erjohnso@google.com>
- Tom Melendez (@supertom) <supertom@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
- gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
    """Flatten a libcloud GCE node object into a plain dictionary.

    Resource references (image, network, subnetwork, disks) arrive as
    full URLs; only their trailing path segment is kept. The returned
    dict has the keys: image, disks, machine_type, metadata, name,
    network, subnetwork, private_ip, public_ip, status, tags and zone.
    """
    extra = inst.extra

    def tail(url):
        # Keep only the last path segment of a resource URL.
        return url.split('/')[-1]

    # GCE metadata is a list of {'key': ..., 'value': ...} items.
    metadata = {}
    if 'metadata' in extra and 'items' in extra['metadata']:
        metadata = dict((md['key'], md['value'])
                        for md in extra['metadata']['items'])
    try:
        netname = tail(extra['networkInterfaces'][0]['network'])
    except Exception:
        netname = None
    try:
        subnetname = tail(extra['networkInterfaces'][0]['subnetwork'])
    except Exception:
        subnetname = None
    # Disk names, ordered by their attachment index.
    disk_names = []
    if 'disks' in extra:
        ordered = sorted(extra['disks'], key=lambda info: info['index'])
        disk_names = [tail(info['source']) for info in ordered]
    public_ip = inst.public_ips[0] if len(inst.public_ips) else None
    if inst.image is not None:
        image = tail(inst.image) or None
    else:
        image = None
    return ({
        'image': image,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': (extra['status'] or None) if 'status' in extra else None,
        'tags': (extra['tags'] or []) if 'tags' in extra else [],
        'zone': (extra['zone'].name or None) if 'zone' in extra else None,
    })
def create_instances(module, gce, instance_names, number, lc_zone):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'
    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    number: number of instances to create
    lc_zone: GCEZone object
    Returns:
        A list of dictionaries with instance information
        about the instances that were launched.
    """
    # All remaining creation attributes come from the module parameters.
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    tags = module.params.get('tags')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    disk_size = module.params.get('disk_size')
    service_account_permissions = module.params.get('service_account_permissions')
    # Resolve the requested external IP: 'none' means no external IP,
    # 'ephemeral' is passed through, anything else is either a literal
    # IPv4 address or the name of a reserved static address.
    if external_ip == "none":
        instance_external_ip = None
    elif external_ip != "ephemeral":
        instance_external_ip = external_ip
        try:
            # check if instance_external_ip is an ip or a name
            try:
                socket.inet_aton(instance_external_ip)
                instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
            except socket.error:
                instance_external_ip = gce.ex_get_address(instance_external_ip)
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip
    new_instances = []
    changed = False
    # Resolve each requested disk (name or {'name','mode'} dict) to a
    # libcloud volume; the first disk is the boot disk.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk, lc_zone))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        # Old libcloud releases (< 0.15) expect the raw items-list format.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    # Defer the (expensive) image lookup until a code path actually needs it.
    lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
    # Validate service account scopes: aliases must exist in SA_SCOPES_MAP,
    # full https URLs are passed through untouched.
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)
    gce_args = dict(
        location=lc_zone,
        ex_network=network, ex_tags=tags, ex_metadata=metadata,
        ex_can_ip_forward=ip_forward,
        external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
        ex_service_accounts=ex_sa_perms
    )
    if preemptible is not None:
        gce_args['ex_preemptible'] = preemptible
    if subnetwork is not None:
        gce_args['ex_subnetwork'] = subnetwork
    if isinstance(instance_names, str) and not number:
        instance_names = [instance_names]
    # A base name plus a count goes through the bulk-create API;
    # otherwise nodes are created (or fetched, if pre-existing) one by one.
    if isinstance(instance_names, str) and number:
        instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
                                                          lc_image(), number, **gce_args)
        for resp in instance_responses:
            n = resp
            if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
                try:
                    n = gce.ex_get_node(n.name, lc_zone)
                except ResourceNotFoundError:
                    pass
            else:
                # Assure that at least one node has been created to set changed=True
                changed = True
            new_instances.append(n)
    else:
        for instance in instance_names:
            pd = None
            if lc_disks:
                pd = lc_disks[0]
            elif persistent_boot_disk:
                try:
                    pd = gce.ex_get_volume("%s" % instance, lc_zone)
                except ResourceNotFoundError:
                    pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
            gce_args['ex_boot_disk'] = pd
            inst = None
            try:
                inst = gce.ex_get_node(instance, lc_zone)
            except ResourceNotFoundError:
                inst = gce.create_node(
                    instance, lc_machine_type, lc_image(), **gce_args
                )
                changed = True
            except GoogleBaseError as e:
                module.fail_json(msg='Unexpected error attempting to create ' +
                                 'instance %s, error: %s' % (instance, e.value))
            if inst:
                new_instances.append(inst)
    # Attach any requested volumes that are not already attached, and verify
    # that disks already present match the requested source and mode.
    for inst in new_instances:
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i + 1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})
    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)
    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
    """Transition a set of instances to the requested state.

    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: a base name (str) or a list of instance names
    number: when instance_names is a base name, how many numbered nodes
    zone: GCEZone object where the instances reside
    state: 'state' parameter passed into the module as an argument

    Returns a tuple (changed, names) where names lists the instances
    the request was applied to (including already-absent ones).
    """
    changed = False
    nodes = []
    state_instance_names = []
    # Expand a base name plus count into the numbered node names produced
    # by the bulk-create API (base-000, base-001, ...).
    if isinstance(instance_names, str):
        if number:
            node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
        else:
            node_names = [instance_names]
    else:
        node_names = instance_names
    for name in node_names:
        try:
            node = gce.ex_get_node(name, zone)
        except ResourceNotFoundError:
            # Already gone: still report the name so callers see it handled.
            state_instance_names.append(name)
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        else:
            nodes.append(node)
            state_instance_names.append(name)
    if state in ['absent', 'deleted'] and number:
        # Bulk delete; fold the per-node results into a single flag.
        changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
        changed = reduce(lambda x, y: x or y, changed_nodes)
    else:
        for node in nodes:
            if state in ['absent', 'deleted']:
                gce.destroy_node(node)
                changed = True
            elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
                gce.ex_start_node(node)
                changed = True
            elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
                gce.ex_stop_node(node)
                changed = True
    return (changed, state_instance_names)
def main():
    """Module entry point: parse arguments, then create or terminate GCE
    instances accordingly, exiting via module.exit_json / fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(default='debian-8'),
            image_family=dict(),
            external_projects=dict(type='list'),
            instance_names=dict(),
            machine_type=dict(default='n1-standard-1'),
            metadata=dict(),
            name=dict(aliases=['base_name']),
            num_instances=dict(type='int'),
            network=dict(default='default'),
            subnetwork=dict(),
            persistent_boot_disk=dict(type='bool', default=False),
            disks=dict(type='list'),
            state=dict(choices=['active', 'present', 'absent', 'deleted',
                                'started', 'stopped', 'terminated'],
                       default='present'),
            tags=dict(type='list'),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete=dict(type='bool', default=True),
            disk_size=dict(type='int', default=10),
            preemptible=dict(type='bool', default=None),
        ),
        mutually_exclusive=[('instance_names', 'name')]
    )
    # Fail early when the runtime lacks the required dependencies.
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    gce = gce_connect(module)
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    instance_names = module.params.get('instance_names')
    name = module.params.get('name')
    number = module.params.get('num_instances')
    subnetwork = module.params.get('subnetwork')
    state = module.params.get('state')
    zone = module.params.get('zone')
    preemptible = module.params.get('preemptible')
    changed = False
    # Normalize the target into 'inames': a list of explicit names, or a
    # single base-name string when 'name' (+ num_instances) is used.
    inames = None
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames = name
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)
    lc_zone = get_valid_location(module, gce, zone)
    # Feature gates: these options need newer libcloud releases.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)
    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)
    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, state_instance_names) = change_instance_state(
            module, gce, inames, number, lc_zone, state)
        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        # NOTE(review): parses as `instance_names or (name and number)` —
        # presumably intentional; confirm the operator precedence is deliberate.
        if instance_names or name and number:
            json_output['instance_names'] = state_instance_names
        elif name:
            json_output['name'] = name
    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames, number, lc_zone)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name
    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """
    Lazily resolved disk image reference.

    gce.ex_get_image is a very expensive call, so the lookup is deferred
    until the image is actually needed, and performed at most once; the
    result is cached on the instance for later calls.
    """
    def __init__(self, module, gce, name, has_pd, family=None, projects=None):
        self.image = None
        self.was_called = False
        self.gce = gce
        self.name = name
        self.has_pd = has_pd
        self.module = module
        self.family = family
        self.projects = projects

    def __call__(self):
        # Resolve only on the first invocation; later calls reuse the cache.
        if not self.was_called:
            self.was_called = True
            if not self.has_pd:
                # Prefer an image family lookup when one was given.
                if self.family:
                    resolved = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
                else:
                    resolved = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
                self.image = resolved
            # No usable image (including the persistent-disk case where no
            # lookup is attempted) is a fatal module error.
            if not self.image:
                self.module.fail_json(msg='image or disks missing for create instance', changed=False)
        return self.image
# Run the Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
#
# errors.py : exception definitions
#
# Copyright 2007, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class CreatorError(Exception):
    """An exception base class for all imgcreate errors.

    NOTE(review): the ``__str__``/``__unicode__`` pair below relies on
    Python 2 semantics (``Exception.message`` and the ``unicode``
    builtin), which do not exist on Python 3 -- confirm the target
    interpreter before reusing this module.
    """
    def __init__(self, msg):
        Exception.__init__(self, msg)
    # Some error messages may contain unicode strings (especially if your system
    # locale is different from 'C', e.g. 'de_DE'). Python's exception class does
    # not handle this appropriately (at least until 2.5) because str(Exception)
    # returns just self.message without ensuring that all characters can be
    # represented using ASCII. So we try to return a str and fall back to repr
    # if this does not work.
    #
    # Please use unicode for your error logging strings so that we can really
    # print nice error messages, e.g.:
    #     log.error(u"Internal error: " % e)
    # instead of
    #     log.error("Internal error: " % e)
    # With our custom __str__ and __unicode__ methods both will work but the
    # first log call print a more readable error message.
    def __str__(self):
        # Fall back to repr() when the message has characters str() cannot encode.
        try:
            return str(self.message)
        except UnicodeEncodeError:
            return repr(self.message)
    def __unicode__(self):
        # An empty/absent message becomes the empty unicode string.
        if not self.message:
            return unicode("")
        return unicode(self.message.decode("utf8"))
class KickstartError(CreatorError):
    """Raised for errors while parsing or applying a kickstart file."""
    pass
class MountError(CreatorError):
    """Raised when mounting or unmounting a filesystem fails."""
    pass
class SnapshotError(CreatorError):
    """Raised for errors while creating or handling snapshots."""
    pass
class SquashfsError(CreatorError):
    """Raised for errors while creating a squashfs image."""
    pass
class ResizeError(CreatorError):
    """Raised when resizing a filesystem image fails."""
    pass
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import (
MR, A, Avatar, Base, Child, HiddenUser, HiddenUserProfile, M, M2MFrom,
M2MTo, MRNull, Parent, R, RChild, S, T, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Exercise each ``on_delete`` behavior declared on model ``A``'s
    relations (cascade, set-null, set-default, set-value, protect,
    do-nothing), including inherited (``RChild``) and one-to-one cases."""
    def setUp(self):
        # Pk of the default R row; SET(...)/SET_DEFAULT relations resolve to it
        # (compared against .pk below) -- see get_default_r in models.
        self.DEFAULT = get_default_r()
    def test_auto(self):
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())
    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())
    def test_setvalue(self):
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)
    def test_setnull(self):
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)
    def test_setdefault(self):
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)
    def test_setdefault_none(self):
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)
    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())
    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
    def test_protect(self):
        # PROTECT refuses the delete while a referrer exists.
        a = create_a('protect')
        with self.assertRaises(IntegrityError):
            a.protect.delete()
    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()
        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)
    def test_do_nothing_qscount(self):
        """
        A models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)
    def test_inheritance_cascade_up(self):
        # Deleting the child row also removes its inherited parent row.
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())
    def test_inheritance_cascade_down(self):
        # Deleting the parent row also removes the derived child row.
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())
    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)
    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)
    def test_o2o_setnull(self):
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """General deletion behavior: m2m cleanup, batching, signal ordering,
    deferred constraint checks, and the (count, per-model dict) return
    value of ``delete()``."""
    def test_m2m(self):
        # Deleting either end of an explicit through model removes the row.
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())
        # Same for the auto-created through table of a plain m2m.
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())
        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # NOTE(review): assertFalse(not X) is a double negative -- equivalent
        # to assertTrue(X). The through row survives here, presumably because
        # MRNull's FK is nullable and set to NULL on delete -- confirm in models.
        self.assertFalse(not MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())
    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())
    def test_instance_update(self):
        # After deletion, in-memory instances must have pk reset to None,
        # and SET_NULL referrers must be nulled out in the database.
        deleted = []
        related_setnull_sets = []
        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()
        a = create_a('update_cascade')
        a.cascade.delete()
        for obj in deleted:
            self.assertIsNone(obj.pk)
        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)
        models.signals.pre_delete.disconnect(pre_delete)
    def test_deletion_order(self):
        # NOTE(review): the handler names are swapped relative to the lists
        # they fill -- log_post_delete appends to pre_delete_order and vice
        # versa. The ordering assertions still hold, but the naming is
        # misleading; consider renaming.
        pre_delete_order = []
        post_delete_order = []
        def log_post_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_pre_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)
        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )
        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)
    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []
        def log_post_delete(instance, **kwargs):
            # The parent R row must still exist when the child's signal fires.
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)
        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)
        models.signals.post_delete.connect(log_post_delete, sender=S)
        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)
        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)
    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)
    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)
    def test_hidden_related(self):
        # Relations with a hidden (related_name='+') accessor still cascade.
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)
        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)
    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = int(ceil(float(len(objs)) / batch_size))
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())
    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)
        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
        # TEST_SIZE // batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        # NOTE(review): ceil() over `//` (floor division) is a no-op; `/` was
        # probably intended -- this only matches when the sizes divide evenly.
        expected_num_queries = (ceil(TEST_SIZE // batch_size) +
                                ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())
    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
    def test_delete_with_keeping_parents_relationships(self):
        # keep_parents=True must also preserve rows referencing the parent.
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        parent_referent_id = S.objects.create(r=child.r_ptr).pk
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
        self.assertTrue(S.objects.filter(pk=parent_referent_id).exists())
    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)
        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)
    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)
    def test_proxied_model_duplicate_queries(self):
        """
        #25685 - Deleting instances of a model with existing proxy
        classes should not issue multiple queries during cascade
        deletion of referring models.
        """
        avatar = Avatar.objects.create()
        # One query for the Avatar table and a second for the User one.
        with self.assertNumQueries(2):
            avatar.delete()
class FastDeleteTests(TestCase):
    """Deletions that cannot cascade further (no signals, no further
    relations to collect) should be executed as single-query fast
    deletes; these tests pin the expected query counts."""
    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)
    def test_fast_delete_revm2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete t, 1 to fast-delete t's m_set
        # NOTE(review): the comment describes deleting t but the assertion
        # deletes f -- likely t.delete was intended for the reverse case; confirm.
        self.assertNumQueries(2, f.delete)
    def test_fast_delete_qs(self):
        u1 = User.objects.create()
        u2 = User.objects.create()
        # A queryset delete with no cascades is a single DELETE.
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())
    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        # Backends that can self-select in an UPDATE/DELETE need one query;
        # others need an extra pk-collecting SELECT first.
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())
    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())
    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)
    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        #25932 - Fast deleting when the filter matches no rows should still
        work (and issue a single query), even on backends without the
        `update_can_self_select` feature.
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc='missing').delete(),
                (0, {'delete.User': 0})
            )
#
# Copyright (c) 1996-2000 Tyler C. Sarna <tsarna@sarna.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Tyler C. Sarna.
# 4. Neither the name of the author nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Package version and module docstring, set explicitly for introspection tools.
__version__ = '0.9'
__doc__='''Popular barcodes available as reusable widgets'''
def getCodes():
    """Return a dict mapping barcode code names to widget classes.

    The mapping makes it easy for apps and doc tools to display
    information about the available barcode widgets.
    """
    from reportlab.graphics.barcode.widgets import BarcodeI2of5, BarcodeCode128, BarcodeStandard93,\
                 BarcodeExtended93, BarcodeStandard39, BarcodeExtended39,\
                 BarcodeMSI, BarcodeCodabar, BarcodeCode11, BarcodeFIM,\
                 BarcodePOSTNET, BarcodeUSPS_4State
    #newer codes will typically get their own module
    from reportlab.graphics.barcode.eanbc import Ean13BarcodeWidget, Ean8BarcodeWidget, UPCA
    from reportlab.graphics.barcode.qr import QrCodeWidget
    widget_classes = (
        BarcodeI2of5,
        BarcodeCode128,
        BarcodeStandard93,
        BarcodeExtended93,
        BarcodeStandard39,
        BarcodeExtended39,
        BarcodeMSI,
        BarcodeCodabar,
        BarcodeCode11,
        BarcodeFIM,
        BarcodePOSTNET,
        BarcodeUSPS_4State,
        Ean13BarcodeWidget,
        Ean8BarcodeWidget,
        UPCA,
        QrCodeWidget,
    )
    # Key each widget class by the codeName it declares.
    return {widget.codeName: widget for widget in widget_classes}
def getCodeNames():
    """Returns sorted list of supported bar code names"""
    # Iterating the mapping yields its keys, so sort it directly.
    return sorted(getCodes())
def createBarcodeDrawing(codeName, **options):
    """Create and return a Drawing containing a barcode.

    codeName identifies the widget (see getCodeNames()); remaining keyword
    options are forwarded to the widget when they appear in its _attrMap
    (or start with an underscore).  Three special options control scaling:

    * width/height -- target size, or 'auto'/None to keep the barcode's
      natural size on that axis.
    * isoScale -- if true, scale both axes by the same factor instead of
      stretching each axis independently.

    Raises ValueError if the widget reports an invalid barcode value.
    """
    from reportlab.graphics.shapes import Drawing
    bcc = getCodes()[codeName]
    width = options.pop('width', None)
    height = options.pop('height', None)
    isoScale = options.pop('isoScale', 0)
    # Forward only options the widget understands (plus private '_' options).
    kw = {}
    for k, v in options.items():
        if k.startswith('_') or k in bcc._attrMap:
            kw[k] = v
    bc = bcc(**kw)
    # Robin's new widgets validate when setting the value property;
    # Ty Sarna's old ones do not, so validate explicitly when possible.
    if hasattr(bc, 'validate'):
        bc.validate()  # raise exception if bad value
        if not bc.valid:
            raise ValueError("Illegal barcode with value '%s' in code '%s'" % (options.get('value', None), codeName))
    # Size it after setting the data.
    x1, y1, x2, y2 = bc.getBounds()
    w = float(x2 - x1)
    h = float(y2 - y1)
    scaleW = width not in ('auto', None)
    scaleH = height not in ('auto', None)
    if scaleW or scaleH:
        # Bug fix: the previous `cond and ratio or 1.0` idiom silently fell
        # back to 1.0 whenever the computed ratio happened to be 0.
        sx = width / w if scaleW else 1.0
        sy = height / h if scaleH else 1.0
        if isoScale:
            # Shrinking in both directions keeps the larger factor,
            # otherwise use the smaller one so the barcode still fits.
            if sx < 1.0 and sy < 1.0:
                sx = sy = max(sx, sy)
            else:
                sx = sy = min(sx, sy)
        w *= sx
        h *= sy
    else:
        sx = sy = 1
    # Translate so the barcode's bounds origin lands at the drawing origin.
    d = Drawing(width=w, height=h, transform=[sx, 0, 0, sy, -sx * x1, -sy * y1])
    d.add(bc, "_bc")
    return d
def createBarcodeImageInMemory(codeName, **options):
    """Create a barcode and return it as an in-memory image string.

    Takes the same arguments as createBarcodeDrawing plus an optional
    'format' keyword, which may be anything acceptable to Drawing.asString
    (e.g. gif, pdf, tiff, py, ...).  Defaults to 'png'.
    """
    fmt = options.pop('format', 'png')
    drawing = createBarcodeDrawing(codeName, **options)
    return drawing.asString(fmt)
#!/usr/bin/env python
"""
Cleanup for Singularity container
Scan the images in the singularity CVMFS. If an image directory has not been "linked" to for 2 days,
remove the image directory.
Maintains state in a file in the root singularity directory named .missing_links.json
"""
import glob
import os
import json
import shutil
import argparse
import time
from datetime import datetime, timedelta
# JSON structure:
# {
# "missing_links": {
# "/cvmfs/singularity.opensciencegrid.org/.images/7d/ba009871baa50e01d655a80f79728800401bbd0f5e7e18b5055839e713c09f": "<timestamp_last_linked>"
# ...
# }
# }
def remove_unlisted_images(current_images, singularity_base, test=False):
    """
    Remove named-image symlinks under ``singularity_base`` that are not
    present in ``current_images``.

    :param current_images: iterable of image names; each entry carries the
        registry as its first path component (e.g. ``registry/repo:tag``),
        which is stripped before comparison.
    :param singularity_base: root directory of the singularity CVMFS repo.
    :param test: when True, only report what would be removed.
    """
    # Collect every symlinked (named) image path, skipping the
    # content-addressed .images store itself.
    named_image_dirs = set()
    for subdir, dirs, files in os.walk(singularity_base):
        # Prune the .images store in place so os.walk skips it.
        if ".images" in dirs:
            dirs.remove(".images")
        for directory in dirs:
            path = os.path.join(subdir, directory)
            if os.path.islink(path):
                named_image_dirs.add(path)
    # Drop every still-listed image from the removal set.
    for image in current_images:
        # Always has the registry as the first entry; remove it.
        image_dir = image.split('/', 1)[-1]
        named_image_dirs.discard(os.path.join(singularity_base, image_dir))
    # Whatever remains is no longer published; remove the symlinks.
    for image_dir in named_image_dirs:
        print("Removing deleted image: %s" % image_dir)
        if not test:
            try:
                os.unlink(image_dir)
            except OSError as e:
                print("Failed to remove deleted image: %s" % e)
def cleanup(delay=2, test=False,
            singularity_base='/cvmfs/singularity.opensciencegrid.org',
            max_per_cycle=50):
    '''Clean up unlinked singularity images.

    Scans the content-addressed ``.images`` store for entries that no named
    symlink points at, records when each became unlinked in
    ``.missing_links.json`` at the repo root, and deletes entries that have
    stayed unlinked for more than ``delay`` days (at most ``max_per_cycle``
    removals per invocation).  With ``test=True`` nothing is deleted, but
    the state file is still rewritten.
    '''
    json_location = os.path.join(singularity_base, '.missing_links.json')
    # Read in the old json, if it exists
    json_missing_links = {}
    try:
        with open(json_location) as json_file:
            json_missing_links = json.load(json_file)['missing_links']
    except (IOError, ValueError):
        # File is missing, unreadable, or damaged
        pass
    # Get all the images in the repo
    # Walk the directory /cvmfs/singularity.opensciencegrid.org/.images/*
    image_dirs = glob.glob(os.path.join(singularity_base, '.images/*/*'))
    # Walk the named image dirs
    named_image_dirs = []
    for subdir, dirs, files in os.walk(singularity_base):
        # Prune the .images store itself from the walk.
        try:
            images_index = dirs.index(".images")
            del dirs[images_index]
        except ValueError as ve:
            pass
        for directory in dirs:
            path = os.path.join(subdir, directory)
            if os.path.islink(path):
                named_image_dirs.append(path)
    # For named image dir, look at the what the symlink points at
    for named_image in named_image_dirs:
        link_target = os.readlink(named_image)
        # `while` (not `if`): several symlinks may share one target, and the
        # glob list could in principle hold duplicates.
        while link_target in image_dirs:
            image_dirs.remove(link_target)
            # Remove linked image from json (in case link is restored)
            json_missing_links.pop(link_target, None)
    # Now, for each image, see if it's in the json
    for image_dir in image_dirs:
        if image_dir not in json_missing_links:
            # Add it to the json
            print("Newly found missing link: %s" % (image_dir))
            json_missing_links[image_dir] = int(time.time())
    # Loop through the json missing links, removing directories if over the `delay` days
    expiry = datetime.now() - timedelta(days=delay)
    images_removed = 0
    # Iterate over a copied list because entries are deleted during the loop.
    for image_dir, last_linked in list(json_missing_links.items()):
        date_last_linked = datetime.fromtimestamp(last_linked)
        if date_last_linked < expiry:
            # Confirm that we're inside the managed directory
            if not image_dir.startswith(singularity_base):
                continue
            # Remove the directory
            print("Removing missing link: %s" % image_dir)
            if not test:
                try:
                    shutil.rmtree(image_dir)
                    # Only forget the entry once the removal succeeded.
                    del json_missing_links[image_dir]
                except OSError as e:
                    print("Failed to remove missing link: %s" % e)
            # NOTE: counts attempts (including failures and test mode), so the
            # per-cycle cap bounds work attempted, not work completed.
            images_removed += 1
            if images_removed >= max_per_cycle:
                print("Reached limit of cleaning %d images. Stopping cleanup cycle." % images_removed)
                break
    # Write out the end json
    with open(json_location, 'w') as json_file:
        json.dump({"missing_links": json_missing_links}, json_file)
def main():
    '''Entry point: parse CLI flags and run one cleanup pass.'''
    options = parse_args()
    cleanup(test=options.test)
def parse_args():
    '''Parse CLI options'''
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--test',
        action='store_true',
        help="Don't remove files, but go through the motions of removing them.",
    )
    return cli.parse_args()
# Run one cleanup pass when invoked as a script.
if __name__ == "__main__":
    main()
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package gcp
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"time"
"cloud.google.com/go/compute/metadata"
credentials "cloud.google.com/go/iam/credentials/apiv1"
"github.com/hashicorp/vault/api"
credentialspb "google.golang.org/genproto/googleapis/iam/credentials/v1"
)
// GCPAuth holds the configuration for logging in to Vault's GCP auth
// method: the Vault role, the mount path, the auth type ("iam" or "gce"),
// and — for IAM auth — the service account e-mail used to sign JWTs.
type GCPAuth struct {
	roleName            string
	mountPath           string
	authType            string
	serviceAccountEmail string
}

// Compile-time assertion that *GCPAuth satisfies api.AuthMethod.
var _ api.AuthMethod = (*GCPAuth)(nil)

// LoginOption mutates a GCPAuth during construction; see NewGCPAuth.
type LoginOption func(a *GCPAuth) error

const (
	iamType          = "iam"
	gceType          = "gce"
	defaultMountPath = "gcp"
	defaultAuthType  = gceType
	// identityMetadataURL is the GCE metadata endpoint that issues signed
	// identity tokens for the instance's default service account.
	identityMetadataURL = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity"
)
// NewGCPAuth initializes a new GCP auth method interface to be
// passed as a parameter to the client.Auth().Login method.
//
// Supported options: WithMountPath, WithIAMAuth, WithGCEAuth
func NewGCPAuth(roleName string, opts ...LoginOption) (*GCPAuth, error) {
	if roleName == "" {
		return nil, fmt.Errorf("no role name provided for login")
	}

	auth := &GCPAuth{
		mountPath: defaultMountPath,
		authType:  defaultAuthType,
		roleName:  roleName,
	}

	// Apply each option to the freshly constructed value; the first
	// failure aborts construction.
	for _, opt := range opts {
		if err := opt(auth); err != nil {
			return nil, fmt.Errorf("error with login option: %w", err)
		}
	}

	return auth, nil
}
// Login sets up the required request body for the GCP auth method's /login
// endpoint, and performs a write to it. This method defaults to the "gce"
// auth type unless NewGCPAuth is called with WithIAMAuth().
func (a *GCPAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	loginData := map[string]interface{}{
		"role": a.roleName,
	}
	// Obtain the JWT according to the configured auth type.
	switch a.authType {
	case gceType:
		// GCE: fetch a signed identity token from the instance metadata server.
		jwt, err := a.getJWTFromMetadataService(client.Address())
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve JWT from GCE metadata service: %w", err)
		}
		loginData["jwt"] = jwt
	case iamType:
		// IAM: have the IAM Credentials API sign a JWT on our behalf.
		jwtResp, err := a.signJWT()
		if err != nil {
			return nil, fmt.Errorf("unable to sign JWT for authenticating to GCP: %w", err)
		}
		loginData["jwt"] = jwtResp.SignedJwt
	}
	path := fmt.Sprintf("auth/%s/login", a.mountPath)
	resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
	if err != nil {
		return nil, fmt.Errorf("unable to log in with GCP auth: %w", err)
	}
	return resp, nil
}
// WithMountPath overrides the default mount path ("gcp") at which the
// GCP auth method is enabled in Vault.
func WithMountPath(mountPath string) LoginOption {
	return func(a *GCPAuth) error {
		a.mountPath = mountPath
		return nil
	}
}
// WithIAMAuth selects the "iam" auth type: at login time a JWT is signed
// via the IAM Credentials API on behalf of the given service account.
func WithIAMAuth(serviceAccountEmail string) LoginOption {
	return func(a *GCPAuth) error {
		a.serviceAccountEmail = serviceAccountEmail
		a.authType = iamType
		return nil
	}
}
// WithGCEAuth selects the "gce" auth type (the default): at login time an
// identity token is fetched from the GCE instance metadata service.
func WithGCEAuth() LoginOption {
	return func(a *GCPAuth) error {
		a.authType = gceType
		return nil
	}
}
// signJWT asks the IAM Credentials API to sign a short-lived (10 minute)
// JWT on behalf of a.serviceAccountEmail with the Vault role as audience.
// It relies on Application Default Credentials for its own authentication.
func (a *GCPAuth) signJWT() (*credentialspb.SignJwtResponse, error) {
	ctx := context.Background()
	iamClient, err := credentials.NewIamCredentialsClient(ctx) // can pass option.WithCredentialsFile("path/to/creds.json") as second param if GOOGLE_APPLICATION_CREDENTIALS env var not set
	if err != nil {
		return nil, fmt.Errorf("unable to initialize IAM credentials client: %w", err)
	}
	defer iamClient.Close()
	// "projects/-" lets the API infer the project from the service account.
	resourceName := fmt.Sprintf("projects/-/serviceAccounts/%s", a.serviceAccountEmail)
	jwtPayload := map[string]interface{}{
		"aud": fmt.Sprintf("vault/%s", a.roleName),
		"sub": a.serviceAccountEmail,
		"exp": time.Now().Add(time.Minute * 10).Unix(),
	}
	payloadBytes, err := json.Marshal(jwtPayload)
	if err != nil {
		return nil, fmt.Errorf("unable to marshal jwt payload to json: %w", err)
	}
	signJWTReq := &credentialspb.SignJwtRequest{
		Name:    resourceName,
		Payload: string(payloadBytes),
	}
	jwtResp, err := iamClient.SignJwt(ctx, signJWTReq)
	if err != nil {
		return nil, fmt.Errorf("unable to sign JWT: %w", err)
	}
	return jwtResp, nil
}
func (a *GCPAuth) getJWTFromMetadataService(vaultAddress string) (string, error) {
if !metadata.OnGCE() {
return "", fmt.Errorf("GCE metadata service not available")
}
// build request to metadata server
c := &http.Client{}
req, err := http.NewRequest(http.MethodGet, identityMetadataURL, nil)
if err != nil {
return "", fmt.Errorf("error creating http request: %w", err)
}
req.Header.Add("Metadata-Flavor", "Google")
q := url.Values{}
q.Add("audience", fmt.Sprintf("%s/vault/%s", vaultAddress, a.roleName))
q.Add("format", "full")
req.URL.RawQuery = q.Encode()
resp, err := c.Do(req)
if err != nil {
return "", fmt.Errorf("error making request to metadata service: %w", err)
}
defer resp.Body.Close()
// get jwt from response
body, err := io.ReadAll(resp.Body)
jwt := string(body)
if err != nil {
return "", fmt.Errorf("error reading response from metadata service: %w", err)
}
return jwt, nil
} | go | github | https://github.com/hashicorp/vault | api/auth/gcp/gcp.go |
// reference: https://github.com/vuejs/vue/blob/dev/test/unit/modules/vdom/patch/children.spec.js
import {
type TestElement,
TestNodeTypes,
h,
nodeOps,
render,
serialize,
serializeInner,
} from '@vue/runtime-test'
// Wrap a primitive in a <span> vnode. Strings become unkeyed spans;
// everything else becomes a span keyed by the value itself.
function toSpan(content: any) {
  return typeof content === 'string'
    ? h('span', content.toString())
    : h('span', { key: content }, content.toString())
}
// Shorthand: serialize only the inner contents of a test element.
const inner = (el: TestElement) => serializeInner(el)
// In-place Fisher–Yates shuffle; mutates and returns the same array.
function shuffle(array: Array<any>) {
  // Walk from the last slot down, swapping each slot with a random
  // earlier-or-equal one (the i === 0 step is a no-op swap).
  for (let i = array.length - 1; i >= 0; i--) {
    const j = Math.floor(Math.random() * (i + 1))
    const tmp = array[i]
    array[i] = array[j]
    array[j] = tmp
  }
  return array
}
// Patching from an explicit empty array of children to text children.
test('should patch previously empty children', () => {
  const root = nodeOps.createElement('div')
  render(h('div', []), root)
  expect(inner(root)).toBe('<div></div>')
  render(h('div', ['hello']), root)
  expect(inner(root)).toBe('<div>hello</div>')
})
// Patching from no children at all (null) to text children.
test('should patch previously null children', () => {
  const root = nodeOps.createElement('div')
  render(h('div'), root)
  expect(inner(root)).toBe('<div></div>')
  render(h('div', ['hello']), root)
  expect(inner(root)).toBe('<div>hello</div>')
})
// Replacing element-array children with a plain text child.
test('array children -> text children', () => {
  const root = nodeOps.createElement('div')
  render(h('div', [h('div')]), root)
  expect(inner(root)).toBe('<div><div></div></div>')
  render(h('div', 'hello'), root)
  expect(inner(root)).toBe('<div>hello</div>')
})
// A non-vnode object child should be stringified, not warned about.
test('plain object child', () => {
  const root = nodeOps.createElement('div')
  const foo = { foo: '1' }
  // @ts-expect-error
  render(h('div', null, [foo]), root)
  expect('Invalid VNode type').not.toHaveBeenWarned()
  expect(inner(root)).toBe('<div>[object Object]</div>')
})
// Keyed diffing: each scenario renders one list of keyed <span>s, re-renders
// with a permuted/resized list, and asserts the resulting order.
describe('renderer: keyed children', () => {
  let root: TestElement
  let elm: TestElement
  // Render the numbers as keyed spans under a fresh <div> and return that div.
  const renderChildren = (arr: number[]) => {
    render(h('div', arr.map(toSpan)), root)
    return root.children[0] as TestElement
  }
  beforeEach(() => {
    root = nodeOps.createElement('div')
    render(h('div', { id: 1 }, 'hello'), root)
  })
  // ---- insertions ----
  test('append', () => {
    elm = renderChildren([1])
    expect(elm.children.length).toBe(1)
    elm = renderChildren([1, 2, 3])
    expect(elm.children.length).toBe(3)
    expect(serialize(elm.children[1])).toBe('<span>2</span>')
    expect(serialize(elm.children[2])).toBe('<span>3</span>')
  })
  test('prepend', () => {
    elm = renderChildren([4, 5])
    expect(elm.children.length).toBe(2)
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '3',
      '4',
      '5',
    ])
  })
  test('insert in middle', () => {
    elm = renderChildren([1, 2, 4, 5])
    expect(elm.children.length).toBe(4)
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '3',
      '4',
      '5',
    ])
  })
  test('insert at beginning and end', () => {
    elm = renderChildren([2, 3, 4])
    expect(elm.children.length).toBe(3)
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '3',
      '4',
      '5',
    ])
  })
  test('insert to empty parent', () => {
    elm = renderChildren([])
    expect(elm.children.length).toBe(0)
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '3',
      '4',
      '5',
    ])
  })
  // ---- removals ----
  test('remove all children from parent', () => {
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '3',
      '4',
      '5',
    ])
    render(h('div'), root)
    expect(elm.children.length).toBe(0)
  })
  test('remove from beginning', () => {
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    elm = renderChildren([3, 4, 5])
    expect(elm.children.length).toBe(3)
    expect((elm.children as TestElement[]).map(inner)).toEqual(['3', '4', '5'])
  })
  test('remove from end', () => {
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    elm = renderChildren([1, 2, 3])
    expect(elm.children.length).toBe(3)
    expect((elm.children as TestElement[]).map(inner)).toEqual(['1', '2', '3'])
  })
  test('remove from middle', () => {
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    elm = renderChildren([1, 2, 4, 5])
    expect(elm.children.length).toBe(4)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '2',
      '4',
      '5',
    ])
  })
  // ---- moves ----
  test('moving single child forward', () => {
    elm = renderChildren([1, 2, 3, 4])
    expect(elm.children.length).toBe(4)
    elm = renderChildren([2, 3, 1, 4])
    expect(elm.children.length).toBe(4)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '2',
      '3',
      '1',
      '4',
    ])
  })
  test('moving single child backwards', () => {
    elm = renderChildren([1, 2, 3, 4])
    expect(elm.children.length).toBe(4)
    elm = renderChildren([1, 4, 2, 3])
    expect(elm.children.length).toBe(4)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      '4',
      '2',
      '3',
    ])
  })
  test('moving single child to end', () => {
    elm = renderChildren([1, 2, 3])
    expect(elm.children.length).toBe(3)
    elm = renderChildren([2, 3, 1])
    expect(elm.children.length).toBe(3)
    expect((elm.children as TestElement[]).map(inner)).toEqual(['2', '3', '1'])
  })
  test('swap first and last', () => {
    elm = renderChildren([1, 2, 3, 4])
    expect(elm.children.length).toBe(4)
    elm = renderChildren([4, 2, 3, 1])
    expect(elm.children.length).toBe(4)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '4',
      '2',
      '3',
      '1',
    ])
  })
  // ---- combined move + add/remove ----
  test('move to left & replace', () => {
    elm = renderChildren([1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(5)
    elm = renderChildren([4, 1, 2, 3, 6])
    expect(elm.children.length).toBe(5)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '4',
      '1',
      '2',
      '3',
      '6',
    ])
  })
  test('move to left and leaves hold', () => {
    elm = renderChildren([1, 4, 5])
    expect(elm.children.length).toBe(3)
    elm = renderChildren([4, 6])
    expect((elm.children as TestElement[]).map(inner)).toEqual(['4', '6'])
  })
  test('moved and set to undefined element ending at the end', () => {
    elm = renderChildren([2, 4, 5])
    expect(elm.children.length).toBe(3)
    elm = renderChildren([4, 5, 3])
    expect(elm.children.length).toBe(3)
    expect((elm.children as TestElement[]).map(inner)).toEqual(['4', '5', '3'])
  })
  test('reverse element', () => {
    elm = renderChildren([1, 2, 3, 4, 5, 6, 7, 8])
    expect(elm.children.length).toBe(8)
    elm = renderChildren([8, 7, 6, 5, 4, 3, 2, 1])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '8',
      '7',
      '6',
      '5',
      '4',
      '3',
      '2',
      '1',
    ])
  })
  test('something', () => {
    elm = renderChildren([0, 1, 2, 3, 4, 5])
    expect(elm.children.length).toBe(6)
    elm = renderChildren([4, 3, 2, 1, 5, 0])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '4',
      '3',
      '2',
      '1',
      '5',
      '0',
    ])
  })
  // Stress test: repeatedly re-render with a random permutation and fresh
  // per-child styles, then verify both order and prop patching per slot.
  test('random shuffle', () => {
    const elms = 14
    const samples = 5
    const arr = [...Array(elms).keys()]
    const opacities: string[] = []
    function spanNumWithOpacity(n: number, o: string) {
      return h('span', { key: n, style: { opacity: o } }, n.toString())
    }
    for (let n = 0; n < samples; ++n) {
      render(
        h(
          'span',
          arr.map(n => spanNumWithOpacity(n, '1')),
        ),
        root,
      )
      elm = root.children[0] as TestElement
      for (let i = 0; i < elms; ++i) {
        expect(serializeInner(elm.children[i] as TestElement)).toBe(
          i.toString(),
        )
        opacities[i] = Math.random().toFixed(5).toString()
      }
      const shufArr = shuffle(arr.slice(0))
      render(
        h(
          'span',
          arr.map(n => spanNumWithOpacity(shufArr[n], opacities[n])),
        ),
        root,
      )
      elm = root.children[0] as TestElement
      for (let i = 0; i < elms; ++i) {
        expect(serializeInner(elm.children[i] as TestElement)).toBe(
          shufArr[i].toString(),
        )
        expect(elm.children[i]).toMatchObject({
          props: {
            style: {
              opacity: opacities[i],
            },
          },
        })
      }
    }
  })
  // ---- same key but different element type forces replacement ----
  test('children with the same key but with different tag', () => {
    render(
      h('div', [
        h('div', { key: 1 }, 'one'),
        h('div', { key: 2 }, 'two'),
        h('div', { key: 3 }, 'three'),
        h('div', { key: 4 }, 'four'),
      ]),
      root,
    )
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(c => c.tag)).toEqual([
      'div',
      'div',
      'div',
      'div',
    ])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'one',
      'two',
      'three',
      'four',
    ])
    render(
      h('div', [
        h('div', { key: 4 }, 'four'),
        h('span', { key: 3 }, 'three'),
        h('span', { key: 2 }, 'two'),
        h('div', { key: 1 }, 'one'),
      ]),
      root,
    )
    expect((elm.children as TestElement[]).map(c => c.tag)).toEqual([
      'div',
      'span',
      'span',
      'div',
    ])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'four',
      'three',
      'two',
      'one',
    ])
  })
  test('children with the same tag, same key, but one with data and one without data', () => {
    render(h('div', [h('div', { class: 'hi' }, 'one')]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0]).toMatchObject({
      props: {
        class: 'hi',
      },
    })
    render(h('div', [h('div', 'four')]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0] as TestElement).toMatchObject({
      props: {
        // in the DOM renderer this will be ''
        // but the test renderer simply sets whatever value it receives.
        class: null,
      },
    })
    expect(serialize(elm.children[0])).toBe(`<div>four</div>`)
  })
  test('should warn with duplicate keys', () => {
    renderChildren([1, 2, 3, 4, 5])
    renderChildren([1, 6, 6, 3, 5])
    expect(`Duplicate keys`).toHaveBeenWarned()
  })
})
// Unkeyed diffing: children are matched positionally, so these scenarios
// verify in-place patching/append/truncate rather than keyed moves.
describe('renderer: unkeyed children', () => {
  let root: TestElement
  let elm: TestElement
  // Render the values as spans (numbers become keyed via toSpan, strings not).
  const renderChildren = (arr: Array<number | string>) => {
    render(h('div', arr.map(toSpan)), root)
    return root.children[0] as TestElement
  }
  beforeEach(() => {
    root = nodeOps.createElement('div')
    render(h('div', { id: 1 }, 'hello'), root)
  })
  test('move a key in non-keyed nodes with a size up', () => {
    elm = renderChildren([1, 'a', 'b', 'c'])
    expect(elm.children.length).toBe(4)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      '1',
      'a',
      'b',
      'c',
    ])
    elm = renderChildren(['d', 'a', 'b', 'c', 1, 'e'])
    expect(elm.children.length).toBe(6)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'd',
      'a',
      'b',
      'c',
      '1',
      'e',
    ])
  })
  test('append elements with updating children without keys', () => {
    elm = renderChildren(['hello'])
    expect((elm.children as TestElement[]).map(inner)).toEqual(['hello'])
    elm = renderChildren(['hello', 'world'])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'hello',
      'world',
    ])
  })
  // ---- text node handling ----
  test('unmoved text nodes with updating children without keys', () => {
    render(h('div', ['text', h('span', ['hello'])]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0]).toMatchObject({
      type: TestNodeTypes.TEXT,
      text: 'text',
    })
    render(h('div', ['text', h('span', ['hello'])]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0]).toMatchObject({
      type: TestNodeTypes.TEXT,
      text: 'text',
    })
  })
  test('changing text children with updating children without keys', () => {
    render(h('div', ['text', h('span', ['hello'])]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0]).toMatchObject({
      type: TestNodeTypes.TEXT,
      text: 'text',
    })
    render(h('div', ['text2', h('span', ['hello'])]), root)
    elm = root.children[0] as TestElement
    expect(elm.children[0]).toMatchObject({
      type: TestNodeTypes.TEXT,
      text: 'text2',
    })
  })
  // ---- positional prepend patches in place ----
  test('prepend element with updating children without keys', () => {
    render(h('div', [h('span', ['world'])]), root)
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual(['world'])
    render(h('div', [h('span', ['hello']), h('span', ['world'])]), root)
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'hello',
      'world',
    ])
  })
  test('prepend element of different tag type with updating children without keys', () => {
    render(h('div', [h('span', ['world'])]), root)
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual(['world'])
    render(h('div', [h('div', ['hello']), h('span', ['world'])]), root)
    expect((elm.children as TestElement[]).map(c => c.tag)).toEqual([
      'div',
      'span',
    ])
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'hello',
      'world',
    ])
  })
  // ---- removals ----
  test('remove elements with updating children without keys', () => {
    render(
      h('div', [h('span', ['one']), h('span', ['two']), h('span', ['three'])]),
      root,
    )
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'one',
      'two',
      'three',
    ])
    render(h('div', [h('span', ['one']), h('span', ['three'])]), root)
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual(['one', 'three'])
  })
  test('remove a single text node with updating children without keys', () => {
    render(h('div', ['one']), root)
    elm = root.children[0] as TestElement
    expect(serializeInner(elm)).toBe('one')
    render(h('div'), root)
    expect(serializeInner(elm)).toBe('')
  })
  test('remove a single text node when children are updated', () => {
    render(h('div', ['one']), root)
    elm = root.children[0] as TestElement
    expect(serializeInner(elm)).toBe('one')
    render(h('div', [h('div', ['two']), h('span', ['three'])]), root)
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual(['two', 'three'])
  })
  test('remove a text node among other elements', () => {
    render(h('div', ['one', h('span', ['two'])]), root)
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(c => serialize(c))).toEqual([
      'one',
      '<span>two</span>',
    ])
    render(h('div', [h('div', ['three'])]), root)
    elm = root.children[0] as TestElement
    expect(elm.children.length).toBe(1)
    expect(serialize(elm.children[0])).toBe('<div>three</div>')
  })
  test('reorder elements', () => {
    render(
      h('div', [h('span', ['one']), h('div', ['two']), h('b', ['three'])]),
      root,
    )
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'one',
      'two',
      'three',
    ])
    render(
      h('div', [h('b', ['three']), h('div', ['two']), h('span', ['one'])]),
      root,
    )
    elm = root.children[0] as TestElement
    expect((elm.children as TestElement[]).map(inner)).toEqual([
      'three',
      'two',
      'one',
    ])
  })
  // #6502
  test('should not de-opt when both head and tail change', () => {
    render(h('div', [null, h('div'), null]), root)
    elm = root.children[0] as TestElement
    const original = elm.children[1]
    render(h('div', [h('p'), h('div'), h('p')]), root)
    elm = root.children[0] as TestElement
    const postPatch = elm.children[1]
    expect(postPatch).toBe(original)
  })
})
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
namespace cv { namespace hal {
/****************************************************************************************\
* LU & Cholesky implementation for small matrices *
\****************************************************************************************/
// Gaussian elimination with partial pivoting on a square m x m matrix A.
// astep/bstep are byte strides on entry and converted to element strides
// below. If b is non-NULL it holds an m x n right-hand side that is reduced
// together with A and then solved in place by back substitution.
// Returns 0 when a pivot magnitude drops below eps (matrix treated as
// singular); otherwise +1 or -1, the sign of the applied row permutation
// (usable for determinant sign).
template<typename _Tp> static inline int
LUImpl(_Tp* A, size_t astep, int m, _Tp* b, size_t bstep, int n, _Tp eps)
{
    int i, j, k, p = 1;
    // byte strides -> element strides
    astep /= sizeof(A[0]);
    bstep /= sizeof(b[0]);
    for( i = 0; i < m; i++ )
    {
        // partial pivoting: pick the row with the largest |A[.,i]| at/below i
        k = i;
        for( j = i+1; j < m; j++ )
            if( std::abs(A[j*astep + i]) > std::abs(A[k*astep + i]) )
                k = j;
        // near-zero pivot => singular
        if( std::abs(A[k*astep + i]) < eps )
            return 0;
        if( k != i )
        {
            // swap pivot row into place (A and, if present, b); flip perm sign
            for( j = i; j < m; j++ )
                std::swap(A[i*astep + j], A[k*astep + j]);
            if( b )
                for( j = 0; j < n; j++ )
                    std::swap(b[i*bstep + j], b[k*bstep + j]);
            p = -p;
        }
        // eliminate column i from all rows below the pivot
        _Tp d = -1/A[i*astep + i];
        for( j = i+1; j < m; j++ )
        {
            _Tp alpha = A[j*astep + i]*d;
            for( k = i+1; k < m; k++ )
                A[j*astep + k] += alpha*A[i*astep + k];
            if( b )
                for( k = 0; k < n; k++ )
                    b[j*bstep + k] += alpha*b[i*bstep + k];
        }
    }
    if( b )
    {
        // back substitution on the upper-triangular system; solution left in b
        for( i = m-1; i >= 0; i-- )
            for( j = 0; j < n; j++ )
            {
                _Tp s = b[i*bstep + j];
                for( k = i+1; k < m; k++ )
                    s -= A[i*astep + k]*b[k*bstep + j];
                b[i*bstep + j] = s/A[i*astep + i];
            }
    }
    return p;
}
// Single-precision LU entry point. CALL_HAL_RET presumably returns `output`
// early when an accelerated HAL implementation services the call (the macro
// is defined elsewhere — TODO confirm); otherwise falls through to the
// generic template with a tolerance scaled from FLT_EPSILON.
int LU32f(float* A, size_t astep, int m, float* b, size_t bstep, int n)
{
    CV_INSTRUMENT_REGION();
    int output;
    CALL_HAL_RET(LU32f, cv_hal_LU32f, output, A, astep, m, b, bstep, n)
    output = LUImpl(A, astep, m, b, bstep, n, FLT_EPSILON*10);
    return output;
}
// Double-precision variant of LU32f (tolerance scaled from DBL_EPSILON).
int LU64f(double* A, size_t astep, int m, double* b, size_t bstep, int n)
{
    CV_INSTRUMENT_REGION();
    int output;
    CALL_HAL_RET(LU64f, cv_hal_LU64f, output, A, astep, m, b, bstep, n)
    output = LUImpl(A, astep, m, b, bstep, n, DBL_EPSILON*100);
    return output;
}
// In-place Cholesky factorization A = L*L^T of a symmetric positive-definite
// m x m matrix (only the lower triangle is read/written; L overwrites A).
// While factoring, the diagonal of L is kept as its reciprocal so the solve
// steps can multiply instead of divide; just before returning, the diagonal
// is inverted back to the true L diagonal. If b is non-NULL, the m x n
// right-hand side is solved in place via forward then backward substitution.
// Returns false when the matrix is not positive definite (pivot too small).
template<typename _Tp> static inline bool
CholImpl(_Tp* A, size_t astep, int m, _Tp* b, size_t bstep, int n)
{
    _Tp* L = A;
    int i, j, k;
    double s;
    // byte strides -> element strides
    astep /= sizeof(A[0]);
    bstep /= sizeof(b[0]);
    for( i = 0; i < m; i++ )
    {
        for( j = 0; j < i; j++ )
        {
            s = A[i*astep + j];
            for( k = 0; k < j; k++ )
                s -= L[i*astep + k]*L[j*astep + k];
            // L[j][j] currently holds 1/sqrt(...), so this is a division
            L[i*astep + j] = (_Tp)(s*L[j*astep + j]);
        }
        s = A[i*astep + i];
        // note: j == i here (value left over from the loop above)
        for( k = 0; k < j; k++ )
        {
            double t = L[i*astep + k];
            s -= t*t;
        }
        if( s < std::numeric_limits<_Tp>::epsilon() )
            return false;
        // store reciprocal of the diagonal for the solve phase
        L[i*astep + i] = (_Tp)(1./std::sqrt(s));
    }
    if (!b)
    {
        // no RHS: just restore the true diagonal and report success
        for( i = 0; i < m; i++ )
            L[i*astep + i]=1/L[i*astep + i];
        return true;
    }
    // LLt x = b
    // 1: L y = b
    // 2. Lt x = y
    /*
     [ L00             ]  y0   b0
     [ L10 L11         ]  y1 = b1
     [ L20 L21 L22     ]  y2   b2
     [ L30 L31 L32 L33 ]  y3   b3

     [ L00 L10 L20 L30 ]  x0   y0
     [     L11 L21 L31 ]  x1 = y1
     [         L22 L32 ]  x2   y2
     [             L33 ]  x3   y3
    */
    // forward substitution: L y = b (diagonal multiply == divide, see above)
    for( i = 0; i < m; i++ )
    {
        for( j = 0; j < n; j++ )
        {
            s = b[i*bstep + j];
            for( k = 0; k < i; k++ )
                s -= L[i*astep + k]*b[k*bstep + j];
            b[i*bstep + j] = (_Tp)(s*L[i*astep + i]);
        }
    }
    // backward substitution: L^T x = y
    for( i = m-1; i >= 0; i-- )
    {
        for( j = 0; j < n; j++ )
        {
            s = b[i*bstep + j];
            for( k = m-1; k > i; k-- )
                s -= L[k*astep + i]*b[k*bstep + j];
            b[i*bstep + j] = (_Tp)(s*L[i*astep + i]);
        }
    }
    // restore the true L diagonal
    for( i = 0; i < m; i++ )
        L[i*astep + i]=1/L[i*astep + i];
    return true;
}
// Single-precision Cholesky entry point; CALL_HAL_RET presumably dispatches
// to an accelerated HAL implementation when available (macro defined
// elsewhere — TODO confirm), otherwise the generic template runs.
bool Cholesky32f(float* A, size_t astep, int m, float* b, size_t bstep, int n)
{
    CV_INSTRUMENT_REGION();
    bool output;
    CALL_HAL_RET(Cholesky32f, cv_hal_Cholesky32f, output, A, astep, m, b, bstep, n)
    return CholImpl(A, astep, m, b, bstep, n);
}
// Double-precision variant of Cholesky32f.
bool Cholesky64f(double* A, size_t astep, int m, double* b, size_t bstep, int n)
{
    CV_INSTRUMENT_REGION();
    bool output;
    CALL_HAL_RET(Cholesky64f, cv_hal_Cholesky64f, output, A, astep, m, b, bstep, n)
    return CholImpl(A, astep, m, b, bstep, n);
}
// Sign of x, with sign(0) == +1 (the convention the Householder reflector
// construction in QRImpl relies on).
template<typename _Tp> inline static int
sign(_Tp x)
{
    return x >= (_Tp)0 ? 1 : -1;
}
// Householder QR factorization of an m x n matrix A (m >= n assumed by the
// loop bounds), optionally followed by a least-squares solve of the m x k
// right-hand side b. The reflector vectors are stored (scaled by 1/vl[0]) in
// the lower part of A, with per-column factors vl[0]^2 in hFactors; R ends up
// in the upper triangle. Returns 0 when a diagonal of R falls below eps
// during back substitution (rank deficient), 1 otherwise.
template<typename _Tp> static inline int
QRImpl(_Tp* A, size_t astep, int m, int n, int k, _Tp* b, size_t bstep, _Tp* hFactors, _Tp eps)
{
    // byte strides -> element strides
    astep /= sizeof(_Tp);
    bstep /= sizeof(_Tp);

    // scratch: vl (length m) plus, when the caller gave no hFactors, n more.
    // NOTE(review): the m == 0 branch allocates 0 or 1 elements — looks odd
    // but is preserved as-is.
    cv::AutoBuffer<_Tp> buffer;
    size_t buf_size = m ? m + n : hFactors != NULL;
    buffer.allocate(buf_size);
    _Tp* vl = buffer.data();
    if (hFactors == NULL)
        hFactors = vl + m;

    for (int l = 0; l < n; l++)
    {
        //generate vl: the Householder vector for column l
        int vlSize = m - l;
        _Tp vlNorm = (_Tp)0;
        for (int i = 0; i < vlSize; i++)
        {
            vl[i] = A[(l + i)*astep + l];
            vlNorm += vl[i] * vl[i];
        }
        // shift the head by +-||v|| (sign chosen to avoid cancellation),
        // then renormalize vl to unit length
        _Tp tmpV = vl[0];
        vl[0] = vl[0] + sign(vl[0])*std::sqrt(vlNorm);
        vlNorm = std::sqrt(vlNorm + vl[0] * vl[0] - tmpV*tmpV);
        for (int i = 0; i < vlSize; i++)
        {
            vl[i] /= vlNorm;
        }
        //multiply A_l*vl: apply the reflector I - 2 vl vl^T to the trailing block
        for (int j = l; j < n; j++)
        {
            _Tp v_lA = (_Tp)0;
            for (int i = l; i < m; i++)
            {
                v_lA += vl[i - l] * A[i*astep + j];
            }
            for (int i = l; i < m; i++)
            {
                A[i*astep + j] -= 2 * vl[i - l] * v_lA;
            }
        }
        //save vl and factors: tail of vl goes below the diagonal, scaled by 1/vl[0]
        hFactors[l] = vl[0] * vl[0];
        for (int i = 1; i < vlSize; i++)
        {
            A[(l + i)*astep + l] = vl[i] / vl[0];
        }
    }

    if (b)
    {
        //generate new rhs: apply the stored reflectors to b (computes Q^T b)
        for (int l = 0; l < n; l++)
        {
            //unpack vl (head is implicitly 1 after the 1/vl[0] scaling)
            vl[0] = (_Tp)1;
            for (int j = 1; j < m - l; j++)
            {
                vl[j] = A[(j + l)*astep + l];
            }
            //h_l*x
            for (int j = 0; j < k; j++)
            {
                _Tp v_lB = (_Tp)0;
                for (int i = l; i < m; i++)
                    v_lB += vl[i - l] * b[i*bstep + j];
                for (int i = l; i < m; i++)
                    b[i*bstep + j] -= 2 * vl[i - l] * v_lB * hFactors[l];
            }
        }
        //do back substitution on R x = Q^T b
        for (int i = n - 1; i >= 0; i--)
        {
            for (int j = n - 1; j > i; j--)
            {
                for (int p = 0; p < k; p++)
                    b[i*bstep + p] -= b[j*bstep + p] * A[i*astep + j];
            }
            // tiny diagonal of R => rank deficient
            if (std::abs(A[i*astep + i]) < eps)
                return 0;
            for (int p = 0; p < k; p++)
                b[i*bstep + p] /= A[i*astep + i];
        }
    }

    return 1;
}
// Single-precision QR entry point; CALL_HAL_RET presumably dispatches to an
// accelerated HAL implementation when available (macro defined elsewhere —
// TODO confirm), otherwise the generic template runs with a FLT_EPSILON-scaled
// tolerance.
int QR32f(float* A, size_t astep, int m, int n, int k, float* b, size_t bstep, float* hFactors)
{
    CV_INSTRUMENT_REGION();
    int output;
    CALL_HAL_RET(QR32f, cv_hal_QR32f, output, A, astep, m, n, k, b, bstep, hFactors);
    output = QRImpl(A, astep, m, n, k, b, bstep, hFactors, FLT_EPSILON * 10);
    return output;
}
// Double-precision variant of QR32f (DBL_EPSILON-scaled tolerance).
int QR64f(double* A, size_t astep, int m, int n, int k, double* b, size_t bstep, double* hFactors)
{
    CV_INSTRUMENT_REGION();
    int output;
    CALL_HAL_RET(QR64f, cv_hal_QR64f, output, A, astep, m, n, k, b, bstep, hFactors)
    output = QRImpl(A, astep, m, n, k, b, bstep, hFactors, DBL_EPSILON * 100);
    return output;
}
//=============================================================================
// for compatibility with 3.0
// Thin wrappers preserving the OpenCV 3.0 hal API. They bypass the
// instrumentation/HAL dispatch above and call the templates directly with the
// same epsilon scaling as the LU32f/LU64f entry points.
int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n)
{
    return LUImpl(A, astep, m, b, bstep, n, FLT_EPSILON*10);
}

int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n)
{
    return LUImpl(A, astep, m, b, bstep, n, DBL_EPSILON*100);
}

bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n)
{
    return CholImpl(A, astep, m, b, bstep, n);
}

bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n)
{
    return CholImpl(A, astep, m, b, bstep, n);
}
}} // cv::hal:: | cpp | github | https://github.com/opencv/opencv | modules/core/src/matrix_decomp.cpp |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
import unittest
from pants.backend.jvm.subsystems.shader import Shader, Shading
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_delete
from pants_test.subsystem.subsystem_util import subsystem_instance
class ShaderTest(unittest.TestCase):
  """Tests for Shader rule assembly and the Shading rule helpers.

  Fix: `test_relocate` previously called `.format('__my_prefix__.')` on an
  expected string with no replacement fields, so the argument was silently
  ignored (a copy-paste leftover from the case above it); the spurious call
  is removed.
  """

  def setUp(self):
    self.jarjar = '/not/really/jarjar.jar'
    with subsystem_instance(DistributionLocator):
      executor = SubprocessExecutor(DistributionLocator.cached())
      self.shader = Shader(jarjar_classpath=[self.jarjar], executor=executor)
    self.output_jar = '/not/really/shaded.jar'

  def populate_input_jar(self, *entries):
    """Create a temporary jar holding the given entry names; returns its path."""
    fd, input_jar_path = tempfile.mkstemp()
    os.close(fd)
    self.addCleanup(safe_delete, input_jar_path)
    with open_zip(input_jar_path, 'w') as jar:
      for entry in entries:
        jar.writestr(entry, '0xCAFEBABE')
    return input_jar_path

  def test_assemble_default_rules(self):
    """Main package excluded first, system packages next, shade-all last."""
    input_jar = self.populate_input_jar('org/pantsbuild/tools/fake/Main.class',
                                        'com/google/common/base/Function.class')
    rules = self.shader.assemble_binary_rules('org.pantsbuild.tools.fake.Main', input_jar)
    self.assertEqual(Shader.exclude_package('org.pantsbuild.tools.fake'), rules[0])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
    self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])

  def test_assemble_default_rules_default_package(self):
    """A main class in the default package excludes the default package."""
    input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
    rules = self.shader.assemble_binary_rules('main', input_jar)
    self.assertEqual(Shader.exclude_package(), rules[0])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
    self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])

  def test_assemble_custom_rules(self):
    """Caller-provided rules come before the generated defaults."""
    input_jar = self.populate_input_jar('main.class')
    rules = self.shader.assemble_binary_rules('main', input_jar,
                                              custom_rules=[Shader.shade_class('bob'),
                                                            Shader.exclude_class('fred')])
    self.assertEqual(Shader.shade_class('bob'), rules[0])
    self.assertEqual(Shader.exclude_class('fred'), rules[1])
    self.assertEqual(Shader.exclude_package(), rules[2])
    self.assertIn(Shader.exclude_package('javax.annotation'), rules[3:])

  def test_runner_command(self):
    """The generated jarjar command line and rules file have the expected shape."""
    input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
    custom_rules = [Shader.exclude_package('log4j', recursive=True)]
    with self.shader.binary_shader(self.output_jar, 'main', input_jar,
                                   custom_rules=custom_rules) as shader:
      command = shader.command

      self.assertTrue(command.pop(0).endswith('java'))

      jar_or_cp = command.pop(0)
      self.assertIn(jar_or_cp, {'-cp', 'classpath', '-jar'})
      self.assertEqual(self.jarjar, os.path.abspath(command.pop(0)))

      if jar_or_cp != '-jar':
        # We don't really care what the name of the jarjar main class is - shader.command[2]
        command.pop(0)

      self.assertEqual('process', command.pop(0))

      rules_file = command.pop(0)
      self.assertTrue(os.path.exists(rules_file))
      with open(rules_file) as fp:
        lines = fp.read().splitlines()
        self.assertEqual('rule log4j.** log4j.@1', lines[0])  # The custom rule.
        self.assertEqual('rule * @1', lines[1])  # Exclude main's package.
        self.assertIn('rule javax.annotation.* javax.annotation.@1', lines)  # Exclude system.
        self.assertEqual('rule com.google.common.base.* {}com.google.common.base.@1'
                         .format(Shading.SHADE_PREFIX), lines[-1])  # Shade the rest.

      self.assertEqual(input_jar, command.pop(0))
      self.assertEqual(self.output_jar, command.pop(0))

  def test_sanitize_package_name(self):
    """Illegal java-identifier characters are rewritten, dots normalized."""
    def assert_sanitize(name, sanitized):
      self.assertEqual(sanitized, Shading.Relocate._sanitize_package_name(name))

    assert_sanitize('hello', 'hello')
    assert_sanitize('hello.goodbye', 'hello.goodbye')
    assert_sanitize('.hello.goodbye', 'hello.goodbye')
    assert_sanitize('hello.goodbye.', 'hello.goodbye')
    assert_sanitize('123', '_123')
    assert_sanitize('123.456', '_123._456')
    assert_sanitize('123.v2', '_123.v2')
    assert_sanitize('hello-goodbye', 'hello_goodbye')
    assert_sanitize('hello-/.goodbye.?', 'hello__.goodbye._')
    assert_sanitize('one.two..three....four.', 'one.two.three.four')

  def test_infer_shaded_pattern(self):
    """Wildcards map to positional @N references; prefix is prepended verbatim."""
    def assert_inference(from_pattern, prefix, to_pattern):
      result = ''.join(Shading.Relocate._infer_shaded_pattern_iter(from_pattern, prefix))
      self.assertEqual(to_pattern, result)

    assert_inference('com.foo.bar.Main', None, 'com.foo.bar.Main')
    assert_inference('com.foo.bar.', None, 'com.foo.bar.')
    assert_inference('com.foo.bar.', '__prefix__.', '__prefix__.com.foo.bar.')
    assert_inference('com.*.bar.', None, 'com.@1.bar.')
    assert_inference('com.*.bar.*.', None, 'com.@1.bar.@2.')
    assert_inference('com.*.bar.**', None, 'com.@1.bar.@2')
    assert_inference('*', None, '@1')
    assert_inference('**', None, '@1')
    assert_inference('**', '__prefix__.', '__prefix__.@1')

  def test_shading_exclude(self):
    """Exclude rules shade to themselves (identity mapping)."""
    def assert_exclude(from_pattern, to_pattern):
      self.assertEqual((from_pattern, to_pattern), Shading.Exclude.new(from_pattern).rule())

    assert_exclude('com.foo.bar.Main', 'com.foo.bar.Main')
    assert_exclude('com.foo.bar.**', 'com.foo.bar.@1')
    assert_exclude('com.*.bar.**', 'com.@1.bar.@2')

  def test_shading_exclude_package(self):
    self.assertEqual(('com.foo.bar.**', 'com.foo.bar.@1'),
                     Shading.ExcludePackage.new('com.foo.bar').rule())
    self.assertEqual(('com.foo.bar.*', 'com.foo.bar.@1'),
                     Shading.ExcludePackage.new('com.foo.bar', recursive=False).rule())

  def test_relocate(self):
    self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
                     Shading.Relocate.new(from_pattern='com.foo.bar.**').rule())

    self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format('__my_prefix__.')),
                     Shading.Relocate.new(from_pattern='com.foo.bar.**',
                                          shade_prefix='__my_prefix__.').rule())

    # An explicit shade_pattern wins over the prefix, so the expected value is
    # the literal pattern (previously a no-op .format() call was attached here).
    self.assertEqual(('com.foo.bar.**', 'org.biz.baz.@1'),
                     Shading.Relocate.new(from_pattern='com.foo.bar.**',
                                          shade_prefix='__my_prefix__.',
                                          shade_pattern='org.biz.baz.@1').rule())

  def test_relocate_package(self):
    self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
                     Shading.RelocatePackage.new('com.foo.bar').rule())
    self.assertEqual(('com.foo.bar.*', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
                     Shading.RelocatePackage.new('com.foo.bar', recursive=False).rule())
"""
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
import os
import copy
import json
import requests
import logging
from pysrt import SubRipTime, SubRipItem, SubRipFile
from lxml import etree
from HTMLParser import HTMLParser
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from .bumper_utils import get_bumper_settings
log = logging.getLogger(__name__)
class TranscriptException(Exception):
    """Raised for general transcript-processing failures."""


class TranscriptsGenerationException(Exception):
    """Raised when transcripts cannot be generated."""


class GetTranscriptsFromYouTubeException(Exception):
    """Raised when transcripts cannot be fetched from YouTube."""


class TranscriptsRequestValidationException(Exception):
    """Raised when a transcripts-related request fails validation."""
def generate_subs(speed, source_speed, source_subs):
    """
    Rescale subtitle timings from ``source_speed`` to ``speed``.

    Args:
        `speed`: float, for this speed subtitles will be generated,
        `source_speed`: float, speed of source_subs
        `source_subs`: dict, existing subtitles for speed `source_speed`.

    Returns:
        `subs`: dict with rescaled 'start'/'end' millisecond timestamps and
        the original 'text' entries. The input dict is returned unchanged
        when the speeds already match.
    """
    if speed == source_speed:
        return source_subs

    factor = 1.0 * speed / source_speed

    def _scale(timestamps):
        # Round to the nearest integer millisecond after rescaling.
        return [int(round(stamp * factor)) for stamp in timestamps]

    return {
        'start': _scale(source_subs['start']),
        'end': _scale(source_subs['end']),
        'text': source_subs['text'],
    }
def save_to_store(content, name, mime_type, location):
    """
    Persist named ``content`` in the content store under ``location``.

    Returns the location of the saved content.
    """
    asset_location = Transcript.asset_location(location, name)
    asset = StaticContent(asset_location, name, mime_type, content)
    contentstore().save(asset)
    return asset_location
def save_subs_to_store(subs, subs_id, item, language='en'):
    """
    Save transcripts into `StaticContent`.

    Args:
        `subs_id`: str, subtitles id
        `item`: video module instance
        `language`: two chars str ('uk'), language of translation of transcripts

    Returns: location of saved subtitles.
    """
    return save_to_store(
        json.dumps(subs, indent=2),
        subs_filename(subs_id, language),
        'application/json',
        item.location,
    )
def youtube_video_transcript_name(youtube_text_api):
    """
    Return the name of the transcript track whose language code matches the
    one requested in ``youtube_text_api['params']['lang']``, or None when the
    video has no such track (or the listing request fails).
    """
    wanted_lang = youtube_text_api['params']['lang']
    # Track-listing endpoint:
    # http://video.google.com/timedtext?type=list&v={VideoId}
    listing_params = {'type': 'list', 'v': youtube_text_api['params']['v']}
    response = requests.get('http://' + youtube_text_api['url'], params=listing_params)
    if response.status_code != 200 or not response.text:
        return None

    parser = etree.XMLParser(encoding='utf-8')
    tracks = etree.fromstring(response.content, parser=parser)
    # Scan the listed tracks for the requested language code (e.g. 'en').
    for track in tracks:
        if track.tag == 'track' and track.get('lang_code', '') == wanted_lang:
            return track.get('name')
    return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
    """
    Gets transcripts from youtube for youtube_id.

    Parses only utf-8 encoded transcripts.
    Other encodings are not supported at the moment.

    Args:
        youtube_id: str, youtube id of the video.
        settings: settings object exposing YOUTUBE['TEXT_API'] configuration.
        i18n: i18n service, used to translate the error message.
        youtube_transcript_name: str, transcript name to request. When empty,
            it is looked up on the youtube server.

    Returns: subs dict {'start': [...], 'end': [...], 'text': [...]}.

    Raises:
        GetTranscriptsFromYouTubeException: when transcripts cannot be fetched.
    """
    _ = i18n.ugettext
    utf8_parser = etree.XMLParser(encoding='utf-8')
    youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
    youtube_text_api['params']['v'] = youtube_id
    # If the transcript name is not empty on the youtube server we have to
    # pass a name param in the url in order to get the transcript, e.g.
    # http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
    # FIX: the caller-supplied `youtube_transcript_name` used to be
    # unconditionally overwritten (dead parameter); only look it up when the
    # caller did not provide one.
    if not youtube_transcript_name:
        youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
    if youtube_transcript_name:
        youtube_text_api['params']['name'] = youtube_transcript_name
    data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
    if data.status_code != 200 or not data.text:
        msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
            youtube_id=youtube_id,
            status_code=data.status_code
        )
        raise GetTranscriptsFromYouTubeException(msg)
    sub_starts, sub_ends, sub_texts = [], [], []
    xmltree = etree.fromstring(data.content, parser=utf8_parser)
    for element in xmltree:
        if element.tag == "text":
            start = float(element.get("start"))
            duration = float(element.get("dur", 0))  # dur is not mandatory
            text = element.text
            end = start + duration
            if text:
                # Start and end should be ints representing the millisecond timestamp.
                sub_starts.append(int(start * 1000))
                sub_ends.append(int((end + 0.0001) * 1000))
                sub_texts.append(text.replace('\n', ' '))
    return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
    """
    Download transcripts from Youtube and save them to assets.

    We save transcripts for 1.0 speed, as for other speed conversion is done
    on the front-end.

    Args:
        youtube_id: str, actual youtube_id of the video.
        video_descriptor: video descriptor instance.
        settings: settings object exposing the YOUTUBE configuration.

    Returns:
        None, if transcripts were successfully downloaded and saved.

    Raises:
        GetTranscriptsFromYouTubeException, if fails.
    """
    # FIX: removed the unused local alias `_ = i18n.ugettext`; the i18n
    # service itself is still needed by get_transcripts_from_youtube.
    i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
    subs = get_transcripts_from_youtube(youtube_id, settings, i18n)
    save_subs_to_store(subs, youtube_id, video_descriptor)
    log.info("Transcripts for youtube_id %s for 1.0 speed are downloaded and saved.", youtube_id)
def remove_subs_from_store(subs_id, item, lang='en'):
    """
    Delete the stored transcript for `subs_id`/`lang`, if it exists.
    """
    Transcript.delete_asset(item.location, subs_filename(subs_id, lang))
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
    """Generate sjson transcripts from a source file (SubRip format, etc.)
    and save them to assets for the `item` module.

    We expect that the speed of the source subs equals 1.

    :param speed_subs: dictionary {speed: sub_id, ...}
    :param subs_type: type of source subs: "srt", ...
    :param subs_filedata: unicode, content of source subs.
    :param item: module object.
    :param language: str, language of translation of transcripts
    :returns: subs dict, if all subs are generated and saved successfully.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    if subs_type.lower() != 'srt':
        raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
    try:
        srt_subs_obj = SubRipFile.from_string(subs_filedata)
    except Exception as ex:
        msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
            error_message=ex.message
        )
        raise TranscriptsGenerationException(msg)
    if not srt_subs_obj:
        raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
    # Flatten the parsed cues into parallel start/end/text lists
    # (SubRip ordinals are millisecond timestamps).
    subs = {
        'start': [cue.start.ordinal for cue in srt_subs_obj],
        'end': [cue.end.ordinal for cue in srt_subs_obj],
        'text': [cue.text.replace('\n', ' ') for cue in srt_subs_obj],
    }
    for speed, subs_id in speed_subs.iteritems():
        save_subs_to_store(
            generate_subs(speed, 1, subs),
            subs_id,
            item,
            language
        )
    return subs
def generate_srt_from_sjson(sjson_subs, speed):
    """Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).

    :param sjson_subs: "sjson" subs.
    :param speed: speed of `sjson_subs`.
    :returns: "srt" subs, or the empty string when the sjson lists have
        inconsistent lengths.
    """
    if not (len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])):
        return ''
    normalized = generate_subs(speed, 1, sjson_subs)
    pieces = []
    for index, (start_ms, end_ms, text) in enumerate(
            zip(normalized['start'], normalized['end'], normalized['text'])):
        cue = SubRipItem(
            index=index,
            start=SubRipTime(milliseconds=start_ms),
            end=SubRipTime(milliseconds=end_ms),
            text=text
        )
        pieces.append(unicode(cue))
        pieces.append('\n')
    return ''.join(pieces)
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
    """
    Copy the `old_name` transcript in storage to `new_name` and point
    `item.sub` at the new name.

    Raises `NotFoundError` when `old_name` is not found in storage.
    When `delete_old` is True, the `old_name` transcript is removed afterwards.
    """
    old_filename = 'subs_{0}.srt.sjson'.format(old_name)
    old_location = StaticContent.compute_location(item.location.course_key, old_filename)
    transcript_data = contentstore().find(old_location).data
    save_subs_to_store(json.loads(transcript_data), new_name, item)
    item.sub = new_name
    item.save_with_metadata(user)
    if delete_old:
        remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
    """
    Strip each HTML5 source down to its bare id: the final path component
    with its last extension removed.

    NOTE: This assumes that '/' are not in the filename.
    """
    ids = []
    for source in html5_sources:
        basename = source.split('/')[-1]
        ids.append(basename.rsplit('.', 1)[0])
    return ids
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
    """
    Does some specific things, that can be done only on save.

    Video player item has some video fields: HTML5 ones and Youtube one.
    If value of `sub` field of `new_item` is cleared, transcripts should be removed.

    `item` is video module instance with updated values of fields,
    but actually have not been saved to store yet.
    `old_metadata` contains old values of XFields.

    # 1.
    If value of `sub` field of `new_item` is different from values of video fields of `new_item`,
    and `new_item.sub` file is present, then code in this function creates copies of
    `new_item.sub` file with new names. That names are equal to values of video fields of `new_item`
    After that `sub` field of `new_item` is changed to one of values of video fields.
    This whole action ensures that after user changes video fields, proper `sub` files, corresponding
    to new values of video fields, will be presented in system.

    # 2. convert /static/filename.srt to filename.srt in self.transcripts.
    (it is done to allow user to enter both /static/filename.srt and filename.srt)

    # 3. Generate transcripts translation only when user clicks `save` button, not while switching tabs.
    a) delete sjson translation for those languages, which were removed from `item.transcripts`.
       Note: we are not deleting old SRT files to give user more flexibility.
    b) For all SRT files in `item.transcripts` regenerate new SJSON files.
       (To avoid confusing situation if you attempt to correct a translation by uploading
       a new version of the SRT file with same name).
    """
    # NOTE(review): `_` is assigned but never used in this function; the
    # service call is kept as-is in case it has side effects — confirm.
    _ = item.runtime.service(item, "i18n").ugettext
    # 1. Propagate `item.sub` to every configured video id (or remove subs
    # when `sub` was cleared).
    html5_ids = get_html5_ids(item.html5_sources)
    possible_video_id_list = [item.youtube_id_1_0] + html5_ids
    sub_name = item.sub
    for video_id in possible_video_id_list:
        if not video_id:
            continue
        if not sub_name:
            remove_subs_from_store(video_id, item)
            continue
        # copy_or_rename_transcript changes item.sub of module
        try:
            # updates item.sub with `video_id`, if it is successful.
            copy_or_rename_transcript(video_id, sub_name, item, user=user)
        except NotFoundError:
            # subtitles file `sub_name` is not presented in the system. Nothing to copy or rename.
            log.debug(
                "Copying %s file content to %s name is failed, "
                "original file does not exist.",
                sub_name, video_id
            )
    # 2. Normalize transcript filenames: keep only the basename so both
    # "/static/filename.srt" and "filename.srt" inputs are accepted.
    if generate_translation:
        for lang, filename in item.transcripts.items():
            item.transcripts[lang] = os.path.split(filename)[-1]
    # 3.
    if generate_translation:
        old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
        new_langs = set(item.transcripts)
        for lang in old_langs.difference(new_langs):  # 3a: drop sjson for removed languages
            for video_id in possible_video_id_list:
                if video_id:
                    remove_subs_from_store(video_id, item, lang)
        reraised_message = ''
        for lang in new_langs:  # 3b: regenerate sjson for every remaining language
            try:
                generate_sjson_for_all_speeds(
                    item,
                    item.transcripts[lang],
                    {speed: subs_id for subs_id, speed in youtube_speed_dict(item).iteritems()},
                    lang,
                )
            except TranscriptException as ex:
                item.transcripts.pop(lang)  # remove key from transcripts because proper srt file does not exist in assets.
                reraised_message += ' ' + ex.message
        # Collect all per-language failures into a single exception, but only
        # after persisting the pruned transcripts dict.
        if reraised_message:
            item.save_with_metadata(user)
            raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
    """
    Returns {youtube_id: speed, ...} dict for existing youtube_ids.

    FIX: the old docstring claimed the inverted mapping ({speed: youtube_ids});
    keys are in fact the youtube ids, as the caller-side inversion
    `{speed: subs_id for subs_id, speed in ...}` relies on.
    """
    yt_ids = [item.youtube_id_0_75, item.youtube_id_1_0, item.youtube_id_1_25, item.youtube_id_1_5]
    yt_speeds = [0.75, 1.00, 1.25, 1.50]
    return {youtube_id: speed for (youtube_id, speed) in zip(yt_ids, yt_speeds) if youtube_id}
def subs_filename(subs_id, lang='en'):
    """
    Generate proper filename for storage.

    English transcripts carry no language prefix; any other language is
    prefixed with its code.
    """
    prefix = u'' if lang == 'en' else u'{0}_'.format(lang)
    return u'{0}subs_{1}.srt.sjson'.format(prefix, subs_id)
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
    """
    Generates sjson from srt for the given lang.

    `item` is module object.
    Raises TranscriptException when the uploaded srt cannot be found.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    try:
        srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
    except NotFoundError as ex:
        raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
            exception_message=ex.message,
            user_filename=user_filename
        ))
    target_lang = lang or item.transcript_language
    # utf-8-sig (rather than utf-8) strips a leading BOM (U+FEFF) if present.
    generate_subs_from_source(
        result_subs_dict,
        os.path.splitext(user_filename)[1][1:],
        srt_transcripts.data.decode('utf-8-sig'),
        item,
        target_lang
    )
def get_or_create_sjson(item, transcripts):
    """
    Get sjson if already exists, otherwise generate it.

    The sjson subs_id is derived from the srt filename the user uploaded.

    Args:
        transcripts (dict): dictionary of (language: file) pairs.

    Raises:
        TranscriptException: when srt subtitles do not exist,
        and exceptions from generate_subs_from_source.

    `item` is module object.
    """
    user_filename = transcripts[item.transcript_language]
    subs_id = os.path.splitext(user_filename)[0]
    try:
        return Transcript.asset(item.location, subs_id, item.transcript_language).data
    except NotFoundError:
        # No sjson yet: generate it from the srt (speed 1.0), then re-read.
        generate_sjson_for_all_speeds(item, user_filename, {1.0: subs_id}, item.transcript_language)
        return Transcript.asset(item.location, subs_id, item.transcript_language).data
class Transcript(object):
    """
    Container for transcript methods.

    All methods are static; assets are addressed through the contentstore.
    """
    # MIME types used when serving transcripts in each supported format.
    mime_types = {
        'srt': 'application/x-subrip; charset=utf-8',
        'txt': 'text/plain; charset=utf-8',
        'sjson': 'application/json',
    }
    @staticmethod
    def convert(content, input_format, output_format):
        """
        Convert transcript `content` from `input_format` to `output_format`.

        Accepted input formats: sjson, srt.
        Accepted output format: srt, txt, sjson.

        srt -> sjson is not implemented and raises NotImplementedError.
        When input and output formats match, `content` is returned unchanged.
        """
        assert input_format in ('srt', 'sjson')
        assert output_format in ('txt', 'srt', 'sjson')
        if input_format == output_format:
            return content
        if input_format == 'srt':
            if output_format == 'txt':
                text = SubRipFile.from_string(content.decode('utf8')).text
                return HTMLParser().unescape(text)
            elif output_format == 'sjson':
                raise NotImplementedError
        if input_format == 'sjson':
            if output_format == 'txt':
                # sjson stores the cue texts as a list under the 'text' key.
                text = json.loads(content)['text']
                return HTMLParser().unescape("\n".join(text))
            elif output_format == 'srt':
                return generate_srt_from_sjson(json.loads(content), speed=1.0)
    @staticmethod
    def asset(location, subs_id, lang='en', filename=None):
        """
        Get asset from contentstore, asset location is built from subs_id and lang.

        `location` is module location. When `filename` is given it overrides
        the subs_id/lang-derived name.
        """
        asset_filename = subs_filename(subs_id, lang) if not filename else filename
        return Transcript.get_asset(location, asset_filename)
    @staticmethod
    def get_asset(location, filename):
        """
        Return asset by location and filename (raises NotFoundError if missing).
        """
        return contentstore().find(Transcript.asset_location(location, filename))
    @staticmethod
    def asset_location(location, filename):
        """
        Return asset location. `location` is module location.
        """
        return StaticContent.compute_location(location.course_key, filename)
    @staticmethod
    def delete_asset(location, filename):
        """
        Delete asset by location and filename.

        Missing assets are ignored. The computed asset location is returned
        whether or not anything was actually deleted.
        """
        try:
            contentstore().delete(Transcript.asset_location(location, filename))
            log.info("Transcript asset %s was removed from store.", filename)
        except NotFoundError:
            pass
        return StaticContent.compute_location(location.course_key, filename)
class VideoTranscriptsMixin(object):
    """Mixin class for transcript functionality.

    This is necessary for both VideoModule and VideoDescriptor.
    """
    def available_translations(self, transcripts, verify_assets=True):
        """Return the language codes for which we have transcripts.

        Args:
            transcripts (dict): A dict with keys "sub" and "transcripts".
            verify_assets (boolean): If True, checks to ensure that the transcripts
                really exist in the contentstore. If False, we just look at the
                VideoDescriptor fields and do not query the contentstore. One reason
                we might do this is to avoid slamming contentstore() with queries
                when trying to make a listing of videos and their languages.

                Defaults to True.

        NOTE(review): the unverified path returns a *set* while the verified
        path returns a *list* — callers must tolerate both; confirm before
        tightening.
        """
        translations = []
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        # If we're not verifying the assets, we just trust our field values
        if not verify_assets:
            translations = list(other_lang)
            if not translations or sub:
                translations += ['en']
            return set(translations)
        # If we've gotten this far, we're going to verify that the transcripts
        # being referenced are actually in the contentstore.
        if sub:  # check if sjson exists for 'en'.
            try:
                Transcript.asset(self.location, sub, 'en')
            except NotFoundError:
                # Fall back to treating `sub` as a raw filename.
                try:
                    Transcript.asset(self.location, None, None, sub)
                except NotFoundError:
                    pass
                else:
                    translations = ['en']
            else:
                translations = ['en']
        for lang in other_lang:
            try:
                Transcript.asset(self.location, None, None, other_lang[lang])
            except NotFoundError:
                continue
            translations.append(lang)
        return translations
    def get_transcript(self, transcripts, transcript_format='srt', lang=None):
        """
        Returns transcript, filename and MIME type.

        transcripts (dict): A dict with all transcripts and a sub.

        Raises:
            - NotFoundError if cannot find transcript file in storage.
            - ValueError if transcript file is empty or incorrect JSON.
            - KeyError if transcript file has incorrect format.

        If language is 'en', self.sub should be correct subtitles name.
        If language is 'en', but if self.sub is not defined, this means that we
        should search for video name in order to get proper transcript (old style courses).
        If language is not 'en', give back transcript in proper language and format.
        """
        if not lang:
            lang = self.get_default_transcript_language(transcripts)
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if lang == 'en':
            if sub:  # HTML5 case and (Youtube case for new style videos)
                transcript_name = sub
            elif self.youtube_id_1_0:  # old courses
                transcript_name = self.youtube_id_1_0
            else:
                log.debug("No subtitles for 'en' language")
                raise ValueError
            # English transcripts are stored as sjson and converted on the fly.
            data = Transcript.asset(self.location, transcript_name, lang).data
            filename = u'{}.{}'.format(transcript_name, transcript_format)
            content = Transcript.convert(data, 'sjson', transcript_format)
        else:
            # Non-English transcripts are stored as the uploaded srt file.
            data = Transcript.asset(self.location, None, None, other_lang[lang]).data
            filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
            content = Transcript.convert(data, 'srt', transcript_format)
        if not content:
            log.debug('no subtitles produced in get_transcript')
            raise ValueError
        return content, filename, Transcript.mime_types[transcript_format]
    def get_default_transcript_language(self, transcripts):
        """
        Returns the default transcript language for this video module.

        Preference order: the module's configured language if a translation
        exists, then 'en' if a sub is set, then the alphabetically first
        available translation, finally 'en'.

        Args:
            transcripts (dict): A dict with all transcripts and a sub.
        """
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if self.transcript_language in other_lang:
            transcript_language = self.transcript_language
        elif sub:
            transcript_language = u'en'
        elif len(other_lang) > 0:
            transcript_language = sorted(other_lang)[0]
        else:
            transcript_language = u'en'
        return transcript_language
    def get_transcripts_info(self, is_bumper=False):
        """
        Returns a transcript dictionary ({"sub": ..., "transcripts": {...}})
        for the video, optionally for its bumper settings.
        """
        if is_bumper:
            transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
            return {
                "sub": transcripts.pop("en", ""),
                "transcripts": transcripts,
            }
        else:
            return {
                "sub": self.sub,
                "transcripts": self.transcripts,
            }
""" test feather-format compat """
import pytest
feather = pytest.importorskip('feather')
import numpy as np
import pandas as pd
from pandas.io.feather_format import to_feather, read_feather
from feather import FeatherError
from pandas.util.testing import assert_frame_equal, ensure_clean
import pandas.util.testing as tm
from distutils.version import LooseVersion
fv = LooseVersion(feather.__version__)
@pytest.mark.single
class TestFeather(object):
    # Round-trip and error-path tests for pandas' feather IO wrappers.
    def check_error_on_write(self, df, exc):
        # check that we are raising the exception
        # on writing
        with pytest.raises(exc):
            with ensure_clean() as path:
                to_feather(df, path)
    def check_round_trip(self, df, **kwargs):
        # Write `df` to a temp feather file, read it back, and require equality.
        with ensure_clean() as path:
            to_feather(df, path)
            result = read_feather(path, **kwargs)
            assert_frame_equal(result, df)
    def test_error(self):
        # Non-DataFrame inputs must be rejected on write.
        for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
                    np.array([1, 2, 3])]:
            self.check_error_on_write(obj, ValueError)
    def test_basic(self):
        # One column per supported dtype family.
        df = pd.DataFrame({'string': list('abc'),
                           'int': list(range(1, 4)),
                           'uint': np.arange(3, 6).astype('u1'),
                           'float': np.arange(4.0, 7.0, dtype='float64'),
                           'float_with_null': [1., np.nan, 3],
                           'bool': [True, False, True],
                           'bool_with_null': [True, np.nan, False],
                           'cat': pd.Categorical(list('abc')),
                           'dt': pd.date_range('20130101', periods=3),
                           'dttz': pd.date_range('20130101', periods=3,
                                                 tz='US/Eastern'),
                           'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
                                            pd.Timestamp('20130103')],
                           'dtns': pd.date_range('20130101', periods=3,
                                                 freq='ns')})
        assert df.dttz.dtype.tz.zone == 'US/Eastern'
        self.check_round_trip(df)
    @pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
    def test_strided_data_issues(self):
        # strided data issue https://github.com/wesm/feather/issues/97
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc'))
        self.check_error_on_write(df, FeatherError)
    def test_duplicate_columns(self):
        # https://github.com/wesm/feather/issues/53
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3),
                          columns=list('aaa')).copy()
        self.check_error_on_write(df, ValueError)
    def test_stringify_columns(self):
        # Non-string column labels are not supported.
        df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
        self.check_error_on_write(df, ValueError)
    @pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
    def test_unsupported(self):
        # timedelta
        df = pd.DataFrame({'a': pd.timedelta_range('1 day', periods=3)})
        self.check_error_on_write(df, FeatherError)
        # non-strings
        df = pd.DataFrame({'a': ['a', 1, 2.0]})
        self.check_error_on_write(df, ValueError)
    def test_unsupported_other(self):
        # period
        df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
        self.check_error_on_write(df, ValueError)
    @pytest.mark.skipif(fv < '0.4.0', reason='new in 0.4.0')
    def test_rw_nthreads(self):
        # Multi-threaded read path.
        df = pd.DataFrame({'A': np.arange(100000)})
        self.check_round_trip(df, nthreads=2)
    def test_write_with_index(self):
        # Only a default RangeIndex survives a round trip.
        df = pd.DataFrame({'A': [1, 2, 3]})
        self.check_round_trip(df)
        # non-default index
        for index in [[2, 3, 4],
                      pd.date_range('20130101', periods=3),
                      list('abc'),
                      [1, 3, 4],
                      pd.MultiIndex.from_tuples([('a', 1), ('a', 2),
                                                 ('b', 1)]),
                      ]:
            df.index = index
            self.check_error_on_write(df, ValueError)
        # index with meta-data
        df.index = [0, 1, 2]
        df.index.name = 'foo'
        self.check_error_on_write(df, ValueError)
        # column multi-index
        df.index = [0, 1, 2]
        # NOTE(review): the trailing comma makes the right-hand side a
        # 1-tuple, so df.columns is assigned a tuple — likely a typo, but
        # the ValueError the test expects is still raised; confirm intent.
        df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]),
        self.check_error_on_write(df, ValueError)
    def test_path_pathlib(self):
        df = tm.makeDataFrame().reset_index()
        result = tm.round_trip_pathlib(df.to_feather, pd.read_feather)
        tm.assert_frame_equal(df, result)
    def test_path_localpath(self):
        df = tm.makeDataFrame().reset_index()
        result = tm.round_trip_localpath(df.to_feather, pd.read_feather)
        tm.assert_frame_equal(df, result)
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
    """Emit x86 assembly dispatch stubs for every GL API entry point.

    The output is a single assembly file: a preamble of threading-dependent
    GL_STUB/GL_STUB_ALIAS macro definitions, one stub per function, and a
    platform-specific footer.
    """
    def __init__(self):
        gl_XML.gl_print_base.__init__(self)
        self.name = "gl_x86_asm.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2005""", "BRIAN PAUL, IBM")
        return
    def get_stack_size(self, f):
        # Total bytes of stack taken by f's parameters (padding excluded);
        # used to build the stdcall-style "name@N" alternate symbol name.
        size = 0
        for p in f.parameterIterator():
            if p.is_padding:
                continue
            size += p.get_stack_size()
        return size
    def printRealHeader(self):
        # Emit the assembly prologue: includes, name-mangling macros, and a
        # GL_STUB definition chosen by the threading model (TLS / pthreads /
        # generic threads / non-threaded).
        print '#include "x86/assyntax.h"'
        print '#include "glapi/glapioffsets.h"'
        print ''
        print '#if defined(STDCALL_API)'
        print '# if defined(USE_MGL_NAMESPACE)'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n2))'
        print '# else'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n2))'
        print '# endif'
        print '#else'
        print '# if defined(USE_MGL_NAMESPACE)'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n))'
        print '# define _glapi_Dispatch _mglapi_Dispatch'
        print '# else'
        print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n))'
        print '# endif'
        print '#endif'
        print ''
        print '#define GL_OFFSET(x) CODEPTR(REGOFF(4 * x, EAX))'
        print ''
        print '#if defined(GNU_ASSEMBLER) && !defined(__DJGPP__) && !defined(__MINGW32__) && !defined(__APPLE__)'
        print '#define GLOBL_FN(x) GLOBL x ; .type x, function'
        print '#else'
        print '#define GLOBL_FN(x) GLOBL x'
        print '#endif'
        print ''
        print '#if defined(PTHREADS) || defined(WIN32_THREADS) || defined(BEOS_THREADS)'
        print '# define THREADS'
        print '#endif'
        print ''
        print '#ifdef GLX_USE_TLS'
        print ''
        print '#ifdef GLX_X86_READONLY_TEXT'
        print '# define CTX_INSNS MOV_L(GS:(EAX), EAX)'
        print '#else'
        print '# define CTX_INSNS NOP /* Pad for init_glapi_relocs() */'
        print '#endif'
        print ''
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tCALL(_x86_get_dispatch) ;\t\t\t\\'
        print '\tCTX_INSNS ; \\'
        print '\tJMP(GL_OFFSET(off))'
        print ''
        print '#elif defined(PTHREADS)'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
        print '\tJE(1f) ;\t\t\t\t\t\\'
        print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
        print '1:\tCALL(_x86_get_dispatch) ;\t\t\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#elif defined(THREADS)'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
        print '\tJE(1f) ;\t\t\t\t\t\\'
        print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
        print '1:\tCALL(_glapi_get_dispatch) ;\t\t\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#else /* Non-threaded version. */'
        print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
        print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
        print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
        print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
        print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
        print '\tJMP(GL_OFFSET(off))'
        print '#endif'
        print ''
        print '#ifdef HAVE_ALIAS'
        print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
        print '\t.globl\tGL_PREFIX(fn, fn_alt) ;\t\t\t\\'
        print '\t.set\tGL_PREFIX(fn, fn_alt), GL_PREFIX(alias, alias_alt)'
        print '#else'
        print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
        print ' GL_STUB(fn, off, fn_alt)'
        print '#endif'
        print ''
        print 'SEG_TEXT'
        print ''
        print '#ifdef GLX_USE_TLS'
        print ''
        print '\tGLOBL\tGLNAME(_x86_get_dispatch)'
        print '\tHIDDEN(GLNAME(_x86_get_dispatch))'
        print 'ALIGNTEXT16'
        print 'GLNAME(_x86_get_dispatch):'
        print '\tcall 1f'
        print '1:\tpopl %eax'
        print '\taddl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax'
        print '\tmovl _glapi_tls_Dispatch@GOTNTPOFF(%eax), %eax'
        print '\tret'
        print ''
        print '#elif defined(PTHREADS)'
        print 'EXTERN GLNAME(_glapi_Dispatch)'
        print 'EXTERN GLNAME(_gl_DispatchTSD)'
        print 'EXTERN GLNAME(pthread_getspecific)'
        print ''
        print 'ALIGNTEXT16'
        print 'GLNAME(_x86_get_dispatch):'
        print '\tSUB_L(CONST(24), ESP)'
        print '\tPUSH_L(GLNAME(_gl_DispatchTSD))'
        print '\tCALL(GLNAME(pthread_getspecific))'
        print '\tADD_L(CONST(28), ESP)'
        print '\tRET'
        print '#elif defined(THREADS)'
        print 'EXTERN GLNAME(_glapi_get_dispatch)'
        print '#endif'
        print ''
        print '#if defined( GLX_USE_TLS ) && !defined( GLX_X86_READONLY_TEXT )'
        print '\t\t.section\twtext, "awx", @progbits'
        print '#endif /* defined( GLX_USE_TLS ) */'
        print ''
        print '\t\tALIGNTEXT16'
        print '\t\tGLOBL GLNAME(gl_dispatch_functions_start)'
        print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_start))'
        print 'GLNAME(gl_dispatch_functions_start):'
        print ''
        return
    def printRealFooter(self):
        # Emit the closing symbol plus Linux-specific ELF notes/sections.
        print ''
        print '\t\tGLOBL\tGLNAME(gl_dispatch_functions_end)'
        print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_end))'
        print '\t\tALIGNTEXT16'
        print 'GLNAME(gl_dispatch_functions_end):'
        print ''
        print '#if defined(GLX_USE_TLS) && defined(__linux__)'
        print ' .section ".note.ABI-tag", "a"'
        print ' .p2align 2'
        print ' .long 1f - 0f /* name length */'
        print ' .long 3f - 2f /* data length */'
        print ' .long 1 /* note length */'
        print '0: .asciz "GNU" /* vendor name */'
        print '1: .p2align 2'
        print '2: .long 0 /* note data: the ABI tag */'
        print ' .long 2,4,20 /* Minimum kernel version w/TLS */'
        print '3: .p2align 2 /* pad out section */'
        print '#endif /* GLX_USE_TLS */'
        print ''
        print '#if defined (__ELF__) && defined (__linux__)'
        print ' .section .note.GNU-stack,"",%progbits'
        print '#endif'
        return
    def printBody(self, api):
        # First pass: one GL_STUB per dispatch offset, hiding stubs that are
        # not static entry points.
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            stack = self.get_stack_size(f)
            alt = "%s@%u" % (name, stack)
            print '\tGL_STUB(%s, _gloffset_%s, %s)' % (name, f.name, alt)
            if not f.is_static_entry_point(f.name):
                print '\tHIDDEN(GL_PREFIX(%s, %s))' % (name, alt)
        # Second pass: aliases for additional static entry points, guarded
        # when the alias has a different GLX protocol.
        for f in api.functionIterateByOffset():
            name = f.dispatch_name()
            stack = self.get_stack_size(f)
            alt = "%s@%u" % (name, stack)
            for n in f.entry_points:
                if f.is_static_entry_point(n):
                    if n != f.name:
                        alt2 = "%s@%u" % (n, stack)
                        text = '\tGL_STUB_ALIAS(%s, _gloffset_%s, %s, %s, %s)' % (n, f.name, alt2, name, alt)
                        if f.has_different_protocol(n):
                            print '#ifndef GLX_INDIRECT_RENDERING'
                            print text
                            print '#endif'
                        else:
                            print text
        return
def show_usage():
    """Print usage information and exit with a non-zero status."""
    print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
    sys.exit(1)
if __name__ == '__main__':
    # Defaults: read gl_API.xml and emit the generic stub flavor.
    file_name = "gl_API.xml"
    mode = "generic"
    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
    except Exception,e:  # Python 2 except syntax; any getopt failure shows usage.
        show_usage()
    for (arg,val) in args:
        if arg == '-m':
            mode = val
        elif arg == "-f":
            file_name = val
    # Only the "generic" mode is implemented.
    if mode == "generic":
        printer = PrintGenericStubs()
    else:
        print "ERROR: Invalid mode \"%s\" specified." % mode
        show_usage()
    api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
    printer.Print(api)
// RUN: %check_clang_tidy %s bugprone-signal-handler %t \
// RUN: -config='{CheckOptions: \
// RUN: {bugprone-signal-handler.AsyncSafeFunctionSet: "minimal"}}' \
// RUN: -- -isystem %clang_tidy_headers
#include "signal.h"
#include "stdlib.h"
#include "string.h"
#include "unistd.h"
// Handler calling `_exit`, which is outside the "minimal" async-safe set
// configured by the RUN line; the check must emit a diagnostic.
void handler_bad1(int) {
  _exit(0);
  // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: standard function '_exit' may not be asynchronous-safe; calling it from a signal handler may be dangerous [bugprone-signal-handler]
}
// `memcpy` is likewise flagged under the minimal set. The pointers stay
// uninitialized on purpose: the handler is only analyzed, never executed.
void handler_bad2(int) {
  void *dst;
  const void *src;
  memcpy(dst, src, 10);
  // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: standard function 'memcpy' may not be asynchronous-safe; calling it from a signal handler may be dangerous [bugprone-signal-handler]
}
// Only functions from the minimal async-safe set are called here, so no
// diagnostics are expected for this handler.
void handler_good(int) {
  abort();
  _Exit(0);
  quick_exit(0);
  signal(0, SIG_DFL);
}
// Registering the handlers via signal() is what makes the check analyze
// their bodies.
void test(void) {
  signal(SIGINT, handler_bad1);
  signal(SIGINT, handler_bad2);
  signal(SIGINT, handler_good);
}
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_FRAMEWORK_TYPE_TRAITS_H_
#define TENSORFLOW_CORE_FRAMEWORK_TYPE_TRAITS_H_
#include <limits>
#include <utility>
#include "xla/tsl/framework/type_traits.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// Re-exported aliases: the implementations live in xla/tsl; these using
// declarations keep them reachable under the tensorflow namespace for
// existing callers.
// NOLINTBEGIN(misc-unused-using-decls)
using tsl::false_type;
using tsl::is_complex;
using tsl::is_quantized;
using tsl::is_simple_type;
using tsl::true_type;
// NOLINTEND(misc-unused-using-decls)
}  // namespace tensorflow
#endif // TENSORFLOW_CORE_FRAMEWORK_TYPE_TRAITS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/framework/type_traits.h |
###############################################################################
#
# Oak game engine
# Copyright (c) 2013 Remi Papillie
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
#! /usr/bin/env python
# encoding: utf-8
import os,sys
from waflib import Configure,Options,Utils
from waflib.Tools import ccroot
from waflib.Configure import conf
@conf
def find_emxx(conf):
    """Locate the Emscripten C++ compiler (em++) and record it on conf.env."""
    env = conf.env
    program = conf.cmd_to_list(conf.find_program(['em++'], var="CXX"))
    env.CXX_NAME = 'emcc'
    env.CXX = program
@conf
def emxx_common_flags(conf):
    """Populate conf.env with the generic flag variables and file-name
    patterns used by the C++ compile/link tasks when building with em++."""
    env = conf.env
    # Compile / link command fragments.
    env['CXX_SRC_F'] = []
    env['CXX_TGT_F'] = ['-c', '-o']
    # Link with the compiler driver itself unless a linker was set already.
    if not env['LINK_CXX']:
        env['LINK_CXX'] = env['CXX']
    env['CXXLNK_SRC_F'] = []
    env['CXXLNK_TGT_F'] = ['-o']
    # Flag templates for include paths, defines and libraries.
    env['CPPPATH_ST'] = '-I%s'
    env['DEFINES_ST'] = '-D%s'
    env['LIB_ST'] = '-l%s'
    env['LIBPATH_ST'] = '-L%s'
    # Static libraries are passed with -l as well (not as a lib%s.a path).
    env['STLIB_ST'] = '-l%s'
    env['STLIBPATH_ST'] = '-L%s'
    env['RPATH_ST'] = '-Wl,-rpath,%s'
    env['SONAME_ST'] = '-Wl,-h,%s'
    env['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    env['STLIB_MARKER'] = '-Wl,-Bstatic'
    # Output name patterns and per-target-kind flags.
    env['cxxprogram_PATTERN'] = '%s'
    env['CXXFLAGS_cxxshlib'] = ['-fPIC']
    env['LINKFLAGS_cxxshlib'] = ['-shared']
    env['cxxshlib_PATTERN'] = 'lib%s.js'
    env['LINKFLAGS_cxxstlib'] = ['-Wl,-Bstatic']
    env['cxxstlib_PATTERN'] = 'lib%s.a'
    env['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    env['CXXFLAGS_MACBUNDLE'] = ['-fPIC']
    env['macbundle_PATTERN'] = '%s.bundle'
@conf
def emxx_modifier_browser(conf):
    """Adjust output patterns for browser targets: programs become .html
    shells and shared objects become plain .js files."""
    env = conf.env
    env['cxxprogram_PATTERN'] = '%s.html'
    env['cxxshlib_PATTERN'] = '%s.js'
    env['implib_PATTERN'] = 'lib%s.js.a'
    env['IMPLIB_ST'] = '-Wl,--out-implib,%s'
    env['CXXFLAGS_cxxshlib'] = []
    env.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def emxx_modifier_platform(conf):
    """Dispatch to an emxx_modifier_<TARGET_OS> hook when one is defined."""
    hook = getattr(conf, 'emxx_modifier_' + conf.env.TARGET_OS, None)
    if hook:
        hook()
def configure(conf):
    """waf configuration entry point: detect em++, load the companion
    emcc/emar tools, then set up all the C++ build variables."""
    conf.find_emxx()
    for tool in ('emcc', 'emar'):
        conf.load(tool, tooldir="waf-tools")
    conf.emxx_common_flags()
    conf.emxx_modifier_platform()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
from django.conf import settings
from django.contrib.gis.gdal import OGRException
from django.contrib.gis.geos import GEOSGeometry, GEOSException
from django.forms.widgets import Textarea
from django.template import loader, Context
from django.utils import translation
# Creating a template context that contains Django settings
# values needed by admin map templates.
# NOTE(review): built once at import time, so LANGUAGE_BIDI reflects the
# language active when this module was first imported — confirm that is
# acceptable for multi-language admin deployments.
geo_context = Context({'ADMIN_MEDIA_PREFIX' : settings.ADMIN_MEDIA_PREFIX,
                       'LANGUAGE_BIDI' : translation.get_language_bidi(),
                      })
class OpenLayersWidget(Textarea):
    """
    Renders an OpenLayers map using the WKT of the geometry.

    Instances are expected to have ``params`` (template parameter dict),
    ``template`` and ``geom_type`` attributes set before ``render`` is
    called — presumably by the admin factory that creates the widget
    (not visible here; confirm against the caller).
    """
    def render(self, name, value, attrs=None):
        """
        Populate self.params (wkt, map_options, name, module) from *value*
        and render the map template. *value* may be a GEOSGeometry or its
        WKT string; invalid or mismatched geometries render an empty map.
        """
        # Update the template parameters with any attributes passed in.
        if attrs: self.params.update(attrs)
        # Defaulting the WKT value to a blank string -- this
        # will be tested in the JavaScript and the appropriate
        # interface will be constructed.
        self.params['wkt'] = ''
        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if isinstance(value, basestring):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError):
                value = None
        # Drop geometries of the wrong type rather than rendering them.
        if value and value.geom_type.upper() != self.geom_type:
            value = None
        # Constructing the dictionary of the map options.
        self.params['map_options'] = self.map_options()
        # Constructing the JavaScript module name using the name of
        # the GeometryField (passed in via the `attrs` keyword).
        # Use the 'name' attr for the field name (rather than 'field')
        self.params['name'] = name
        # note: we must switch out dashes for underscores since js
        # functions are created using the module variable
        js_safe_name = self.params['name'].replace('-','_')
        self.params['module'] = 'geodjango_%s' % js_safe_name
        if value:
            # Transforming the geometry to the projection used on the
            # OpenLayers map.
            srid = self.params['srid']
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except OGRException:
                    # Transformation failed: fall back to an empty map.
                    wkt = ''
            else:
                wkt = value.wkt
            # Setting the parameter WKT with that of the transformed
            # geometry.
            self.params['wkt'] = wkt
        return loader.render_to_string(self.template, self.params,
                                       context_instance=geo_context)
    def map_options(self):
        "Builds the map options hash for the OpenLayers template."
        # JavaScript construction utilities for the Bounds and Projection.
        def ol_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % str(extent)
        def ol_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid
        # An array of the parameter name, the name of their OpenLayers
        # counterpart, and the type of variable they are.
        map_types = [('srid', 'projection', 'srid'),
                     ('display_srid', 'displayProjection', 'srid'),
                     ('units', 'units', str),
                     ('max_resolution', 'maxResolution', float),
                     ('max_extent', 'maxExtent', 'bounds'),
                     ('num_zoom', 'numZoomLevels', int),
                     ('max_zoom', 'maxZoomLevels', int),
                     ('min_zoom', 'minZoomLevel', int),
                     ]
        # Building the map options hash. Params that are absent or falsy
        # (note: a 0 value is skipped too) are simply left out.
        map_options = {}
        for param_name, js_name, option_type in map_types:
            if self.params.get(param_name, False):
                if option_type == 'srid':
                    value = ol_projection(self.params[param_name])
                elif option_type == 'bounds':
                    value = ol_bounds(self.params[param_name])
                elif option_type in (float, int):
                    value = self.params[param_name]
                elif option_type in (str,):
                    # String options are emitted as quoted JS literals.
                    value = '"%s"' % self.params[param_name]
                else:
                    raise TypeError
                map_options[js_name] = value
        return map_options
import { test } from '../../assert';
// Resolving promise used to yield once to the microtask queue before
// inspecting the element.
const tick = () => Promise.resolve();
export default test({
	async test({ assert, target }) {
		target.innerHTML = '<custom-element></custom-element>';
		await tick();
		const el = target.querySelector('custom-element');
		// Presumably the component is compiled with a *closed* shadow root,
		// so it must not be exposed through the shadowRoot property.
		assert.equal(el.shadowRoot, null);
	}
});
from __future__ import with_statement
from setuptools import setup
def get_version():
    """Read the package version string from itunesiap/version.txt."""
    with open('itunesiap/version.txt') as version_file:
        return version_file.read().strip()
def get_readme():
    """Return the long description from README.rst ('' when unreadable)."""
    try:
        with open('README.rst') as readme_file:
            content = readme_file.read()
    except IOError:
        return ''
    return content.strip()
# Package metadata. Version and long description are read from files by the
# helper functions, so they stay in sync with the source tree at build time.
setup(
    name='tsl-itunes-iap',
    version=get_version(),
    description='Itunes In-app purchase verification api.',
    long_description=get_readme(),
    author='Ryan Pineo',
    author_email='ry@tsl.io',
    url='https://github.com/silverlogic/itunes-iap',
    packages=(
        'itunesiap',
    ),
    # Ship version.txt inside the package so get_version() works at runtime.
    package_data={
        'itunesiap': ['version.txt']
    },
    install_requires=[
        'requests', 'tsl-prettyexc>=0.5.2', 'six'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from appengine_wrappers import taskqueue
from cron_servlet import CronServlet
from instance_servlet import InstanceServlet
from patch_servlet import PatchServlet
from refresh_servlet import RefreshServlet
from servlet import Servlet, Request, Response
from test_servlet import TestServlet
# Servlet used when the request path has no leading-underscore prefix.
_DEFAULT_SERVLET = InstanceServlet.GetConstructor()
# Reserved servlet name that purges the task queue and re-enqueues the cron.
_FORCE_CRON_TARGET = 'force_cron'
# Dispatch table for '_<name>/...' paths.
_SERVLETS = {
  'cron': CronServlet,
  'patch': PatchServlet,
  'refresh': RefreshServlet,
  'test': TestServlet,
}
class Handler(Servlet):
  """Top-level request router.

  Paths beginning with '_' are dispatched by name: the special
  force_cron/enqueue targets manipulate the task queue directly, other
  names are looked up in _SERVLETS. Everything else goes to the default
  instance servlet with the path unchanged.
  """
  def Get(self):
    if self._request.path.startswith('_'):
      path = self._request.path
      # '_name/rest' -> ('name', 'rest'); a bare '_name' yields rest == ''.
      servlet_name, _, servlet_path = path[1:].partition('/')
      if servlet_name == _FORCE_CRON_TARGET:
        # Drain anything already queued, then schedule a fresh cron run.
        queue = taskqueue.Queue()
        queue.purge()
        time.sleep(2)
        queue.add(taskqueue.Task(url='/_cron'))
        return Response.Ok('Cron job started.')
      if servlet_name == 'enqueue':
        queue = taskqueue.Queue()
        queue.add(taskqueue.Task(url='/%s' % servlet_path))
        return Response.Ok('Task enqueued.')
      servlet = _SERVLETS.get(servlet_name)
      if servlet is None:
        # NOTE(review): this reports the trailing path rather than the
        # servlet name; preserved as-is.
        return Response.NotFound('"%s" servlet not found' % servlet_path)
    else:
      servlet_path = self._request.path
      servlet = _DEFAULT_SERVLET
    return servlet(Request(servlet_path,
                           self._request.host,
                           self._request.headers,
                           self._request.arguments)).Get()
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
#include "xla/tsl/lib/gtl/int_type.h"
namespace tensorflow {
namespace gtl {
// Forwarding alias: IntType now lives in tsl::gtl; this keeps the
// historical tensorflow::gtl::IntType spelling working.
using ::tsl::gtl::IntType;  // NOLINT(misc-unused-using-decls)
} // namespace gtl
} // namespace tensorflow
#endif  // TENSORFLOW_CORE_LIB_GTL_INT_TYPE_H_
from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
# Sentinel used as a special include_parents value when traversing fields
# (see get_all_related_objects / _get_fields usage below).
PROXY_PARENTS = object()
# Shared empty default for models with no reverse relation tree.
EMPTY_RELATION_TREE = tuple()
# Warning text attached to the ImmutableList results of the field caches.
IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)
# Meta options recognized by contribute_to_class(); anything else found in a
# model's 'class Meta' raises TypeError there.
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together', 'apps', 'default_permissions',
                 'select_on_save', 'default_related_name')
class raise_deprecation(object):
    """
    Decorator that emits a RemovedInDjango20Warning pointing callers at
    *suggested_alternative* before delegating to the wrapped function.
    """
    def __init__(self, suggested_alternative):
        self.suggested_alternative = suggested_alternative
    def __call__(self, fn):
        # Local import: this module's top-level import block is not visible
        # in this change, and wraps is only needed at decoration time.
        from functools import wraps
        # functools.wraps preserves fn's __name__/__doc__/__module__ on the
        # wrapper, so introspection (and the warning text, which uses
        # fn.__name__) keeps working for decorated methods.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                "'%s is an unofficial API that has been deprecated. "
                "You may be able to replace it with '%s'" % (
                    fn.__name__,
                    self.suggested_alternative,
                ),
                RemovedInDjango20Warning, stacklevel=2
            )
            return fn(*args, **kwargs)
        return wrapper
def normalize_together(option_together):
    """
    option_together can be either a tuple of tuples, or a single
    tuple of two strings. Normalize it to a tuple of tuples, so that
    calling code can uniformly expect that.

    Invalid values are returned verbatim so the check framework can
    report them later.
    """
    try:
        if not option_together:
            return ()
        if not isinstance(option_together, (tuple, list)):
            raise TypeError
        head = next(iter(option_together))
        if not isinstance(head, (tuple, list)):
            # A single pair like ('a', 'b') becomes (('a', 'b'),).
            option_together = (option_together,)
        normalized = []
        for group in option_together:
            normalized.append(tuple(group))
        return tuple(normalized)
    except TypeError:
        # Not list/tuple shaped (or a member is not iterable): hand the
        # value back untouched for later validation.
        return option_together
def make_immutable_fields_list(name, data):
    # Wrap *data* in an ImmutableList so accidental mutation of a cached
    # field list raises a helpful warning naming the property (*name*).
    return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
    # Names of cached properties computed from forward fields, and those
    # computed from reverse relations; presumably consumed by
    # _expire_cache() (called from add_field below) — confirm against the
    # rest of the class.
    FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
                          'local_concrete_fields', '_forward_fields_map')
    REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
    def __init__(self, meta, app_label=None):
        """
        *meta* is the model's ``class Meta`` (or None); *app_label* the label
        of the owning app. Only defaults are assigned here — the Meta
        overrides are applied later by contribute_to_class().
        """
        # Per-arguments cache for _get_fields().
        self._get_fields_cache = {}
        self.proxied_children = []
        # Fields and m2m fields declared on this model itself (no parents).
        self.local_fields = []
        self.local_many_to_many = []
        self.virtual_fields = []
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        # Set in contribute_to_class() when both 'ordering' and
        # 'order_with_respect_to' are given; they are mutually exclusive.
        self._ordering_clash = False
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.meta = meta
        self.pk = None
        self.has_auto_field = False
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes). `managers`
        # keeps a list of 3-tuples of the form:
        # (creation_counter, instance, abstract(=True))
        self.managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = apps
        self.default_related_name = None
    @lru_cache(maxsize=None)
    def _map_model(self, link):
        # This helper function is used to allow backwards compatibility with
        # the previous API. No future methods should use this function.
        # It maps a field to (field, model or related_model,) depending on the
        # field type.
        # The model slot is None when the field lives on this model itself.
        # NOTE(review): lru_cache on a method keys on `self` and keeps the
        # Options instance alive for the process lifetime — presumably fine
        # since model metadata lives that long anyway.
        model = link.model._meta.concrete_model
        if model is self.model:
            model = None
        return link, model
    @lru_cache(maxsize=None)
    def _map_model_details(self, link):
        # This helper function is used to allow backwards compatibility with
        # the previous API. No future methods should use this function.
        # This function maps a field to a tuple of:
        # (field, model or related_model, direct, is_m2m) depending on the
        # field type.
        # "direct" is True for fields declared on a model (as opposed to
        # auto-created reverse-side relations that are not concrete).
        direct = not link.auto_created or link.concrete
        model = link.model._meta.concrete_model
        if model is self.model:
            model = None
        m2m = link.is_relation and link.many_to_many
        return link, model, direct, m2m
    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        # Returns None when the app is absent from the registry.
        return self.apps.app_configs.get(self.app_label)
    @property
    def installed(self):
        # True when the model's app is present in the app registry.
        return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
    def contribute_to_class(self, cls, name):
        """
        Attach this Options instance to the model class *cls* as ``_meta``
        and apply the user's ``class Meta`` overrides on top of the defaults
        set in __init__(). Unknown Meta attributes raise TypeError.
        """
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            # (This loop shadows the *name* parameter, which is unused below.)
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    # Inherited Meta attribute: apply but don't pop (it is
                    # not in this class's own __dict__).
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
    def _prepare(self, model):
        """
        Finish setup once the model class exists: resolve
        order_with_respect_to to a real field object, and ensure the model
        has a primary key (promoting the first parent link, or adding an
        auto-created 'id' AutoField).
        """
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
            self.ordering = ('_order',)
            # Add the hidden '_order' field unless one was already set up.
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                auto = AutoField(verbose_name='ID', primary_key=True,
                                 auto_created=True)
                model.add_to_class('id', auto)
    def add_field(self, field, virtual=False):
        """
        Register *field* on this model, keeping local_fields /
        local_many_to_many ordered (bisect insertion), and expire the
        affected cached field properties.
        """
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if virtual:
            self.virtual_fields.append(field)
        elif field.is_relation and field.many_to_many:
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                # The remote model may still be a string reference here.
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
    @property
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        # Deactivate translation while forcing the (possibly lazy) string.
        with override(None):
            return force_text(self.verbose_name)
    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            model_label = '%s.%s' % (self.app_label, self.model_name)
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                # Swapped unless the setting points back at this very model.
                if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
                    return swapped_for
        return None
    @cached_property
    def fields(self):
        """
        Returns a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not virtual or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
        is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
        is_not_a_generic_foreign_key = lambda f: not (
            f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
        )
        # Result is wrapped so accidental mutation raises a helpful warning.
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False) if
             is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
             and is_not_a_generic_foreign_key(f))
        )
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_fields_with_model(self):
        # Legacy API: pairs each field with the model it was defined on
        # (None when local to this model); see _map_model().
        return [self._map_model(f) for f in self.get_fields()]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_concrete_fields_with_model(self):
        # Legacy API: same pairing as get_fields_with_model(), restricted to
        # concrete fields.
        return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_m2m_with_model(self):
        # Legacy API: (field, defining model or None) pairs for m2m fields.
        return [self._map_model(f) for f in self.many_to_many]
    @cached_property
    def _forward_fields_map(self):
        # Name -> field mapping covering forward fields only; also indexed
        # by attname (e.g. both 'user' and 'user_id') where available.
        res = {}
        fields = self._get_fields(reverse=False)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                # Some virtual fields have no attname.
                pass
        return res
    @cached_property
    def fields_map(self):
        # Name -> field mapping over reverse fields (hidden ones included);
        # the reverse counterpart of _forward_fields_map.
        res = {}
        fields = self._get_fields(forward=False, include_hidden=True)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res
    def get_field(self, field_name, many_to_many=None):
        """
        Returns a field instance given a field name. The field can be either a
        forward or reverse field, unless many_to_many is specified; if it is,
        only forward fields will be returned.
        The many_to_many argument exists for backwards compatibility reasons;
        it has been deprecated and will be removed in Django 2.0.
        Raises FieldDoesNotExist when no matching field exists.
        """
        m2m_in_kwargs = many_to_many is not None
        if m2m_in_kwargs:
            # Always throw a warning if many_to_many is used regardless of
            # whether it alters the return type or not.
            warnings.warn(
                "The 'many_to_many' argument on get_field() is deprecated; "
                "use a filter on field.many_to_many instead.",
                RemovedInDjango20Warning
            )
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            field = self._forward_fields_map[field_name]
            if many_to_many is False and field.many_to_many:
                raise FieldDoesNotExist(
                    '%s has no field named %r' % (self.object_name, field_name)
                )
            return field
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named %r. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
            try:
                if m2m_in_kwargs:
                    # Previous API does not allow searching reverse fields.
                    raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
                # Retrieve field instance by name from cached or just-computed
                # field map.
                return self.fields_map[field_name]
            except KeyError:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
    @raise_deprecation(suggested_alternative="get_field()")
    def get_field_by_name(self, name):
        # Legacy API: (field, model, direct, m2m); see _map_model_details().
        return self._map_model_details(self.get_field(name))
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_field_names(self):
        # Legacy API: every usable field name and attname, order unspecified.
        names = set()
        fields = self.get_fields()
        for field in fields:
            # For backwards compatibility GenericForeignKey should not be
            # included in the results.
            if field.is_relation and field.many_to_one and field.related_model is None:
                continue
            # Relations to child proxy models should not be included.
            if (field.model != self.model and
                    field.model._meta.concrete_model == self.concrete_model):
                continue
            names.add(field.name)
            if hasattr(field, 'attname'):
                names.add(field.attname)
        return list(names)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects(self, local_only=False, include_hidden=False,
                                include_proxy_eq=False):
        # Legacy API: reverse one-to-* relations (m2m excluded). With
        # local_only, parents are restricted to proxies via PROXY_PARENTS.
        include_parents = True if local_only is False else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents,
            include_hidden=include_hidden,
        )
        fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
        if include_proxy_eq:
            # Also pull in relations declared against proxies of this model.
            children = chain.from_iterable(c._relation_tree
                                           for c in self.concrete_model._meta.proxied_children
                                           if c is not self)
            relations = (f.remote_field for f in children
                         if include_hidden or not f.remote_field.field.remote_field.is_hidden())
            fields = chain(fields, relations)
        return list(fields)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
                                           include_proxy_eq=False):
        # Legacy API: same as get_all_related_objects(), but pairing each
        # relation with its defining model via _map_model().
        return [
            self._map_model(f) for f in self.get_all_related_objects(
                local_only=local_only,
                include_hidden=include_hidden,
                include_proxy_eq=include_proxy_eq,
            )
        ]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_many_to_many_objects(self, local_only=False):
        # Legacy API: reverse relations created by ManyToManyFields only.
        include_parents = True if local_only is not True else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents, include_hidden=True
        )
        return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_m2m_objects_with_model(self):
        # Legacy API: reverse m2m relations paired with their model.
        fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
    def get_base_chain(self, model):
        """
        Returns a list of parent classes leading to 'model' (ordered from
        closest to most distant ancestor). This has to handle the case where
        'model' is a grandparent or even more distant relation.
        Returns None when 'model' is not an ancestor.
        """
        if not self.parents:
            return None
        if model in self.parents:
            return [model]
        for parent in self.parents:
            # Recurse through each immediate parent's own chain.
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        return None
def get_parent_list(self):
    """
    Return every ancestor of this model as a list ordered by MRO. Useful
    for determining whether something is an ancestor, regardless of
    lineage; duplicates from diamond inheritance are collapsed while
    preserving first-seen order.
    """
    ancestors = OrderedSet(self.parents)
    for direct_parent in self.parents:
        for ancestor in direct_parent._meta.get_parent_list():
            ancestors.add(ancestor)
    return list(ancestors)
def get_ancestor_link(self, ancestor):
    """
    Return the field on this model that points to ``ancestor`` — possibly
    an indirect link (a pointer to a parent model which eventually points
    to the ancestor). Used when constructing table joins for model
    inheritance. Returns None when ``ancestor`` is not one of this
    model's ancestors.
    """
    try:
        # Direct parent: the parents mapping stores the link field itself
        # (which may legitimately be None for proxy relationships).
        return self.parents[ancestor]
    except KeyError:
        pass
    for parent in self.parents:
        link = parent._meta.get_ancestor_link(ancestor)
        if link:
            # For a proxied model the first hop of the chain is the link
            # to that immediate parent.
            return self.parents[parent] or link
    return None
def _populate_directed_relation_graph(self):
    """
    This method is used by each model to find its reverse objects. As this
    method is very expensive and is accessed frequently (it looks up every
    field in a model, in every app), it is computed on first access and then
    is set as a property on every model.
    """
    related_objects_graph = defaultdict(list)

    all_models = self.apps.get_models(include_auto_created=True)
    for model in all_models:
        # Abstract model's fields are copied to child models, hence we will
        # see the fields from the child models.
        if model._meta.abstract:
            continue
        fields_with_relations = (
            f for f in model._meta._get_fields(reverse=False, include_parents=False)
            if f.is_relation and f.related_model is not None
        )
        for f in fields_with_relations:
            # Relations declared with a lazy (string) reference cannot be
            # resolved here and are skipped.
            if not isinstance(f.remote_field.model, six.string_types):
                related_objects_graph[f.remote_field.model._meta].append(f)

    for model in all_models:
        # Set the relation_tree using the internal __dict__. In this way
        # we avoid calling the cached property. In attribute lookup,
        # __dict__ takes precedence over a data descriptor (such as
        # @cached_property). This means that the _meta._relation_tree is
        # only called if related_objects is not in __dict__.
        related_objects = related_objects_graph[model._meta]
        model._meta.__dict__['_relation_tree'] = related_objects
    # It seems it is possible that self is not in all_models, so guard
    # against that with default for get().
    return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
    # List of relation fields on other models that point at this model;
    # computed lazily on first access (the helper also primes the cache
    # for every other model's _meta via its __dict__).
    return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
    """
    Return a list of fields associated with the model.

    - include_parents: include fields derived from inheritance (False is
      translated to PROXY_PARENTS for the internal helper).
    - include_hidden: include fields whose related_name starts with "+".
    """
    parents_option = PROXY_PARENTS if include_parents is False else include_parents
    return self._get_fields(include_parents=parents_option,
                            include_hidden=include_hidden)
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                seen_models=None):
    """
    Internal helper function to return fields of the model.
    * If forward=True, then fields defined on this model are returned.
    * If reverse=True, then relations pointing to this model are returned.
    * If include_hidden=True, then fields with is_hidden=True are returned.
    * The include_parents argument toggles if fields from parent models
      should be included. It has three values: True, False, and
      PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
      fields defined for the current model or any of its parents in the
      parent chain to the model's concrete model.
    """
    if include_parents not in (True, False, PROXY_PARENTS):
        raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
    # This helper function is used to allow recursion in ``get_fields()``
    # implementation and to provide a fast way for Django's internals to
    # access specific subsets of fields.

    # We must keep track of which models we have already seen. Otherwise we
    # could include the same field multiple times from different models.
    topmost_call = False
    if seen_models is None:
        seen_models = set()
        topmost_call = True
    seen_models.add(self.model)

    # Creates a cache key composed of all arguments
    cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)

    try:
        # In order to avoid list manipulation. Always return a shallow copy
        # of the results.
        return self._get_fields_cache[cache_key]
    except KeyError:
        pass

    fields = []
    # Recursively call _get_fields() on each parent, with the same
    # options provided in this call.
    if include_parents is not False:
        for parent in self.parents:
            # In diamond inheritance it is possible that we see the same
            # model from two different routes. In that case, avoid adding
            # fields from the same parent again.
            if parent in seen_models:
                continue
            # PROXY_PARENTS restricts the walk to parents sharing this
            # model's concrete model (the proxy chain only).
            if (parent._meta.concrete_model != self.concrete_model and
                    include_parents == PROXY_PARENTS):
                continue
            for obj in parent._meta._get_fields(
                    forward=forward, reverse=reverse, include_parents=include_parents,
                    include_hidden=include_hidden, seen_models=seen_models):
                # Skip the automatic parent-link one-to-one fields; they are
                # an implementation detail of multi-table inheritance.
                if hasattr(obj, 'parent_link') and obj.parent_link:
                    continue
                fields.append(obj)
    if reverse:
        # Tree is computed once and cached until the app cache is expired.
        # It is composed of a list of fields pointing to the current model
        # from other models.
        all_fields = self._relation_tree
        for field in all_fields:
            # If hidden fields should be included or the relation is not
            # intentionally hidden, add to the fields dict.
            if include_hidden or not field.remote_field.hidden:
                fields.append(field.remote_field)
    if forward:
        fields.extend(
            field for field in chain(self.local_fields, self.local_many_to_many)
        )
        # Virtual fields are recopied to each child model, and they get a
        # different model as field.model in each child. Hence we have to
        # add the virtual fields separately from the topmost call. If we
        # did this recursively similar to local_fields, we would get field
        # instances with field.model != self.model.
        if topmost_call:
            fields.extend(
                f for f in self.virtual_fields
            )

    # In order to avoid list manipulation. Always
    # return a shallow copy of the results
    fields = make_immutable_fields_list("get_fields()", fields)

    # Store result into cache for later access
    self._get_fields_cache[cache_key] = fields
    return fields
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from Engineering.gui.engineering_diffraction.tabs.common.cropping.cropping_model import CroppingModel
class CroppingModelTest(unittest.TestCase):
    """Unit tests for CroppingModel's spectrum-number validation and cleaning."""

    def setUp(self):
        self.model = CroppingModel()

    # --- validate_spectrum_numbers: comma lists, whitespace, ranges ---

    def test_validate_single_valid_spectra(self):
        self.assertTrue(self.model.validate_spectrum_numbers("1,2,3,4,5,6,7,8,9,10"))

    def test_validate_single_valid_spectra_regular_whitespace(self):
        self.assertTrue(self.model.validate_spectrum_numbers("1, 2, 3, 4, 5, 6, 7, 8, 9, 10"))

    def test_validate_single_valid_spectra_irregular_whitespace(self):
        self.assertTrue(self.model.validate_spectrum_numbers("1, 2,3,4, 5,6 ,7, 8, 9, 10"))

    def test_validate_single_spectra_invalid_negative(self):
        # Negative spectrum numbers must be rejected.
        self.assertFalse(self.model.validate_spectrum_numbers("1,2,3,4,-5,6,7,8,9,10"))

    def test_validate_single_spectra_invalid_spectrum(self):
        # 77777 is out of range (input also carries a trailing comma).
        self.assertFalse(self.model.validate_spectrum_numbers("1,2,3,4,5,6,77777,8,9,"))

    def test_validate_ranged_spectra(self):
        self.assertTrue(self.model.validate_spectrum_numbers("1-5, 5, 3 , 2-7, 7-13"))

    # --- _clean_spectrum_numbers: canonicalise whitespace and ranges ---

    def test_clean_spectrum_numbers_regular_whitespace(self):
        self.assertEqual(self.model._clean_spectrum_numbers("1, 2, 5, 76, 3"), "1,2,5,76,3")

    def test_clean_spectrum_numbers_irregular_whitespace(self):
        self.assertEqual(self.model._clean_spectrum_numbers("1 , 2, 5 , 76, 3 "),
                         "1,2,5,76,3")

    def test_clean_spectrum_numbers_regular_ranges(self):
        self.assertEqual(self.model._clean_spectrum_numbers("1-2, 5-76, 3"), "1-2,5-76,3")

    def test_clean_spectrum_numbers_reversed_ranges(self):
        # Reversed ranges are normalised to ascending order.
        self.assertEqual(self.model._clean_spectrum_numbers("2-1, 76-5, 3"), "1-2,5-76,3")

    def test_clean_spectrum_numbers_equal_range(self):
        self.assertRaisesRegex(ValueError,
                               "Ranges cannot contain the same value twice. Invalid Range:*",
                               self.model._clean_spectrum_numbers, "1-1, 76-76, 3")

    # --- validate_and_clean_spectrum_numbers: returns (error, cleaned) ---

    def test_validate_and_clean_with_valid_input(self):
        self.assertEqual(self.model.validate_and_clean_spectrum_numbers("1-6, 7-23, 46, 1"),
                         ("", "1-6,7-23,46,1"))

    def test_validate_and_clean_reverse_ranges(self):
        self.assertEqual(self.model.validate_and_clean_spectrum_numbers("6-1, 7-24, 6-4,1"),
                         ("", "1-6,7-24,4-6,1"))

    def test_validate_and_clean_equal_ranges(self):
        self.assertEqual(self.model.validate_and_clean_spectrum_numbers("6-6, 7-24, 6-4,1"),
                         ("Ranges cannot contain the same value twice. Invalid Range: 6-6", ""))
# Allow running this test module directly rather than via a test runner.
if __name__ == '__main__':
    unittest.main()
import unittest
from importlib import import_module, resources
from . import util
class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    """Run the shared traversal checks using binary reads."""

    def execute(self, package, path):
        # Hook invoked by util.CommonTests for each package/path combination.
        resources.files(package).joinpath(path).read_bytes()
class CommonTextTests(util.CommonTests, unittest.TestCase):
    """Run the shared traversal checks using text reads."""

    def execute(self, package, path):
        # Hook invoked by util.CommonTests for each package/path combination.
        resources.files(package).joinpath(path).read_text(encoding='utf-8')
class ReadTests:
    """Mixin exercising read_bytes()/read_text(); ``self.data`` is supplied
    by the util.DiskSetup / util.ZipSetup classes mixed in below."""

    def test_read_bytes(self):
        result = resources.files(self.data).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(4)))

    def test_read_text_default_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-8.file')
            .read_text(encoding='utf-8')
        )
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_read_text_given_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-16.file')
            .read_text(encoding='utf-16')
        )
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_read_text_with_errors(self):
        """
        Raises UnicodeError without the 'errors' argument.
        """
        target = resources.files(self.data) / 'utf-16.file'
        self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
        result = target.read_text(encoding='utf-8', errors='ignore')
        # UTF-16 bytes decoded as UTF-8 with errors ignored leave the
        # interleaved NUL bytes in place.
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )
class ReadDiskTests(ReadTests, util.DiskSetup, unittest.TestCase):
    # ReadTests against resources on the real filesystem.
    pass
class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    """ReadTests against resources packaged inside a zip archive."""

    def test_read_submodule_resource(self):
        submodule = import_module('data01.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(4, 8)))

    def test_read_submodule_resource_by_name(self):
        # Same resource, addressed by dotted package name instead of module.
        result = (
            resources.files('data01.subdirectory').joinpath('binary.file').read_bytes()
        )
        self.assertEqual(result, bytes(range(4, 8)))
class ReadNamespaceTests(ReadTests, util.DiskSetup, unittest.TestCase):
    # Same reads, but against a namespace package on disk.
    MODULE = 'namespacedata01'
class ReadNamespaceZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    """ReadTests against a zipped namespace package."""

    MODULE = 'namespacedata01'

    def test_read_submodule_resource(self):
        submodule = import_module('namespacedata01.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, bytes(range(12, 16)))

    def test_read_submodule_resource_by_name(self):
        # Same resource, addressed by dotted package name instead of module.
        result = (
            resources.files('namespacedata01.subdirectory')
            .joinpath('binary.file')
            .read_bytes()
        )
        self.assertEqual(result, bytes(range(12, 16)))
# Allow running this test module directly rather than via a test runner.
if __name__ == '__main__':
    unittest.main()
# Running CockroachDB across multiple Kubernetes clusters (GKE)
The script and configuration files in this directory enable deploying
CockroachDB across multiple Kubernetes clusters that are spread across different
geographic regions and hosted on [GKE](https://cloud.google.com/kubernetes-engine). It deploys a CockroachDB
[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
into each separate cluster, and links them together using DNS.
To use the configuration provided here, check out this repository (or otherwise
download a copy of this directory), fill in the constants at the top of
[setup.py](setup.py) with the relevant information about your Kubernetes
clusters, optionally make any desired modifications to
[cockroachdb-statefulset-secure.yaml](cockroachdb-statefulset-secure.yaml) as
explained in [our Kubernetes performance tuning
guide](https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html),
then finally run [setup.py](setup.py).
You should see a lot of output as it does its thing, hopefully ending after
printing out `job "cluster-init-secure" created`. This implies that everything
was created successfully, and you should soon see the CockroachDB cluster
initialized with 3 pods in the "READY" state in each Kubernetes cluster. At this
point you can manage the StatefulSet in each cluster independently if you so
desire, scaling up the number of replicas, changing their resource requests, or
making other modifications as you please.
If anything goes wrong along the way, please let us know via any of the [normal
troubleshooting
channels](https://www.cockroachlabs.com/docs/stable/support-resources.html).
While we believe this creates a highly available, maintainable multi-region
deployment, it is still pushing the boundaries of how Kubernetes is typically
used, so feedback and issue reports are greatly appreciated.
## Limitations
### Pod-to-pod connectivity
The deployment outlined in this directory relies on pod IP addresses being
routable even across Kubernetes clusters and regions. This achieves optimal
performance, particularly when compared to alternative solutions that route all packets between clusters through load balancers, but means that it won't work in certain environments.
This requirement is satisfied by clusters deployed in cloud environments such as Google Kubernetes Engine, and
can also be satisfied by on-prem environments depending on the [Kubernetes networking setup](https://kubernetes.io/docs/concepts/cluster-administration/networking/) used. If you want to test whether your cluster will work, you can run this basic network test:
```shell
$ kubectl run network-test --image=alpine --restart=Never -- sleep 999999
pod "network-test" created
$ kubectl describe pod network-test | grep IP
IP: THAT-PODS-IP-ADDRESS
$ kubectl config use-context YOUR-OTHER-CLUSTERS-CONTEXT-HERE
$ kubectl run -it network-test --image=alpine --restart=Never -- ping THAT-PODS-IP-ADDRESS
If you don't see a command prompt, try pressing enter.
64 bytes from 10.12.14.10: seq=1 ttl=62 time=0.570 ms
64 bytes from 10.12.14.10: seq=2 ttl=62 time=0.449 ms
64 bytes from 10.12.14.10: seq=3 ttl=62 time=0.635 ms
64 bytes from 10.12.14.10: seq=4 ttl=62 time=0.722 ms
64 bytes from 10.12.14.10: seq=5 ttl=62 time=0.504 ms
...
```
If the pods can directly connect, you should see successful ping output like the
above. If they can't, you won't see any successful ping responses. Make sure to
delete the `network-test` pod in each cluster when you're done!
### Exposing DNS servers to the Internet
As currently configured, the way that the DNS servers from each Kubernetes
cluster are hooked together is by exposing them via a load balanced IP address
that's visible to the public Internet. This is because [Google Cloud Platform's Internal Load Balancers do not currently support clients in one region using a load balancer in another region](https://cloud.google.com/compute/docs/load-balancing/internal/#deploying_internal_load_balancing_with_clients_across_vpn_or_interconnect).
None of the services in your Kubernetes cluster will be made accessible, but
their names could leak out to a motivated attacker. If this is unacceptable,
please let us know and we can demonstrate other options. [Your voice could also
help convince Google to allow clients from one region to use an Internal Load
Balancer in another](https://issuetracker.google.com/issues/111021512),
eliminating the problem.
## Cleaning up
To remove all the resources created in your clusters by [setup.py](setup.py),
copy the parameters you provided at the top of [setup.py](setup.py) to the top
of [teardown.py](teardown.py) and run [teardown.py](teardown.py).
## More information
For more information on running CockroachDB in Kubernetes, please see the [README
in the parent directory](../README.md). | unknown | github | https://github.com/cockroachdb/cockroach | cloud/kubernetes/multiregion/README.md |
name: Coverage Windows
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
paths:
- lib/**/*.js
- vcbuild.bat
- src/**/*.cc
- src/**/*.h
- test/**
- tools/gyp/**
- tools/test.py
- .github/workflows/coverage-windows.yml
- codecov.yml
- .nycrc
push:
branches:
- main
paths:
- lib/**/*.js
- vcbuild.bat
- src/**/*.cc
- src/**/*.h
- test/**
- tools/gyp/**
- tools/test.py
- .github/workflows/coverage-windows.yml
- codecov.yml
- .nycrc
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
PYTHON_VERSION: '3.14'
FLAKY_TESTS: keep_retrying
permissions:
contents: read
jobs:
coverage-windows:
if: github.event.pull_request.draft == false
runs-on: windows-2025
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: ${{ env.PYTHON_VERSION }}
allow-prereleases: true
- name: Install deps
run: choco install nasm
- name: Environment Information
run: npx envinfo
- name: Build
run: ./vcbuild.bat clang-cl
# TODO(bcoe): investigate tests that fail with coverage enabled
# on Windows.
- name: Test
run: ./vcbuild.bat noprojgen nobuild test-ci-js; node -e 'process.exit(0)'
env:
NODE_V8_COVERAGE: ./coverage/tmp
- name: Report
run: npx c8 report
env:
NODE_OPTIONS: --max-old-space-size=8192
- name: Clean tmp
run: npx rimraf ./coverage/tmp
- name: Upload
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
directory: ./coverage | unknown | github | https://github.com/nodejs/node | .github/workflows/coverage-windows.yml |
from rest_framework import serializers
from selectable.forms.widgets import AutoCompleteSelectWidget, AutoCompleteSelectMultipleWidget
from costs.models import RVU, OrderSet
from costs.lookups import RVULookup
from myuser.models import FavoriteRVU, TestPerformed
class RVUSerializer(serializers.ModelSerializer):
    """
    django-rest-framework serializer class to construct a JSON representation
    of the RVU data model.
    """
    class Meta:
        model = RVU
        fields = ('id',
                  'year',
                  'code',
                  'mod',
                  'description',
                  'work',
                  'facility',
                  'malpractice',
                  'name' )

    # Read-only field populated by get_name() below.
    name = serializers.SerializerMethodField('get_name')

    def get_name(self, obj):
        # Use the model's human-readable representation as the "name" value.
        return obj.__unicode__()
class OrderSetSerializer(serializers.ModelSerializer):
    """
    django-rest-framework serializer class to construct a JSON representation
    of the OrderSet data model.
    Returns a list of RVU primary keys.
    """
    class Meta:
        model = OrderSet
        fields = ('name', 'RVUs')

    def __init__(self, instance=None, data=None, files=None,
                 context=None, partial=False, many=None,
                 allow_add_remove=False, **kwargs):
        # NOTE(review): signature mirrors the pre-3.0 DRF serializer
        # __init__ so positional arguments pass straight through.
        super(OrderSetSerializer, self).__init__(instance, data, files,
                                                 context, partial, many,
                                                 allow_add_remove, **kwargs)
        # Swap in the autocomplete widget; must happen after
        # super().__init__() has built self.fields.
        self.fields['RVUs'].widget = AutoCompleteSelectMultipleWidget(lookup_class=RVULookup)
class OrderSetSerializerFull(OrderSetSerializer):
    """
    django-rest-framework serializer class to construct a JSON representation
    of the OrderSet data model.
    Returns a list of RVU objects, using the RVU serializer.
    """
    # Nested serializer: expands each related RVU instead of emitting its pk.
    RVUs = RVUSerializer()
class FavoriteRVUSerializer(serializers.ModelSerializer):
    """
    django-rest-framework serializer class to construct a JSON representation
    of a list of favorite RVUs, using the primary-key for each only.
    """
    class Meta:
        model = FavoriteRVU
        fields = ('rvu', )

    def __init__(self, instance=None, data=None, files=None,
                 context=None, partial=False, many=None,
                 allow_add_remove=False, **kwargs):
        # NOTE(review): signature mirrors the pre-3.0 DRF serializer
        # __init__ so positional arguments pass straight through.
        super(FavoriteRVUSerializer, self).__init__(instance, data, files,
                                                    context, partial, many,
                                                    allow_add_remove, **kwargs)
        # Swap in the autocomplete widget after super() builds self.fields.
        self.fields['rvu'].widget = AutoCompleteSelectWidget(lookup_class=RVULookup)
class TestPerformedSerializer(serializers.ModelSerializer):
    """
    django-rest-framework serializer class to construct a JSON representation
    of a list of the tests which have been performed, using the primary-key for
    each RVU only. Does not return the username or date performed.
    """
    class Meta:
        model = TestPerformed
        fields = ('rvu', )

    def __init__(self, instance=None, data=None, files=None,
                 context=None, partial=False, many=None,
                 allow_add_remove=False, **kwargs):
        # NOTE(review): signature mirrors the pre-3.0 DRF serializer
        # __init__ so positional arguments pass straight through.
        super(TestPerformedSerializer, self).__init__(instance, data, files,
                                                      context, partial, many,
                                                      allow_add_remove, **kwargs)
        # Swap in the autocomplete widget after super() builds self.fields.
        self.fields['rvu'].widget = AutoCompleteSelectWidget(lookup_class=RVULookup)
# coding=utf-8
"""
Send metrics to a [graphite](http://graphite.wikidot.com/) using the high
performace pickle interface.
Graphite is an enterprise-scale monitoring tool that runs well on cheap
hardware. It was originally designed and written by Chris Davis at Orbitz in
2006 as side project that ultimately grew to be a foundational monitoring tool.
In 2008, Orbitz allowed Graphite to be released under the open source Apache
2.0 license. Since then Chris has continued to work on Graphite and has
deployed it at other companies including Sears, where it serves as a pillar of
the e-commerce monitoring system. Today many
[large companies](http://graphite.readthedocs.org/en/latest/who-is-using.html)
use it.
"""
import struct
from graphite import GraphiteHandler
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
class GraphitePickleHandler(GraphiteHandler):
    """
    Overrides the GraphiteHandler class, sending data to graphite in
    batches via the pickle receiver (port 2004) instead of the line
    protocol.
    """

    def __init__(self, config=None):
        """
        Create a new instance of the GraphitePickleHandler.
        """
        # Initialize GraphiteHandler (socket handling, config merge, log).
        GraphiteHandler.__init__(self, config)
        # Metrics buffered until a full batch can be pickled and sent.
        self.batch = []
        self.batch_size = int(self.config['batch'])

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler.
        """
        # No extra options beyond the base handler's.
        return super(GraphitePickleHandler, self).get_default_config_help()

    def get_default_config(self):
        """
        Return the default config for the handler: same as the base
        handler but targeting graphite's pickle port (2004, not 2003).
        """
        config = super(GraphitePickleHandler, self).get_default_config()
        config.update({
            'port': 2004,
        })
        return config

    def process(self, metric):
        """
        Buffer *metric*; once batch_size metrics are queued, pickle the
        whole batch and push it to graphite.
        """
        # Graphite's pickle receiver expects (path, (timestamp, value)).
        self.batch.append((metric.path, (metric.timestamp, metric.value)))

        if len(self.batch) >= self.batch_size:
            self.log.debug("GraphitePickleHandler: Sending batch size: %d",
                           self.batch_size)
            # Pickle the batch of metrics and send it down the wire.
            self.metrics = [self._pickle_batch()]
            self._send()
            self.flush()
            # Clear the batch for the next round.
            self.batch = []

    def _pickle_batch(self):
        """
        Serialize the queued batch into the length-prefixed pickle message
        understood by the graphite pickle receiver.
        """
        # Pin protocol 2: carbon daemons running under Python 2 cannot
        # unpickle the higher protocols that Python 3's pickle defaults to.
        payload = pickle.dumps(self.batch, protocol=2)
        # 4-byte big-endian length header precedes the payload.
        header = struct.pack("!L", len(payload))
        return header + payload
import wpilib
from common.distance_sensors import SharpIR2Y0A02, SharpIRGP2Y0A41SK0F, CombinedSensor
from networktables import NetworkTable
class Sensor:
    """
    Aggregates the robot's tote/can sensing hardware: limit switches,
    short- and long-range Sharp IR distance sensors, and the lift motor
    encoders. Call update() once per control loop, then read the cached
    attributes; update_sd() publishes them to the SmartDashboard.
    """

    def __init__(self, tote_motor, can_motor):
        self.sd = NetworkTable.getTable('SmartDashboard')

        self.toteLimitLSensor = wpilib.DigitalInput(0)  ##Left limit switch
        self.toteLimitRSensor = wpilib.DigitalInput(1)  ##Right limit switch

        # Analog channel numbers follow the robot's wiring — TODO confirm
        # against the electrical map.
        self.longDistanceLSensor = SharpIR2Y0A02(1)  # # Robot's left
        self.longDistanceRSensor = SharpIR2Y0A02(3)  # # Robot's right
        self.shortDistanceLSensor = SharpIRGP2Y0A41SK0F(2)  # # Robot's left
        self.shortDistanceRSensor = SharpIRGP2Y0A41SK0F(7)  # # Robot's right

        # Fuse long/short IR sensors into one continuous reading; the 22
        # and 6 presumably are each sensor's hand-off distance — TODO confirm units.
        self.leftSensor = CombinedSensor(self.longDistanceLSensor, 22, self.shortDistanceLSensor, 6)
        self.rightSensor = CombinedSensor(self.longDistanceRSensor, 22, self.shortDistanceRSensor, 6)

        self.tote_motor = tote_motor
        self.can_motor = can_motor

        self.in_range = False
        self.in_range_start = None

        # Premature optimization, but it looks nicer
        # Encoder positions at which the tote mechanism blocks the distance
        # sensors; readings taken there are ignored (see update()).
        self._tote_exclude_range = set()

        # measured using the calibration routine
        interference = [(1031, 1387), (1888, 2153), (4544, 4895), (5395, 5664), (8008, 8450)]
        #for i in [1033, 2031, 4554, 5393, 7902]:
        for r1, r2 in interference:
            for j in range(r1, r2):
                self._tote_exclude_range.add(j)

        self.update()

    def update(self):
        """Sample every sensor once and refresh the cached readings."""
        self.now = wpilib.Timer.getFPGATimestamp()

        self.toteLimitL = self.toteLimitLSensor.get()
        self.toteLimitR = self.toteLimitRSensor.get()

        self.longDistanceL = self.longDistanceLSensor.getDistance()
        self.longDistanceR = self.longDistanceRSensor.getDistance()
        self.shortDistanceL = self.shortDistanceLSensor.getDistance()
        self.shortDistanceR = self.shortDistanceRSensor.getDistance()
        self.leftDistance = self.leftSensor.getDistance()
        self.rightDistance = self.rightSensor.getDistance()

        self.tote_enc = self.tote_motor.getEncPosition()
        self.can_enc = self.can_motor.getEncPosition()

        # Calculate if its in range
        in_range = (self.leftDistance < 30 and self.rightDistance < 30)

        # if it's in the way, then set it to the last thing
        self.interfered = self.tote_enc in self._tote_exclude_range
        if self.interfered:
            in_range = self.in_range

        self.in_range = in_range

        # Debounce experiment kept for reference:
        #if self.in_range_start is None:
        #    if in_range:
        #        self.in_range_start = self.now
        #else:
        #    self.in_range = in_range and self.now > self.in_range_start + 0.05
        #
        #    if not in_range:
        #        self.in_range_start = None

    def is_against_tote(self):
        # Both switches read False when a tote is seated (presumably
        # active-low wiring — confirm against the hardware).
        if not self.toteLimitL and not self.toteLimitR:
            return True
        return False

    def is_in_range(self):
        # Value last computed by update().
        return self.in_range

    def update_sd(self):
        """Publish the current readings to the SmartDashboard table."""
        self.sd.putNumber('shortSensorValueL', self.shortDistanceL)
        self.sd.putNumber('shortSensorValueR', self.shortDistanceR)
        self.sd.putNumber('longSensorValueL', self.longDistanceL)
        self.sd.putNumber('longSensorValueR', self.longDistanceR)

        #self.sd.putNumber('shortSensorVoltageL', self.sensor.shortDistanceL)
        #self.sd.putNumber('shortSensorVoltageR', self.sensor.shortDistanceR)
        #self.sd.putNumber('longSensorVoltageL', self.sensor.longDistanceL)
        #self.sd.putNumber('longSensorVoltageR', self.sensor.longDistanceR)

        self.sd.putBoolean('toteInRange', self.in_range)
        self.sd.putBoolean('toteInterfere', self.interfered)

        self.sd.putNumber('combinedL', self.leftDistance)
        self.sd.putNumber('combinedR', self.rightDistance)

        self.sd.putBoolean('toteLimitL', self.toteLimitL)
        self.sd.putBoolean('toteLimitR', self.toteLimitR)

    def doit(self):
        # Intentionally empty placeholder (component hook).
        pass
/*
* Copyright (c) 2019 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Annotation representing a type that should not be mocked.
 * <p>When marking a type {@code @DoNotMock}, you should always point to alternative testing
 * solutions such as standard fakes or other testing utilities.
 *
 * Mockito enforces {@code @DoNotMock} with the {@link org.mockito.plugins.DoNotMockEnforcer}.
 *
 * If you want to use a custom {@code @DoNotMock} annotation, the {@link org.mockito.plugins.DoNotMockEnforcer}
 * will match on annotations with a type ending in "org.mockito.DoNotMock". You can thus place
 * your custom annotation in {@code com.my.package.org.mockito.DoNotMock} and Mockito will enforce
 * that types annotated by {@code @com.my.package.org.mockito.DoNotMock} can not be mocked.
 *
 * <pre class="code"><code class="java">
 * {@literal @}DoNotMock(reason = "Use a real instance instead")
 * class DoNotMockMe {}
 * </code></pre>
 */
@Target({TYPE})
@Retention(RUNTIME)
@Documented
public @interface DoNotMock {
    /**
     * The reason why the annotated type should not be mocked.
     *
     * <p>This should suggest alternative APIs to use for testing objects of this type.
     */
    String reason() default "Create a real instance instead.";
}
# -*- coding: utf-8 -*-
"""
babel.lists
~~~~~~~~~~~
Locale dependent formatting of lists.
The default locale for the functions in this module is determined by the
following environment variables, in that order:
* ``LC_ALL``, and
* ``LANG``
:copyright: (c) 2015 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from babel.core import Locale, default_locale
# Resolved once at import time (LC_ALL, then LANG); the fallback locale
# used by format_list() when no locale argument is given.
DEFAULT_LOCALE = default_locale()
def format_list(lst, locale=DEFAULT_LOCALE):
    """
    Format the items in `lst` as a locale-aware list.

    >>> format_list(['apples', 'oranges', 'pears'], 'en')
    u'apples, oranges, and pears'
    >>> format_list(['apples', 'oranges', 'pears'], 'zh')
    u'apples\u3001oranges\u548cpears'

    :param lst: a sequence of items to format in to a list
    :param locale: the locale
    """
    locale = Locale.parse(locale)
    if not lst:
        return ''
    if len(lst) == 1:
        return lst[0]
    patterns = locale.list_patterns
    if len(lst) == 2:
        return patterns['2'].format(*lst)
    # Three or more items: seed with the "start" pattern, fold the middle
    # items in one by one, then close with the "end" pattern.
    joined = patterns['start'].format(lst[0], lst[1])
    for item in lst[2:-1]:
        joined = patterns['middle'].format(joined, item)
    return patterns['end'].format(joined, lst[-1])
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.docker.compose.lifecycle;
import java.io.File;
import java.time.Duration;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.docker.compose.core.RunningService;
import org.springframework.boot.logging.LogLevel;
/**
 * Configuration properties for Docker Compose.
 *
 * @author Moritz Halbritter
 * @author Andy Wilkinson
 * @author Phillip Webb
 * @since 3.1.0
 */
@ConfigurationProperties(DockerComposeProperties.NAME)
public class DockerComposeProperties {
	// Prefix under which all properties in this class are bound.
	static final String NAME = "spring.docker.compose";
	/**
	 * Whether Docker Compose support is enabled.
	 */
	private boolean enabled = true;
	/**
	 * Arguments to pass to the Docker Compose command.
	 */
	private final List<String> arguments = new ArrayList<>();
	/**
	 * Paths to the Docker Compose configuration files.
	 */
	private final List<File> file = new ArrayList<>();
	/**
	 * Docker compose lifecycle management.
	 */
	private LifecycleManagement lifecycleManagement = LifecycleManagement.START_AND_STOP;
	/**
	 * Hostname or IP of the machine where the docker containers are started.
	 */
	private @Nullable String host;
	/**
	 * Start configuration.
	 */
	private final Start start = new Start();
	/**
	 * Stop configuration.
	 */
	private final Stop stop = new Stop();
	/**
	 * Profiles configuration.
	 */
	private final Profiles profiles = new Profiles();
	/**
	 * Skip options.
	 */
	private final Skip skip = new Skip();
	/**
	 * Readiness configuration.
	 */
	private final Readiness readiness = new Readiness();
	public boolean isEnabled() {
		return this.enabled;
	}
	public void setEnabled(boolean enabled) {
		this.enabled = enabled;
	}
	public List<String> getArguments() {
		return this.arguments;
	}
	public List<File> getFile() {
		return this.file;
	}
	public LifecycleManagement getLifecycleManagement() {
		return this.lifecycleManagement;
	}
	public void setLifecycleManagement(LifecycleManagement lifecycleManagement) {
		this.lifecycleManagement = lifecycleManagement;
	}
	public @Nullable String getHost() {
		return this.host;
	}
	public void setHost(@Nullable String host) {
		this.host = host;
	}
	public Start getStart() {
		return this.start;
	}
	public Stop getStop() {
		return this.stop;
	}
	public Profiles getProfiles() {
		return this.profiles;
	}
	public Skip getSkip() {
		return this.skip;
	}
	public Readiness getReadiness() {
		return this.readiness;
	}
	/**
	 * Bind and return the {@code spring.docker.compose} properties, falling back to a
	 * default instance when nothing is configured.
	 * @param binder the binder to use
	 * @return the bound properties
	 */
	static DockerComposeProperties get(Binder binder) {
		return binder.bind(NAME, DockerComposeProperties.class).orElseGet(DockerComposeProperties::new);
	}
	/**
	 * Start properties.
	 */
	public static class Start {
		/**
		 * Command used to start Docker Compose.
		 */
		private StartCommand command = StartCommand.UP;
		/**
		 * Log level for output.
		 */
		private LogLevel logLevel = LogLevel.INFO;
		/**
		 * Whether to skip executing the start command.
		 */
		private Skip skip = Skip.IF_RUNNING;
		/**
		 * Arguments to pass to the start command.
		 */
		private final List<String> arguments = new ArrayList<>();
		public StartCommand getCommand() {
			return this.command;
		}
		public void setCommand(StartCommand command) {
			this.command = command;
		}
		public LogLevel getLogLevel() {
			return this.logLevel;
		}
		public void setLogLevel(LogLevel logLevel) {
			this.logLevel = logLevel;
		}
		public Skip getSkip() {
			return this.skip;
		}
		public void setSkip(Skip skip) {
			this.skip = skip;
		}
		public List<String> getArguments() {
			return this.arguments;
		}
		/**
		 * Start command skip mode.
		 */
		public enum Skip {
			/**
			 * Never skip start.
			 */
			NEVER {
				@Override
				boolean shouldSkip(List<RunningService> runningServices) {
					return false;
				}
			},
			/**
			 * Skip start if there are already services running.
			 */
			IF_RUNNING {
				@Override
				boolean shouldSkip(List<RunningService> runningServices) {
					return !runningServices.isEmpty();
				}
				@Override
				String getLogMessage() {
					return "There are already Docker Compose services running, skipping startup";
				}
			};
			/**
			 * Whether startup should be skipped given the currently running services.
			 */
			abstract boolean shouldSkip(List<RunningService> runningServices);
			// Overridden by modes that want to explain why startup was skipped.
			String getLogMessage() {
				return "";
			}
		}
	}
	/**
	 * Stop properties.
	 */
	public static class Stop {
		/**
		 * Command used to stop Docker Compose.
		 */
		private StopCommand command = StopCommand.STOP;
		/**
		 * Timeout for stopping Docker Compose. Use '0' for forced stop.
		 */
		private Duration timeout = Duration.ofSeconds(10);
		/**
		 * Arguments to pass to the stop command.
		 */
		private final List<String> arguments = new ArrayList<>();
		public StopCommand getCommand() {
			return this.command;
		}
		public void setCommand(StopCommand command) {
			this.command = command;
		}
		public Duration getTimeout() {
			return this.timeout;
		}
		public void setTimeout(Duration timeout) {
			this.timeout = timeout;
		}
		public List<String> getArguments() {
			return this.arguments;
		}
	}
	/**
	 * Profiles properties.
	 */
	public static class Profiles {
		/**
		 * Docker compose profiles that should be active.
		 */
		private Set<String> active = new LinkedHashSet<>();
		public Set<String> getActive() {
			return this.active;
		}
		public void setActive(Set<String> active) {
			this.active = active;
		}
	}
	/**
	 * Skip options.
	 */
	public static class Skip {
		/**
		 * Whether to skip in tests.
		 */
		private boolean inTests = true;
		public boolean isInTests() {
			return this.inTests;
		}
		public void setInTests(boolean inTests) {
			this.inTests = inTests;
		}
	}
	/**
	 * Readiness properties.
	 */
	public static class Readiness {
		/**
		 * Wait strategy to use.
		 */
		private Wait wait = Wait.ALWAYS;
		/**
		 * Timeout of the readiness checks.
		 */
		private Duration timeout = Duration.ofMinutes(2);
		/**
		 * TCP properties.
		 */
		private final Tcp tcp = new Tcp();
		public Wait getWait() {
			return this.wait;
		}
		public void setWait(Wait wait) {
			this.wait = wait;
		}
		public Duration getTimeout() {
			return this.timeout;
		}
		public void setTimeout(Duration timeout) {
			this.timeout = timeout;
		}
		public Tcp getTcp() {
			return this.tcp;
		}
		/**
		 * Readiness wait strategies.
		 */
		public enum Wait {
			/**
			 * Always perform readiness checks.
			 */
			ALWAYS,
			/**
			 * Never perform readiness checks.
			 */
			NEVER,
			/**
			 * Only perform readiness checks if docker was started with lifecycle
			 * management.
			 */
			ONLY_IF_STARTED
		}
		/**
		 * TCP properties.
		 */
		public static class Tcp {
			/**
			 * Timeout for connections.
			 */
			private Duration connectTimeout = Duration.ofMillis(200);
			/**
			 * Timeout for reads.
			 */
			private Duration readTimeout = Duration.ofMillis(200);
			public Duration getConnectTimeout() {
				return this.connectTimeout;
			}
			public void setConnectTimeout(Duration connectTimeout) {
				this.connectTimeout = connectTimeout;
			}
			public Duration getReadTimeout() {
				return this.readTimeout;
			}
			public void setReadTimeout(Duration readTimeout) {
				this.readTimeout = readTimeout;
			}
		}
	}
}
#!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_mhz16 as upmMhz16
def main():
    """Continuously report CO2 concentration and element temperature from
    an MHZ16 serial CO2 sensor attached to UART 0 (tested with the Grove
    CO2 sensor module)."""
    sensor = upmMhz16.MHZ16(0)

    ## Exit handlers ##
    def on_sigint(signum, frame):
        # Raising SystemExit suppresses the traceback normally printed
        # when the user hits Ctrl-C.
        raise SystemExit

    def on_exit():
        # Runs on interpreter exit so we always announce shutdown.
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(on_exit)
    signal.signal(signal.SIGINT, on_sigint)

    # Make sure the serial port is initialized properly; 9600 baud is the
    # sensor's default speed.
    if not sensor.setupTty(upmMhz16.cvar.int_B9600):
        print("Failed to setup tty port parameters")
        sys.exit(0)

    print("Make sure that the sensor has had "
          "at least 3 minutes to warm up\n"
          "or you will not get valid results.\n"
          "The temperature reported is not the ambient temperature,\n"
          "but rather the temperature of the sensor elements.")
    time.sleep(1)

    while True:
        if sensor.getData():
            print("CO2 concentration: {0} PPM, "
                  "Temperature (in C): {1}".format(
                      sensor.getGas(), sensor.getTemperature()))
        else:
            print("Failed to retrieve data")
        time.sleep(2)
if __name__ == '__main__':
    main()
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
namespace tensorflow {
namespace grappler {
// GraphTopologyView is a helper class to simplify `node-to-node` connectivity
// traversals. Regular `GraphView` simplifies `tensor-to-tensor` traversals:
// connections between output tensors and inputs of a consumer nodes. For the
// topology view we are focused on nodes connected to nodes, and it's irrelevant
// if this connection is formed by one or multiple individual tensors.
//
// Example:
// a = Placeholder(..)
// b = Placeholder(..)
// c = AddN([a, a, b])
//
// GraphView edges: [a:0 -> c:0, a:0 -> c:1, b:0 -> c:2]
// GraphTopologyView edges: [a -> c, b -> c]
//
// GraphView is used for exploring single node fanins and fanouts, and
// GraphTopologyView is focused on efficient full graph traversals (computing
// graph node properties from transitive fanouts, etc...).
class GraphTopologyView {
 public:
  GraphTopologyView() = default;
  // When `skip_invalid_edges` is true, initialization tolerates edges or
  // inputs whose endpoints are not present in the graph (see field below).
  explicit GraphTopologyView(bool skip_invalid_edges)
      : skip_invalid_edges_(skip_invalid_edges) {}

  // Initialize graph topology view from the graph. It's possible to pass
  // additional edges that do not exist in a graph, but must be respected when
  // computing graph topology. Example: Tensorflow runtime allows concurrent
  // execution of dequeue/enqueue ops from the same queue resource, but we might
  // want to enforce ordering between them for the purpose of graph analysis.
  absl::Status InitializeFromGraph(
      const GraphDef& graph, absl::Span<const GraphView::Edge> ephemeral_edges,
      bool ignore_control_edges);

  absl::Status InitializeFromGraph(
      const GraphDef& graph, absl::Span<const GraphView::Edge> ephemeral_edges);
  absl::Status InitializeFromGraph(const GraphDef& graph,
                                   bool ignore_control_edges);
  absl::Status InitializeFromGraph(const GraphDef& graph);

  // True once one of the InitializeFromGraph overloads has succeeded.
  bool is_initialized() const { return graph_ != nullptr; }
  int num_nodes() const { return num_nodes_; }
  const GraphDef* graph() const { return graph_; }

  // Returns true iff the node exists in the underlying graph.
  bool HasNode(absl::string_view node_name) const;

  // Finds a node by name or returns `nullptr` if it's not in the graph.
  const NodeDef* GetNode(absl::string_view node_name) const;
  // Returns a node corresponding to the given node index.
  const NodeDef* GetNode(int node_idx) const;

  // Returns a node index for the given node name, if the name exists in the
  // underlying graph. Otherwise returns empty optional.
  const absl::optional<int> GetNodeIndex(absl::string_view node_name) const;
  // Returns a node index for the given node, if the node belongs to the
  // underlying graph. Otherwise returns empty optional.
  const absl::optional<int> GetNodeIndex(const NodeDef& node) const;

  // Returns all the node indexes that are in the direct fanin of the given
  // node. If the `node_idx` is outside of [0, num_nodes_) returns empty vector.
  const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
  // Returns all the node indexes that are in the direct fanout of the given
  // node. If the `node_idx` is outside of [0, num_nodes_) returns empty vector.
  const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;

 private:
  // If true, all invalid edges and inputs (srd, dst or input node not found in
  // a graph) will be skipped, otherwise initialization will fail with error.
  bool skip_invalid_edges_ = false;

  // WARN: `graph_` must outlive this object and graph nodes must not be
  // destructed, because node names captured with absl::string_view.
  const GraphDef* graph_ = nullptr;  // do not own
  int num_nodes_ = 0;
  // Bidirectional mapping between node names and dense [0, num_nodes_)
  // indices; all adjacency below is stored in terms of these indices.
  std::vector<absl::string_view> index_to_node_name_;
  absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
  std::vector<absl::InlinedVector<int, 4>> fanins_;   // node_idx->input nodes
  std::vector<absl::InlinedVector<int, 2>> fanouts_;  // node_idx->output nodes

  // We need a valid reference to return from GetFanin/GetFanout if the
  // `node_idx` argument is outside of the [0, num_nodes_) range.
  absl::InlinedVector<int, 4> empty_fanin_;
  absl::InlinedVector<int, 2> empty_fanout_;
};
} // end namespace grappler
} // end namespace tensorflow
#endif // TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/grappler/graph_topology_view.h |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# AltCLIP
## 概要
AltCLIPモデルは、「[AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://huggingface.co/papers/2211.06679)」という論文でZhongzhi Chen、Guang Liu、Bo-Wen Zhang、Fulong Ye、Qinghong Yang、Ledell Wuによって提案されました。AltCLIP(CLIPの言語エンコーダーの代替)は、様々な画像-テキストペアおよびテキスト-テキストペアでトレーニングされたニューラルネットワークです。CLIPのテキストエンコーダーを事前学習済みの多言語テキストエンコーダーXLM-Rに置き換えることで、ほぼ全てのタスクでCLIPに非常に近い性能を得られ、オリジナルのCLIPの能力を多言語理解などに拡張しました。
論文の要旨は以下の通りです:
*この研究では、強力なバイリンガルマルチモーダル表現モデルを訓練するための概念的に単純で効果的な方法を提案します。OpenAIによってリリースされたマルチモーダル表現モデルCLIPから開始し、そのテキストエンコーダを事前学習済みの多言語テキストエンコーダXLM-Rに交換し、教師学習と対照学習からなる2段階のトレーニングスキーマを用いて言語と画像の表現を整合させました。幅広いタスクの評価を通じて、我々の方法を検証します。ImageNet-CN、Flicker30k-CN、COCO-CNを含む多くのタスクで新たな最先端の性能を達成しました。さらに、ほぼすべてのタスクでCLIPに非常に近い性能を得ており、これはCLIPのテキストエンコーダを変更するだけで、多言語理解などの拡張を実現できることを示唆しています。*
このモデルは[jongjyh](https://huggingface.co/jongjyh)により提供されました。
## 使用上のヒントと使用例
AltCLIPの使用方法はCLIPに非常に似ています。CLIPとの違いはテキストエンコーダーにあります。私たちはカジュアルアテンションではなく双方向アテンションを使用し、XLM-Rの[CLS]トークンをテキスト埋め込みを表すものとして取ることに留意してください。
AltCLIPはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。AltCLIPはViTのようなTransformerを使用して視覚的特徴を、双方向言語モデルを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。
Transformerエンコーダーに画像を与えるには、各画像を固定サイズの重複しないパッチの系列に分割し、それらを線形に埋め込みます。画像全体を表現するための[CLS]トークンが追加されます。著者は絶対位置埋め込みも追加し、結果として得られるベクトルの系列を標準的なTransformerエンコーダーに供給します。[`CLIPImageProcessor`]を使用して、モデルのために画像のサイズ変更(または拡大縮小)と正規化を行うことができます。
[`AltCLIPProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`CLIPImageProcessor`]と[`XLMRobertaTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AltCLIPProcessor`]と[`AltCLIPModel`]を使用して画像-テキスト類似スコアを取得する方法を示しています。
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AltCLIPModel, AltCLIPProcessor
>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```
<Tip>
このモデルは`CLIPModel`をベースにしており、オリジナルの[CLIP](clip)と同じように使用してください。
</Tip>
## AltCLIPConfig
[[autodoc]] AltCLIPConfig
## AltCLIPTextConfig
[[autodoc]] AltCLIPTextConfig
## AltCLIPVisionConfig
[[autodoc]] AltCLIPVisionConfig
## AltCLIPProcessor
[[autodoc]] AltCLIPProcessor
## AltCLIPModel
[[autodoc]] AltCLIPModel
- forward
- get_text_features
- get_image_features
## AltCLIPTextModel
[[autodoc]] AltCLIPTextModel
- forward
## AltCLIPVisionModel
[[autodoc]] AltCLIPVisionModel
- forward | unknown | github | https://github.com/huggingface/transformers | docs/source/ja/model_doc/altclip.md |
// run
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Issue 38515: failed to mark the method wrapper
// reflect.Type.Method itself as REFLECTMETHOD.
package main
import "reflect"
// Set by foo.X so main can verify the method was actually invoked
// through the reflect-generated wrapper.
var called bool
type foo struct{}
func (foo) X() { called = true }
// Method expression on reflect.Type; referencing it is what issue 38515
// requires the compiler to mark as REFLECTMETHOD.
var h = reflect.Type.Method
func main() {
	// Look up method 0 of foo via the reflect.Type.Method expression,
	// recover it as a plain func and call it; the call must reach foo.X.
	v := reflect.ValueOf(foo{})
	m := h(v.Type(), 0)
	f := m.Func.Interface().(func(foo))
	f(foo{})
	if !called {
		panic("FAIL")
	}
}
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import ctypes
from ctypes import windll, wintypes, POINTER, Structure, c_void_p, c_char_p
from ctypes.wintypes import DWORD
from .._ffi import FFIEngineError
from .._types import str_cls
from ..errors import LibraryNotFoundError
from ._kernel32 import kernel32
__all__ = [
'crypt32',
'get_error',
]
# Load crypt32.dll, translating the Windows "module not found" OSError into
# the library's own LibraryNotFoundError; any other load failure propagates.
try:
    crypt32 = windll.crypt32
except (OSError) as e:
    if str_cls(e).find('The specified module could not be found') != -1:
        raise LibraryNotFoundError('crypt32.dll could not be found')
    raise
# Opaque Windows handle aliases used by the function prototypes below.
HCERTSTORE = wintypes.HANDLE
HCERTCHAINENGINE = wintypes.HANDLE
HCRYPTPROV = wintypes.HANDLE
HCRYPTKEY = wintypes.HANDLE
PBYTE = c_char_p
# ULONG_PTR is pointer-sized: 64-bit in a 64-bit process, 32-bit otherwise.
if sys.maxsize > 2 ** 32:
    ULONG_PTR = ctypes.c_uint64
else:
    ULONG_PTR = ctypes.c_ulong
# Declare the CryptoAPI structures and function prototypes. Field order and
# types must mirror the Windows SDK headers exactly; any AttributeError while
# wiring them up is reported as an FFI engine failure below.
try:
    # Generic length-prefixed binary blob used throughout the CryptoAPI.
    class CRYPTOAPI_BLOB(Structure):  # noqa
        _fields_ = [
            ("cbData", DWORD),
            ("pbData", c_void_p),
        ]
    # The SDK defines several aliases for the same blob layout.
    CRYPT_INTEGER_BLOB = CRYPTOAPI_BLOB
    CERT_NAME_BLOB = CRYPTOAPI_BLOB
    CRYPT_BIT_BLOB = CRYPTOAPI_BLOB
    CRYPT_OBJID_BLOB = CRYPTOAPI_BLOB
    class CRYPT_ALGORITHM_IDENTIFIER(Structure):  # noqa
        _fields_ = [
            ("pszObjId", wintypes.LPSTR),
            ("Parameters", CRYPT_OBJID_BLOB),
        ]
    class CERT_PUBLIC_KEY_INFO(Structure):  # noqa
        _fields_ = [
            ("Algorithm", CRYPT_ALGORITHM_IDENTIFIER),
            ("PublicKey", CRYPT_BIT_BLOB),
        ]
    class CERT_EXTENSION(Structure):  # noqa
        _fields_ = [
            ("pszObjId", wintypes.LPSTR),
            ("fCritical", wintypes.BOOL),
            ("Value", CRYPT_OBJID_BLOB),
        ]
    PCERT_EXTENSION = POINTER(CERT_EXTENSION)
    # Decoded body of an X.509 certificate.
    class CERT_INFO(Structure):  # noqa
        _fields_ = [
            ("dwVersion", DWORD),
            ("SerialNumber", CRYPT_INTEGER_BLOB),
            ("SignatureAlgorithm", CRYPT_ALGORITHM_IDENTIFIER),
            ("Issuer", CERT_NAME_BLOB),
            ("NotBefore", kernel32.FILETIME),
            ("NotAfter", kernel32.FILETIME),
            ("Subject", CERT_NAME_BLOB),
            ("SubjectPublicKeyInfo", CERT_PUBLIC_KEY_INFO),
            ("IssuerUniqueId", CRYPT_BIT_BLOB),
            ("SubjectUniqueId", CRYPT_BIT_BLOB),
            ("cExtension", DWORD),
            ("rgExtension", POINTER(PCERT_EXTENSION)),
        ]
    PCERT_INFO = POINTER(CERT_INFO)
    # A certificate as handed out by a certificate store: the raw encoded
    # bytes plus the decoded CERT_INFO.
    class CERT_CONTEXT(Structure):  # noqa
        _fields_ = [
            ("dwCertEncodingType", DWORD),
            ("pbCertEncoded", c_void_p),
            ("cbCertEncoded", DWORD),
            ("pCertInfo", PCERT_INFO),
            ("hCertStore", HCERTSTORE)
        ]
    PCERT_CONTEXT = POINTER(CERT_CONTEXT)
    class CERT_ENHKEY_USAGE(Structure):  # noqa
        _fields_ = [
            ('cUsageIdentifier', DWORD),
            ('rgpszUsageIdentifier', POINTER(wintypes.BYTE)),
        ]
    PCERT_ENHKEY_USAGE = POINTER(CERT_ENHKEY_USAGE)
    class CERT_TRUST_STATUS(Structure):  # noqa
        _fields_ = [
            ('dwErrorStatus', DWORD),
            ('dwInfoStatus', DWORD),
        ]
    # Chain-building result structures: element -> simple chain -> context.
    class CERT_CHAIN_ELEMENT(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('pCertContext', PCERT_CONTEXT),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('pRevocationInfo', c_void_p),
            ('pIssuanceUsage', PCERT_ENHKEY_USAGE),
            ('pApplicationUsage', PCERT_ENHKEY_USAGE),
            ('pwszExtendedErrorInfo', wintypes.LPCWSTR),
        ]
    PCERT_CHAIN_ELEMENT = POINTER(CERT_CHAIN_ELEMENT)
    class CERT_SIMPLE_CHAIN(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('cElement', DWORD),
            ('rgpElement', POINTER(PCERT_CHAIN_ELEMENT)),
            ('pTrustListInfo', c_void_p),
            ('fHasRevocationFreshnessTime', wintypes.BOOL),
            ('dwRevocationFreshnessTime', DWORD),
        ]
    PCERT_SIMPLE_CHAIN = POINTER(CERT_SIMPLE_CHAIN)
    class CERT_CHAIN_CONTEXT(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('cChain', DWORD),
            ('rgpChain', POINTER(PCERT_SIMPLE_CHAIN)),
            ('cLowerQualityChainContext', DWORD),
            ('rgpLowerQualityChainContext', c_void_p),
            ('fHasRevocationFreshnessTime', wintypes.BOOL),
            ('dwRevocationFreshnessTime', DWORD),
        ]
    PCERT_CHAIN_CONTEXT = POINTER(CERT_CHAIN_CONTEXT)
    # Parameters controlling chain building and policy verification.
    class CERT_USAGE_MATCH(Structure):  # noqa
        _fields_ = [
            ('dwType', DWORD),
            ('Usage', CERT_ENHKEY_USAGE),
        ]
    class CERT_CHAIN_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('RequestedUsage', CERT_USAGE_MATCH),
        ]
    class CERT_CHAIN_POLICY_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwFlags', DWORD),
            ('pvExtraPolicyPara', c_void_p),
        ]
    class SSL_EXTRA_CERT_CHAIN_POLICY_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwAuthType', DWORD),
            ('fdwChecks', DWORD),
            ('pwszServerName', wintypes.LPCWSTR),
        ]
    class CERT_CHAIN_POLICY_STATUS(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwError', DWORD),
            ('lChainIndex', wintypes.LONG),
            ('lElementIndex', wintypes.LONG),
            ('pvExtraPolicyStatus', c_void_p),
        ]
    # Function prototypes: argtypes/restype must be declared before any call
    # so ctypes marshals arguments and return values correctly.
    crypt32.CertOpenStore.argtypes = [
        wintypes.LPCSTR,
        DWORD,
        HCRYPTPROV,
        DWORD,
        c_void_p
    ]
    crypt32.CertOpenStore.restype = HCERTSTORE
    crypt32.CertAddEncodedCertificateToStore.argtypes = [
        HCERTSTORE,
        DWORD,
        PBYTE,
        DWORD,
        DWORD,
        POINTER(PCERT_CONTEXT)
    ]
    crypt32.CertAddEncodedCertificateToStore.restype = wintypes.BOOL
    crypt32.CertGetCertificateChain.argtypes = [
        HCERTCHAINENGINE,
        PCERT_CONTEXT,
        POINTER(kernel32.FILETIME),
        HCERTSTORE,
        POINTER(CERT_CHAIN_PARA),
        DWORD,
        c_void_p,
        POINTER(PCERT_CHAIN_CONTEXT)
    ]
    crypt32.CertGetCertificateChain.restype = wintypes.BOOL
    crypt32.CertVerifyCertificateChainPolicy.argtypes = [
        ULONG_PTR,
        PCERT_CHAIN_CONTEXT,
        POINTER(CERT_CHAIN_POLICY_PARA),
        POINTER(CERT_CHAIN_POLICY_STATUS)
    ]
    crypt32.CertVerifyCertificateChainPolicy.restype = wintypes.BOOL
    crypt32.CertFreeCertificateChain.argtypes = [
        PCERT_CHAIN_CONTEXT
    ]
    crypt32.CertFreeCertificateChain.restype = None
    crypt32.CertOpenSystemStoreW.argtypes = [
        wintypes.HANDLE,
        wintypes.LPCWSTR
    ]
    crypt32.CertOpenSystemStoreW.restype = HCERTSTORE
    crypt32.CertEnumCertificatesInStore.argtypes = [
        HCERTSTORE,
        PCERT_CONTEXT
    ]
    crypt32.CertEnumCertificatesInStore.restype = PCERT_CONTEXT
    crypt32.CertCloseStore.argtypes = [
        HCERTSTORE,
        DWORD
    ]
    crypt32.CertCloseStore.restype = wintypes.BOOL
    crypt32.CertGetEnhancedKeyUsage.argtypes = [
        PCERT_CONTEXT,
        DWORD,
        c_void_p,
        POINTER(DWORD)
    ]
    crypt32.CertGetEnhancedKeyUsage.restype = wintypes.BOOL
except (AttributeError):
    raise FFIEngineError('Error initializing ctypes')
# Attach the structures to the module-like crypt32 object so callers can
# reference them as attributes of the loaded library wrapper.
setattr(crypt32, 'FILETIME', kernel32.FILETIME)
setattr(crypt32, 'CERT_ENHKEY_USAGE', CERT_ENHKEY_USAGE)
setattr(crypt32, 'CERT_CONTEXT', CERT_CONTEXT)
setattr(crypt32, 'PCERT_CONTEXT', PCERT_CONTEXT)
setattr(crypt32, 'CERT_USAGE_MATCH', CERT_USAGE_MATCH)
setattr(crypt32, 'CERT_CHAIN_PARA', CERT_CHAIN_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_PARA', CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA', SSL_EXTRA_CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_STATUS', CERT_CHAIN_POLICY_STATUS)
setattr(crypt32, 'PCERT_CHAIN_CONTEXT', PCERT_CHAIN_CONTEXT)
def get_error():
    """Return a ``(code, message)`` tuple describing the most recent
    Windows API error on the calling thread."""
    code = ctypes.GetLastError()
    return (code, ctypes.FormatError(code))
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Policy service."""
import abc
from oslo_config import cfg
import six
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone import notifications
CONF = cfg.CONF
@dependency.provider('policy_api')
class Manager(manager.Manager):
    """Default pivot point for the Policy backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    _POLICY = 'policy'

    def __init__(self):
        super(Manager, self).__init__(CONF.policy.driver)

    def create_policy(self, policy_id, policy, initiator=None):
        """Persist a new policy blob and emit an audit notification."""
        created_ref = self.driver.create_policy(policy_id, policy)
        notifications.Audit.created(self._POLICY, policy_id, initiator)
        return created_ref

    def get_policy(self, policy_id):
        """Return the policy blob, mapping NotFound to PolicyNotFound."""
        try:
            return self.driver.get_policy(policy_id)
        except exception.NotFound:
            raise exception.PolicyNotFound(policy_id=policy_id)

    def update_policy(self, policy_id, policy, initiator=None):
        """Update a policy blob and emit an audit notification."""
        # The ID embedded in the blob, when present, must match the target.
        if 'id' in policy and policy['id'] != policy_id:
            raise exception.ValidationError('Cannot change policy ID')
        try:
            updated_ref = self.driver.update_policy(policy_id, policy)
        except exception.NotFound:
            raise exception.PolicyNotFound(policy_id=policy_id)
        notifications.Audit.updated(self._POLICY, policy_id, initiator)
        return updated_ref

    @manager.response_truncated
    def list_policies(self, hints=None):
        """List every stored policy.

        NOTE(henry-nash): Since the advantage of filtering or list limiting
        of policies at the driver level is minimal, we leave this to the
        caller.
        """
        return self.driver.list_policies()

    def delete_policy(self, policy_id, initiator=None):
        """Remove a policy blob and emit an audit notification."""
        try:
            deleted_ref = self.driver.delete_policy(policy_id)
        except exception.NotFound:
            raise exception.PolicyNotFound(policy_id=policy_id)
        notifications.Audit.deleted(self._POLICY, policy_id, initiator)
        return deleted_ref
# Abstract interface every policy backend driver must implement.
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    def _get_list_limit(self):
        # Per-backend limit wins; otherwise fall back to the global default.
        return CONF.policy.list_limit or CONF.list_limit
    @abc.abstractmethod
    def enforce(self, context, credentials, action, target):
        """Verify that a user is authorized to perform action.

        For more information on a full implementation of this see:
        `keystone.policy.backends.rules.Policy.enforce`
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def create_policy(self, policy_id, policy):
        """Store a policy blob.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_policies(self):
        """List all policies."""
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_policy(self, policy_id):
        """Retrieve a specific policy blob.

        :raises: keystone.exception.PolicyNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def update_policy(self, policy_id, policy):
        """Update a policy blob.

        :raises: keystone.exception.PolicyNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_policy(self, policy_id):
        """Remove a policy blob.

        :raises: keystone.exception.PolicyNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
# -*- coding: utf-8 -*-
"""
Controls command line operations
The only particularly relevant command now i: patella startup <path>
not all commands retain functionality - this will be updated eventually (read: it might not be)
"""
# \/ Third-Party Packages \/
import os
import os.path
import click
import pandas as pd
# \/ Local Packages \/
from . import htmlparser as htmlparser
from . import patellaserver as flaskapp
# Bare attribute holder; instances carry .df/.path/.name for a loaded file.
class filec:
    pass
# Module-level mutable state shared by the CLI commands below: file1 is the
# user-loaded data file, file2 holds the comparison dataset.
file1 = filec()
file2 = filec()
file1.df = file2.df = pd.DataFrame({'foo': []})
file1.path = file2.path = ''
file1.name = file2.name = ''
# Root click group; subcommands are attached at the bottom of the module.
@click.group()
def patella():
    pass
@click.command()
@click.argument('url')
@click.option('--filename', default='datafile', help='specify the name of the local file that will be downloaded to the current directory')
@click.option('--filetype', default='.csv', help='specify the file type the scraper will look for')
def scrape_url(url, filetype, filename):
    """Scrape URL for download links matching --filetype and download the
    result as --filename; echo any parser error."""
    parseobj = htmlparser.find_download_links(url, filetype, filename, download=True)
    # Bug fix: the original compared type(parseobj) against the *string*
    # 'NoneType', which is never equal, so the error branch always ran and
    # crashed on success (None is not subscriptable). Only report when the
    # parser actually returned an error object.
    if parseobj is not None:
        click.echo('ERROR: ' + parseobj['error'])  # Error reporting
@click.command()
@click.argument('file_one')
@click.option('--delimiters', default=',:,', help='Specify file type delimiters in format <DELIM>:<DELIM2>')
def load_data(file_one, delimiters):
    """Load FILE_ONE from the current directory into the shared file1
    DataFrame and fetch the comparison dataset into file2."""
    file1.path = os.getcwd() + '/' + file_one
    # Guard clause replaces the original else-branch, which redundantly
    # re-tested os.path.exists() inside the not-exists case.
    if not os.path.exists(file1.path):
        click.echo('no files found with the name ' + file_one + ' in path ' + file1.path)
        return
    file1.name = file_one
    list_delims = delimiters.split(':')
    # Previously a malformed --delimiters value failed silently; tell the
    # user instead of doing nothing.
    if len(list_delims) != 2:
        click.echo('delimiters must be given in the format <DELIM>:<DELIM2>')
        return
    file1.df = pd.read_table(file1.path, list_delims[0], header=0)
    file2.df = htmlparser.get_fe()
    os.environ['LOCAL_FILE_PATH'] = file1.path
    click.echo('file successfully loaded into Dataframes')
@click.command()
@click.argument('column')
@click.argument('filename')
def change_index(filename, column):
    """Set COLUMN as the index of the previously loaded file FILENAME."""
    # Bug fix: the original compared the filename string against the filec
    # *instance* (filename == file1), which can never be equal, so the
    # index was never changed. Compare against the recorded file name.
    if filename == file1.name:
        # Bug fix: DataFrame.set_index returns a new DataFrame (the original
        # discarded it, making the command a no-op) - keep the result.
        file1.df = file1.df.set_index(column)
    else:
        click.echo('no file found with that name')
@click.command()
@click.argument('column_names')
@click.argument('file')
def change_names(file, column_names):
    # TODO: unimplemented stub - presumably meant to rename columns of a
    # loaded file; confirm intended behavior before implementing.
    pass
# Thin wrapper that launches the bundled Flask server rooted at PATH.
@click.command()
@click.argument('path')
def startserver(path):
    flaskapp.startserver(path)
@click.command()
@click.argument('file')
@click.argument('col')
@click.option('--title', default=' ', help='specify the plot title')
@click.option('--x_title', default=' ', help='specify the X axis title')
@click.option('--y_title', default=' ', help='specify the Y axis title')
def plot(file, col, title, x_title, y_title):
    """Plot column COL of FILE (looked up under ./data/) against the
    comparison dataset, with optional plot/axis titles."""
    # os.path.join is portable across platforms, unlike the original
    # manual '/' concatenation.
    file1.path = os.path.join(os.getcwd(), 'data', file)
    file1.df = pd.read_table(file1.path, ',', header=0)
    htmlparser.compare(file1.df, htmlparser.get_fe(), col, title, x_title, y_title)
# A test cli command - intentionally a no-op, used to verify CLI wiring.
@click.command()
@click.argument('foo')
def testme(foo):
    pass
# add all the subcommands to the patella group
# (note: change_index and change_names are defined but not registered)
patella.add_command(scrape_url, name='scrape')
patella.add_command(testme, name='test')
patella.add_command(plot)
patella.add_command(load_data, name='load')
patella.add_command(startserver, name='startup')
#include <ATen/native/vulkan/ops/Factory.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
Tensor _empty_affine_quantized(
const IntArrayRef sizes,
const std::optional<ScalarType> dtype,
const std::optional<c10::Layout> layout,
const std::optional<Device> device,
const std::optional<bool> pin_memory,
const double scale,
const int64_t zero_point,
const std::optional<MemoryFormat> memory_format) {
api::StorageType storage_type = api::StorageType::TEXTURE_3D;
return convert_quantized(vTensor{
api::context(),
sizes.vec(),
scale,
zero_point,
convert_dtype(dtype ? *dtype : c10::kFloat),
storage_type,
memory_format ? get_gpu_memory_layout(storage_type, *memory_format)
: api::GPUMemoryLayout::TENSOR_CHANNELS_PACKED,
});
}
static Tensor empty_memory_format(
    const IntArrayRef sizes,
    const std::optional<ScalarType> dtype,
    const std::optional<c10::Layout> layout,
    const std::optional<Device> device,
    const std::optional<bool> pin_memory,
    const std::optional<MemoryFormat> memory_format) {
  // Allocate an uninitialized (non-quantized) Vulkan tensor backed by a 3D
  // texture; dtype defaults to float and the layout to channels-packed.
  const api::StorageType storage_type = api::StorageType::TEXTURE_3D;
  const auto v_dtype = convert_dtype(dtype.value_or(c10::kFloat));
  const auto v_layout = memory_format
      ? get_gpu_memory_layout(storage_type, *memory_format)
      : api::GPUMemoryLayout::TENSOR_CHANNELS_PACKED;
  vTensor v_tensor{
      api::context(),
      sizes.vec(),
      v_dtype,
      storage_type,
      v_layout,
  };
  return convert(v_tensor);
}
// Strides are ignored: the Vulkan backend stores tensors in its own texture
// layout, so this simply delegates to empty_memory_format with a contiguous
// memory format.
static Tensor empty_strided(
    const IntArrayRef sizes,
    const IntArrayRef /* strides */,
    const std::optional<ScalarType> dtype,
    const std::optional<c10::Layout> layout,
    const std::optional<Device> device,
    const std::optional<bool> pin_memory) {
  return empty_memory_format(
      sizes, dtype, layout, device, pin_memory, c10::MemoryFormat::Contiguous);
}
#ifdef USE_VULKAN_API

// Register the Vulkan implementations of the aten::empty* factory operators.
TORCH_LIBRARY_IMPL(aten, Vulkan, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("aten::empty.memory_format"),
      at::native::vulkan::ops::empty_memory_format);
  m.impl(
      TORCH_SELECTIVE_NAME("aten::_empty_affine_quantized"),
      at::native::vulkan::ops::_empty_affine_quantized);
  m.impl(
      TORCH_SELECTIVE_NAME("aten::empty_strided"),
      // NOTE(review): only this registration is wrapped in TORCH_FN; the
      // other two pass bare function pointers — confirm this is intentional.
      TORCH_FN(at::native::vulkan::ops::empty_strided));
}

#endif /* USE_VULKAN_API */
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/vulkan/ops/Factory.cpp |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Module to read input devices and send controls to the Crazyflie.
This module reads input from joysticks or other input devices and sends control
set-points to the Crazyflie. It can be configured in the UI.
Various drivers can be used to read input device data. Currently is uses the
PyGame driver, but in the future native support will be provided for Linux and
Windows drivers.
The input device's axes and buttons are mapped to software inputs using a
configuration file.
"""
from __future__ import division
__author__ = 'Bitcraze AB'
__all__ = ['JoystickReader']
import sys
import os
import re
import glob
import traceback
import logging
import shutil
import time  # used for timing the flip manoeuvre
import math
logger = logging.getLogger(__name__)
from cfclient.utils.pygamereader import PyGameReader
from cfclient.utils.config import Config
from cfclient.utils.config_manager import ConfigManager
from cfclient.utils.periodictimer import PeriodicTimer
from cflib.utils.callbacks import Caller
import psmove
# Presumably the gyro LSB-per-(deg/s) scale of the PS Move — only referenced
# by currently-disabled pointer code; verify before re-enabling it.
GYROSCOPE_SENSITIVITY = 65.536
dt = 0.001  # integration step (s); also only used by the disabled pointer code
MAX_THRUST = 65000  # raw thrust value corresponding to 100% throttle
FLIP_TIME = 0.165  # time in seconds a flip manoeuvre is allowed to last
class JoystickReader:
    """
    Thread that will read input from devices/joysticks and send control
    set-points to the Crazyflie.
    """
    inputConfig = []

    def __init__(self, do_device_discovery=True):
        # TODO: Should be OS dependant
        self.SF = sensorFusion()  # 6-DOF fusion for the PS Move pointer
        self.inputdevice = PyGameReader()
        self.pointerDevice = psmove.PSMove()
        self.PointerYaw = 0
        self.kalmanPitch = KalmanFilter()
        self.kalmanRoll = KalmanFilter()
        self.viscousModeThrust = 67  # base thrust (%) while in viscous mode
        self._emergency_landing = False
        self.auto = False
        self._min_thrust = 0
        self._max_thrust = 0
        self._maxAltitude = 0
        self.currentAltitude = 0
        self.minAltitude = 0
        self._thrust_slew_rate = 0
        self._thrust_slew_enabled = False
        self._thrust_slew_limit = 0
        self._emergency_stop = False
        self._has_pressure_sensor = False
        self._canSwitch = True
        self._old_thrust = 0
        self._old_alt_hold = False
        self._old_flip_type = -1  # -1 = no flip, 0 = left, 1 = right
        self._flip_time_start = -float("inf")
        self._trim_roll = Config().get("trim_roll")
        self._trim_pitch = Config().get("trim_pitch")
        self._trim_yaw = 0.0
        # BUG FIX: the original used `is "Normal"`, an identity comparison
        # that is effectively always False for a string loaded from config,
        # so the "Normal" flight-mode limits were never applied.
        if Config().get("flightmode") == "Normal":
            self._max_yaw_rate = Config().get("normal_max_yaw")
            self._max_rp_angle = Config().get("normal_max_rp")
            # Values are stored as %, so use the setters to convert them.
            self.set_thrust_limits(
                Config().get("normal_min_thrust"),
                Config().get("normal_max_thrust"))
            self.set_thrust_slew_limiting(
                Config().get("normal_slew_rate"),
                Config().get("normal_slew_limit"))
        else:
            self._max_yaw_rate = Config().get("max_yaw")
            self._max_rp_angle = Config().get("max_rp")
            # Values are stored as %, so use the setters to convert them.
            self.set_thrust_limits(
                Config().get("min_thrust"), Config().get("max_thrust"))
            self.set_thrust_slew_limiting(
                Config().get("slew_rate"), Config().get("slew_limit"))
        self._dev_blacklist = None
        if len(Config().get("input_device_blacklist")) > 0:
            self._dev_blacklist = re.compile(
                Config().get("input_device_blacklist"))
            logger.info("Using device blacklist [{}]".format(
                Config().get("input_device_blacklist")))
        self._available_devices = {}
        # TODO: The polling interval should be set from config file
        self._read_timer = PeriodicTimer(0.01, self.read_input)
        if do_device_discovery:
            self._discovery_timer = PeriodicTimer(1.0,
                                                  self._do_device_discovery)
            self._discovery_timer.start()
        # Check if user config exists, otherwise copy files
        if not os.path.exists(ConfigManager().configs_dir):
            logger.info("No user config found, copying dist files")
            os.makedirs(ConfigManager().configs_dir)
            for f in glob.glob(sys.path[0] +
                               "/cfclient/configs/input/[A-Za-z]*.json"):
                dest = os.path.join(ConfigManager().configs_dir,
                                    os.path.basename(f))
                if not os.path.isfile(dest):
                    logger.debug("Copying %s", f)
                    shutil.copy2(f, ConfigManager().configs_dir)
        ConfigManager().get_list_of_configs()
        # Callbacks other components register on.
        self.input_updated = Caller()
        self.rp_trim_updated = Caller()
        self.emergency_stop_updated = Caller()
        self.switch_mode_updated = Caller()
        self.device_discovery = Caller()
        self.device_error = Caller()
        self.althold_updated = Caller()
        self.auto_input_updated = Caller()
        self.pointer_input_updated = Caller()

    def setViscousModeThrust(self, thrust):
        """Set the base thrust (%) used while viscous mode is active."""
        if thrust >= 0:
            self.viscousModeThrust = thrust

    def setEmergencyLanding(self, emergencyLanding):
        """Enable/disable the slow automatic thrust ramp-down."""
        self._emergency_landing = emergencyLanding

    def setAltHoldAvailable(self, available):
        """Record whether the connected copter has a pressure sensor."""
        self._has_pressure_sensor = available

    def setAuto(self, auto):
        """Switch between manual piloting and autonomous set-points."""
        self.auto = auto

    def setAltHold(self, althold):
        """Force the remembered altitude-hold state."""
        self._old_alt_hold = althold

    def _do_device_discovery(self):
        # Announce devices once, then stop polling for them.
        devs = self.getAvailableDevices()
        if len(devs):
            self.device_discovery.call(devs)
            self._discovery_timer.stop()

    def getAvailableDevices(self):
        """List all available and approved input devices.

        This function will filter available devices by using the
        blacklist configuration and only return approved devices."""
        devs = self.inputdevice.getAvailableDevices()
        approved_devs = []
        for dev in devs:
            if ((not self._dev_blacklist) or
                    (self._dev_blacklist and not
                     self._dev_blacklist.match(dev["name"]))):
                self._available_devices[dev["name"]] = dev["id"]
                approved_devs.append(dev)
        return approved_devs

    def enableRawReading(self, deviceId):
        """
        Enable raw reading of the input device with id deviceId. This is used
        to get raw values for setting up of input devices. Values are read
        without using a mapping.
        """
        self.inputdevice.enableRawReading(deviceId)

    def disableRawReading(self):
        """Disable raw reading of input device."""
        self.inputdevice.disableRawReading()

    def readRawValues(self):
        """Read raw values from the input device."""
        return self.inputdevice.readRawValues()

    def start_input(self, device_name, config_name):
        """
        Start reading input from the device with name device_name using config
        config_name
        """
        try:
            device_id = self._available_devices[device_name]
            self.inputdevice.start_input(
                device_id,
                ConfigManager().get_config(config_name))
            self._read_timer.start()
        except Exception:
            self.device_error.call(
                "Error while opening/initializing input device\n\n%s" %
                (traceback.format_exc()))

    def stop_input(self):
        """Stop reading from the input device."""
        self._read_timer.stop()

    def set_yaw_limit(self, max_yaw_rate):
        """Set a new max yaw rate value."""
        self._max_yaw_rate = max_yaw_rate

    def set_rp_limit(self, max_rp_angle):
        """Set a new max roll/pitch value."""
        self._max_rp_angle = max_rp_angle

    def set_thrust_slew_limiting(self, thrust_slew_rate, thrust_slew_limit):
        """Set new values for limit where the slewrate control kicks in and
        for the slewrate."""
        self._thrust_slew_rate = JoystickReader.p2t(thrust_slew_rate)
        self._thrust_slew_limit = JoystickReader.p2t(thrust_slew_limit)
        self._thrust_slew_enabled = thrust_slew_rate > 0

    def set_thrust_limits(self, min_thrust, max_thrust):
        """Set a new min/max thrust limit (given in %)."""
        self._min_thrust = JoystickReader.p2t(min_thrust)
        self._max_thrust = JoystickReader.p2t(max_thrust)

    def set_trim_roll(self, trim_roll):
        """Set a new value for the roll trim."""
        self._trim_roll = trim_roll

    def set_trim_pitch(self, trim_pitch):
        """Set a new value for the pitch trim."""
        self._trim_pitch = trim_pitch

    def setMaxAltitude(self, maxAltitude):
        """Set the maximum altitude used by viscous-mode limiting."""
        self._maxAltitude = maxAltitude

    def setCurrentAltitude(self, altitude):
        """Track the current altitude and the lowest altitude seen so far."""
        if altitude < self.minAltitude or self.minAltitude == 0:
            self.minAltitude = altitude
        self.currentAltitude = altitude

    def read_input(self):
        """Read input data from the selected device"""
        # -- PS Move pointer: estimate an absolute yaw and report it --------
        if self.pointerDevice is not None:
            if self.pointerDevice.poll():
                buttons = self.pointerDevice.get_buttons()
                if buttons & psmove.Btn_MOVE:
                    # MOVE button re-zeroes the heading estimate.
                    self.pointerDevice.set_leds(0, 255, 0)
                    self.pointerDevice.update_leds()
                    self.SF = sensorFusion()
                ax, ay, az = self.pointerDevice.get_accelerometer_frame(psmove.Frame_SecondHalf)
                gx, gy, gz = self.pointerDevice.get_gyroscope_frame(psmove.Frame_SecondHalf)
                # The fusion filter expects deg/s; the frames arrive in rad/s.
                gx = gx * 180 / math.pi
                gy = gy * 180 / math.pi
                gz = gz * 180 / math.pi
                self.SF.sensfusion6UpdateQ(gx, gy, gz, ax, ay, az, 1/100)
                roll, pitch, yaw = self.SF.sensfusion6GetEulerRPY()
                self.PointerYaw = -yaw
                # Colour the PS Move LED according to the heading sign/size.
                if self.PointerYaw >= 0:
                    self.pointerDevice.set_leds(int(255*self.PointerYaw/180), 255, 0)
                else:
                    self.pointerDevice.set_leds(0, 255, int(255*math.fabs(self.PointerYaw)/180))
                self.pointerDevice.update_leds()
                self.pointer_input_updated.call(self.PointerYaw, False)
        # -- Joystick: map axes/buttons to control set-points ---------------
        try:
            data = self.inputdevice.read_input()
            roll = data["roll"] * self._max_rp_angle
            pitch = data["pitch"] * self._max_rp_angle
            thrust = data["thrust"]
            yaw = data["yaw"]
            raw_thrust = data["thrust"]
            emergency_stop = data["estop"]
            trim_roll = data["rollcal"]
            trim_pitch = data["pitchcal"]
            althold = data["althold"]
            flipleft = data["flipleft"]
            flipright = data["flipright"]
            viscousMode = data["viscousMode"]
            switchMode = data["switchmode"]
            # Edge-trigger the mode switch so holding the button fires once.
            if switchMode and self._canSwitch:
                self._canSwitch = False
                self.switch_mode_updated.call()
            elif not switchMode:
                self._canSwitch = True
            # Altitude-hold toggled.
            if self._old_alt_hold != althold:
                self.althold_updated.call(althold)
                self._old_alt_hold = althold
            if self._emergency_stop != emergency_stop:
                self._emergency_stop = emergency_stop
                self.emergency_stop_updated.call(self._emergency_stop)
            if self.auto:
                self.auto_input_updated.call(trim_roll, trim_pitch, yaw, thrust)
            else:
                # (A duplicated alt-hold toggle check that could never fire —
                # the state was already synced above — has been removed.)
                # Disable hover mode on emergency stop if it was enabled.
                if self._emergency_stop:
                    if self._has_pressure_sensor:
                        if self._old_alt_hold:
                            self.althold_updated.call(False)
                            self._old_alt_hold = False
                            althold = False
                # Viscous mode: the quad keeps a base thrust and the stick
                # steps it up/down so the copter climbs or descends steadily.
                if viscousMode:
                    viscous_thrust = self.p2t(self.viscousModeThrust)
                    # Quantize the raw stick into a few climb/descend steps.
                    if raw_thrust > 0 and raw_thrust <= 0.5:
                        raw_thrust = 1
                    elif raw_thrust > 0.5:
                        raw_thrust = 2
                    elif raw_thrust >= -0.5 and raw_thrust < 0:
                        raw_thrust = -0.5
                    elif raw_thrust < -0.5:
                        raw_thrust = -1
                    thrust = int(round(viscous_thrust + raw_thrust*self.p2t(10)))
                elif (althold and self._has_pressure_sensor) or (flipleft or flipright):
                    thrust = int(round(JoystickReader.deadband(thrust,0.2)*32767 + 32767)) #Convert to uint16
                else:
                    # Thrust limiting (slew, minimum and emergency stop).
                    if raw_thrust < 0.05 or emergency_stop:
                        thrust = 0
                    else:
                        thrust = self._min_thrust + thrust * (self._max_thrust - self._min_thrust)
                    if (self._thrust_slew_enabled == True and self._thrust_slew_limit > thrust and not emergency_stop):
                        if self._old_thrust > self._thrust_slew_limit:
                            self._old_thrust = self._thrust_slew_limit
                        if thrust < (self._old_thrust - (self._thrust_slew_rate / 100)):
                            thrust = self._old_thrust - self._thrust_slew_rate / 100
                        if raw_thrust < 0 or thrust < self._min_thrust:
                            thrust = 0
                if self._emergency_landing:
                    # Ramp thrust down gently instead of cutting the motors.
                    thrust = self._old_thrust - self.p2t(10)*0.2
                    if thrust < 0: thrust = 0
                self._old_thrust = thrust
                # Yaw deadband
                # TODO: Add to input device config?
                yaw = JoystickReader.deadband(yaw,0.2)*self._max_yaw_rate
                if trim_roll != 0 or trim_pitch != 0:
                    self._trim_roll += trim_roll
                    self._trim_pitch += trim_pitch
                    self.rp_trim_updated.call(self._trim_roll, self._trim_pitch)
                if (flipleft or flipright) and self._flip_time_start < 0:
                    # Record when the flip manoeuvre starts.
                    self._flip_time_start = time.time()
                    if flipleft:
                        self._old_flip_type = 0
                    if flipright:
                        self._old_flip_type = 1
                if flipleft and self._old_flip_type == 0:
                    # Hold altitude with extra thrust and command a hard roll.
                    thrust = self.p2t(70)
                    roll = 1600
                elif flipright and self._old_flip_type == 1:
                    thrust = self.p2t(50)
                    roll = -1000
                    self.input_updated.call(roll, 0, yaw, thrust)
                trimmed_roll = roll + self._trim_roll
                trimmed_pitch = pitch + self._trim_pitch
                if not flipleft and not flipright and not self.flipTimeControl(self._flip_time_start):
                    # Flip finished: reset flip state and re-apply the trims.
                    self._old_flip_type = -1
                    self._flip_time_start = -float("inf")
                    self.rp_trim_updated.call(self._trim_roll, self._trim_pitch)
                    trimmed_roll = roll + self._trim_roll
                    trimmed_pitch = pitch + self._trim_pitch
                self.input_updated.call(trimmed_roll, trimmed_pitch, yaw, thrust)
        except Exception:
            logger.warning("Exception while reading inputdevice: %s", traceback.format_exc())
            self.device_error.call("Error reading from input device\n\n%s" % traceback.format_exc())
            self._read_timer.stop()

    def update_trim_yaw_signal(self, yaw):
        """Store the externally supplied yaw trim."""
        self._trim_yaw = yaw

    @staticmethod
    def p2t(percentage):
        """Convert a percentage to raw thrust"""
        return int(MAX_THRUST * (percentage / 100.0))

    @staticmethod
    def deadband(value, threshold):
        """Zero out |value| < threshold and rescale the rest to [-1, 1]."""
        if abs(value) < threshold:
            value = 0
        elif value > 0:
            value -= threshold
        elif value < 0:
            value += threshold
        return value/(1-threshold)

    @staticmethod
    def flipTimeControl(startTime):
        """Return True while within FLIP_TIME seconds after startTime."""
        return (time.time()-startTime >= 0 and time.time()-startTime <= FLIP_TIME)
class sensorFusion():
    """Mahony-style 6-DOF sensor fusion (gyroscope + accelerometer).

    Maintains a unit quaternion (q0..q3) describing the sensor frame
    relative to the auxiliary frame.
    """

    def __init__(self):
        self.twoKp = (2.0 * 0.4)    # 2 * proportional gain
        self.twoKi = (2.0 * 0.001)  # 2 * integral gain
        # Integral error terms, scaled by Ki.
        self.integralFBx = 0.0
        self.integralFBy = 0.0
        self.integralFBz = 0.0
        # Quaternion of the sensor frame relative to the auxiliary frame.
        self.q0 = 1.0
        self.q1 = 0.0
        self.q2 = 0.0
        self.q3 = 0.0

    def sensfusion6UpdateQ(self, gx, gy, gz, ax, ay, az, dt):
        """Advance the quaternion by one integration step of dt seconds.

        gx/gy/gz are rates in deg/s; only the direction of ax/ay/az is used.
        """
        # Work in rad/s internally.
        gx = gx * math.pi / 180
        gy = gy * math.pi / 180
        gz = gz * math.pi / 180
        # Apply accelerometer feedback only when the measurement is valid
        # (an all-zero vector cannot be normalised).
        if not ((ax == 0.0) and (ay == 0.0) and (az == 0.0)):
            inv_norm = self.invSqrt(ax * ax + ay * ay + az * az)
            ax *= inv_norm
            ay *= inv_norm
            az *= inv_norm
            # Estimated direction of gravity (half magnitudes).
            half_vx = self.q1 * self.q3 - self.q0 * self.q2
            half_vy = self.q0 * self.q1 + self.q2 * self.q3
            half_vz = self.q0 * self.q0 - 0.5 + self.q3 * self.q3
            # Error = cross product of estimated and measured gravity.
            half_ex = (ay * half_vz - az * half_vy)
            half_ey = (az * half_vx - ax * half_vz)
            half_ez = (ax * half_vy - ay * half_vx)
            if self.twoKi > 0.0:
                # Integral feedback, scaled by Ki.
                self.integralFBx += self.twoKi * half_ex * dt
                self.integralFBy += self.twoKi * half_ey * dt
                self.integralFBz += self.twoKi * half_ez * dt
                gx += self.integralFBx
                gy += self.integralFBy
                gz += self.integralFBz
            else:
                # Prevent integral windup.
                self.integralFBx = 0.0
                self.integralFBy = 0.0
                self.integralFBz = 0.0
            # Proportional feedback.
            gx += self.twoKp * half_ex
            gy += self.twoKp * half_ey
            gz += self.twoKp * half_ez
        # Integrate the rate of change of the quaternion.
        gx *= (0.5 * dt)
        gy *= (0.5 * dt)
        gz *= (0.5 * dt)
        qa, qb, qc = self.q0, self.q1, self.q2
        self.q0 += (-qb * gx - qc * gy - self.q3 * gz)
        self.q1 += (qa * gx + qc * gz - self.q3 * gy)
        self.q2 += (qa * gy - qb * gz + self.q3 * gx)
        self.q3 += (qa * gz + qb * gy - qc * gx)
        # Re-normalise the quaternion.
        inv_norm = self.invSqrt(self.q0 * self.q0 + self.q1 * self.q1 + self.q2 * self.q2 + self.q3 * self.q3)
        self.q0 *= inv_norm
        self.q1 *= inv_norm
        self.q2 *= inv_norm
        self.q3 *= inv_norm

    def sensfusion6GetEulerRPY(self):
        """Return (roll, pitch, yaw) in degrees for the current quaternion."""
        # Estimated gravity direction.
        grav_x = 2 * (self.q1 * self.q3 - self.q0 * self.q2)
        grav_y = 2 * (self.q0 * self.q1 + self.q2 * self.q3)
        grav_z = self.q0 * self.q0 - self.q1 * self.q1 - self.q2 * self.q2 + self.q3 * self.q3
        # Clamp before asin to guard against rounding just outside [-1, 1].
        grav_x = max(-1, min(1, grav_x))
        yaw = math.atan2(2 * (self.q0 * self.q3 + self.q1 * self.q2), self.q0 * self.q0 + self.q1 * self.q1 - self.q2 * self.q2 - self.q3 * self.q3) * 180 / math.pi
        pitch = math.asin(grav_x) * 180 / math.pi  # pitch seems to be inverted
        roll = math.atan2(grav_y, grav_z) * 180 / math.pi
        return roll, pitch, yaw

    def invSqrt(self, x):
        """Inverse square root (plain math; no fast approximation needed)."""
        return 1 / math.sqrt(x)
## KALMAN ##
# Process noise Q: diagonal 3x3 with these elements on the diagonal
# (added to P's diagonal in the predict step of kalman_innovate).
Q1 = 5.0
Q2 = 100.0
Q3 = 0.01
# Measurement noise R: diagonal 2x2 with these elements on the diagonal.
R1 = 1000.0
R2 = 1000.0
class KalmanFilter:
    """3-state Kalman filter with state x = [angle, rate, rate-bias].

    Two measurements are fused each step: z1 (angle) and z2 (rate), i.e.
    H = [[1, 0, 0], [0, 1, 0]].  Noise levels come from the module-level
    Q1..Q3 (process) and R1..R2 (measurement) constants.
    """

    def __init__(self):
        self.x1 = 0.0
        self.x2 = 0.0
        self.x3 = 0.0
        # Init P to a diagonal matrix with large values since
        # the initial state is not known.
        self.p11 = 1000.0
        self.p12 = 0.0
        self.p13 = 0.0
        self.p21 = 0.0
        self.p22 = 1000.0
        self.p23 = 0.0
        self.p31 = 0.0
        self.p32 = 0.0
        self.p33 = 1000.0
        self.q1 = Q1
        self.q2 = Q2
        self.q3 = Q3
        self.r1 = R1
        self.r2 = R2

    def kalman_innovate(self, z1, z2, dt):
        """Run one predict + update cycle with measurements z1, z2 over dt."""
        # Step 1: predict the state, x(k) = F x(k-1) + B u + w
        # (x2 and x3 are unchanged by F).
        self.x1 = self.x1 + dt*self.x2 - dt*self.x3
        # Step 2: predict the covariance, P = F P F' + Q.
        a = self.p11 + self.p21*dt - self.p31*dt
        b = self.p12 + self.p22*dt - self.p32*dt
        c = self.p13 + self.p23*dt - self.p33*dt
        self.p11 = a + b*dt - c*dt + self.q1
        self.p12 = b
        self.p13 = c
        self.p21 = self.p21 + self.p22*dt - self.p23*dt
        self.p22 = self.p22 + self.q2
        # p23 is unchanged.
        self.p31 = self.p31 + self.p32*dt - self.p33*dt
        # p32 is unchanged.
        self.p33 = self.p33 + self.q3
        # Step 3: innovation, y = z(k) - H x(k).
        y1 = z1-self.x1
        y2 = z2-self.x2
        # Step 4: innovation covariance, S = H P H' + R.
        s11 = self.p11 + self.r1
        s12 = self.p12
        s21 = self.p21
        s22 = self.p22 + self.r2
        # Step 5: Kalman gain, K = P H' inv(S).
        sDet = 1/(s11*s22 - s12*s21)
        k11 = (self.p11*s22 - self.p12*s21)*sDet
        k12 = (self.p12*s11 - self.p11*s12)*sDet
        k21 = (self.p21*s22 - self.p22*s21)*sDet
        k22 = (self.p22*s11 - self.p21*s12)*sDet
        k31 = (self.p31*s22 - self.p32*s21)*sDet
        k32 = (self.p32*s11 - self.p31*s12)*sDet
        # Step 6: state update, x = x + K y.
        self.x1 = self.x1 + k11*y1 + k12*y2
        self.x2 = self.x2 + k21*y1 + k22*y2
        self.x3 = self.x3 + k31*y1 + k32*y2
        # Step 7: covariance update, P = (I - K H) P.
        p11 = self.p11*(1.0 - k11) - self.p21*k12
        p12 = self.p12*(1.0 - k11) - self.p22*k12
        p13 = self.p13*(1.0 - k11) - self.p23*k12
        p21 = self.p21*(1.0 - k22) - self.p11*k21
        p22 = self.p22*(1.0 - k22) - self.p12*k21
        p23 = self.p23*(1.0 - k22) - self.p13*k21
        p31 = self.p31 - self.p21*k32 - self.p11*k31
        p32 = self.p32 - self.p22*k32 - self.p12*k31
        # BUG FIX: was `self.p22*k32`; row 3 of (I - K H) is
        # [-k31, -k32, 1], so column 3 must use p23, not p22.
        p33 = self.p33 - self.p23*k32 - self.p13*k31
        self.p11 = p11
        self.p12 = p12
        self.p13 = p13
        self.p21 = p21
        self.p22 = p22
        self.p23 = p23
        self.p31 = p31
        self.p32 = p32
        self.p33 = p33
'use strict';
import {VERSION} from '../env/data.js';
import AxiosError from '../core/AxiosError.js';
const validators = {};

// Build one validator per primitive type: it returns true when `typeof`
// matches, otherwise an article-prefixed type name ("an object", "a string",
// ...) that callers splice into an error message.
for (const [i, type] of ['object', 'boolean', 'number', 'function', 'string', 'symbol'].entries()) {
  validators[type] = function validator(thing) {
    return typeof thing === type || 'a' + (i < 1 ? 'n ' : ' ') + type;
  };
}
const deprecatedWarnings = {};
/**
 * Transitional option validator
 *
 * @param {function|boolean?} validator - set to false if the transitional option has been removed
 * @param {string?} version - deprecated version / removed since version
 * @param {string?} message - some message with additional info
 *
 * @returns {function}
 */
validators.transitional = function transitional(validator, version, message) {
  const formatMessage = (opt, desc) =>
    `[Axios v${VERSION}] Transitional option '${opt}'${desc}` + (message ? `. ${message}` : '');

  return (value, opt, opts) => {
    // The option was removed outright: using it is always an error.
    if (validator === false) {
      throw new AxiosError(
        formatMessage(opt, ' has been removed' + (version ? ' in ' + version : '')),
        AxiosError.ERR_DEPRECATED
      );
    }

    // Warn once per option about the upcoming removal.
    if (version && !deprecatedWarnings[opt]) {
      deprecatedWarnings[opt] = true;
      // eslint-disable-next-line no-console
      console.warn(
        formatMessage(opt, ` has been deprecated since v${version} and will be removed in the near future`)
      );
    }

    return validator ? validator(value, opt, opts) : true;
  };
};
// Always-true validator that nudges users about a probable option typo.
validators.spelling = function spelling(correctSpelling) {
  return function warnSpelling(value, opt) {
    // eslint-disable-next-line no-console
    console.warn(`${opt} is likely a misspelling of ${correctSpelling}`);
    return true;
  };
};
/**
 * Assert object's properties type
 *
 * @param {object} options
 * @param {object} schema
 * @param {boolean?} allowUnknown
 *
 * @returns {object}
 */
function assertOptions(options, schema, allowUnknown) {
  if (typeof options !== 'object') {
    throw new AxiosError('options must be an object', AxiosError.ERR_BAD_OPTION_VALUE);
  }
  const keys = Object.keys(options);
  // Walk the keys back-to-front, matching the original iteration order.
  for (let i = keys.length - 1; i >= 0; i--) {
    const opt = keys[i];
    const validator = schema[opt];
    if (validator) {
      const value = options[opt];
      // `undefined` values are always accepted; anything else must validate.
      const result = value === undefined || validator(value, opt, options);
      if (result !== true) {
        throw new AxiosError('option ' + opt + ' must be ' + result, AxiosError.ERR_BAD_OPTION_VALUE);
      }
    } else if (allowUnknown !== true) {
      throw new AxiosError('Unknown option ' + opt, AxiosError.ERR_BAD_OPTION);
    }
  }
}
// Public surface of this helper module.
export default {
  assertOptions,
  validators
};
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.script;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.lucene.search.function.ScriptScoreQuery;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.SortedNumericLongValues;
import org.elasticsearch.index.mapper.IndexType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
import org.elasticsearch.index.mapper.SourceFieldMetrics;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.plugins.PluginsLoader;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.DocReader;
import org.elasticsearch.script.DocValuesDocReader;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.lookup.SourceProvider;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* A race between Lucene Expressions, Painless, and a hand optimized script
* implementing a {@link ScriptScoreQuery}.
*/
@Fork(2)
@Warmup(iterations = 10)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@OperationsPerInvocation(1_000_000) // The index has a million documents in it.
@State(Scope.Benchmark)
public class ScriptScoreBenchmark {
    // Plugin host used to load script engines (e.g. painless) from the
    // directory given by -Dplugins.dir.
    private final PluginsService pluginsService = new PluginsService(
        Settings.EMPTY,
        null,
        PluginsLoader.createPluginsLoader(Set.of(), PluginsLoader.loadPluginsBundles(Path.of(System.getProperty("plugins.dir"))), Map.of())
    );
    // Script infrastructure built from every discovered ScriptPlugin.
    private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class).toList());
    // Single mapped field "n": a doc-values-only long read by every
    // benchmarked script.
    private final Map<String, MappedFieldType> fieldTypes = Map.ofEntries(
        Map.entry(
            "n",
            new NumberFieldType(
                "n",
                NumberType.LONG,
                IndexType.docValuesOnly(),
                false,
                true,
                null,
                Map.of(),
                null,
                false,
                null,
                null,
                false
            )
        )
    );
    private final IndexFieldDataCache fieldDataCache = new IndexFieldDataCache.None();
    private final CircuitBreakerService breakerService = new NoneCircuitBreakerService();
    // Search-time lookup wiring field data for "n" without caching or
    // circuit breaking.
    private final SearchLookup lookup = new SearchLookup(
        fieldTypes::get,
        (mft, lookup, fdo) -> mft.fielddataBuilder(FieldDataContext.noRuntimeFields("index", "benchmark"))
            .build(fieldDataCache, breakerService),
        SourceProvider.fromLookup(MappingLookup.EMPTY, null, SourceFieldMetrics.NOOP)
    );

    // JMH parameter: which script implementation to benchmark.
    @Param({ "expression", "metal", "painless_cast", "painless_def" })
    private String script;

    // JMH parameter: indexing RAM buffer (MB) used while building the index.
    @Param({ "16" })
    private double indexingBufferMb;

    private ScoreScript.Factory factory;

    private IndexReader reader;
@Setup
public void setupScript() {
factory = switch (script) {
case "expression" -> scriptModule.engines.get("expression").compile("test", "doc['n'].value", ScoreScript.CONTEXT, Map.of());
case "metal" -> bareMetalScript();
case "painless_cast" -> scriptModule.engines.get("painless")
.compile(
"test",
"((org.elasticsearch.index.fielddata.ScriptDocValues.Longs)doc['n']).value",
ScoreScript.CONTEXT,
Map.of()
);
case "painless_def" -> scriptModule.engines.get("painless").compile("test", "doc['n'].value", ScoreScript.CONTEXT, Map.of());
default -> throw new IllegalArgumentException("Don't know how to implement script [" + script + "]");
};
}
@Setup
public void setupIndex() throws IOException {
Path path = Path.of(System.getProperty("tests.index"));
IOUtils.rm(path);
Directory directory = new MMapDirectory(path);
try (
IndexWriter w = new IndexWriter(
directory,
new IndexWriterConfig().setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(indexingBufferMb)
)
) {
for (int i = 1; i <= 1_000_000; i++) {
w.addDocument(List.of(new SortedNumericDocValuesField("n", i)));
}
w.commit();
}
reader = DirectoryReader.open(directory);
}
@Benchmark
public TopDocs benchmark() throws IOException {
TopDocs topDocs = new IndexSearcher(reader).search(scriptScoreQuery(factory), 10);
if (topDocs.scoreDocs[0].score != 1_000_000) {
throw new AssertionError("Expected score to be 1,000,000 but was [" + topDocs.scoreDocs[0].score + "]");
}
return topDocs;
}
private Query scriptScoreQuery(ScoreScript.Factory factory) {
ScoreScript.LeafFactory leafFactory = factory.newFactory(Map.of(), lookup);
return new ScriptScoreQuery(Queries.ALL_DOCS_INSTANCE, null, leafFactory, lookup, null, "test", 0, IndexVersion.current());
}
private ScoreScript.Factory bareMetalScript() {
return (params, lookup) -> {
MappedFieldType type = fieldTypes.get("n");
IndexNumericFieldData ifd = (IndexNumericFieldData) lookup.getForField(type, MappedFieldType.FielddataOperation.SEARCH);
return new ScoreScript.LeafFactory() {
@Override
public ScoreScript newInstance(DocReader docReader) throws IOException {
SortedNumericLongValues values = ifd.load(((DocValuesDocReader) docReader).getLeafReaderContext()).getLongValues();
return new ScoreScript(params, null, docReader) {
private int docId;
@Override
public double execute(ExplanationHolder explanation) {
try {
values.advanceExact(docId);
if (values.docValueCount() != 1) {
throw new IllegalArgumentException("script only works when there is exactly one value");
}
return values.nextValue();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public void setDocument(int docid) {
this.docId = docid;
}
};
}
@Override
public boolean needs_score() {
return false;
}
@Override
public boolean needs_termStats() {
return false;
}
};
};
}
} | java | github | https://github.com/elastic/elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java |
import random
from operator import attrgetter
import pytest
from wrapanapi import VmState
from cfme import test_requirements
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.provider import BaseProvider
from cfme.fixtures.provider import setup_or_skip
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import conf
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
# Marks applied to every test in this module: tier 1, the C&U requirement
# marker, and a module-scoped provider matrix restricted to providers whose
# yaml declares a 'cu-24x7' VM under cap_and_util.
pytestmark = [
    pytest.mark.tier(1),
    test_requirements.c_and_u,
    pytest.mark.provider(
        [VMwareProvider, RHEVMProvider, EC2Provider, OpenStackProvider, AzureProvider],
        required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')], scope="module")
]
# Expected breadcrumb trails for each destination under the Overview menu,
# keyed by destination name; compared against the UI in the breadcrumb test.
BREADCRUMB_LOCATIONS = dict(
    OverviewUtilization=["Overview", "Utilization", "Utilization", "Enterprise"],
    OverviewChargeback=["Overview", "Chargeback", 'Reports', 'All Saved Chargeback Reports'],
    OverviewReports=["Overview", "Reports", "Saved Reports", "All Saved Reports"],
    OverviewOptimization=["Overview", "Optimization"]
)
@pytest.fixture(scope="module")
def clean_setup_provider(request, provider):
    """Set up only the provider under test, on an otherwise provider-free appliance."""
    # Remove any providers left behind by earlier modules so metrics in the DB
    # can only come from the provider under test.
    BaseProvider.clear_providers()
    setup_or_skip(request, provider)
    yield
    # Teardown: leave the appliance provider-free for the next module.
    BaseProvider.clear_providers()
def vm_count(appliance, metrics_tbl, mgmt_system_id):
    """Return True when at least one VM/template metrics row exists for the provider."""
    session = appliance.db.client.session
    rows = (session.query(metrics_tbl)
            .filter(metrics_tbl.parent_ems_id == mgmt_system_id,
                    metrics_tbl.resource_type == "VmOrTemplate"))
    return bool(rows.count())
def host_count(appliance, metrics_tbl, mgmt_system_id):
    """Return True when at least one Host metrics row exists for the provider."""
    session = appliance.db.client.session
    rows = (session.query(metrics_tbl)
            .filter(metrics_tbl.parent_ems_id == mgmt_system_id,
                    metrics_tbl.resource_type == "Host"))
    return bool(rows.count())
@pytest.fixture(scope="module")
def metrics_collection(appliance, clean_setup_provider, provider, enable_candu):
    """Check the db is gathering collection data for the given provider.

    Wipes the metrics tables, then waits until fresh rows appear for the
    provider's VMs (and, for infra providers, its hosts).

    Metadata:
        test_flag: metrics_collection
    """
    metrics_tbl = appliance.db.client['metrics']
    rollups = appliance.db.client['metric_rollups']
    mgmt_systems_tbl = appliance.db.client['ext_management_systems']
    vm_name = provider.data['cap_and_util']['capandu_vm']
    collection = provider.appliance.provider_based_collection(provider)
    vm = collection.instantiate(vm_name, provider)
    if not vm.exists_on_provider:
        pytest.skip("Skipping test, cu-24x7 VM does not exist")
    # The VM must be running for the provider to report utilization data.
    vm.mgmt.ensure_state(VmState.RUNNING)
    logger.info("Deleting metrics tables")
    appliance.db.client.session.query(metrics_tbl).delete()
    appliance.db.client.session.query(rollups).delete()
    logger.info("Fetching provider ID for %s", provider.key)
    mgmt_system_id = appliance.db.client.session.query(mgmt_systems_tbl).filter(
        mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider.key]['name']
    ).first().id
    logger.info("ID fetched; testing metrics collection now")
    # VMs exist for both infra and cloud providers.
    wait_for(
        vm_count, [appliance, metrics_tbl, mgmt_system_id],
        delay=20,
        timeout=1500,
        fail_condition=False,
        message="wait for VMs")
    # Hosts exist only for infra providers.
    if provider.category == "infra":
        wait_for(
            # BUGFIX: previously waited on vm_count again, so host metrics
            # were never actually verified despite the message below.
            host_count, [appliance, metrics_tbl, mgmt_system_id],
            delay=20,
            timeout=1500,
            fail_condition=False,
            message="wait for hosts.")
def get_host_name(provider):
    """Pick a random host from the provider's yaml data and return its name."""
    chosen_host = random.choice(provider.data["hosts"])
    return chosen_host.name
def query_metric_db(appliance, provider, metric, vm_name=None, host_name=None):
    """Return all metrics rows for the named VM or host on ``provider``.

    ``vm_name`` wins when both are supplied (callers pass host names through
    the ``vm_name`` positional slot as well).

    NOTE(review): the ``metric`` argument is unused here; filtering on the
    metric column is left to the caller.
    """
    metrics_tbl = appliance.db.client['metrics']
    ems = appliance.db.client['ext_management_systems']
    # BUGFIX: the original only assigned object_name when vm_name was None
    # (and even then via an unreachable elif), so every real call raised
    # NameError. Pick whichever name was provided.
    if vm_name is not None:
        object_name = vm_name
    elif host_name is not None:
        object_name = host_name
    else:
        raise ValueError("Either vm_name or host_name must be provided")
    with appliance.db.client.transaction:
        provs = (
            appliance.db.client.session.query(metrics_tbl.id)
            .join(ems, metrics_tbl.parent_ems_id == ems.id)
            .filter(metrics_tbl.resource_name == object_name,
                    ems.name == provider.name)
        )
    return appliance.db.client.session.query(metrics_tbl).filter(
        metrics_tbl.id.in_(provs.subquery()))
@pytest.mark.meta(automates=[1671580, 1722937])
@pytest.mark.meta(
    blockers=[BZ(1744845, forced_streams=['5.10'],
                 unblock=lambda provider: not provider.one_of(AzureProvider))]
)
# Tests to check that specific metrics are being collected
def test_raw_metric_vm_cpu(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        casecomponent: CandU
        initialEstimate: 1/12h
    """
    vm_name = provider.data['cap_and_util']['capandu_vm']
    # Infra providers report CPU in MHz, cloud providers as a usage rate.
    if provider.category == "infra":
        metric_name = 'cpu_usagemhz_rate_average'
    elif provider.category == "cloud":
        metric_name = 'cpu_usage_rate_average'
    query = query_metric_db(appliance, provider, metric_name, vm_name)
    average_rate = attrgetter(metric_name)
    record = next((rec for rec in query if average_rate(rec) is not None), None)
    if record is not None:
        assert average_rate(record) > 0, 'Zero VM CPU Usage'
@pytest.mark.meta(automates=[1671580, 1722937])
@pytest.mark.meta(
    blockers=[BZ(1724415, forced_streams=['5.10', '5.11'],
                 unblock=lambda provider: not provider.one_of(AzureProvider))]
)
def test_raw_metric_vm_memory(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        caseimportance: medium
        casecomponent: CandU
        initialEstimate: 1/12h
    """
    vm_name = provider.data['cap_and_util']['capandu_vm']
    # Azure exposes memory as a percentage; everything else as absolute usage.
    if provider.type == 'azure':
        metric_name = 'mem_usage_absolute_average'
    else:
        metric_name = 'derived_memory_used'
    query = query_metric_db(appliance, provider, metric_name, vm_name)
    average_rate = attrgetter(metric_name)
    record = next((rec for rec in query if average_rate(rec) is not None), None)
    if record is not None:
        assert average_rate(record) > 0, 'Zero VM Memory Usage'
@pytest.mark.meta(automates=[1671580, 1722937])
@pytest.mark.meta(
    blockers=[BZ(1744845, forced_streams=['5.10'],
                 unblock=lambda provider: not provider.one_of(AzureProvider))]
)
def test_raw_metric_vm_network(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        initialEstimate: 1/12h
        casecomponent: CandU
    """
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'net_usage_rate_average', vm_name)
    record = next((rec for rec in query
                   if rec.net_usage_rate_average is not None), None)
    if record is not None:
        assert record.net_usage_rate_average > 0, 'Zero VM Network IO'
@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider, GCEProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope='module',
)
@pytest.mark.meta(
    blockers=[BZ(1744845, forced_streams=['5.10'],
                 unblock=lambda provider: not provider.one_of(AzureProvider))]
)
@pytest.mark.meta(automates=[1671580, 1722937])
def test_raw_metric_vm_disk(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        caseimportance: medium
        initialEstimate: 1/8h
        casecomponent: CandU
    """
    vm_name = provider.data['cap_and_util']['capandu_vm']
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average', vm_name)
    record = next((rec for rec in query
                   if rec.disk_usage_rate_average is not None), None)
    if record is not None:
        assert record.disk_usage_rate_average > 0, 'Zero VM Disk IO'
@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope='module',
)
def test_raw_metric_host_cpu(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        casecomponent: CandU
        initialEstimate: 1/12h
    """
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'cpu_usagemhz_rate_average', host_name)
    record = next((rec for rec in query
                   if rec.cpu_usagemhz_rate_average is not None), None)
    if record is not None:
        assert record.cpu_usagemhz_rate_average > 0, 'Zero Host CPU Usage'
@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope='module',
)
def test_raw_metric_host_memory(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        caseimportance: low
        casecomponent: CandU
        initialEstimate: 1/12h
    """
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'derived_memory_used', host_name)
    record = next((rec for rec in query
                   if rec.derived_memory_used is not None), None)
    if record is not None:
        assert record.derived_memory_used > 0, 'Zero Host Memory Usage'
@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope='module',
)
def test_raw_metric_host_network(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        initialEstimate: 1/12h
        casecomponent: CandU
    """
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'net_usage_rate_average', host_name)
    record = next((rec for rec in query
                   if rec.net_usage_rate_average is not None), None)
    if record is not None:
        assert record.net_usage_rate_average > 0, 'Zero Host Network IO'
@pytest.mark.provider(
    [VMwareProvider, RHEVMProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope='module',
)
def test_raw_metric_host_disk(metrics_collection, appliance, provider):
    """
    Polarion:
        assignee: gtalreja
        caseimportance: high
        casecomponent: CandU
        initialEstimate: 1/12h
    """
    host_name = get_host_name(provider)
    query = query_metric_db(appliance, provider, 'disk_usage_rate_average', host_name)
    record = next((rec for rec in query
                   if rec.disk_usage_rate_average is not None), None)
    if record is not None:
        assert record.disk_usage_rate_average > 0, 'Zero Host Disk IO'
def query_metric_rollup_table(appliance, provider, metric, azone_name):
    """
    Return every metric_rollups record for the availability zone named
    ``azone_name`` on ``provider``.

    NOTE(review): ``metric`` is unused here; filtering on the metric value is
    left to the caller.
    """
    rollups_tbl = appliance.db.client['metric_rollups']
    ems_tbl = appliance.db.client['ext_management_systems']
    with appliance.db.client.transaction:
        matching_ids = (
            appliance.db.client.session.query(rollups_tbl.id)
            .join(ems_tbl, rollups_tbl.parent_ems_id == ems_tbl.id)
            .filter(rollups_tbl.resource_name == azone_name,
                    ems_tbl.name == provider.name)
        )
    return appliance.db.client.session.query(rollups_tbl).filter(
        rollups_tbl.id.in_(matching_ids.subquery()))
def generic_test_azone_rollup(appliance, provider, metric):
    """
    The test_azone* tests require that the metrics_collection fixture be run.
    The first metric collection is scheduled through this fixture and itself
    takes at least 15 minutes (sometimes longer), so the three per-provider
    tests are not parametrized -- run together they finish faster.

    Checks whether any rollup record for the provider's availability zone has
    a non-zero value for ``metric``; raises if none does.
    """
    azone_name = provider.data["cap_and_util"]["capandu_azone"]
    query = query_metric_rollup_table(appliance, provider, metric, azone_name)
    if any(int(getattr(record, metric, 0)) for record in query):
        return True
    # BUGFIX: the original message ('The record had a zero in it!') described
    # the opposite of the failure -- this is raised when NO record is non-zero.
    raise ValueError(
        'No non-zero {} rollup records found for availability zone {}'.format(
            metric, azone_name))
@pytest.mark.provider(
    [EC2Provider, AzureProvider],
    required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')],
    scope="module"
)
@pytest.mark.meta(
    blockers=[BZ(1744845, forced_streams=['5.10'],
                 unblock=lambda provider: not provider.one_of(AzureProvider))]
)
class TestAzone:
    """Availability-zone rollup checks for cloud providers.

    BUGFIX: the test methods were missing ``self``. Pytest strips the first
    parameter of class test methods as the instance slot, so previously the
    ``metrics_collection`` fixture was silently never requested and the tests
    ran against an un-primed database.
    """

    def test_azone_cpu_usage(self, metrics_collection, appliance, provider):
        """
        Polarion:
            assignee: gtalreja
            caseimportance: high
            casecomponent: CandU
            initialEstimate: 1/12h
        """
        generic_test_azone_rollup(appliance, provider, 'cpu_usage_rate_average')

    def test_azone_memory_usage(self, metrics_collection, appliance, provider):
        """
        Polarion:
            assignee: gtalreja
            caseimportance: high
            casecomponent: CandU
            initialEstimate: 1/12h
        """
        generic_test_azone_rollup(appliance, provider, 'mem_usage_absolute_average')

    def test_azone_network_io(self, metrics_collection, appliance, provider):
        """
        Polarion:
            assignee: gtalreja
            caseimportance: high
            casecomponent: CandU
            initialEstimate: 1/12h
        """
        generic_test_azone_rollup(appliance, provider, 'net_usage_rate_average')

    def test_azone_disk_io(self, metrics_collection, appliance, provider):
        """
        Polarion:
            assignee: gtalreja
            caseimportance: high
            casecomponent: CandU
            initialEstimate: 1/12h
        """
        generic_test_azone_rollup(appliance, provider, 'disk_usage_rate_average')
@pytest.mark.ignore_stream("5.10")
@pytest.mark.meta(automates=[1741188])
def test_utilization_breadcrumbs(appliance):
    """
    Bugzilla:
        1741188

    Polarion:
        assignee: gtalreja
        casecomponent: CandU
        caseimportance: medium
        initialEstimate: 1/12h
    """
    utilization_view = navigate_to(appliance.collections.utilization, "All")
    expected_trail = BREADCRUMB_LOCATIONS["OverviewUtilization"]
    assert utilization_view.breadcrumb.locations == expected_trail
#ifndef NPY_HWY
#error "This is not a standalone header. Include simd.hpp instead."
#define NPY_HWY 1  // Prevent editors from graying out the happy branch
#endif

// Using anonymous namespace instead of inline to ensure each translation unit
// gets its own copy of constants based on local compilation flags
namespace {
// NOTE: This file is included by simd.hpp multiple times with different namespaces
// so avoid including any headers here

/**
 * Determines whether the specified lane type is supported by the SIMD extension.
 * Always defined as false when SIMD is not enabled, so it can be used in SFINAE.
 *
 * @tparam TLane The lane type to check for support.
 */
template <typename TLane>
constexpr bool kSupportLane = NPY_HWY != 0;

#if NPY_HWY
// Define lane type support based on Highway capabilities
template <>
constexpr bool kSupportLane<hwy::float16_t> = HWY_HAVE_FLOAT16 != 0;
template <>
constexpr bool kSupportLane<double> = HWY_HAVE_FLOAT64 != 0;
// long double is only supported where it aliases double (same size).
template <>
constexpr bool kSupportLane<long double> =
        HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double);

/// Maximum number of lanes supported by the SIMD extension for the specified lane type.
template <typename TLane>
constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag<TLane>);

/// Represents an N-lane vector based on the specified lane type.
/// @tparam TLane The scalar type for each vector lane
template <typename TLane>
using Vec = hn::Vec<_Tag<TLane>>;

/// Represents a mask vector with boolean values or as a bitmask.
/// @tparam TLane The scalar type the mask corresponds to
template <typename TLane>
using Mask = hn::Mask<_Tag<TLane>>;

/// Unaligned load of a vector from memory.
template <typename TLane>
HWY_API Vec<TLane>
LoadU(const TLane *ptr)
{
    return hn::LoadU(_Tag<TLane>(), ptr);
}

/// Unaligned store of a vector to memory.
template <typename TLane>
HWY_API void
StoreU(const Vec<TLane> &a, TLane *ptr)
{
    hn::StoreU(a, _Tag<TLane>(), ptr);
}

/// Returns the number of vector lanes based on the lane type.
/// @param tag dummy value; only its type participates (selects _Tag<TLane>).
template <typename TLane>
HWY_API HWY_LANES_CONSTEXPR size_t
Lanes(TLane tag = 0)
{
    return hn::Lanes(_Tag<TLane>());
}

/// Returns an uninitialized N-lane vector.
/// @param tag dummy value; only its type participates (selects _Tag<TLane>).
template <typename TLane>
HWY_API Vec<TLane>
Undefined(TLane tag = 0)
{
    return hn::Undefined(_Tag<TLane>());
}

/// Returns N-lane vector with all lanes equal to zero.
/// @param tag dummy value; only its type participates (selects _Tag<TLane>).
template <typename TLane>
HWY_API Vec<TLane>
Zero(TLane tag = 0)
{
    return hn::Zero(_Tag<TLane>());
}

/// Returns N-lane vector with all lanes equal to the given value of type `TLane`.
template <typename TLane>
HWY_API Vec<TLane>
Set(TLane val)
{
    return hn::Set(_Tag<TLane>(), val);
}

/// Converts a mask to a vector based on the specified lane type.
template <typename TLane, typename TMask>
HWY_API Vec<TLane>
VecFromMask(const TMask &m)
{
    return hn::VecFromMask(_Tag<TLane>(), m);
}

/// Convert (Reinterpret) an N-lane vector to a different type without modifying the
/// underlying data.
template <typename TLaneTo, typename TVec>
HWY_API Vec<TLaneTo>
BitCast(const TVec &v)
{
    return hn::BitCast(_Tag<TLaneTo>(), v);
}

// Import common Highway intrinsics (arithmetic, bitwise, comparison) unchanged.
using hn::Abs;
using hn::Add;
using hn::And;
using hn::AndNot;
using hn::Div;
using hn::Eq;
using hn::Ge;
using hn::Gt;
using hn::Le;
using hn::Lt;
using hn::Max;
using hn::Min;
using hn::Mul;
using hn::Or;
using hn::Sqrt;
using hn::Sub;
using hn::Xor;

#endif // NPY_HWY

}  // namespace
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import abc
from opsimsummary import Tiling, HealpixTiles
from analyzeSN import LightCurve
from .universe import Universe
from .paramDistribution import SimpleSALTDist
import os
import numpy as np
import pandas as pd
from lsst.sims.photUtils import BandpassDict
from lsst.sims.catUtils.supernovae import SNObject
# Public API of this module.
__all__ = ['SimulationTile', 'EntireSimulation', 'TiledSimulation']
# Map single-letter LSST band names ('u'..'y') to their 'lsst*'-prefixed names.
simBandNameDict = dict((x, 'lsst' + x) for x in 'ugrizy')
class Photometry(object):
    """
    Temporary class standing in for the Photometry class in AnalyzeSN which is
    currently in a branch
    """
    def __init__(self):
        pass

    @staticmethod
    def pair_method(obsHistID, snid, maxObsHistID):
        """Fold an observation id and a SN id into one unique integer id."""
        base = snid * maxObsHistID
        return base + obsHistID
class EntireSimulation(Universe):
    """
    Simulation of a set of SN from a set of telescope pointings
    and a set of SN. The simulation is perfectly reproducible if
    both the pointings, paramsDF are the same (in terms of ordering)

    Parameters
    -----------
    rng : instance of `numpy.random.RandomState`
    pointings: instance of `pd.DataFrame`
        dataFrame with a minimal set of columns
        [`expMJD`, `filter`, `fiveSigmaDepth`]
    paramsDF : `pd.DataFrame`
        the minimal set of columns are
        [`snid`, `x0`, `t0`, `x1` , `c` , `snra`, `sndec`]

    Attributes
    ----------
    randomState : `numpy.random.RandomState`
    snParams : `pd.DataFrame`
    """
    def __init__(self, rng, pointings, paramsDF, angularUnits='degrees',
                 maxObsHistID=None):
        self.pointings = pointings
        self._paramsDf = paramsDF
        self._rng = rng
        self.angularUnits = angularUnits
        self.bandPasses = BandpassDict.loadTotalBandpassesFromFiles()
        self.maxObsHistID = maxObsHistID

    @property
    def randomState(self):
        """`numpy.random.RandomState` used for the flux noise realizations."""
        return self._rng

    @property
    def snParams(self):
        """`pd.DataFrame` of SN parameters, indexed by snid."""
        return self._paramsDf

    @staticmethod
    def getSNCosmoParamDict(odict, SNCosmoModel):
        """Return the subset of `odict` whose keys are parameters of `SNCosmoModel`."""
        mydict = dict()
        param_names = SNCosmoModel.param_names
        for param in odict.index.values:
            if param in param_names:
                mydict[param] = odict[param]
        return mydict

    def SN(self, snid, timeRange='model'):
        """Return an `SNObject` for SN `snid` with position and model parameters set."""
        mySNParams = self.snParams.ix[snid]
        if self.angularUnits == 'radians':
            # BUGFIX: was `np.degees`, an AttributeError whenever
            # angularUnits == 'radians'.
            myra = np.degrees(mySNParams.snra)
            mydec = np.degrees(mySNParams.sndec)
        elif self.angularUnits == 'degrees':
            myra = mySNParams.snra
            mydec = mySNParams.sndec
        sn = SNObject(ra=myra, dec=mydec)
        sncosmo_params = self.getSNCosmoParamDict(mySNParams, sn)
        sn.set(**sncosmo_params)
        return sn

    def lc(self, snid, maxObsHistID=None):
        """Simulate and return the `LightCurve` of SN `snid` over the pointings.

        Parameters
        ----------
        snid : index into `self.snParams`
        maxObsHistID : int, optional, defaults to `self.maxObsHistID`
            used to build unique `diaID` values via `Photometry.pair_method`;
            requires an `obsHistID` column in the pointings.
        """
        if maxObsHistID is None:
            maxObsHistID = self.maxObsHistID
        sn = self.SN(snid, timeRange='model')
        lcMinTime = sn.mintime()
        lcMaxTime = sn.maxtime()
        # Restrict to pointings during the time the model is defined, if known.
        if lcMinTime is None or lcMaxTime is None:
            df = self.pointings.copy()
        else:
            df = self.pointings.query('expMJD < @lcMaxTime and expMJD > @lcMinTime').copy().reset_index()
        if maxObsHistID is None or ('obsHistID' in df.columns):
            pass
        else:
            raise ValueError('Cannot index if obsHistID column not provided')
        if maxObsHistID is not None:
            idx = Photometry.pair_method(df.obsHistID.values,
                                         snid,
                                         maxObsHistID)
        else:
            idx = np.ones(len(df))
        fluxerr = np.zeros(len(df))
        modelFlux = np.zeros(len(df))
        for i, rowtuple in enumerate(df.iterrows()):
            row = rowtuple[1]
            bp = self.bandPasses[row['filter']]
            modelFlux[i] = self.staticModelFlux(sn, row['expMJD'],
                                                bandpassobject=bp)
            fluxerr[i] = sn.catsimBandFluxError(time=row['expMJD'],
                                                bandpassobject=bp,
                                                fluxinMaggies=modelFlux[i],
                                                m5=row['fiveSigmaDepth'])
        # Add Gaussian noise scaled by the per-observation flux error.
        rng = self.randomState
        df['fluxerr'] = fluxerr
        deviations = rng.normal(size=len(df))
        df['deviations'] = deviations
        df['zp'] = 0.
        df['ModelFlux'] = modelFlux
        df['snid'] = snid
        df['flux'] = df['ModelFlux'] + df['deviations'] * df['fluxerr']
        df['zpsys'] = 'ab'
        df['diaID'] = idx
        lc = df[['diaID', 'snid', 'expMJD', 'filter', 'ModelFlux', 'flux', 'fluxerr',
                 'zp', 'zpsys', 'fieldID']]
        return LightCurve(lc)

    @staticmethod
    def staticModelFlux(sn, time, bandpassobject):
        """Noise-free model flux (maggies) of `sn` at `time` through `bandpassobject`."""
        return sn.catsimBandFlux(bandpassobject=bandpassobject,
                                 time=time)

    def modelFlux(self, snid, time, bandpassobject):
        """Noise-free model flux of SN `snid` at `time` through `bandpassobject`."""
        sn = self.SN(snid)
        return self.staticModelFlux(sn, time=time, bandpassobject=bandpassobject)

    def writeSNParams(self, paramFileName, IDVal=0):
        """
        Write the dataframe `self.snParams` to a file

        Parameters
        ----------
        paramFileName : Instance of string
            paramFileName
        IDVal : integer
            used as a key to write a group
        """
        if paramFileName.endswith('.hdf'):
            self.snParams.to_hdf(paramFileName, key='{}'.format(IDVal))
        else:
            raise NotImplementedError('Only methods to write to hdf files'
                                      'implemented')

    def writeSN(self, snid, fileName, IDVal=0, timeRange='model'):
        """
        Write light curve of SN to disc

        Parameters
        ----------
        snid : int/string
            SN id of SN
        fileName : string, mandatory
        timeRange : string, optional, defaults to model
            time range over which the light curve is written to disk
        """
        lc = self.lc(snid)
        df = lc.lightCurve
        df['band'] = df['band'].astype(str)
        with pd.get_store(fileName) as store:
            store.append('tile_{}'.format(IDVal), df)
class TiledSimulation(EntireSimulation):
    """`EntireSimulation` restricted to the pointings overlapping one healpix tile."""
    def __init__(self,
                 paramDF,
                 NSIDE,
                 tileID,
                 hpOpSim,
                 rng=None,
                 allPointings=None,
                 timeRange=None):
        """
        Parameters
        ----------
        paramDF : `pd.DataFrame` of SN parameters, passed through to
            `EntireSimulation`
        NSIDE : healpix resolution parameter for the tiling
        tileID : id of the healpix tile to simulate
        hpOpSim : precomputed healpix/OpSim map consumed by `HealpixTiles`
        rng : `numpy.random.RandomState`, optional; defaults to one seeded
            with `tileID` so each tile is reproducible
        allPointings : optional pointing set to restrict to this tile
        timeRange : unused here -- TODO confirm intended use
        """
        self.tileID = tileID
        self._randomState = rng
        if self._randomState is None:
            # Seed with the tile id so a tile's simulation is reproducible.
            self._randomState = np.random.RandomState(self.tileID)
        self.Tiling = HealpixTiles(nside=NSIDE, preComputedMap=hpOpSim)
        self.fieldArea = self.Tiling.area(self.tileID)
        self.columns = ('expMJD', 'filter', 'fieldID', 'fiveSigmaDepth')
        self.tilePointings = self.Tiling.pointingSequenceForTile(self.tileID,
                                                                 allPointings=allPointings,
                                                                 columns=self.columns)
        super(TiledSimulation, self).__init__(rng=self._randomState,
                                              pointings=self.tilePointings,
                                              paramsDF=paramDF)
class SimulationTile(Universe):
    """Self-contained SN simulation for a single healpix tile.

    Unlike `TiledSimulation`, this class also draws the SN population itself:
    redshifts from `rate`, positions from the tiling, and SALT parameters from
    `SimpleSALTDist`. NOTE(review): `paramDist` and `angularUnits` are accepted
    but never used in this class -- confirm whether they should be wired in.
    """
    def __init__(self,
                 paramDist,
                 rate,
                 NSIDE,
                 tileID,
                 hpOpSim,
                 allPointings=None,
                 timeRange=None,
                 angularUnits='radians'):
        self.Tiling = HealpixTiles(nside=NSIDE, preComputedMap=hpOpSim)
        self.tileID = tileID
        # Seed with the tile id so a tile's simulation is reproducible.
        self._randomState = np.random.RandomState(self.tileID)
        self.fieldArea = self.Tiling.area(tileID)
        self.zdist = rate(rng=self.randomState, fieldArea=self.fieldArea)
        self.zsamples = self.zdist.zSamples
        self.numSN = len(self.zsamples)
        self.positions = self.Tiling.positions(self.tileID, self.numSN,
                                               rng=self.randomState)
        self._snParamTable = None
        self.columns = ('expMJD', 'filter', 'fieldID', 'fiveSigmaDepth')
        self.tilePointings = self.Tiling.pointingSequenceForTile(self.tileID,
                                                                 allPointings=allPointings,
                                                                 columns=self.columns)
        self._timeRange = timeRange
        self.bandPasses = BandpassDict.loadTotalBandpassesFromFiles()

    @property
    def minPeakTime(self):
        # Earliest allowed t0: explicit range if given, else first pointing.
        if self._timeRange is None:
            minTime = self.tilePointings.expMJD.min()
        else:
            minTime = self._timeRange[0]
        return minTime

    @property
    def maxPeakTime(self):
        # Latest allowed t0: explicit range if given, else last pointing.
        if self._timeRange is None:
            maxTime = self.tilePointings.expMJD.max()
        else:
            maxTime = self._timeRange[1]
        return maxTime

    @property
    def snParamTable(self):
        # Lazily built; snParams() caches into self._snParamTable.
        if self._snParamTable is None:
            self.snParams()
        return self._snParamTable

    @property
    def randomState(self):
        if self._randomState is None:
            self._randomState = np.random.RandomState(self.tileID)
        return self._randomState

    def snParams(self):
        """Draw and cache the SN parameter table for this tile (also returned)."""
        zsamples = self.zdist.zSamples
        numSN = len(zsamples)
        positions = self.Tiling.positions(self.tileID, numSN,
                                          rng=self.randomState)
        # NOTE(review): `positions`, `ra` and `dec` below are computed but
        # never used; sp['ra']/sp['dec'] come from self.positions directly.
        ra = self.positions[0]
        dec = - self.positions[1] + 45.0
        # Why do we need numSN
        sp = SimpleSALTDist(numSN=numSN, rng=self.randomState,
                            mjdmin=self.minPeakTime,
                            zSamples=self.zsamples).paramSamples
        sp['ra'] = self.positions[0]
        sp['dec'] = self.positions[1]
        # snid encodes the tile: tileID * 500 + running index.
        sp['snid'] = self.tileID * 500.0 + np.arange(numSN)
        sp.set_index('snid', inplace=True)
        self._snParamTable = sp
        # if self.minPeakTime is None or self.maxPeakTime is None:
        #     pass
        # else:
        #     sp['t0'] = self.minPeakTime + \
        #         (self.maxPeakTime - self.minPeakTime) * sp['t0']
        return sp

    @staticmethod
    def getSNCosmoParamDict(odict, SNCosmoModel):
        """Return the subset of `odict` whose keys are parameters of `SNCosmoModel`."""
        mydict = dict()
        param_names = SNCosmoModel.param_names
        for param in odict.index.values:
            if param in param_names:
                mydict[param] = odict[param]
        return mydict

    def SN(self, snid, timeRange='model'):
        """Return an `SNObject` for SN `snid` with position and model parameters set."""
        mySNParams = self.snParamTable.ix[snid]
        sn = SNObject(ra=mySNParams.ra, dec=mySNParams.dec)
        sncosmo_params = self.getSNCosmoParamDict(mySNParams, sn)
        sn.set(**sncosmo_params)
        # NOTE(review): z and t0 are read but unused (leftover from the
        # commented-out time-window computation below).
        z = sn.get('z')
        t0 = sn.get('t0')
        # lcMinTime = t0 - 20. * (1.0 + z)
        # lcMaxTime = t0 + 50. * (1.0 + z )
        return sn

    @staticmethod
    def staticModelFlux(sn, time, bandpassobject):
        """Noise-free model flux of `sn` at `time` through `bandpassobject`."""
        return sn.catsimBandFlux(bandpassobject=bandpassobject,
                                 time=time)

    def modelFlux(self, snid, time, bandpassobject):
        """Noise-free model flux of SN `snid` at `time` through `bandpassobject`."""
        # assert len(times) == len(bands)
        # flux = np.zeros(len(times))
        # flux = np.asarray(list(self.SN(snid).catsimBandFlux(...)))
        return self.SN(snid).catsimBandFlux(bandpassobject=bandpassobject,
                                            time=time)

    def lc(self, snid):
        """Simulate and return the `LightCurve` of SN `snid` over the tile pointings."""
        sn = self.SN(snid, timeRange='model')
        lcMinTime = sn.mintime()
        lcMaxTime = sn.maxtime()
        # Restrict to pointings during the time the model is defined, if known.
        if lcMinTime is None or lcMaxTime is None:
            df = self.tilePointings.copy()
        else:
            df = self.tilePointings.query('expMJD < @lcMaxTime and expMJD > @lcMinTime').copy()
        df['snid'] = snid
        fluxerr = np.zeros(len(df))
        modelFlux = np.zeros(len(df))
        for i, rowtuple in enumerate(df.iterrows()):
            row = rowtuple[1]
            bp = self.bandPasses[row['filter']]
            modelFlux[i] = self.staticModelFlux(sn, row['expMJD'],
                                                bandpassobject=bp)
            fluxerr[i] = sn.catsimBandFluxError(time=row['expMJD'],
                                                bandpassobject=bp,
                                                fluxinMaggies=modelFlux[i],
                                                m5=row['fiveSigmaDepth'])
        # Add Gaussian noise scaled by the per-observation flux error.
        rng = self.randomState
        df['fluxerr'] = fluxerr
        deviations = rng.normal(size=len(df))
        df['deviations'] = deviations
        df['zp'] = 0.
        df['ModelFlux'] = modelFlux
        df['flux'] = df['ModelFlux'] + df['deviations'] * df['fluxerr']
        df['zpsys']= 'ab'
        lc = df[['snid', 'expMJD', 'filter', 'ModelFlux', 'flux', 'fluxerr',
                 'zp', 'zpsys', 'fieldID']]
        return LightCurve(lc, bandNameDict=simBandNameDict)

    def writeTile(self, fileName, timeRange='model', paramFileName=None):
        """Write every SN light curve of the tile to `fileName`, then the params.

        If `paramFileName` is None it is derived from `fileName` by appending
        '_params' before the extension.
        """
        count = 0
        for snid in self.snParamTable.index.values:
            self.writeSN(snid, fileName, timeRange=timeRange)
            # Progress report every 50 SN.
            # NOTE(review): the inner `pass` is a no-op, so this also prints
            # at count == 0 -- possibly `continue` was intended.
            if count % 50 == 0:
                if count == 0:
                    pass
                print('another 50', snid)
            count += 1
        if paramFileName is None:
            filename_parts = fileName.split('.')
            filename_parts[-2] += '_params'
            paramFileName = '.'.join(filename_parts)
        self.writeSNParams(paramFileName)

    def writeSNParams(self, paramFileName):
        """Write the SN parameter table to an hdf file under key 'tile_id'."""
        if paramFileName.endswith('.hdf'):
            self.snParamTable.to_hdf(paramFileName, key='{}'.format(self.tileID))
        else:
            raise NotImplementedError('Only methods to write to hdf files'
                                      'implemented')

    def writeSN(self, snid, fileName, timeRange='model'):
        """
        Write light curve of SN to disc

        Parameters
        ----------
        snid : int/string
            SN id of SN
        fileName : string, mandatory
        timeRange : string, optional, defaults to model
            time range over which the light curve is written to disk
        """
        lc = self.lc(snid)
        df = lc.lightCurve
        df['band'] = df['band'].astype(str)
        with pd.get_store(fileName) as store:
            store.append('tile_{}'.format(self.tileID), df)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#######################################################
# This script wraps simple timeseries analysis tools
# and access to the Gnip Search API into a simple tool
# to help the analysis quickly iterate on filters
# a and understand time series trend and events.
#
# If you find this useful or find a bug you don't want
# to fix for yourself, please let me know at @drskippy
#######################################################
__author__="Scott Hendrickson"
import ConfigParser
import argparse
import calendar
import codecs
import csv
import datetime
import json
import logging
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import statsmodels.api as sm
import string
import sys
import time
from functools import partial
from operator import itemgetter
from scipy import signal
from search.results import *
# Python 2 idiom: force UTF-8 on stdin/stdout so tweet text round-trips.
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)

# basic defaults
FROM_PICKLE = False
DEFAULT_CONFIG_FILENAME = os.path.join(".",".gnip")
DATE_FMT = "%Y%m%d%H%M"
DATE_FMT2 = "%Y-%m-%dT%H:%M:%S"
LOG_FILE_PATH = os.path.join(".","time_series.log")

# set up simple logging
logging.basicConfig(filename=LOG_FILE_PATH,level=logging.DEBUG)
logging.info("#"*70)
logging.info("################# started {} #################".format(datetime.datetime.now()))

# tunable defaults
CHAR_UPPER_CUTOFF = 20 # don't include tokens longer than CHAR_UPPER_CUTOFF
TWEET_SAMPLE = 4000 # tweets to collect for peak topics
MIN_SNR = 2.0 # signal to noise threshold for peak detection
MAX_N_PEAKS = 7 # maximum number of peaks to output
MAX_PEAK_WIDTH = 20 # max peak width in periods
MIN_PEAK_WIDTH = 1 # min peak width in periods
SEARCH_PEAK_WIDTH = 3 # window width (periods) used when searching for peaks -- TODO confirm
N_MOVING = 4 # average over buckets
OUTLIER_FRAC = 0.8 # cut off values over 80% above or below the average
PLOTS_PREFIX = os.path.join(".","plots")
PLOT_DELTA_Y = 1.2 # spacing of y values in dotplot
# Record the effective tunables so a run's log is self-describing.
logging.debug("CHAR_UPPER_CUTOFF={},TWEET_SAMPLE={},MIN_SNR={},MAX_N_PEAKS={},MAX_PEAK_WIDTH={},MIN_PEAK_WIDTH={},SEARCH_PEAK_WIDTH={},N_MOVING={},OUTLIER_FRAC={},PLOTS_PREFIX={},PLOT_DELTA_Y={}".format(
    CHAR_UPPER_CUTOFF
    , TWEET_SAMPLE
    , MIN_SNR
    , MAX_N_PEAKS
    , MAX_PEAK_WIDTH
    , MIN_PEAK_WIDTH
    , SEARCH_PEAK_WIDTH
    , N_MOVING
    , OUTLIER_FRAC
    , PLOTS_PREFIX
    , PLOT_DELTA_Y ))
class TimeSeries():
    """Container class for data collected from the API and associated analysis outputs.

    Attributes (dates, x, counts, counts_no_cycle_trend, moving, top_peaks,
    cycle, trend, topics, and optionally second_counts) are attached
    dynamically by GnipSearchTimeseries.get_results().
    """
    pass
class GnipSearchTimeseries():
def __init__(self, token_list_size=40):
"""Retrieve and analysis timesseries and associated interesting trends, spikes and tweet content."""
# default tokenizer and character limit
char_upper_cutoff = CHAR_UPPER_CUTOFF
self.token_list_size = int(token_list_size)
#############################################
# CONFIG FILE/COMMAND LINE OPTIONS PATTERN
# parse config file
config_from_file = self.config_file()
# set required fields to None. Sequence of setting is:
# (1) config file
# (2) command line
# if still none, then fail
self.user = None
self.password = None
self.stream_url = None
if config_from_file is not None:
try:
# command line options take presidence if they exist
self.user = config_from_file.get('creds', 'un')
self.password = config_from_file.get('creds', 'pwd')
self.stream_url = config_from_file.get('endpoint', 'url')
except (ConfigParser.NoOptionError,
ConfigParser.NoSectionError) as e:
logging.warn("Error reading configuration file ({}), ignoring configuration file.".format(e))
# parse the command line options
self.options = self.args().parse_args()
self.options.filter = self.options.filter.decode("utf-8")
self.options.second_filter = self.options.second_filter.decode("utf-8")
# set up the job
# over ride config file with command line args if present
if self.options.user is not None:
self.user = self.options.user
if self.options.password is not None:
self.password = self.options.password
if self.options.stream_url is not None:
self.stream_url = self.options.stream_url
# search v2 uses a different url
if "data-api.twitter.com" not in self.stream_url:
logging.error("gnipSearch timeline tools require Search V2. Exiting.")
sys.stderr.write("gnipSearch timeline tools require Search V2. Exiting.\n")
sys.exit(-1)
# set some options that should not be changed for this anaysis
self.options.paged = True
self.options.search_v2 = True
self.options.max = 500
self.options.query = False
# check paths
if self.options.output_file_path is not None:
if not os.path.exists(self.options.output_file_path):
logging.error("Path {} doesn't exist. Please create it and try again. Exiting.".format(
self.options.output_file_path))
sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format(
self.options.output_file_path))
sys.exit(-1)
if not os.path.exists(PLOTS_PREFIX):
logging.error("Path {} doesn't exist. Please create it and try again. Exiting.".format(
PLOTS_PREFIX))
sys.stderr.write("Path {} doesn't exist. Please create it and try again. Exiting.\n".format(
PLOTS_PREFIX))
sys.exit(-1)
# log the attributes of this class including all of the options
for v in dir(self):
# except don't log the password!
if not v.startswith('__') and not callable(getattr(self,v)) and not v.lower().startswith('password'):
tmp = str(getattr(self,v))
tmp = re.sub("password=.*,", "password=XXXXXXX,", tmp)
logging.debug(" {}={}".format(v, tmp))
    def config_file(self):
        """Search for a valid config file in the standard locations.

        Lookup order: (1) ./.gnip, then (2) the file named by the
        GNIP_CONFIG_FILE environment variable.  Returns the parsed
        ConfigParser only when both a [creds] and an [endpoint] section are
        present; otherwise logs a warning and returns None.
        """
        config = ConfigParser.ConfigParser()
        # (1) default file name takes precedence
        config.read(DEFAULT_CONFIG_FILENAME)
        logging.info("attempting to read config file {}".format(DEFAULT_CONFIG_FILENAME))
        if not config.has_section("creds"):
            # (2) environment variable file name second
            if 'GNIP_CONFIG_FILE' in os.environ:
                config_filename = os.environ['GNIP_CONFIG_FILE']
                logging.info("attempting to read config file {}".format(config_filename))
                config.read(config_filename)
        if config.has_section("creds") and config.has_section("endpoint"):
            return config
        else:
            # a file with creds but no endpoint (or vice versa) is ignored
            logging.warn("no creds or endpoint section found in config file, attempting to proceed without config info from file")
            return None
def args(self):
"Set up the command line argments and the associated help strings."""
twitter_parser = argparse.ArgumentParser(
description="GnipSearch timeline tools")
twitter_parser.add_argument("-b", "--bucket", dest="count_bucket",
default="day",
help="Bucket size for counts query. Options are day, hour, minute (default is 'day').")
twitter_parser.add_argument("-e", "--end-date", dest="end",
default=None,
help="End of datetime window, format 'YYYY-mm-DDTHH:MM' (default: most recent activities)")
twitter_parser.add_argument("-f", "--filter", dest="filter",
default="from:jrmontag OR from:gnip",
help="PowerTrack filter rule (See: http://support.gnip.com/customer/portal/articles/901152-powertrack-operators)")
twitter_parser.add_argument("-g", "--second_filter", dest="second_filter",
default=None,
help="Use a second filter to show correlation plots of -f timeline vs -g timeline.")
twitter_parser.add_argument("-l", "--stream-url", dest="stream_url",
default=None,
help="Url of search endpoint. (See your Gnip console.)")
twitter_parser.add_argument("-p", "--password", dest="password", default=None,
help="Password")
twitter_parser.add_argument("-s", "--start-date", dest="start",
default=None,
help="Start of datetime window, format 'YYYY-mm-DDTHH:MM' (default: 30 days ago)")
twitter_parser.add_argument("-u", "--user-name", dest="user",
default=None,
help="User name")
twitter_parser.add_argument("-t", "--get-topics", dest="get_topics", action="store_true",
default=False,
help="Set flag to evaluate peak topics (this may take a few minutes)")
twitter_parser.add_argument("-w", "--output-file-path", dest="output_file_path",
default=None,
help="Create files in ./OUTPUT-FILE-PATH. This path must exists and will not be created. This options is available only with -a option. Default is no output files.")
return twitter_parser
def get_results(self):
"""Execute API calls to the timeseries data and tweet data we need for analysis. Perform analysis
as we go because we often need results for next steps."""
######################
# (1) Get the timeline
######################
logging.info("retrieving timeline counts")
results_timeseries = Results( self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=self.options.count_bucket
, show_query=self.options.query
, search_v2=self.options.search_v2
)
# sort by date
res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0))
# calculate total time interval span
time_min_date = min(res_timeseries, key = itemgetter(2))[2]
time_max_date = max(res_timeseries, key = itemgetter(2))[2]
time_min = float(calendar.timegm(time_min_date.timetuple()))
time_max = float(calendar.timegm(time_max_date.timetuple()))
time_span = time_max - time_min
logging.debug("time_min = {}, time_max = {}, time_span = {}".format(time_min, time_max, time_span))
# create a simple object to hold our data
ts = TimeSeries()
ts.dates = []
ts.x = []
ts.counts = []
# load and format data
for i in res_timeseries:
ts.dates.append(i[2])
ts.counts.append(float(i[1]))
# create a independent variable in interval [0.0,1.0]
ts.x.append((calendar.timegm(datetime.datetime.strptime(i[0], DATE_FMT).timetuple()) - time_min)/time_span)
logging.info("read {} time items from search API".format(len(ts.dates)))
if len(ts.dates) < 35:
logging.warn("peak detection with with fewer than ~35 points is unreliable!")
logging.debug('dates: ' + ','.join(map(str, ts.dates[:10])) + "...")
logging.debug('counts: ' + ','.join(map(str, ts.counts[:10])) + "...")
logging.debug('indep var: ' + ','.join(map(str, ts.x[:10])) + "...")
######################
# (1.1) Get a second timeline?
######################
if self.options.second_filter is not None:
logging.info("retrieving second timeline counts")
results_timeseries = Results( self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.second_filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=self.options.count_bucket
, show_query=self.options.query
, search_v2=self.options.search_v2
)
# sort by date
second_res_timeseries = sorted(results_timeseries.get_time_series(), key = itemgetter(0))
if len(second_res_timeseries) != len(res_timeseries):
logging.error("time series of different sizes not allowed")
else:
ts.second_counts = []
# load and format data
for i in second_res_timeseries:
ts.second_counts.append(float(i[1]))
logging.info("read {} time items from search API".format(len(ts.second_counts)))
logging.debug('second counts: ' + ','.join(map(str, ts.second_counts[:10])) + "...")
######################
# (2) Detrend and remove prominent period
######################
logging.info("detrending timeline counts")
no_trend = signal.detrend(np.array(ts.counts))
# determine period of data
df = (ts.dates[1] - ts.dates[0]).total_seconds()
if df == 86400:
# day counts, average over week
n_buckets = 7
n_avgs = {i:[] for i in range(n_buckets)}
for t,c in zip(ts.dates, no_trend):
n_avgs[t.weekday()].append(c)
elif df == 3600:
# hour counts, average over day
n_buckets = 24
n_avgs = {i:[] for i in range(n_buckets)}
for t,c in zip(ts.dates, no_trend):
n_avgs[t.hour].append(c)
elif df == 60:
# minute counts; average over day
n_buckets = 24*60
n_avgs = {i:[] for i in range(n_buckets)}
for t,c in zip(ts.dates, no_trend):
n_avgs[t.minute].append(c)
else:
sys.stderr.write("Weird interval problem! Exiting.\n")
logging.error("Weird interval problem! Exiting.\n")
sys.exit()
logging.info("averaging over periods of {} buckets".format(n_buckets))
# remove upper outliers from averages
df_avg_all = {i:np.average(n_avgs[i]) for i in range(n_buckets)}
logging.debug("bucket averages: {}".format(','.join(map(str, [df_avg_all[i] for i in df_avg_all]))))
n_avgs_remove_outliers = {i: [j for j in n_avgs[i]
if abs(j - df_avg_all[i])/df_avg_all[i] < (1. + OUTLIER_FRAC) ]
for i in range(n_buckets)}
df_avg = {i:np.average(n_avgs_remove_outliers[i]) for i in range(n_buckets)}
logging.debug("bucket averages w/o outliers: {}".format(','.join(map(str, [df_avg[i] for i in df_avg]))))
# flatten cycle
ts.counts_no_cycle_trend = np.array([no_trend[i] - df_avg[ts.dates[i].hour] for i in range(len(ts.counts))])
logging.debug('no trend: ' + ','.join(map(str, ts.counts_no_cycle_trend[:10])) + "...")
######################
# (3) Moving average
######################
ts.moving = np.convolve(ts.counts, np.ones((N_MOVING,))/N_MOVING, mode='valid')
logging.debug('moving ({}): '.format(N_MOVING) + ','.join(map(str, ts.moving[:10])) + "...")
######################
# (4) Peak detection
######################
peakind = signal.find_peaks_cwt(ts.counts_no_cycle_trend, np.arange(MIN_PEAK_WIDTH, MAX_PEAK_WIDTH), min_snr = MIN_SNR)
n_peaks = min(MAX_N_PEAKS, len(peakind))
logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, peakind)))
logging.debug('peaks ({}): '.format(n_peaks) + ','.join(map(str, [ts.dates[i] for i in peakind])))
# top peaks determined by peak volume, better way?
# peak detector algorithm:
# * middle of peak (of unknown width)
# * finds peaks up to MAX_PEAK_WIDTH wide
#
# algorithm for geting peak start, peak and end parameters:
# find max, find fwhm,
# find start, step past peak, keep track of volume and peak height,
# stop at end of period or when timeseries turns upward
peaks = []
for i in peakind:
# find the first max in the possible window
i_start = max(0, i - SEARCH_PEAK_WIDTH)
i_finish = min(len(ts.counts) - 1, i + SEARCH_PEAK_WIDTH)
p_max = max(ts.counts[i_start:i_finish])
h_max = p_max/2.
# i_max not center
i_max = i_start + ts.counts[i_start:i_finish].index(p_max)
i_start, i_finish = i_max, i_max
# start at peak, and go back and forward to find start and end
while i_start >= 1:
if (ts.counts[i_start - 1] <= h_max or
ts.counts[i_start - 1] >= ts.counts[i_start] or
i_start - 1 <= 0):
break
i_start -= 1
while i_finish < len(ts.counts) - 1:
if (ts.counts[i_finish + 1] <= h_max or
ts.counts[i_finish + 1] >= ts.counts[i_finish] or
i_finish + 1 >= len(ts.counts)):
break
i_finish += 1
# i is center of peak so balance window
delta_i = max(1, i - i_start)
if i_finish - i > delta_i:
delta_i = i_finish - i
# final est of start and finish
i_finish = min(len(ts.counts) - 1, i + delta_i)
i_start = max(0, i - delta_i)
p_volume = sum(ts.counts[i_start:i_finish])
peaks.append([ i , p_volume , (i, i_start, i_max, i_finish
, h_max , p_max, p_volume
, ts.dates[i_start], ts.dates[i_max], ts.dates[i_finish])])
# top n_peaks by volume
top_peaks = sorted(peaks, key = itemgetter(1))[-n_peaks:]
# re-sort peaks by date
ts.top_peaks = sorted(top_peaks, key = itemgetter(0))
logging.debug('top peaks ({}): '.format(len(ts.top_peaks)) + ','.join(map(str, ts.top_peaks[:4])) + "...")
######################
# (5) high/low frequency
######################
ts.cycle, ts.trend = sm.tsa.filters.hpfilter(np.array(ts.counts))
logging.debug('cycle: ' + ','.join(map(str, ts.cycle[:10])) + "...")
logging.debug('trend: ' + ','.join(map(str, ts.trend[:10])) + "...")
######################
# (6) n-grams for top peaks
######################
ts.topics = []
if self.options.get_topics:
logging.info("retrieving tweets for peak topics")
for a in ts.top_peaks:
# start at peak
ds = datetime.datetime.strftime(a[2][8], DATE_FMT2)
# estimate how long to get TWEET_SAMPLE tweets
# a[1][5] is max tweets per period
if a[2][5] > 0:
est_periods = float(TWEET_SAMPLE)/a[2][5]
else:
logging.warn("peak with zero max tweets ({}), setting est_periods to 1".format(a))
est_periods = 1
# df comes from above, in seconds
# time resolution is hours
est_time = max(int(est_periods * df), 60)
logging.debug("est_periods={}, est_time={}".format(est_periods, est_time))
#
if a[2][8] + datetime.timedelta(seconds=est_time) < a[2][9]:
de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=est_time), DATE_FMT2)
elif a[2][8] < a[2][9]:
de = datetime.datetime.strftime(a[2][9], DATE_FMT2)
else:
de = datetime.datetime.strftime(a[2][8] + datetime.timedelta(seconds=60), DATE_FMT2)
logging.info("retreive data for peak index={} in date range [{},{}]".format(a[0], ds, de))
res = Results(
self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=ds
, end=de
, count_bucket=None
, show_query=self.options.query
, search_v2=self.options.search_v2
, hard_max = TWEET_SAMPLE
)
logging.info("retrieved {} records".format(len(res)))
n_grams_counts = list(res.get_top_grams(n=self.token_list_size))
ts.topics.append(n_grams_counts)
logging.debug('n_grams for peak index={}: '.format(a[0]) + ','.join(
map(str, [i[4].encode("utf-8","ignore") for i in n_grams_counts][:10])) + "...")
return ts
def dotplot(self, x, labels, path = "dotplot.png"):
"""Makeshift dotplots in matplotlib. This is not completely general and encodes labels and
parameter selections that are particular to n-gram dotplots."""
logging.info("dotplot called, writing image to path={}".format(path))
if len(x) <= 1 or len(labels) <= 1:
raise ValueError("cannot make a dot plot with only 1 point")
# split n_gram_counts into 2 data sets
n = len(labels)/2
x1, x2 = x[:n], x[n:]
labels1, labels2 = labels[:n], labels[n:]
# create enough equally spaced y values for the horizontal lines
ys = [r*PLOT_DELTA_Y for r in range(1,len(labels2)+1)]
# give ourselves a little extra room on the plot
maxx = max(x)*1.05
maxy = max(ys)*1.05
# set up plots to be a factor taller than the default size
# make factor proportional to the number of n-grams plotted
size = plt.gcf().get_size_inches()
# factor of n/10 is empirical
scale_denom = 10
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1,figsize=(size[0], size[1]*n/scale_denom))
logging.debug("plotting top {} terms".format(n))
logging.debug("plot size=({},{})".format(size[0], size[1]*n/scale_denom))
# first plot 1-grams
ax1.set_xlim(0,maxx)
ax1.set_ylim(0,maxy)
ticks = ax1.yaxis.set_ticks(ys)
text = ax1.yaxis.set_ticklabels(labels1)
for ct, item in enumerate(labels1):
ax1.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9')
ax1.plot(x1, ys, 'ko')
ax1.set_title("1-grams")
# second plot 2-grams
ax2.set_xlim(0,maxx)
ax2.set_ylim(0,maxy)
ticks = ax2.yaxis.set_ticks(ys)
text = ax2.yaxis.set_ticklabels(labels2)
for ct, item in enumerate(labels2):
ax2.hlines(ys[ct], 0, maxx, linestyle='dashed', color='0.9')
ax2.plot(x2, ys, 'ko')
ax2.set_title("2-grams")
ax2.set_xlabel("Fraction of Mentions")
#
plt.tight_layout()
plt.savefig(path)
plt.close("all")
def plots(self, ts, out_type="png"):
"""Basic choice for plotting analysis. If you wish to extend this class, over-
write this method."""
# creat a valid file name, in this case and additional requirement is no spaces
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filter_prefix_name = ''.join(c for c in self.options.filter if c in valid_chars)
filter_prefix_name = filter_prefix_name.replace(" ", "_")
if len(filter_prefix_name) > 16:
filter_prefix_name = filter_prefix_name[:16]
if self.options.second_filter is not None:
second_filter_prefix_name = ''.join(c for c in self.options.second_filter if c in valid_chars)
second_filter_prefix_name = second_filter_prefix_name.replace(" ", "_")
if len(second_filter_prefix_name) > 16:
second_filter_prefix_name = second_filter_prefix_name[:16]
######################
# timeline
######################
df0 = pd.Series(ts.counts, index=ts.dates)
df0.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_line", out_type)))
plt.close("all")
######################
# cycle and trend
######################
df1 = pd.DataFrame({"cycle":ts.cycle, "trend":ts.trend}, index=ts.dates)
df1.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "cycle_trend_line", out_type)))
plt.close("all")
######################
# moving avg
######################
df2 = pd.DataFrame({"moving":ts.moving}, index=ts.dates[:len(ts.moving)])
df2.plot()
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "mov_avg_line", out_type)))
plt.close("all")
######################
# timeline with peaks marked by vertical bands
######################
df3 = pd.Series(ts.counts, index=ts.dates)
df3.plot()
# peaks
for a in ts.top_peaks:
xs = a[2][7]
xp = a[2][8]
xe = a[2][9]
y = a[2][5]
# need to get x and y locs
plt.axvspan(xs, xe, ymin=0, ymax = y, linewidth=1, color='g', alpha=0.2)
plt.axvline(xp, ymin=0, ymax = y, linewidth=1, color='y')
plt.ylabel("Counts")
plt.title(filter_prefix_name)
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_{}.{}'.format(filter_prefix_name, "time_peaks_line", out_type)))
plt.close("all")
######################
# n-grams to help determine topics of peaks
######################
for n, p in enumerate(ts.topics):
x = []
labels = []
for i in p:
x.append(i[1])
labels.append(i[4])
try:
logging.info("creating n-grams dotplot for peak {}".format(n))
path = os.path.join(PLOTS_PREFIX, "{}_{}_{}.{}".format(filter_prefix_name, "peak", n, out_type))
self.dotplot(x, labels, path)
except ValueError, e:
logging.error("{} - plot path={} skipped".format(e, path))
######################
# x vs y scatter plot for correlations
######################
if self.options.second_filter is not None:
logging.info("creating scatter for queries {} and {}".format(self.options.filter, self.options.second_filter))
df4 = pd.DataFrame({filter_prefix_name: ts.counts, second_filter_prefix_name:ts.second_counts})
df4.plot(kind='scatter', x=filter_prefix_name, y=second_filter_prefix_name)
plt.ylabel(second_filter_prefix_name)
plt.xlabel(filter_prefix_name)
plt.xlim([0, 1.05 * max(ts.counts)])
plt.ylim([0, 1.05 * max(ts.second_counts)])
plt.title("{} vs. {}".format(second_filter_prefix_name, filter_prefix_name))
plt.tight_layout()
plt.savefig(os.path.join(PLOTS_PREFIX, '{}_v_{}_{}.{}'.format(filter_prefix_name,
second_filter_prefix_name,
"scatter",
out_type)))
plt.close("all")
if __name__ == "__main__":
    """ Simple command line utility."""
    import pickle
    g = GnipSearchTimeseries()
    if FROM_PICKLE:
        # development shortcut: reuse the last run's analysis instead of
        # hitting the API again (toggle FROM_PICKLE at the top of the file)
        ts = pickle.load(open("./time_series.pickle", "rb"))
    else:
        ts = g.get_results()
        # cache the analysis so the next run can use FROM_PICKLE
        pickle.dump(ts,open("./time_series.pickle", "wb"))
    g.plots(ts)
#!/usr/bin/env python
from __future__ import print_function
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_test_expansion_pattern(spec_test_expansion, test_expansion_schema):
    """Expand a test-expansion pattern into concrete value lists per artifact.

    For each artifact: '*' expands to every value the schema allows, a list
    is kept as-is, and a scalar becomes a one-element list.
    """
    def expand(artifact, value):
        if value == '*':
            return test_expansion_schema[artifact]
        if isinstance(value, list):
            return value
        return [value]
    return {artifact: expand(artifact, value)
            for artifact, value in spec_test_expansion.items()}
def permute_expansion(expansion, selection = None, artifact_index = 0):
    """Yield every combination of artifact values in `expansion`.

    Recurses over a fixed artifact order.  NOTE: the SAME dict object is
    yielded (mutated) on every iteration; consumers must use or copy it
    before advancing the generator.

    BUGFIX: `selection` previously defaulted to a mutable `{}`, which is
    shared across all top-level calls; use a None sentinel instead.
    """
    if selection is None:
        selection = {}
    artifact_order = ['delivery_method', 'redirection', 'origin',
                      'source_protocol', 'target_protocol', 'subresource',
                      'referrer_url', 'name']
    if artifact_index >= len(artifact_order):
        yield selection
        return
    artifact_key = artifact_order[artifact_index]
    for artifact_value in expansion[artifact_key]:
        selection[artifact_key] = artifact_value
        for next_selection in permute_expansion(expansion,
                                                selection,
                                                artifact_index + 1):
            yield next_selection
def generate_selection(selection, spec, subresource_path,
                       test_html_template_basename):
    """Render one test file (plus optional .headers file) for `selection`.

    Fills the HTML/JS templates with spec metadata and the selection's
    artifact values, then writes the result under the path derived from
    test_file_path_pattern (globals come from common_paths).
    """
    selection['spec_name'] = spec['name']
    selection['spec_title'] = spec['title']
    selection['spec_description'] = spec['description']
    selection['spec_specification_url'] = spec['specification_url']
    selection['subresource_path'] = subresource_path
    # Oddball: it can be None, so in JS it's null.
    selection['referrer_policy_json'] = json.dumps(spec['referrer_policy'])
    test_filename = test_file_path_pattern % selection
    test_directory = os.path.dirname(test_filename)
    full_path = os.path.join(spec_directory, test_directory)
    test_html_template = get_template(test_html_template_basename)
    test_js_template = get_template("test.js.template")
    disclaimer_template = get_template('disclaimer.template')
    test_description_template = get_template("test_description.template")
    html_template_filename = os.path.join(template_directory,
                                          test_html_template_basename)
    # the disclaimer records which script/template produced the file
    generated_disclaimer = disclaimer_template \
        % {'generating_script_filename': os.path.relpath(__file__,
                                                         test_root_directory),
           'html_template_filename': os.path.relpath(html_template_filename,
                                                     test_root_directory)}
    # Adjust the template for the test invoking JS. Indent it to look nice.
    selection['generated_disclaimer'] = generated_disclaimer.rstrip()
    test_description_template = \
        test_description_template.rstrip().replace("\n", "\n" + " " * 33)
    selection['test_description'] = test_description_template % selection
    # Adjust the template for the test invoking JS. Indent it to look nice.
    indent = "\n" + " " * 6;
    test_js_template = indent + test_js_template.replace("\n", indent);
    selection['test_js'] = test_js_template % selection
    # Directory for the test files.
    # NOTE(review): bare except silently ignores any makedirs failure, not
    # just "directory already exists"
    try:
        os.makedirs(full_path)
    except:
        pass
    selection['meta_delivery_method'] = ''
    # choose how the referrer policy is delivered to the generated page
    if spec['referrer_policy'] != None:
        if selection['delivery_method'] == 'meta-referrer':
            selection['meta_delivery_method'] = \
                '<meta name="referrer" content="%(referrer_policy)s">' % spec
        elif selection['delivery_method'] == 'http-rp':
            selection['meta_delivery_method'] = \
                "<!-- No meta: Referrer policy delivered via HTTP headers. -->"
            # policy is delivered by a sidecar .headers file instead
            test_headers_filename = test_filename + ".headers"
            with open(test_headers_filename, "w") as f:
                f.write('Referrer-Policy: ' + \
                        '%(referrer_policy)s\n' % spec)
                # TODO(kristijanburnik): Limit to WPT origins.
                f.write('Access-Control-Allow-Origin: *\n')
        elif selection['delivery_method'] == 'attr-referrer':
            # attr-referrer is supported by the JS test wrapper.
            pass
        elif selection['delivery_method'] == 'rel-noreferrer':
            # rel=noreferrer is supported by the JS test wrapper.
            pass
        else:
            raise ValueError('Not implemented delivery_method: ' \
                             + selection['delivery_method'])
    # Obey the lint and pretty format.
    if len(selection['meta_delivery_method']) > 0:
        selection['meta_delivery_method'] = "\n    " + \
            selection['meta_delivery_method']
    with open(test_filename, 'w') as f:
        f.write(test_html_template % selection)
def generate_test_source_files(spec_json, target):
    """Generate every test file described by `spec_json` for `target`.

    target selects the debug or release HTML template.  Selections matching
    any pattern in spec_json['excluded_tests'] are skipped.
    """
    test_expansion_schema = spec_json['test_expansion_schema']
    specification = spec_json['specification']
    # dump the full spec as a JS file the generated tests can load
    spec_json_js_template = get_template('spec_json.js.template')
    with open(generated_spec_json_filename, 'w') as f:
        f.write(spec_json_js_template
                % {'spec_json': json.dumps(spec_json)})
    # Choose a debug/release template depending on the target.
    html_template = "test.%s.html.template" % target
    # Create list of excluded tests.
    exclusion_dict = {}
    for excluded_pattern in spec_json['excluded_tests']:
        excluded_expansion = \
            expand_test_expansion_pattern(excluded_pattern,
                                          test_expansion_schema)
        for excluded_selection in permute_expansion(excluded_expansion):
            excluded_selection_path = selection_pattern % excluded_selection
            exclusion_dict[excluded_selection_path] = True
    for spec in specification:
        for spec_test_expansion in spec['test_expansion']:
            expansion = expand_test_expansion_pattern(spec_test_expansion,
                                                      test_expansion_schema)
            for selection in permute_expansion(expansion):
                selection_path = selection_pattern % selection
                if not selection_path in exclusion_dict:
                    subresource_path = \
                        spec_json["subresource_path"][selection["subresource"]]
                    generate_selection(selection,
                                       spec,
                                       subresource_path,
                                       html_template)
                else:
                    print('Excluding selection:', selection_path)
def main(target):
    """Validate the spec JSON, then generate the test suite for `target`."""
    spec_json = load_spec_json()
    spec_validator.assert_valid_spec_json(spec_json)
    generate_test_source_files(spec_json, target)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Test suite generator utility')
    # release is the default; debug selects the instrumented HTML template
    parser.add_argument('-t', '--target', type = str,
                        choices = ("release", "debug"), default = "release",
                        help = 'Sets the appropriate template for generating tests')
    # TODO(kristijanburnik): Add option for the spec_json file.
    args = parser.parse_args()
    main(args.target)
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package ldap
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"strings"
"testing"
"github.com/hashicorp/vault/api"
)
// testHTTPServer creates a test HTTP server that handles requests until
// the listener returned is closed.
// testHTTPServer starts an HTTP server on an ephemeral localhost port that
// serves requests with the given handler until the returned listener is
// closed, and returns an api.Config already pointed at that server.
func testHTTPServer(
	t *testing.T, handler http.Handler,
) (*api.Config, net.Listener) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	srv := &http.Server{Handler: handler}
	go srv.Serve(listener)

	cfg := api.DefaultConfig()
	cfg.Address = fmt.Sprintf("http://%s", listener.Addr())
	return cfg, listener
}
// init clears any ambient VAULT_TOKEN so the tests exercise the login flow
// itself rather than picking up a real token from the developer environment.
func init() {
	os.Setenv("VAULT_TOKEN", "")
}
// TestLogin exercises LDAP login against a fake Vault HTTP server, covering
// the three password sources (file, env var, string literal), the error
// paths for empty user / nil password, and a custom mount path.
func TestLogin(t *testing.T) {
	passwordEnvVar := "LDAP_PASSWORD"
	allowedPassword := "6hrtL!*bro!ywbQbvDwW"

	// stage the password in a temp file and an env var so every Password
	// source can be tested against the same expected value
	content := []byte(allowedPassword)
	tmpfile, err := os.CreateTemp("./", "file-containing-password")
	if err != nil {
		t.Fatalf("error creating temp file: %v", err)
	}
	defer os.Remove(tmpfile.Name()) // clean up

	err = os.Setenv(passwordEnvVar, allowedPassword)
	if err != nil {
		t.Fatalf("error writing password to env var: %v", err)
	}

	if _, err := tmpfile.Write(content); err != nil {
		t.Fatalf("error writing to temp file: %v", err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Fatalf("error closing temp file: %v", err)
	}

	// a response to return if the correct values were passed to login
	authSecret := &api.Secret{
		Auth: &api.SecretAuth{
			ClientToken: "a-client-token",
		},
	}

	authBytes, err := json.Marshal(authSecret)
	if err != nil {
		t.Fatalf("error marshaling json: %v", err)
	}

	// fake Vault: only answers with the auth secret when the submitted
	// password matches; any other request gets an empty body
	handler := func(w http.ResponseWriter, req *http.Request) {
		payload := make(map[string]interface{})
		err := json.NewDecoder(req.Body).Decode(&payload)
		if err != nil {
			t.Fatalf("error decoding json: %v", err)
		}
		if payload["password"] == allowedPassword {
			w.Write(authBytes)
		}
	}

	config, ln := testHTTPServer(t, http.HandlerFunc(handler))
	defer ln.Close()

	config.Address = strings.ReplaceAll(config.Address, "127.0.0.1", "localhost")
	client, err := api.NewClient(config)
	if err != nil {
		t.Fatalf("error initializing Vault client: %v", err)
	}

	// Password fromFile test
	authFromFile, err := NewLDAPAuth("my-ldap-username", &Password{FromFile: tmpfile.Name()})
	if err != nil {
		t.Fatalf("error initializing LDAPAuth with password file: %v", err)
	}

	loginRespFromFile, err := client.Auth().Login(context.TODO(), authFromFile)
	if err != nil {
		t.Fatalf("error logging in with password from file: %v", err)
	}
	if loginRespFromFile.Auth == nil || loginRespFromFile.Auth.ClientToken == "" {
		t.Fatalf("no authentication info returned by login")
	}

	// Password fromEnv Test
	authFromEnv, err := NewLDAPAuth("my-ldap-username", &Password{FromEnv: passwordEnvVar})
	if err != nil {
		t.Fatalf("error initializing LDAPAuth with password env var: %v", err)
	}

	loginRespFromEnv, err := client.Auth().Login(context.TODO(), authFromEnv)
	if err != nil {
		t.Fatalf("error logging in with password from env var: %v", err)
	}
	if loginRespFromEnv.Auth == nil || loginRespFromEnv.Auth.ClientToken == "" {
		t.Fatalf("no authentication info returned by login with password from env var")
	}

	// Password fromStr test
	authFromStr, err := NewLDAPAuth("my-ldap-username", &Password{FromString: allowedPassword})
	if err != nil {
		t.Fatalf("error initializing LDAPAuth with password string: %v", err)
	}

	loginRespFromStr, err := client.Auth().Login(context.TODO(), authFromStr)
	if err != nil {
		t.Fatalf("error logging in with string: %v", err)
	}
	if loginRespFromStr.Auth == nil || loginRespFromStr.Auth.ClientToken == "" {
		t.Fatalf("no authentication info returned by login with password from string")
	}

	// Empty User Test
	_, err = NewLDAPAuth("", &Password{FromString: allowedPassword})
	if err.Error() != "no user name provided for login" {
		t.Fatalf("Auth object created for empty username: %v", err)
	}

	// Empty Password Test
	_, err = NewLDAPAuth("my-ldap-username", nil)
	if err.Error() != "no password provided for login" {
		t.Fatalf("Auth object created when passing a nil Password struct: %v", err)
	}

	// Auth with Custom MountPath
	ldapMount := WithMountPath("customMount")
	_, err = NewLDAPAuth("my-ldap-username", &Password{FromString: allowedPassword}, ldapMount)
	if err != nil {
		t.Fatalf("error initializing LDAPAuth with custom mountpath: %v", err)
	}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.