| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='blogpostmodel',
name='posted_at',
field=models.DateTimeField(),
preserve_default=True,
),
]
|
{
"content_hash": "d79382fe3d5de46229ee0e03d017e038",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 41,
"avg_line_length": 21.05263157894737,
"alnum_prop": 0.58,
"repo_name": "GMadorell/djagolb",
"id": "df9f2f9a9495eb2c972b308e533f9dee3f99a9ba",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/migrations/0002_auto_20141211_2202.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "382159"
},
{
"name": "JavaScript",
"bytes": "175"
},
{
"name": "Python",
"bytes": "30896"
},
{
"name": "Ruby",
"bytes": "905"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
}
|
import pytest
from ibis.compat import mock
import pandas as pd
pytest.importorskip('hdfs')
pytest.importorskip('sqlalchemy')
pytest.importorskip('impala.dbapi')
pytestmark = pytest.mark.impala
def patch_execute(con):
return mock.patch.object(con, '_execute', wraps=con._execute)
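# Illustrative note: wraps=con._execute makes the patched mock delegate to
# the real method, so statements still execute while every call is recorded
# for the assertions in the tests below.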
def test_invalidate_metadata(con, test_data_db):
with patch_execute(con) as ex_mock:
con.invalidate_metadata()
ex_mock.assert_called_with('INVALIDATE METADATA')
con.invalidate_metadata('functional_alltypes')
t = con.table('functional_alltypes')
t.invalidate_metadata()
with patch_execute(con) as ex_mock:
con.invalidate_metadata('functional_alltypes', database=test_data_db)
ex_mock.assert_called_with(
'INVALIDATE METADATA {}.`{}`'.format(
test_data_db, 'functional_alltypes'
)
)
def test_refresh(con, test_data_db):
tname = 'functional_alltypes'
with patch_execute(con) as ex_mock:
con.refresh(tname)
ex_cmd = 'REFRESH {0}.`{1}`'.format(test_data_db, tname)
ex_mock.assert_called_with(ex_cmd)
t = con.table(tname)
with patch_execute(con) as ex_mock:
t.refresh()
ex_cmd = 'REFRESH {0}.`{1}`'.format(test_data_db, tname)
ex_mock.assert_called_with(ex_cmd)
def test_describe_formatted(con, test_data_db):
from ibis.impala.metadata import TableMetadata
t = con.table('functional_alltypes')
with patch_execute(con) as ex_mock:
desc = t.describe_formatted()
ex_mock.assert_called_with('DESCRIBE FORMATTED '
'{0}.`{1}`'
.format(test_data_db,
'functional_alltypes'),
results=True)
assert isinstance(desc, TableMetadata)
def test_show_files(con, test_data_db):
t = con.table('functional_alltypes')
qualified_name = '{0}.`{1}`'.format(test_data_db, 'functional_alltypes')
with patch_execute(con) as ex_mock:
desc = t.files()
ex_mock.assert_called_with('SHOW FILES IN {0}'.format(qualified_name),
results=True)
assert isinstance(desc, pd.DataFrame)
def test_table_column_stats(con, test_data_db):
t = con.table('functional_alltypes')
qualified_name = '{0}.`{1}`'.format(test_data_db, 'functional_alltypes')
with patch_execute(con) as ex_mock:
desc = t.stats()
ex_mock.assert_called_with('SHOW TABLE STATS {0}'
.format(qualified_name),
results=True)
assert isinstance(desc, pd.DataFrame)
with patch_execute(con) as ex_mock:
desc = t.column_stats()
ex_mock.assert_called_with('SHOW COLUMN STATS {0}'
.format(qualified_name),
results=True)
assert isinstance(desc, pd.DataFrame)
|
{
"content_hash": "3a9ded9f753638af271f0c115bddefa2",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 33.62921348314607,
"alnum_prop": 0.5860340795188774,
"repo_name": "deepfield/ibis",
"id": "2ac5efc3d7ba714632e0b10700f1871b4703bc90",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/impala/tests/test_patched.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1730"
},
{
"name": "C",
"bytes": "3684"
},
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Makefile",
"bytes": "186"
},
{
"name": "Python",
"bytes": "1265497"
},
{
"name": "Shell",
"bytes": "5808"
}
],
"symlink_target": ""
}
|
from pycket import config
from pycket import values, values_string, values_parameter
from pycket import vector
from pycket.AST import AST
from pycket.arity import Arity
from pycket.cont import Cont, NilCont, label, continuation
from pycket.env import SymList, ConsEnv, ToplevelEnv
from pycket.error import SchemeException
from pycket.prims.expose import prim_env, make_call_method
from pycket.prims.control import convert_runtime_exception, convert_os_error
from pycket.prims.parameter import current_cmd_args_param
from pycket.hash.persistent_hash_map import make_persistent_hash_type
from rpython.rlib import jit, debug, objectmodel
from rpython.rlib.objectmodel import import_from_mixin
from rpython.rlib.objectmodel import r_dict, compute_hash, specialize
from rpython.rlib.rarithmetic import r_uint
from rpython.tool.pairtype import extendabletype
from small_list import inline_small_list
import inspect
import sys
# imported for side effects
import pycket.prims.general
BUILTIN_MODULES = [
"#%kernel",
"#%unsafe",
"#%paramz",
"#%flfxnum",
"#%utils",
"#%place",
"#%foreign",
"#%builtin",
"#%extfl",
"#%futures",
"#%core",
"#%linklet",
"#%linklet-primitive",
"#%network" ]
class BindingFormMixin(object):
_immutable_fields_ = ['_mutable_var_flags[*]']
_mutable_var_flags = None
def init_mutable_var_flags(self, flags):
if True in flags:
self._mutable_var_flags = flags
else:
self._mutable_var_flags = None
@jit.unroll_safe
def binds_mutable_var(self):
if self._mutable_var_flags is None:
return False
for flag in self._mutable_var_flags:
if flag:
return True
return False
@objectmodel.always_inline
def is_mutable_var(self, i):
return self._mutable_var_flags is not None and self._mutable_var_flags[i]
@objectmodel.always_inline
def wrap_value(self, val, i):
if self.is_mutable_var(i):
val = values.W_Cell(val)
return val
class Context(object):
__metaclass__ = extendabletype
_attrs_ = []
def plug(self, ast):
raise NotImplementedError("absract base class")
class __extend__(Context):
# Below are the set of defunctionalized continuations used in the
# paper "The Essence of Compiling with Continuations"
# https://dl.acm.org/citation.cfm?id=989393.989443&coll=DL&dl=GUIDE
def context(func):
argspec = inspect.getargspec(func)
assert argspec.varargs is None
assert argspec.keywords is None
assert argspec.defaults is None
argnames = argspec.args[:-1]
class PrimContext(Context):
_attrs_ = _immutable_fields_ = ["args"]
def __init__(self, *args):
Context.__init__(self)
self.args = args
def plug_direct(self, ast):
args = self.args + (ast,)
return func(*args)
def plug(self, ast):
return the_ast, TrampolineContext(ast, self)
__getitem__ = plug
class TrampolineAST(AST):
_attrs_ = _immutable_fields_ = []
def normalize(self, context):
assert type(context) is TrampolineContext
ast, context = context.ast, context.prev
return context.plug_direct(ast)
class TrampolineContext(Context):
_attrs_ = _immutable_fields_ = ["ast", "prev"]
def __init__(self, ast, prev):
assert type(prev) is PrimContext
self.ast = ast
self.prev = prev
def plug_direct(self, ast):
return self.prev.plug_direct(ast)
plug = plug_direct
the_ast = TrampolineAST()
@objectmodel.always_inline
def make_context(*args):
return PrimContext(*args)
make_context.__name__ = "%sContext" % func.__name__.replace("_", "")
return make_context
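# Illustrative sketch of how the decorator is used: writing
#     @context
#     def Name(context, hint, ast): ...
# turns Name into a factory, so Context.Name(ctx, "g") builds a PrimContext
# capturing (ctx, "g"); plugging an AST into it eventually invokes the
# original function as Name(ctx, "g", ast) via the trampoline above.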
class Done(Exception):
def __init__(self, ast):
self.ast = ast
class AstList(AST):
_attrs_ = ["nodes"]
def __init__(self, nodes):
self.nodes = nodes
EmptyList = AstList([])
@staticmethod
@objectmodel.always_inline
def yields(ast):
raise Context.Done(ast)
@context
def Nil(ast):
Context.yields(ast)
Nil = Nil()
@staticmethod
@specialize.arg(2)
def normalize_term(expr, context=Nil, expect=AST):
"""
This will perform A-normalization on the given expression. The given
context is a value of type Context which contains the surrounding binding
context of the current expression. The context is needed to properly
re-build the expression inside out, as the given expression is traversed
from outside inward.
The transformation is trampolined in order to overcome Python's
impoverished call stack.
"""
try:
while True:
expr, context = expr.normalize(context)
except Context.Done as e:
expr = e.ast
assert isinstance(expr, expect)
return expr
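# Illustrative example of the transformation: A-normalizing an application
# such as (f (g x)) binds the non-simple operand to a fresh variable
# (gensym'd from the "AppRand" hint), yielding roughly
#     (let ([AppRand0 (g x)]) (f AppRand0))
# so that every operator and operand in the residual program is simple.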
@staticmethod
def normalize_name(expr, context, hint="g"):
context = Context.Name(context, hint)
return expr, context
@staticmethod
def normalize_names(exprs, context, i=0):
if i >= len(exprs):
return context.plug(Context.EmptyList)
expr = exprs[i]
context = Context.Names(exprs, i, context)
return Context.normalize_name(expr, context, hint="AppRand")
@staticmethod
@context
def Name(context, hint, ast):
if ast.simple:
return context.plug(ast)
sym = Gensym.gensym(hint=hint)
var = LexicalVar(sym)
body = Context.normalize_term(var, context)
Context.yields(make_let_singlevar(sym, ast, [body]))
@staticmethod
@context
def Names(exprs, i, context, ast):
context = Context.Append(ast, context)
return Context.normalize_names(exprs, context, i+1)
@staticmethod
def Let(xs, Ms, body, context):
return Context._Let(xs, Ms, body, 0, context)
@staticmethod
@context
def _Let(xs, Ms, body, i, context, ast):
assert len(xs) == len(Ms)
if i == len(Ms) - 1:
body = Context.normalize_term(body, context)
# Body may have been wrapped in a begin for convenience
body = body.body if isinstance(body, Begin) else [body]
Context.yields(make_let([xs[i]], [ast], body))
X = xs[i]
i += 1
x_, M = xs[i], Ms[i]
context = Context._Let(xs, Ms, body, i, context)
body = Context.normalize_term(M, context)
Context.yields(make_let([X], [ast], [body]))
@staticmethod
@context
def If(thn, els, context, tst):
thn = Context.normalize_term(thn)
els = Context.normalize_term(els)
result = If.make(tst, thn, els)
return context.plug(result)
@staticmethod
@context
def AppRator(args, context, ast):
context = Context.AppRand(ast, context)
return Context.normalize_names(args, context)
@staticmethod
@context
def AppRand(rator, context, ast):
assert isinstance(ast, Context.AstList)
rands = ast.nodes
result = App.make(rator, rands)
return context.plug(result)
@staticmethod
@context
def Append(expr, context, ast):
assert isinstance(ast, Context.AstList)
ast = Context.AstList([expr] + ast.nodes)
return context.plug(ast)
@staticmethod
@context
def SetBang(var, context, ast):
ast = SetBang(var, ast)
return context.plug(ast)
@objectmodel.always_inline
def equal(a, b):
assert a is None or isinstance(a, values.W_Symbol)
assert b is None or isinstance(b, values.W_Symbol)
return a is b
@objectmodel.always_inline
def hashfun(v):
assert v is None or isinstance(v, values.W_Symbol)
return r_uint(compute_hash(v))
SymbolSet = make_persistent_hash_type(
super=values.W_ProtoObject,
base=object,
keytype=values.W_Symbol,
valtype=values.W_Symbol,
name="SymbolSet",
hashfun=hashfun,
equal=equal)
def is_builtin_module(mod):
return (mod in BUILTIN_MODULES) or (0 <= mod.find("pycket-lang/extra-prims"))
class Done(Exception):
_attrs_ = ["values"]
_immutable_ = True
def __init__(self, vals):
self.values = vals
def var_eq(a, b):
if isinstance(a, LexicalVar) and isinstance(b, LexicalVar):
return a.sym is b.sym
elif isinstance(a, ModuleVar) and isinstance(b, ModuleVar):
# two renamed variables can be the same
return a.srcsym is b.srcsym
return False
def var_hash(a):
if isinstance(a, LexicalVar):
return compute_hash(a.sym)
elif isinstance(a, LinkletVar):
return compute_hash(a.sym)
elif isinstance(a, ModuleVar):
return compute_hash(a.srcsym)
assert False
def variable_set():
" new set-like structure for variables "
return r_dict(var_eq, var_hash, force_non_null=True)
def variables_equal(a, b):
if len(a) != len(b):
return False
for k, v in a.iteritems():
if k not in b:
return False
return True
def check_one_val(vals):
if not isinstance(vals, values.W_Object):
raise SchemeException("expected 1 value but got %s"%(vals.num_values()))
return vals
class LetrecCont(Cont):
_attrs_ = _immutable_fields_ = ["counting_ast"]
def __init__(self, counting_ast, env, prev):
Cont.__init__(self, env, prev)
self.counting_ast = counting_ast
def _clone(self):
return LetrecCont(self.counting_ast, self.env, self.prev)
def get_ast(self):
return self.counting_ast.ast
def get_next_executed_ast(self):
ast, rhsindex = self.counting_ast.unpack(Letrec)
if rhsindex == (len(ast.rhss) - 1):
return ast.body[0]
return ast.rhss[rhsindex + 1]
@jit.unroll_safe
def plug_reduce(self, vals, env):
ast, i = self.counting_ast.unpack(Letrec)
if ast.counts[i] != vals.num_values():
raise SchemeException("wrong number of values")
for j in range(vals.num_values()):
w_val = vals.get_value(j)
v = self.env.lookup(ast.args.elems[ast.total_counts[i] + j], ast.args)
assert isinstance(v, values.W_Cell)
v.set_val(w_val)
if i >= (len(ast.rhss) - 1):
return ast.make_begin_cont(self.env, self.prev)
else:
return (ast.rhss[i + 1], self.env,
LetrecCont(ast.counting_asts[i + 1],
self.env, self.prev))
@inline_small_list(immutable=True, attrname="vals_w",
unbox_num=True, factoryname="_make")
class LetCont(Cont):
_attrs_ = ["counting_ast", "_get_size_list"]
_immutable_fields_ = ["counting_ast"]
return_safe = True
def __init__(self, counting_ast, env, prev):
Cont.__init__(self, env, prev)
self.counting_ast = counting_ast
def get_ast(self):
return self.counting_ast.ast
def get_next_executed_ast(self):
ast, rhsindex = self.counting_ast.unpack(Let)
if rhsindex == (len(ast.rhss) - 1):
return ast.body[0]
return ast.rhss[rhsindex + 1]
@staticmethod
@jit.unroll_safe
def make(vals_w, ast, rhsindex, env, prev):
counting_ast = ast.counting_asts[rhsindex]
env = ast._prune_env(env, rhsindex + 1)
return LetCont._make(vals_w, counting_ast, env, prev)
@jit.unroll_safe
def plug_reduce(self, vals, _env):
len_vals = vals.num_values()
jit.promote(len_vals)
len_self = self._get_size_list()
jit.promote(len_self)
new_length = len_self + len_vals
ast, rhsindex = self.counting_ast.unpack(Let)
assert isinstance(ast, Let)
if ast.counts[rhsindex] != len_vals:
raise SchemeException("wrong number of values")
if rhsindex == (len(ast.rhss) - 1):
prev = self.env
if ast.env_speculation_works:
# speculate moar!
if _env is self.env:
prev = _env
elif not jit.we_are_jitted():
ast.env_speculation_works = False
env = self._construct_env(ast, len_self, vals, len_vals, new_length, prev)
return ast.make_begin_cont(env, self.prev)
else:
# XXX remove copy
vals_w = [None] * new_length
i = 0
for j in range(len_self):
vals_w[i] = self._get_list(j)
i += 1
for j in range(len_vals):
vals_w[i] = vals.get_value(j)
i += 1
return (ast.rhss[rhsindex + 1], self.env,
LetCont.make(vals_w, ast, rhsindex + 1,
self.env, self.prev))
@jit.unroll_safe
def _construct_env(self, ast, len_self, vals, len_vals, new_length, prev):
assert isinstance(ast, Let)
# this is a complete mess. however, it really helps warmup a lot
if new_length == 0:
return ConsEnv.make0(prev)
if new_length == 1:
if len_self == 1:
elem = self._get_list(0)
else:
assert len_self == 0 and len_vals == 1
elem = vals.get_value(0)
elem = ast.wrap_value(elem, 0)
return ConsEnv.make1(elem, prev)
if new_length == 2:
if len_self == 0:
assert len_vals == 2
elem1 = vals.get_value(0)
elem2 = vals.get_value(1)
elif len_self == 1:
assert len_vals == 1
elem1 = self._get_list(0)
elem2 = vals.get_value(0)
else:
assert len_self == 2 and len_vals == 0
elem1 = self._get_list(0)
elem2 = self._get_list(1)
elem1 = ast.wrap_value(elem1, 0)
elem2 = ast.wrap_value(elem2, 1)
return ConsEnv.make2(elem1, elem2, prev)
env = ConsEnv.make_n(new_length, prev)
i = 0
for j in range(len_self):
val = self._get_list(j)
val = ast.wrap_value(val, i)
env._set_list(i, val)
i += 1
for j in range(len_vals):
val = vals.get_value(j)
val = ast.wrap_value(val, i)
env._set_list(i, val)
i += 1
return env
class CellCont(Cont):
_attrs_ = _immutable_fields_ = ['ast']
def __init__(self, ast, env, prev):
Cont.__init__(self, env, prev)
self.ast = ast
def _clone(self):
return CellCont(self.ast, self.env, self.prev)
def get_ast(self):
return self.ast
@jit.unroll_safe
def plug_reduce(self, vals, env):
ast = jit.promote(self.ast)
vals_w = []
for i, needs_cell in enumerate(ast.need_cell_flags):
w_val = vals.get_value(i)
if needs_cell:
w_val = values.W_Cell(w_val)
vals_w.append(w_val)
return return_multi_vals(values.Values.make(vals_w), self.env, self.prev)
class BeginCont(Cont):
_attrs_ = _immutable_fields_ = ["counting_ast"]
return_safe = True
def __init__(self, counting_ast, env, prev):
Cont.__init__(self, env, prev)
self.counting_ast = counting_ast
def _clone(self):
return BeginCont(self.counting_ast, self.env, self.prev)
def get_ast(self):
return self.counting_ast.ast
def get_next_executed_ast(self):
ast, i = self.counting_ast.unpack(SequencedBodyAST)
return ast.body[i]
def plug_reduce(self, vals, env):
ast, i = self.counting_ast.unpack(SequencedBodyAST)
return ast.make_begin_cont(self.env, self.prev, i)
@inline_small_list(immutable=True, attrname="vals_w",
unbox_num=True, factoryname="_make")
class Begin0BodyCont(Cont):
_attrs_ = _immutable_fields_ = ["counting_ast"]
return_safe = True
def __init__(self, ast, env, prev):
Cont.__init__(self, env, prev)
self.counting_ast = ast
@staticmethod
def make(vals, ast, index, env, prev):
counting_ast = ast.counting_asts[index]
return Begin0BodyCont._make(vals, counting_ast, env, prev)
def get_ast(self):
return self.counting_ast.ast
def get_next_executed_ast(self):
ast, i = self.counting_ast.unpack(Begin0)
return ast.body[i]
def plug_reduce(self, vals, env):
ast, index = self.counting_ast.unpack(Begin0)
vals = self._get_full_list()
if index == len(ast.body) - 1:
return return_multi_vals(values.Values.make(vals), env, self.prev)
return (ast.body[index + 1], self.env,
Begin0BodyCont.make(vals, ast, index + 1, self.env, self.prev))
# FIXME: it would be nice to not need two continuation types here
class Begin0Cont(Cont):
_attrs_ = _immutable_fields_ = ["ast"]
return_safe = True
def __init__(self, ast, env, prev):
Cont.__init__(self, env, prev)
self.ast = ast
def _clone(self):
return Begin0Cont(self.ast, self.env, self.prev)
def get_ast(self):
return self.ast
def get_next_executed_ast(self):
return self.ast
def plug_reduce(self, vals, env):
ast = jit.promote(self.ast)
vals_w = vals.get_all_values()
return ast.body[0], self.env, Begin0BodyCont.make(vals_w, ast, 0, self.env, self.prev)
class WCMKeyCont(Cont):
_attrs_ = _immutable_fields_ = ["ast"]
return_safe = True
def __init__(self, ast, env, prev):
Cont.__init__(self, env, prev)
self.ast = ast
def _clone(self):
return WCMKeyCont(self.ast, self.env, self.prev)
def get_ast(self):
return self.ast
def get_next_executed_ast(self):
return self.ast.value
def plug_reduce(self, vals, env):
key = check_one_val(vals)
return self.ast.value, self.env, WCMValCont(self.ast, key, self.env, self.prev)
class WCMValCont(Cont):
_attrs_ = _immutable_fields_ = ["ast", "key"]
return_safe = True
def __init__(self, ast, key, env, prev):
Cont.__init__(self, env, prev)
self.ast = ast
self.key = key
def _clone(self):
return WCMValCont(self.ast, self.key, self.env, self.prev)
def get_ast(self):
return self.ast
def get_next_executed_ast(self):
return self.ast.body
def plug_reduce(self, vals, env):
val = check_one_val(vals)
key = self.key
if isinstance(key, values.W_ContinuationMarkKey):
body = values.W_ThunkBodyCMK(self.ast.body, self.env)
return key.set_cmk(body, val, self.prev, env, self.prev)
# Perform a shallow copying of the continuation to ensure any marks
# captured by call/cc and family are not affected by the mutation of
# the mark set.
cont = self.prev.clone()
cont.update_cm(key, val)
return self.ast.body, self.env, cont
class ModuleInfo(object):
def __init__(self, current_module):
self.current_module = current_module
self.submodules = []
self.requires = []
class Module(AST):
_immutable_fields_ = ["name", "body[*]", "requires[*]", "parent", "submodules[*]", "interpreted?", "lang"]
visitable = True
simple = True
def __init__(self, name, body, config, lang=None):
self.parent = None
self.lang = lang
self.name = name
info = ModuleInfo(self)
todo = body[:]
while todo:
curr = todo.pop()
rest = curr.collect_module_info(info)
todo.extend(rest)
self.submodules = info.submodules[:]
self.requires = info.requires[:]
self.body = [b for b in body if not isinstance(b, Require)]
self.env = None
self.interpreted = False
self.config = config
defs = {}
for b in self.body:
b.defined_vars(defs)
self.defs = defs
def rebuild_body(self):
return self.requires + self.body
def set_parent_module(self, parent):
assert isinstance(parent, Module)
self.parent = parent
def collect_module_info(self, info):
info.submodules.append(self)
self.set_parent_module(info.current_module)
return []
def full_module_path(self):
if self.parent is None:
return self.name
path = []
while self is not None:
path.append(self.name)
self = self.parent
return "/".join([i for i in reversed(path)])
@jit.elidable
def lookup(self, sym):
if sym not in self.defs:
path = self.full_module_path()
raise SchemeException("unknown module variable %s in module %s" % (sym.tostring(), path))
v = self.defs[sym]
if v is None:
raise SchemeException("use of module variable before definition %s" % (sym.tostring()))
return v
def mod_mutated_vars(self, cache):
""" return all the module-bound variables that are mutated"""
x = variable_set()
for r in self.body:
x.update(r.mutated_vars(cache))
return x
def direct_children(self):
return self.rebuild_body()
def _tostring(self):
return "(module %s %s)"%(self.name," ".join([s.tostring() for s in self.body]))
def to_sexp(self):
mod_sym = values.W_Symbol.make("module")
name_s_exp = values_string.W_String.fromstr_utf8(self.name)
lang_s_exp = self.lang.to_sexp()
bodies_s_exp = values.to_list([b.to_sexp() for b in self.body])
cons = values.W_Cons.make
return cons(mod_sym, cons(name_s_exp, cons(lang_s_exp, bodies_s_exp)))
def interpret_simple(self, env):
""" Interpretation of a module is a no-op from the outer module.
Modules must be executed explicitly by |interpret_mod|, usually via
a require statement. """
return values.w_void
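# Illustrative note: a (require ...) form reaches this module through
# Require.interpret_simple below, which resolves the module and runs it once
# via interpret_mod; requiring the same module again is a no-op because
# self.interpreted is already True.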
def interpret_mod(self, env):
if self.interpreted:
return values.w_void
try:
self.interpreted = True
return self._interpret_mod(env)
except SchemeException as e:
if e.context_module is None:
e.context_module = self
raise
@jit.unroll_safe
def root_module(self):
while self.parent is not None:
self = self.parent
return self
@jit.unroll_safe
def find_submodule(self, name):
if name == ".":
return self
if name == "..":
return self.parent
for s in self.submodules:
assert isinstance(s, Module)
if s.name == name:
return s
return None
@jit.unroll_safe
def resolve_submodule_path(self, path):
if path is None:
return self
for p in path:
self = self.find_submodule(p)
assert self is not None
return self
def normalize(self, context):
# Return the current module, as it is not safe to duplicate module forms
for i, b in enumerate(self.body):
self.body[i] = Context.normalize_term(b)
return context.plug(self)
def _interpret_mod(self, env):
self.env = env
module_env = env.toplevel_env().module_env
old = module_env.current_module
module_env.current_module = self
if self.lang is not None:
interpret_one(self.lang, self.env)
elif self.parent is not None:
self.parent.interpret_mod(self.env)
for r in self.requires:
interpret_one(r, self.env)
for f in self.body:
# FIXME: this is wrong -- the continuation barrier here is around the RHS,
# whereas in Racket it's around the whole `define-values`
if isinstance(f, DefineValues):
e = f.rhs
vs = interpret_one(e, self.env).get_all_values()
if len(f.names) == len(vs):
for n in range(len(vs)):
self.defs[f.names[n]] = vs[n]
else:
raise SchemeException("wrong number of values for define-values")
else: # FIXME modules can have other things, assuming expression
vs = interpret_one(f, self.env)
continue
module_env.current_module = old
class Require(AST):
_immutable_fields_ = ["fname", "loader", "path[*]"]
visitable = True
simple = True
def __init__(self, fname, loader, path=None):
self.fname = fname
self.path = path
self.loader = loader
def find_module(self, env):
assert not jit.we_are_jitted()
if self.loader is not None:
module = self.loader.lazy_load(self.fname)
else:
module = env.toplevel_env().module_env.current_module
assert module is not None
module = module.resolve_submodule_path(self.path)
return module
# Interpret the module and add it to the module environment
def interpret_simple(self, env):
module = self.find_module(env)
top = env.toplevel_env()
top.module_env.add_module(self.fname, module.root_module())
module.interpret_mod(top)
return values.w_void
def collect_module_info(self, info):
for r in info.requires:
assert isinstance(r, Require)
if (self.fname == r.fname and
self.path == r.path and
self.loader is r.loader):
break
else:
info.requires.append(self)
return []
def _tostring(self):
return "(require %s)" % self.fname
def to_sexp(self):
req_sym = values.W_Symbol.make("require")
return values.to_list([req_sym, values_string.W_String.fromstr_utf8(self.fname)])
def return_value(w_val, env, cont):
return return_multi_vals(values.Values.make1(w_val), env, cont)
def return_value_direct(w_val, env, cont):
""" like return_value, but without using a label. only safe to use in
AST.interpret and (automatically) by simple primitives """
val = values.Values.make1(w_val)
return cont.plug_reduce(val, env)
def return_multi_vals(vals, env, cont):
if cont.return_safe:
return cont.plug_reduce(vals, env)
return safe_return_multi_vals(vals, env, cont)
# A safe variant which ensures control is handed back to
# the CEK loop before applying the continuation.
@label
def safe_return_multi_vals(vals, env, cont):
return cont.plug_reduce(vals, env)
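# Illustrative note: the @label above makes the call bounce through the CEK
# driver loop instead of growing the host call stack, which is why
# return_multi_vals only skips it for continuations marked return_safe.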
def return_multi_vals_direct(vals, env, cont):
return cont.plug_reduce(vals, env)
def return_void(env, cont):
return return_value(values.w_void, env, cont)
class Cell(AST):
_immutable_fields_ = ["expr", "need_cell_flags[*]"]
visitable = True
def __init__(self, expr, need_cell_flags=None):
if need_cell_flags is None:
need_cell_flags = [True]
self.expr = expr
self.need_cell_flags = need_cell_flags
def interpret(self, env, cont):
return self.expr, env, CellCont(self, env, cont)
def direct_children(self):
return [self.expr]
def _tostring(self):
return "Cell(%s)"%self.expr.tostring()
def write(self, port, env):
port.write("Cell(")
self.expr.write(port, env)
port.write(" . %s)" % self.need_cell_flags)
class Quote(AST):
_immutable_fields_ = ["w_val"]
visitable = True
simple = True
ispure = True
def __init__ (self, w_val):
self.w_val = w_val
def interpret_simple(self, env):
return self.w_val
def direct_children(self):
return []
def _tostring(self):
if (isinstance(self.w_val, values.W_Bool) or
isinstance(self.w_val, values.W_Number) or
isinstance(self.w_val, values_string.W_String)):
return "%s" % self.w_val.tostring()
return "'%s" % self.w_val.tostring()
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(quote ")
write_loop(self.w_val, port, env)
port.write(")")
def to_sexp(self):
q_sym = values.W_Symbol.make("quote")
return values.W_Cons.make(q_sym, values.W_Cons.make(self.w_val, values.w_null))
class QuoteSyntax(AST):
_immutable_fields_ = ["w_val"]
visitable = True
simple = True
ispure = True
def __init__ (self, w_val):
self.w_val = w_val
def interpret_simple(self, env):
from pycket.prims.correlated import W_Correlated
return W_Correlated(self.w_val, values.w_false, {})
def direct_children(self):
return []
def _tostring(self):
return "#'%s" % self.w_val.tostring()
def to_sexp(self):
qs_sym = values.W_Symbol.make("quote-syntax")
return values.W_Cons.make(qs_sym, values.W_Cons.make(self.w_val.to_sexp(), values.w_null))
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(quote-syntax ")
write_loop(self.w_val, port, env)
port.write(")")
class VariableReference(AST):
_immutable_fields_ = ["var", "is_mut", "path", "unsafe"]
visitable = True
simple = True
def __init__ (self, var, path, is_mut=False, unsafe=False):
self.var = var
self.path = path
self.is_mut = is_mut
self.unsafe = unsafe
def is_mutable(self, env):
if self.is_mut:
return True
var = self.var
if isinstance(var, ModuleVar):
return var.is_mutable(env)
else:
return False
def interpret_simple(self, env):
instance_var_sym = values.W_Symbol.make("instance-variable-reference")
try:
instance = env.toplevel_env().toplevel_lookup(instance_var_sym)
except SchemeException:
instance = None
return values.W_VariableReference(self, instance)
def direct_children(self):
return []
def _tostring(self):
return "#<#%variable-reference>"
def to_sexp(self):
from pycket.values_string import W_String
vr_sym = values.W_Symbol.make("#%variable-reference")
var_sexp = self.var.to_sexp() if self.var else values.w_false
path_sexp = values.w_false
if isinstance(self.path, str):
path_sexp = W_String.fromascii(self.path)
mut_sexp = values.w_true if self.is_mut else values.w_false
return values.to_list([vr_sym, var_sexp, path_sexp, mut_sexp])
def write(self, port, env):
port.write("(#%variable-reference ")
if self.var:
self.var.write(port, env)
else:
port.write(" #f")
if self.path:
port.write(" %s " % self.path)
else:
port.write(" #f")
if self.is_mut:
port.write(" #t")
else:
port.write(" #f")
port.write(")")
class WithContinuationMark(AST):
_immutable_fields_ = ["key", "value", "body"]
visitable = True
def __init__(self, key, value, body):
self.key = key
self.value = value
self.body = body
def _tostring(self):
return "(with-continuation-mark %s %s %s)"%(self.key.tostring(),
self.value.tostring(),
self.body.tostring())
def direct_children(self):
return [self.key, self.value, self.body]
def interpret(self, env, cont):
return self.key, env, WCMKeyCont(self, env, cont)
def normalize(self, context):
key = Context.normalize_term(self.key)
value = Context.normalize_term(self.value)
body = Context.normalize_term(self.body)
result = WithContinuationMark(key, value, body)
return context.plug(result)
def to_sexp(self):
wcm_sym = values.W_Symbol.make("with-continuation-mark")
assert self.key and self.value and self.body
return values.to_list([wcm_sym, self.key.to_sexp(), self.value.to_sexp(), self.body.to_sexp()])
def write(self, port, env):
port.write("(with-continuation-mark ")
if self.key:
self.key.write(port, env)
port.write(" ")
if self.value:
self.value.write(port, env)
port.write(" ")
if self.body:
self.body.write(port, env)
port.write(" ")
port.write(")")
class App(AST):
_immutable_fields_ = ["rator", "rands[*]", "env_structure"]
visitable = True
def __init__ (self, rator, rands, env_structure=None):
self.rator = rator
self.rands = rands
self.env_structure = env_structure
@staticmethod
def make(rator, rands, env_structure=None):
if isinstance(rator, ModuleVar) and rator.is_primitive():
try:
w_prim = rator._lookup_primitive()
except SchemeException:
pass
else:
if isinstance(w_prim, values.W_PrimSimple1) and len(rands) == 1:
return SimplePrimApp1(rator, rands, env_structure, w_prim)
if isinstance(w_prim, values.W_PrimSimple2) and len(rands) == 2:
return SimplePrimApp2(rator, rands, env_structure, w_prim)
return App(rator, rands, env_structure)
def direct_children(self):
return [self.rator] + self.rands
# Let conversion ensures that all the participants in an application
# are simple.
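# (Illustrative note: thanks to that invariant, interpret below can call
# interpret_simple on the rator and on each rand directly; no continuation
# is allocated for argument evaluation.)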
@jit.unroll_safe
def interpret(self, env, cont):
rator = self.rator
if (not env.pycketconfig().callgraph and
isinstance(rator, ModuleVar) and
rator.is_primitive()):
self.set_should_enter() # to jit downrecursion
try:
w_callable = rator.interpret_simple(env)
args_w = [None] * len(self.rands)
for i, rand in enumerate(self.rands):
args_w[i] = rand.interpret_simple(env)
if isinstance(w_callable, values.W_PromotableClosure):
# fast path
jit.promote(w_callable)
w_callable = w_callable.closure
except SchemeException as exn:
return convert_runtime_exception(exn, env, cont)
except OSError as exn:
return convert_os_error(exn, env, cont)
return w_callable.call_with_extra_info(args_w, env, cont, self)
def normalize(self, context):
context = Context.AppRator(self.rands, context)
return Context.normalize_name(self.rator, context, hint="AppRator")
def _tostring(self):
elements = [self.rator] + self.rands
return "(%s)" % " ".join([r.tostring() for r in elements])
def to_sexp(self):
rator_sexp = self.rator.to_sexp()
rands_sexp = values.w_null
for rand in reversed(self.rands):
rands_sexp = values.W_Cons.make(rand.to_sexp(), rands_sexp)
return values.W_Cons.make(rator_sexp, rands_sexp)
def write(self, port, env):
port.write("(")
self.rator.write(port, env)
for r in self.rands:
port.write(" ")
r.write(port, env)
port.write(")")
class SimplePrimApp1(App):
_immutable_fields_ = ['w_prim', 'rand1']
simple = True
visitable = False
def __init__(self, rator, rands, env_structure, w_prim):
App.__init__(self, rator, rands, env_structure)
assert len(rands) == 1
self.rand1, = rands
self.w_prim = w_prim
def normalize(self, context):
context = Context.AppRand(self.rator, context)
return Context.normalize_names(self.rands, context)
def run(self, env):
result = self.w_prim.simple1(self.rand1.interpret_simple(env))
if result is None:
result = values.w_void
return result
def interpret_simple(self, env):
return check_one_val(self.run(env))
def interpret(self, env, cont):
if not env.pycketconfig().callgraph:
self.set_should_enter() # to jit downrecursion
try:
result = self.run(env)
except SchemeException as exn:
return convert_runtime_exception(exn, env, cont)
except OSError as exn:
return convert_os_error(exn, env, cont)
return return_multi_vals_direct(result, env, cont)
class SimplePrimApp2(App):
_immutable_fields_ = ['w_prim', 'rand1', 'rand2']
simple = True
visitable = False
def __init__(self, rator, rands, env_structure, w_prim):
App.__init__(self, rator, rands, env_structure)
assert len(rands) == 2
self.rand1, self.rand2 = rands
self.w_prim = w_prim
def normalize(self, context):
context = Context.AppRand(self.rator, context)
return Context.normalize_names(self.rands, context)
def run(self, env):
arg1 = self.rand1.interpret_simple(env)
arg2 = self.rand2.interpret_simple(env)
result = self.w_prim.simple2(arg1, arg2)
if result is None:
result = values.w_void
return result
def interpret_simple(self, env):
return check_one_val(self.run(env))
def interpret(self, env, cont):
if not env.pycketconfig().callgraph:
self.set_should_enter() # to jit downrecursion
try:
result = self.run(env)
except SchemeException as exn:
return convert_runtime_exception(exn, env, cont)
except OSError as exn:
return convert_os_error(exn, env, cont)
return return_multi_vals_direct(result, env, cont)
class SequencedBodyAST(AST):
_immutable_fields_ = ["body[*]", "counting_asts[*]",
"_sequenced_env_structure",
"_sequenced_remove_num_envs[*]"]
visitable = False
_sequenced_env_structure = None
_sequenced_remove_num_envs = None
def __init__(self, body, counts_needed=-1, sequenced_env_structure=None, sequenced_remove_num_envs=None):
from rpython.rlib.debug import make_sure_not_resized
assert isinstance(body, list)
assert len(body) > 0
self.body = body
make_sure_not_resized(self.body)
if counts_needed < len(self.body) + 1:
counts_needed = len(self.body) + 1
self.counting_asts = [
CombinedAstAndIndex(self, i)
for i in range(counts_needed)]
def init_body_pruning(self, env_structure, remove_num_envs):
self._sequenced_env_structure = env_structure
self._sequenced_remove_num_envs = remove_num_envs
def copy_body_pruning(self, other):
assert isinstance(other, SequencedBodyAST)
self._sequenced_env_structure = other._sequenced_env_structure
self._sequenced_remove_num_envs = other._sequenced_remove_num_envs
@staticmethod
def _check_environment_consistency(env, env_structure):
if objectmodel.we_are_translated():
return
if env_structure is None:
assert isinstance(env, ToplevelEnv)
else:
env_structure.check_plausibility(env)
@jit.unroll_safe
def _prune_sequenced_envs(self, env, i=0):
env_structure = self._sequenced_env_structure
if env_structure is None:
return env
if i:
already_pruned = self._sequenced_remove_num_envs[i - 1]
for j in range(already_pruned):
env_structure = env_structure.prev
else:
already_pruned = 0
self._check_environment_consistency(env, env_structure)
for i in range(self._sequenced_remove_num_envs[i] - already_pruned):
env = env.get_prev(env_structure)
env_structure = env_structure.prev
return env
@objectmodel.always_inline
def make_begin_cont(self, env, prev, i=0):
jit.promote(self)
jit.promote(i)
if not i:
env = self._prune_sequenced_envs(env, 0)
if i == len(self.body) - 1:
return self.body[i], env, prev
else:
new_env = self._prune_sequenced_envs(env, i + 1)
return self.body[i], env, BeginCont(
self.counting_asts[i + 1], new_env, prev)
class Begin0(SequencedBodyAST):
_immutable_fields_ = ["first"]
visitable = True
@staticmethod
def make(first, rest):
rest = remove_pure_ops(rest, always_last=False)
if rest:
return Begin0(first, rest)
return first
def __init__(self, fst, rst):
assert isinstance(rst, list)
SequencedBodyAST.__init__(self, rst)
self.first = fst
def direct_children(self):
return [self.first] + self.body
def _tostring(self):
body = [self.first.tostring()] + [b.tostring() for b in self.body]
return "(begin0 %s)" % " ".join(body)
def normalize(self, context):
first = Context.normalize_term(self.first)
body = [Context.normalize_term(b) for b in self.body]
result = Begin0(first, body)
return context.plug(result)
def interpret(self, env, cont):
return self.first, env, Begin0Cont(self, env, cont)
def to_sexp(self):
beg0_sym = values.W_Symbol.make("begin0")
return values.to_list([beg0_sym] + [b.to_sexp() for b in self.direct_children()])
def write(self, port, env):
port.write("(begin0 ")
self.first.write(port, env)
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
@specialize.call_location()
def remove_pure_ops(ops, always_last=True):
""" The specialize annotation is to allow handling of resizable and non-resizable
lists as arguments. """
if always_last:
last = len(ops) - 1
return [op for i, op in enumerate(ops) if not op.ispure or i == last]
else:
return [op for i, op in enumerate(ops) if not op.ispure]
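# Illustrative example: for a begin body [Quote(1), App(f, ...), Quote(2)],
# remove_pure_ops keeps [App(f, ...), Quote(2)] -- pure expressions are
# dropped except the last one, whose value is the result of the begin.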
class Begin(SequencedBodyAST):
visitable = True
@staticmethod
def make(body):
body = remove_pure_ops(body)
if len(body) == 1:
return body[0]
# Flatten nested begin expressions
flattened = []
for b in body:
if isinstance(b, Begin):
for inner in b.body:
flattened.append(inner)
else:
flattened.append(b)
body = remove_pure_ops(flattened)
# Convert (begin (let ([...]) letbody) rest ...) =>
# (let ([...]) letbody ... rest ...)
b0 = body[0]
if isinstance(b0, Let):
rest = body[1:]
letbody = b0.body
letargs = b0._rebuild_args()
letrhss = b0.rhss
return make_let(letargs, letrhss, letbody + rest)
return Begin(body)
def direct_children(self):
return self.body
@objectmodel.always_inline
def interpret(self, env, cont):
return self.make_begin_cont(env, cont)
def normalize(self, context):
body = [Context.normalize_term(b) for b in self.body]
result = Begin.make(body)
return context.plug(result)
def _tostring(self):
return "(begin %s)" % (" ".join([e.tostring() for e in self.body]))
def to_sexp(self):
begin_sym = values.W_Symbol.make("begin")
return values.to_list([begin_sym] + [b.to_sexp() for b in self.body])
def write(self, port, env):
port.write("(begin ")
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
class BeginForSyntax(AST):
_immutable_fields_ = ["body[*]"]
visitable = True
simple = True
def __init__(self, body):
self.body = body
def direct_children(self):
return self.body[:]
def interpret_simple(self, env):
return values.w_void
def _tostring(self):
return "(begin-for-syntax %s)" % " ".join([b.tostring() for b in self.body])
def to_sexp(self):
bfs_sym = values.W_Symbol.make("begin-for-syntax")
return values.to_list([bfs_sym] + [b.to_sexp() for b in self.body])
def write(self, port, env):
port.write("(begin-for-syntax ")
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
class Var(AST):
_immutable_fields_ = ["sym", "env_structure"]
simple = True
ispure = True
def __init__ (self, sym, env_structure=None):
assert isinstance(sym, values.W_Symbol)
self.sym = sym
self.env_structure = env_structure
def interpret_simple(self, env):
val = self._lookup(env)
if val is None:
raise SchemeException("%s: undefined" % self.sym.tostring())
return val
def direct_children(self):
return []
def _free_vars(self, cache):
return SymbolSet.singleton(self.sym)
def _tostring(self):
return "%s" % self.sym.variable_name()
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.sym, port, env)
def to_sexp(self):
return self.sym
class CellRef(Var):
simple = True
visitable = True
def _tostring(self):
return "CellRef(%s)" % Var._tostring(self)
def _set(self, w_val, env):
v = env.lookup(self.sym, self.env_structure)
assert isinstance(v, values.W_Cell)
v.set_val(w_val)
def _lookup(self, env):
v = env.lookup(self.sym, self.env_structure)
assert isinstance(v, values.W_Cell)
return v.get_val()
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.sym, port, env)
class GensymCounter(object):
_attrs_ = ['_val']
def __init__(self, val=0):
self._val = val
def next_value(self):
val = self._val
self._val += 1
return val
class Gensym(object):
_counters = {}
@staticmethod
@jit.elidable
def get_counter(hint):
result = Gensym._counters.get(hint, None)
if result is not None:
return result
result = GensymCounter()
Gensym._counters[hint] = result
return result
@staticmethod
def gensym(hint="g"):
counter = Gensym.get_counter(hint)
count = counter.next_value()
return values.W_Symbol(hint + str(count))
# Same as ToplevelVar(is_free=False).
# It's better to have LinkletVars refer only to W_LinkletVar.
class LinkletVar(Var):
visitable = True
_immutable_fields_ = ["sym"]
def __init__(self, sym):
self.sym = sym
def tostring(self):
return "(LinkletVar %s)" % (self.sym.tostring())
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.sym, port, env)
def _free_vars(self, cache):
return SymbolSet.EMPTY()
def _set(self, w_val, env):
env.toplevel_env().toplevel_set(self.sym, w_val)
def _lookup(self, env):
return env.toplevel_env().toplevel_lookup(self.sym)
class LinkletStaticVar(LinkletVar):
_immutable_fields_ = ["sym", "w_value?"]
def __init__(self, sym):
LinkletVar.__init__(self, sym)
self.w_value = None
def _set(self, w_val, env):
env.toplevel_env().toplevel_set(self.sym, w_val)
self.w_value = w_val
def _lookup(self, env):
if self.w_value:
return self.w_value
self.w_value = env.toplevel_env().toplevel_lookup(self.sym)
return self.w_value
class LexicalVar(Var):
visitable = True
def _lookup(self, env):
return env.lookup(self.sym, self.env_structure)
def _set(self, w_val, env):
assert 0
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.sym, port, env)
class ModuleVar(Var):
_immutable_fields_ = ["modenv?", "sym", "srcmod", "srcsym", "w_value?", "path[*]"]
visitable = True
def __init__(self, sym, srcmod, srcsym, path=None):
Var.__init__(self, sym)
self.srcmod = srcmod
self.srcsym = srcsym
self.path = path
self.modenv = None
self.w_value = None
def _free_vars(self, cache):
return SymbolSet.EMPTY()
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.srcsym, port, env)
def _lookup(self, env):
w_res = self.w_value
if w_res is None:
if self.modenv is None:
self.modenv = env.toplevel_env().module_env
self.w_value = w_res = self._elidable_lookup()
if type(w_res) is values.W_Cell:
return w_res.get_val()
else:
return w_res
def is_mutable(self, env):
if self.modenv is None:
self.modenv = env.toplevel_env().module_env
v = self._elidable_lookup()
return isinstance(v, values.W_Cell)
@jit.elidable
def is_primitive(self):
return self.srcmod is not None and is_builtin_module(self.srcmod)
@jit.elidable
def _elidable_lookup(self):
assert self.modenv
modenv = self.modenv
if self.srcmod is None:
mod = modenv.current_module
elif self.is_primitive():
return self._lookup_primitive()
else:
mod = modenv._find_module(self.srcmod)
if mod is None:
raise SchemeException("can't find module %s for %s" % (self.srcmod, self.srcsym.tostring()))
return mod.resolve_submodule_path(self.path).lookup(self.srcsym)
def _lookup_primitive(self):
# we don't separate these the way racket does
# but maybe we should
try:
return prim_env[self.srcsym]
except KeyError:
raise SchemeException("can't find primitive %s" % (self.srcsym.tostring()))
def _set(self, w_val, env):
if self.modenv is None:
self.modenv = env.toplevel_env().module_env
v = self._elidable_lookup()
assert isinstance(v, values.W_Cell)
v.set_val(w_val)
class ToplevelVar(Var):
visitable = True
def __init__(self, sym, env_structure=None, is_free=True):
Var.__init__(self, sym, env_structure)
self.is_free = is_free
def _free_vars(self, cache):
if self.is_free:
return SymbolSet.singleton(self.sym)
return SymbolSet.EMPTY()
def _lookup(self, env):
return env.toplevel_env().toplevel_lookup(self.sym)
def _set(self, w_val, env):
env.toplevel_env().toplevel_set(self.sym, w_val)
def write(self, port, env):
from pycket.prims.input_output import write_loop
write_loop(self.sym, port, env)
class SetBang(AST):
_immutable_fields_ = ["var", "rhs"]
visitable = True
simple = True
def __init__(self, var, rhs):
self.var = var
self.rhs = rhs
def interpret_simple(self, env):
w_val = self.rhs.interpret_simple(env)
self.var._set(w_val, env)
return values.w_void
def _mutated_vars(self, cache):
x = self.rhs.mutated_vars(cache)
var = self.var
if isinstance(var, CellRef):
x[LexicalVar(self.var.sym)] = None
# even though we don't change these to cell refs, we still
# have to convert the definitions
elif isinstance(var, LinkletVar):
x[var] = None
elif isinstance(var, ModuleVar):
x[var] = None
# do nothing for top-level vars, they're all mutated
return x
def direct_children(self):
return [self.var, self.rhs]
def normalize(self, context):
context = Context.SetBang(self.var, context)
return Context.normalize_name(self.rhs, context, hint="SetBang")
def _tostring(self):
return "(set! %s %s)" % (self.var.tostring(), self.rhs.tostring())
def to_sexp(self):
set_sym = values.W_Symbol.make("set!")
return values.to_list([set_sym, self.var.to_sexp(), self.rhs.to_sexp()])
def write(self, port, env):
port.write("(set! ")
self.var.write(port, env)
port.write(" ")
self.rhs.write(port, env)
port.write(")")
class If(AST):
_immutable_fields_ = ["tst", "thn", "els"]
visitable = True
def __init__(self, tst, thn, els):
self.tst = tst
self.thn = thn
self.els = els
@staticmethod
def make(tst, thn, els):
if isinstance(tst, Quote):
if tst.w_val is values.w_false:
return els
else:
return thn
return If(tst, thn, els)
@objectmodel.always_inline
def interpret(self, env, cont):
w_val = self.tst.interpret_simple(env)
if w_val is values.w_false:
return self.els, env, cont
else:
return self.thn, env, cont
def direct_children(self):
return [self.tst, self.thn, self.els]
def normalize(self, context):
context = Context.If(self.thn, self.els, context)
return Context.normalize_name(self.tst, context, hint="if")
def _tostring(self):
return "(if %s %s %s)" % (self.tst.tostring(), self.thn.tostring(), self.els.tostring())
def to_sexp(self):
if_sym = values.W_Symbol.make("if")
return values.to_list([if_sym, self.tst.to_sexp(), self.thn.to_sexp(), self.els.to_sexp()])
def write(self, port, env):
port.write("(if ")
self.tst.write(port, env)
port.write(" ")
self.thn.write(port, env)
port.write(" ")
self.els.write(port, env)
port.write(")")
def make_lambda(formals, rest, body, sourceinfo=None):
"""
Create a λ-node after computing information about the free variables
in the body. The 'args' field stores both the function arguments and
the free variables in a SymList.
The 'args' SymList holds the expected environment structure for the body of
the λ-expression.
"""
body = remove_pure_ops(body)
args = SymList(formals + ([rest] if rest else []))
frees = SymList(free_vars_lambda(body, args, {}).keys())
args = SymList(args.elems, frees)
return Lambda(formals, rest, args, frees, body, sourceinfo=sourceinfo)
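# Illustrative example: for (lambda (x) (+ x y)) where + resolves to a module
# primitive, formals = [x] and rest = None; free_vars_lambda returns {y}
# (a ModuleVar contributes no free variables, and x is removed), so the final
# args is the SymList [x] chained to the frees SymList [y] -- the environment
# shape the body expects.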
def free_vars_lambda(body, args, cache):
x = SymbolSet.EMPTY()
for b in body:
x = x.union(b.free_vars(cache))
x = x.without_many(args.elems)
return x
class CaseLambda(AST):
_immutable_fields_ = ["lams[*]", "any_frees", "recursive_sym", "w_closure_if_no_frees?", "_arity"]
visitable = True
simple = True
ispure = True
def __init__(self, lams, recursive_sym=None, arity=None):
## TODO: drop lams whose arity is redundant
## (case-lambda [x 0] [(y) 1]) == (lambda x 0)
self.lams = lams
self.any_frees = False
for l in lams:
frees = l.frees.elems
if frees and frees != [recursive_sym]:
self.any_frees = True
break
self._closurerepr = None
self.w_closure_if_no_frees = None
self.recursive_sym = recursive_sym
self._arity = arity
self.compute_arity()
@jit.unroll_safe
def enable_jitting(self):
for l in self.lams:
l.enable_jitting()
def make_recursive_copy(self, sym):
return CaseLambda(self.lams, sym, self._arity)
def interpret_simple(self, env):
if not env.pycketconfig().callgraph:
self.enable_jitting() # XXX not perfectly pretty
if not self.any_frees:
# cache closure if there are no free variables and the toplevel env
# is the same as last time
w_closure = self.w_closure_if_no_frees
if w_closure is None or (len(self.lams) > 0 and w_closure.closure._get_list(0).toplevel_env() is not env.toplevel_env()):
w_closure = values.W_PromotableClosure(self, env.toplevel_env())
self.w_closure_if_no_frees = w_closure
return w_closure
return values.W_Closure.make(self, env)
def _free_vars(self, cache):
# call _free_vars() to avoid populating the free vars cache
result = AST._free_vars(self, cache)
if self.recursive_sym is not None:
result = result.without(self.recursive_sym)
return result
def direct_children(self):
# the copy is needed for weird annotator reasons that I don't understand :-(
return [l for l in self.lams]
def _tostring(self):
if len(self.lams) == 1:
return self.lams[0].tostring()
r_sym_str = self.recursive_sym.tostring() if self.recursive_sym else ""
return "(case-lambda (recursive-sym %s) %s)" % (r_sym_str, " ".join([l.tostring() for l in self.lams]))
def to_sexp(self):
case_sym = values.W_Symbol.make("case-lambda")
rec_sym = values.W_Symbol.make("recursive-sym")
rec_ls = [rec_sym, self.recursive_sym.to_sexp()] if self.recursive_sym else [rec_sym]
rec_sexp = values.to_list(rec_ls)
lams_ls = [l.to_sexp() for l in self.lams]
return values.to_list([case_sym, rec_sexp] + lams_ls)
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(case-lambda (recursive-sym ")
if self.recursive_sym:
write_loop(self.recursive_sym, port, env)
port.write(")")
for l in self.lams:
port.write(" ")
l.write(port, env)
port.write(")")
@jit.elidable_promote('all')
def tostring_as_closure(self):
_closurerepr = self._closurerepr
if _closurerepr is None:
_closurerepr = self._closurerepr = self._tostring_as_closure()
return _closurerepr
def _tostring_as_closure(self):
if len(self.lams) == 0:
return "#<procedure>"
lam = self.lams[0]
assert isinstance(lam, Lambda)
info = lam.sourceinfo
file, pos = info.sourcefile, info.position
if file and pos >= 0:
return "#<procedure:%s:%s>" % (file, pos)
if file is not None:
return "#<procedure:%s>" % file
return "#<procedure>"
def get_arity(self):
return self._arity
def compute_arity(self):
if self._arity is not None:
return
arities = []
rest = -1
for l in self.lams:
n = l.get_arity()
if n < 0:
r = (-n - 1)
if rest >= 0:
rest = min(r, rest)
else:
rest = r
else:
arities = arities + [n]
self._arity = Arity(arities[:], rest)
def normalize(self, context):
lams = [Context.normalize_term(lam, expect=Lambda) for lam in self.lams]
result = CaseLambda(lams, recursive_sym=self.recursive_sym, arity=self._arity)
return context.plug(result)
class Lambda(SequencedBodyAST):
_immutable_fields_ = ["formals[*]", "rest", "args",
"frees", "enclosing_env_structure", "env_structure",
"sourceinfo"]
visitable = True
simple = True
ispure = True
import_from_mixin(BindingFormMixin)
def __init__ (self, formals, rest, args, frees, body, sourceinfo=None, enclosing_env_structure=None, env_structure=None):
SequencedBodyAST.__init__(self, body)
self.sourceinfo = sourceinfo
self.formals = formals
self.rest = rest
self.args = args
self.frees = frees
self.enclosing_env_structure = enclosing_env_structure
self.env_structure = env_structure
for b in self.body:
b.set_surrounding_lambda(self)
def init_arg_cell_flags(self, args_need_cell_flags):
if True in args_need_cell_flags:
self.args_need_cell_flags = args_need_cell_flags
def enable_jitting(self):
self.body[0].set_should_enter()
def can_enter(self):
return self.body[0].should_enter
# returns n for fixed arity, -(n+1) for arity-at-least n
# my kingdom for Either
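# Illustrative: (lambda (x y) ...) has two formals and no rest, so this
# returns 2; (lambda (x . r) ...) has one formal plus a rest argument, so it
# returns -(1+1) = -2, i.e. "arity at least 1".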
def get_arity(self):
if self.rest:
return -(len(self.formals)+1)
else:
return len(self.formals)
def interpret_simple(self, env):
assert False # unreachable
def direct_children(self):
return self.body[:]
def set_surrounding_lambda(self, lam):
self.surrounding_lambda = lam
# don't recurse
def _mutated_vars(self, cache):
x = variable_set()
for b in self.body:
x.update(b.mutated_vars(cache))
for v in self.args.elems:
lv = LexicalVar(v)
if lv in x:
del x[lv]
return x
def _free_vars(self, cache):
return free_vars_lambda(self.body, self.args, cache)
@jit.unroll_safe
def _has_mutable_args(self):
if self.args_need_cell_flags is None:
return False
for flag in self.args_need_cell_flags:
if flag:
return True
return False
def _is_mutable_arg(self, i):
return self.args_need_cell_flags is not None and self.args_need_cell_flags[i]
@jit.unroll_safe
def match_args(self, args):
fmls_len = len(self.formals)
args_len = len(args)
if fmls_len != args_len and self.rest is None:
return None
if fmls_len > args_len:
return None
if self.rest is None:
if not self.binds_mutable_var():
return args
numargs = fmls_len
else:
numargs = fmls_len + 1
actuals = [None] * numargs
for i in range(fmls_len):
actuals[i] = self.wrap_value(args[i], i)
if self.rest is None:
return actuals
rest = values.to_list(args, start=fmls_len)
actuals[-1] = self.wrap_value(rest, -1)
return actuals
def raise_nice_error(self, args):
fmls_len = len(self.formals)
args_len = len(args)
if fmls_len != args_len and not self.rest:
raise SchemeException(
"wrong number of arguments to %s, expected %s but got %s" % (
self.tostring(), fmls_len, args_len))
if fmls_len > args_len:
raise SchemeException(
"wrong number of arguments to %s, expected at least %s but got %s" % (
self.tostring(), fmls_len, args_len))
@jit.unroll_safe
def collect_frees(self, recursive_sym, env, closure):
for s in self.frees.elems:
assert isinstance(s, values.W_Symbol)
vals = [None] * len(self.frees.elems)
for j, v in enumerate(self.frees.elems):
if v is recursive_sym:
vals[j] = closure
else:
vals[j] = env.lookup(v, self.enclosing_env_structure)
return vals
@jit.unroll_safe
def collect_frees_without_recursive(self, recursive_sym, env):
num_vals = len(self.frees.elems)
if recursive_sym is not None and self.frees.contains_sym(recursive_sym):
num_vals -= 1
vals = [None] * num_vals
i = 0
for v in self.frees.elems:
if v is not recursive_sym:
vals[i] = env.lookup(v, self.enclosing_env_structure)
i += 1
return vals
def normalize(self, context):
body = [Context.normalize_term(b) for b in self.body]
result = Lambda(self.formals, self.rest, self.args, self.frees, body,
sourceinfo=self.sourceinfo,
enclosing_env_structure=self.enclosing_env_structure,
env_structure=self.env_structure)
return context.plug(result)
def _tostring(self):
if self.rest and not self.formals:
return "(lambda %s %s)" % (self.rest.tostring(), [b.tostring() for b in self.body])
if self.rest:
fmls = " ".join([v.variable_name() for v in self.formals])
return "(lambda (%s . %s) %s)" % (fmls, self.rest.tostring(), [b.tostring() for b in self.body])
else:
return "(lambda (%s) %s)" % (
" ".join([v.variable_name() for v in self.formals]),
self.body[0].tostring() if len(self.body) == 1 else
" ".join([b.tostring() for b in self.body]))
def to_sexp(self):
lam_sym = values.W_Symbol.make("lambda")
if self.rest and not self.formals:
args_sexp = self.rest.to_sexp()
elif self.rest:
args_sexp = self.rest.to_sexp()
for f in reversed(self.formals):
args_sexp = values.W_Cons.make(f.to_sexp(), args_sexp)
else:
args_sexp = values.to_list(self.formals)
body_ls = [b.to_sexp() for b in self.body]
return values.to_list([lam_sym, args_sexp] + body_ls)
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(lambda")
port.write(" ")
if self.rest and not self.formals:
write_loop(self.rest, port, env)
port.write(" ")
elif self.rest:
port.write("(")
for f in self.formals:
write_loop(f, port, env)
port.write(" ")
port.write(".")
port.write(" ")
write_loop(self.rest, port, env)
port.write(")")
else:
port.write("(")
for f in self.formals:
write_loop(f, port, env)
port.write(" ")
port.write(")")
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
class CombinedAstAndIndex(AST):
_immutable_fields_ = ["ast", "index"]
def __init__(self, ast, index):
self.ast = ast
self.index = index
self.combinations = None
@specialize.arg(1)
def unpack(self, cls):
jit.promote(self)
ast = self.ast
assert isinstance(ast, cls)
return ast, self.index
@jit.elidable
def combine(self, other):
key = (self, other)
if self.combinations is None:
self.combinations = {}
result = self.combinations.get(key, None)
if result is None:
result = CombinedAstAndAst(self, other)
self.combinations[key] = result
return result
def _tostring(self):
return "<%s of %s>" % (self.index, self.ast.tostring())
class CombinedAstAndAst(AST):
_immutable_fields_ = ["ast1", "ast2"]
def __init__(self, ast1, ast2):
self.ast1 = ast1
self.ast2 = ast2
def unpack(self):
jit.promote(self)
ast1 = self.ast1
ast2 = self.ast2
return ast1, ast2
class Letrec(SequencedBodyAST):
_immutable_fields_ = ["args", "rhss[*]", "counts[*]", "total_counts[*]"]
visitable = True
def __init__(self, args, counts, rhss, body):
assert len(counts) > 0 # otherwise just use a begin
assert isinstance(args, SymList)
SequencedBodyAST.__init__(self, body, counts_needed=len(rhss))
self.counts = counts
total_counts = [0] * len(counts)
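        # Prefix sums: total_counts[i] is the offset of the first value slot
        # owned by the i-th binding group.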
total_count = 0
for i, count in enumerate(counts):
total_counts[i] = total_count
total_count += count
self.total_counts = total_counts
self.rhss = rhss
self.args = args
@jit.unroll_safe
def interpret(self, env, cont):
n_elems = len(self.args.elems)
env_new = ConsEnv.make_n(n_elems, env)
if n_elems:
assert isinstance(env_new, ConsEnv)
for i in range(n_elems):
env_new._set_list(i, values.W_Cell(None))
return self.rhss[0], env_new, LetrecCont(self.counting_asts[0], env_new, cont)
def direct_children(self):
return self.rhss + self.body
def _mutated_vars(self, cache):
x = variable_set()
for b in self.body + self.rhss:
x.update(b.mutated_vars(cache))
for v in self.args.elems:
lv = LexicalVar(v)
x[lv] = None
return x
def _free_vars(self, cache):
x = AST._free_vars(self, cache)
x = x.without_many(self.args.elems)
return x
def normalize(self, context):
# XXX could we do something smarter here?
args = self._rebuild_args()
rhss = [Context.normalize_term(rhs) for rhs in self.rhss]
body = [Context.normalize_term(b) for b in self.body]
result = make_letrec(args, rhss, body)
return context.plug(result)
def _rebuild_args(self):
start = 0
result = [None] * len(self.counts)
for i, c in enumerate(self.counts):
result[i] = self.args.elems[start:start+c]
start += c
return result
def _tostring(self):
varss = self._rebuild_args()
bindings = [None] * len(varss)
for i, vars in enumerate(varss):
lhs = "(%s)" % " ".join([v.variable_name() for v in vars])
rhs = self.rhss[i].tostring()
bindings[i] = "[%s %s]" % (lhs, rhs)
bindings = " ".join(bindings)
body = " ".join([b.tostring() for b in self.body])
return "(letrec (%s) %s)" % (bindings, body)
def to_sexp(self):
letrec_sym = values.W_Symbol.make("letrec-values")
all_bindings_ls = [None]*len(self.counts)
total = 0
for i, count in enumerate(self.counts):
binding_ls = [None]*count
for k in range(count):
binding_ls[k] = self.args.elems[total+k]
total += count
current_bindings_sexp = values.to_list(binding_ls)
current_rhs_sexp = self.rhss[i].to_sexp()
current_ids_ = values.W_Cons.make(current_rhs_sexp, values.w_null)
current_ids = values.W_Cons.make(current_bindings_sexp, current_ids_)
all_bindings_ls[i] = current_ids
all_bindings = values.to_list(all_bindings_ls)
body_ls = [b.to_sexp() for b in self.body]
return values.to_list([letrec_sym, all_bindings] + body_ls)
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(letrec-values (")
j = 0
for i, count in enumerate(self.counts):
port.write("(")
port.write("(")
for k in range(count):
if k > 0:
port.write(" ")
write_loop(self.args.elems[j], port, env)
j += 1
port.write(")")
port.write(" ")
self.rhss[i].write(port, env)
port.write(")")
port.write(")")
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
def _make_symlist_counts(varss):
counts = []
argsl = []
for vars in varss:
counts.append(len(vars))
argsl.extend(vars)
argsl = argsl[:] # copy to make fixed-size
return SymList(argsl), counts
def make_let(varss, rhss, body):
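    # Merge directly nested lets whenever the inner let's right-hand sides do
    # not reference the outer bindings; this flattens the environment chain.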
if not varss:
return Begin.make(body)
body = remove_pure_ops(body)
if len(body) != 1 or not isinstance(body[0], Let):
return _make_let_direct(varss, rhss, body)
body = body[0]
assert isinstance(body, Let)
for rhs in body.rhss:
frees = rhs.free_vars()
for vars in varss:
for var in vars:
if frees.haskey(var):
return _make_let_direct(varss, rhss, [body])
# At this point, we know the inner let does not
# reference vars in the outer let
varss = varss + body._rebuild_args()
rhss = rhss + body.rhss
body = body.body
return make_let(varss, rhss, body)
def make_let_singlevar(sym, rhs, body):
# Try to convert nested lets into a single let e.g.
# (let ([v1 e1]) (let ([v2 e2]) e3)) => (let ([v1 e1] [v2 e2]) e3)
    # This improves the performance of some of the AST analysis/transformation
# passes and flattens the environment, reducing allocation and pointer hopping.
if len(body) == 1:
b = body[0]
if isinstance(b, Let):
for r in b.rhss:
if r.free_vars().haskey(sym):
break
else:
varss = [[sym]] + b._rebuild_args()
rhss = [rhs] + b.rhss
body = b.body
return make_let(varss, rhss, body)
body = remove_pure_ops(body)
return Let(SymList([sym]), [1], [rhs], body)
def _make_let_direct(varss, rhss, body):
symlist, counts = _make_symlist_counts(varss)
if len(body) == 1:
b = body[0]
if isinstance(b, Begin):
body = b.body
return Let(symlist, counts, rhss, body)
def make_letrec(varss, rhss, body):
if not varss:
return Begin.make(body)
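    # A letrec that binds a single name to a (case-)lambda which never mutates
    # that name reduces to a plain let over a recursive closure.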
if len(varss) == 1 and len(varss[0]) == 1:
rhs = rhss[0]
sym = varss[0][0]
if isinstance(rhs, CaseLambda) and LexicalVar(sym) not in rhs.mutated_vars():
reclambda = rhs.make_recursive_copy(sym)
return make_let_singlevar(sym, reclambda, body)
    # Convert a letrec whose clauses bind no values to a let, since the
    # interpreter optimizes lets better
for vars in varss:
if vars:
break
else:
return make_let(varss, rhss, body)
symlist, counts = _make_symlist_counts(varss)
return Letrec(symlist, counts, rhss, body)
class Let(SequencedBodyAST):
_immutable_fields_ = ["rhss[*]", "args", "counts[*]", "env_speculation_works?", "remove_num_envs[*]"]
visitable = True
import_from_mixin(BindingFormMixin)
def __init__(self, args, counts, rhss, body, remove_num_envs=None):
SequencedBodyAST.__init__(self, body, counts_needed=len(rhss))
assert len(counts) > 0 # otherwise just use a begin
assert isinstance(args, SymList)
self.counts = counts
self.rhss = rhss
self.args = args
self.env_speculation_works = True
if remove_num_envs is None:
remove_num_envs = [0] * (len(rhss) + 1)
self.remove_num_envs = remove_num_envs
@jit.unroll_safe
def _prune_env(self, env, i):
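        # remove_num_envs[i] records how many enclosing environment frames the
        # i-th right-hand side provably never reads, so we can hop past them
        # before evaluating it.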
env_structure = self.args.prev
if i:
# that many were pruned already:
already_pruned = self.remove_num_envs[i - 1]
for j in range(already_pruned):
env_structure = env_structure.prev
else:
already_pruned = 0
self._check_environment_consistency(env, env_structure)
for i in range(self.remove_num_envs[i] - already_pruned):
env = env.get_prev(env_structure)
env_structure = env_structure.prev
return env
@objectmodel.always_inline
def interpret(self, env, cont):
env = self._prune_env(env, 0)
return self.rhss[0], env, LetCont.make(
None, self, 0, env, cont)
def direct_children(self):
return self.rhss + self.body
def _mutated_vars(self, cache):
x = variable_set()
for b in self.body:
x.update(b.mutated_vars(cache))
for v in self.args.elems:
lv = LexicalVar(v)
if lv in x:
del x[lv]
for b in self.rhss:
x.update(b.mutated_vars(cache))
return x
def _free_vars(self, cache):
x = SymbolSet.EMPTY()
for b in self.body:
x = x.union(b.free_vars(cache))
x = x.without_many(self.args.elems)
for b in self.rhss:
x = x.union(b.free_vars(cache))
return x
def normalize(self, context):
args = self._rebuild_args()
body = Begin.make(self.body)
context = Context.Let(args, self.rhss, body, context)
return self.rhss[0], context
def _rebuild_args(self):
start = 0
result = [None] * len(self.counts)
for i, c in enumerate(self.counts):
result[i] = self.args.elems[start:start+c]
start += c
return result
def _tostring(self):
result = ["(let ("]
j = 0
for i, count in enumerate(self.counts):
result.append("[")
if count > 1:
result.append("(")
for k in range(count):
if k > 0:
result.append(" ")
result.append(self.args.elems[j].variable_name())
j += 1
if count > 1:
result.append(")")
result.append(" ")
result.append(self.rhss[i].tostring())
result.append("]")
result.append(") ")
result.append(" ".join([b.tostring() for b in self.body]))
result.append(")")
return "".join(result)
def to_sexp(self):
let_sym = values.W_Symbol.make("let-values")
all_bindings_ls = [None]*len(self.counts)
total = 0
for i, count in enumerate(self.counts):
binding_ls = [None]*count
for k in range(count):
binding_ls[k] = self.args.elems[total+k]
total += count
current_bindings_sexp = values.to_list(binding_ls)
current_rhs_sexp = self.rhss[i].to_sexp()
current_ids_ = values.W_Cons.make(current_rhs_sexp, values.w_null)
current_ids = values.W_Cons.make(current_bindings_sexp, current_ids_)
all_bindings_ls[i] = current_ids
all_bindings = values.to_list(all_bindings_ls)
body_ls = [b.to_sexp() for b in self.body]
return values.to_list([let_sym, all_bindings] + body_ls)
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(let-values (")
j = 0
for i, count in enumerate(self.counts):
port.write("(")
port.write("(")
for k in range(count):
if k > 0:
port.write(" ")
write_loop(self.args.elems[j], port, env)
j += 1
port.write(")")
port.write(" ")
self.rhss[i].write(port, env)
port.write(")")
port.write(")")
for b in self.body:
port.write(" ")
b.write(port, env)
port.write(")")
class DefineValues(AST):
_immutable_fields_ = ["names", "rhs", "display_names"]
visitable = True
def __init__(self, ns, r, display_names):
self.names = ns
self.rhs = r
self.display_names = display_names
def defined_vars(self, defs):
for n in self.names:
defs[n] = None
def interpret(self, env, cont):
return self.rhs.interpret(env, cont)
def direct_children(self):
return [self.rhs]
def normalize(self, context):
rhs = Context.normalize_term(self.rhs)
result = DefineValues(self.names, rhs, self.display_names)
return context.plug(result)
def _tostring(self):
return "(define-values (%s) %s)" % (
' '.join([n.tostring() for n in self.display_names]), self.rhs.tostring())
def to_sexp(self):
dv_sym = values.W_Symbol.make("define-values")
ids = values.w_null
for name in reversed(self.display_names):
ids = values.W_Cons.make(name, ids)
rhs = self.rhs.to_sexp()
rhs_sexp = values.W_Cons.make(rhs, values.w_null)
return values.W_Cons.make(dv_sym, values.W_Cons.make(ids, rhs_sexp))
def write(self, port, env):
from pycket.prims.input_output import write_loop
port.write("(define-values (")
for n in self.names:
port.write(" ")
write_loop(n, port, env)
port.write(") ")
self.rhs.write(port, env)
port.write(")")
def get_printable_location_two_state(green_ast, came_from):
if green_ast is None:
return 'Green_Ast is None'
surrounding = green_ast.surrounding_lambda
if green_ast.should_enter:
return green_ast.tostring() + ' from ' + came_from.tostring()
return green_ast.tostring()
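# Two-state tracing: traces are keyed on both the current AST node and the
# application node control came from, letting the tracing JIT distinguish
# otherwise-shared paths through the interpreter loop.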
driver_two_state = jit.JitDriver(reds=["env", "cont"],
greens=["ast", "came_from"],
get_printable_location=get_printable_location_two_state,
should_unroll_one_iteration=lambda *args : True,
is_recursive=True)
def inner_interpret_two_state(ast, env, cont):
came_from = ast
config = env.pycketconfig()
while True:
driver_two_state.jit_merge_point(ast=ast, came_from=came_from, env=env, cont=cont)
came_from = ast if isinstance(ast, App) else came_from
t = type(ast)
# Manual conditionals to force specialization in translation
# This (or a slight variant) is known as "The Trick" in the partial evaluation literature
        # (see Jones, Gomard, Sestoft 1993)
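        # Concretely: in the `t is Let` branch the annotator knows ast is a
        # Let, so ast.interpret becomes a direct call to Let.interpret rather
        # than an indirect dispatch.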
if t is Let:
ast, env, cont = ast.interpret(env, cont)
elif t is If:
ast, env, cont = ast.interpret(env, cont)
elif t is Begin:
ast, env, cont = ast.interpret(env, cont)
else:
ast, env, cont = ast.interpret(env, cont)
if ast.should_enter:
driver_two_state.can_enter_jit(ast=ast, came_from=came_from, env=env, cont=cont)
def get_printable_location_one_state(green_ast):
if green_ast is None:
return 'Green_Ast is None'
return green_ast.tostring()
driver_one_state = jit.JitDriver(reds=["env", "cont"],
greens=["ast"],
get_printable_location=get_printable_location_one_state,
should_unroll_one_iteration=lambda *args : True,
is_recursive=True)
def inner_interpret_one_state(ast, env, cont):
while True:
driver_one_state.jit_merge_point(ast=ast, env=env, cont=cont)
ast, env, cont = ast.interpret(env, cont)
if ast.should_enter:
driver_one_state.can_enter_jit(ast=ast, env=env, cont=cont)
def interpret_one(ast, env=None, cont=None):
if env is None:
env = ToplevelEnv()
if env.pycketconfig().two_state:
inner_interpret = inner_interpret_two_state
else:
inner_interpret = inner_interpret_one_state
from pycket.env import w_global_config
w_global_config.set_error_exit(None)
if cont is None:
cont = NilCont()
if cont.marks is None:
cont.update_cm(values.parameterization_key, values_parameter.top_level_config)
if env.toplevel_env().get_commandline_arguments():
cell = current_cmd_args_param.get_cell(cont)
cell.set(vector.W_Vector.fromelements(env.get_commandline_arguments()))
try:
inner_interpret(ast, env, cont)
except Done, e:
if w_global_config.is_error_triggered():
from pycket.error import ExitException
raise ExitException(e.values)
return e.values
except SchemeException, e:
if e.context_ast is None:
e.context_ast = ast
raise
def interpret_toplevel(a, env):
if isinstance(a, Begin):
x = None
for a2 in a.body:
x = interpret_toplevel(a2, env)
return x
elif isinstance(a, DefineValues):
assert 0 # FIXME
env.toplevel_env().toplevel_set(a.name, interpret_one(a.rhs, env))
return values.Values.make([values.w_void])
else:
return interpret_one(a, env)
def interpret_module(m, env):
env = env if env else ToplevelEnv()
m.interpret_mod(env)
return m
def interpret(asts):
env = ToplevelEnv()
x = None
for a in asts:
x = interpret_toplevel(a, env)
return x
|
{
"content_hash": "b533cf0d23a3b84fa40295aecf52ebb9",
"timestamp": "",
"source": "github",
"line_count": 2613,
"max_line_length": 133,
"avg_line_length": 32.4902411021814,
"alnum_prop": 0.5708800075385467,
"repo_name": "samth/pycket",
"id": "d306e334865f7541de3c78d3699b7ab3c430f608",
"size": "84947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycket/interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "1986"
},
{
"name": "KiCad",
"bytes": "76411"
},
{
"name": "Makefile",
"bytes": "2680"
},
{
"name": "Python",
"bytes": "1050245"
},
{
"name": "Racket",
"bytes": "694687"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8656"
}
],
"symlink_target": ""
}
|
import json
from json import JSONDecodeError
import sys
import argparse
import gzip
import re
parser = argparse.ArgumentParser(description='Match multiple patterns against a JSON field')
parser.add_argument('--field', help='Field to grep', default='url')
parser.add_argument('file', help='File to look in')
parser.add_argument('patterns', nargs=argparse.REMAINDER, help='Patterns to match')
args = parser.parse_args()
lineno = 0
with gzip.open(args.file, 'rt') as fp:
for line in fp:
        lineno += 1
        try:
            o = json.loads(line)
success = False
if args.field in o:
fval = o[args.field]
for p in args.patterns:
if p in fval:
success = True
break
if success:
print(json.dumps(o))
except JSONDecodeError as err:
print('{0}: JSON parse error: {1}'.format(lineno, err), file=sys.stderr)
except KeyError as err:
print('{0}: Missing field: {1}'.format(lineno, err), file=sys.stderr)
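# Example invocation (illustrative; the file and patterns are hypothetical):
#   python fgrep-json.py --field url crawl.json.gz example.com/ example.org/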
|
{
"content_hash": "d5482698a939fd4e859d5e5ad29aa52a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 92,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.5783783783783784,
"repo_name": "isoboroff/crawl-eval",
"id": "b2d04de55c294978f06d17e9f6d6d20b32bb5dcf",
"size": "1136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/fgrep-json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "147264"
},
{
"name": "Python",
"bytes": "8887"
},
{
"name": "R",
"bytes": "3019"
}
],
"symlink_target": ""
}
|
import logging
import traceback
import types
import canary.context
from canary.utils import DTable
import dtuple
class Concept (canary.context.Cacheable, DTable):
# FIXME: Resolve conflict between Exp/Out/Spec as "concepts" and this
# 'Concept'; namely that Concept.uid == Exp/Out/Spec.concept_id.
# Can wait until refactoring.
CACHE_KEY = 'concept'
def __init__ (self, context=None, uid=-1, load_synonyms=False):
try:
if self.term:
return
except AttributeError:
pass
self.uid = uid
self.study_id = -1
self.concept_source_id = -1
self.concept_source_code = ''
self.term = ''
self.sources = []
self.synonyms = []
def load (self, context, load_synonyms=True):
if self.uid == -1:
return
# Is it already loaded? Convenience check for client calls
# don't need to verify loads from the cache.
if context.config.use_cache:
try:
if self.term:
# Already loaded
return
except AttributeError:
                # Not already loaded, so continue
pass
cursor = context.get_cursor()
cursor.execute("""
SELECT umls_concepts.preferred_name,
umls_concepts_sources.umls_source_id,
umls_concepts_sources.umls_source_code
FROM umls_concepts, umls_concepts_sources
WHERE umls_concepts_sources.umls_concept_id = umls_concepts.umls_concept_id
AND umls_concepts.umls_concept_id = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
self.term = row['preferred_name']
self.sources.append((row['umls_source_id'], row['umls_source_code']))
self.concept_source_id = row['umls_source_id']
self.concept_source_code = row['umls_source_code']
if load_synonyms:
# NOTE: Is there any value in using umls_term_id? It's ignored here.
cursor.execute("""
SELECT term
FROM umls_terms
WHERE umls_concept_id = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
synonym = row['term']
if not synonym in self.synonyms:
self.synonyms.append(synonym)
def save (self, context, update_all=False):
# NOTE: For now, do not allow creation of arbitrary concepts
if self.uid == -1:
return
cursor = context.get_cursor()
# NOTE: For now, only allow update of preferred_name
cursor.execute("""
UPDATE umls_concepts
SET preferred_name = %s
WHERE umls_concept_id = %s
""", (self.term, self.uid))
if context.config.use_cache:
context.cache_set('%s:%s' % (self.CACHE_KEY, self.uid), self)
def add_synonym (self, context, term):
cursor = context.get_cursor()
# If a synonym does not yet exist, add it here, starting at id 20,000,000
# (5,000,000+ and 10,000,000+ are already in use from ITIS faux-merge)
if not term in self.synonyms:
cursor.execute("""
SELECT MAX(umls_term_id) AS max_id
FROM umls_terms
""")
row = cursor.fetchone()
current_max = row[0]
if current_max < 20000000:
new_max = 20000001
else:
new_max = current_max + 1
cursor.execute("""
INSERT INTO umls_terms
(umls_term_id, term, umls_concept_id)
VALUES (%s, %s, %s)
""", (new_max, term, self.uid))
def find_concepts (context, search_term):
cursor = context.get_cursor()
concepts = {}
if isinstance(search_term, types.IntType):
cursor.execute("""
SELECT umls_terms.umls_concept_id, term, preferred_name, umls_source_id
FROM umls_terms, umls_concepts, umls_concepts_sources
WHERE umls_concepts.umls_concept_id = %s
AND umls_concepts.umls_concept_id = umls_terms.umls_concept_id
AND umls_concepts_sources.umls_concept_id = umls_concepts.umls_concept_id
GROUP BY umls_concepts.umls_concept_id
ORDER BY term, preferred_name
""", search_term)
else:
# Assumes search_term is text
if search_term \
and len(search_term) > 0:
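            # e.g. 'heart attack' -> 'heart% attack%': each word becomes a
            # prefix match in the LIKE clause below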
query_term = search_term.strip().replace(' ', '% ') + '%'
cursor.execute("""
SELECT umls_terms.umls_concept_id, term, preferred_name, umls_source_id
FROM umls_terms, umls_concepts, umls_concepts_sources
WHERE term LIKE %s
AND umls_concepts.umls_concept_id = umls_terms.umls_concept_id
AND umls_concepts_sources.umls_concept_id = umls_concepts.umls_concept_id
GROUP BY umls_concepts.umls_concept_id
ORDER BY term, preferred_name
""", query_term)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
if not concepts.has_key((row['umls_concept_id'], row['umls_source_id'])):
concept = Concept(uid=row['umls_concept_id'])
concept.concept_source_id = row['umls_source_id']
concept.term = row['preferred_name']
concept.synonyms.append(row['term'])
concepts[(concept.uid, concept.concept_source_id)] = concept
else:
concept = concepts[(row['umls_concept_id'], row['umls_source_id'])]
if not row['term'] in concept.synonyms:
concept.synonyms.append(row['term'])
concepts[(concept.uid, concept.concept_source_id)] = concept
if not isinstance(search_term, types.IntType):
# Try to bump up coarse "relevance" of exact matches
concepts_ranked = concepts.values()
        for concept in concepts_ranked[:]:  # copy: the list is reordered in place
if concept.term.lower() == search_term.lower()\
or search_term.lower() in [syn.lower() for syn in concept.synonyms]:
concepts_ranked.remove(concept)
concepts_ranked.insert(0, concept)
return concepts_ranked
return concepts.values()
class Category (DTable):
concept_types = [
'exposure',
'risk_factor',
'outcome',
'species',
'location',
]
def __init__ (self, uid=-1, name=''):
self.uid = uid
self.name = name
self.types = []
self.groups = []
self.concepts = []
self.logger = logging.getLogger(str(self.__class__))
def add_type (self, type):
if type in self.concept_types \
and not type in self.types:
self.types.append(type)
def clear_types (self):
self.types = []
def set_types (self, types):
self.clear_types()
if types.__class__ == ''.__class__:
type_dict = dict(zip([t[0:1] for t in self.concept_types],
self.concept_types))
for type in types:
concept_type = type_dict.get(type, None)
if concept_type:
self.add_type(concept_type)
elif types.__class__ == [].__class__:
for type in types:
if type in self.concept_types:
self.add_type(type)
def get_types (self, shorthand=False):
if shorthand:
sh = ''.join([type[0:1] for type in self.types])
return sh
else:
return self.types
def add_group (self, group):
if group.__class__ == ''.__class__:
group = CategoryGroup(name=group, category_id=self.uid)
if not group.name in [g.name for g in self.groups]:
self.groups.append(group)
def clear_groups (self):
self.groups = []
def set_groups (self, groups):
self.clear_groups()
for group in groups:
self.add_group(group)
def get_groups (self):
return self.groups
def add_concept (self, concept):
if not concept.concept_id in [c.concept_id for c in self.concepts] \
and not concept.uid in [c.uid for c in self.concepts]:
self.concepts.append(concept)
def remove_concept (self, context, concept):
cursor = context.get_cursor()
for c in self.concepts:
if concept.uid == c.uid:
self.concepts.remove(c)
try:
cursor.execute("""
DELETE FROM category_concepts
WHERE uid = %s
""", concept.uid)
except Exception, e:
self.logger.error('Could not remove concept %s (%s)', concept.uid, e)
    def update_concept (self, concept):
        for c in self.concepts:
            if c.uid == concept.uid:
                self.concepts.remove(c)
                break
        self.add_concept(concept)
def get_concepts (self):
return self.concepts
def load (self, context, load_concepts=False):
if self.uid == -1:
return
cursor = context.get_cursor()
cursor.execute("""
SELECT *
FROM categories
WHERE uid = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
row = cursor.fetchone()
row = dtuple.DatabaseTuple(desc, row)
self.name = row['name']
self.set_types(row['concept_types'])
cursor.execute("""
SELECT *
FROM category_groups
WHERE category_id = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
group = CategoryGroup(uid=row['uid'], category_id=self.uid,
name=row['name'])
self.add_group(group)
if load_concepts:
cursor.execute("""
SELECT *
FROM category_concepts
WHERE category_id = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
cat_concept = CategoryConcept(uid=row['uid'],
category_id=self.uid,
concept_id=row['concept_id'])
cat_concept.is_broad = row['is_broad']
cat_concept.is_default = row['is_default']
cat_concept.load(context)
self.add_concept(cat_concept)
def save (self, context):
cursor = context.get_cursor()
if self.uid == -1:
cursor.execute("""
INSERT INTO categories
(uid, name, concept_types)
VALUES
(NULL, %s, %s)
""", (self.name, self.get_types(shorthand=True)))
cursor.execute("""
SELECT LAST_INSERT_ID() AS new_uid
""")
row = cursor.fetchone()
self.uid = row[0]
for group in self.groups:
group.category_id = self.uid
group.save(context)
else:
cursor.execute("""
UPDATE categories
SET name = %s, concept_types = %s
WHERE uid = %s
""", (self.name, self.get_types(shorthand=True), self.uid))
def load_categories (context):
cursor = context.get_cursor()
categories = []
cursor.execute("""
SELECT uid
FROM categories
ORDER BY name
""")
rows = cursor.fetchall()
for row in rows:
category = Category(uid=row[0])
category.load(context)
categories.append(category)
return categories
class CategoryGroup (DTable):
def __init__ (self, uid=-1, category_id=-1, name=''):
self.uid = uid
self.category_id = category_id
self.name = name
def save (self, context):
cursor = context.get_cursor()
if self.uid == -1:
cursor.execute("""
INSERT INTO category_groups
(uid, category_id, name)
VALUES
(NULL, %s, %s)
""", (self.category_id, self.name))
else:
cursor.execute("""
UPDATE category_groups
SET name = %s
WHERE uid = %s
""", (self.name, self.uid))
class CategoryConcept (DTable):
def __init__ (self, uid=-1, category_id=-1, concept_id=-1, term=''):
self.uid = uid
self.category_id = category_id
self.concept_id = concept_id
self.is_broad = False
self.is_default = False
self.term = term
self.groups = []
self.concept = None
self.logger = logging.getLogger(str(self.__class__))
def load (self, context):
cursor = context.get_cursor()
if self.uid == -1:
if not self.concept_id == -1 \
and not self.category_id == -1:
cursor.execute("""
SELECT *
FROM category_concepts
WHERE category_id = %s
AND concept_id = %s
""", (self.category_id, self.concept_id))
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
row = cursor.fetchone()
if row:
row = dtuple.DatabaseTuple(desc, row)
self.uid = row['uid']
self.is_broad = row['is_broad']
self.is_default = row['is_default']
else:
self.logger.debug('No matched rows')
return
else:
self.logger.debug('Not enough info')
return
else:
cursor.execute("""
SELECT *
FROM category_concepts
WHERE uid = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
row = cursor.fetchone()
if row:
row = dtuple.DatabaseTuple(desc, row)
self.concept_id = row['concept_id']
self.is_broad = row['is_broad']
self.is_default = row['is_default']
cursor.execute("""
SELECT *
FROM category_concept_groups
WHERE category_concept_id = %s
""", self.uid)
fields = [d[0] for d in cursor.description]
desc = dtuple.TupleDescriptor([[f] for f in fields])
rows = cursor.fetchall()
for row in rows:
row = dtuple.DatabaseTuple(desc, row)
self.groups.append(row['category_group_id'])
self.concept = Concept(context, self.concept_id)
def save (self, context):
cursor = context.get_cursor()
if self.uid == -1:
cursor.execute("""
INSERT INTO category_concepts
(uid, category_id, concept_id,
is_default, is_broad)
VALUES
(NULL, %s, %s,
%s, %s)
""", (self.category_id, self.concept_id,
int(self.is_default), int(self.is_broad)))
cursor.execute("""
SELECT LAST_INSERT_ID() AS new_uid
""")
row = cursor.fetchone()
self.uid = row[0]
else:
cursor.execute("""
UPDATE category_concepts
SET is_default = %s,
is_broad = %s
WHERE uid = %s
""", (int(self.is_default), int(self.is_broad), self.uid))
|
{
"content_hash": "16778eef7970d556ffb87f222be78591",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 89,
"avg_line_length": 34.60606060606061,
"alnum_prop": 0.5042615294804437,
"repo_name": "dchud/sentinel",
"id": "6a70334737e4ff9282190bc3bb408bd647d8830c",
"size": "17138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canary/concept.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "151729"
},
{
"name": "Python",
"bytes": "345235"
},
{
"name": "Shell",
"bytes": "775"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import plistlib
import subprocess
import xml.dom.minidom
k_iTunesApp = '/Applications/iTunes.app'
k_iTunesDir = os.path.abspath('./iTunes')
def GetVersion():
'''Get the current iTunes version'''
infoPlist = os.path.join(k_iTunesApp, 'Contents', 'Info.plist')
return plistlib.readPlist(infoPlist)['CFBundleVersion']
def SaveSdef(output):
'''Save the iTunes sdef xml to disk'''
proc = subprocess.Popen('sdef %s' % k_iTunesApp, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if stderr:
raise RuntimeError(stderr)
dom = xml.dom.minidom.parseString(stdout.strip())
with open(output, 'w') as file:
file.write(dom.toprettyxml(encoding='utf-8'))
def SaveHeader(output):
'''Save the iTunes header file to disk'''
proc = subprocess.Popen('sdef %s | sdp -fh --basename iTunes -o "%s"' % (k_iTunesApp, output),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if stderr:
raise RuntimeError(stderr)
if __name__ == '__main__':
version = GetVersion()
basename = 'iTunes_%s' % version
SaveHeader(os.path.join(k_iTunesDir, '%s.h' % basename))
SaveSdef(os.path.join(k_iTunesDir, '%s_aete0.sdef' % basename))
|
{
"content_hash": "c60d60e61babc0374170a0dd84ce2300",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 99,
"avg_line_length": 33.853658536585364,
"alnum_prop": 0.6534582132564841,
"repo_name": "kgn/EyeTunes",
"id": "ef69117e0bb110a03fe43877ff7095630da1854c",
"size": "1460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iTunes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11779"
},
{
"name": "Objective-C",
"bytes": "161481"
},
{
"name": "Python",
"bytes": "1460"
}
],
"symlink_target": ""
}
|
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
  X = MultivariateNormal(mu, sigma).sample(sample_shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
#### Argument Validation
  When `validate_args=True`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
  For example, when `validate_args=True` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e,
Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
# the default behavior.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
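    For example (illustrative): with `S = [3, 50]`, `B = [2]`, `E = [4]`, an
    input `x` of shape `[3, 50, 2, 4]` is returned with shape `[2, 4, 150]`.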
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
# the default behavior.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: _B+_E+[prod(S)]
sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
x = distribution_util.rotate_transpose(x, shift=1)
# x.shape: [prod(S)]+_B+_E
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, axis=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = array_ops.where(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
|
{
"content_hash": "0b54de6125943f02e5f4e9e27e2897d0",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 80,
"avg_line_length": 39.25879917184265,
"alnum_prop": 0.6183946841050522,
"repo_name": "nburn42/tensorflow",
"id": "6a7f28713acefd2285b07a212e2e47a6db1ae5e1",
"size": "19651",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/shape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "341132"
},
{
"name": "C++",
"bytes": "39824558"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "590137"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33704964"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426212"
}
],
"symlink_target": ""
}
|
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'PyEphem'
copyright = '2008, Brandon Craig Rhodes'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
#version = '...'
# The full version, including alpha/beta/rc tags.
#release = '...'
# Read version and release from ephem/__init__.py itself.
path = os.path.join(os.path.dirname(__file__), '..', '__init__.py')
for line in open(path):
if line.startswith('__version__'):
version = eval(line.split(None, 2)[2]) # skip '__version__', '='
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'default.css'
html_style = 'pyephem.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = "PyEphem home page"
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyEphemdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'PyEphem.tex', 'PyEphem Documentation', 'Brandon Craig Rhodes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "03402dc96a6bd4c662892a0fe039725f",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 86,
"avg_line_length": 31.232704402515722,
"alnum_prop": 0.7078131292790979,
"repo_name": "bennettscience/PySky",
"id": "1337fc76f97be30631f89c6f3d1854adc7785075",
"size": "5465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyephem-3.7.5.1/src/ephem/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1350"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
import requests
from processor.processor import TagProcessor
class PageAnalytic(object):
headers={"User-Agent": "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11",
"Accept-Encoding": "gzip"}
tagsClass = TagProcessor.__subclasses__()
def __init__(self, url):
self.base_url = url
response = requests.get(url, headers=self.headers)
self.html_size = int(response.headers["content-length"])
self.page_object = BeautifulSoup(response.content)
self._tags = []
def _get_tag_size(self, tag):
        for subCls in self.tagsClass:
            if subCls.is_resource(tag):
                self._tags.append(subCls(tag, base_url=self.base_url))
                return True
        return False
def get_size_tag(self):
size = 0
self.page_object.find_all(self._get_tag_size)
for tag in self._tags:
#threading?
resp = requests.head(tag.get_resource_url(), headers=self.headers)
while resp.status_code == 301 or resp.status_code == 302:
resp = requests.head(resp.headers["location"])
if resp.status_code == 200:
size += int(resp.headers["content-length"])
self._tags = []
return size
def get_page_size(self):
return self.html_size + self.get_size_tag()
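# Example usage (illustrative; the URL is hypothetical):
#   analytic = PageAnalytic("http://example.com/")
#   print(analytic.get_page_size())  # HTML bytes plus referenced resources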
|
{
"content_hash": "4613fb1fbc64d8702ecee96fd8889838",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 94,
"avg_line_length": 35.275,
"alnum_prop": 0.5910701630049611,
"repo_name": "pitomba/libra",
"id": "281dd6fa0a18b1939a0d05504e400fbae8b2cc9e",
"size": "1411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libra/processor/page_analytic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1545"
},
{
"name": "Python",
"bytes": "20998"
},
{
"name": "Ruby",
"bytes": "235"
}
],
"symlink_target": ""
}
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import logutil_pb2 as logutil__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='vtworkerdata.proto',
package='vtworkerdata',
syntax='proto3',
serialized_pb=_b('\n\x12vtworkerdata.proto\x12\x0cvtworkerdata\x1a\rlogutil.proto\"-\n\x1d\x45xecuteVtworkerCommandRequest\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\t\"?\n\x1e\x45xecuteVtworkerCommandResponse\x12\x1d\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x0e.logutil.Eventb\x06proto3')
,
dependencies=[logutil__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXECUTEVTWORKERCOMMANDREQUEST = _descriptor.Descriptor(
name='ExecuteVtworkerCommandRequest',
full_name='vtworkerdata.ExecuteVtworkerCommandRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='args', full_name='vtworkerdata.ExecuteVtworkerCommandRequest.args', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=96,
)
_EXECUTEVTWORKERCOMMANDRESPONSE = _descriptor.Descriptor(
name='ExecuteVtworkerCommandResponse',
full_name='vtworkerdata.ExecuteVtworkerCommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='event', full_name='vtworkerdata.ExecuteVtworkerCommandResponse.event', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=161,
)
_EXECUTEVTWORKERCOMMANDRESPONSE.fields_by_name['event'].message_type = logutil__pb2._EVENT
DESCRIPTOR.message_types_by_name['ExecuteVtworkerCommandRequest'] = _EXECUTEVTWORKERCOMMANDREQUEST
DESCRIPTOR.message_types_by_name['ExecuteVtworkerCommandResponse'] = _EXECUTEVTWORKERCOMMANDRESPONSE
ExecuteVtworkerCommandRequest = _reflection.GeneratedProtocolMessageType('ExecuteVtworkerCommandRequest', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEVTWORKERCOMMANDREQUEST,
__module__ = 'vtworkerdata_pb2'
# @@protoc_insertion_point(class_scope:vtworkerdata.ExecuteVtworkerCommandRequest)
))
_sym_db.RegisterMessage(ExecuteVtworkerCommandRequest)
ExecuteVtworkerCommandResponse = _reflection.GeneratedProtocolMessageType('ExecuteVtworkerCommandResponse', (_message.Message,), dict(
DESCRIPTOR = _EXECUTEVTWORKERCOMMANDRESPONSE,
__module__ = 'vtworkerdata_pb2'
# @@protoc_insertion_point(class_scope:vtworkerdata.ExecuteVtworkerCommandResponse)
))
_sym_db.RegisterMessage(ExecuteVtworkerCommandResponse)
import abc
from grpc.early_adopter import implementations
from grpc.framework.alpha import utilities
# @@protoc_insertion_point(module_scope)
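# Round-trip sketch for the generated messages (hypothetical values):
#   req = ExecuteVtworkerCommandRequest(args=["Ping"])
#   data = req.SerializeToString()
#   assert ExecuteVtworkerCommandRequest.FromString(data).args == ["Ping"]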
|
{
"content_hash": "aa99c3cb13102b7cd394f1a7f20f233e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 278,
"avg_line_length": 33.207207207207205,
"alnum_prop": 0.7593597395550733,
"repo_name": "skyportsystems/vitess",
"id": "50eaf0f89755b0b2faa29e33c2480a66f18452bd",
"size": "3775",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "py/vtproto/vtworkerdata_pb2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40319"
},
{
"name": "CSS",
"bytes": "80182"
},
{
"name": "Go",
"bytes": "4581355"
},
{
"name": "HTML",
"bytes": "81084"
},
{
"name": "Java",
"bytes": "252526"
},
{
"name": "JavaScript",
"bytes": "66316"
},
{
"name": "Liquid",
"bytes": "18108"
},
{
"name": "Makefile",
"bytes": "7544"
},
{
"name": "PHP",
"bytes": "7167"
},
{
"name": "PLpgSQL",
"bytes": "10070"
},
{
"name": "Protocol Buffer",
"bytes": "62086"
},
{
"name": "Python",
"bytes": "955569"
},
{
"name": "Ruby",
"bytes": "465"
},
{
"name": "Shell",
"bytes": "24467"
},
{
"name": "Yacc",
"bytes": "19014"
}
],
"symlink_target": ""
}
|
"""VTA TOPI Utils."""
def is_packed_layout(layout):
"""Check if layout is packed layout"""
if layout == "NCHW":
return False
if "n" in layout and "c" in layout:
return True
return False
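# Usage sketch:
#   assert not is_packed_layout("NCHW")
#   assert is_packed_layout("NCHW1n16c")  # lowercase "n" and "c" blocks present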
|
{
"content_hash": "eb7811d000b28e761bb60d3f28672824",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 42,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.589041095890411,
"repo_name": "Huyuwei/tvm",
"id": "0fbdb2f86a197def1799b128585eb74ff340daee",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vta/python/vta/top/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
"""Extension for flake8 to test for certain __future__ imports"""
from __future__ import print_function
import optparse
import sys
from collections import namedtuple
from typing import Optional
try:
import argparse
except ImportError as e:
argparse = e
from ast import NodeVisitor, Str, Module, parse
__version__ = '0.4.7'
class FutureImportVisitor(NodeVisitor):
def __init__(self):
super(FutureImportVisitor, self).__init__()
self.future_imports = []
self._uses_code = False
def visit_ImportFrom(self, node):
if node.module == '__future__':
self.future_imports += [node]
def visit_Expr(self, node):
if not isinstance(node.value, Str) or node.value.col_offset != 0:
self._uses_code = True
def generic_visit(self, node):
if not isinstance(node, Module):
self._uses_code = True
super(FutureImportVisitor, self).generic_visit(node)
@property
def uses_code(self):
return self._uses_code or self.future_imports
class Flake8Argparse(object):
@classmethod
def add_options(cls, parser):
class Wrapper(object):
def add_argument(self, *args, **kwargs):
kwargs.setdefault('parse_from_config', True)
try:
parser.add_option(*args, **kwargs)
except (optparse.OptionError, TypeError):
use_config = kwargs.pop('parse_from_config')
option = parser.add_option(*args, **kwargs)
if use_config:
# flake8 2.X uses config_options to handle stuff like 'store_true'
parser.config_options.append(option.get_opt_string().lstrip('-'))
cls.add_arguments(Wrapper())
@classmethod
def add_arguments(cls, parser):
pass
Feature = namedtuple('Feature', 'index, name, optional, mandatory')
DIVISION = Feature(0, 'division', (2, 2, 0), (3, 0, 0))
ABSOLUTE_IMPORT = Feature(1, 'absolute_import', (2, 5, 0), (3, 0, 0))
WITH_STATEMENT = Feature(2, 'with_statement', (2, 5, 0), (2, 6, 0))
PRINT_FUNCTION = Feature(3, 'print_function', (2, 6, 0), (3, 0, 0))
UNICODE_LITERALS = Feature(4, 'unicode_literals', (2, 6, 0), (3, 0, 0))
GENERATOR_STOP = Feature(5, 'generator_stop', (3, 5, 0), (3, 7, 0))
NESTED_SCOPES = Feature(6, 'nested_scopes', (2, 1, 0), (2, 2, 0))
GENERATORS = Feature(7, 'generators', (2, 2, 0), (2, 3, 0))
ANNOTATIONS = Feature(8, 'annotations', (3, 7, 0), (4, 0, 0))
# Order important as it defines the error code
ALL_FEATURES = (DIVISION, ABSOLUTE_IMPORT, WITH_STATEMENT, PRINT_FUNCTION,
UNICODE_LITERALS, GENERATOR_STOP, NESTED_SCOPES, GENERATORS, ANNOTATIONS)
FEATURES = dict((feature.name, feature) for feature in ALL_FEATURES)
FEATURE_NAMES = frozenset(feature.name for feature in ALL_FEATURES)
# Make sure the features aren't messed up
assert len(FEATURES) == len(ALL_FEATURES)
assert all(feature.index == index for index, feature in enumerate(ALL_FEATURES))
class FutureImportChecker(Flake8Argparse):
version = __version__
name = 'flake8-future-import'
require_code = True
min_version = False
def __init__(self, tree, filename):
self.tree = tree
@classmethod
def add_arguments(cls, parser):
parser.add_argument('--require-code', action='store_true',
                            help='Only apply to files that contain more '
                                 'than just comments and (doc)strings')
parser.add_argument('--min-version', default=False,
help='The minimum version supported so that it can '
'ignore mandatory and non-existent features')
@classmethod
def parse_options(cls, options):
cls.require_code = options.require_code
min_version = options.min_version
if min_version is not False:
try:
min_version = tuple(int(num)
for num in min_version.split('.'))
except ValueError:
min_version = None
if min_version is None or len(min_version) > 3:
raise ValueError('Minimum version "{0}" not formatted '
'like "A.B.C"'.format(options.min_version))
# Ensure that min_version is a tuple of length 3
min_version += (0, ) * (max(3 - len(min_version), 0))
cls.min_version = min_version
def _generate_error(self, future_import: str, present: bool) -> Optional[str]:
"""Checks whether the import is an error and returns it.
:param future_import: The name of the future import (e.g. "annotations")
:param present: Whether the import is present
:return: An error message if the combination is one or None otherwise
"""
feature = FEATURES.get(future_import)
if feature is None:
code = 90
msg = 'does not exist'
else:
if (not present and self.min_version and
(feature.mandatory <= self.min_version or
feature.optional > self.min_version)):
return None
code = 10 + feature.index
if present:
msg = 'present'
code += 40
else:
msg = 'missing'
msg = 'FI{0} __future__ import "{1}" ' + msg
return msg.format(code, future_import)
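    # Sketch of the resulting messages (codes follow the Feature indices above):
    #   _generate_error('division', present=False) -> 'FI10 __future__ import "division" missing'
    #   _generate_error('division', present=True)  -> 'FI50 __future__ import "division" present'
    #   _generate_error('nonsense', present=True)  -> 'FI90 __future__ import "nonsense" does not exist'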
def run(self):
visitor = FutureImportVisitor()
visitor.visit(self.tree)
if self.require_code and not visitor.uses_code:
return
present = set()
for import_node in visitor.future_imports:
for alias in import_node.names:
err = self._generate_error(alias.name, True)
if err:
yield import_node.lineno, 0, err, type(self)
present.add(alias.name)
for name in FEATURES:
if name not in present:
err = self._generate_error(name, False)
if err:
yield 1, 0, err, type(self)
def main(args):
if isinstance(argparse, ImportError):
print('argparse is required for the standalone version.')
return
parser = argparse.ArgumentParser()
choices = set(10 + feature.index for feature in FEATURES.values())
choices |= set(40 + choice for choice in choices) | set([90])
choices = set('FI{0}'.format(choice) for choice in choices)
parser.add_argument('--ignore', help='Ignore the given comma-separated '
'codes')
FutureImportChecker.add_arguments(parser)
parser.add_argument('files', nargs='+')
args = parser.parse_args(args)
FutureImportChecker.parse_options(args)
if args.ignore:
ignored = set(args.ignore.split(','))
unrecognized = ignored - choices
ignored &= choices
if unrecognized:
invalid = set()
for invalid_code in unrecognized:
no_valid = True
if not invalid:
for valid_code in choices:
if valid_code.startswith(invalid_code):
ignored.add(valid_code)
no_valid = False
if no_valid:
invalid.add(invalid_code)
if invalid:
raise ValueError('The code(s) is/are invalid: "{0}"'.format(
'", "'.join(invalid)))
else:
ignored = set()
has_errors = False
for filename in args.files:
with open(filename, 'rb') as f:
tree = parse(f.read(), filename=filename, mode='exec')
for line, char, msg, checker in FutureImportChecker(tree,
filename).run():
if msg[:4] not in ignored:
has_errors = True
print('{0}:{1}:{2}: {3}'.format(filename, line, char + 1, msg))
return has_errors
if __name__ == '__main__':
sys.exit(1 if main(sys.argv[1:]) else 0)
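# Standalone usage sketch (hypothetical file name; an --ignore prefix such as
# FI5 expands to FI50-FI58):
#   python flake8_future_import.py --ignore FI5,FI90 --min-version 2.7 mymodule.py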
|
{
"content_hash": "979846a23fd796c16965a5a20eb90cdc",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 90,
"avg_line_length": 37.22374429223744,
"alnum_prop": 0.5652600588812562,
"repo_name": "xZise/flake8-future-import",
"id": "444b02c1203da731f37cef2898eea8d478c89213",
"size": "8194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flake8_future_import.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25011"
}
],
"symlink_target": ""
}
|
import json
from django.forms import Media, widgets
from wagtail.utils.widgets import WidgetWithScript
class CustomRichTextArea(WidgetWithScript, widgets.Textarea):
def render_js_init(self, id_, name, value):
return "customEditorInitScript({0});".format(json.dumps(id_))
@property
def media(self):
return Media(js=["vendor/custom_editor.js"])
class LegacyRichTextArea(WidgetWithScript, widgets.Textarea):
def render_js_init(self, id_, name, value):
return "legacyEditorInitScript({0});".format(json.dumps(id_))
@property
def media(self):
return Media(js=["vendor/legacy_editor.js"])
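# Usage sketch: these widgets are wired up through the rich text editor
# registry in the test settings, e.g. (assumed configuration):
#   WAGTAILADMIN_RICH_TEXT_EDITORS = {
#       "custom": {"WIDGET": "wagtail.test.testapp.rich_text.CustomRichTextArea"},
#   }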
|
{
"content_hash": "86ad72126d88100bee6d03c306b29372",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 28.130434782608695,
"alnum_prop": 0.7017001545595054,
"repo_name": "rsalmaso/wagtail",
"id": "141b836cf263ddc79d945eb8f25782814d2241de",
"size": "647",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/test/testapp/rich_text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('flashcards', '0057_auto_20180721_0221'),
]
operations = [
migrations.AlterUniqueTogether(
name='deck',
unique_together=set(),
),
migrations.RunSQL(
'''
ALTER TABLE flashcards_deck
ADD CONSTRAINT flashcards_deck_unique_name_for_owner_if_active EXCLUDE (owner_id WITH =, name WITH =, synchronized_with_id WITH =) WHERE (active IS TRUE);
'''
),
]
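# Note: the EXCLUDE constraint above acts like a partial unique index; a rough
# SQL equivalent (sketch only) would be:
#   CREATE UNIQUE INDEX flashcards_deck_active_uniq
#       ON flashcards_deck (owner_id, name, synchronized_with_id)
#       WHERE active IS TRUE;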
|
{
"content_hash": "4a0f09308525b891c65da00ac9c0d791",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 170,
"avg_line_length": 27.19047619047619,
"alnum_prop": 0.5691768826619965,
"repo_name": "aehlke/manabi",
"id": "45e8abc1f43dbcde4d08e90c26d32cf90385ddfe",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manabi/apps/flashcards/migrations/0058_unique_only_required_for_active_decks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60000"
},
{
"name": "HTML",
"bytes": "287098"
},
{
"name": "JavaScript",
"bytes": "260813"
},
{
"name": "Jinja",
"bytes": "152668"
},
{
"name": "PowerShell",
"bytes": "935"
},
{
"name": "Python",
"bytes": "5129354"
},
{
"name": "Ruby",
"bytes": "5722"
},
{
"name": "SCSS",
"bytes": "25268"
},
{
"name": "Shell",
"bytes": "3041"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# This function should maybe be moved to SCons.Util?
from SCons.Tool.PharLapCommon import addPathIfNotExists
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SystemRoot' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
"""Add a Builder factory function and construction variables for
Perforce to an Environment."""
def PerforceFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The Perforce() factory is deprecated and there is no replacement.""")
return SCons.Builder.Builder(action = PerforceAction, env = env)
#setattr(env, 'Perforce', PerforceFactory)
env.Perforce = PerforceFactory
env['P4'] = 'p4'
env['P4FLAGS'] = SCons.Util.CLVar('')
env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Perforce seems to use the PWD environment variable rather than
# calling getcwd() for itself, which is odd. If no PWD variable
# is present, p4 WILL call getcwd, but this seems to cause problems
# with good ol' Windows's tilde-mangling for long file names.
environ['PWD'] = env.Dir('#').get_abspath()
for var in _import_env:
v = os.environ.get(var)
if v:
environ[var] = v
if SCons.Util.can_read_reg:
# If we can read the registry, add the path to Perforce to our environment.
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Perforce\\environment')
val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
addPathIfNotExists(environ, 'PATH', val)
except SCons.Util.RegError:
# Can't detect where Perforce is, hope the user has it set in the
# PATH.
pass
def exists(env):
return env.Detect('p4')
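# Usage sketch (SConscript; assumes p4 is on PATH and the P4* environment
# variables are set):
#   env = Environment(tools=['Perforce'])
#   env.Command('checked_out_file', [], '$P4COM')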
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "3494cb950119b35178c4d424bb1ff006",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 118,
"avg_line_length": 36.05825242718446,
"alnum_prop": 0.68578352180937,
"repo_name": "Distrotech/scons",
"id": "15dd83f3a80fae7a087c52f5c699ffd7ff9b11b1",
"size": "3714",
"binary": false,
"copies": "5",
"ref": "refs/heads/distrotech-scons",
"path": "bootstrap/src/engine/SCons/Tool/Perforce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "12517068"
},
{
"name": "Shell",
"bytes": "20589"
}
],
"symlink_target": ""
}
|
import numpy
import matplotlib
import matplotlib.pyplot as plt
errors = numpy.loadtxt('series0_1_d_errors.txt')
numberOfIntervals = numpy.loadtxt('series0_1_d_numbers.txt')
plt.loglog(numberOfIntervals, errors, '-*', label='$|I_n(f)-I(f)|$')
plt.title('Error plot for the midpoint rule')
plt.xlabel('$n$')
plt.ylabel('$|I(f)-I_n(f)|$')
plt.grid(True)
# Now compare against the rate n^(-2)
# (plt.hold was removed from modern matplotlib; axes hold by default now)
plt.loglog(numberOfIntervals, numberOfIntervals**(-2), '--', label=r'$\mathcal{O}(n^{-2})$')
plt.legend()
plt.show()
|
{
"content_hash": "c9d60e147b177608ecfdbb14d4f112d9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 90,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.6871401151631478,
"repo_name": "westernmagic/NumPDE",
"id": "93b897c78fe0a997e63f60ae448d203bd9f93f57",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "series0/midpoint/plot_error_compare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "152120"
},
{
"name": "CMake",
"bytes": "12126"
},
{
"name": "GLSL",
"bytes": "1377"
},
{
"name": "Matlab",
"bytes": "3597"
},
{
"name": "Python",
"bytes": "13661"
},
{
"name": "R",
"bytes": "2897"
},
{
"name": "Shell",
"bytes": "1659"
}
],
"symlink_target": ""
}
|
import logging
from banal import ensure_list
from flask_babel import gettext
from werkzeug.exceptions import BadRequest
from aleph.index.indexes import entities_read_index
from aleph.index.collections import collections_index
from aleph.index.xref import xref_index, XREF_SOURCE
from aleph.index.notifications import notifications_index
from aleph.index.entities import ENTITY_SOURCE
from aleph.logic.matching import match_query
from aleph.logic.notifications import get_role_channels
from aleph.search.parser import QueryParser, SearchQueryParser # noqa
from aleph.search.result import QueryResult, DatabaseQueryResult # noqa
from aleph.search.query import Query
log = logging.getLogger(__name__)
class CollectionsQuery(Query):
TEXT_FIELDS = ["label^3", "text"]
SORT_DEFAULT = ["_score", {"label.kw": "asc"}]
SKIP_FILTERS = ["writeable"]
PREFIX_FIELD = "label"
SOURCE = {"excludes": ["text"]}
def get_filters(self, **kwargs):
filters = super(CollectionsQuery, self).get_filters(**kwargs)
if self.parser.getbool("filter:writeable"):
ids = self.parser.authz.collections(self.parser.authz.WRITE)
filters.append({"ids": {"values": ids}})
return filters
def get_index(self):
return collections_index()
class EntitiesQuery(Query):
TEXT_FIELDS = ["fingerprints.text^3", "text"]
PREFIX_FIELD = "fingerprints.text"
HIGHLIGHT_FIELD = "properties.*"
SKIP_FILTERS = ["schema", "schemata"]
SOURCE = ENTITY_SOURCE
SORT_DEFAULT = []
def get_index(self):
schemata = self.parser.getlist("filter:schema")
if len(schemata):
return entities_read_index(schema=schemata, expand=False)
schemata = self.parser.getlist("filter:schemata")
if not len(schemata):
raise BadRequest(gettext("No schema is specified for the query."))
return entities_read_index(schema=schemata)
class MatchQuery(EntitiesQuery):
"""Given an entity, find the most similar other entities."""
def __init__(self, parser, entity=None, exclude=None, collection_ids=None):
self.entity = entity
self.exclude = ensure_list(exclude)
self.collection_ids = collection_ids
super(MatchQuery, self).__init__(parser)
def get_index(self):
# Attempt to find only matches within the "matchable" set of
        # entity schemata. For example, a Company can be matched to
        # another Company or a LegalEntity, but not a Person.
# Real estate is "unmatchable", i.e. even if two plots of land
# have almost the same name and criteria, it does not make
# sense to suggest they are the same.
schemata = list(self.entity.schema.matchable_schemata)
return entities_read_index(schema=schemata)
def get_query(self):
query = super(MatchQuery, self).get_query()
query = match_query(
self.entity, collection_ids=self.collection_ids, query=query
)
if len(self.exclude):
exclude = {"ids": {"values": self.exclude}}
query["bool"]["must_not"].append(exclude)
return query
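    # Usage sketch (hypothetical parser and entity objects):
    #   query = MatchQuery(parser, entity=entity, collection_ids=[1, 2])
    #   es_query = query.get_query()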
class XrefQuery(Query):
TEXT_FIELDS = ["text"]
SORT_DEFAULT = [{"score": "desc"}]
SORT_FIELDS = {
"random": "random",
"doubt": "doubt",
"score": "_score",
}
AUTHZ_FIELD = "match_collection_id"
SCORE_CUTOFF = 0.5
SOURCE = XREF_SOURCE
def __init__(self, parser, collection_id=None):
self.collection_id = collection_id
parser.highlight = False
super(XrefQuery, self).__init__(parser)
def get_filters(self, **kwargs):
filters = super(XrefQuery, self).get_filters(**kwargs)
filters.append({"term": {"collection_id": self.collection_id}})
sorts = [f for (f, _) in self.parser.sorts]
if "random" not in sorts and "doubt" not in sorts:
filters.append({"range": {"score": {"gt": self.SCORE_CUTOFF}}})
return filters
def get_index(self):
return xref_index()
class NotificationsQuery(Query):
AUTHZ_FIELD = None
TEXT_FIELDS = ["text"]
SORT_DEFAULT = [{"created_at": {"order": "desc"}}]
def get_text_query(self):
return [{"match_all": {}}]
def get_filters(self, **kwargs):
channels = get_role_channels(self.parser.authz.role)
filters = super(NotificationsQuery, self).get_filters(**kwargs)
filters.append({"terms": {"channels": channels}})
return filters
def get_negative_filters(self):
return [{"term": {"actor_id": self.parser.authz.role.id}}]
def get_index(self):
return notifications_index()
class EntitySetItemsQuery(EntitiesQuery):
SKIP_FILTERS = []
def __init__(self, *args, **kwargs):
self.entityset = kwargs.pop("entityset")
super(EntitySetItemsQuery, self).__init__(*args, **kwargs)
def get_filters(self, **kwargs):
filters = super(EntitySetItemsQuery, self).get_filters(**kwargs)
filters.append({"ids": {"values": self.entityset.entities}})
return filters
def get_index(self):
return entities_read_index()
|
{
"content_hash": "5685f296b486e722d5aed0af151208d2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 34.718120805369125,
"alnum_prop": 0.6456601585153683,
"repo_name": "alephdata/aleph",
"id": "c415213e59352d2ec3100213ec2585c04cbb3538",
"size": "5173",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "aleph/search/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2610"
},
{
"name": "HTML",
"bytes": "4162"
},
{
"name": "JavaScript",
"bytes": "882037"
},
{
"name": "Makefile",
"bytes": "7861"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "618821"
},
{
"name": "SCSS",
"bytes": "140491"
},
{
"name": "Shell",
"bytes": "3215"
},
{
"name": "TypeScript",
"bytes": "308454"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
# LSTM Encoder-decoder Autoencoder model functions
def create_LSTM_stack(lstm_hidden_units, lstm_dropout_output_keep_probs):
"""Create LSTM stacked cells.
Given list of LSTM hidden units and list of LSTM dropout output keep
probabilities.
Args:
lstm_hidden_units: List of integers for the number of hidden units in each
layer.
lstm_dropout_output_keep_probs: List of floats for the dropout output keep
probabilities for each layer.
Returns:
MultiRNNCell object of stacked LSTM layers.
"""
# First create a list of LSTM cell objects using our list of lstm hidden
# unit sizes
lstm_cells = [tf.contrib.rnn.BasicLSTMCell(
num_units=units,
forget_bias=1.0,
state_is_tuple=True)
for units in lstm_hidden_units]
# Next apply a dropout wrapper to our stack of LSTM cells,
# in this case just on the outputs
dropout_lstm_cells = [tf.nn.rnn_cell.DropoutWrapper(
cell=lstm_cells[cell_index],
input_keep_prob=1.0,
output_keep_prob=lstm_dropout_output_keep_probs[cell_index],
state_keep_prob=1.0)
for cell_index in range(len(lstm_cells))]
# Create a stack of layers of LSTM cells
# Combines list into MultiRNNCell object
stacked_lstm_cells = tf.contrib.rnn.MultiRNNCell(
cells=dropout_lstm_cells,
state_is_tuple=True)
return stacked_lstm_cells
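# Usage sketch (TF 1.x, hypothetical layer sizes and keep probabilities):
#   cells = create_LSTM_stack(lstm_hidden_units=[64, 32],
#                             lstm_dropout_output_keep_probs=[0.9, 1.0])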
# The rnn_decoder function takes labels during TRAIN/EVAL
# and a start token followed by its previous predictions during PREDICT
# Starts with an initial state of the final encoder states
def rnn_decoder(dec_input, init_state, cell, infer, dnn_hidden_units, num_feat):
"""Decoder for RNN cell.
Given list of LSTM hidden units and list of LSTM dropout output keep
probabilities.
Args:
dec_input: List of tf.float64 current batch size by number of features
matrix tensors input to the decoder.
init_state: Initial state of the decoder cell. Final state from the
encoder cell.
cell: RNN Cell object.
infer: Boolean whether in inference mode or not.
dnn_hidden_units: Python list of integers of number of units per DNN layer.
num_feat: Python integer of the number of features.
Returns:
outputs: List of decoder outputs of length number of timesteps of tf.float64
current batch size by number of features matrix tensors.
state: Final cell state of the decoder.
"""
# Create the decoder variable scope
with tf.variable_scope("decoder"):
# Load in our initial state from our encoder
# Tuple of final encoder c_state and h_state of final encoder layer
state = init_state
# Create an empty list to store our hidden state output for every timestep
outputs = []
# Begin with no previous output
previous_output = None
# Loop over all of our dec_input which will be seq_len long
for index, decoder_input in enumerate(dec_input):
# If there has been a previous output, we will determine the next input
if previous_output is not None:
# Create the input layer to our DNN
# shape = (cur_batch_size, lstm_hidden_units[-1])
network = previous_output
# Create our dnn variable scope
with tf.variable_scope(name_or_scope="dnn", reuse=tf.AUTO_REUSE):
# Add hidden layers with the given number of units/neurons per layer
# shape = (cur_batch_size, dnn_hidden_units[i])
for units in dnn_hidden_units:
network = tf.layers.dense(
inputs=network,
units=units,
activation=tf.nn.relu)
# Connect final hidden layer to linear layer to get the logits
# shape = (cur_batch_size, num_feat)
logits = tf.layers.dense(
inputs=network,
units=num_feat,
activation=None)
# If we are in inference then we will overwrite our next decoder_input
# with the logits we just calculated. Otherwise, we leave the decoder
# input input as it was from the enumerated list. We have to calculate
# the logits even when not using them so that the correct DNN subgraph
# will be generated here and after the encoder-decoder for both
# training and inference
if infer:
# shape = (cur_batch_size, num_feat)
decoder_input = logits
# If this isn"t our first time through the loop, just reuse(share) the
# same variables for each iteration within the current variable scope
if index > 0:
tf.get_variable_scope().reuse_variables()
# Run the decoder input through the decoder stack picking up from the
# previous state
# output_shape = (cur_batch_size, lstm_hidden_units[-1])
# state_shape = # tuple of final decoder c_state and h_state
output, state = cell(decoder_input, state)
# Append the current decoder hidden state output to the outputs list
# List seq_len long of shape = (cur_batch_size, lstm_hidden_units[-1])
outputs.append(output)
# Set the previous output to the output just calculated
# shape = (cur_batch_size, lstm_hidden_units[-1])
previous_output = output
return outputs, state
def lstm_enc_dec_autoencoder_model(
X, mode, params, cur_batch_size, dummy_var):
"""LSTM autoencoder to reconstruct inputs and minimize reconstruction error.
Given data matrix tensor X, the current Estimator mode, the dictionary of
parameters, current batch size, and the number of features, process through
LSTM model encoder, decoder, and DNN subgraphs and return reconstructed inputs
as output.
Args:
X: tf.float64 matrix tensor of input data.
mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.
params: Dictionary of parameters.
cur_batch_size: Current batch size, could be partially filled.
dummy_var: Dummy variable used to allow training mode to happen since it
requires a gradient to tie back to the graph dependency.
Returns:
loss: Reconstruction loss.
train_op: Train operation so that Estimator can correctly add to dependency
graph.
X_time: 2D tensor representation of time major input data.
X_time_recon: 2D tensor representation of time major input data.
X_feat: 2D tensor representation of feature major input data.
X_feat_recon: 2D tensor representation of feature major input data.
"""
# Unstack 3-D features tensor into a sequence(list) of 2-D tensors
# shape = (cur_batch_size, num_feat)
X_sequence = tf.unstack(value=X, num=params["seq_len"], axis=1)
# Since this is an autoencoder, the features are the labels.
# It often works better though to have the labels in reverse order
# shape = (cur_batch_size, seq_len, num_feat)
if params["reverse_labels_sequence"]:
Y = tf.reverse_sequence(
input=X,
seq_lengths=tf.tile(
input=tf.constant(value=[params["seq_len"]], dtype=tf.int64),
multiples=tf.expand_dims(input=cur_batch_size, axis=0)),
seq_axis=1,
batch_axis=0)
else:
Y = X # shape = (cur_batch_size, seq_len, num_feat)
##############################################################################
  # Create the encoder-decoder LSTM stacks
  # Create the decoder cells first; both the training and inference branches
  # below need them
dec_stacked_lstm_cells = create_LSTM_stack(
params["dec_lstm_hidden_units"],
params["lstm_dropout_output_keep_probs"])
# Create the encoder variable scope
with tf.variable_scope("encoder"):
# Create separate encoder cells with their own weights separate from decoder
enc_stacked_lstm_cells = create_LSTM_stack(
params["enc_lstm_hidden_units"],
params["lstm_dropout_output_keep_probs"])
# Encode the input sequence using our encoder stack of LSTMs
# enc_outputs = seq_len long of shape = (cur_batch_size, enc_lstm_hidden_units[-1])
# enc_states = tuple of final encoder c_state and h_state for each layer
_, enc_states = tf.nn.static_rnn(
cell=enc_stacked_lstm_cells,
inputs=X_sequence,
initial_state=enc_stacked_lstm_cells.zero_state(
batch_size=tf.cast(x=cur_batch_size, dtype=tf.int32),
dtype=tf.float64),
dtype=tf.float64)
    # We just pass on the final c and h states of the encoder's last layer,
# so extract that and drop the others
# LSTMStateTuple shape = (cur_batch_size, lstm_hidden_units[-1])
enc_final_states = enc_states[-1]
# Extract the c and h states from the tuple
# both have shape = (cur_batch_size, lstm_hidden_units[-1])
enc_final_c, enc_final_h = enc_final_states
# In case the decoder"s first layer's number of units is different than
# encoder's last layer's number of units, use a dense layer to map to the
# correct shape
# shape = (cur_batch_size, dec_lstm_hidden_units[0])
enc_final_c_dense = tf.layers.dense(
inputs=enc_final_c,
units=params["dec_lstm_hidden_units"][0],
activation=None)
# shape = (cur_batch_size, dec_lstm_hidden_units[0])
enc_final_h_dense = tf.layers.dense(
inputs=enc_final_h,
units=params["dec_lstm_hidden_units"][0],
activation=None)
# The decoder"s first layer"s state comes from the encoder,
# the rest of the layers" initial states are zero
dec_init_states = tuple(
[tf.contrib.rnn.LSTMStateTuple(c=enc_final_c_dense,
h=enc_final_h_dense)] + \
[tf.contrib.rnn.LSTMStateTuple(
c=tf.zeros(shape=[cur_batch_size, units], dtype=tf.float64),
h=tf.zeros(shape=[cur_batch_size, units], dtype=tf.float64))
for units in params["dec_lstm_hidden_units"][1:]])
##############################################################################
# Create decoder of encoder-decoder LSTM stacks
# Train our decoder now
# Encoder-decoders work differently during training, evaluation, and inference
  # so we build two separate subgraphs: one for reconstruction training and
  # one for evaluation/inference
if (mode == tf.estimator.ModeKeys.TRAIN and
params["training_mode"] == "reconstruction"):
# Break 3-D labels tensor into a list of 2-D tensors
# shape = (cur_batch_size, num_feat)
unstacked_labels = tf.unstack(value=Y, num=params["seq_len"], axis=1)
# Call our decoder using the labels as our inputs, the encoder final state
# as our initial state, our other LSTM stack as our cells, and inference
# set to false
dec_outputs, _ = rnn_decoder(
dec_input=unstacked_labels,
init_state=dec_init_states,
cell=dec_stacked_lstm_cells,
infer=False,
dnn_hidden_units=params["dnn_hidden_units"],
num_feat=params["num_feat"])
else:
# Since this is inference create fake labels. The list length needs to be
# the output sequence length even though only the first element is the only
# one actually used (as our go signal)
fake_labels = [tf.zeros(shape=[cur_batch_size, params["num_feat"]],
dtype=tf.float64)
for _ in range(params["seq_len"])]
# Call our decoder using fake labels as our inputs, the encoder final state
# as our initial state, our other LSTM stack as our cells, and inference
# set to true
# dec_outputs = seq_len long of shape = (cur_batch_size, dec_lstm_hidden_units[-1])
# decoder_states = tuple of final decoder c_state and h_state for each layer
dec_outputs, _ = rnn_decoder(
dec_input=fake_labels,
init_state=dec_init_states,
cell=dec_stacked_lstm_cells,
infer=True,
dnn_hidden_units=params["dnn_hidden_units"],
num_feat=params["num_feat"])
# Stack together list of rank 2 decoder output tensors into one rank 3 tensor
# shape = (cur_batch_size, seq_len, lstm_hidden_units[-1])
stacked_dec_outputs = tf.stack(values=dec_outputs, axis=1)
# Reshape rank 3 decoder outputs into rank 2 by folding sequence length into
# batch size
# shape = (cur_batch_size * seq_len, lstm_hidden_units[-1])
reshaped_stacked_dec_outputs = tf.reshape(
tensor=stacked_dec_outputs,
shape=[cur_batch_size * params["seq_len"],
params["dec_lstm_hidden_units"][-1]])
##############################################################################
# Create the DNN structure now after the encoder-decoder LSTM stack
# Create the input layer to our DNN
# shape = (cur_batch_size * seq_len, lstm_hidden_units[-1])
network = reshaped_stacked_dec_outputs
# Reuse the same variable scope as we used within our decoder (for inference)
with tf.variable_scope(name_or_scope="dnn", reuse=tf.AUTO_REUSE):
# Add hidden layers with the given number of units/neurons per layer
for units in params["dnn_hidden_units"]:
# shape = (cur_batch_size * seq_len, dnn_hidden_units[i])
network = tf.layers.dense(
inputs=network,
units=units,
activation=tf.nn.relu)
# Connect the final hidden layer to a dense layer with no activation to
# get the logits
# shape = (cur_batch_size * seq_len, num_feat)
logits = tf.layers.dense(
inputs=network,
units=params["num_feat"],
activation=None)
# Now that we are through the final DNN for each sequence element for
# each example in the batch, reshape the predictions to match our labels.
# shape = (cur_batch_size, seq_len, num_feat)
predictions = tf.reshape(
tensor=logits,
shape=[cur_batch_size, params["seq_len"], params["num_feat"]])
if (mode == tf.estimator.ModeKeys.TRAIN and
params["training_mode"] == "reconstruction"):
loss = tf.losses.mean_squared_error(labels=Y, predictions=predictions)
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="Adam")
return loss, train_op, None, None, None, None
else:
if params["reverse_labels_sequence"]:
# shape=(cur_batch_size, seq_len, num_feat)
predictions = tf.reverse_sequence(
input=predictions,
seq_lengths=tf.tile(
input=tf.constant(value=[params["seq_len"]], dtype=tf.int64),
multiples=tf.expand_dims(input=cur_batch_size, axis=0)),
seq_axis=1,
batch_axis=0)
# Reshape into 2-D tensors
# Time based
# shape = (cur_batch_size * seq_len, num_feat)
X_time = tf.reshape(
tensor=X,
shape=[cur_batch_size * params["seq_len"], params["num_feat"]])
X_time_recon = tf.reshape(
tensor=predictions,
shape=[cur_batch_size * params["seq_len"], params["num_feat"]])
# Features based
# shape = (cur_batch_size, num_feat, seq_len)
X_transposed = tf.transpose(a=X, perm=[0, 2, 1])
# shape = (cur_batch_size * num_feat, seq_len)
X_feat = tf.reshape(
tensor=X_transposed,
shape=[cur_batch_size * params["num_feat"], params["seq_len"]])
# shape = (cur_batch_size, num_feat, seq_len)
predictions_transposed = tf.transpose(a=predictions, perm=[0, 2, 1])
# shape = (cur_batch_size * num_feat, seq_len)
X_feat_recon = tf.reshape(
tensor=predictions_transposed,
shape=[cur_batch_size * params["num_feat"], params["seq_len"]])
return None, None, X_time, X_time_recon, X_feat, X_feat_recon
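# Usage sketch inside a custom Estimator model_fn (hypothetical params dict
# holding seq_len, num_feat, the LSTM/DNN layer sizes, learning_rate, etc.):
#   loss, train_op, X_time, X_time_recon, X_feat, X_feat_recon = (
#       lstm_enc_dec_autoencoder_model(X, mode, params, cur_batch_size, dummy_var))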
|
{
"content_hash": "a5e1b00acea6a8c51c38777a0afdded9",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 87,
"avg_line_length": 40.390052356020945,
"alnum_prop": 0.6533151856892864,
"repo_name": "turbomanage/training-data-analyst",
"id": "dea9f5ebc16d19f7c841fa5d0b4385b4785b6ae8",
"size": "15429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "courses/machine_learning/asl/open_project/time_series_anomaly_detection/tf_anomaly_detection_model_selection/anomaly_detection_module/trainer/autoencoder_lstm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19768"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "13208"
},
{
"name": "Dockerfile",
"bytes": "35682"
},
{
"name": "HTML",
"bytes": "2069111"
},
{
"name": "Java",
"bytes": "1539437"
},
{
"name": "JavaScript",
"bytes": "2540305"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "61371931"
},
{
"name": "Makefile",
"bytes": "4118"
},
{
"name": "PLpgSQL",
"bytes": "5868"
},
{
"name": "PigLatin",
"bytes": "393"
},
{
"name": "Python",
"bytes": "9553863"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Shell",
"bytes": "390786"
},
{
"name": "TSQL",
"bytes": "34160"
}
],
"symlink_target": ""
}
|
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class BackupProtectedItemsOperations(object):
"""BackupProtectedItemsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def list(
self, vault_name, resource_group_name, filter=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
"""Provides a pageable list of all items that are backed up within a
vault.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param filter: OData filter options.
:type filter: str
:param skip_token: skipToken Filter.
:type skip_token: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`ProtectedItemResource
<azure.mgmt.recoveryservicesbackup.models.ProtectedItemResource>`
:rtype: :class:`ProtectedItemResourcePaged
<azure.mgmt.recoveryservicesbackup.models.ProtectedItemResourcePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectedItems'
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ProtectedItemResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ProtectedItemResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
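# Usage sketch (hypothetical credentials and names; the operation group is
# exposed on the generated client):
#   client = RecoveryServicesBackupClient(credentials, subscription_id)
#   for item in client.backup_protected_items.list('myVault', 'myResourceGroup'):
#       print(item.name)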
|
{
"content_hash": "1397da64d2875de1663a116e468fefce",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 167,
"avg_line_length": 45.08490566037736,
"alnum_prop": 0.6333961079723791,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "795ea6e78e384ad0030d7c0eee5843d82ed123f6",
"size": "5253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_protected_items_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import time
from tvm.rpc import proxy
def start_proxy_server(port, timeout):
prox = proxy.Proxy("localhost", port=port, port_end=port+1)
    if timeout > 0:
        time.sleep(timeout)
prox.terminate()
else:
prox.proc.join()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
sys.exit(-1)
port = int(sys.argv[1])
timeout = 0 if len(sys.argv) == 2 else float(sys.argv[2])
start_proxy_server(port, timeout)
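# Usage sketch: start a proxy on port 9090 that terminates after 30 seconds:
#   python test_rpc_proxy_server.py 9090 30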
|
{
"content_hash": "4c27632c117d123c9d7e468160417dc5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 25.68421052631579,
"alnum_prop": 0.5922131147540983,
"repo_name": "mlperf/training_results_v0.6",
"id": "09b7e4a6fe02c9c2be5492e9216d958fde63132a",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/jvm/core/src/test/scripts/test_rpc_proxy_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
import datetime
from django import template
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.translation import get_language
from django.utils import formats
from codenerix.helpers import zeropad, monthname, nameunify
register = template.Library()
register.filter('digitos', zeropad)
register.filter('monthname', monthname)
register.filter('nameunify', nameunify)
@register.filter
def debugme(obj, kind=None):
if kind == 'dict':
obj = obj.__dict__
elif kind == 'keys':
obj = obj.__dict__.keys()
raise IOError(obj)
@register.filter
def debugmedict(obj):
raise IOError(obj.__dict__)
@register.filter
def addedit(value):
return (value == 'add') or (value == 'edit')
@register.filter
def invert(listt):
newlist = []
for element in listt:
newlist.insert(0, element)
return newlist
@register.filter
def differ(value1, value2):
return abs(value1 - value2)
@register.filter
def ghtml(value):
if value:
splitted = value.replace("\r", "").split("\n")
result = ''
for row in splitted:
oldlen = 0
while oldlen != len(row):
oldlen = len(row)
                row = row.replace('  ', ' &nbsp;')
if len(row) > 0 and row[0] == '#':
result += "<b>%s</b>" % (row[1:])
else:
result += "%s" % (row)
result += '<br>'
else:
result = value
return result
@register.filter
def smallerthan(value1, value2):
return value1 < value2
@register.filter
def br(value):
splitted = value.split("\n")
header = splitted[0]
body = '<br>'.join(splitted[1:])
return "<div style='color:#5588BB'>%s</div><div style='color:#22BB00; margin-top:5px;'>%s</div>" % (header, body)
@register.filter
def nicenull(value):
if value:
return value
else:
return "-"
@register.filter
def nicekilometers(value):
if value:
return "{0}km".format(value)
else:
return "-"
@register.filter
def niceeuronull(value):
if value:
return u"{0}\u20AC".format(value)
else:
return "-"
@register.filter
def nicepercentnull(value):
if value:
return "%s%%" % (value)
else:
return "-"
@register.filter
def nicebool(value):
if value:
return _("Yes")
else:
return _("No")
@register.filter
def ynbool(value):
if value:
return "yes"
else:
return "no"
@register.filter
def toint(value):
try:
newvalue = int(value)
except Exception:
newvalue = None
return newvalue
@register.filter
def notval(value):
return not value
@register.filter
def count(values):
return values.count()
@register.filter
def countpages(values):
return (values.count() - 1)
@register.filter
def freedombool(value1, value2):
if value1 >= value2:
return "yes"
else:
return "no"
@register.filter
def pair(value):
if value % 2:
return False
else:
return True
@register.filter(name='len')
def lenlist(list):
return len(list)
@register.filter
def nbsp(value):
    return value.replace(' ', '&nbsp;')
@register.filter
def mod(value, arg):
if (value % arg == 0):
return 1
else:
return
@register.filter
def keyvalue(dic, key):
return dic[key]
@register.filter
def acumulate(element, li):
if li:
number = li[-1]['id'] + 1
else:
number = 1
li.append({'id': number, 'value': element})
return number
@register.filter
def getforms(forms, form):
if forms:
return forms
else:
return [form]
@register.filter
def langforms(forms, language):
for form in forms:
form.set_language(language)
return forms
@register.filter
def objectatrib(instance, atrib):
    '''
    This filter is useful for executing an object method or getting an object
    attribute dynamically. It takes into account that the atrib param may
    contain double underscores, which act as attribute-path separators.
    '''
    atrib = atrib.replace("__", ".")
    atribs = atrib.split(".")
obj = instance
for atrib in atribs:
if type(obj) == dict:
result = obj[atrib]
else:
try:
result = getattr(obj, atrib)()
except Exception:
result = getattr(obj, atrib)
obj = result
return result
@register.filter
def TrueFalse(value):
if type(value) == bool:
if value:
return _('True')
else:
return _('False')
return value
@register.filter
def cdnx_beauty(value, kind=None):
if kind:
if kind == 'skype':
return u"<a ng-click='$event.stopPropagation();' href='tel:{0}'>{0}</a>".format(value)
elif kind == 'image':
return u"<img ng-click='$event.stopPropagation();' src='{0}{1}'>".format(settings.MEDIA_URL, value)
elif kind == 'nofilter':
return value
else:
raise Exception("Django filter 'codenerix' got a wrong kind named '" + kind + "'")
else:
if value is None:
return nicenull(value)
elif type(value) is bool:
return TrueFalse(value)
elif type(value) is datetime.datetime:
fmt = formats.get_format('DATETIME_INPUT_FORMATS', lang=get_language())[0]
value = datetime.datetime.strftime(value, fmt)
        elif type(value) is datetime.time:
            # time.time is a function, not a type, so the original branch could
            # never match; datetime.time is the type a time value actually has.
            fmt = formats.get_format('TIME_INPUT_FORMATS', lang=get_language())[0]
            value = value.strftime(fmt)
elif type(value) is float:
if float(int(value)) == value:
value = int(value)
return value
@register.filter
def multiplication(value, arg):
return float(value) * float(arg)
@register.filter
def division(value, arg):
if arg != 0:
return float(value) / float(arg)
else:
return None
@register.filter
def addition(value, arg):
return float(value) + float(arg)
@register.filter
def subtraction(value, arg):
return float(value) - float(arg)
@register.filter
def autofocus(f):
if f.get('focus', False):
return "autofocus"
else:
return ""
@register.filter
def replace(value, fromto):
(f, t) = fromto.split('·')
return str(value).replace(f, t)
@register.filter
def set_ngmodel(inp, name):
inp.field.widget.field_name = name
return inp
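# Usage sketch in a Django template (the load name matches this file name):
#   {% load codenerix_common %}
#   {{ value|nicenull }}  {{ mydict|keyvalue:"name" }}  {{ price|multiplication:1.21 }}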
|
{
"content_hash": "e38a93e0c599fbb53344f0d74b91ef34",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 117,
"avg_line_length": 20.271604938271604,
"alnum_prop": 0.5927222898903776,
"repo_name": "centrologic/django-codenerix",
"id": "a4a98e6e97d56faaa79d489884e6824230a6effb",
"size": "7266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codenerix/templatetags/codenerix_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "611079"
},
{
"name": "HTML",
"bytes": "113074"
},
{
"name": "JavaScript",
"bytes": "1060640"
},
{
"name": "Python",
"bytes": "482014"
}
],
"symlink_target": ""
}
|
"""This example gets all audience segments.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v202211')
# Create a statement to select audience segments.
statement = ad_manager.StatementBuilder(version='v202211')
# Retrieve a small amount of audience segments at a time, paging
# through until all audience segments have been retrieved.
while True:
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for audience_segment in response['results']:
# Print out some information for each audience segment.
print('Audience segment with ID "%d", name "%s", and size "%d" was '
'found.\n' % (audience_segment['id'], audience_segment['name'],
audience_segment['size']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
{
"content_hash": "f828f20355d6b78bed3d143e2c4e3cb1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 35.891891891891895,
"alnum_prop": 0.6905120481927711,
"repo_name": "googleads/googleads-python-lib",
"id": "296a3d7dd4879deb5d2576a1c37077837f92a7cc",
"size": "1949",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202211/audience_segment_service/get_all_audience_segments.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
"""
Ansible module to add boundary meters.
(c) 2013, curtis <curtis@serverascode.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import datetime
import base64
import os
DOCUMENTATION = '''
module: boundary_meter
short_description: Manage boundary meters
description:
- This module manages boundary meters
version_added: "1.3"
author: curtis@serverascode.com
requirements:
- Boundary API access
- bprobe is required to send data, but not to register a meter
- Python urllib2
options:
name:
description:
- meter name
required: true
state:
description:
- Whether to create or remove the client from boundary
required: false
default: true
choices: ["present", "absent"]
apiid:
description:
- Organizations boundary API ID
required: true
apikey:
description:
- Organizations boundary API KEY
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
'''
EXAMPLES='''
- name: Create meter
  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name="{{ inventory_hostname }}"
- name: Delete meter
  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=absent name="{{ inventory_hostname }}"
'''
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
# "resource" like thing or apikey?
def auth_encode(apikey):
auth = base64.standard_b64encode(apikey)
    auth = auth.replace("\n", "")
return auth
def build_url(name, apiid, action, meter_id=None, cert_type=None):
if action == "create":
return 'https://%s/%s/meters' % (api_host, apiid)
elif action == "search":
return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
elif action == "certificates":
return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
elif action == "tags":
return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
else:
if cert_type is None:
url = build_url(name, apiid, action, meter_id)
else:
url = build_url(name, apiid, action, meter_id, cert_type)
headers = dict()
headers["Authorization"] = "Basic %s" % auth_encode(apikey)
headers["Content-Type"] = "application/json"
return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
# If the meter already exists, do nothing
module.exit_json(status="Meter " + name + " already exists",changed=False)
else:
# If it doesn't exist, create it
body = '{"name":"' + name + '"}'
response, info = http_request(module, name, apiid, apikey, data=body, action="create")
if info['status'] != 200:
module.fail_json(msg="Failed to connect to api host to create meter")
# If the config directory doesn't exist, create it
if not os.path.exists(config_directory):
try:
os.makedirs(config_directory)
except:
module.fail_json("Could not create " + config_directory)
# Download both cert files from the api host
types = ['key', 'cert']
for cert_type in types:
try:
# If we can't open the file it's not there, so we should download it
cert_file = open('%s/%s.pem' % (config_directory,cert_type))
except IOError:
# Now download the file...
rc = download_request(module, name, apiid, apikey, cert_type)
                if not rc:
                    module.fail_json(msg="Download request for " + cert_type + ".pem failed")
return 0, "Meter " + name + " created"
def search_meter(module, name, apiid, apikey):
response, info = http_request(module, name, apiid, apikey, action="search")
if info['status'] != 200:
module.fail_json("Failed to connect to api host to search for meter")
# Return meters
return json.loads(response.read())
def get_meter_id(module, name, apiid, apikey):
# In order to delete the meter we need its id
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
return meters[0]['id']
else:
return None
def delete_meter(module, name, apiid, apikey):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is None:
return 1, "Meter does not exist, so can't delete it"
else:
        response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
        if info['status'] != 200:
            module.fail_json(msg="Failed to delete meter")
# Each new meter gets a new key.pem and ca.pem file, so they should be deleted
types = ['cert', 'key']
for cert_type in types:
try:
cert_file = '%s/%s.pem' % (config_directory,cert_type)
os.remove(cert_file)
except OSError, e:
module.fail_json("Failed to remove " + cert_type + ".pem file")
return 0, "Meter " + name + " deleted"
def download_request(module, name, apiid, apikey, cert_type):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is not None:
action = "certificates"
        response, info = http_request(module, name, apiid, apikey, action=action, meter_id=meter_id, cert_type=cert_type)
        if info['status'] != 200:
            module.fail_json(msg="Failed to connect to api host to download certificate")
        try:
            cert_file_path = '%s/%s.pem' % (config_directory, cert_type)
            body = response.read()
            cert_file = open(cert_file_path, 'w')
            cert_file.write(body)
            cert_file.close()
            os.chmod(cert_file_path, 0600)
        except:
            module.fail_json(msg="Could not write to certificate file")
        return True
else:
module.fail_json("Could not get meter id")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True),
apikey=dict(required=True),
apiid=dict(required=True),
validate_certs = dict(default='yes', type='bool'),
)
)
state = module.params['state']
    name = module.params['name']
    apikey = module.params['apikey']
    apiid = module.params['apiid']
if state == "present":
(rc, result) = create_meter(module, name, apiid, apikey)
if state == "absent":
(rc, result) = delete_meter(module, name, apiid, apikey)
if rc != 0:
module.fail_json(msg=result)
    module.exit_json(status=result, changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
{
"content_hash": "10afcbe76576896d502c69313e8a5354",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 96,
"avg_line_length": 32.019762845849804,
"alnum_prop": 0.6138748302678682,
"repo_name": "mith1979/ansible_automation",
"id": "f6d84328597960fed76f5a5a810c5d5a9b93b3ad",
"size": "8144",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/monitoring/boundary_meter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from switch import Map
from switch import MediaItem
from switch import Rule
from switch import Switch
from switch import TwilioMessageRequest
from _version import __version__
|
{
"content_hash": "5d9d31c2c35b7f345ace01466baf8720",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.8295454545454546,
"repo_name": "skimbrel/strowger",
"id": "932e0c470080716316fbf05949cdde13c83b9c1b",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strowger/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5629"
}
],
"symlink_target": ""
}
|
"""Program to locate the first space in the input string."""
s = input("Please enter a string: ")
pos = 0
for c in s:
if c == " ":
print("First space occurred at position", pos)
break
pos += 1
else:
print("No spaces in that string.")
|
{
"content_hash": "6e0d596211f3537c74aff669afaefbe0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 23.818181818181817,
"alnum_prop": 0.5954198473282443,
"repo_name": "ceeblet/OST_PythonCertificationTrack",
"id": "cb61698a6a93f7c6005e24fdaf85d9ccaed5365a",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python1/python1/space_finder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198495"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from src.comms.models import Channel, Msg, PlayerChannelConnection, ExternalChannelConnection
class MsgAdmin(admin.ModelAdmin):
list_display = ('id', 'db_date_sent', 'db_sender', 'db_receivers', 'db_channels', 'db_message', 'db_lock_storage')
list_display_links = ("id",)
ordering = ["db_date_sent", 'db_sender', 'db_receivers', 'db_channels']
#readonly_fields = ['db_message', 'db_sender', 'db_receivers', 'db_channels']
search_fields = ['id', '^db_date_sent', '^db_message']
save_as = True
save_on_top = True
list_select_related = True
#admin.site.register(Msg, MsgAdmin)
class PlayerChannelConnectionInline(admin.TabularInline):
model = PlayerChannelConnection
fieldsets = (
(None, {
'fields':(('db_player', 'db_channel')),
'classes':('collapse',)}),)
extra = 1
class ExternalChannelConnectionInline(admin.StackedInline):
model = ExternalChannelConnection
fieldsets = (
(None, {
'fields':(('db_is_enabled','db_external_key', 'db_channel'), 'db_external_send_code', 'db_external_config'),
'classes':('collapse',)
}),)
extra = 1
class ChannelAdmin(admin.ModelAdmin):
inlines = (PlayerChannelConnectionInline, ExternalChannelConnectionInline)
list_display = ('id', 'db_key', 'db_desc', 'db_aliases', 'db_keep_log', 'db_lock_storage')
list_display_links = ("id", 'db_key')
ordering = ["db_key"]
search_fields = ['id', 'db_key', 'db_aliases']
save_as = True
save_on_top = True
list_select_related = True
fieldsets = (
(None, {'fields':(('db_key', 'db_aliases', 'db_desc'),'db_lock_storage', 'db_keep_log')}),
)
admin.site.register(Channel, ChannelAdmin)
# class PlayerChannelConnectionAdmin(admin.ModelAdmin):
# list_display = ('db_channel', 'db_player')
# list_display_links = ("db_player", 'db_channel')
# ordering = ["db_channel"]
# search_fields = ['db_channel', 'db_player']
# save_as = True
# save_on_top = True
# list_select_related = True
# admin.site.register(PlayerChannelConnection, PlayerChannelConnectionAdmin)
# class ExternalChannelConnectionAdmin(admin.ModelAdmin):
# list_display = ('db_channel', 'db_external_key', 'db_external_config')
# list_display_links = ("db_channel", 'db_external_key', 'db_external_config')
# ordering = ["db_channel"]
# search_fields = ['db_channel', 'db_external_key']
# save_as = True
# save_on_top = True
# list_select_related = True
# admin.site.register(ExternalChannelConnection, ExternalChannelConnectionAdmin)
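# Usage note: MsgAdmin above is defined but never registered; re-enabling the
# commented-out line next to its definition would expose messages in the admin
# as well:
# admin.site.register(Msg, MsgAdmin)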
|
{
"content_hash": "59eff9204cd0a6fdc6dd6050c4b0b0e1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 124,
"avg_line_length": 39.73134328358209,
"alnum_prop": 0.6416228399699474,
"repo_name": "TaliesinSkye/evennia",
"id": "b88a7ad240f0a40325573fc0cf6d9b8ee6125a68",
"size": "2737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/comms/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59698"
},
{
"name": "D",
"bytes": "9343933"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "91190"
},
{
"name": "Python",
"bytes": "2840755"
},
{
"name": "Shell",
"bytes": "4577"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.apps import apps
from .conf import settings
from haystack import connections, connection_router
from haystack.exceptions import NotHandled as IndexNotFoundException
from celery.app.task import Task # noqa
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
class CeleryHaystackSignalHandler(Task):
name = 'celery_haystack.tasks.CeleryHaystackSignalHandler'
using = settings.CELERY_HAYSTACK_DEFAULT_ALIAS
max_retries = settings.CELERY_HAYSTACK_MAX_RETRIES
default_retry_delay = settings.CELERY_HAYSTACK_RETRY_DELAY
def split_identifier(self, identifier, **kwargs):
"""
Break down the identifier representing the instance.
Converts 'notes.note.23' into ('notes.note', 23).
"""
bits = identifier.split('.')
if len(bits) < 2:
logger.error("Unable to parse object "
"identifer '%s'. Moving on..." % identifier)
return (None, None)
pk = bits[-1]
# In case Django ever handles full paths...
object_path = '.'.join(bits[:-1])
return (object_path, pk)
def get_model_class(self, object_path, **kwargs):
"""
        Fetch the model's class in a standardized way.
"""
bits = object_path.split('.')
app_name = '.'.join(bits[:-1])
classname = bits[-1]
model_class = apps.get_model(app_name, classname)
if model_class is None:
raise ImproperlyConfigured("Could not load model '%s'." %
object_path)
return model_class
def get_instance(self, model_class, pk, **kwargs):
"""
        Fetch the instance in a standardized way.
"""
instance = None
try:
instance = model_class._default_manager.using(
settings.CELERY_HAYSTACK_DEFAULT_DATABASE_ALIAS
).get(pk=pk)
except model_class.DoesNotExist:
logger.error("Couldn't load %s.%s.%s. Somehow it went missing?" %
(model_class._meta.app_label.lower(),
model_class._meta.object_name.lower(), pk))
except model_class.MultipleObjectsReturned:
logger.error("More than one object with pk %s. Oops?" % pk)
return instance
def get_indexes(self, model_class, **kwargs):
"""
        Fetch the model's registered ``SearchIndex`` in a standardized way.
"""
try:
using_backends = connection_router.for_write(**{'models': [model_class]})
for using in using_backends:
index_holder = connections[using].get_unified_index()
yield index_holder.get_index(model_class), using
except IndexNotFoundException:
raise ImproperlyConfigured("Couldn't find a SearchIndex for %s." %
model_class)
def run(self, action, identifier, **kwargs):
"""
Trigger the actual index handler depending on the
given action ('update' or 'delete').
"""
# First get the object path and pk (e.g. ('notes.note', 23))
object_path, pk = self.split_identifier(identifier, **kwargs)
if object_path is None or pk is None:
msg = "Couldn't handle object with identifier %s" % identifier
logger.error(msg)
raise ValueError(msg)
# Then get the model class for the object path
model_class = self.get_model_class(object_path, **kwargs)
for current_index, using in self.get_indexes(model_class, **kwargs):
current_index_name = ".".join([current_index.__class__.__module__,
current_index.__class__.__name__])
if action == 'delete':
# If the object is gone, we'll use just the identifier
# against the index.
try:
current_index.remove_object(identifier, using=using)
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
else:
msg = ("Deleted '%s' (with %s)" %
(identifier, current_index_name))
logger.debug(msg)
elif action == 'update':
# and the instance of the model class with the pk
instance = self.get_instance(model_class, pk, **kwargs)
if instance is None:
logger.debug("Failed updating '%s' (with %s)" %
(identifier, current_index_name))
raise ValueError("Couldn't load object '%s'" % identifier)
# Call the appropriate handler of the current index and
                # handle exception if necessary
try:
current_index.update_object(instance, using=using)
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
else:
msg = ("Updated '%s' (with %s)" %
(identifier, current_index_name))
logger.debug(msg)
else:
logger.error("Unrecognized action '%s'. Moving on..." % action)
raise ValueError("Unrecognized action %s" % action)
class CeleryHaystackUpdateIndex(Task):
"""
A celery task class to be used to call the update_index management
command from Celery.
"""
def run(self, apps=None, **kwargs):
defaults = {
'batchsize': settings.CELERY_HAYSTACK_COMMAND_BATCH_SIZE,
'age': settings.CELERY_HAYSTACK_COMMAND_AGE,
'remove': settings.CELERY_HAYSTACK_COMMAND_REMOVE,
'using': [settings.CELERY_HAYSTACK_DEFAULT_ALIAS],
'workers': settings.CELERY_HAYSTACK_COMMAND_WORKERS,
'verbosity': settings.CELERY_HAYSTACK_COMMAND_VERBOSITY,
}
defaults.update(kwargs)
if apps is None:
apps = settings.CELERY_HAYSTACK_COMMAND_APPS
# Run the update_index management command
logger.info("Starting update index")
call_command('update_index', *apps, **defaults)
logger.info("Finishing update index")
|
{
"content_hash": "340d9de6d2e25d0eb4072d1e6ad20897",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 40.59748427672956,
"alnum_prop": 0.5657629744384198,
"repo_name": "roverdotcom/celery-haystack",
"id": "7dbe479fc814b55dc4f0de5537acc598dc88fead",
"size": "6455",
"binary": false,
"copies": "1",
"ref": "refs/heads/rover-0.10",
"path": "celery_haystack/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16675"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import re
try:
from django.urls import get_resolver
except ImportError:
from django.core.urlresolvers import get_resolver
def get_regex(resolver_or_pattern):
"""Utility method for django's deprecated resolver.regex"""
try:
regex = resolver_or_pattern.regex
except AttributeError:
regex = resolver_or_pattern.pattern.regex
return regex
class RouteResolver(object):
_optional_group_matcher = re.compile(r'\(\?\:([^\)]+)\)')
_named_group_matcher = re.compile(r'\(\?P<(\w+)>[^\)]+\)')
_non_named_group_matcher = re.compile(r'\([^\)]+\)')
# [foo|bar|baz]
_either_option_matcher = re.compile(r'\[([^\]]+)\|([^\]]+)\]')
_camel_re = re.compile(r'([A-Z]+)([a-z])')
_cache = {}
def _simplify(self, pattern):
r"""
Clean up urlpattern regexes into something readable by humans:
From:
> "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
To:
> "{sport_slug}/athletes/{athlete_slug}/"
"""
# remove optional params
# TODO(dcramer): it'd be nice to change these into [%s] but it currently
# conflicts with the other rules because we're doing regexp matches
# rather than parsing tokens
result = self._optional_group_matcher.sub(lambda m: '%s' % m.group(1), pattern)
# handle named groups first
result = self._named_group_matcher.sub(lambda m: '{%s}' % m.group(1), result)
# handle non-named groups
result = self._non_named_group_matcher.sub('{var}', result)
# handle optional params
result = self._either_option_matcher.sub(lambda m: m.group(1), result)
# clean up any outstanding regex-y characters.
result = result.replace('^', '').replace('$', '') \
.replace('?', '').replace('//', '/').replace('\\', '')
return result
def _resolve(self, resolver, path, parents=None):
match = get_regex(resolver).search(path) # Django < 2.0
if not match:
return
if parents is None:
parents = [resolver]
elif resolver not in parents:
parents = parents + [resolver]
new_path = path[match.end():]
for pattern in resolver.url_patterns:
# this is an include()
if not pattern.callback:
match = self._resolve(pattern, new_path, parents)
if match:
return match
continue
elif not get_regex(pattern).search(new_path):
continue
try:
return self._cache[pattern]
except KeyError:
pass
prefix = ''.join(self._simplify(get_regex(p).pattern) for p in parents)
result = prefix + self._simplify(get_regex(pattern).pattern)
if not result.startswith('/'):
result = '/' + result
self._cache[pattern] = result
return result
def resolve(self, path, urlconf=None):
resolver = get_resolver(urlconf)
match = self._resolve(resolver, path)
return match or path
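# Minimal usage sketch (the urlconf and path are illustrative): given a
# pattern such as r'^api/(?P<project_id>\w+)/store/', the resolver maps a
# concrete path back to a readable template:
# RouteResolver().resolve('/api/1234/store/')  # -> '/api/{project_id}/store/'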
|
{
"content_hash": "2bafb9af63b2ca99b8bb3b7a16ddd2d6",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 87,
"avg_line_length": 32.24242424242424,
"alnum_prop": 0.5620300751879699,
"repo_name": "getsentry/raven-python",
"id": "3f3c74f4c6cff845f9a78b37df606f8ce9770bd7",
"size": "3192",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "raven/contrib/django/resolver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "101"
},
{
"name": "Makefile",
"bytes": "957"
},
{
"name": "Python",
"bytes": "473465"
},
{
"name": "Shell",
"bytes": "2352"
}
],
"symlink_target": ""
}
|
from typing import List, Dict
from datetime import datetime
import numpy as np
from vnpy.app.portfolio_strategy import StrategyTemplate, StrategyEngine
from vnpy.trader.utility import BarGenerator
from vnpy.trader.object import TickData, BarData
class PairTradingStrategy(StrategyTemplate):
""""""
author = "用Python的交易员"
price_add = 5
boll_window = 20
boll_dev = 2
fixed_size = 1
leg1_ratio = 1
leg2_ratio = 1
leg1_symbol = ""
leg2_symbol = ""
current_spread = 0.0
boll_mid = 0.0
boll_down = 0.0
boll_up = 0.0
parameters = [
"price_add",
"boll_window",
"boll_dev",
"fixed_size",
"leg1_ratio",
"leg2_ratio",
]
variables = [
"leg1_symbol",
"leg2_symbol",
"current_spread",
"boll_mid",
"boll_down",
"boll_up",
]
def __init__(
self,
strategy_engine: StrategyEngine,
strategy_name: str,
vt_symbols: List[str],
setting: dict
):
""""""
super().__init__(strategy_engine, strategy_name, vt_symbols, setting)
self.bgs: Dict[str, BarGenerator] = {}
self.targets: Dict[str, int] = {}
self.last_tick_time: datetime = None
self.spread_count: int = 0
self.spread_data: np.array = np.zeros(100)
# Obtain contract info
self.leg1_symbol, self.leg2_symbol = vt_symbols
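        # Placeholder callback: BarGenerator's constructor requires an on_bar
        # handler, but this strategy collects finished bars via bg.generate()
        # in on_tick instead.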
def on_bar(bar: BarData):
""""""
pass
for vt_symbol in self.vt_symbols:
self.targets[vt_symbol] = 0
self.bgs[vt_symbol] = BarGenerator(on_bar)
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
self.load_bars(1)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
if (
self.last_tick_time
and self.last_tick_time.minute != tick.datetime.minute
):
bars = {}
for vt_symbol, bg in self.bgs.items():
bars[vt_symbol] = bg.generate()
self.on_bars(bars)
bg: BarGenerator = self.bgs[tick.vt_symbol]
bg.update_tick(tick)
self.last_tick_time = tick.datetime
def on_bars(self, bars: Dict[str, BarData]):
""""""
self.cancel_all()
# Return if one leg data is missing
if self.leg1_symbol not in bars or self.leg2_symbol not in bars:
return
# Calculate current spread
leg1_bar = bars[self.leg1_symbol]
leg2_bar = bars[self.leg2_symbol]
        # Time filter: only run once every 5 minutes
if (leg1_bar.datetime.minute + 1) % 5:
return
self.current_spread = (
leg1_bar.close_price * self.leg1_ratio - leg2_bar.close_price * self.leg2_ratio
)
# Update to spread array
self.spread_data[:-1] = self.spread_data[1:]
self.spread_data[-1] = self.current_spread
self.spread_count += 1
if self.spread_count <= self.boll_window:
return
# Calculate boll value
buf: np.array = self.spread_data[-self.boll_window:]
std = buf.std()
self.boll_mid = buf.mean()
self.boll_up = self.boll_mid + self.boll_dev * std
self.boll_down = self.boll_mid - self.boll_dev * std
# Calculate new target position
leg1_pos = self.get_pos(self.leg1_symbol)
if not leg1_pos:
if self.current_spread >= self.boll_up:
self.targets[self.leg1_symbol] = -1
self.targets[self.leg2_symbol] = 1
elif self.current_spread <= self.boll_down:
self.targets[self.leg1_symbol] = 1
self.targets[self.leg2_symbol] = -1
elif leg1_pos > 0:
if self.current_spread >= self.boll_mid:
self.targets[self.leg1_symbol] = 0
self.targets[self.leg2_symbol] = 0
else:
if self.current_spread <= self.boll_mid:
self.targets[self.leg1_symbol] = 0
self.targets[self.leg2_symbol] = 0
# Execute orders
for vt_symbol in self.vt_symbols:
target_pos = self.targets[vt_symbol]
current_pos = self.get_pos(vt_symbol)
pos_diff = target_pos - current_pos
volume = abs(pos_diff)
bar = bars[vt_symbol]
if pos_diff > 0:
price = bar.close_price + self.price_add
if current_pos < 0:
self.cover(vt_symbol, price, volume)
else:
self.buy(vt_symbol, price, volume)
elif pos_diff < 0:
price = bar.close_price - self.price_add
if current_pos > 0:
self.sell(vt_symbol, price, volume)
else:
self.short(vt_symbol, price, volume)
self.put_event()
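# Usage sketch (the engine wiring and symbols are assumptions, not part of
# this file): the strategy would typically be registered with the portfolio
# strategy engine along the lines of
# engine.add_strategy(PairTradingStrategy, "pair_demo", ["IF888.CFFEX", "IH888.CFFEX"], {})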
|
{
"content_hash": "ec0c06d198f22d3de471e77b1bfa2040",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 91,
"avg_line_length": 27.505208333333332,
"alnum_prop": 0.5300132550653285,
"repo_name": "bigdig/vnpy",
"id": "0b66dc7e52451d468d6a02ad6f63eb493fb5eed1",
"size": "5317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vnpy/app/portfolio_strategy/strategies/pair_trading_strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "390"
},
{
"name": "C",
"bytes": "1652953"
},
{
"name": "C++",
"bytes": "13737810"
},
{
"name": "Objective-C",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "2979947"
},
{
"name": "Shell",
"bytes": "6050"
}
],
"symlink_target": ""
}
|
"""Test the openml loader.
"""
import gzip
import json
import numpy as np
import os
import re
import scipy.sparse
import sklearn
import pytest
from sklearn.datasets import fetch_openml
from sklearn.datasets.openml import (_open_openml_url,
_get_data_description_by_id,
_download_data_arff)
from sklearn.utils.testing import (assert_warns_message,
assert_raise_message)
from sklearn.externals.six import string_types
from sklearn.externals.six.moves.urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from functools import partial
currdir = os.path.dirname(os.path.abspath(__file__))
# if True, urlopen will be monkey patched to only use local files
test_offline = True
def _test_features_list(data_id):
# XXX Test is intended to verify/ensure correct decoding behavior
# Not usable with sparse data or datasets that have columns marked as
# {row_identifier, ignore}
def decode_column(data_bunch, col_idx):
col_name = data_bunch.feature_names[col_idx]
if col_name in data_bunch.categories:
# XXX: This would be faster with np.take, although it does not
# handle missing values fast (also not with mode='wrap')
cat = data_bunch.categories[col_name]
result = [cat[idx] if 0 <= idx < len(cat) else None for idx in
data_bunch.data[:, col_idx].astype(int)]
return np.array(result, dtype='O')
else:
# non-nominal attribute
return data_bunch.data[:, col_idx]
data_bunch = fetch_openml(data_id=data_id, cache=False, target_column=None)
# also obtain decoded arff
data_description = _get_data_description_by_id(data_id, None)
sparse = data_description['format'].lower() == 'sparse_arff'
if sparse is True:
raise ValueError('This test is not intended for sparse data, to keep '
'code relatively simple')
data_arff = _download_data_arff(data_description['file_id'],
sparse, None, False)
data_downloaded = np.array(data_arff['data'], dtype='O')
for i in range(len(data_bunch.feature_names)):
# XXX: Test per column, as this makes it easier to avoid problems with
# missing values
np.testing.assert_array_equal(data_downloaded[:, i],
decode_column(data_bunch, i))
def _fetch_dataset_from_openml(data_id, data_name, data_version,
target_column,
expected_observations, expected_features,
expected_missing,
expected_data_dtype, expected_target_dtype,
expect_sparse, compare_default_target):
# fetches a dataset in three various ways from OpenML, using the
# fetch_openml function, and does various checks on the validity of the
# result. Note that this function can be mocked (by invoking
# _monkey_patch_webbased_functions before invoking this function)
data_by_name_id = fetch_openml(name=data_name, version=data_version,
cache=False)
assert int(data_by_name_id.details['id']) == data_id
fetch_openml(name=data_name, cache=False)
# without specifying the version, there is no guarantee that the data id
# will be the same
# fetch with dataset id
data_by_id = fetch_openml(data_id=data_id, cache=False,
target_column=target_column)
assert data_by_id.details['name'] == data_name
assert data_by_id.data.shape == (expected_observations, expected_features)
if isinstance(target_column, str):
# single target, so target is vector
assert data_by_id.target.shape == (expected_observations, )
elif isinstance(target_column, list):
# multi target, so target is array
assert data_by_id.target.shape == (expected_observations,
len(target_column))
assert data_by_id.data.dtype == np.float64
assert data_by_id.target.dtype == expected_target_dtype
assert len(data_by_id.feature_names) == expected_features
for feature in data_by_id.feature_names:
assert isinstance(feature, string_types)
# TODO: pass in a list of expected nominal features
for feature, categories in data_by_id.categories.items():
feature_idx = data_by_id.feature_names.index(feature)
values = np.unique(data_by_id.data[:, feature_idx])
values = values[np.isfinite(values)]
assert set(values) <= set(range(len(categories)))
if compare_default_target:
# check whether the data by id and data by id target are equal
data_by_id_default = fetch_openml(data_id=data_id, cache=False)
if data_by_id.data.dtype == np.float64:
np.testing.assert_allclose(data_by_id.data,
data_by_id_default.data)
else:
assert np.array_equal(data_by_id.data, data_by_id_default.data)
if data_by_id.target.dtype == np.float64:
np.testing.assert_allclose(data_by_id.target,
data_by_id_default.target)
else:
assert np.array_equal(data_by_id.target, data_by_id_default.target)
if expect_sparse:
assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
else:
assert isinstance(data_by_id.data, np.ndarray)
# np.isnan doesn't work on CSR matrix
assert (np.count_nonzero(np.isnan(data_by_id.data)) ==
expected_missing)
# test return_X_y option
fetch_func = partial(fetch_openml, data_id=data_id, cache=False,
target_column=target_column)
check_return_X_y(data_by_id, fetch_func)
return data_by_id
def _monkey_patch_webbased_functions(context,
data_id,
gzip_response):
url_prefix_data_description = "https://openml.org/api/v1/json/data/"
url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
url_prefix_download_data = "https://openml.org/data/v1/"
url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"
path_suffix = '.gz'
read_fn = gzip.open
class MockHTTPResponse(object):
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def tell(self):
return self.data.tell()
def seek(self, pos, whence=0):
return self.data.seek(pos, whence)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {'Content-Encoding': 'gzip'}
return {}
def _file_name(url, suffix):
return (re.sub(r'\W', '-', url[len("https://openml.org/"):])
+ suffix + path_suffix)
def _mock_urlopen_data_description(url, has_gzip_header):
assert url.startswith(url_prefix_data_description)
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.json'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_data_features(url, has_gzip_header):
assert url.startswith(url_prefix_data_features)
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.json'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_download_data(url, has_gzip_header):
assert (url.startswith(url_prefix_download_data))
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.arff'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
json_file_path = os.path.join(currdir, 'data', 'openml',
str(data_id), _file_name(url, '.json'))
        # load the file itself to simulate an HTTP error
json_data = json.loads(read_fn(json_file_path, 'rb').
read().decode('utf-8'))
if 'error' in json_data:
raise HTTPError(url=None, code=412,
msg='Simulated mock error',
hdrs=None, fp=None)
if has_gzip_header:
fp = open(json_file_path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(json_file_path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen(request):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == "gzip"
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
# XXX: Global variable
if test_offline:
context.setattr(sklearn.datasets.openml, 'urlopen', _mock_urlopen)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = 'iris'
data_version = 1
target_column = 'class'
expected_observations = 150
expected_features = 4
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"Multiple active versions of the dataset matching the name"
" iris exist. Versions may be fundamentally different, "
"returning version 1.",
_fetch_dataset_from_openml,
**{'data_id': data_id, 'data_name': data_name,
'data_version': data_version,
'target_column': target_column,
'expected_observations': expected_observations,
'expected_features': expected_features,
'expected_missing': expected_missing,
'expect_sparse': False,
'expected_data_dtype': np.float64,
'expected_target_dtype': object,
'compare_default_target': True}
)
def test_decode_iris(monkeypatch):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = 'iris'
data_version = 1
target_column = ['sepallength', 'sepalwidth']
expected_observations = 150
expected_features = 3
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
object, np.float64, expect_sparse=False,
compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = 'anneal'
data_version = 1
target_column = 'class'
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 38
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
object, object, expect_sparse=False,
compare_default_target=True)
def test_decode_anneal(monkeypatch):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = 'anneal'
data_version = 1
target_column = ['class', 'product-type', 'shape']
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 36
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
object, object, expect_sparse=False,
compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
# regression dataset with numeric and categorical columns
data_id = 561
data_name = 'cpu'
data_version = 1
target_column = 'class'
expected_observations = 209
expected_features = 7
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
object, np.float64, expect_sparse=False,
compare_default_target=True)
def test_decode_cpu(monkeypatch):
data_id = 561
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
# sparse dataset
    # Australian is the only sparse dataset that is reasonably small.
    # As it is inactive, we need to catch the warning. Due to the mocking
    # framework, it is not deactivated in our tests.
data_id = 292
data_name = 'Australian'
data_version = 1
target_column = 'Y'
# Not all original instances included for space reasons
expected_observations = 85
expected_features = 14
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"Version 1 of dataset Australian is inactive,",
_fetch_dataset_from_openml,
**{'data_id': data_id, 'data_name': data_name,
'data_version': data_version,
'target_column': target_column,
'expected_observations': expected_observations,
'expected_features': expected_features,
'expected_missing': expected_missing,
'expect_sparse': True,
'expected_data_dtype': np.float64,
'expected_target_dtype': object,
'compare_default_target': False} # numpy specific check
)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
# JvR: very important check, as this dataset defined several row ids
# and ignore attributes. Note that data_features json has 82 attributes,
# and row id (1), ignore attributes (3) have been removed (and target is
# stored in data.target)
data_id = 40966
data_name = 'MiceProtein'
data_version = 4
target_column = 'class'
# Not all original instances included for space reasons
expected_observations = 7
expected_features = 77
expected_missing = 7
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
# classification dataset with multiple targets (natively)
data_id = 40589
data_name = 'emotions'
data_version = 3
target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
'quiet.still', 'sad.lonely', 'angry.aggresive']
expected_observations = 13
expected_features = 72
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
def test_decode_emotions(monkeypatch):
data_id = 40589
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response):
data_id = 61
_monkey_patch_webbased_functions(
monkeypatch, data_id, gzip_response)
openml_path = sklearn.datasets.openml._DATA_FILE.format(data_id)
test_directory = os.path.join(os.path.expanduser('~'), 'scikit_learn_data')
# first fill the cache
response1 = _open_openml_url(openml_path, test_directory)
# assert file exists
location = os.path.join(test_directory, 'openml.org', openml_path + '.gz')
assert os.path.isfile(location)
# redownload, to utilize cache
response2 = _open_openml_url(openml_path, test_directory)
assert response1.read() == response2.read()
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_notarget(monkeypatch, gzip_response):
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
data = fetch_openml(data_id=data_id, target_column=target_column,
cache=False)
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_inactive(monkeypatch, gzip_response):
# fetch inactive dataset by id
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
glas2 = assert_warns_message(
UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
data_id=data_id, cache=False)
# fetch inactive dataset by name and version
assert glas2.data.shape == (163, 9)
glas2_by_version = assert_warns_message(
UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
data_id=None, name="glass2", version=1, cache=False)
assert int(glas2_by_version.details['id']) == data_id
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_nonexiting(monkeypatch, gzip_response):
# there is no active version of glass2
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# Note that we only want to search by name (not data id)
assert_raise_message(ValueError, "No active dataset glass2 found",
fetch_openml, name='glass2', cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_raises_illegal_multitarget(monkeypatch, gzip_response):
data_id = 61
targets = ['sepalwidth', 'class']
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # mixing numeric and nominal target columns is not supported
assert_raise_message(ValueError,
"Can only handle homogeneous multi-target datasets,",
fetch_openml, data_id=data_id,
target_column=targets, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
data_id = 40966
expected_row_id_msg = "target_column={} has flag is_row_identifier."
expected_ignore_msg = "target_column={} has flag is_ignore."
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
fetch_openml, data_id=data_id,
target_column='MouseID',
cache=False)
assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
fetch_openml, data_id=data_id,
target_column='Genotype',
cache=False)
# multi column test
assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
fetch_openml, data_id=data_id,
target_column=['MouseID', 'class'],
cache=False)
assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
fetch_openml, data_id=data_id,
target_column=['Genotype', 'class'],
cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_string_attribute(monkeypatch, gzip_response):
data_id = 40945
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
assert_raise_message(ValueError,
'STRING attributes are not yet supported',
fetch_openml, data_id=data_id, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_illegal_column(monkeypatch, gzip_response):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_raise_message(KeyError, "Could not find target_column=",
fetch_openml, data_id=data_id,
target_column='undefined', cache=False)
assert_raise_message(KeyError, "Could not find target_column=",
fetch_openml, data_id=data_id,
target_column=['undefined', 'class'],
cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_raise_message(ValueError, "Target column ",
fetch_openml, data_id=data_id, target_column='family')
def test_fetch_openml_raises_illegal_argument():
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name="name")
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name=None,
version="version")
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name="name",
version="version")
assert_raise_message(ValueError, "Neither name nor data_id are provided. "
"Please provide name or data_id.", fetch_openml)
|
{
"content_hash": "b945d2de34d7f6ccd9b0e0ec2a1b8301",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 79,
"avg_line_length": 41.49832214765101,
"alnum_prop": 0.6178385153438726,
"repo_name": "vortex-ape/scikit-learn",
"id": "cf9cfcdc81edee8899a1ff2e0c73c824d24c6776",
"size": "24733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/datasets/tests/test_openml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
}
|
from findatapy.market import Market, MarketDataRequest, MarketDataGenerator
market = Market(market_data_generator=MarketDataGenerator())
# download event data from Bloomberg
# we have to use the special category "events" keyword for economic data events
# so findatapy can correctly identify them (given the underlying Bloomberg API calls are all different, however,
# this will appear transparent to the user)
market_data_request = MarketDataRequest(
start_date = "year",
category = "events",
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['FOMC', 'NFP'],
fields = ['release-date-time-full', 'release-dt', 'actual-release'], # which fields to download
vendor_tickers = ['FDTR Index', 'NFP TCH Index'], # ticker (Bloomberg)
vendor_fields = ['ECO_FUTURE_RELEASE_DATE_LIST', 'ECO_RELEASE_DT', 'ACTUAL_RELEASE']) # which Bloomberg fields to download
df = market.fetch_market(market_data_request)
print(df)
# we also have a few events defined in our configuration file
# tickers/fields which are predefined this way are easier to download
# note how we don't have to use the vendor_tickers and vendor_fields parameters here
market_data_request = MarketDataRequest(
start_date = "year",
category = "events",
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['USD-US Employees on Nonfarm Payrolls Total MoM Net Change SA'],
fields = ['release-date-time-full', 'release-dt', 'actual-release', 'number-observations'])
df = market.fetch_market(market_data_request)
print(df)
# now just download the event day
market_data_request = MarketDataRequest(
start_date = "year",
category = "events",
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['NFP'],
fields = ['release-date-time-full'], # which fields to download
vendor_tickers = ['NFP TCH Index'], # ticker (Bloomberg)
vendor_fields = ['ECO_FUTURE_RELEASE_DATE_LIST']) # which Bloomberg fields to download
df = market.fetch_market(market_data_request)
print(df)
|
{
"content_hash": "ef703c820d1e23503bab9103c7728b78",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 140,
"avg_line_length": 51.1875,
"alnum_prop": 0.6027676027676028,
"repo_name": "kalaytan/findatapy",
"id": "3b33ecee8758d7341aa309997109e6b42a22f69d",
"size": "2457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findatapy/examples/eventsdata_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "280126"
}
],
"symlink_target": ""
}
|
import collections
def cross_product(lhs, rhs):
return lhs.x * rhs.y - lhs.y * rhs.x
class Point(collections.namedtuple('Point', 'x y')):
def __add__(self, other):
return Point(self.x+other.x, self.y+other.y)
def __sub__(self, other):
return Point(self.x-other.x, self.y-other.y)
def direction(origin, first, second):
return cross_product(second-origin, first-origin)
def on_segment(origin, first, second):
if (min(origin.x, first.x) <= second.x <= max(origin.x, first.x)
            and min(origin.y, first.y) <= second.y <= max(origin.y, first.y)):
return True
return False
def does_intersect(first_start, first_end, second_start, second_end):
direction_1 = direction(second_start, second_end, first_start)
direction_2 = direction(second_start, second_end, first_end)
direction_3 = direction(first_start, first_end, second_start)
direction_4 = direction(first_start, first_end, second_end)
if (direction_1 * direction_2 < 0
and direction_3 * direction_4 < 0):
return True
elif direction_1 == 0 and on_segment(second_start, second_end, first_start):
return True
elif direction_2 == 0 and on_segment(second_start, second_end, first_end):
return True
elif direction_3 == 0 and on_segment(first_start, first_end, second_start):
return True
elif direction_4 == 0 and on_segment(first_start, first_end, second_end):
return True
else:
return False
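# Example added for illustration: a small self-check of does_intersect using
# arbitrary points.
if __name__ == '__main__':
    # The diagonals of the unit square cross; its horizontal edges do not.
    print(does_intersect(Point(0, 0), Point(1, 1), Point(0, 1), Point(1, 0)))  # True
    print(does_intersect(Point(0, 0), Point(1, 0), Point(0, 1), Point(1, 1)))  # False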
|
{
"content_hash": "5b87c7ef8114aef309e147a8cdace8be",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 36.7906976744186,
"alnum_prop": 0.6605562579013906,
"repo_name": "rik0/rk-exempla",
"id": "4dfce0129e35c0ee490ebf3661553b192ab8faf6",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/python/segment_intersection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16861"
},
{
"name": "Clojure",
"bytes": "2540"
},
{
"name": "Common Lisp",
"bytes": "5076"
},
{
"name": "Erlang",
"bytes": "2571"
},
{
"name": "Java",
"bytes": "4951"
},
{
"name": "Perl",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "152481"
},
{
"name": "Racket",
"bytes": "9970"
},
{
"name": "Ruby",
"bytes": "283"
},
{
"name": "Scheme",
"bytes": "7945"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Remove unvouched users from groups."""
orm['groups.GroupMembership'].objects.filter(userprofile__is_vouched=False).delete()
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accepting_new_members': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '10'}),
'curator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups_curated'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['users.UserProfile']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'functional_area': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'max_reminder': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'members_can_leave': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'new_member_criteria': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.groupalias': {
'Meta': {'object_name': 'GroupAlias'},
'alias': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'url': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()', 'blank': 'True'})
},
'groups.groupmembership': {
'Meta': {'unique_together': "(('userprofile', 'group'),)", 'object_name': 'GroupMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'userprofile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'groups.skillalias': {
'Meta': {'object_name': 'SkillAlias'},
'alias': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['groups.Skill']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'url': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()', 'blank': 'True'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'through': "orm['groups.GroupMembership']", 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'photo': (u'sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_city': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_country': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_date_mozillian': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_email': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_full_name': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_groups': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_ircname': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_languages': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_photo': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_region': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_skills': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_timezone': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_title': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_tshirt': ('mozillians.users.models.PrivacyField', [], {'default': '1'}),
'privacy_vouched_by': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '70', 'blank': 'True'}),
'tshirt': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchees'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['users.UserProfile']", 'blank': 'True', 'null': 'True'})
}
}
complete_apps = ['groups']
symmetrical = True
|
{
"content_hash": "2e076df955e5c0d2a989e1879903c342",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 226,
"avg_line_length": 84.55633802816901,
"alnum_prop": 0.5501790622137087,
"repo_name": "ChristineLaMuse/mozillians",
"id": "d640ef3a6cca83fa1b77ddaacb29d9a705d449f6",
"size": "12031",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mozillians/groups/migrations/0023_remove_unvouched.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "204258"
},
{
"name": "JavaScript",
"bytes": "89948"
},
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "8218039"
},
{
"name": "Shell",
"bytes": "13340"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()]
config = dict(description='Kandilli latest earthquakes API',
long_description=open('README.md').read(),
author='Halit Alptekin',
url='https://github.com/halitalptekin/kandilli',
author_email='info@halitalptekin.com',
license='MIT',
              keywords='api, earthquake, earthquakes',
version='0.1.1',
py_modules=['kandilli'],
platforms='any',
name='kandilli',
install_requires=REQUIREMENTS)
setup(**config)
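# Usage note (not part of the original script): the standard setuptools
# workflow for this file is
#   python setup.py sdist
#   pip install dist/kandilli-0.1.1.tar.gz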
|
{
"content_hash": "26a3b54062808f1d130bbcf962ef543a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 33.42857142857143,
"alnum_prop": 0.6011396011396012,
"repo_name": "halitalptekin/kandilli",
"id": "2688aa5558c1064cddb9c19909fc99fe6356fac7",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6113"
}
],
"symlink_target": ""
}
|
"""
Support for Modbus switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.modbus/
"""
import logging
import voluptuous as vol
import homeassistant.components.modbus as modbus
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COIL = "coil"
CONF_COILS = "coils"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int,
}]
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Read configuration and create Modbus devices."""
switches = []
    for coil in config.get(CONF_COILS):
switches.append(ModbusCoilSwitch(
coil.get(CONF_NAME),
coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_devices(switches)
class ModbusCoilSwitch(ToggleEntity):
"""Representation of a Modbus switch."""
def __init__(self, name, slave, coil):
"""Initialize the switch."""
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._is_on = None
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Set switch on."""
modbus.HUB.write_coil(self._slave, self._coil, True)
def turn_off(self, **kwargs):
"""Set switch off."""
modbus.HUB.write_coil(self._slave, self._coil, False)
def update(self):
"""Update the state of the switch."""
result = modbus.HUB.read_coils(self._slave, self._coil, 1)
if not result:
_LOGGER.error(
'No response from modbus slave %s coil %s',
self._slave,
self._coil)
return
self._is_on = bool(result.bits[0])
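# Example configuration.yaml entry for this platform -- a minimal sketch
# matching PLATFORM_SCHEMA above; names, slave ids and coil numbers are
# hypothetical:
#
#   switch:
#     platform: modbus
#     coils:
#       - name: Fan
#         slave: 1
#         coil: 0
#       - name: Pump
#         coil: 1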
|
{
"content_hash": "401f41d4c0cad9e97d9fd8a972957058",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 74,
"avg_line_length": 29.10126582278481,
"alnum_prop": 0.6272292301000435,
"repo_name": "alexmogavero/home-assistant",
"id": "95168d5b830041268aad6916c7538e9a4d7075fd",
"size": "2299",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/modbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1905204"
},
{
"name": "Python",
"bytes": "6749372"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "14930"
}
],
"symlink_target": ""
}
|
"""
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode, check_errcode, check_geom, check_geom_offset,
check_pointer, check_srs, check_str_arg, check_string, check_const_string)
class gdal_char_p(c_char_p):
pass
def double_output(func, argtypes, errcheck=False, strarg=False):
"Generates a ctypes function that returns a double value."
func.argtypes = argtypes
func.restype = c_double
if errcheck:
func.errcheck = check_arg_errcode
if strarg:
func.errcheck = check_str_arg
return func
def geom_output(func, argtypes, offset=None):
"""
    Generates a function that returns a Geometry either directly (if
    offset is None) or by reference at the given argument offset.
"""
# Setting the argument types
func.argtypes = argtypes
if not offset:
# When a geometry pointer is directly returned.
func.restype = c_void_p
func.errcheck = check_geom
else:
# Error code returned, geometry is returned by-reference.
func.restype = c_int
def geomerrcheck(result, func, cargs):
return check_geom_offset(result, func, cargs, offset)
func.errcheck = geomerrcheck
return func
def int_output(func, argtypes):
"Generates a ctypes function that returns an integer value."
func.argtypes = argtypes
func.restype = c_int
return func
def srs_output(func, argtypes):
"""
Generates a ctypes prototype for the given function with
the given C arguments that returns a pointer to an OGR
Spatial Reference System.
"""
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_srs
return func
def const_string_output(func, argtypes, offset=None, decoding=None):
func.argtypes = argtypes
if offset:
func.restype = c_int
else:
func.restype = c_char_p
def _check_const(result, func, cargs):
res = check_const_string(result, func, cargs, offset=offset)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_const
return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
"""
Generates a ctypes prototype for the given function with the
given argument types that returns a string from a GDAL pointer.
    When `str_result` is True, the allocated pointer should be freed
    via the GDAL library routine VSIFree after the string is extracted.
"""
func.argtypes = argtypes
if str_result:
# Use subclass of c_char_p so the error checking routine
# can free the memory at the pointer's address.
func.restype = gdal_char_p
else:
# Error code is returned
func.restype = c_int
# Dynamically defining our error-checking function with the
# given offset.
def _check_str(result, func, cargs):
res = check_string(result, func, cargs,
offset=offset, str_result=str_result)
if res and decoding:
res = res.decode(decoding)
return res
func.errcheck = _check_str
return func
def void_output(func, argtypes, errcheck=True):
"""
    For functions that return either an error code that needs to be
    examined or nothing at all (i.e. void).
"""
if argtypes:
func.argtypes = argtypes
if errcheck:
# `errcheck` keyword may be set to False for routines that
# return void, rather than a status code.
func.restype = c_int
func.errcheck = check_errcode
else:
func.restype = None
return func
def voidptr_output(func, argtypes):
"For functions that return c_void_p."
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_pointer
return func
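# A minimal usage sketch: the generators above are applied to raw ctypes
# functions from the loaded GDAL library. The OGR_G_Area binding shown
# here mirrors how django.contrib.gis.gdal.prototypes.geom uses
# double_output; treat it as illustrative:
#
#   from django.contrib.gis.gdal.libgdal import lgdal
#   get_area = double_output(lgdal.OGR_G_Area, [c_void_p])
#   # get_area(geom_ptr) now returns a Python float with error checking.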
|
{
"content_hash": "10eeb6a9245a4f8be660fd393ea1b728",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 28.92753623188406,
"alnum_prop": 0.657314629258517,
"repo_name": "yceruto/django",
"id": "6b38dc593f51ee40bb68843faa7fcb85fbd9f2be",
"size": "3992",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/contrib/gis/gdal/prototypes/generation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Python",
"bytes": "9171078"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
import os
import sys
import ast
import re
import subprocess
from setuptools import find_packages
import functools
import fnmatch
# get version number
# avoid importing from package
version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('butterflow/version.py', 'rb') as f:
version = str(ast.literal_eval(version_re.search(
f.read().decode('utf-8')).group(1)))
# directories
rootdir = os.path.abspath(os.sep)
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)))
pkgdir = os.path.join(topdir, 'butterflow')
dependsdir = os.path.join(topdir, 'depends')
# are we building a development version?
building = False
for x in sys.argv:
    if x.startswith('build') or x.startswith('develop'):
        building = True
is_devbuild = ('dev' in version or 'a' in version) and building
# make a list with no duplicates
# does not maintain ordering
def mklist(*items):
s = set([])
for x in items:
if isinstance(x, list):
for y in x:
s.add(y)
elif x is not None:
s.add(x)
return list(s)
py_ver_X = sys.version_info.major
py_ver_Y = sys.version_info.minor
py_ver = '{}.{}'.format(py_ver_X, py_ver_Y)
homebrew_prefix = None
homebrew_site_pkgs = None
try:
homebrew_prefix = subprocess.Popen(['brew', '--prefix'],
stdout=subprocess.PIPE)
homebrew_prefix = homebrew_prefix.stdout.read().strip()
except Exception:
# fall back to environment variable if brew command is not found
if 'HOMEBREW_PREFIX' in os.environ:
homebrew_prefix = os.environ['HOMEBREW_PREFIX']
if homebrew_prefix is not None:
homebrew_site_pkgs = os.path.join(homebrew_prefix, 'lib/python{}/'
'site-packages/'.format(py_ver))
# Because some formulae provide python bindings, homebrew builds bindings
# against the first python (and python-config) in PATH (check
# `which python`).
#
    # Homebrew site-packages should precede all others on sys.path
# if it exists:
sys.path.insert(1, homebrew_site_pkgs)
cflags = ['-std=c11'] # c compilation flags
linkflags = [] # linker flags
cxxflags = []
is_win = sys.platform.startswith('win')
is_osx = sys.platform.startswith('darwin')
is_nix = sys.platform.startswith('linux')
# should we use cxfreeze?
use_cx_freeze = False
if is_win and 'build_exe' in sys.argv:
try:
# cxfreeze extends setuptools and should be imported after it
from cx_Freeze import setup, Executable
from distutils.core import Extension # cxfreeze builds upon distutils
use_cx_freeze = True
except ImportError as exc:
sys.exit(exc)
else:
from setuptools import setup, Extension, find_packages
# global cflags
if is_devbuild:
cflags.append('-Wall')
cflags.append('-g') # turn off debugging symbols for release
cflags.extend(['-O0', '-fbuiltin', '-fdiagnostics-show-option'])
# disable warnings that are safe to ignore
cflags.extend(['-Wno-unused-variable', '-Wno-unused-function'])
if is_osx:
cflags.extend(['-Wno-shorten-64-to-32', '-Wno-overloaded-virtual',
'-Wno-#warnings'])
else:
cflags.extend(['-Wno-cpp'])
# set cxxflags, remove c only options
for x in cflags:
if x != '-std=c11' and \
x != '-Wstrict-prototypes':
cxxflags.append(x)
# global link flags
if is_nix:
linkflags.extend(['-shared', '-Wl,--export-dynamic'])
elif is_osx:
    # Don't explicitly link against the system python on OSX to prevent
# segfaults arising from modules being built with one python (i.e. system
# python) and imported from a foreign python (i.e. brewed python).
#
# See: https://github.com/Homebrew/homebrew/blob/master/share/doc/
# homebrew/Common-Issues.md#python-segmentation-fault-11-on-import-
# some_python_module
#
    # Building modules with `-undefined dynamic_lookup` instead of an explicit
# link allows symbols to be resolved at import time. `otool -L <module>.so`
# shouldn't mention Python.
# See: https://github.com/Homebrew/homebrew-science/pull/1886
linkflags.append('-Wl,-undefined,dynamic_lookup')
linkflags.extend(['-arch', 'x86_64'])
avinfo_ext = Extension('butterflow.avinfo', extra_compile_args=cflags,
extra_link_args=linkflags,
libraries=['avcodec', 'avformat', 'avutil'],
sources=[os.path.join(pkgdir, 'avinfo.c')],
language='c')
# opencl args
cl_lib = None
cl_linkflags = None
if is_osx:
cl_linkflags = ['-framework', 'OpenCL']
else:
cl_lib = ['OpenCL']
old_linkflags = linkflags
if cl_linkflags:
linkflags.extend(cl_linkflags)
ocl_ext = Extension('butterflow.ocl', extra_compile_args=cxxflags,
extra_link_args=linkflags,
libraries=mklist(cl_lib, 'opencv_core', 'opencv_ocl'),
sources=[os.path.join(pkgdir, 'ocl.cpp')], language='c++')
linkflags = old_linkflags
# numpy args
np_includes = None
if is_osx:
if homebrew_prefix is not None:
# Homebrew opencv uses a brewed numpy by default but it's possible for
        # a user to use their own or the system one if the --without-brewed-numpy
# option is used.
#
# Note: usually all pythonX.Y packages with headers are placed in
# /usr/include/pythonX.Y/<package> or /usr/local/include/, but
# homebrew policy is to put them in site-packages
np_includes = os.path.join(homebrew_site_pkgs, 'numpy/core/include')
else:
# fallback to the system's numpy
np_includes = '/System/Library/Frameworks/Python.framework/Versions/'\
'{}/Extras/lib/python/numpy/core/include'.format(py_ver)
# opencv-ndarray-conversion args
nddir = os.path.join(dependsdir, 'opencv-ndarray-conversion')
nd_includes = os.path.join(nddir, 'include')
motion_ext = Extension('butterflow.motion',
extra_compile_args=cxxflags,
extra_link_args=linkflags,
include_dirs=mklist(nd_includes, np_includes),
libraries=['opencv_core', 'opencv_ocl',
'opencv_imgproc'],
sources=[os.path.join(pkgdir, 'motion.cpp'),
os.path.join(nddir, 'src', 'conversion.cpp')],
language='c++')
# shared args
setup_kwargs = {
'name': 'butterflow',
'packages': find_packages(exclude=['tests']),
'ext_modules': [avinfo_ext, ocl_ext, motion_ext],
'version': version,
'author': 'Duong Pham',
'author_email': 'dthpham@gmail.com',
'url': 'https://github.com/dthpham/butterflow',
'download_url': 'http://srv.dthpham.me/butterflow/butterflow-{}.tar.gz'.
format(version),
'description': 'Makes motion interpolated and fluid slow motion videos',
'keywords': ['motion interpolation', 'slow motion', 'slowmo',
'smooth motion'],
'entry_points': {'console_scripts': ['butterflow = butterflow.cli:main']},
'test_suite': 'tests'
}
setup = functools.partial(setup, **setup_kwargs)
if use_cx_freeze:
additional_files = []
with open('include_files.txt', 'r') as f:
for line in f:
line = line.rstrip()
if line.startswith('#'):
continue
elif line.startswith('prefix'):
prefix = line.split('=')[1]
continue
else:
pattern = line
for file in os.listdir(prefix):
if fnmatch.fnmatch(file, pattern):
filename = file
relpath = os.path.relpath(os.path.join(prefix, filename))
additional_files.append((relpath, filename))
build_exe_options = {
'packages': ['butterflow'],
'includes': ['numpy.core._methods', 'numpy.lib.format'], # Bug: https://stackoverflow.com/q/41735413
'include_msvcr': True,
'excludes': ['copyreg', 'distutils', 'email', 'future', 'pydoc_data',
'setuptools', 'test', 'tests', 'test', 'Tkinter'],
'include_files': additional_files,
# 'replace_paths': [("*", "")],
}
executables = [
Executable(script='butterflow/__main__.py',
# initScript=os.path.abspath('butterflow/console.py'),
base=None,
targetName='butterflow.exe',
icon='butterflow.ico',
copyright='Copyright (c) 2017 Duong Pham')
]
setup(options={'build_exe': build_exe_options}, executables=executables)
else:
setup()
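# For reference, a hypothetical include_files.txt consumed by the parsing
# loop above ('#' lines are skipped, 'prefix=' sets the search directory,
# every other line is an fnmatch pattern):
#
#   # runtime DLLs bundled into the frozen build
#   prefix=C:\mingw64\bin
#   libopencv_*.dll
#   avcodec-*.dll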
|
{
"content_hash": "7aa2c10c4309956e4d83f7d66b4ef6b5",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 109,
"avg_line_length": 36.448132780082986,
"alnum_prop": 0.6068989071038251,
"repo_name": "dthpham/butterflow",
"id": "a3bd3887eb98ee0e1c44f34ae6c2df3aa2ab0254",
"size": "8832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8280"
},
{
"name": "C++",
"bytes": "27414"
},
{
"name": "Python",
"bytes": "114067"
}
],
"symlink_target": ""
}
|
import numpy as np
from pymatgen.core.structure import Structure
"""
This module implements symmetry-related structure forms.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 9, 2012"
class SymmetrizedStructure(Structure):
"""
This class represents a symmetrized structure, i.e. a structure
where the spacegroup and symmetry operations are defined. This class is
typically not called but instead is typically obtained by calling
pymatgen.symmetry.analyzer.SpacegroupAnalyzer.get_symmetrized_structure.
Args:
structure (Structure): Original structure
spacegroup (SpacegroupOperations): An input SpacegroupOperations from
SpacegroupAnalyzer.
equivalent_positions: Equivalent positions from SpacegroupAnalyzer.
.. attribute: equivalent_indices
indices of structure grouped by equivalency
"""
def __init__(self, structure, spacegroup, equivalent_positions,
wyckoff_letters):
self.spacegroup = spacegroup
u, inv = np.unique(equivalent_positions, return_inverse=True)
self.site_labels = equivalent_positions
# site_properties = structure.site_properties
# site_properties["wyckoff"] = [
# "%d%s" % (list(self.site_labels).count(self.site_labels[i]),
# wyckoff_letters[i]) for i in range(len(structure))]
super(SymmetrizedStructure, self).__init__(
structure.lattice, [site.species for site in structure],
structure.frac_coords, site_properties=structure.site_properties)
self.equivalent_indices = [[] for i in range(len(u))]
self.equivalent_sites = [[] for i in range(len(u))]
wyckoff_symbols = [[] for i in range(len(u))]
        for i, inv_index in enumerate(inv):
            self.equivalent_indices[inv_index].append(i)
            self.equivalent_sites[inv_index].append(self.sites[i])
            wyckoff_symbols[inv_index].append(wyckoff_letters[i])
self.wyckoff_symbols = ["%d%s" % (len(w), w[0])
for w in wyckoff_symbols]
def find_equivalent_sites(self, site):
"""
Finds all symmetrically equivalent sites for a particular site
Args:
site (PeriodicSite): A site in the structure
Returns:
([PeriodicSite]): List of all symmetrically equivalent sites.
"""
for sites in self.equivalent_sites:
if site in sites:
return sites
raise ValueError("Site not in structure")
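# Typical usage -- a sketch; SpacegroupAnalyzer performs the symmetry
# analysis and constructs this class for you:
#
#   from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
#   sym_struct = SpacegroupAnalyzer(structure).get_symmetrized_structure()
#   equivalent = sym_struct.find_equivalent_sites(sym_struct[0])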
|
{
"content_hash": "47c60eec8228f64a25747a1ac32f915b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 36.21621621621622,
"alnum_prop": 0.6380597014925373,
"repo_name": "montoyjh/pymatgen",
"id": "eba58609e3d57c3d3c30f39b1f1b3bfe5debafa0",
"size": "2790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/symmetry/structure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7718850"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
}
|
import unittest
from datetime import datetime
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.transfers.gcs_to_gcs import WILDCARD, GCSToGCSOperator
TASK_ID = 'test-gcs-to-gcs-operator'
TEST_BUCKET = 'test-bucket'
PREFIX = 'TEST'
SOURCE_OBJECTS_NO_FILE = ['']
SOURCE_OBJECTS_TWO_EMPTY_STRING = ['', '']
SOURCE_OBJECTS_SINGLE_FILE = ['test_object/file1.txt']
SOURCE_OBJECTS_MULTIPLE_FILES = ['test_object/file1.txt', 'test_object/file2.txt']
SOURCE_OBJECTS_LIST = ['test_object/file1.txt', 'test_object/file2.txt', 'test_object/file3.json']
SOURCE_OBJECT_WILDCARD_PREFIX = '*test_object'
SOURCE_OBJECT_WILDCARD_SUFFIX = 'test_object*'
SOURCE_OBJECT_WILDCARD_MIDDLE = 'test*object'
SOURCE_OBJECT_WILDCARD_FILENAME = 'test_object*.txt'
SOURCE_OBJECT_NO_WILDCARD = 'test_object.txt'
SOURCE_OBJECT_MULTIPLE_WILDCARDS = 'csv/*/test_*.csv'
DESTINATION_BUCKET = 'archive'
DESTINATION_OBJECT = 'foo/bar'
DESTINATION_OBJECT_PREFIX = 'foo/bar'
SOURCE_FILES_LIST = [
'test_object/file1.txt',
'test_object/file2.txt',
'test_object/file3.json',
]
DELIMITER = '.json'
MOD_TIME_1 = datetime(2016, 1, 1)
MOD_TIME_2 = datetime(2019, 1, 1)
class TestGoogleCloudStorageToCloudStorageOperator(unittest.TestCase):
"""
Tests the three use-cases for the wildcard operator. These are
no_prefix: *test_object
no_suffix: test_object*
prefix_and_suffix: test*object
Also tests the destination_object as prefix when the wildcard is used.
"""
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_prefix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_PREFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="", delimiter="test_object")
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test_object", delimiter="")
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_replace_flag_false(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="test_object", delimiter=""),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_prefix_and_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_MIDDLE,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test", delimiter="object")
# copy with wildcard
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'foo/bar/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'foo/bar/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_destination_object_retained_prefix(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=f'{DESTINATION_OBJECT_PREFIX}/{SOURCE_OBJECT_WILDCARD_SUFFIX[:-1]}',
)
operator.execute(None)
mock_calls_retained = [
mock.call(
TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'foo/bar/test_object/file1.txt'
),
mock.call(
TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'foo/bar/test_object/file2.txt'
),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_retained)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_without_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_empty_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object='',
)
operator.execute(None)
mock_calls_empty = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, '/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, '/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_empty)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_all_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_one_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, False, False]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_no_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_last_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_before.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
maximum_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_exe_last_modified_time_and_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_between.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_no_prefix_with_no_last_modified_time(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=None,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_no_prefix_with_last_modified_time_with_false_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_not_called()
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_is_older_than_with_true_cond(self, mock_hook):
mock_hook.return_value.is_older_than.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
is_older_than=3600,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, 'test_object.txt', DESTINATION_BUCKET, 'test_object.txt'
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_more_than_1_wildcard(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
total_wildcards = operator.source_object.count(WILDCARD)
error_msg = f"Only one wildcard '[*]' is allowed in source_object parameter. Found {total_wildcards}"
with pytest.raises(AirflowException, match=error_msg):
operator.execute(None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_with_empty_destination_bucket(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_FILES_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=None,
destination_object=DESTINATION_OBJECT_PREFIX,
)
with mock.patch.object(operator.log, 'warning') as mock_warn:
operator.execute(None)
mock_warn.assert_called_once_with(
'destination_bucket is None. Defaulting it to source_bucket (%s)', TEST_BUCKET
)
assert operator.destination_bucket == operator.source_bucket
# Tests the use of delimiter and source object as list
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_empty_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_NO_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix='', delimiter=None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
    def test_raises_exception_with_two_empty_strings_inside_source_objects(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_TWO_EMPTY_STRING
)
with pytest.raises(AirflowException, match="You can't have two empty strings inside source_object"):
operator.execute(None)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_single_item_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_SINGLE_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
TEST_BUCKET, prefix=SOURCE_OBJECTS_SINGLE_FILE[0], delimiter=None
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_multiple_items_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_MULTIPLE_FILES
)
operator.execute(None)
mock_hook.return_value.list.assert_has_calls(
[
mock.call(TEST_BUCKET, prefix='test_object/file1.txt', delimiter=None),
mock.call(TEST_BUCKET, prefix='test_object/file2.txt', delimiter=None),
],
any_order=True,
)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_a_delimiter(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
delimiter=DELIMITER,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix='', delimiter=DELIMITER)
# COPY
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = ['test_object/file3.json']
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter=DELIMITER,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file3.json', DESTINATION_BUCKET, DESTINATION_OBJECT),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_different_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = ['test_object/file1.txt', 'test_object/file2.txt']
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter='.txt',
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_executes_with_no_destination_bucket_and_no_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_LIST
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', TEST_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', TEST_BUCKET, 'test_object/file2.txt'),
mock.call(TEST_BUCKET, 'test_object/file3.json', TEST_BUCKET, 'test_object/file3.json'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_wc_with_last_modified_time_with_all_true_cond_no_file(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, 'test_object/file1.txt', DESTINATION_BUCKET, 'test_object/file1.txt'),
mock.call(TEST_BUCKET, 'test_object/file2.txt', DESTINATION_BUCKET, 'test_object/file2.txt'),
mock.call(TEST_BUCKET, 'test_object/file3.json', DESTINATION_BUCKET, 'test_object/file3.json'),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch('airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook')
def test_execute_wildcard_with_replace_flag_false_with_destination_object(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="foo/bar", delimiter=""),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
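# For context, a minimal DAG-side usage sketch of the operator exercised by
# these tests (bucket and object names are hypothetical):
#
#   copy_files = GCSToGCSOperator(
#       task_id='copy_raw_files',
#       source_bucket='data-lake',
#       source_object='raw/*.csv',
#       destination_bucket='archive',
#       destination_object='raw/',
#   )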
|
{
"content_hash": "75fbf1e4c4e5d0fd42396dcbd1844a72",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 109,
"avg_line_length": 44.19245283018868,
"alnum_prop": 0.6531039193920246,
"repo_name": "mistercrunch/airflow",
"id": "a23b50d42f89328e640e5126d5407cf7f16e71f4",
"size": "24210",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/transfers/test_gcs_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from calendar import monthrange
import numpy as np
from datetime import datetime
__all__ = ["Date", "all_dates_in_year", "number_of_days_in_month",
"generate_days", "generate_years", "generate_months",
"extract_months", "extract_years", "extract_day_of_month",
"extract_day_of_week", "is_first_day_of_month", "is_first_day_of_week",
"is_month_change", "is_year_change", "yield_hours_on_day",
"yield_minutes_on_day", "yield_seconds_on_day",
"generate_datetime_filename"]
Date = namedtuple("Date", ["year", "month", "day"])
def generate_datetime_filename(label="data", extension="csv", fractional=True, dt=None):
"""
Generate a filename such as
mydata-2022-09-02_00-31-50-613015.csv
where "mydata" is the label and "csv" is the extensions.
You can also generate the filename without fractional seconds
by setting fractional=False which is useful if your code is guaranteed
to never generate multiple files per second
with cross-OS compatible filenames
(e.g. not containing special characters like colons)
and lexically sortable filenames.
:param str label The data label (prefix) or None if no data label should be used
:param str extension The filename extension (suffix) without leading '.' or None if no extension should be used
:param bool fractional If true, microseconds will be added to the filename timestamp
:param datetime.datetime dt Set this to a datetime.datetime to use a custom timestamp. If None, uses datetime.now()
"""
if dt is None:
dt = datetime.now()
filename = "" if label is None else f"{label}-"
fractional_str = f"-{dt.microsecond:06d}" if fractional is True else ""
filename += f"{dt.year}-{dt.month:02d}-{dt.day:02d}_{dt.hour:02d}-{dt.minute:02d}-{dt.second:02d}{fractional_str}"
if extension is not None:
filename += f".{extension}"
return filename
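# Illustrative output (timestamps will differ on every call):
#   generate_datetime_filename(label="mydata")
#     -> 'mydata-2022-09-02_00-31-50-613015.csv'
#   generate_datetime_filename(label="log", extension="txt", fractional=False)
#     -> 'log-2022-09-02_00-31-50.txt'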
def number_of_days_in_month(year=2019, month=1):
"""
Returns the number of days in a month, e.g. 31 in January (month=1).
Takes into account leap days.
"""
return monthrange(year, month)[1]
def all_dates_in_year(year=2019):
"""
Iterates all dates in a specific year, taking into account leap days.
Yields Date() objects.
"""
for month in range(1, 13): # Month is always 1..12
for day in range(1, number_of_days_in_month(year, month) + 1):
yield Date(year, month, day)
def extract_months(timestamps):
"""
    Given a 1D array of np.datetime64 timestamps,
    extract the month of each timestamp and return a
numpy array of the same size, containing the month number
(january = 1)
"""
return np.asarray([dt.month for dt in timestamps.astype(datetime)])
def extract_years(timestamps):
"""
    Given a 1D array of np.datetime64 timestamps,
    extract the year of each timestamp and return a
    numpy array of the same size, containing the year number
(e.g. 2022)
"""
return np.asarray([dt.year for dt in timestamps.astype(datetime)])
def extract_day_of_month(timestamps):
"""
    Given a 1D array of np.datetime64 timestamps,
    extract the day of month of each timestamp and return a
    numpy array of the same size, containing the day of month
(1-31, depending on the month)
"""
return np.asarray([dt.day for dt in timestamps.astype(datetime)])
def extract_day_of_week(timestamps):
"""
    Given a 1D array of np.datetime64 timestamps,
    extract the day of week of each timestamp and return a
    numpy array of the same size, containing the day of week
(Monday=1, Sunday=7)
"""
return np.asarray([dt.isoweekday() for dt in timestamps.astype(datetime)])
def is_first_day_of_month(timestamps):
"""
Takes a Numpy array of np.datetime64.
Returns a boolean array of the same length which is
true if the given date is on the first day of any month.
    This is related to is_month_change(), but implements
a slightly different algorithm
"""
return extract_day_of_month(timestamps) == 1
def is_first_day_of_week(timestamps):
"""
Takes a Numpy array of np.datetime64.
Returns a boolean array of the same length which is
true if the given date is on the first day of any week.
"""
return extract_day_of_week(timestamps) == 1
def is_month_change(timestamps, first_value_is_change=False):
"""
Takes a Numpy array of np.datetime64.
Returns a boolean array of the same length which is
true if the given date is the first date in the given array
in that particular month
If first_value_is_change is True, the first element of the array will be True,
else it will be False.
When using day-resolution datasets, this is often similar
to using is_first_day_of_month(), however this function
will only return True once for a given month,
whereas is_first_day_of_month() will return True for ANY
date that is on the 1st day of the month.
"""
if len(timestamps) == 0:
return np.asarray([], dtype=bool)
return np.append([first_value_is_change],
np.diff(extract_months(timestamps)).astype(bool))
def is_year_change(timestamps, first_value_is_change=False):
"""
Takes a Numpy array of np.datetime64.
If first_value_is_change is True, the first element of the array will be True,
else it will be False.
Returns a boolean array of the same length which is
true if the given date is the first date in the given array
in that particular year
"""
if len(timestamps) == 0:
return np.asarray([], dtype=bool)
return np.append([first_value_is_change],
np.diff(extract_years(timestamps)).astype(bool))
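# Quick illustration of the change detectors above (a sketch, not a test;
# generate_days() is defined below):
#   ts = generate_days(4, 2022, 1, 30)   # Jan 30, Jan 31, Feb 1, Feb 2
#   is_first_day_of_month(ts)  -> [False, False, True, False]
#   is_month_change(ts)        -> [False, False, True, False]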
def generate_days(ndays, year=2022, month=1, day=1):
"""
    Generate a 1d array of [ndays] timestamps, starting at the given day,
    each timestamp being exactly one day from the previous one.
    The given date will be the first timestamp.
    Returns an array of np.datetime64[us]
>>> generate_days(5, 2022, 1, 1)
['2022-01-01T00:00:00.000000',
'2022-01-02T00:00:00.000000',
'2022-01-03T00:00:00.000000',
'2022-01-04T00:00:00.000000',
'2022-01-05T00:00:00.000000']
"""
# NOTE: This method is more efficient than the "string parsing"
# method used by generate_months() and generate_years(),
# but this only matters if generating a lot of entries
# and it only works if the datetime64-represented
# distance between units to generate is constant
day_indexes = np.arange(ndays, dtype=np.int64) # 0, 1, ..., [ndays-1]
startdate = np.datetime64(f'{year:02d}-{month:02d}-{day:02d}T00:00:00.000000', 'us')
usec_per_day = int(1e6) * 86400 # 86.4k sec per day = 60*60*24s
usec_offsets = day_indexes * usec_per_day
return usec_offsets + startdate
def generate_months(nmonths, year=2022, month=1, day=1):
"""
    Generate a 1d array of [nmonths] timestamps, starting at the given day,
    each timestamp being exactly one month from the previous one.
    The given date will be the first timestamp.
    Returns an array of np.datetime64[us]
>>> generate_months(5, 2022, 1, 1)
['2022-01-01T00:00:00.000000',
'2022-02-01T00:00:00.000000',
'2022-03-01T00:00:00.000000',
'2022-04-01T00:00:00.000000',
'2022-05-01T00:00:00.000000']
"""
    # Roll extra months over into following years so that e.g. month 13
    # becomes January of the next year (fixes overflow for month+i > 12).
    return np.asarray([
        f'{year + (month - 1 + i) // 12:04d}-'
        f'{(month - 1 + i) % 12 + 1:02d}-'
        f'{day:02d}T00:00:00.000000'
        for i in range(nmonths)
    ], dtype='datetime64[us]')
def generate_years(nyears, year=2022, month=1, day=1):
"""
    Generate a 1d array of [nyears] timestamps, starting at the given day,
    each timestamp being exactly one year from the previous one.
    The given date will be the first timestamp.
    Returns an array of np.datetime64[us]
>>> generate_years(5, 2022, 1, 1)
['2022-01-01T00:00:00.000000',
'2023-01-01T00:00:00.000000',
'2024-01-01T00:00:00.000000',
'2025-01-01T00:00:00.000000',
'2026-01-01T00:00:00.000000']
"""
return np.asarray([
f'{year+i:04d}-{month:02d}-{day:02d}T00:00:00.000000'
for i in range(nyears)
], dtype='datetime64[us]')
def yield_hours_on_day(year=2022, month=6, day=15, tz=None):
"""
For each hour on the given day in the given timezone,
yield a Python datetime object representing this timestamp.
Note that this function is not DST-aware and for a day having 25 hours
    due to the change, it will still only generate 24 timestamps.
Note that in contrast to other functions in this module, this function
    does not generate a NumPy array of timestamps directly but instead
    yields Python datetime objects one at a time.
    :param year The year of the day for which to generate one timestamp each hour
    :param month The month for which to generate one timestamp each hour
    :param day The day of the month for which to generate one timestamp each hour
:param tz a tzinfo instance to use for the resulting datetime. Optional.
"""
for hour in range(24):
yield datetime(year=year,
month=month,
day=day,
hour=hour,
minute=0,
second=0,
tzinfo=tz)
def yield_minutes_on_day(year=2022, month=6, day=15, tz=None):
"""
For each minute on the given day in the given timezone,
yield a Python datetime object representing this timestamp.
Note that this function is not DST-aware and for a day having 25 hours
due to the change, it will still only generate 24*60 timestamps.
Note that in contrast to other functions in this module, this function
    does not generate a NumPy array of timestamps directly but instead
    yields Python datetime objects one at a time.
    :param year The year of the day for which to generate one timestamp each minute
    :param month The month for which to generate one timestamp each minute
    :param day The day of the month for which to generate one timestamp each minute
:param tz a tzinfo instance to use for the resulting datetime. Optional.
"""
for hour in range(24):
for minute in range(60):
yield datetime(year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=0,
tzinfo=tz)
def yield_seconds_on_day(year=2022, month=6, day=15, tz=None):
"""
For each second on the given day in the given timezone,
yield a Python datetime object representing this timestamp.
Note that this function is not DST-aware and for a day having 25 hours
    due to the change, it will still only generate 24*60*60 timestamps.
Furthermore, this function is not leap-second aware.
Note that in contrast to other functions in this module, this function
    does not generate a NumPy array of timestamps directly but instead
    yields Python datetime objects one at a time.
:param year The year of the day for which to generate one timestamp each second
:param month The month for which to generate one timestamp each second
:param day The day of the month for which to generate one timestamp each second
:param tz a tzinfo instance to use for the resulting datetime. Optional.
"""
for hour in range(24):
for minute in range(60):
for second in range(60):
yield datetime(year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=second,
tzinfo=tz)
|
{
"content_hash": "8d09a1f5c0ade9bb4fa572af84cedc34",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 119,
"avg_line_length": 39.161716171617165,
"alnum_prop": 0.6612169222990055,
"repo_name": "ulikoehler/UliEngineering",
"id": "1fe3cdae8e8c5f44117e80b306a3e585de2dc446",
"size": "11889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UliEngineering/Utils/Date.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "396308"
}
],
"symlink_target": ""
}
|
"""
Author : Jaganadh Gopinadhan
e-mail : jaganadhg@gmail.com
Licence : MIT
Third Eye V1.0 Udacity Self Driving Car Nano Degree Project 1
Finding Lane Lines on the Road
"""
import matplotlib.image as mpimg
import numpy as np
from moviepy.editor import VideoFileClip
from pputil import FrameTransformer
from lineutil import LineDrawBase, plot_img
class Pipeline(object):
"""
Basic pipeline for Lane Line detection
TODO : Improve with more features
"""
def __init__(self):
self.rho = 1
self.theta = np.pi/180
self.threshold = 15
self.min_line_len = 25
self.max_line_gap = 250
self.preprocessor = FrameTransformer()
self.annotator = LineDrawBase()
def fit_frame(self,image):
"""
Preprocess and draw image
"""
roi = self.preprocessor.transform(image)
annotated = self.annotator.draw(image,roi,self.rho,self.theta,\
self.threshold, self.min_line_len,self.max_line_gap)
return annotated
def fit_vid(self,vidfile):
"""
Process video file
"""
vf = VideoFileClip(vidfile)
white_clip = vf.fl_image(self.fit_frame)
return white_clip
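# Example usage sketch (file names are hypothetical; write_videofile is
# the standard moviepy call for rendering the annotated clip):
#   pipe = Pipeline()
#   clip = pipe.fit_vid("solidWhiteRight.mp4")
#   clip.write_videofile("solidWhiteRight_out.mp4", audio=False)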
if __name__ == "__main__":
    print()
|
{
"content_hash": "2e404adc7f3a2f56b9356703ba91d000",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 23.345454545454544,
"alnum_prop": 0.6207165109034268,
"repo_name": "jaganadhg/sdcnd_p1_lld",
"id": "26bae1a522e4e49929b69d825a8f7dfe979c7a3d",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdeye/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2261546"
},
{
"name": "Python",
"bytes": "15096"
}
],
"symlink_target": ""
}
|
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import bs4 as BeautifulSoup
import urllib.request
import sys
#URI of the data source
URI = sys.argv[1] if len(sys.argv) > 1 else 'https://en.wikipedia.org/wiki/Library'
#fetching the content from the URL
#fetched_data = urllib.request.urlopen('https://en.wikipedia.org/wiki/20th_century')
fetched_data = urllib.request.urlopen(URI)
article_read = fetched_data.read()
#parsing the URL content and storing in a variable
article_parsed = BeautifulSoup.BeautifulSoup(article_read,'html.parser')
#returning <p> tags
paragraphs = article_parsed.find_all('p')
article_content = ''
#looping through the paragraphs and adding them to the variable
for p in paragraphs:
article_content += p.text
def _create_dictionary_table(text_string) -> dict:
#removing stop words
stop_words = set(stopwords.words("english"))
words = word_tokenize(text_string)
#reducing words to their root form
stem = PorterStemmer()
#creating dictionary for the word frequency table
frequency_table = dict()
for wd in words:
wd = stem.stem(wd)
if wd in stop_words:
continue
if wd in frequency_table:
frequency_table[wd] += 1
else:
frequency_table[wd] = 1
return frequency_table
def _calculate_sentence_scores(sentences, frequency_table) -> dict:
#algorithm for scoring a sentence by its words
sentence_weight = dict()
for sentence in sentences:
sentence_wordcount = (len(word_tokenize(sentence)))
sentence_wordcount_without_stop_words = 0
for word_weight in frequency_table:
if word_weight in sentence.lower():
sentence_wordcount_without_stop_words += 1
if sentence[:7] in sentence_weight:
sentence_weight[sentence[:7]] += frequency_table[word_weight]
else:
sentence_weight[sentence[:7]] = frequency_table[word_weight]
        if sentence_wordcount_without_stop_words > 0:
            sentence_weight[sentence[:7]] = sentence_weight[sentence[:7]] / sentence_wordcount_without_stop_words
return sentence_weight
def _calculate_average_score(sentence_weight) -> int:
#calculating the average score for the sentences
sum_values = 0
for entry in sentence_weight:
sum_values += sentence_weight[entry]
#getting sentence average value from source text
average_score = (sum_values / len(sentence_weight))
return average_score
def _get_article_summary(sentences, sentence_weight, threshold):
sentence_counter = 0
article_summary = ''
for sentence in sentences:
if sentence[:7] in sentence_weight and sentence_weight[sentence[:7]] >= (threshold):
article_summary += " " + sentence
sentence_counter += 1
return article_summary
def _run_article_summary(article):
#creating a dictionary for the word frequency table
frequency_table = _create_dictionary_table(article)
#tokenizing the sentences
sentences = sent_tokenize(article)
#algorithm for scoring a sentence by its words
sentence_scores = _calculate_sentence_scores(sentences, frequency_table)
#getting the threshold
threshold = _calculate_average_score(sentence_scores)
#producing the summary
article_summary = _get_article_summary(sentences, sentence_scores, 1.5 * threshold)
return article_summary
#note: a minimal helper for writing the summary to a file is sketched below
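#editor's sketch for the note above: a minimal file-output helper. The
#function name and default path are assumptions, not part of the original
#script.
def _write_summary_to_file(summary, path='summary.txt'):
    with open(path, 'w', encoding='utf-8') as handle:
        handle.write(summary)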
if __name__ == '__main__':
summary_results = _run_article_summary(article_content)
print(summary_results)
|
{
"content_hash": "266db03b970a77ddbad3f0e1d269ef00",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 109,
"avg_line_length": 30.225806451612904,
"alnum_prop": 0.6824973319103522,
"repo_name": "jasonclark/python-samples",
"id": "e0ab2a2a8b2305f5e5cfb2b010575e937c298645",
"size": "4130",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "text-summarization-extraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9444"
}
],
"symlink_target": ""
}
|
from revscoring import Feature
from revscoring.datasources.datasource import Datasource
from revscoring.features import wikibase as wikibase_
from revscoring.features import modifiers
from revscoring.features.meta import aggregators, bools
from revscoring.features.modifiers import not_
from . import wikibase
from .wikidatawiki_data import property_datatypes, items_lists
name = "wikidatawiki"
IMPORTANT_LANG_CODES = {'en', 'de', 'ar', 'zh', 'es', 'pt', 'ru', 'fr'}
IMPORTANT_LANG_CODES_LIST = sorted(list(IMPORTANT_LANG_CODES))
"""
Language codes for important languages which are described in
https://www.wikidata.org/wiki/Wikidata:Item_quality#Translations
"""
class properties:
"""
    Mapping of English descriptions to property identifiers
"""
IMAGE = 'P18'
INSTANCE_OF = 'P31'
DATE_OF_BIRTH = 'P569'
DATE_OF_DEATH = 'P570'
IMPORTED_FROM_WIKIMEDIA = 'P143'
class items:
"""
    Mapping of English descriptions to item identifiers
"""
HUMAN = 'Q5'
SCHOLARLY_ARTICLE = 'Q13442814'
def _filter_nonexternal_identifier_statements(entity):
return [
statement
for pid, statements in entity.properties.items()
if pid in property_datatypes.NONEXTERNAL_IDENTIFIERS
for statement in statements]
def _process_references(entity):
nonexternal_identifier_statements = \
_filter_nonexternal_identifier_statements(entity)
return [reference
for statement in nonexternal_identifier_statements
for pid, references in statement.references.items()
for reference in references]
references = Datasource(
name + ".revision.references",
_process_references,
depends_on=[wikibase_.revision.datasources.entity])
def _process_external_identifiers(entity):
return [statement
for pid, statements in entity.properties.items()
if pid not in property_datatypes.NONEXTERNAL_IDENTIFIERS
for statement in statements]
external_identifiers = Datasource(
name + ".revision.external_identifiers",
_process_external_identifiers,
depends_on=[wikibase_.revision.datasources.entity])
def _process_commons_media(entity):
for pid in entity.properties.keys():
if pid in property_datatypes.COMMONS_MEDIA:
return True
return False
has_commons_media = Feature(
name + ".revision.has_commons_media",
_process_commons_media,
returns=bool,
depends_on=[wikibase_.revision.datasources.entity])
def _process_wikimedia_references(references):
return [reference
for reference in references
if (reference.property == properties.IMPORTED_FROM_WIKIMEDIA)]
wikimedia_references = Datasource(
name + ".revision.wikimedia_references",
_process_wikimedia_references, depends_on=[references])
def _process_unique_references(references):
return {(reference.property, str(reference.datavalue))
for reference in references}
unique_references = Datasource(
name + ".revision.unique_references",
_process_unique_references, depends_on=[references])
def _process_complete_translations(item_labels, item_descriptions):
return (item_labels.keys() & item_descriptions.keys())
complete_translations = Datasource(
name + ".revision.complete_translations",
_process_complete_translations,
depends_on=[wikibase_.revision.datasources.labels,
wikibase_.revision.datasources.descriptions],)
def _process_important_complete_translations(complete_translations):
return (complete_translations & IMPORTANT_LANG_CODES)
important_complete_translations = Datasource(
name + ".revision.important_complete_translations",
_process_important_complete_translations,
depends_on=[complete_translations])
def _process_important_label_translations(item_labels):
return (item_labels.keys() & IMPORTANT_LANG_CODES)
important_label_translations = Datasource(
name + ".revision.important_label_translations",
_process_important_label_translations,
depends_on=[wikibase_.revision.datasources.labels])
def _process_important_description_translations(item_descriptions):
return (item_descriptions.keys() & IMPORTANT_LANG_CODES)
important_description_translations = Datasource(
name + ".revision.important_description_translations",
_process_important_description_translations,
depends_on=[wikibase_.revision.datasources.descriptions])
def _process_non_external_id_statements(entity):
return [statement
for pid, statements in entity.properties.items()
if pid in property_datatypes.NONEXTERNAL_IDENTIFIERS
for statement in statements]
non_external_id_statements = Datasource(
name + ".revision.non_external_id_statements",
_process_non_external_id_statements,
depends_on=[wikibase_.revision.datasources.entity])
references_count = aggregators.len(references)
"`int` : A count of all sources in the revision"
wikimedia_references_count = aggregators.len(wikimedia_references)
"`int` : A count of all sources which come from Wikimedia projects"
external_references_count = references_count - wikimedia_references_count
"`int` : A count of all sources which do not come from Wikimedia projects"
unique_references_count = aggregators.len(unique_references)
"`int` : A count of unique sources in the revision"
non_external_id_statements_count = aggregators.len(non_external_id_statements)
"`int` : A count of all statements that are not external identifiers"
def _process_is_astronomical_object(entity):
statements = entity.properties.get(properties.INSTANCE_OF, [])
for s in statements:
if str(s.claim.datavalue) in items_lists.ASTRONOMICAL_OBJECTS:
return True
return False
def _process_referenced_claims_ratio(entity):
statements = _filter_nonexternal_identifier_statements(entity)
referenced_statements = [
statement
for statement in statements
if statement.references]
return len(referenced_statements) / max([len(statements), 1])
def _process_wikimedia_referenced_ratio(entity):
statements = _filter_nonexternal_identifier_statements(entity)
wikimedia_referenced_statements = 0
for statement in statements:
wikimedia_referenced = False
for ref_pid in statement.references:
if ref_pid == properties.IMPORTED_FROM_WIKIMEDIA:
wikimedia_referenced = True
break
if wikimedia_referenced:
wikimedia_referenced_statements += 1
return wikimedia_referenced_statements / max([len(statements), 1])
def _process_externally_referenced_claims_ratio(entity):
statements = _filter_nonexternal_identifier_statements(entity)
externally_referenced_statements = 0
for statement in statements:
externally_referenced = False
for ref_pid in statement.references:
if ref_pid != properties.IMPORTED_FROM_WIKIMEDIA:
externally_referenced = True
break
if externally_referenced:
externally_referenced_statements += 1
return externally_referenced_statements / max([len(statements), 1])
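# Editor's sketch (not part of the feature list): how the ratio helpers above
# behave on minimal stand-ins for revscoring's wikibase types. The stub class
# names are hypothetical, and the example assumes P569 is listed in
# property_datatypes.NONEXTERNAL_IDENTIFIERS.
#
#   class _StubStatement:
#       def __init__(self, references):
#           self.references = references
#
#   class _StubEntity:
#       def __init__(self, properties):
#           self.properties = properties
#
#   entity = _StubEntity({'P569': [_StubStatement({'P143': ['a ref']}),
#                                  _StubStatement({})]})
#   _process_referenced_claims_ratio(entity)  # -> 0.5 (1 of 2 statements cited)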
# Status
is_human = wikibase_.revision.has_property_value(
properties.INSTANCE_OF, items.HUMAN, name=name + '.revision.is_human')
has_birthday = wikibase_.revision.has_property(
properties.DATE_OF_BIRTH, name=name + '.revision.has_birthday')
dead = wikibase_.revision.has_property(
properties.DATE_OF_DEATH, name=name + '.revision.dead')
is_blp = has_birthday.and_(not_(dead))
is_scholarlyarticle = wikibase_.revision.has_property_value(
properties.INSTANCE_OF,
items.SCHOLARLY_ARTICLE,
name=name + '.revision.is_scholarlyarticle'
)
is_astronomical_object = Feature(
name + '.revision.page.is_astronomical_object',
_process_is_astronomical_object,
returns=bool,
depends_on=[wikibase_.revision.datasources.entity])
has_image = wikibase_.revision.has_property(
properties.IMAGE, name=name + '.revision.has_image')
referenced_claims_ratio = Feature(
name + '.revision.page.referenced_claims_ratio',
_process_referenced_claims_ratio,
returns=float,
depends_on=[wikibase_.revision.datasources.entity])
wikimedia_referenced_ratio = Feature(
name + '.revision.page.wikimedia_referenced_ratio',
_process_wikimedia_referenced_ratio,
returns=float,
depends_on=[wikibase_.revision.datasources.entity])
externally_referenced_claims_ratio = Feature(
name + '.revision.page.externally_referenced_claims_ratio',
_process_externally_referenced_claims_ratio,
returns=float,
depends_on=[wikibase_.revision.datasources.entity])
important_label_translation_features = [
bools.item_in_set(i, important_label_translations)
for i in IMPORTANT_LANG_CODES_LIST
]
important_description_translation_features = [
bools.item_in_set(i, important_description_translations)
for i in IMPORTANT_LANG_CODES_LIST
]
important_complete_translation_features = [
bools.item_in_set(i, important_complete_translations)
for i in IMPORTANT_LANG_CODES_LIST
]
local_wiki = \
important_label_translation_features + \
important_description_translation_features + \
important_complete_translation_features + \
[
is_scholarlyarticle,
is_astronomical_object,
is_human,
is_blp,
has_image,
has_commons_media,
aggregators.len(complete_translations),
aggregators.len(important_label_translations),
aggregators.len(important_description_translations),
aggregators.len(important_complete_translations),
references_count,
referenced_claims_ratio,
wikimedia_references_count,
wikimedia_references_count / modifiers.max(references_count, 1),
wikimedia_referenced_ratio,
external_references_count,
external_references_count / modifiers.max(references_count, 1),
externally_referenced_claims_ratio,
unique_references_count,
unique_references_count / modifiers.max(references_count, 1),
aggregators.len(external_identifiers),
non_external_id_statements_count
]
item_quality = wikibase.item + local_wiki
|
{
"content_hash": "2f111ebd71532fe94c1ee48e29541ff3",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 78,
"avg_line_length": 32.83067092651757,
"alnum_prop": 0.7162319968859479,
"repo_name": "halfak/Wiki-Class",
"id": "bffdfb2f04ab1976575da06eb541b8b5dd03f7eb",
"size": "10276",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "articlequality/feature_lists/wikidatawiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6739"
},
{
"name": "Python",
"bytes": "53959"
}
],
"symlink_target": ""
}
|
from .pool import Pool, _NaivePool # NoQA
__all__ = ('Pool',)
|
{
"content_hash": "47c99285bbbbd5174a68643c88d8cb62",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 16.25,
"alnum_prop": 0.5846153846153846,
"repo_name": "edgedb/edgedb",
"id": "577dffa0fcf4e0d69ad174665d16c79dd160c437",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edb/server/connpool/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "372837"
},
{
"name": "JavaScript",
"bytes": "7481"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "9860929"
},
{
"name": "Rust",
"bytes": "238373"
}
],
"symlink_target": ""
}
|
debug = True
debug = False
# from CommonClasses import * # hxl: comment out this line for submission
class Solution:
# @param head, a ListNode
# @return a ListNode
def sortList(self, head):
if head == None:
return None
elif head.next == None: # hxl: one node case
return head
elif head.next.next == None:
if head.val <= head.next.val: # hxl: two nodes case
return head
else:
newHead = head.next
newHead.next = head
newHead.next.next = None
return newHead
else: # hxl: more than two nodes case
prePtr1x = None
ptr1x = head # hxl: this will end up pointing to the middle of the list
ptr2x = head
while ptr2x != None:
prePtr1x = ptr1x
ptr1x = ptr1x.next
ptr2x = ptr2x.next
if ptr2x != None:
ptr2x = ptr2x.next
if debug: print head, ptr1x
prePtr1x.next = None # hxl: don't forget to cut the list before doing sort!
firstSortedHalf = self.sortList(head)
secondSortedHalf = self.sortList(ptr1x)
if debug: print firstSortedHalf, secondSortedHalf
newHead = None
if firstSortedHalf.val < secondSortedHalf.val:
newHead = firstSortedHalf
firstSortedHalf = firstSortedHalf.next
else:
newHead = secondSortedHalf
secondSortedHalf = secondSortedHalf.next
cur = newHead
while (firstSortedHalf != None
and secondSortedHalf != None):
if firstSortedHalf.val < secondSortedHalf.val:
cur.next = firstSortedHalf
cur = cur.next
firstSortedHalf = firstSortedHalf.next
else:
cur.next = secondSortedHalf
cur = cur.next
secondSortedHalf = secondSortedHalf.next
if firstSortedHalf == None:
cur.next = secondSortedHalf
else: # secondSortedHalf is empty
cur.next = firstSortedHalf
return newHead
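# Editor's sketch for a quick manual check (assumes the ListNode class from
# CommonClasses, whose import is commented out above for submission):
#   n1, n2, n3 = ListNode(4), ListNode(2), ListNode(3)
#   n1.next, n2.next = n2, n3
#   sorted_head = Solution().sortList(n1)  # yields 2 -> 3 -> 4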
|
{
"content_hash": "0f4b9d9fd9ad2a3891b42e4d33548a7f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 90,
"avg_line_length": 33.89333333333333,
"alnum_prop": 0.47482297403619195,
"repo_name": "54lihaoxin/leetcode_python",
"id": "78b87232934b70ef90b21221bbf3d1c766663d18",
"size": "2633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SortList/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "715933"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('funding', '0028_merge_20190912_1354'),
]
operations = [
migrations.CreateModel(
name='BankAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_funding.bankaccount_set+', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='funding',
name='account',
),
migrations.AlterField(
model_name='budgetline',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budget_lines', to='funding.Funding'),
),
migrations.AlterField(
model_name='funding',
name='amount_matching',
field=bluebottle.utils.fields.MoneyField(blank=True, currency_choices=[(b'EUR', b'Euro')], decimal_places=2, default=Decimal('0'), default_currency=b'EUR', max_digits=12, null=True),
),
migrations.AlterField(
model_name='funding',
name='target',
field=bluebottle.utils.fields.MoneyField(blank=True, currency_choices=[(b'EUR', b'Euro')], decimal_places=2, default=Decimal('0'), default_currency=b'EUR', max_digits=12, null=True),
),
migrations.AddField(
model_name='funding',
name='bank_account',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='funding.BankAccount'),
),
]
|
{
"content_hash": "882ff6ad1f213b970fc7d9511508f5d6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 213,
"avg_line_length": 40.76470588235294,
"alnum_prop": 0.60990860990861,
"repo_name": "onepercentclub/bluebottle",
"id": "9fdfd50235b930d3544ce23ed4854801993c39a6",
"size": "2153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/funding/migrations/0029_auto_20190913_1458.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
from supriya.ugens.InfoUGenBase import InfoUGenBase
class BufInfoUGenBase(InfoUGenBase):
"""
Abstract base class for buffer information ugens.
Buffer information ugens expose both scalar-rate and control-rate
constructors, as buffer topology may change after a synth is instantiated.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Buffer UGens"
|
{
"content_hash": "fc4bd2674f07a32e31c86149884079be",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.729381443298969,
"repo_name": "Pulgama/supriya",
"id": "41937483fd228016e2095677b591a8a02787fd12",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/ugens/BufInfoUGenBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
}
|
"""
WSGI config for composeexample project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "composeexample.settings")
application = get_wsgi_application()
|
{
"content_hash": "ce40bf341295dc0d9bfec0d4304fd28b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.3125,
"alnum_prop": 0.7777777777777778,
"repo_name": "uobdic/dice-docker",
"id": "737f080f1f2f2bd8e32205241b422e39cedfffe6",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "black-box/composeexample/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "18274"
},
{
"name": "HTML",
"bytes": "477"
},
{
"name": "Makefile",
"bytes": "627"
},
{
"name": "Python",
"bytes": "17790"
},
{
"name": "Shell",
"bytes": "50015"
}
],
"symlink_target": ""
}
|
import platform
from rsqueakvm.error import PrimitiveFailedError
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import rffi
try:
assert "64bit" in platform.architecture()[0]
from sqpyte.capi import CConfig
from sqpyte.interpreter import SQPyteDB
except (ImportError, AssertionError):
class CConfig():
SQLITE_TEXT = SQLITE_BLOB = SQLITE_INTEGER = SQLITE_FLOAT = None
SQLITE_NULL = SQLITE_ROW = SQLITE_DONE = None
SQPyteDB = None
###############################################################################
# SQL Connection And Cursor Implementation #
###############################################################################
class SQLConnection(object):
_immutable_fields_ = ['db', 'statement_cache']
def __init__(self, db_class, filename):
self.statement_cache = StatementCache(self)
self.is_closed = False
self.connect(db_class, filename)
def connect(self, db_class, filename):
try:
print 'Trying to connect to %s...' % filename
self.db = db_class(filename)
print 'Success'
except Exception as e:
print 'Unable to connect to database: ', e
def cursor(self):
return SQLCursor(self)
def execute(self, space, sql, args=None):
return self.cursor().execute(space, sql, args)
def close(self):
if self.is_closed:
return False
for holder in self.statement_cache.all_statements():
holder.close()
self.db.close()
self.is_closed = True
print 'Disconnected'
return True
class SQLCursor(object):
_immutable_fields_ = ['connection', 'column_count', 'column_names']
def __init__(self, connection):
assert isinstance(connection, SQLConnection)
self.connection = connection
self.statement = None
self.column_count = -1
@jit.unroll_safe
def execute(self, space, sql, args=None):
if self.column_count > 0:
raise PrimitiveFailedError(
'execute() cannot be called twice on same cursor')
# otherwise we can't assume that column_{count,names} are immutable
jit.promote(self.connection)
jit.promote(self.statement)
cache = self.connection.statement_cache
self.statement = cache.get_or_make(sql)
query = self.statement.query
if args is not None:
if len(args) != query.bind_parameter_count():
raise PrimitiveFailedError('wrong # of arguments for query')
for i, w_value in enumerate(args):
self.bind_query_argument(space, w_value, query, i + 1)
self._step()
self.column_count = query.data_count()
self.column_names = [rffi.charp2strn(query.column_name(i), 255)
for i in range(self.column_count)]
return self
def bind_query_argument(self, space, w_value, query, i):
cls = w_value.getclass(space)
if (cls.is_same_object(space.w_String)):
query.bind_str(i, space.unwrap_string(w_value))
elif cls.is_same_object(space.w_SmallInteger):
query.bind_int64(i, space.unwrap_int(w_value))
elif cls.is_same_object(space.w_Float):
query.bind_double(i, space.unwrap_float(w_value))
elif cls.is_same_object(space.w_nil):
query.bind_null(i)
else:
raise PrimitiveFailedError(
'unable to unwrap %s' % w_value.getclass(space))
def next(self, space):
if jit.promote(self.statement) is None:
return space.w_nil
row = self._fetch_one_row(space)
self._step()
# This should be unroll safe, since _fetch_one_row() was also marked
# unroll_safe.
return space.wrap_list_unroll_safe(row)
def raw_next(self, space):
if jit.promote(self.statement) is None:
return []
row = self._fetch_one_row(space)
self._step()
return row
def close(self):
if self.statement:
self.statement.close()
self.statement = None
@jit.unroll_safe
def _fetch_one_row(self, space):
query = jit.promote(self.statement).query
num_cols = query.data_count()
jit.promote(num_cols)
cols = [None] * num_cols
for i in range(num_cols):
tid = query.column_type(i)
if tid == CConfig.SQLITE_TEXT or tid == CConfig.SQLITE_BLOB:
textlen = query.column_bytes(i)
result = rffi.charpsize2str(rffi.cast(rffi.CCHARP,
query.column_text(i)),
textlen)
w_result = space.wrap_string(result) # no encoding
elif tid == CConfig.SQLITE_INTEGER:
result = query.column_int64(i)
w_result = space.wrap_int(result)
elif tid == CConfig.SQLITE_FLOAT:
result = query.column_double(i)
w_result = space.wrap_float(result)
elif tid == CConfig.SQLITE_NULL:
w_result = space.w_nil
else:
                raise PrimitiveFailedError('read_row [tid: %s]' % tid)
cols[i] = w_result
return cols
def _step(self):
rc = self.statement.query.mainloop()
if rc == CConfig.SQLITE_ROW:
pass
elif rc == CConfig.SQLITE_DONE:
self._reset()
else:
raise PrimitiveFailedError('strange result: %s' % rc)
def _reset(self):
if self.statement:
self.statement._reset()
self.statement = None
###############################################################################
# Statement Caching #
###############################################################################
class Statement(object):
_immutable_fields_ = ['w_connection', 'sql', 'query']
def __init__(self, w_connection, sql):
assert isinstance(w_connection, SQLConnection)
self.w_connection = w_connection
self.sql = sql
try:
self.query = w_connection.db.execute(sql)
except Exception as e:
raise PrimitiveFailedError(str(e))
# space = w_connection.space
# w_module = space.getbuiltinmodule('sqpyte')
# w_error = space.getattr(w_module, space.wrap('OperationalError'))
# raise PrimitiveFailedError(w_error, space.wrap(e.msg))
# self.query.use_translated.disable_from_cmdline(
# w_connection.disable_opcodes)
def close(self):
if self.query:
self.query.close()
self.query = None
def _reset(self):
cache = self.w_connection.statement_cache
holder = cache.get_holder(self.sql)
if holder.statement is not None:
self.close()
else:
holder.statement = self
self.query.reset_query()
class StatementHolder(object):
def __init__(self):
self.statement = None
def _get_or_make(self, cache, sql):
if self.statement is None:
return Statement(cache.w_connection, sql)
result = self.statement
self.statement = None
return jit.promote(result)
class StatementCache(object):
def __init__(self, w_connection):
self.w_connection = w_connection
self.cache = {}
def get_or_make(self, sql):
holder = self.get_holder(sql)
return holder._get_or_make(self, sql)
def get_holder(self, sql):
jit.promote(self)
return self._get_holder(sql)
@jit.elidable
def _get_holder(self, sql):
holder = self.cache.get(sql, None)
if not holder:
holder = self.cache[sql] = StatementHolder()
return holder
def all_statements(self):
# return [holder.statement for holder in self.cache.itervalues()
# if holder.statement is not None]
return []
def invalidate(self):
self.cache = {}
###############################################################################
# Generic Database Manager #
###############################################################################
class DBManager(object):
_immutable_fields_ = ["db_connection?"]
def __init__(self):
self.db_file_name = ":memory:"
self.db_connection = None
        self.driver = SQPyteDB  # Driver for DBObjects; None when SQPyte is unavailable
self._db_count = 0
self._dbs = {}
self._cursor_count = 0
self._cursors = {}
def connection(self):
if self.db_connection is not None:
return self.db_connection
if self.driver is None:
raise PrimitiveFailedError('connection [driver is None]')
print "DBMode: %s" % self.driver
connection = SQLConnection(self.driver, self.db_file_name)
assert connection is not None
self.db_connection = connection
return connection
def connect(self, db_class, filename):
handle = self._db_count
self._dbs[handle] = SQLConnection(db_class, filename)
self._db_count += 1
return handle
def get_connection(self, db_handle):
db = self._dbs.get(db_handle, None)
if db is None:
raise PrimitiveFailedError('execute [db is None]')
return db
def execute(self, space, db, sql, args=None):
handle = self._cursor_count
self._cursors[handle] = db.execute(space, sql, args)
self._cursor_count += 1
return handle
@jit.elidable
def cursor(self, cursor_handle):
cursor = self._cursors.get(cursor_handle, None)
if cursor is None:
raise PrimitiveFailedError('cursor not found')
return cursor
def close(self, db_pointer):
db = self._dbs.get(db_pointer, None)
if db is None:
raise PrimitiveFailedError('close [db is None]')
return db.close()
dbm = DBManager()
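# Editor's usage sketch (the `space` object is supplied by the VM and is
# hypothetical here; SQPyte must import successfully for `dbm.driver` to be
# usable):
#   handle = dbm.connect(dbm.driver, ':memory:')
#   db = dbm.get_connection(handle)
#   cur_handle = dbm.execute(space, db, 'create table t (x integer)')
#   dbm.cursor(cur_handle).raw_next(space)
#   dbm.close(handle)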
|
{
"content_hash": "3d6248b1f20fb73b7fe354daf52987c7",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 33.36482084690554,
"alnum_prop": 0.5471053402323538,
"repo_name": "HPI-SWA-Lab/RSqueak",
"id": "8e9562955e5d3848c0b57dd1d1605aa00f0b0f42",
"size": "10243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsqueakvm/plugins/database/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1638"
},
{
"name": "C",
"bytes": "115644"
},
{
"name": "HTML",
"bytes": "4754"
},
{
"name": "PowerShell",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "1140634"
},
{
"name": "Shell",
"bytes": "18715"
},
{
"name": "Smalltalk",
"bytes": "71208"
}
],
"symlink_target": ""
}
|
import catoclient.catocommand
from catoclient.param import Param
class GetTaskParameters(catoclient.catocommand.CatoCommand):
Description = 'Gets a json formatted parameters template for a task.'
API = 'get_task_parameters'
Examples = '''
_To get the parameters of the default version of a task and redirect to a file_
cato-get-task-parameters -t "mytask01" > mytask01_params.json
_To get the parameters of a specific version of a task_
cato-get-task-parameters -t "new example" -v "2.000"
_To get the most basic parameter template of a task, minus descriptions and defaults_
cato-get-task-parameters -t "new example" -b
'''
Options = [Param(name='task', short_name='t', long_name='task',
optional=False, ptype='string',
doc='The ID or Name of a Task.'),
Param(name='version', short_name='v', long_name='version',
optional=True, ptype='string',
doc='An optional specific Task Version. (Default if omitted.)'),
Param(name='basic', short_name='b', long_name='basic',
optional=True, ptype='boolean',
doc='Get a basic template with no descriptive details or default values.')]
def main(self):
results = self.call_api(self.API, ['task', 'version', 'basic'])
print(results)
|
{
"content_hash": "c9b968e052dfbdc5b330d39f96a2ad4a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 96,
"avg_line_length": 39.91428571428571,
"alnum_prop": 0.6220472440944882,
"repo_name": "cloudsidekick/catoclient",
"id": "20375896c499191ce6ca336efb34223109ebc564",
"size": "2123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catoclient/commands/gettaskparameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "227800"
},
{
"name": "Ruby",
"bytes": "1000"
},
{
"name": "Tcl",
"bytes": "3573"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Slb20140515DescribeHealthStatusRequest(RestApi):
def __init__(self,domain='slb.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.ListenerPort = None
self.LoadBalancerId = None
def getapiname(self):
return 'slb.aliyuncs.com.DescribeHealthStatus.2014-05-15'
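# Editor's usage sketch (identifiers are placeholders; assumes the RestApi
# base class exposes a getResponse() call as in other auto_sdk-generated
# clients):
#   req = Slb20140515DescribeHealthStatusRequest()
#   req.LoadBalancerId = 'lb-xxxxxxxx'
#   req.ListenerPort = 80
#   result = req.getResponse()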
|
{
"content_hash": "e6703f8fe126ccd14ce1b591521e8726",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 59,
"avg_line_length": 31.5,
"alnum_prop": 0.7275132275132276,
"repo_name": "wanghe4096/website",
"id": "3442be5f221abb449379bc6ec748a67fd4fa702d",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Slb20140515DescribeHealthStatusRequest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "121965"
},
{
"name": "HTML",
"bytes": "163477"
},
{
"name": "JavaScript",
"bytes": "227130"
},
{
"name": "Lua",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "325945"
},
{
"name": "Shell",
"bytes": "1359"
}
],
"symlink_target": ""
}
|
import socket
port = 8080
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #udp
server.bind(('', port))
print('Listen on port: %s' % port)
while True:
data, addr = server.recvfrom(1024)
print('Received from %s data: %s' % (addr, data))
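# Companion client sketch (editor's note, for manually exercising the server
# above from another shell):
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   client.sendto(b'hello', ('127.0.0.1', 8080))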
|
{
"content_hash": "cca0ea7dca087ba5b4c0f66302d6e5e3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 18.571428571428573,
"alnum_prop": 0.6538461538461539,
"repo_name": "istommao/pynotes",
"id": "1e09ec9a64f1ab20977186793f62242b04cb6430",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/socket/server.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
API service module of deepnlp.org
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals # compatible with python3 unicode coding
import requests
import re
import sys
if sys.version_info > (3, 0):
    from urllib.parse import quote
else:
    from urllib import quote
base_url = 'http://www.deepnlp.org'
def fetch_str_input(prompt):
var = ""
if (sys.version_info > (3,0)): #py3
var = input(prompt)
else :
var = raw_input(prompt)
return var
# username = fetch_str_input("username:")
def init():
# register the first time
print ("Connnecting to deepnlp.org server")
print ("Register to get full API usage")
username = fetch_str_input("username:")
while (len(username) == 0):
print ('username cannot be none')
username = fetch_str_input("username:")
email = fetch_str_input("email:")
while (check_email_format(email) == False):
print ('email format incorrect!')
email = fetch_str_input("email:")
password = fetch_str_input("password:")
while (len(password) == 0):
print ('password cannot be none')
password = fetch_str_input("password:")
register_url = base_url + '/account/register/'
userInfo = {'username' : username,
'email' : email,
'password' : password,
}
res = requests.post(register_url, data=userInfo)
credentials = {}
if (res.status_code == 200):
        print ('Registration succeeded! Returning your login credentials.')
credentials = {'username': username, 'password':password}
else:
print ('Registration failed, please check your input information')
credentials = {}
return credentials
def check_email_format(email):
    if re.match(r"^.+@(\[?)[a-zA-Z0-9\-\.]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(\]?)$", email) is not None:
return True
return False
def connect(credentials):
login = credentials
default_login = {'username': 'pypi_default' , 'password': 'pypi_passw0rd'}
if (len(login)==0):
print ("Warning: credentials dictionary is empty")
print ("Run 'login = api_service.init()' to initialize your personal account with full API access")
print ("loading default login for pypi with limited API access")
login = default_login
if('username' not in login.keys()):
print ("key 'username' missing in credentials")
print ("loading default login with limited API access")
login = default_login
if('password' not in login.keys()):
print ("key 'password' missing in credentials")
print ("loading default login with limited API access")
login = default_login
login_url = base_url + '/account/login/'
login_cookie = requests.post(login_url, data=login)
secure_cookie = login_cookie.cookies # save cookie for future use
return secure_cookie
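# Editor's sketch of the typical flow (run interactively, since init() prompts
# for input; the endpoint in the last line is hypothetical):
#   credentials = init()           # one-time registration
#   cookie = connect(credentials)  # returns a requests cookie jar
#   requests.get(base_url + '/some/endpoint/', cookies=cookie)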
|
{
"content_hash": "e2db3f810455057a86a49927012a6f13",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 107,
"avg_line_length": 34.16483516483517,
"alnum_prop": 0.6069475715664201,
"repo_name": "rockingdingo/deepnlp",
"id": "f2a1a3f3eba3e14598888837d39230718ca21aac",
"size": "3152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepnlp/api_service.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255776"
},
{
"name": "Shell",
"bytes": "4032"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from agms.response.response import Response
class InvoicingResponse(Response):
"""
A class representing AGMS Invoicing Response objects.
"""
def __init__(self):
self._response = None
self._mapping = []
|
{
"content_hash": "1aae0041dd2ba8f6bd95fd921667f637",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 22.916666666666668,
"alnum_prop": 0.6618181818181819,
"repo_name": "agmscode/agms_python",
"id": "4f67b6c257ddae355c26158c07e24c4b0dcea5bb",
"size": "275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agms/response/invoicing_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "273109"
}
],
"symlink_target": ""
}
|
"""
Algorithms implementation to retrieve geophysical parameters
"""
from satistjenesten import data
import numpy
def compute_parameter(scene, alg='dummy_sic'):
    """
    Dispatch to a compute_<alg> function by name.
    """
    # name-based dispatch; avoids eval() on externally supplied strings
    globals()['compute_{0}'.format(alg)](scene)
def compute_dummy_sic(scene):
    """
    Compute a dummy (all-ones) sea ice concentration field.
    Appends a new 'sic' band holding the computed parameter.
    Args:
        scene (data.SatScene): existing scene instance with loaded bands
    """
reflec_1 = scene.bands['reflec_1'].data
sic_array = numpy.ones(reflec_1.shape)
sic_band = data.SatBand()
sic_band.data = sic_array
sic_band.long_name = 'Dummy SIC'
scene.bands['sic'] = sic_band
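# Editor's usage sketch (assumes a scene whose 'reflec_1' band is already
# loaded; constructing the scene itself is left to satistjenesten.data):
#   compute_parameter(scene, alg='dummy_sic')
#   scene.bands['sic'].data   # all-ones array shaped like 'reflec_1'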
|
{
"content_hash": "5ede4fad9f67965e1e9273dd5e8a6579",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 27.96153846153846,
"alnum_prop": 0.686382393397524,
"repo_name": "mitkin/satistjenesten",
"id": "99ca8858e4eaf5387d5950e11e50050bb23d34ff",
"size": "749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "satistjenesten/retrievals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "1130"
},
{
"name": "Makefile",
"bytes": "221"
},
{
"name": "Python",
"bytes": "45812"
},
{
"name": "Shell",
"bytes": "654"
}
],
"symlink_target": ""
}
|
import pathlib
import subprocess
import tempfile
from unittest.mock import MagicMock
import pytest
from utils.guestfsprocess import CompletedProcess, GuestFSInterface, run
class TestWithoutCheck:
def test_capture_stdout(self):
cmd = 'echo abc123'
result = run(_make_local_guestfs(), cmd, raiseOnError=False)
assert result == CompletedProcess('abc123\n', '', 0, cmd)
def test_capture_stderr(self):
cmd = 'echo error msg > /dev/stderr'
result = run(_make_local_guestfs(), cmd, raiseOnError=False)
assert result == CompletedProcess('', 'error msg\n', 0, cmd)
def test_support_positive_code(self):
cmd = 'exit 100'
result = run(_make_local_guestfs(), cmd, raiseOnError=False)
assert result == CompletedProcess('', '', 100, cmd)
def test_support_array_args(self):
result = run(_make_local_guestfs(), ['echo', 'hi'], raiseOnError=False)
assert result == CompletedProcess('hi\n', '', 0, 'echo hi')
def test_escape_array_members(self):
result = run(_make_local_guestfs(),
['echo', 'hello', '; ls *'], raiseOnError=False)
assert result == CompletedProcess('hello ; ls *\n', '', 0,
"echo hello '; ls *'")
def test_capture_runtime_errors(self):
result = run(_make_local_guestfs(), 'not-a-command', raiseOnError=False)
assert result.code != 0
assert 'not-a-command' in result.stderr
def test_capture_output_when_non_zero_return(self):
cmd = 'printf content; printf err > /dev/stderr; exit 1'
result = run(_make_local_guestfs(), cmd, raiseOnError=False)
assert result == CompletedProcess('content', 'err', 1, cmd)
class TestWithCheck:
def test_return_completed_process_when_success(self):
cmd = 'echo abc123'
result = run(_make_local_guestfs(), cmd)
assert result == CompletedProcess('abc123\n', '', 0, cmd)
def test_raise_error_when_failure(self):
cmd = '>&2 echo stderr msg; exit 1'
with pytest.raises(RuntimeError, match='stderr msg'):
run(_make_local_guestfs(), cmd)
def _make_local_guestfs():
tmp_dir = tempfile.mkdtemp()
g = GuestFSInterface()
g.mkdtemp = MagicMock(return_value=tmp_dir)
g.cat = lambda path: pathlib.Path(path).read_text()
g.command = lambda args: subprocess.run(args, check=True)
g.write = lambda path, txt: pathlib.Path(path).write_text(txt)
return g
|
{
"content_hash": "6bd4bca87f29695a3e4bd804bb8f7550",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 35.878787878787875,
"alnum_prop": 0.6638513513513513,
"repo_name": "adjackura/compute-image-tools",
"id": "2a7c85ca3bb0f3f0095ef889231305f718b1d610",
"size": "2960",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "daisy_workflows/linux_common/tests/test_guestfsprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11517"
},
{
"name": "Dockerfile",
"bytes": "15161"
},
{
"name": "Go",
"bytes": "1363926"
},
{
"name": "Makefile",
"bytes": "2727"
},
{
"name": "PowerShell",
"bytes": "109744"
},
{
"name": "Python",
"bytes": "135972"
},
{
"name": "Shell",
"bytes": "84427"
}
],
"symlink_target": ""
}
|
from cps import *
import math, random, time
class ResourceChannel(WorldChannel):
"""
Updates the world's concentration of resource R due to:
- diffusion with an agent (Nvirtual/Nmax virtual cells)
- exchange with a reservoir and sink
"""
def scheduleEvent(self, world, cells, time, src):
return float('inf')
def fireEvent(self, world, cells, time, event_time, gradient, dt):
Nvirtual = world._size[-1]
diffusionIn_rate = -(1/Vexternal)*kdiff*gradient*(Nvirtual/Nmax)
netFlowIn_rate = kdil*(Rreservoir - world.R)
world.R += (diffusionIn_rate + netFlowIn_rate)*dt
return True
class DiffusionChannel(AgentChannel):
"""
Updates a cell's concentration of resource R due to:
- diffusion with the environment
- metabolic consumption, degradation
"""
def scheduleEvent(self, cell, world, time, src):
return float('inf')
def fireEvent(self, cell, world, time, event_time):
gradient = world.R-cell.R
dt = event_time-time
diffusionIn_rate = (1/cell.V)*kdiff*gradient
consumption_rate = kdeg*cell.R
cell.R += (diffusionIn_rate - consumption_rate)*dt
self.fire(world, 'ResourceChannel', gradient=gradient, dt=dt)
return True
class CellDivisionChannel(AgentChannel):
"""
Performs cell division as a "reaction" whose propensity depends on the
resource concentration R(t) inside the cell over time. Since the kinetic
rate depends on time, we use the Shahrezaei and Swain algorithm.
This involves setting a series of time barriers for providing a linear
approximation of the division propensity. The position of the next barrier
is controlled by BarrierStepChannel.
"""
def __init__(self, div_rate):
self.div_rate = div_rate
def scheduleEvent(self, cell, world, time, src):
# predict value of internal resource at time barrier
gradient = world.R-cell.R
dt = cell.TBarrier-time
diffusionIn_rate = (1/cell.V)*kdiff*gradient
consumption_rate = kdeg*cell.R
cell.RBarrier = cell.R + (diffusionIn_rate - consumption_rate)*dt
# find tau by linear approx method
a0 = self.div_rate*cell.R # value now
aB = self.div_rate*cell.RBarrier # value at barrier
alpha = (aB-a0)/(cell.TBarrier-time) # slope
r = random.uniform(0,1)
if alpha == 0:
# slope is zero, so we have a constant propensity
tau = -math.log(r)/a0
else:
if a0 == 0 or -math.log(r) >= 0.5*a0**2/alpha:
# our a(t) approximation crosses the t-axis given the r we sampled
# in this case, we assume the reaction doesn't happen
tau = float('inf')
else:
tau = a0/alpha*(-1 + math.sqrt(1-(2*alpha*math.log(r)/a0**2)))
return time + tau
def fireEvent(self, cell, world, time, event_time):
self.fire(cell, 'DiffusionChannel')
cell.div_count += 1
new_cell = self.cloneAgent(cell)
return True
class BarrierStepChannel(AgentChannel):
"""
Sets a series of time barriers for improving the prediction of the time of
cell division by CellDivisionChannel.
If the next predicted division time is later than the current time barrier,
this channel will fire first, advancing the agent's clock to the barrier.
Then CellDivisionChannel will be rescheduled using the next time barrier.
"""
def __init__(self, barrier_size):
self.barrier_size = barrier_size
def scheduleEvent(self, cell, world, time, src):
return cell.TBarrier
def fireEvent(self, cell, world, time, event_time):
self.fire(cell, 'DiffusionChannel')
cell.TBarrier = event_time + self.barrier_size
return True
class CellDeathChannel(AgentChannel):
"""
    Cell death is a stochastic reaction with constant rate (i.e., a Poisson process).
"""
def __init__(self, death_rate):
self.death_rate = death_rate
def scheduleEvent(self, cell, world, time, src):
r = random.uniform(0,1)
return time - math.log(r)/self.death_rate
def fireEvent(self, cell, world, time, event_time):
self.fire(cell, 'DiffusionChannel')
self.killAgent(cell)
return True
class CellDilutionChannel(WorldChannel):
"""
Cells are being diluted away with the media. Implemented as a WORLD channel.
We'll treat it as a stochastic reaction with rate constant kdil. Our
"reactor" is the whole system, so the reaction propensity would be
kdil*Nvirtual, where Nvirtual is the virtual population size.
The effect of the reaction is the loss of ONE virtual cell.
Since the cell that gets diluted away is random and completely unbiased,
    we don't actually have to delete any agents from the collection.
"""
def scheduleEvent(self, world, agents, time, src):
r = random.uniform(0,1)
Nvirtual = world._size[-1]
dilution_rate = kdil*Nvirtual
return time - math.log(r)/dilution_rate
def fireEvent(self, world, agents, time, event_time):
# NOTE: Sync channels fire before this fires
Nvirtual = world._size[-1]
# pretend a random cell got diluted away...
# no need to actually remove one!
world._size.append(Nvirtual - 1)
world._ts.append(event_time)
return False
class CellDilutionChannel2(AgentChannel):
"""
Cells are being diluted away with the media. Implemented as an AGENT channel.
We'll treat it as a poisson process with rate kdil.
    NOTE: This Poisson process will be occurring in each agent in the collection simultaneously.
The effect of the reaction is the death of an agent, which corresponds to
the loss of Nvirtual/Nmax virtual cells from the virtual population.
    We don't actually have to delete the agent from the collection,
since the dilution event depends in no way on the agent's state and so does
not affect the composition of the population.
"""
def scheduleEvent(self, cell, world, time, src):
r = random.uniform(0,1)
return time - math.log(r)/kdil
def fireEvent(self, cell, world, time, event_time):
self.fire(cell, 'DiffusionChannel')
Nvirtual = world._size[-1]
# pretend Nvirtual/Nmax virtual cells got diluted away...
# no need to actually delete an agent!
world._size.append(Nvirtual - Nvirtual/Nmax)
world._ts.append(event_time)
return False
# Create the model...
Nmax = 100
model = Model(n0=Nmax, nmax=Nmax)
# CONSTANTS
recorder_step = 0.01
barrier_size = 0.5
div_rate = 0.1
death_rate = 0.02
Vexternal = 1
Vcell0 = 1e-4
Rreservoir = 1
kdiff = 0.01
kdeg = 0.05
kdil = 0.01
# Initializer
def my_init(world, cells):
world.R = Rreservoir
for cell in cells:
cell.V = Vcell0
cell.R = 0.5*random.uniform(0,1)
cell.TBarrier = cell._time + barrier_size
cell.RBarrier = ((1/cell.V)*kdiff*(world.R-cell.R) - kdeg*cell.R)*(cell.TBarrier-cell._time)
cell.div_count = 0
model.addInitializer(['R'], ['V', 'R', 'TBarrier', 'RBarrier', 'div_count'], my_init)
# Recording/logging
def my_recorder(log, time, world, agents):
log['Rext'].append(world.R)
log['Rint'].append([agent.R for agent in agents])
recorder = Recorder(['Rext'], ['Rint'], my_recorder)
model.addRecorder(recorder)
##def my_logger(log, time, agent):
## log['R'].append(agent.R)
## log['rB'].append(agent.RBarrier)
## log['tB'].append(agent.TBarrier)
## log['div_count'].append(agent.div_count)
##model.addLogger(0, ['R','rB','tB','div_count'], my_logger)
# Add the channels
Crec = RecordingChannel(tstep=recorder_step, recorder=recorder)
CresW = ResourceChannel()
CresA = DiffusionChannel()
Cdiv = CellDivisionChannel(div_rate=div_rate)
Cdeath = CellDeathChannel(death_rate=death_rate)
Cdil = CellDilutionChannel()
Cbar = BarrierStepChannel(barrier_size=barrier_size)
model.addWorldChannel(channel=Crec)
model.addWorldChannel(channel=CresW)
model.addWorldChannel(channel=Cdil)
model.addAgentChannel(channel=CresA, sync=True)
model.addAgentChannel(channel=Cdeath)
model.addAgentChannel(channel=Cdiv, ac_dependents=[Cdeath])
model.addAgentChannel(channel=Cbar, ac_dependents=[Cdiv])
from os import path
DATA_PATH = path.join(path.abspath(path.pardir), 'data')
# Run the simulation
if __name__ == '__main__':
sim = FMSimulator(model, 0)
t0 = time.time()
sim.runSimulation(50)
t = time.time()
print(t-t0)
savemat_snapshot(path.join(DATA_PATH, 'snapshot_test.mat'),
sim.recorders[0])
#savemat_lineage('c:/users/nezar/temp/competition-model/lineage.mat', sim.loggers[0])
import scipy.io
size_data = {'t':sim.world._ts, 'sz':sim.world._size}
scipy.io.savemat(path.join(DATA_PATH, 'size.mat'), size_data, oned_as='column')
|
{
"content_hash": "ac312a692ab1327e71ae6be6488c526c",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 100,
"avg_line_length": 33.446096654275095,
"alnum_prop": 0.6608869623207736,
"repo_name": "nvictus/CellPopSim",
"id": "a25f0fd4f8c3fb2179ce84c350cc987aca0aa269",
"size": "11266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/model_competition.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "100684"
}
],
"symlink_target": ""
}
|
import copy
import operator
import re
import sys
import warnings
import weakref
import numbers
from functools import reduce
from collections import OrderedDict
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .card import Card, CARD_LENGTH
from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
NotifierMixin)
from .verify import VerifyError, VerifyWarning
from astropy.utils import lazyproperty, isiterable, indent
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Column', 'ColDefs', 'Delayed']
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS['b1'] = 'L'
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS['u2'] = 'I'
NUMPY2FITS['u4'] = 'J'
NUMPY2FITS['u8'] = 'K'
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS['f2'] = 'E'
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {'E': 'D', 'C': 'M'}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),
'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)|')
TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \
re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|')
TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \
TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \
re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)'
r'(?:\.{0,1}(?P<precision>[0-9]+))?))|')
TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \
TDISP_RE_DICT['D'] = \
re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.'
r'(?P<precision>[0-9]+))+)'
r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')
TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \
re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)')
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
#      Can't predefine zero padding and space padding beforehand without
#      knowing the value being formatted, so grabbing precision and using that
#      to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering Fortran format; exponent is a multiple of three)
# ES: Float (scientific; same as EN but with a non-zero leading digit)
# E: Float, exponential notation
#    Can't get the exponential restriction to work without knowing the value
#    beforehand, so just using width and precision, same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
'I': '{{:{width}d}}',
'B': '{{:{width}b}}',
'O': '{{:{width}o}}',
'Z': '{{:{width}x}}',
'F': '{{:{width}.{precision}f}}',
'G': '{{:{width}.{precision}g}}'
}
TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'
TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \
TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'
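# Editor's example of how these templates are used (values chosen purely for
# illustration): a TDISP of 'F8.3' maps to
#   TDISP_FMT_DICT['F'].format(width=8, precision=3)   # -> '{:8.3f}'
# and '{:8.3f}'.format(3.14159) renders as '   3.142'.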
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',
'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS')
KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',
'disp', 'start', 'dim', 'coord_type', 'coord_unit',
'coord_ref_point', 'coord_ref_value', 'coord_inc',
'time_ref_pos')
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
r'(?P<option>[!-~]*)', re.I)
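# Editor's example (illustrative only): TFORMAT_RE.match('10A').groupdict()
# yields {'repeat': '10', 'format': 'A', 'option': ''}.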
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
r'(?:(?P<formatf>[FED])'
r'(?:(?P<widthf>[0-9]+)\.'
r'(?P<precision>[0-9]+))?)')
TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*')
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = '---'
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ('P', 'Q'):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == 'P':
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ''
else:
repeat = str(self.repeat)
return f'{repeat}{self.format}{self.option}'
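# Illustrative sketch of the parsing and comparison behaviour (comment-only
# example, not executed as part of this module):
#
#     >>> fmt = _ColumnFormat('10A')
#     >>> fmt.repeat, fmt.format, fmt.option
#     (10, 'A', '')
#     >>> _ColumnFormat('1J') == _ColumnFormat('J')  # canonical forms match
#     True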
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
effectively any integer that will fit in a FITS column. Whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = \
_parse_ascii_tformat(format, strict)
        # If no width has been specified, use the default recformat for this
        # format code as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == 'L':
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ('E', 'F', 'D'):
return f'{self.format}{self.width}.{self.precision}'
return f'{self.format}{self.width}'
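# Illustrative sketch (comment-only example, not executed as part of this
# module):
#
#     >>> fmt = _AsciiColumnFormat('F10.4')
#     >>> fmt.format, fmt.width, fmt.precision
#     ('F', 10, 4)
#     >>> _AsciiColumnFormat('I', strict=True)  # width is required when strict
#     Traceback (most recent call last):
#         ...
#     VerifyError: ...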
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + 'u1')
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f'{self.repeat}X'
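# For example (illustrative): a 10X bit field needs ((10 - 1) // 8) + 1 == 2
# storage bytes, so _FormatX(10) is the dtype string '(2,)u1' with
# .repeat == 10 and .tform == '10X'.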
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])'
r'(?:\((?P<max>\d*)\))?')
_format_code = 'P'
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = '2i4'
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group('dtype') not in FITS2NUMPY:
raise VerifyError(f'Invalid column format: {format}')
repeat = m.group('repeat')
array_dtype = m.group('dtype')
max = m.group('max')
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = '' if self.repeat is None else self.repeat
max = '' if self.max is None else self.max
return f'{repeat}{self._format_code}{self.format}({max})'
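# Illustrative sketch (comment-only example; the values assume the standard
# FITS2NUMPY mapping, in which 'J' corresponds to 'i4'):
#
#     >>> p = _FormatP.from_tform('PJ(100)')
#     >>> str(p), p.dtype, p.tform
#     ('2i4', 'i4', 'PJ(100)')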
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = 'Q'
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = '2i8'
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify('column_attribute_changed', obj, self._attr[1:], old_value,
value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(self, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None, dim=None,
array=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None, coord_inc=None,
time_ref_pos=None):
"""
Construct a `Column` by specifying attributes. All attributes
        except ``format`` are optional; see :ref:`astropy:column_creation`
        and :ref:`astropy:creating_ascii_table` for more information regarding
        the ``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
            and/or ``bzero`` arguments are given, the values in the array must
            be the *physical* values--that is, the values of the column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
"""
if format is None:
raise ValueError('Must specify format to construct Column.')
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {'ascii': ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ['The following keyword arguments to Column were invalid:']
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError('\n'.join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs['recformat']
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; input
        # arrays may be plain lists or tuples. Object arrays are excluded
        # because there is no guarantee that their elements are consistent.
if not isinstance(array,
(np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError('Data is inconsistent with the '
'format `{}`.'.format(format))
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ''
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + ' = ' + repr(value) + '; '
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
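    # Illustrative sketch of the equality semantics (comment-only example):
    #
    #     >>> Column(name='FLUX', format='E') == Column(name='flux', format='1E')
    #     True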
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
        # This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
        # previously assigned each col.array to a field in a FITS_rec (as in
        # the example a few paragraphs above) is still used; however, now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if 'array' in self.__dict__:
return self.__dict__['array']
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if (hasattr(base, '_coldefs') and
isinstance(base._coldefs, ColDefs)):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if 'array' in self.__dict__:
del self.__dict__['array']
return
if getattr(base, 'base', None) is not None:
base = base.base
else:
break
self.__dict__['array'] = array
@array.deleter
def array(self):
try:
del self.__dict__['array']
except KeyError:
pass
self._parent_fits_rec = None
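    # A minimal sketch of the weakref pattern described above (illustrative
    # only; `Member` is a hypothetical name, not part of this module):
    #
    #     import weakref
    #     class Member:
    #         def attach(self, owner):
    #             self._owner_ref = weakref.ref(owner)  # no hard reference
    #         def lookup(self):
    #             return self._owner_ref()  # None once the owner is collected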
@ColumnAttribute('TTYPE')
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
'It is strongly recommended that column names contain only '
'upper and lower-case ASCII letters, digits, or underscores '
'for maximum compatibility with other software '
'(got {!r}).'.format(name), VerifyWarning)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if (not isinstance(name, str)
or len(str(Card('TTYPE', name))) != CARD_LENGTH):
raise AssertionError(
'Column name must be a string able to fit in a single '
'FITS card--typically this means a maximum of 68 '
'characters, though it may be fewer if the string '
'contains special characters like quotes.')
@ColumnAttribute('TCTYP')
def coord_type(col, coord_type):
if coord_type is None:
return
if (not isinstance(coord_type, str)
or len(coord_type) > 8):
raise AssertionError(
                'Coordinate/axis type must be a string of at most 8 '
'characters.')
@ColumnAttribute('TCUNI')
def coord_unit(col, coord_unit):
if (coord_unit is not None
and not isinstance(coord_unit, str)):
raise AssertionError(
'Coordinate/axis unit must be a string.')
@ColumnAttribute('TCRPX')
def coord_ref_point(col, coord_ref_point):
if (coord_ref_point is not None
and not isinstance(coord_ref_point, numbers.Real)):
raise AssertionError(
'Pixel coordinate of the reference point must be '
'real floating type.')
@ColumnAttribute('TCRVL')
def coord_ref_value(col, coord_ref_value):
if (coord_ref_value is not None
and not isinstance(coord_ref_value, numbers.Real)):
raise AssertionError(
'Coordinate value at reference point must be real '
'floating type.')
@ColumnAttribute('TCDLT')
def coord_inc(col, coord_inc):
if (coord_inc is not None
and not isinstance(coord_inc, numbers.Real)):
raise AssertionError(
'Coordinate increment must be real floating type.')
@ColumnAttribute('TRPOS')
def time_ref_pos(col, time_ref_pos):
if (time_ref_pos is not None
and not isinstance(time_ref_pos, str)):
raise AssertionError(
'Time reference position must be a string.')
format = ColumnAttribute('TFORM')
unit = ColumnAttribute('TUNIT')
null = ColumnAttribute('TNULL')
bscale = ColumnAttribute('TSCAL')
bzero = ColumnAttribute('TZERO')
disp = ColumnAttribute('TDISP')
start = ColumnAttribute('TBCOL')
dim = ColumnAttribute('TDIM')
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format='I') # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f'Illegal format `{format}`.')
return format, recformat
@classmethod
def _verify_keywords(cls, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None,
dim=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None,
coord_inc=None, time_ref_pos=None):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f'Column format option (TFORMn) failed verification: {err!s} '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
except AttributeError as err:
msg = (
f'Column format option (TFORMn) must be a string with a valid '
f'FITS table format (got {format!s}: {err!s}). '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [('name', name), ('unit', unit), ('bscale', bscale),
('bzero', bzero)]:
if v is not None and v != '':
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != '':
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null))
else:
tnull_formats = ('B', 'I', 'J', 'K')
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
'Column null option (TNULLn) must be an integer for '
'binary table columns (got {!r}). The invalid value '
'will be ignored for the purpose of formatting '
'the data in this column.'.format(null))
elif not (format.format in tnull_formats or
(format.format in ('P', 'Q') and
format.p_format in tnull_formats)):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
'Column null option (TNULLn) is invalid for binary '
'table columns of type {!r} (got {!r}). The invalid '
'value will be ignored for the purpose of formatting '
'the data in this column.'.format(format, null))
if msg is None:
valid['null'] = null
else:
invalid['null'] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != '':
msg = None
if not isinstance(disp, str):
msg = (
f'Column disp option (TDISPn) must be a string (got '
f'{disp!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
elif (isinstance(format, _AsciiColumnFormat) and
disp[0].upper() == 'L'):
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column.")
if msg is None:
try:
_parse_tdisp_format(disp)
valid['disp'] = disp
except VerifyError as err:
msg = (
f'Column disp option (TDISPn) failed verification: '
f'{err!s} The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
invalid['disp'] = (disp, msg)
else:
invalid['disp'] = (disp, msg)
# Validate the start option
if start is not None and start != '':
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
'Column start option (TBCOLn) is not allowed for binary '
'table columns (got {!r}). The invalid keyword will be '
'ignored for the purpose of formatting the data in this '
'column.'.format(start))
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
'Column start option (TBCOLn) must be a positive integer '
'(got {!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.'.format(start))
if msg is None:
valid['start'] = start
else:
invalid['start'] = (start, msg)
# Process TDIMn options
        # ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != '':
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
'Column dim option (TDIMn) is not allowed for ASCII table '
'columns (got {!r}). The invalid keyword will be ignored '
'for the purpose of formatting this column.'.format(dim))
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column.")
if dims_tuple:
if reduce(operator.mul, dims_tuple) > format.repeat:
                    msg = (
                        "The repeat count of the column format {!r} for "
                        "column {!r} is less than the number of elements "
                        "per the TDIM argument {!r}. The invalid TDIMn "
                        "value will be ignored for the purpose of "
                        "formatting this column.".format(format, name, dim))
if msg is None:
valid['dim'] = dims_tuple
else:
invalid['dim'] = (dim, msg)
if coord_type is not None and coord_type != '':
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type))
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type))
if msg is None:
valid['coord_type'] = coord_type
else:
invalid['coord_type'] = (coord_type, msg)
if coord_unit is not None and coord_unit != '':
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit))
if msg is None:
valid['coord_unit'] = coord_unit
else:
invalid['coord_unit'] = (coord_unit, msg)
for k, v in [('coord_ref_point', coord_ref_point),
('coord_ref_value', coord_ref_value),
('coord_inc', coord_inc)]:
if v is not None and v != '':
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got {!r}). "
"The invalid value will be ignored for the purpose of formatting "
"the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v))
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != '':
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos))
if msg is None:
valid['time_ref_pos'] = time_ref_pos
else:
invalid['time_ref_pos'] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format,
_AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
                'Columns cannot have both a start (TBCOLn) and dim '
                '(TDIMn) option, since the former only applies to '
'ASCII tables, and the latter is only valid for binary '
'tables.')
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
            # For whatever reason our guess was wrong (for example, 'F' alone
            # is not a valid binary format, but it is an ASCII format code,
            # albeit with the width/precision omitted).
guess_format = (_AsciiColumnFormat
if guess_format is _ColumnFormat
else _ColumnFormat)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
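    # Illustrative outcomes of the guessing logic above (comment-only
    # example): 'I' with no start/dim parses as a binary 16-bit integer
    # column (the historical default), whereas 'F8.3' is strictly a valid
    # ASCII float format and therefore yields an _AsciiColumnFormat.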
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims:
shape = dims[:-1] if 'A' in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if 'P' in format or 'Q' in format:
return array
elif 'A' in format:
if array.dtype.char in 'SU':
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif 'L' in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype('bool'):
return np.where(array == np.False_, ord('F'), ord('T'))
else:
return np.where(array == 0, ord('F'), ord('T'))
elif 'X' in format:
return _convert_array(array, np.dtype('uint8'))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31),
8: np.uint64(2**63)}
if (array.dtype.kind == 'u' and
array.dtype.itemsize in bzeros and
self.bscale in (1, None, '') and
self.bzero == bzeros[array.dtype.itemsize]):
                    # The array is uint, has bscale == 1, and bzero is the
                    # appropriate value for a pseudo-unsigned integer of the
                    # input dtype, so go ahead and treat the column as
                    # pseudo-unsigned.
numpy_format = numpy_format.replace('i', 'u')
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = '\x00'
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if (hasattr(input, '_columns_type') and
issubclass(input._columns_type, ColDefs)):
klass = input._columns_type
elif (hasattr(input, '_col_format_cls') and
issubclass(input._col_format_cls, _AsciiColumnFormat)):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .hdu.table import _TableBaseHDU
from .fitsrec import FITS_rec
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and
input._coldefs):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError('Input to ColDefs must be a table HDU, a list '
'of Columns, or a record/field array.')
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f'Element {idx} in the ColDefs input is not a Column.')
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or 'A' in format):
if 'A' in format:
# should take into account multidimensional items in the column
dimel = int(re.findall('[0-9]+', str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = '(' + ','.join(str(d) for d in dim) + ')'
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == 'u':
if 'I' in format:
bzero = np.uint16(2**15)
elif 'J' in format:
bzero = np.uint32(2**31)
elif 'K' in format:
bzero = np.uint64(2**63)
c = Column(name=cname, format=format,
array=array.view(np.ndarray)[cname], bzero=bzero,
dim=dim)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr['TFIELDS']
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group('label')
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group('num'))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == 'format':
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f'Invalid keyword for column {idx + 1}: {val[1]}',
VerifyWarning)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs['recformat']
if 'dim' in valid_kwargs:
valid_kwargs['dim'] = kwargs['dim']
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]['array'] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener to changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if (new_column.disp is not None and
new_column.disp.upper().startswith('L')):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == 's':
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else '')
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim:
if format_.format == 'A':
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({'names': self.names,
'formats': formats,
'offsets': offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = 'ColDefs('
if hasattr(self, 'columns') and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += '\n '
rep += '\n '.join([repr(c) for c in self.columns])
rep += '\n'
rep += ')'
return rep
def __add__(self, other, option='left'):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError('Wrong type of input.')
if option == 'left':
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, 'right')
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value,
new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == 'name':
del self.names
elif attr == 'format':
del self.formats
self._notify('column_attribute_changed', column, idx, attr, old_value,
new_value)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify('column_added', self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
col_name : str or int
The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify('column_removed', self, indx)
return self
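    # Illustrative usage sketch (comment-only example; the column names and
    # formats are made up):
    #
    #     >>> cols = ColDefs([Column(name='A', format='J'),
    #     ...                 Column(name='B', format='E')])
    #     >>> cols = cols + Column(name='C', format='D')  # returns a new ColDefs
    #     >>> cols.del_col('B').names                     # mutates, returns self
    #     ['A', 'C']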
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f'New name {new_name} already exists.')
else:
self.change_attrib(col_name, 'name', new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, 'unit', new_unit)
def info(self, attrib='all', output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ['all', '']:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(',')
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == 's':
                    lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write("'{}' is not an attribute of the column "
"definitions.\n".format(attr))
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + 's')
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = ' '
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = 'S' + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
        # Each width is the width of a field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ['a' + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype='a'):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == 'a':
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(
f'Inconsistent input data array: {input}')
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a,
dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Ensure that the new item has a consistent data type, to avoid
        misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == 'a':
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
self.max = max(self.max, len(value))
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
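# Illustrative examples of the matching rules above (comment-only, not
# executed as part of this module):
#
#     >>> _get_index(['FLUX', 'TIME'], 'TIME')  # exact match
#     1
#     >>> _get_index(['FLUX', 'TIME'], 'flux')  # unique case-insensitive match
#     0
#     >>> _get_index(['abc', 'ABC'], 'Abc')     # ambiguous -> KeyError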
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
        input ``uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8')
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
    Wrap the X format column Boolean array into a ``uint8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
        output ``uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
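# Worked example (illustrative): wrapping the ten bits 1011001011 with
# repeat=10 gives nbytes=2 and unused=6. The first byte packs bits 0-7
# MSB-first into 0b10110010 (178); the second byte packs bits 8-9 into 0b11
# and is then shifted left by the 6 unused bits, giving 0b11000000 (192).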
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == 'a':
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == 'a':
rowval = ' ' * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == 'a':
data_output[idx] = chararray.array(encode_ascii(rowval),
itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
descr_output[idx, 0] = len(data_output[idx])
descr_output[idx, 1] = _offset
_offset += len(data_output[idx]) * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
        # TODO: Maybe catch this error and use a default type (bytes, maybe?)
        # for unrecognized column types, as long as we can determine the
        # correct byte width somehow...
raise VerifyError(f'Format {tform!r} is not recognized.')
if repeat == '':
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
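# For example (illustrative): _parse_tformat('10A') returns (10, 'A', ''),
# and a missing repeat count defaults to 1, so _parse_tformat('E') returns
# (1, 'E', '').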
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f'Format {tform!r} is not recognized.')
# Be flexible on case
format = match.group('format')
if format is None:
# Floating point format
format = match.group('formatf').upper()
width = match.group('widthf')
precision = match.group('precision')
if width is None or precision is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group('width')
if width is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = ('Format {!r} is not valid--field width and decimal precision '
'must be integers.')
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError("Format {!r} not valid--field width must be a "
"positive integeter.".format(tform))
if precision >= width:
raise VerifyError("Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width))
return format, width, precision
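# Illustrative sketch (not part of the original module): floating-point
# codes carry a width and a precision, while integer/character codes carry
# only a width (precision fixed at 0):
#
#   >>> _parse_ascii_tformat('F8.3')
#   ('F', 8, 3)
#   >>> _parse_ascii_tformat('I10')
#   ('I', 10, 0)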
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
    the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group('dims')
return tuple(int(d.strip()) for d in dims.split(','))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
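# Illustrative sketch (not part of the original module): note the reversal
# from the FITS (Fortran, column-major) axis order to the C-order shape
# used by numpy, and the empty tuple for unparseable input:
#
#   >>> _parse_tdim('(100,200)')
#   (200, 100)
#   >>> _parse_tdim('not a tdim')
#   ()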
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == 'a' and f2[0] == 'a':
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == 'A':
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == 'A' and option != '':
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ''
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == 'X':
output_format = _FormatX(repeat)
elif dtype == 'P':
output_format = _FormatP.from_tform(format)
elif dtype == 'Q':
output_format = _FormatQ.from_tform(format)
elif dtype == 'F':
output_format = 'f8'
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
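# Illustrative sketch (not part of the original module), assuming the
# FITS2NUMPY table earlier in this module maps 'A' -> 'a' and 'J' -> 'i4':
#
#   >>> _convert_fits2record('10A')    # character column
#   'a10'
#   >>> _convert_fits2record('2J')     # repeated 32-bit integer column
#   '2i4'
#
# An 'X' (bit) or 'P'/'Q' (variable-length) spec instead returns a
# _FormatX / _FormatP / _FormatQ object rather than a dtype string.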
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == 'U' or (dtype.subdtype is not None
and dtype.subdtype[0].char == 'U'):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
# Use dtype.base and dtype.subdtype --dtype for multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype='i8').prod()
if nel > 1:
repeat = nel
if kind == 'a':
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + 'A'
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ''
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
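# Illustrative sketch (not part of the original module), assuming
# NUMPY2FITS maps 'f8' -> 'D' as elsewhere in astropy.io.fits:
#
#   >>> _convert_record2fits('f8')
#   'D'
#   >>> _convert_record2fits('a10')    # string field -> character column
#   '10A'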
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ('U', 'S'):
recformat = kind = 'a'
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == 'a':
return 'A' + str(itemsize)
elif NUMPY2FITS.get(recformat) == 'L':
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return 'A1'
elif kind == 'i':
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS['I'][0])
return 'I' + str(width)
elif kind == 'f':
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = 'D'
else:
format = 'E'
width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
        # For integers, if the width <= 4 we can safely use 16-bit ints for
        # all values; if width >= 10 we may need to accommodate 64-bit ints
        # (for the non-standard J format code just always force 64-bit).
if format == 'I':
if width <= 4:
recformat = 'i2'
elif width > 9:
recformat = 'i8'
elif format == 'A':
recformat += str(width)
return recformat
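# Illustrative sketch (not part of the original module): in the forward
# direction, narrow integer columns are read as 16-bit ints per the CFITSIO
# rule above, and character columns carry their width into the dtype:
#
#   >>> _convert_ascii_format('I4')
#   'i2'
#   >>> _convert_ascii_format('A10')
#   'a10'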
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
    ``(format, width, precision, exponential)`` tuple (the TDISP values
    for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = tdisp[0] if tdisp[0] != 'E' or (
len(tdisp) > 1 and tdisp[1] not in 'NS') else tdisp[:2]
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f'Format {tdisp} is not recognized.')
match = tdisp_re.match(tdisp.strip())
if not match or match.group('formatc') is None:
raise VerifyError(f'Format {tdisp} is not recognized.')
formatc = match.group('formatc')
width = match.group('width')
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'):
precision = match.group('precision')
if precision is None:
precision = 1
if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'):
exponential = match.group('exponential')
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
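# Illustrative sketch (not part of the original module): width and precision
# come back as strings (or defaults) rather than ints, e.g. for a
# fixed-point display format:
#
#   >>> _parse_tdisp_format('F6.2')
#   ('F', '6', '2', None)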
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f'Format {format_type} is not recognized.')
def python_to_tdisp(format_string, logical_dtype=False):
"""
Turn the Python format string to a TDISP FITS compliant format string. Not
    all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
TDISPn FITS Header keyword. Used to specify display formatting.
logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string: str
        The TDISPn keyword string translated from the Python format string.
"""
fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z',
'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E',
'E': 'E'}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == '{' and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip('}')
elif format_string[0] == '%':
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = '', ''
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':
ftype = fmt_to_tdisp['a']
width = fmt_str[1:]
elif fmt_str[-1] == 's' and fmt_str != 's':
ftype = fmt_to_tdisp['a']
width = fmt_str[:-1].lstrip('0')
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if '.' in fmt_str:
width, precision = fmt_str.split('.')
sep = '.'
if width == "":
ascii_key = ftype if ftype != 'G' else 'F'
width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] -
ASCII_DEFAULT_WIDTHS[ascii_key][1]))
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn('Format {} cannot be mapped to the accepted '
'TDISPn keyword values. Format will not be '
'moved into TDISPn keyword.'.format(format_string),
AstropyUserWarning)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = 'L'
return ftype + width + sep + precision
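# Illustrative sketch (not part of the original module): both {}-style and
# %-style numeric formats translate, e.g.:
#
#   >>> python_to_tdisp('{:8.3f}')
#   'F8.3'
#   >>> python_to_tdisp('%10d')
#   'I10'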
|
{
"content_hash": "55207728690929588a6f63c9b2dcaeb3",
"timestamp": "",
"source": "github",
"line_count": 2654,
"max_line_length": 90,
"avg_line_length": 37.04031650339111,
"alnum_prop": 0.5707135954427547,
"repo_name": "StuartLittlefair/astropy",
"id": "1dad3278622102895d37848212d07a6ba6e771b6",
"size": "98369",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/io/fits/column.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12224600"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from btcmarkets_api import Market
BTC = Market("/market/BTC/AUD/tick", "BTC")
LTC = Market("/market/LTC/AUD/tick", "LTC")
ETH = Market("/market/ETH/AUD/tick", "ETH")
ETC = Market("/market/ETC/AUD/tick", "ETC")
XRP = Market("/market/XRP/AUD/tick", "XRP")
BCH = Market("/market/BCH/AUD/tick", "BCH")
BTC.update_data()
LTC.update_data()
ETH.update_data()
ETC.update_data()
XRP.update_data()
BCH.update_data()
|
{
"content_hash": "66eb9d7819eeb53da97c789916e49ee7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 43,
"avg_line_length": 27.2,
"alnum_prop": 0.6740196078431373,
"repo_name": "infectiious/Pharaoh_script",
"id": "5821dd47fba75dd2ea15fba39cbd3efaf72653d2",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Markets/BTCMarkets/btcmarkets_fetch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10999"
},
{
"name": "Shell",
"bytes": "402"
}
],
"symlink_target": ""
}
|
import simplejson as json
import unicodecsv as csv
import urllib
url = "https://www.publicstuff.com/api/2.0/requests_list?return_type=json&limit=100&lat=35.62336&request_type_id=11339&lon=-82.561531&nearby=250&api_key=58j013k159vpqz87xd85df0uy7epvl"
response = urllib.urlopen(url);
data = json.loads(response.read())
f = csv.writer(open('file.csv', 'wb+'))
def main(data):
rowkey="request"
rowkey=rowkey.upper()
rowData='start'
doHeaders = True
def walkDict(d):
for k,v in d.items():
if checkrowkey(k):
f.writerow(v.values())
if isinstance(v,dict):
walkDict(v)
if isinstance(v,list):
walkList(v)
        print ''
def walkList(d):
for v in d:
if isinstance(v,dict):
walkDict(v)
if isinstance(v,list):
walkList(v)
def checkrowkey(k):
if str(k).upper()==rowkey:
return True
else:
return False
walkDict(data)
main(data)
|
{
"content_hash": "b9fb0fe9a7bf0b2451efc0712c0602db",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 184,
"avg_line_length": 18.387755102040817,
"alnum_prop": 0.6670366259711432,
"repo_name": "CodeForAsheville/get-json-convert-to-csv",
"id": "93c755319783346b989496bf9bb85a17e58403c9",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "901"
}
],
"symlink_target": ""
}
|
"""Setup.py module for the workflow's worker utilities.
All the workflow related code is gathered in a package that will be built as a
source distribution, staged in the staging area for the workflow being run and
then installed in the workers when they start running.
This behavior is triggered by specifying the --setup_file command line option
when running the workflow for remote execution.
"""
from __future__ import absolute_import
from __future__ import print_function
import subprocess
from distutils.command.build import build as _build # type: ignore
import setuptools
# This class handles the pip install mechanism.
class build(_build): # pylint: disable=invalid-name
"""A build command class that will be invoked during package install.
The package built using the current setup.py will be staged and later
installed in the worker using `pip install package'. This class will be
instantiated during install for this specific scenario and will trigger
running the custom commands specified.
"""
sub_commands = _build.sub_commands + [('CustomCommands', None)]
# Some custom command to run during setup. The command is not essential for this
# workflow. It is used here as an example. Each command will spawn a child
# process. Typically, these commands will include steps to install non-Python
# packages. For instance, to install a C++-based library libjpeg62 the following
# two commands will have to be added:
#
# ['apt-get', 'update'],
# ['apt-get', '--assume-yes', 'install', 'libjpeg62'],
#
# First, note that there is no need to use the sudo command because the setup
# script runs with appropriate access.
# Second, if apt-get tool is used then the first command needs to be 'apt-get
# update' so the tool refreshes itself and initializes links to download
# repositories. Without this initial step the other apt-get install commands
# will fail with package not found errors. Note also --assume-yes option which
# shortcuts the interactive confirmation.
#
# Note that in this example custom commands will run after installing required
# packages. If you have a PyPI package that depends on one of the custom
# commands, move installation of the dependent package to the list of custom
# commands, e.g.:
#
# ['pip', 'install', 'my_package'],
#
# TODO(BEAM-3237): Output from the custom commands are missing from the logs.
# The output of custom commands (including failures) will be logged in the
# worker-startup log.
CUSTOM_COMMANDS = [
['echo', 'Custom command worked!']]
class CustomCommands(setuptools.Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def RunCustomCommand(self, command_list):
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s' % (command_list, p.returncode))
def run(self):
for command in CUSTOM_COMMANDS:
self.RunCustomCommand(command)
# Configure the required packages and scripts to install.
# Note that the Python Dataflow containers come with numpy already installed
# so this dependency will not trigger anything to be installed unless a version
# restriction is specified.
REQUIRED_PACKAGES = [
'numpy',
]
setuptools.setup(
name='juliaset',
version='0.0.1',
description='Julia set workflow package.',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
cmdclass={
# Command class instantiated and run during pip install scenarios.
'build': build,
'CustomCommands': CustomCommands,
}
)
|
{
"content_hash": "fb8eae3927be0b9b562d31bfdfb886c3",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 80,
"avg_line_length": 36.39090909090909,
"alnum_prop": 0.7299525355983013,
"repo_name": "RyanSkraba/beam",
"id": "ee02a16d4d844a04b5529696c59f02a9c8720f2e",
"size": "4788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/complete/juliaset/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
}
|
"""Tests for refstack's migrations."""
import alembic
import mock
from oslotest import base
from refstack.db import migration
from refstack.db.migrations.alembic import migration as alembic_migration
class AlembicConfigTestCase(base.BaseTestCase):
@mock.patch('alembic.config.Config')
@mock.patch('os.path.join')
def test_alembic_config(self, os_join, alembic_config):
os_join.return_value = 'fake_path'
alembic_config.return_value = 'fake_config'
result = alembic_migration._alembic_config()
self.assertEqual(result, 'fake_config')
alembic_config.assert_called_once_with('fake_path')
class MigrationTestCase(base.BaseTestCase):
"""Test case for alembic's migrations API."""
def setUp(self):
super(MigrationTestCase, self).setUp()
self.config_patcher = mock.patch(
'refstack.db.migrations.alembic.migration._alembic_config')
self.config = self.config_patcher.start()
self.config.return_value = 'fake_config'
self.addCleanup(self.config_patcher.stop)
@mock.patch.object(alembic.migration.MigrationContext, 'configure',
mock.Mock())
def test_version(self):
context = mock.Mock()
context.get_current_revision = mock.Mock()
alembic.migration.MigrationContext.configure.return_value = context
with mock.patch('refstack.db.sqlalchemy.api.get_engine') as get_engine:
engine = mock.Mock()
engine.connect = mock.MagicMock()
get_engine.return_value = engine
migration.version()
context.get_current_revision.assert_called_once_with()
engine.connect.assert_called_once_with()
@mock.patch('alembic.command.upgrade')
def test_upgrade(self, upgrade):
migration.upgrade('some_revision')
upgrade.assert_called_once_with('fake_config', 'some_revision')
@mock.patch('alembic.command.upgrade')
def test_upgrade_without_revision(self, upgrade):
migration.upgrade(None)
upgrade.assert_called_once_with('fake_config', 'head')
@mock.patch('alembic.command.downgrade')
def test_downgrade(self, downgrade):
migration.downgrade('some_revision')
downgrade.assert_called_once_with('fake_config', 'some_revision')
@mock.patch('alembic.command.downgrade')
def test_downgrade_without_revision(self, downgrade):
migration.downgrade(None)
downgrade.assert_called_once_with('fake_config', 'base')
@mock.patch('alembic.command.stamp')
def test_stamp(self, stamp):
migration.stamp('some_revision')
stamp.assert_called_once_with('fake_config', 'some_revision')
@mock.patch('alembic.command.stamp')
def test_stamp_without_revision(self, stamp):
migration.stamp(None)
stamp.assert_called_once_with('fake_config', 'head')
@mock.patch('alembic.command.revision')
def test_revision(self, revision):
migration.revision('some_message', True)
revision.assert_called_once_with('fake_config', 'some_message', True)
|
{
"content_hash": "dc056d35ccaa708d800638f3b5a1141a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 38.24691358024691,
"alnum_prop": 0.6733376371852808,
"repo_name": "markvoelker/refstack",
"id": "c140be47e264092339edc5223627c5c2a0131412",
"size": "3732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refstack/tests/unit/test_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2492"
},
{
"name": "HTML",
"bytes": "31508"
},
{
"name": "JavaScript",
"bytes": "76009"
},
{
"name": "Mako",
"bytes": "481"
},
{
"name": "Python",
"bytes": "204481"
},
{
"name": "Shell",
"bytes": "9547"
}
],
"symlink_target": ""
}
|
from .api.native import *
from .api.rest import *
|
{
"content_hash": "4f4a4c3fc732476fbb276fb9452c23e9",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 25,
"alnum_prop": 0.72,
"repo_name": "ResearchSoftwareInstitute/MyHPOM",
"id": "c1dc1801f8e667104127231816ffba59c670030f",
"size": "50",
"binary": false,
"copies": "4",
"ref": "refs/heads/myhpom-develop",
"path": "hs_core/tests/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399181"
},
{
"name": "HTML",
"bytes": "950570"
},
{
"name": "JavaScript",
"bytes": "2069460"
},
{
"name": "Python",
"bytes": "5006675"
},
{
"name": "R",
"bytes": "4463"
},
{
"name": "Shell",
"bytes": "53077"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
import collections
import bisect
class SortedItems(collections.Sequence):
def __init__(self, initial=None):
        self._items = sorted(initial) if initial is not None else []
# Required sequence methods
def __getitem__(self, index):
return self._items[index]
def __len__(self):
return len(self._items)
# Method for adding an item in the right location
def add(self, item):
bisect.insort(self._items, item)
if __name__ == '__main__':
items = SortedItems([5, 1, 3])
print(list(items))
print(items[0])
print(items[-1])
items.add(2)
print(list(items))
items.add(-10)
print(list(items))
print(items[1:4])
print(3 in items)
print(len(items))
for n in items:
print(n)
|
{
"content_hash": "03c51422c94f3093da85d2b8c5c2e2c4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 64,
"avg_line_length": 23.59375,
"alnum_prop": 0.6119205298013245,
"repo_name": "tuanavu/python-cookbook-3rd",
"id": "f8ab10f34f688c06f8f55dc769ade18a61d2bc73",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/8/implementing_custom_containers/example1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20265"
},
{
"name": "CSS",
"bytes": "184"
},
{
"name": "Jupyter Notebook",
"bytes": "219413"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "250592"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
}
|
"""
Vamp is a function/codeblock signaturing framework which is
a subcomponent of vivisect. These may be used to import/export
signature sets and potentially identify code reuse or static
linking...
Current signature ideas:
function arg count
code block count
globals refs
code block refs
unusual instruction use
odd immediates
import calls
other signature calls
    certainty index
Exception handling
There will be function characteristics and code-block
characteristics...
NOTE: Initial signature code consists entirely of the envi
bytesig module and byte/mask sets for known function signatures.
"""
class Signature:
"""
A function/procedure signature.
"""
pass
from vivisect.const import *
def genSigAndMask(vw, funcva):
"""
Generate an envi bytesig signature and mask for the given
function block. This will properly mask off relocations
if present.
"""
fsize = 0
if funcva not in vw.getFunctions():
funcva = vw.getFunction(funcva)
if funcva is None:
raise Exception('Given funcva not a function or within a known function')
func_blocks = [cbva for cbva, _, _ in vw.getFunctionBlocks(funcva)]
# Figure out the size of the first linear chunk
# in this function...
cb = vw.getCodeBlock(funcva)
if cb[CB_VA] not in func_blocks:
raise Exception("funcva not in given func")
while cb is not None:
cbva, cbsize, cbfunc = cb
if cbfunc != funcva:
break
fsize += cbsize
cb = vw.getCodeBlock(cbva+cbsize)
if fsize == 0:
raise Exception("0 length function??!?1")
bytez = vw.readMemory(funcva, fsize)
sig = b""
mask = b""
i = 0
while i < fsize:
rtype = vw.getRelocation(funcva + i)
if rtype is None:
sig += bytez[i:i+1]
mask += b"\xff"
i += 1
elif rtype == RTYPE_BASERELOC:
x = b"\x00" * vw.psize
sig += x
mask += x
i += vw.psize
else:
raise Exception("Unhandled Reloc Type: %d" % rtype)
return sig, mask
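# Illustrative sketch (not part of the original module): how a (sig, mask)
# pair produced above can be compared against candidate bytes. Relocation
# slots get mask bytes of 0x00, so they match any relocated value.
# (Python 3 bytes iteration assumed.)
def _sig_matches(sig, mask, candidate):
    # every masked-in byte must agree between signature and candidate
    if len(candidate) < len(sig):
        return False
    return all((c & m) == (s & m) for s, m, c in zip(sig, mask, candidate))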
|
{
"content_hash": "dd488f621660087d9c369fac6d17c4c0",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 85,
"avg_line_length": 26.120481927710845,
"alnum_prop": 0.6208487084870848,
"repo_name": "cmaruti/vivisect",
"id": "5c24a86244afb7a495032ff07da89e5b3d14af1e",
"size": "2169",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vivisect/vamp/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "17699753"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
'''This is like pexpect, but it will work with any file descriptor that you
pass it. You are responsible for opening and closing the file descriptor.
This allows you to use Pexpect with sockets and named pipes (FIFOs).
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from .spawnbase import SpawnBase
from .exceptions import ExceptionPexpect, TIMEOUT
from .utils import select_ignore_interrupts, poll_ignore_interrupts
import os
__all__ = ['fdspawn']
class fdspawn(SpawnBase):
'''This is like pexpect.spawn but allows you to supply your own open file
descriptor. For example, you could use it to read through a file looking
for patterns, or to control a modem or serial device. '''
def __init__ (self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, encoding=None, codec_errors='strict', use_poll=False):
'''This takes a file descriptor (an int) or an object that support the
fileno() method (returning an int). All Python file-like objects
support fileno(). '''
if type(fd) != type(0) and hasattr(fd, 'fileno'):
fd = fd.fileno()
if type(fd) != type(0):
raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')
try: # make sure fd is a valid file descriptor
os.fstat(fd)
except OSError:
raise ExceptionPexpect('The fd argument is not a valid file descriptor.')
self.args = None
self.command = None
SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
encoding=encoding, codec_errors=codec_errors)
self.child_fd = fd
self.own_fd = False
self.closed = False
self.name = '<file descriptor %d>' % fd
self.use_poll = use_poll
def close (self):
"""Close the file descriptor.
Calling this method a second time does nothing, but if the file
descriptor was closed elsewhere, :class:`OSError` will be raised.
"""
if self.child_fd == -1:
return
self.flush()
os.close(self.child_fd)
self.child_fd = -1
self.closed = True
def isalive (self):
'''This checks if the file descriptor is still valid. If :func:`os.fstat`
does not raise an exception then we assume it is alive. '''
if self.child_fd == -1:
return False
try:
os.fstat(self.child_fd)
return True
except:
return False
def terminate (self, force=False): # pragma: no cover
'''Deprecated and invalid. Just raises an exception.'''
raise ExceptionPexpect('This method is not valid for file descriptors.')
# These four methods are left around for backwards compatibility, but not
# documented as part of fdpexpect. You're encouraged to use os.write
# directly.
def send(self, s):
"Write to fd, return number of bytes written"
s = self._coerce_send_string(s)
self._log(s, 'send')
b = self._encoder.encode(s, final=False)
return os.write(self.child_fd, b)
def sendline(self, s):
"Write to fd with trailing newline, return number of bytes written"
s = self._coerce_send_string(s)
return self.send(s + self.linesep)
def write(self, s):
"Write to fd, return None"
self.send(s)
def writelines(self, sequence):
"Call self.write() for each item in sequence"
for s in sequence:
self.write(s)
def read_nonblocking(self, size=1, timeout=-1):
"""
Read from the file descriptor and return the result as a string.
The read_nonblocking method of :class:`SpawnBase` assumes that a call
to os.read will not block (timeout parameter is ignored). This is not
the case for POSIX file-like objects such as sockets and serial ports.
Use :func:`select.select`, timeout is implemented conditionally for
POSIX systems.
:param int size: Read at most *size* bytes.
:param int timeout: Wait timeout seconds for file descriptor to be
ready to read. When -1 (default), use self.timeout. When 0, poll.
:return: String containing the bytes read
"""
if os.name == 'posix':
if timeout == -1:
timeout = self.timeout
rlist = [self.child_fd]
wlist = []
xlist = []
if self.use_poll:
rlist = poll_ignore_interrupts(rlist, timeout)
else:
rlist, wlist, xlist = select_ignore_interrupts(
rlist, wlist, xlist, timeout
)
if self.child_fd not in rlist:
raise TIMEOUT('Timeout exceeded.')
return super(fdspawn, self).read_nonblocking(size)
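# Illustrative usage sketch (not part of this module): expecting output on a
# plain os.pipe() file descriptor. Any object exposing fileno() also works.
#
#   import os
#   from pexpect.fdpexpect import fdspawn
#
#   r, w = os.pipe()
#   os.write(w, b'hello world\n')
#   child = fdspawn(r, timeout=5)
#   child.expect(b'world')      # expect() is inherited from SpawnBase
#   os.close(w)
#   child.close()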
|
{
"content_hash": "f69e43f99c1f02fb33fba1755e84c3d4",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 138,
"avg_line_length": 39.37837837837838,
"alnum_prop": 0.6336650652024708,
"repo_name": "kennethreitz/pipenv",
"id": "cddd50e10058a8b65b57c3d5bb2724fb35a22c2a",
"size": "5828",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "pipenv/vendor/pexpect/fdpexpect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
}
|
from m5.SimObject import SimObject
class RiscvISA(SimObject):
type = 'RiscvISA'
cxx_class = 'RiscvISA::ISA'
cxx_header = "arch/riscv/isa.hh"
|
{
"content_hash": "7965895dfb09cffa9f1202afb11424b3",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 36,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.6883116883116883,
"repo_name": "austinharris/gem5-riscv",
"id": "90a907b917f53ccddaca4a9fd849a8e5e3486f50",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arch/riscv/RiscvISA.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "239800"
},
{
"name": "C",
"bytes": "962597"
},
{
"name": "C++",
"bytes": "14760012"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "27055"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "7033"
},
{
"name": "Python",
"bytes": "3911448"
},
{
"name": "Shell",
"bytes": "49333"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
'''
Created on Feb 19, 2012
@author: dsussman
Copyright (c) 2012 Johns Hopkins University. All rights reserved.
'''
import numpy as np
# import networkx as nx # Disa - Not necessary for OCP
import scipy.sparse as sparse
from scipy.sparse import linalg as la
from scipy import linalg
#from sklearn.cluster import KMeans # Disa - Not necessary for OCP
from itertools import product
def adjacency_matrix(G):
"""Returns the adjacency matrix of a networkx graph as an np.array"""
return np.array(nx.adjacency_matrix(G))
def marchette_matrix(G):
A = np.array(nx.adjacency_matrix(G))
dia = A.dot(np.ones(A.shape[0]))/(G.number_of_nodes()-1)
return A + np.diag(dia)
# adjacency_sparse = nx.to_scipy_sparse_matrix # Disa - Edit
def laplacian_sparse(G):
"""Returns a scipy.sparse version of the normalized laplacian as given in Rohe, et al.
L = D^{-1/2}AD^{-1/2} where D is the diagonal matrix of degree"""
n = G.number_of_nodes()
A = nx.to_scipy_sparse_matrix(G)
degree = A*np.ones(n)
scale = sparse.lil_matrix((n,n))
scale.setdiag([np.sqrt(1.0/deg) if deg!=0 else 0 for deg in degree])
#scale = np.array([np.sqrt(d**-1) if d!=0 else 0 for d in degree])
return scale*A*scale
def laplacian_matrix(G):
"""Returns an np.array version of the normalized laplacian as given in Rohe, et al.
L = D^{-1/2}AD^{-1/2} where D is the diagonal matrix of degree"""
n = G.number_of_nodes()
A = np.array(nx.adjacency_matrix(G))
degree = np.dot(A,np.ones(n))
scale = [np.sqrt(1.0/deg) if deg!=0 else 0 for deg in degree]
return np.dot(np.diag(scale),np.dot(A, np.diag(scale)))
def self_matrix(G):
"""A function for embedding if G is already stored in matrix form"""
return G
class Embed(object):
"""Class do perform spectral embedding of Graphs"""
dim = None
sval = None
svec = None
svecR = None
matrix = None
G = None
directed = False
def __init__(self, dim, matrix=adjacency_matrix, directed=False):
"""Initializes an Embed object
Inputs
=======
        dim -- dimension of the embedding
matrix -- function which returns a matrix which represents the graph
the default is the (dense) adjacency matrix as an np.array
The matrix must return something that is accepted by
scipy.sparse.linalg.svds
"""
self.dim = dim
self.matrix = matrix
self.directed = directed
def __check_dim(self,d):
"""Helper function to make sure this is a valid dimension to return"""
if d<1:
raise ValueError('Dimension must be >=1')
if d>self.dim:
raise ValueError('Dimension must be <=self.dim')
def embed(self, G, fast=True):
"""Calculate the matrix for the graph and embed it to self.dim dimnensions
Uses the dim largest singular values.
Inputs
======
G - the graph object, must be acceptable as a parameter for self.matrix
fast -- if true then don't check if self.G==G before re-doing the embedding
"""
if not fast or self.G is not G:
self.G = G
if not self.directed:
self.svec,self.sval,_ = la.svds(self.matrix(G), self.dim)
else:
self.svec,self.sval,self.svecR = la.svds(self.matrix(G), self.dim)
self.svecR = self.svecR[:, ::-1].T
self.sval = self.sval[::-1]
self.svec = self.svec[:, ::-1]
return self
def get_embedding(self, d=None, scale=None):
"""Return the scaled or unscaled version of the embedding
Inputs
======
d -- dimension you want for the embedding, None for self.dim
        scale -- whether the singular vectors should be scaled by the square roots of the singular values
"""
if scale:
return self.get_scaled(d)
else:
return self.get_unscaled(d)
def get_unscaled(self, d=None):
"""Return the unscaled version of the embedding
Inputs
======
d -- dimension you want for the embedding, None for self.dim
"""
if not d:
d=self.dim
self.__check_dim(d)
if not self.directed:
return self.svec[:,np.arange(d)]
else:
return np.concatenate((self.svec[:,np.arange(d)], self.svecR[:,np.arange(d)]),1)
def get_scaled(self, d=None):
"""Return the scaled version of the embedding
Inputs
======
d -- dimension you want for the embedding, None for self.dim
"""
if not d:
d=self.dim
self.__check_dim(d)
if not self.directed:
return np.dot(self.svec[:,np.arange(d)],
np.diag(np.sqrt(self.sval[np.arange(d)])))
else:
return np.concatenate((
np.dot(self.svec[:,np.arange(d)],
np.diag(np.sqrt(self.sval[np.arange(d)]))),
np.dot(self.svecR[:,np.arange(d)],
np.diag(np.sqrt(self.sval[np.arange(d)]))) ),1)
def get_P_matrix(self, d=None):
x = self.get_scaled(d)
if not self.directed:
P = x.dot(x.T)
else:
P = x[:,:d].dot(x[:,d:].T)
eps = .0001
P[P<0]= eps
P[P>1]=1-eps
return P-np.diag(np.diag(P))
class EmbedIter(object):
"""Object to iterate over different matrices, dimensions, scale/unscaled embeddings"""
d = []
matrix = []
embed = None
def __init__(self, d_range, matrices, scales):
"""Initiate with lists of dimensions, matrices, scales"""
self.d = d_range
self.matrix = matrices
self.scale = scales
self.embed = [Embed(np.max(self.d), m) for m in self.matrix]
def get_embedding(self, G):
for ( d,embed, scale) in product(self.d, self.embed, self.scale):
embed.embed(G)
yield embed.get_embedding(d, scale)
adjEmbed = Embed(10,matrix=adjacency_matrix)
lapEmbed = Embed(10,matrix=laplacian_matrix)
marEmbed = Embed(10,matrix=marchette_matrix)
def dot_product_embed(G, d, scaled=True):
""" Generates an n by d matrix using an svd of the adjacency matrix
Each row of the output corresponds to a node (ordered according to G.node)
so that each node is assigned a vector in d-dimensional euclidean space.
Parameters
----------
G -- networkx graph
d -- embedding dimension
    scaled -- whether to scale the embedding by the square roots
        of the singular values (default=True)
Returns
-------
n times d matrix where n=G.number_of_nodes()
"""
A = adjacency_matrix(G)
if scaled:
u,s,_ = la.svds(A, d)
return np.dot(u,np.diag(np.sqrt(s)))
else:
u,_,_ = la.svds(A, d)
return u
dot_product_embed_unscaled = lambda G,d: dot_product_embed(G,d,scaled=False)
def normalized_laplacian_embed(G,d, scaled=False):
""" Generates an n by d matrix using an svd of the normalized laplacian
Each row of the output corresponds to a node (ordered according to G.node)
so that each node is assigned a vector in d-dimensional euclidean space.
Parameters
----------
G -- networkx graph
d -- embedding dimension
    scaled -- whether to scale the embedding by the square roots
        of the singular values (default=False)
Returns
-------
n times d matrix where n=G.number_of_nodes()
"""
L = laplacian_matrix(G)
if scaled:
u,s,_ = la.svds(sparse.csr_matrix(L), d)
return np.dot(u,np.diag(np.sqrt(s)))
else:
u,_,_ = la.svds(sparse.csr_matrix(L), d)
return u
normalized_laplacian_embed_scaled = lambda G,d:normalized_laplacian_embed(G, d, scaled=True)
def cluster_vertices_kmeans(G, embed, d, k, name=None):
""" Clusters vertices into k groups based on an embedding in d dimensions
Parameters
----------
G -- networkx graph
    embed -- embedding method that takes 2 parameters, Graph and dimension
d -- embedding dimension
k -- number of clusters
name -- if not None then assign labels as attribute name to each node
"""
k_means = KMeans(init='k-means++', k=k, n_init=10)
x = embed(G, d)
k_means.fit(x)
label = k_means.labels_
if name is not None:
nx.set_node_attributes(G, name, dict(zip(np.arange(G.number_of_nodes()),label)))
return label
def procrustes(X,Y):
"""Finds the optimal affine transformation T to minimize ||x-Ty||_F
Parameters
----------
x - reference, shape(x)=nxd where n is number of samples and d is dimension
    y - to be aligned, shape(y)=nxd
Returns
-------
Z - the transformed y
TODO: return T - the transformation
TODO: make scaling, reflection, centering optional
TODO: allow different dimension
"""
assert(X.shape == Y.shape)
# Center
muX = np.mean(X,axis=0)
muY = np.mean(Y,axis=0)
X0 = X-muX
Y0 = Y-muY
# Scale
varX = np.var(X0,axis=0)
varY = np.var(Y0,axis=0)
#Rotate
    l,d,m = linalg.svd(X0.T.dot(Y0))
    # m is V^T from the SVD of X0^T.Y0; the optimal orthogonal map taking
    # Y0 onto X0 is therefore m.T.dot(l.T)
    Z = np.sqrt(np.sum(varX)/np.sum(varY))*Y0.dot(m.T).dot(l.T)+muX
return Z
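# Illustrative sketch (not part of the original module): procrustes should
# recover a reference cloud from a rotated, scaled, and translated copy.
#
#   X = np.random.randn(50, 2)
#   theta = 0.3
#   R = np.array([[np.cos(theta), -np.sin(theta)],
#                 [np.sin(theta),  np.cos(theta)]])
#   Y = 2.0 * X.dot(R) + np.array([5.0, -1.0])
#   Z = procrustes(X, Y)
#   assert np.allclose(Z, X)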
|
{
"content_hash": "6afd13af61d97584f6e05a34c9a1b5c1",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 97,
"avg_line_length": 31.05483870967742,
"alnum_prop": 0.5744260932793186,
"repo_name": "openconnectome/m2g",
"id": "0723e72795101a2b274efe4aefabbc4ca785832b",
"size": "10243",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MR-OCP/mrcap/Embed.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2662"
},
{
"name": "C++",
"bytes": "2431"
},
{
"name": "CSS",
"bytes": "35060"
},
{
"name": "HTML",
"bytes": "243779"
},
{
"name": "JavaScript",
"bytes": "18578"
},
{
"name": "M",
"bytes": "514"
},
{
"name": "Makefile",
"bytes": "970"
},
{
"name": "Matlab",
"bytes": "19890"
},
{
"name": "Nginx",
"bytes": "3000"
},
{
"name": "Python",
"bytes": "505669"
},
{
"name": "R",
"bytes": "19939"
},
{
"name": "Shell",
"bytes": "8494"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CminValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmin", parent_name="scatterpolargl.marker", **kwargs
):
super(CminValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "1c717ba25beb56e248910ed7bcad6118",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 35.53333333333333,
"alnum_prop": 0.5909943714821764,
"repo_name": "plotly/python-api",
"id": "d563757261d75fdccd0a64b025f27f120f1187b8",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/_cmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from ctypes import *
## page permissions
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400
## process access permissions from winnt.h
DELETE = 0x00010000L
READ_CONTROL = 0x00020000L
WRITE_DAC = 0x00040000L
WRITE_OWNER = 0x00080000L
SYNCHRONIZE = 0x00100000L
ACCESS_SYSTEM_SECURITY = 0x01000000L
MAXIMUM_ALLOWED = 0x02000000L
GENERIC_READ = 0x80000000L
GENERIC_WRITE = 0x40000000L
GENERIC_EXECUTE = 0x20000000L
GENERIC_ALL = 0x10000000L
STANDARD_RIGHTS_REQUIRED = 0x000F0000L
STANDARD_RIGHTS_READ = READ_CONTROL
STANDARD_RIGHTS_WRITE = READ_CONTROL
STANDARD_RIGHTS_EXECUTE = READ_CONTROL
STANDARD_RIGHTS_ALL = 0x001F0000L
SPECIFIC_RIGHTS_ALL = 0x0000FFFFL
PROCESS_TERMINATE = 0x0001
PROCESS_CREATE_THREAD = 0x0002
PROCESS_SET_SESSIONID = 0x0004
PROCESS_VM_OPERATION = 0x0008
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_DUP_HANDLE = 0x0040
PROCESS_CREATE_PROCESS = 0x0080
PROCESS_SET_QUOTA = 0x0100
PROCESS_SET_INFORMATION = 0x0200
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_SUSPEND_RESUME = 0x0800
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
#PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
PROCESS_VM_ALL = PROCESS_VM_OPERATION|PROCESS_VM_READ|PROCESS_VM_WRITE
PROCESS_INFO_ALL = PROCESS_QUERY_INFORMATION|PROCESS_SET_INFORMATION
THREAD_TERMINATE = 0x0001
THREAD_SUSPEND_RESUME = 0x0002
THREAD_GET_CONTEXT = 0x0008
THREAD_SET_CONTEXT = 0x0010
THREAD_QUERY_INFORMATION = 0x0040
THREAD_SET_INFORMATION = 0x0020
THREAD_SET_THREAD_TOKEN = 0x0080
THREAD_IMPERSONATE = 0x0100
THREAD_DIRECT_IMPERSONATION = 0x0200
THREAD_SET_LIMITED_INFORMATION = 0x0400 # winnt
THREAD_QUERY_LIMITED_INFORMATION = 0x0800 # winnt
THREAD_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
JOB_OBJECT_ASSIGN_PROCESS = 0x0001
JOB_OBJECT_SET_ATTRIBUTES = 0x0002
JOB_OBJECT_QUERY = 0x0004
JOB_OBJECT_TERMINATE = 0x0008
JOB_OBJECT_SET_SECURITY_ATTRIBUTES = 0x0010
JOB_OBJECT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1F
## constants for contexts
CONTEXT_i386 = 0x00010000 # this assumes that i386 and
CONTEXT_i486 = 0x00010000 # i486 have identical context records
CONTEXT_CONTROL = (CONTEXT_i386 | 0x00000001L) # SS:SP, CS:IP, FLAGS, BP
CONTEXT_INTEGER = (CONTEXT_i386 | 0x00000002L) # AX, BX, CX, DX, SI, DI
CONTEXT_SEGMENTS = (CONTEXT_i386 | 0x00000004L) # DS, ES, FS, GS
CONTEXT_FLOATING_POINT = (CONTEXT_i386 | 0x00000008L) # 387 state
CONTEXT_DEBUG_REGISTERS = (CONTEXT_i386 | 0x00000010L) # DB 0-3,6,7
CONTEXT_EXTENDED_REGISTERS = (CONTEXT_i386 | 0x00000020L) # cpu specific extensions
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS)
CONTEXT_ALL = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS
CONTEXT_ALL |= CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS
CONTEXT_ALL |= CONTEXT_EXTENDED_REGISTERS
## basic types
DWORD64 = c_uint64
DWORD = c_uint32
WORD = c_uint16
BYTE = c_uint8
LONG = c_long
ULONG = c_ulong
INT = c_int
UINT = c_uint
ULONGLONG = c_uint64
LONGLONG = c_int64
## complex structures
class M128A(Structure):
_fields_ = [
('Low', ULONGLONG),
('High', LONGLONG)
]
class MMX(Structure):
_fields_ = [
('Header', ARRAY(M128A, 2)),
('Legacy', ARRAY(M128A, 8)),
('Xmm0', M128A),
('Xmm1', M128A),
('Xmm2', M128A),
('Xmm3', M128A),
('Xmm4', M128A),
('Xmm5', M128A),
('Xmm6', M128A),
('Xmm7', M128A),
('Xmm8', M128A),
('Xmm9', M128A),
('Xmm10', M128A),
('Xmm11', M128A),
('Xmm12', M128A),
('Xmm13', M128A),
('Xmm14', M128A),
('Xmm15', M128A)
]
class XMM_SAVE_AREA32(Structure):
_fields_ = [
('ControlWord', WORD),
('StatusWord', WORD),
('TagWord', BYTE),
('Reserved1', BYTE),
('ErrorOpcode', WORD),
('ErrorOffset', DWORD),
('ErrorSelector', WORD),
('Reserved2', WORD),
('DataOffset', DWORD),
('DataSelector', WORD),
('Reserved3', WORD),
('MxCsr', DWORD),
('MxCsr_Mask', DWORD),
('FloatRegisters', ARRAY(M128A, 8)),
('XmmRegisters', ARRAY(M128A, 16)),
('Reserved4', ARRAY(BYTE, 96))
]
SIZE_OF_80387_REGISTERS = 80
class FLOATING_SAVE_AREA(Structure):
_fields_ = [
('ControlWord', DWORD),
('StatusWord', DWORD),
('TagWord', DWORD),
('ErrorOffset', DWORD),
('ErrorSelector', DWORD),
('DataOffset', DWORD),
('DataSelector', DWORD),
('RegisterArea', ARRAY(BYTE, SIZE_OF_80387_REGISTERS)),
('Cr0NpxState', DWORD)
]
MAXIMUM_SUPPORTED_EXTENSION = 512
class CONTEXT(Structure):
_fields_ = [
('ContextFlags', DWORD),
('Dr0', DWORD),
('Dr1', DWORD),
('Dr2', DWORD),
('Dr3', DWORD),
('Dr6', DWORD),
('Dr7', DWORD),
('FloatSave', FLOATING_SAVE_AREA),
('SegGs', DWORD),
('SegFs', DWORD),
('SegEs', DWORD),
('SegDs', DWORD),
('Edi', DWORD),
('Esi', DWORD),
('Ebx', DWORD),
('Edx', DWORD),
('Ecx', DWORD),
('Eax', DWORD),
('Ebp', DWORD),
('Eip', DWORD),
('SegCs', DWORD),
('EFlags', DWORD),
('Esp', DWORD),
('SegSs', DWORD),
('ExtendedRegisters', ARRAY(BYTE, MAXIMUM_SUPPORTED_EXTENSION))
]
## other win32 stuff
HANDLE = c_voidp
class CLIENT_ID(Structure):
_fields_ = [
('UniqueProcess', HANDLE),
('UniqueThread', HANDLE)
]
ThreadBasicInformation = 0 # _THREADINFOCLASS
KAFFINITY = KPRIORITY = c_ulong
PVOID = c_voidp
NTSTATUS = c_long
class THREAD_BASIC_INFORMATION(Structure):
_fields_ = [
('ExitStatus', NTSTATUS),
('TebBaseAddress', PVOID),
('ClientId', CLIENT_ID),
('AffinityMask', KAFFINITY),
('Priority', KPRIORITY),
('BasePriority', KPRIORITY),
]
## token shit
class LUID(Structure):
_fields_ = [
('LowPart', DWORD),
('HighPart', LONG)
]
class LUID_AND_ATTRIBUTES(Structure):
_fields_ = [
('Luid', LUID),
('Attributes', DWORD)
]
class TOKEN_PRIVILEGES(Structure):
_fields_ = [
('PrivilegeCount', ULONG),
('Privileges', LUID_AND_ATTRIBUTES*1)
]
SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_REMOVED = 0X00000004
SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000
SE_PRIVILEGE_VALID_ATTRIBUTES = (SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS)
PRIVILEGE_SET_ALL_NECESSARY = (1)
class PRIVILEGE_SET(Structure):
_fields_ = [
('PrivilegeCount', DWORD),
('Control', DWORD),
('Privilege', LUID_AND_ATTRIBUTES*1)
]
## token constants
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATE = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_ALL_ACCESS_P = STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY | TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_ALL_ACCESS = TOKEN_ALL_ACCESS_P | TOKEN_ADJUST_SESSIONID
TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY
TOKEN_WRITE = STANDARD_RIGHTS_WRITE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
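## Illustrative sanity check (not part of the original module): on 32-bit
## Windows the x86 CONTEXT record is 0x2CC (716) bytes, which is the size
## kernel32!GetThreadContext expects for x86 targets.
#
#   assert sizeof(CONTEXT) == 0x2CC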
|
{
"content_hash": "22b31287b49e6e28b206f667ee0af9c9",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 212,
"avg_line_length": 28.851301115241636,
"alnum_prop": 0.6527509341579694,
"repo_name": "arizvisa/syringe",
"id": "f98e48acde11693bff6ed5fb4766d8def3add1cd",
"size": "7811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/memorymanager/win32context.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "22844"
},
{
"name": "C",
"bytes": "11095"
},
{
"name": "HTML",
"bytes": "1761"
},
{
"name": "Makefile",
"bytes": "1228"
},
{
"name": "Perl",
"bytes": "9176"
},
{
"name": "Python",
"bytes": "4312979"
},
{
"name": "Shell",
"bytes": "171"
},
{
"name": "XQuery",
"bytes": "1884"
},
{
"name": "XSLT",
"bytes": "10518"
}
],
"symlink_target": ""
}
|
import requests
from collections import OrderedDict
a = requests.get('http://bot.notenoughmods.com/1.6.2.json').json()
b = requests.get('http://bot.notenoughmods.com/1.6.4.json').json()
a = {x.pop('name'): x for x in a}
b = {x.pop('name'): x for x in b}
unique_a = set(a) - set(b)
unique_b = set(b) - set(a)
equals = []
diffs = {}
for mod in set(a) & set(b):
# all mods in here are common, let's compare their info
if a[mod] == b[mod]:
# info is the same
equals.append(mod)
else:
# info is different, but what info
diffs[mod] = {}
# join their keys and de-duplicate them
for key in set(a[mod].keys() + b[mod].keys()):
# we can ignore certain keys
if key in ('shorturl', 'aliases'):
continue
val_a = a[mod].get(key, '')
val_b = b[mod].get(key, '')
if val_a != val_b:
diffs[mod][key] = (val_a, val_b)
print "Unique in A (total %d):" % len(unique_a)
for mod in sorted(unique_a, key=lambda s: s.lower()):
print mod.encode("utf-8")
print
print "Unique in B (total %d):" % len(unique_b)
for mod in sorted(unique_b, key=lambda s: s.lower()):
print mod.encode("utf-8")
print
print "Common mods, same information (total %d):" % len(equals)
for mod in sorted(equals, key=lambda s: s.lower()):
print mod.encode("utf-8")
print
print "Common mods, different info (total %d):" % len(diffs)
for mod, values in OrderedDict(sorted(diffs.items(), key=lambda s: s[0].lower())).iteritems():
print "%s:" % mod.encode("utf-8")
for k, v in values.iteritems():
print " %s: %s -> %s" % (k, v[0] if v[0] else 'None', v[1] if v[1] else 'None')
|
{
"content_hash": "19c6f963703738079d5c5376a7e1ec85",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 94,
"avg_line_length": 33.24528301886792,
"alnum_prop": 0.5590238365493757,
"repo_name": "Pyker/nem-list-diff",
"id": "87e1d443e8662b843bce906ce9e5631ad15da593",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nem-list-diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1696"
}
],
"symlink_target": ""
}
|
import mxnet as mx
import numpy as np
from mxnet.executor_manager import _split_input_slice
from rcnn.config import config
from rcnn.io.image import tensor_vstack
from rcnn.io.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor
from rcnn.io.rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
def __init__(self, roidb, batch_size=1, shuffle=False,
has_rpn=False):
super(TestLoader, self).__init__()
# save parameters as properties
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.has_rpn = has_rpn
# infer properties from roidb
self.size = len(self.roidb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
if has_rpn:
self.data_name = ['data', 'im_info']
else:
self.data_name = ['data', 'rois']
self.label_name = None
# status variable for synchronization between get_data and get_label
self.cur = 0
self.data = None
self.label = None
self.im_info = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
@property
def provide_label(self):
return None
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.im_info, \
mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
if self.has_rpn:
data, label, im_info = get_rpn_testbatch(roidb)
else:
data, label, im_info = get_rcnn_testbatch(roidb)
self.data = [mx.nd.array(data[name]) for name in self.data_name]
self.im_info = im_info
class ROIIter(mx.io.DataIter):
def __init__(self, roidb, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
"""
        This Iter will provide ROI data to the Fast R-CNN network
:param roidb: must be preprocessed
:param batch_size: must divide BATCH_SIZE(128)
:param shuffle: bool
:param ctx: list of contexts
:param work_load_list: list of work load
:param aspect_grouping: group images with similar aspects
:return: ROIIter
"""
super(ROIIter, self).__init__()
# save parameters as properties
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.ctx = ctx
if self.ctx is None:
self.ctx = [mx.cpu()]
self.work_load_list = work_load_list
self.aspect_grouping = aspect_grouping
# infer properties from roidb
self.size = len(roidb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
self.data_name = ['data', 'rois']
self.label_name = ['label', 'bbox_target', 'bbox_weight']
# status variable for synchronization between get_data and get_label
self.cur = 0
self.batch = None
self.data = None
self.label = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
@property
def provide_label(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label)]
def reset(self):
self.cur = 0
if self.shuffle:
if self.aspect_grouping:
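                # group images by orientation (landscape vs. portrait) so each
                # batch holds images of similar aspect ratio, then shuffle at
                # the batch level; this keeps per-batch padding small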
widths = np.array([r['width'] for r in self.roidb])
heights = np.array([r['height'] for r in self.roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # inds[:-extra] would be empty when extra == 0, so slice explicitly
                stop = inds.shape[0] - extra
                inds_ = np.reshape(inds[:stop], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:stop] = np.reshape(inds_[row_perm, :], (-1,))
self.index = inds
else:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
        return self.cur // self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
# slice roidb
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
# decide multi device slices
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
"Invalid settings for work load. "
slices = _split_input_slice(self.batch_size, work_load_list)
# get each device
data_list = []
label_list = []
for islice in slices:
iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
data, label = get_rcnn_batch(iroidb)
data_list.append(data)
label_list.append(label)
all_data = dict()
for key in data_list[0].keys():
all_data[key] = tensor_vstack([batch[key] for batch in data_list])
all_label = dict()
for key in label_list[0].keys():
all_label[key] = tensor_vstack([batch[key] for batch in label_list])
self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
class AnchorLoader(mx.io.DataIter):
def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
aspect_grouping=False):
"""
        This Iter will provide anchor labels and data to the RPN network
:param feat_sym: to infer shape of assign_output
:param roidb: must be preprocessed
:param batch_size: must divide BATCH_SIZE(128)
:param shuffle: bool
:param ctx: list of contexts
:param work_load_list: list of work load
:param aspect_grouping: group images with similar aspects
:return: AnchorLoader
"""
super(AnchorLoader, self).__init__()
# save parameters as properties
self.feat_sym = feat_sym
self.roidb = roidb
self.batch_size = batch_size
self.shuffle = shuffle
self.ctx = ctx
if self.ctx is None:
self.ctx = [mx.cpu()]
self.work_load_list = work_load_list
self.feat_stride = feat_stride
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.allowed_border = allowed_border
self.aspect_grouping = aspect_grouping
# infer properties from roidb
self.size = len(roidb)
self.index = np.arange(self.size)
# decide data and label names
if config.TRAIN.END2END:
self.data_name = ['data', 'im_info', 'gt_boxes']
else:
self.data_name = ['data']
self.label_name = ['label', 'bbox_target', 'bbox_weight']
# status variable for synchronization between get_data and get_label
self.cur = 0
self.batch = None
self.data = None
self.label = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
@property
def provide_label(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label)]
def reset(self):
self.cur = 0
if self.shuffle:
if self.aspect_grouping:
widths = np.array([r['width'] for r in self.roidb])
heights = np.array([r['height'] for r in self.roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # inds[:-extra] would be empty when extra == 0, so slice explicitly
                stop = inds.shape[0] - extra
                inds_ = np.reshape(inds[:stop], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:stop] = np.reshape(inds_[row_perm, :], (-1,))
self.index = inds
else:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
        return self.cur // self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def infer_shape(self, max_data_shape=None, max_label_shape=None):
""" Return maximum data and label shape for single gpu """
if max_data_shape is None:
max_data_shape = []
if max_label_shape is None:
max_label_shape = []
max_shapes = dict(max_data_shape + max_label_shape)
input_batch_size = max_shapes['data'][0]
im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
_, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
label = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
label = [label[k] for k in self.label_name]
label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
return max_data_shape, label_shape
def get_batch(self):
# slice roidb
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
# decide multi device slice
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
"Invalid settings for work load. "
slices = _split_input_slice(self.batch_size, work_load_list)
# get testing data for multigpu
data_list = []
label_list = []
for islice in slices:
iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
data, label = get_rpn_batch(iroidb)
data_list.append(data)
label_list.append(label)
# pad data first and then assign anchor (read label)
data_tensor = tensor_vstack([batch['data'] for batch in data_list])
for data, data_pad in zip(data_list, data_tensor):
data['data'] = data_pad[np.newaxis, :]
new_label_list = []
for data, label in zip(data_list, label_list):
# infer label shape
data_shape = {k: v.shape for k, v in data.items()}
del data_shape['im_info']
_, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
feat_shape = [int(i) for i in feat_shape[0]]
# add gt_boxes to data for e2e
data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
# assign anchor for label
label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
self.feat_stride, self.anchor_scales,
self.anchor_ratios, self.allowed_border)
new_label_list.append(label)
all_data = dict()
for key in self.data_name:
all_data[key] = tensor_vstack([batch[key] for batch in data_list])
all_label = dict()
for key in self.label_name:
pad = -1 if key == 'label' else 0
all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
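# Illustrative usage of AnchorLoader (not part of the original module;
# `feat_sym` is the RPN feature symbol and `roidb` a preprocessed roidb):
#   train_data = AnchorLoader(feat_sym, roidb, batch_size=1, shuffle=True,
#                             ctx=[mx.cpu()], feat_stride=16)
#   for data_batch in train_data:
#       pass  # feed data_batch to module.forward_backward(data_batch)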
|
{
"content_hash": "54985a9a79538806857bb477d7b35e41",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 114,
"avg_line_length": 37.22959183673469,
"alnum_prop": 0.5626284774564889,
"repo_name": "Guneet-Dhillon/mxnet",
"id": "826ee20f080cba15a9df3d7217a6ce2f23a632c7",
"size": "15380",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "example/rcnn/rcnn/core/loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "100739"
},
{
"name": "C++",
"bytes": "3623658"
},
{
"name": "CMake",
"bytes": "53484"
},
{
"name": "Cuda",
"bytes": "672443"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "20406"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40688"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "687661"
},
{
"name": "Perl 6",
"bytes": "4176"
},
{
"name": "Python",
"bytes": "3836348"
},
{
"name": "R",
"bytes": "324489"
},
{
"name": "Rebol",
"bytes": "353"
},
{
"name": "Scala",
"bytes": "884082"
},
{
"name": "Shell",
"bytes": "206574"
}
],
"symlink_target": ""
}
|
from git.objects import (Commit, Tree)
import io
import os
import git.refs
from gitdb import IStream
from gitdb.util import bin_to_hex
from git.objects.util import altz_to_utctz_str
from io import StringIO, BytesIO
from collections import deque
blob_mode = '100644'
tree_mode = '40000'
def tree_item_str(mode, file_name, binsha):
    if mode[0] == '0':
mode = mode[1:]
mode = mode.encode('utf-8')
if isinstance(file_name, str):
file_name = file_name.encode('utf-8')
# return '{} {}\0{}'.format(mode, file_name, binsha).encode('utf-8')
return b'\0'.join((b' '.join((mode, file_name)), binsha))
def write_blob_from_path(odb, src_path):
assert os.path.isfile(src_path) and not os.path.islink(src_path)
istream = IStream("blob", os.path.getsize(src_path), io.open(src_path, 'rb'))
odb.store(istream)
return (blob_mode, istream.binsha)
def write_blob_from_file(odb, f, line_size):
if line_size == 0:
blob_body = ''.encode('utf-8')
else:
lines = [f.readline() for i in range(line_size)]
blob_body = ''.join(lines).encode('utf-8')
istream = IStream("blob", len(blob_body), BytesIO(blob_body))
odb.store(istream)
return (blob_mode, istream.binsha)
def write_syntax_tree_from_file(odb, src_path):
if not os.path.isfile(src_path):
raise Exception('{} is not a file'.format(src_path))
f = open(src_path)
line = f.readline()
trees = [[]]
while line:
header, info = line[0:4], line[5:].rstrip()
if header == '[BN]':
# Blob entry format is following:
# [BN] blob_name
blob_name = info
line = f.readline()
header, info = line[0:4], line[5:].rstrip()
assert header == '[BI]'
(mode, binsha) = write_blob_from_file(odb, f, int(info))
trees[-1].append((mode, binsha, blob_name))
elif header == '[TS]':
# Contents of tree start from [TS].
# [TS] tree_name
trees.append([])
elif header == '[TE]':
# Contents of tree end by [TE].
# [TE] tree_name
tree_name = info
(mode, binsha) = mktree_from_iter(odb, trees.pop())
trees[-1].append((mode, binsha, tree_name))
line = f.readline()
    (mode, binsha) = mktree_from_iter(odb, trees.pop())
    f.close()
    return (mode, binsha)
def write_tree(odb, src_path):
assert os.path.isdir(src_path) and not os.path.islink(src_path)
items = []
for file in sorted(os.listdir(src_path)):
(mode, binsha) = write_path(odb, os.path.join(src_path, file))
items.append(tree_item_str(mode, file, binsha))
    items_str = b''.join(items)  # entries are bytes; joining as str would fail on binary shas
istream = IStream("tree", len(items_str), BytesIO(items_str))
odb.store(istream)
return (tree_mode, istream.binsha)
def write_path(odb, src_path):
if os.path.isfile(src_path):
return write_blob_from_path(odb, src_path)
elif os.path.isdir(src_path):
return write_tree(odb, src_path)
raise Exception('{} is not a valid file or directory'.format(src_path))
def write_paths(odb, paths, names):
items = []
for (path, name) in zip(paths, names):
(mode, binsha) = write_path(odb, path)
items.append(tree_item_str(mode, name, binsha))
    items_str = b''.join(items)
istream = IStream("tree", len(items_str), BytesIO(items_str))
odb.store(istream)
return (tree_mode, istream.binsha)
def mktree(odb, modes, binshas, names):
items = [tree_item_str(mode, name, binsha) for mode, binsha, name in zip(modes, binshas, names)]
    items_str = b''.join(items)
istream = IStream("tree", len(items_str), BytesIO(items_str))
odb.store(istream)
return (tree_mode, istream.binsha)
def mktree_from_iter(odb, object_info_iter):
items = [tree_item_str(mode, name, binsha) for mode, binsha, name in object_info_iter]
items_str = b''.join(items)
istream = IStream("tree", len(items_str), BytesIO(items_str))
odb.store(istream)
return (tree_mode, istream.binsha)
def commit_from_binsha(repo, binsha, org_commit, parents=None):
tree = Tree.new(repo, bin_to_hex(binsha).decode('utf-8'))
env = os.environ
offset = altz_to_utctz_str(org_commit.author_tz_offset)
date = org_commit.authored_date
env[Commit.env_author_date] = '{} {}'.format(date, offset)
offset = altz_to_utctz_str(org_commit.committer_tz_offset)
date = org_commit.committed_date
env[Commit.env_committer_date] = '{} {}'.format(date, offset)
return Commit.create_from_tree(repo, tree, org_commit.message, parents,
head=True,
author=org_commit.author,
committer=org_commit.committer)
def create_note(repo, message):
kwargs = ['add', '-f', '-m', message]
repo.git.notes(kwargs)
def get_reversed_topological_ordered_commits(repo, refs):
revs = [repo.commit(ref) for ref in refs]
nodes = deque(revs)
visited_hexsha = set()
visited_commits = []
while nodes:
node = nodes.pop()
if node.hexsha in visited_hexsha:
continue
children = [parent for parent in node.parents if parent.hexsha not in visited_hexsha]
if children:
nodes.append(node)
nodes.extend(children)
else:
visited_hexsha.add(node.hexsha)
visited_commits.append(node)
return visited_commits
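# Illustrative usage (not part of the original module; assumes a GitPython
# `repo` whose object database is exposed as `repo.odb` and an existing
# directory tree on disk):
#   mode, binsha = write_path(repo.odb, '/path/to/some_dir')
#   new_commit = commit_from_binsha(repo, binsha, repo.head.commit)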
|
{
"content_hash": "bb09391dc6a2cabe4d49822365f3b306",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 100,
"avg_line_length": 31.258426966292134,
"alnum_prop": 0.6044212796549245,
"repo_name": "niyaton/kenja",
"id": "0973ca3a6daab0553ed256db8d4f1430293a14d0",
"size": "5564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kenja/git/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78086"
},
{
"name": "Shell",
"bytes": "1337"
}
],
"symlink_target": ""
}
|
"""
Description:
This program uses Yelp's Fusion API to:
(1) Query for a specific business --->>> GET https://api.yelp.com/v3/businesses/search
using the criteria: "categories", "location", "name" in the query string, and
(2) Once the page url is extracted for a business, scrape the reviews and ratings for
that specific business
(3) Query for a set number of business reviews using the passed value in "reviews"
Sample usage of the program, by running it at the command line --->>>
python get-yelp-reviews-v7.py --categories="pizza" --location="New York, NY" --name="Patsys Pizzeria" --reviews="5"
Alternatively, run without input ( hard-coded default values will take over... ), as such --->>>
python get-yelp-reviews.py
"""
from __future__ import print_function
import pprint
import requests
import sys
import string
import urllib
from flask import Flask, request, jsonify
from flask import abort, make_response, url_for
from flask_httpauth import HTTPBasicAuth  # direct import; the flask.ext namespace is deprecated
from lxml import html
from urllib import quote, urlencode
from urllib2 import HTTPError
# Before accessing the Fusion API endpoint, you must create an
# app and credentials. Please go to the following link, and once
# you have them, plug them in below before attempting to run
# the code --->>> https://www.yelp.com/developers/v3/manage_app
CLIENT_ID = 'insert-creds-here'
CLIENT_SECRET = 'insert-creds-here'
# API constants for YELP's API
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come AFTER slash.
REVIEWS_PATH = '/reviews' # Business ID will come BEFORE slash.
TOKEN_PATH = '/oauth2/token'
GRANT_TYPE = 'client_credentials'
# DEFAULT search categories if none are provided by the User.
DEFAULT_CATEGORIES = 'pizza'
DEFAULT_LOCATION = 'New York, NY'
DEFAULT_REVIEWS_SEARCH_LIMIT = 3
DEFAULT_NAME = "Juliana's Pizza"
BUSINESS_SEARCH_LIMIT = 0
REVIEWS_SEARCH_LIMIT = 0
app = Flask(__name__, static_url_path = "")
auth = HTTPBasicAuth()
@auth.get_password
def get_password(username):
if username == 'claudia123':
return 'reviews123'
return None
@auth.error_handler
def unauthorized():
# return 403 instead of 401 to prevent browsers from displaying the default auth dialog
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'What you\'re looking for was not found.'}), 404)
#2 - Second function called - REMOVE this COMMENT AT WILL.
def get_bearer_token(host, path):
"""Given a bearer token, sends a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
url_params (dict): An optional set of query parameters in the request.
Returns:
str: OAuth bearer token -- using client_id and client_secret.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
assert CLIENT_ID, "Please supply your client_id."
assert CLIENT_SECRET, "Please supply your client_secret."
data = urlencode({
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': GRANT_TYPE,
})
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
response = requests.request('POST', url, data=data, headers=headers)
bearer_token = response.json()['access_token']
return bearer_token
#5
def make_public_reviews(uri_params):
    '''
    Return the full URI that identifies this reviews query, so that clients
    receive a URI that is ready to be used. This small helper generates a
    "public" version of the result to send to the client.
    '''
new_final_reviews = url_for('main', categories=uri_params[0], location=uri_params[1], \
name=uri_params[2], reviews_limit=uri_params[3], \
_external=True)
return new_final_reviews
#4 - Fourth function called - REMOVE this COMMENT AT WILL.
def get_reviews(name, business_id, page, reviews_limit):
"""
Query the Business API by business ID.
{ "business_id": "julianas-pizza-brooklyn-5",
"name": "Juliana's Pizza"
}
"""
page = requests.get(page)
tree = html.fromstring(page.content)
#This will create a list of reviews:
all_reviews = tree.xpath('//p[@itemprop="description"]/text()')
ratings = tree.xpath(".//div[contains(@class,'rating-large')]//@title")
review = {}
final_reviews = []
    count = int(reviews_limit)  # converts passed value to integer for proper processing
new_count = 0
for i in all_reviews:
if new_count < count:
review['review'] = filter(lambda x: x in string.printable, i)
review['rating'] = ratings[new_count]
final_reviews.append(review)
review = {}
new_count += 1
print('---------')
print(u'Returning {0} reviews for {1}...'.format(new_count, name))
print('---------')
return final_reviews
#3 - Third function called - REMOVE this COMMENT AT WILL.
def send_request(host, path, bearer_token, url_params=None):
"""Given a bearer token, send a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
bearer_token (str): OAuth bearer token, obtained using client_id and client_secret.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {'Authorization': 'Bearer %s' % bearer_token}
print(u'Querying {0} ...'.format(url))
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
#1 - First function called - REMOVE this COMMENT AT WILL.
def get_businesses(categories, location, name, reviews_limit):
"""
(1) gets a user token
(2) Sends a fully qualified request to the API for a business with specific criteria
    (3) Searches the response JSON object for a specific business by name
(4) If name is found and the business has a review count > 0, retrieves a specified
number of reviews but < 10 per requirements ( this can be changed ... )
"""
business_id = ''
business_rating = ''
uri_params = []
url_params = {'categories': categories.replace(' ', '+'),
'location': location.replace(' ', '+'),
'limit': BUSINESS_SEARCH_LIMIT #not currently being used - set to 0, no param passed in
}
#2 - gets token
bearer_token = get_bearer_token(API_HOST, TOKEN_PATH)
#3 - sends fully qualified request
response = send_request(API_HOST, SEARCH_PATH, bearer_token, url_params)
businesses = response.get('businesses')
    print(len(businesses))  # print length of businesses list -- REMOVE AT WILL
print('---------')
name_found = 0
for i in businesses:
if i['name'] == name:
name_found = 1
business_id = i['id']
business_name = i['name']
business_rating = i['rating']
review_count = i['review_count']
page = i['url']
print(u'ID: {0} NAME: {1} RATING: {2} REVIEW COUNT: {3} PAGE: {4}'.format(business_id, \
business_name, business_rating, review_count, page))
break
if name_found == 0:
print(u'No businesses for {0} in {1} with the name {2} found.'.format(categories, location, name))
return
print('---------')
print(u'Match found, querying for ratings for: "{0}" in {1}...'.format(business_name, location))
print('---------')
#4 - If business has reviews, get reviews using retrieved business_id
if review_count > 0:
        if review_count < int(reviews_limit):  # only retrieve the number of reviews specified by the criteria
print('---------')
print(u'actual review count: {0} vs. reviews limit you provided: {1}'.format(review_count, reviews_limit))
print('---------')
            print(u'Fewer reviews than you requested were found for {0}'.format(name))
#4 - gets a public version of the reviews
uri_params.extend([categories, location, name, reviews_limit])
final_reviews = {'name':'',
'uri':'',
'reviews':''}
final_reviews['name'] = name
final_reviews['uri'] = make_public_reviews(uri_params)
#5 - gets reviews for the business based on limit passed
reviews = get_reviews(name, business_id, page, reviews_limit)
final_reviews['reviews'] = reviews
pprint.pprint(final_reviews)
return final_reviews
else:
print(u'No Reviews are available for {0}.'.format(name))
return
#0 Logic starting point - REMOVE this COMMENT AT WILL.
@app.route('/get-reviews/api/v1.0/reviews/<categories>/<location>/<name>/<reviews_limit>', methods=['GET'])
@auth.login_required
def main(categories, location, name, reviews_limit):
"""
(1) Makes a call to "get_business" passing the user defined args.
(2) Arguments are:
--- categories (str): Defines the categories to search for ( e.g. "pizza")
--- location (str): Limits the query to a geographic location (e.g. "New York, NY")
    --- name (str): Limits the query to a specific locale, by name
--- reviews (str): Limits the number of reviews to be returned as < 10
(3) If no arguments are provided, default values will kick in
"""
name = name.replace('+', ' ') #clean up all the things ...
location = location.replace('+', ' ')
print('---------')
print(name, location)
print('---------')
try:
#1 Look up if a business exists with the passed criteria
reviews = get_businesses(categories, location, name, reviews_limit)
if not reviews:
abort(404)
return jsonify(reviews)
except HTTPError as error:
sys.exit(
'Encountered HTTP error {0} on {1}:\n {2}\nAbort program.'.format(
error.code,
error.url,
error.read(),
)
)
if __name__ == '__main__':
app.run(debug=True)
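# Illustrative request against the locally running server (not part of the
# original script; credentials and values mirror the defaults defined above,
# and the port is Flask's default):
#   curl -u claudia123:reviews123 \
#     "http://127.0.0.1:5000/get-reviews/api/v1.0/reviews/pizza/New+York,+NY/Juliana's+Pizza/3"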
|
{
"content_hash": "19a1e1e717023ffe1687b53983078224",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 118,
"avg_line_length": 38.138297872340424,
"alnum_prop": 0.6294746629474663,
"repo_name": "mmillions/get-yelp-reviews",
"id": "a095c8dcb44494f57d1db43005d286953aa099a5",
"size": "10798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get-yelp-reviews-API.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10798"
}
],
"symlink_target": ""
}
|
"""Common utility for topi test"""
import tvm
from tvm import topi
_injective_schedule = {
"generic": topi.generic.schedule_injective,
"cpu": topi.x86.schedule_injective,
"arm_cpu": topi.arm_cpu.schedule_injective,
"gpu": topi.cuda.schedule_injective,
"hls": topi.hls.schedule_injective,
}
_reduce_schedule = {
"generic": topi.generic.schedule_reduce,
"cpu": topi.x86.schedule_reduce,
"gpu": topi.cuda.schedule_reduce,
"hls": topi.cuda.schedule_reduce,
}
def dispatch(target, dispatch_map):
if isinstance(target, str):
target = tvm.target.Target(target)
assert isinstance(target, tvm.target.Target)
for key in target.keys:
if key in dispatch_map:
return dispatch_map[key]
return dispatch_map["generic"]
def get_injective_schedule(target):
return dispatch(target, _injective_schedule)
def get_reduce_schedule(target):
return dispatch(target, _reduce_schedule)
get_broadcast_schedule = get_injective_schedule
get_elemwise_schedule = get_injective_schedule
_conv2d_nchw_implement = {
"generic": (topi.nn.conv2d_nchw, topi.generic.schedule_conv2d_nchw),
"cpu": (topi.x86.conv2d_nchw, topi.x86.schedule_conv2d_nchw),
"arm_cpu": (
topi.arm_cpu.conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_conv2d_nchw_spatial_pack,
),
"gpu": (topi.cuda.conv2d_nchw, topi.cuda.schedule_conv2d_nchw),
"mali": (topi.mali.conv2d_nchw_spatial_pack, topi.mali.schedule_conv2d_nchw_spatial_pack),
"bifrost": (
topi.bifrost.conv2d_nchw_spatial_pack,
topi.bifrost.schedule_conv2d_nchw_spatial_pack,
),
"intel_graphics": (topi.intel_graphics.conv2d_nchw, topi.intel_graphics.schedule_conv2d_nchw),
"hls": (topi.nn.conv2d_nchw, topi.hls.schedule_conv2d_nchw),
}
def get_conv2d_nchw_implement(target):
return dispatch(target, _conv2d_nchw_implement)
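# Illustrative usage (not part of the original module; "llvm" is just one
# possible target string):
#   sched_fn = get_injective_schedule("llvm")
#   compute_fn, schedule_fn = get_conv2d_nchw_implement("llvm")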
|
{
"content_hash": "12d8d4cbc059c6a1a7c3c7403d6f9035",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 98,
"avg_line_length": 30.580645161290324,
"alnum_prop": 0.6919831223628692,
"repo_name": "tqchen/tvm",
"id": "51ea19afe7ce610b1d7c631db6050f5c8a6d6850",
"size": "2712",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/tvm/topi/testing/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4104"
},
{
"name": "C",
"bytes": "205781"
},
{
"name": "C++",
"bytes": "8124041"
},
{
"name": "CMake",
"bytes": "135007"
},
{
"name": "Cuda",
"bytes": "6677"
},
{
"name": "Go",
"bytes": "111558"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "200193"
},
{
"name": "JavaScript",
"bytes": "15075"
},
{
"name": "Makefile",
"bytes": "48206"
},
{
"name": "Objective-C",
"bytes": "18506"
},
{
"name": "Objective-C++",
"bytes": "56786"
},
{
"name": "Python",
"bytes": "10300435"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "327078"
},
{
"name": "Shell",
"bytes": "157176"
},
{
"name": "TypeScript",
"bytes": "94435"
}
],
"symlink_target": ""
}
|
from datetime import date
from graphql import (
GraphQLObjectType,
GraphQLField,
GraphQLArgument,
GraphQLString,
GraphQLInt,
GraphQLNonNull,
GraphQLList,
GraphQLInputObjectType,
GraphQLInputObjectField
)
from api.graphql.common import SpotDateType, SpotDatetimeType, SpotIpType, SpotOperationOutputType
import api.resources.flow as Flow
ScoreInputType = GraphQLInputObjectType(
name='NetflowScoreInputType',
fields={
'date': GraphQLInputObjectField(
type=SpotDateType,
description='A reference date for the score process. Defaults to today'
),
'score': GraphQLInputObjectField(
type=GraphQLNonNull(GraphQLInt),
description='A score value, 1->High, 2->Medium, 3->Low'
),
'srcIp': GraphQLInputObjectField(
type=SpotIpType,
description='Source IP to score'
),
'dstIp': GraphQLInputObjectField(
type=SpotIpType,
description='Destination IP to score'
),
'srcPort': GraphQLInputObjectField(
type=GraphQLInt,
description='Source port to score'
),
'dstPort': GraphQLInputObjectField(
type=GraphQLInt,
description='Destination port to score'
)
}
)
ThreatDetailsInputType = GraphQLInputObjectType(
name='NetflowThreatDetailsInputType',
fields={
'firstSeen': GraphQLInputObjectField(
type=SpotDatetimeType,
description='First time two IPs were seen on a particular day of flow traffic data'
),
'lastSeen': GraphQLInputObjectField(
type=SpotDatetimeType,
description='Last time two IPs were seen on a particular day of flow traffic data'
),
'srcIp': GraphQLInputObjectField(
type=SpotIpType,
description='Source IP address'
),
'dstIp': GraphQLInputObjectField(
type=SpotIpType,
description='Destination IP address'
),
'srcPort': GraphQLInputObjectField(
type=GraphQLInt,
description='Source port'
),
'dstPort': GraphQLInputObjectField(
type=GraphQLInt,
description='Destination port'
),
'connections': GraphQLInputObjectField(
type=GraphQLInt,
description='Number of connections on a particular day of flow traffic data'
),
        'maxPkts': GraphQLInputObjectField(
            type=GraphQLInt,
            description='Maximum number of packets transferred on a single connection'
        ),
        'avgPkts': GraphQLInputObjectField(
            type=GraphQLInt,
            description='Average number of packets transferred between IPs'
        ),
        'maxBytes': GraphQLInputObjectField(
            type=GraphQLInt,
            description='Maximum number of bytes transferred on a single connection'
        ),
        'avgBytes': GraphQLInputObjectField(
            type=GraphQLInt,
            description='Average number of bytes transferred between IPs'
        )
}
)
CreateStoryboardInputType = GraphQLInputObjectType(
name='NetflowCreateStoryboardInputType',
fields={
'date': GraphQLInputObjectField(
type=SpotDateType,
description='A reference date for the storyboard being created. Defaults to today'
),
'ip': GraphQLInputObjectField(
type=GraphQLNonNull(SpotIpType),
description='High risk IP address'
),
'title': GraphQLInputObjectField(
type=GraphQLNonNull(GraphQLString),
description='Threat title'
),
'text': GraphQLInputObjectField(
type=GraphQLNonNull(GraphQLString),
description='Threat title description'
),
'threatDetails': GraphQLInputObjectField(
type=GraphQLNonNull(GraphQLList(ThreatDetailsInputType)),
description='Threat details. See NetflowThreatInformation.details'
),
'first': GraphQLInputObjectField(
type=GraphQLInt,
description='The number of records to return'
)
}
)
def _score_connection(args):
results = []
_input = args.get('input')
for cmd in _input:
result = Flow.score_connection(
            date=cmd.get('date', date.today()), score=cmd['score'],
src_ip=cmd.get('srcIp'), src_port=cmd.get('srcPort'),
dst_ip=cmd.get('dstIp'), dst_port=cmd.get('dstPort')
)
results.append({'success': result})
return results
def _create_storyboard(args):
_input = args.get('input')
_date = _input.get('date', date.today())
ip = _input.get('ip')
threat_details = _input.get('threatDetails')
title = _input.get('title')
text = _input.get('text')
first = _input.get('first')
result = Flow.create_storyboard(date=_date, ip=ip, title=title, text=text, expanded_search=threat_details, top_results=first)
return {'success': result}
def _reset_scored_connections(args):
_date = args.get('date', date.today())
result = Flow.reset_scored_connections(date=_date)
return {'success': result}
MutationType = GraphQLObjectType(
name='NetflowMutationType',
fields={
'score': GraphQLField(
type=GraphQLList(SpotOperationOutputType),
description='Sets a score value to connections',
args={
'input': GraphQLArgument(
type=GraphQLNonNull(GraphQLList(GraphQLNonNull(ScoreInputType))),
description='Score criteria'
)
},
resolver=lambda root, args, *_: _score_connection(args)
),
'createStoryboard': GraphQLField(
type=SpotOperationOutputType,
description='Request Spot to create an entry on storyboard for a particular threat',
args={
'input': GraphQLArgument(
type=GraphQLNonNull(CreateStoryboardInputType),
description='Threat information'
)
},
resolver=lambda root, args, *_: _create_storyboard(args)
),
'resetScoredConnections': GraphQLField(
type=SpotOperationOutputType,
description='Resets all scored connections for a certain day',
args={
'date': GraphQLArgument(
type=GraphQLNonNull(SpotDateType),
description='Date to clean'
)
},
resolver=lambda root, args, *_: _reset_scored_connections(args)
)
}
)
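# Illustrative GraphQL document exercising the mutations above (assumes this
# MutationType is mounted at the schema root; all values are placeholders):
#   mutation {
#     score(input: [{score: 1, srcIp: "10.0.0.1", dstIp: "10.0.0.2"}]) {
#       success
#     }
#   }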
|
{
"content_hash": "080032334816e7ee4d51bee6c168012b",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 129,
"avg_line_length": 33.60891089108911,
"alnum_prop": 0.6048018854028575,
"repo_name": "LedaLima/incubator-spot",
"id": "32c4aeaed0c2fb2a7bb6ca7772d207482718c3e3",
"size": "7573",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "spot-oa/api/graphql/flow/mutation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12644"
},
{
"name": "HTML",
"bytes": "76073"
},
{
"name": "JavaScript",
"bytes": "433407"
},
{
"name": "Jupyter Notebook",
"bytes": "74291"
},
{
"name": "Python",
"bytes": "402956"
},
{
"name": "Scala",
"bytes": "282137"
},
{
"name": "Shell",
"bytes": "14832"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2003-present, Jodd Team (http://jodd.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
types = ['boolean', 'byte', 'double', 'float', 'int', 'long', 'short']
for type in types:
clazz = 'Fast' + type.title() + 'Buffer'
filename = clazz + '.java'
f = open(filename, 'w')
template = '''
package jodd.buffer;
import javax.annotation.Generated;
import java.util.Arrays;
/**
* Faster {@code $T} buffer.$J
*/
@Generated("FastXXXBuffer.py")
public class $C {
private $T[] buffer;
private int offset;
/**
* Creates a new {@code $T} buffer. The buffer capacity is
* initially 64 $T s, though its size increases if necessary.
*/
public $C() {
this.buffer = new $T[64];
}
/**
* Creates a new {@code $T} buffer, with a buffer capacity of
* the specified size.
*
* @param size the initial size.
* @throws IllegalArgumentException if size is negative.
*/
public $C(final int size) {
this.buffer = new $T[size];
}
/**
* Grows the buffer.
*/
private void grow(final int minCapacity) {
final int oldCapacity = buffer.length;
int newCapacity = oldCapacity << 1;
if (newCapacity - minCapacity < 0) {
			// special case, min capacity is larger than a grow
newCapacity = minCapacity + 512;
}
buffer = Arrays.copyOf(buffer, newCapacity);
}
/**
* Appends single {@code $T} to buffer.
*/
public void append(final $T element) {
if (offset - buffer.length >= 0) {
grow(offset);
}
buffer[offset++] = element;
}
/**
* Appends {@code $T} array to buffer.
*/
public $C append(final $T[] array, final int off, final int len) {
if (offset + len - buffer.length > 0) {
grow(offset + len);
}
System.arraycopy(array, off, buffer, offset, len);
offset += len;
return this;
}
/**
* Appends {@code $T} array to buffer.
*/
public $C append(final $T[] array) {
return append(array, 0, array.length);
}
/**
* Appends another fast buffer to this one.
*/
public $C append(final $C buff) {
if (buff.offset == 0) {
return this;
}
append(buff.buffer, 0, buff.offset);
return this;
}
/**
* Returns buffer size.
*/
public int size() {
return offset;
}
/**
* Tests if this buffer has no elements.
*/
public boolean isEmpty() {
return offset == 0;
}
/**
* Resets the buffer content.
*/
public void clear() {
offset = 0;
}
/**
* Creates {@code $T} array from buffered content.
*/
public $T[] toArray() {
return Arrays.copyOf(buffer, offset);
}
/**
* Creates {@code $T} subarray from buffered content.
*/
public $T[] toArray(final int start, final int len) {
final $T[] array = new $T[len];
if (len == 0) {
return array;
}
System.arraycopy(buffer, start, array, 0, len);
return array;
}
/**
* Returns {@code $T} element at given index.
*/
public $T get(final int index) {
if (index >= offset) {
throw new IndexOutOfBoundsException();
}
return buffer[index];
}
}
'''
javadoc = ""
if ('byte' == type):
javadoc = "\tWorks faster for smaller buffer sizes. After eg. length of 2048 the performances are practically the same."
data = template.replace('$T', type).replace('$C', clazz).replace('$J', javadoc)
    f.write(data)
    f.close()
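# Running this script writes FastBooleanBuffer.java, FastByteBuffer.java,
# FastDoubleBuffer.java, FastFloatBuffer.java, FastIntBuffer.java,
# FastLongBuffer.java and FastShortBuffer.java to the working directory,
# one generated class per entry in `types`.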
|
{
"content_hash": "00f4d53f6471452f683991fa5c49fdc5",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 128,
"avg_line_length": 24.845303867403317,
"alnum_prop": 0.6644429619746498,
"repo_name": "oblac/jodd",
"id": "ce669f1c6917aa2459130fd0db459d3dd38691b7",
"size": "4497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jodd-core/src/main/python/FastXXXBuffer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3113"
},
{
"name": "Java",
"bytes": "2111083"
},
{
"name": "Python",
"bytes": "37164"
},
{
"name": "Shell",
"bytes": "485"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax',
'sphinx.ext.autosummary', 'sphinx.ext.extlinks', 'numpydoc']
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dask'
copyright = u'2015, Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.1.0'
from dask import __version__ as version
# The full version, including alpha/beta/rc tags.
# release = '0.1.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Taken from docs.readthedocs.io:
# on_rtd is whether we are on readthedocs.io
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'daskdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dask.tex', u'dask Documentation',
u'Dask Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dask', u'dask Documentation',
[u'Dask Development Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dask', u'dask Documentation',
u'Dask Development Team', 'dask', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'dask'
epub_author = u'Dask Development Team'
epub_publisher = u'Continuum Analytics'
epub_copyright = u'2015, Continuum Analytics'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
extlinks = {
'issue': ('https://github.com/dask/dask/issues/%s', 'GH#'),
'pr': ('https://github.com/dask/dask/pull/%s', 'GH#')
}
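# With the extlinks above, reST source can write ``:issue:`1234``` or
# ``:pr:`1234``` to link to the dask GitHub tracker; both render with the
# "GH#" caption prefix.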
|
{
"content_hash": "5e9fdd861fe26fbf9e42f34a7132cfaf",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 80,
"avg_line_length": 31.83275261324042,
"alnum_prop": 0.698007880910683,
"repo_name": "mraspaud/dask",
"id": "4c8576a5ddf8f330d012a4e9686b00fa29e2a422",
"size": "9551",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5216"
},
{
"name": "Python",
"bytes": "1972674"
},
{
"name": "Shell",
"bytes": "4084"
}
],
"symlink_target": ""
}
|
from btlogic import term
a = term(term=[0,1], type="minterm", bit=2)
print a.getMaxterm()
|
{
"content_hash": "774a3a973d08b226372cd4a3cba50e4d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 22.5,
"alnum_prop": 0.7,
"repo_name": "bongtrop/python-btlogic",
"id": "405d82fb5b6519c629944566ee76999a5e316162",
"size": "108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Example/minterm2maxterm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12161"
}
],
"symlink_target": ""
}
|
"""Run a basic simulation of the example origami system."""
from lattice_origami_domains import *
# Specify the initial configuration by setting the input file and step number
#input_file = JSONInputFile('simple_loop_linear.json')
input_file = JSONInputFile('simple_loop.json')
#input_file = JSONInputFile('cyclic_example.json')
#input_file = JSONInputFile('single_domain.json')
step = 0
# Set conditions
temp = 364.5
# Staple strand concentration (M)
strand_M = 1e-3
# Cation concentration (M)
cation_M = 1
# Set up the origami system object
#origami_system = OrigamiSystemEight(input_file, step, temp, cation_M)
origami_system = OrigamiSystemSixteen(input_file, step, temp, strand_M, cation_M)
# Specify moves to be used and associated probabilities
#move_settings = {MOVETYPE.EXCHANGE_STAPLE: 0.4,
# MOVETYPE.REGROW_STAPLE: 0.2,
# MOVETYPE.REGROW_SCAFFOLD: 0.2,
# MOVETYPE.ROTATE_ORIENTATION_VECTOR: 0.2}
move_settings = {MOVETYPE.CB_CONSERVED_TOPOLOGY: 0.5,
MOVETYPE.CB_REGROW_STAPLE: 0.5}
# Specify output file type and name
#output_file_name = 'simple_loop_replica-0.hdf5'
output_file_name = 'single_domain.hdf5'
config_write_freq = 0
count_write_freq = 1
output_file = HDF5OutputFile(output_file_name, origami_system,
config_write_freq=config_write_freq,
count_write_freq=count_write_freq)
# Set up simulation
sim = GCMCSimulation(origami_system, move_settings, output_file)
# Run
N = 100000
sim.run(N)
|
{
"content_hash": "4ce352618b87f29859f0a5e0e9090f69",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 30.489795918367346,
"alnum_prop": 0.715528781793842,
"repo_name": "acumb/LatticeDNAOrigami",
"id": "793322935a135b436a43c7cdee41fb9bc376aed1",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/simulation_examples/simulate_example_origami.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "864124"
},
{
"name": "Makefile",
"bytes": "4422"
},
{
"name": "Python",
"bytes": "128543"
},
{
"name": "Shell",
"bytes": "12891"
},
{
"name": "Tcl",
"bytes": "11261"
},
{
"name": "TeX",
"bytes": "1801"
}
],
"symlink_target": ""
}
|
"""
onedrive-d
A Microsoft OneDrive client for Linux.
:copyright: (c) Xiangyu Bu <xybu92@live.com>
:license: MIT
"""
import os
import pkgutil
__project__ = 'onedrived'
__author__ = 'Xiangyu Bu'
__email__ = 'xybu92@live.com'
__version__ = '2.0.0'
__homepage__ = 'https://github.com/xybu/onedrived-dev'
def mkdir(path, uid, mode=0o700, exist_ok=True):
"""Create a path and set up owner uid."""
os.makedirs(path, mode, exist_ok=exist_ok)
os.chown(path, uid, -1)
def fix_owner_and_timestamp(path, uid, t):
"""
:param str path:
:param int uid:
:param int | float t:
:return:
"""
os.chown(path, uid, -1)
os.utime(path, (t, t))
def get_resource(rel_path, pkg_name='onedrived', is_text=True):
"""
Read a resource file in data/.
:param str rel_path:
:param str pkg_name:
:param True | False is_text: True to indicate the text is UTF-8 encoded.
:return str | bytes: Content of the file.
"""
content = pkgutil.get_data(pkg_name, rel_path)
if is_text:
content = content.decode('utf-8')
return content
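# Illustrative usage (not part of the original module; the uid, path, and
# resource name are hypothetical):
#   mkdir('/home/user/.onedrived', uid=1000)
#   text = get_resource('data/some_resource.txt')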
|
{
"content_hash": "92513daef495a47336a7678b7eba6238",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 76,
"avg_line_length": 23.67391304347826,
"alnum_prop": 0.620752984389348,
"repo_name": "xybu/onedrived-dev",
"id": "d5efe0d69b9d72ef200d496f48280bf66b139f15",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onedrived/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210347"
}
],
"symlink_target": ""
}
|
from six import string_types
from six.moves import map # pylint: disable=redefined-builtin
from .base import Resource, ResourceWithID
class Domain(Resource):
"""
A domain resource, representing a domain name whose DNS is managed by
DigitalOcean's nameservers.
New domains are created via the :meth:`doapi.create_domain` method and can
be retrieved with the :meth:`doapi.fetch_domain` and
:meth:`doapi.fetch_all_domains` methods.
The DigitalOcean API specifies the following fields for domain objects:
:var name: the domain name
:vartype name: string
:var ttl: the time-to-live for the domain's records, in seconds
:vartype ttl: number
:var zone_file: the complete zone file for the domain
:vartype zone_file: string
"""
def __init__(self, state=None, **extra):
if isinstance(state, string_types):
state = {"name": state}
super(Domain, self).__init__(state, **extra)
def __str__(self):
""" Convert the domain to just the actual domain name """
return self.name
@property
def url(self):
""" The endpoint for general operations on the individual domain """
return self._url('/v2/domains/' + self.name)
def fetch(self):
"""
Fetch & return a new `Domain` object representing the domain's current
state
:rtype: Domain
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the domain no longer exists)
"""
api = self.doapi_manager
return api._domain(api.request(self.url)["domain"])
def delete(self):
"""
Delete the domain
:return: `None`
:raises DOAPIError: if the API endpoint replies with an error
"""
self.doapi_manager.request(self.url, method='DELETE')
def _record(self, obj):
"""
Construct a `DomainRecord` object belonging to the domain's `doapi`
object. ``obj`` may be a domain record ID, a dictionary of domain
record fields, or another `DomainRecord` object (which will be
shallow-copied). The resulting `DomainRecord` will only contain the
information in ``obj``; no data will be sent to or from the API
endpoint.
:type obj: integer, `dict`, or `DomainRecord`
:rtype: DomainRecord
"""
return DomainRecord(obj, domain=self, doapi_manager=self.doapi_manager)
@property
def record_url(self):
""" The endpoint for operations on the domain's DNS records """
return self.url + '/records'
def fetch_record(self, obj):
"""
Fetch a domain record by ID number
:param obj: the ID of the record, a `dict` with an ``"id"`` field,
or a `DomainRecord` object (to re-fetch the same record)
:type obj: integer, `dict`, or `DomainRecord`
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error
"""
return self._record(obj).fetch()
def fetch_all_records(self):
r"""
Returns a generator that yields all of the DNS records for the domain
:rtype: generator of `DomainRecord`\ s
:raises DOAPIError: if the API endpoint replies with an error
"""
api = self.doapi_manager
return map(self._record, api.paginate(self.record_url, 'domain_records'))
def create_record(self, type, name, data, priority=None, port=None,
weight=None, **kwargs):
# pylint: disable=redefined-builtin
"""
Add a new DNS record to the domain
:param str type: the type of DNS record to add (``"A"``, ``"CNAME"``,
etc.)
:param str name: the name (hostname, alias, etc.) of the new record
:param str data: the value of the new record
:param int priority: the priority of the new record (SRV and MX records
only)
:param int port: the port that the service is accessible on (SRV
records only)
:param int weight: the weight of records with the same priority (SRV
records only)
:param kwargs: additional fields to include in the API request
:return: the new domain record
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error
"""
api = self.doapi_manager
data = {
"type": type,
"name": name,
"data": data,
"priority": priority,
"port": port,
"weight": weight,
}
data.update(kwargs)
return self._record(api.request(self.record_url, method='POST',
data=data)["domain_record"])
class DomainRecord(ResourceWithID):
"""
A domain record resource, representing an individual DNS record that can be
set & modified by the user of the DigitalOcean API.
New domain records are created via the :meth:`Domain.create_record` method
and can be retrieved with the :meth:`Domain.fetch_record` and
:meth:`Domain.fetch_all_records` methods.
The DigitalOcean API specifies the following fields for domain record
objects:
:var id: a unique identifier for the domain record
:vartype id: int
:var type: the type of the DNS record
:vartype type: string
:var name: the name of the DNS record
:vartype name: string
:var data: the value of the DNS record
:vartype data: string
:var priority: the priority of the record (SRV and MX records only)
:vartype priority: number or `None`
:var port: the port of the record (SRV records only)
:vartype port: number or `None`
:var weight: the weight of the record (SRV records only)
:vartype weight: number or `None`
.. attribute:: domain
The `Domain` to which the record belongs
"""
_meta_attrs = ResourceWithID._meta_attrs + ('domain',)
@property
def url(self):
"""
The endpoint for general operations on the individual domain record
"""
return self.domain.record_url + '/' + str(self.id)
def fetch(self):
"""
Fetch & return a new `DomainRecord` object representing the domain
record's current state
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the domain record no longer exists)
"""
return self.domain._record(self.doapi_manager.request(self.url)\
["domain_record"])
def fetch_domain(self):
"""
Fetch & return the domain resource that the record belongs to
:rtype: Domain
:raises DOAPIError: if the API endpoint replies with an error
"""
return self.domain.fetch()
def update_record(self, **attrs):
# The `_record` is to avoid conflicts with MutableMapping.update.
"""
Update the record, modifying any number of its attributes (except
``id``). ``update_record`` takes the same keyword arguments as
:meth:`Domain.create_record`; pass in only those attributes that you
want to update.
:return: an updated `DomainRecord` object
:rtype: DomainRecord
:raises DOAPIError: if the API endpoint replies with an error
"""
return self.domain._record(self.doapi_manager.request(self.url,
method='PUT',
data=attrs)\
["domain_record"])
def delete(self):
"""
Delete the domain record
:return: `None`
:raises DOAPIError: if the API endpoint replies with an error
"""
self.doapi_manager.request(self.url, method='DELETE')
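# A minimal usage sketch (the manager construction and `fetch_domain` call
# are assumptions about the surrounding package; only the Domain and
# DomainRecord methods defined above are exercised):
#
#   dom = manager.fetch_domain('example.com')
#   rec = dom.create_record('A', 'www', '203.0.113.10')
#   rec = rec.update_record(data='203.0.113.20')
#   rec.delete()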
|
{
"content_hash": "c7648076428c24933ea249a0c84ea9f4",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 81,
"avg_line_length": 34.786026200873366,
"alnum_prop": 0.599548079337183,
"repo_name": "jwodder/doapi",
"id": "fa21342bcdce8788624fcf7a29b3d64f26fd0c09",
"size": "7966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doapi/domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188382"
}
],
"symlink_target": ""
}
|
from qtpy.QtWidgets import QWidget, QSizePolicy
class FormBaseWidget(QWidget):
def __init__(self):
super().__init__()
self.setMaximumWidth(400)
self.setMinimumWidth(400)
sp = self.sizePolicy()
sp.setVerticalPolicy(QSizePolicy.Minimum)
self.setSizePolicy(sp)
|
{
"content_hash": "7f1b6037882a135aaee763e9ff5e5595",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 49,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.6485623003194888,
"repo_name": "NSLS-II-HXN/PyXRF",
"id": "0311ba998257064c50476e8d29d345ce8f06aa1f",
"size": "313",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyxrf/gui_module/form_base_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "326789"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
import numpy as np
def tensor(*args):
return reduce(np.kron, args)
def extend_vib_operator(n_vibrational_levels, m, vib_operator):
"""
Extends the vibrational operator vib_operator, associated with
vibrational mode m, into an operator on the full vibrational subspace
"""
return tensor(np.eye(np.prod(n_vibrational_levels[0:m])),
vib_operator,
np.eye(np.prod(n_vibrational_levels[
m+1:n_vibrational_levels.size])))
def vib_annihilate(N):
"""
Returns the annihilation operator for a vibrational mode with N levels
"""
return np.diag(np.sqrt(np.arange(1, N)), k=1)
def vib_create(N):
"""
Returns the creation operator for a vibrational mode with N levels
"""
return np.diag(np.sqrt(np.arange(1, N)), k=-1)
def unit_vec(n, N, dtype=complex):
"""
Returns the unit vector in direction n in N dimensions.
"""
v = np.zeros(N, dtype=dtype)
v[n] = 1
return v
def _infer_basis_transform_matrix(X, U):
if U.ndim != 2 or U.shape[0] != U.shape[1]:
raise ValueError('basis transformation must be a square matrix')
N = len(U)
if X.shape[-1] == N:
# Hilbert space
pass
elif X.shape[-1] == N ** 2:
# Liouville space
U = np.kron(U, U)
else:
raise ValueError('basis transformation incompatible with '
'operator dimensions')
return U
def basis_transform_operator(X, U):
"""
Transform the operator or super-operator X into the basis given by the
unitary transformation matrix U
Parameters
----------
X : np.ndarray
Operator as a matrix in Hilbert or Liouville space (must be 2d).
U : np.ndarray
Basis transformation matrix in Hilbert space (must be 2d).
Returns
-------
X_prime : np.ndarray
The operator reexpressed in the transformed basis.
References
----------
.. [1] Havel, T. F. Robust procedures for converting among Lindblad, Kraus
and matrix representations of quantum dynamical semigroups. J Math. Phys.
44, 534-557 (2003).
"""
X = np.asarray(X)
U = np.asarray(U)
if X.ndim != 2:
raise ValueError('operator must have ndim=2')
U = _infer_basis_transform_matrix(X, U)
return U.T.conj().dot(X).dot(U)
def basis_transform_vector(rho, U):
"""
Transform the state vector rho into the basis given by the unitary
transformation matrix U
If rho is a multi-dimensional array, this function broadcasts over all
dimensions other than the last one.
Parameters
----------
rho : np.ndarray
State vector in Hilbert or Liouville space.
U : np.ndarray
Basis transformation matrix in Hilbert space (must be 2d).
Returns
-------
rho_prime : np.ndarray
The state vector in the transformed basis.
References
----------
.. [1] Havel, T. F. Robust procedures for converting among Lindblad, Kraus
and matrix representations of quantum dynamical semigroups. J Math. Phys.
44, 534-557 (2003).
"""
rho = np.asarray(rho)
U = np.asarray(U)
U = _infer_basis_transform_matrix(rho, U)
return np.tensordot(rho, U.T.conj(), axes=(-1, -1))
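# For example, rotating a 2x2 operator into a Hadamard-like basis:
#
#   U = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
#   basis_transform_operator(np.diag([0., 1.]), U)
#   # -> [[ 0.5, -0.5],
#   #     [-0.5,  0.5]]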
def all_states(N, subspace='gef'):
"""
List all states in the desired subspace for N pigments
Assumes hard-core bosons (no double-excitations of the same state)
Parameters
----------
N : int
Number of sites.
subspace : container, default 'gef'
Container of any or all of 'g', 'e' and 'f' indicating the desired
subspaces on which the operator is defined.
Returns
-------
states : list
List of all states defined in the desired subspace, where each state is
defined as a list of sites in the excited state
"""
states = []
if 'g' in subspace:
states.append([])
if 'e' in subspace:
for i in xrange(N):
states.append([i])
if 'f' in subspace:
for i in xrange(N):
for j in xrange(i + 1, N):
states.append([i, j])
return states
def operator_1_to_2(operator1):
"""
From the matrix representation of an operator in the 1-excitation subspace,
determine its representation in the 2-excitation subspace
Assumes that given the matrix element :math:`A_{nm}`, the full
representation of the operator is given by:
.. math::
\sum_{n,m} A_{nm} a^\dagger_n a_m
Parameters
----------
operator1 : np.ndarray
Matrix representation of an operator defined on the 1-excitation
subspace
Returns
-------
operator2 : np.ndarray
Matrix representation of the operator defined on the 2-excitation
subspace
"""
states = all_states(len(operator1), 'f')
operator2 = np.zeros((len(states), len(states)), dtype=operator1.dtype)
def delta(i, j):
return int(i == j)
for m in xrange(len(states)):
for n in xrange(len(states)):
(i, j), (k, l) = states[m], states[n]
operator2[m, n] = (operator1[j, l] * delta(i, k) +
operator1[j, k] * delta(i, l) +
operator1[i, l] * delta(j, k) +
operator1[i, k] * delta(j, l))
return operator2
def operator_extend(operator1, subspace='gef'):
"""
Extend an operator defined in the 1-excitation subspace to include the
ground and/or double-excitation subspaces
Assumes that given the matrix element :math:`A_{nm}`, the full
representation of the operator is given by:
.. math::
\sum_{n,m} A_{nm} a^\dagger_n a_m
Parameters
----------
operator1 : np.ndarray
Matrix representation of an operator defined on the 1-excitation
subspace
subspace : container, default 'gef'
Container of any or all of 'g', 'e' and 'f' indicating the desired
subspaces on which the operator is defined.
Returns
-------
out : np.ndarray
Matrix representation of the operator defined on the requested subspace
"""
operators = []
if 'g' in subspace:
operators.append(np.array([[0]]))
if 'e' in subspace:
operators.append(operator1)
if 'f' in subspace:
operators.append(operator_1_to_2(operator1))
sizes = [len(op) for op in operators]
overall_size = sum(sizes)
operator_extended = np.zeros((overall_size, overall_size),
dtype=operator1.dtype)
starts = np.cumsum([0] + sizes[:-1])
ends = np.cumsum(sizes)
for start, end, op in zip(starts, ends, operators):
operator_extended[start:end, start:end] = op
return operator_extended
def transition_operator(n, n_sites, subspace='gef', include_transitions='-+'):
"""
    Calculate the transition operator for creating or removing an excitation
at site n of n_sites overall
Parameters
----------
n : int
Site at which to alter the number of excitations (0-indexed).
n_sites : int
Number of sites.
subspace : container, default 'gef'
Container of any or all of 'g', 'e' and 'f' indicating the desired
subspaces on which the operator is defined.
include_transitions : str, default '-+'
        String containing '-' and/or '+' indicating whether to include
        annihilation and/or creation of an excitation.
Returns
-------
out : np.ndarray
Transition operator in matrix form
"""
states = all_states(n_sites, subspace)
dipole_matrix = np.zeros((len(states), len(states)))
for i in xrange(len(states)):
for j in xrange(len(states)):
if (('+' in include_transitions and
states[i] == sorted(states[j] + [n]))
or ('-' in include_transitions and
sorted(states[i] + [n]) == states[j])):
dipole_matrix[i, j] = 1
return dipole_matrix
class SubspaceError(Exception):
"""
Error class to indicate an invalid subspace
"""
def n_excitations(n_sites=1, n_vibrational_states=1):
"""
Given the number of sites and vibrational states, returns the number of 0-,
1- and 2-excitation states as a three item array
"""
n_exc = np.array([1, n_sites, int(n_sites * (n_sites - 1) / 2)])
return n_exc * n_vibrational_states
def excitation_to_number(excitation):
return {'g': 0, 'e': 1, 'f': 2}[excitation]
def extract_subspace(subspaces_string):
"""
    Given a string describing a subspace in Liouville space or a mapping between subspaces,
returns the minimal containing Hilbert space subspace
"""
return sorted(set(subspaces_string) - {',', '-', '>'},
key=excitation_to_number)
def full_liouville_subspace(subspaces_string):
"""
    Given a string describing a subspace in Liouville space or a mapping between subspaces,
returns the Liouville subspace equal to the tensor product of all
contained Hilbert space subspaces
"""
hilbert_subspaces = extract_subspace(subspaces_string)
return ','.join(a + b for a in hilbert_subspaces for b in hilbert_subspaces)
def hilbert_subspace_index(subspace, all_subspaces, n_sites,
n_vibrational_states=1):
"""
Given a Hilbert subspace 'g', 'e' or 'f' and the set of all subspaces on
which a state is defined, returns a slice object to select all elements in
the given subspace
Examples
--------
>>> hilbert_subspace_index('g', 'gef', 2)
slice(0, 1)
>>> hilbert_subspace_index('e', 'gef', 2)
slice(1, 3)
>>> hilbert_subspace_index('f', 'gef', 2)
slice(3, 4)
"""
n_exc = n_excitations(n_sites, n_vibrational_states)
included_n_exc = ['gef'.index(s) for s in all_subspaces]
breaks = [0] + list(np.cumsum(n_exc[included_n_exc]))
if subspace in all_subspaces:
N = all_subspaces.index(subspace)
return slice(breaks[N], breaks[N + 1])
else:
raise SubspaceError("{} not in set of all subspaces '{}'".format(
subspace, all_subspaces))
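# Minimal self-check of the helpers above (pure numpy; the expected values
# follow directly from the two-site definitions):
if __name__ == '__main__':
    print(all_states(2))                # [[], [0], [1], [0, 1]]
    print(n_excitations(n_sites=2))     # [1 2 1]
    print(operator_extend(np.eye(2)))   # block-diagonal diag(0, 1, 1, 2)
    print(transition_operator(0, 2))    # couples [] <-> [0] and [1] <-> [0, 1]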
|
{
"content_hash": "5ca9a5f14ecbfa2fcdb6246ca1473a54",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 80,
"avg_line_length": 30.652694610778443,
"alnum_prop": 0.6051963274076968,
"repo_name": "shoyer/qspectra",
"id": "67471c192694519bf20bb1c3b218e94e96eedee9",
"size": "10238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qspectra/operator_tools.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "193578"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Any, Dict, NamedTuple, Union, cast
import numpy as np
from onnx import OptionalProto, SequenceProto, TensorProto
TensorDtypeMap = NamedTuple(
"TensorDtypeMap", [("np_dtype", np.dtype), ("storage_dtype", int), ("name", str)]
)
# tensor_dtype: (numpy type, storage type, string name)
TENSOR_TYPE_MAP = {
int(TensorProto.FLOAT): TensorDtypeMap(
np.dtype("float32"), int(TensorProto.FLOAT), "TensorProto.FLOAT"
),
int(TensorProto.UINT8): TensorDtypeMap(
np.dtype("uint8"), int(TensorProto.INT32), "TensorProto.UINT8"
),
int(TensorProto.INT8): TensorDtypeMap(
np.dtype("int8"), int(TensorProto.INT32), "TensorProto.INT8"
),
int(TensorProto.UINT16): TensorDtypeMap(
np.dtype("uint16"), int(TensorProto.INT32), "TensorProto.UINT16"
),
int(TensorProto.INT16): TensorDtypeMap(
np.dtype("int16"), int(TensorProto.INT32), "TensorProto.INT16"
),
int(TensorProto.INT32): TensorDtypeMap(
np.dtype("int32"), int(TensorProto.INT32), "TensorProto.INT32"
),
int(TensorProto.INT64): TensorDtypeMap(
np.dtype("int64"), int(TensorProto.INT64), "TensorProto.INT64"
),
int(TensorProto.BOOL): TensorDtypeMap(
np.dtype("bool"), int(TensorProto.INT32), "TensorProto.BOOL"
),
int(TensorProto.FLOAT16): TensorDtypeMap(
np.dtype("float16"), int(TensorProto.UINT16), "TensorProto.FLOAT16"
),
    # Native numpy does not support bfloat16, so float32 is used for bf16 values for now
    # TODO: ONNX should directly use bfloat16 for bf16 values once numpy supports the bfloat16 type
int(TensorProto.BFLOAT16): TensorDtypeMap(
np.dtype("float32"), int(TensorProto.UINT16), "TensorProto.BFLOAT16"
),
int(TensorProto.DOUBLE): TensorDtypeMap(
np.dtype("float64"), int(TensorProto.DOUBLE), "TensorProto.DOUBLE"
),
int(TensorProto.COMPLEX64): TensorDtypeMap(
np.dtype("complex64"), int(TensorProto.FLOAT), "TensorProto.COMPLEX64"
),
int(TensorProto.COMPLEX128): TensorDtypeMap(
np.dtype("complex128"), int(TensorProto.DOUBLE), "TensorProto.COMPLEX128"
),
int(TensorProto.UINT32): TensorDtypeMap(
np.dtype("uint32"), int(TensorProto.UINT32), "TensorProto.UINT32"
),
int(TensorProto.UINT64): TensorDtypeMap(
np.dtype("uint64"), int(TensorProto.UINT64), "TensorProto.UINT64"
),
int(TensorProto.STRING): TensorDtypeMap(
np.dtype("object"), int(TensorProto.STRING), "TensorProto.STRING"
),
}
class DeprecatedWarningDict(dict): # type: ignore
def __init__(
self,
dictionary: Dict[int, Union[int, str, np.dtype]],
original_function: str,
future_function: str = "",
) -> None:
super().__init__(dictionary)
self._origin_function = original_function
self._future_function = future_function
def __eq__(self, other: object) -> bool:
if not isinstance(other, DeprecatedWarningDict):
return False
return (
self._origin_function == other._origin_function
and self._future_function == other._future_function
)
def __getitem__(self, key: Union[int, str, np.dtype]) -> Any:
if not self._future_function:
warnings.warn(
str(
f"`mapping.{self._origin_function}` is now deprecated and will be removed in the next release or so."
+ "To silence this warning, please simply use if-else statement to get the corresponding value."
),
DeprecationWarning,
stacklevel=2,
)
else:
warnings.warn(
str(
f"`mapping.{self._origin_function}` is now deprecated and will be removed in the next release or so."
+ "To silence this warning, please use `helper.{self._future_function}` instead."
),
DeprecationWarning,
stacklevel=2,
)
return super().__getitem__(key)
# This map is used for converting TensorProto values into numpy arrays
TENSOR_TYPE_TO_NP_TYPE = DeprecatedWarningDict(
{tensor_dtype: value.np_dtype for tensor_dtype, value in TENSOR_TYPE_MAP.items()},
"TENSOR_TYPE_TO_NP_TYPE",
"tensor_dtype_to_np_dtype",
)
# This is only used to get keys into STORAGE_TENSOR_TYPE_TO_FIELD.
# TODO(https://github.com/onnx/onnx/issues/4554): Move these variables into _mapping.py
TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE = DeprecatedWarningDict(
{
tensor_dtype: value.storage_dtype
for tensor_dtype, value in TENSOR_TYPE_MAP.items()
},
"TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE",
"tensor_dtype_to_storage_tensor_dtype",
)
# NP_TYPE_TO_TENSOR_TYPE will eventually be removed
# and _NP_TYPE_TO_TENSOR_TYPE will only be used internally
_NP_TYPE_TO_TENSOR_TYPE = {
v: k for k, v in TENSOR_TYPE_TO_NP_TYPE.items() if k != TensorProto.BFLOAT16
}
# Currently native numpy does not support bfloat16, so TensorProto.BFLOAT16 is ignored for now;
# a numpy float32 array maps back only to TensorProto.FLOAT
NP_TYPE_TO_TENSOR_TYPE = DeprecatedWarningDict(
cast(Dict[int, Union[int, str, Any]], _NP_TYPE_TO_TENSOR_TYPE),
"NP_TYPE_TO_TENSOR_TYPE",
"np_dtype_to_tensor_dtype",
)
# STORAGE_TENSOR_TYPE_TO_FIELD will eventually be removed
# and _STORAGE_TENSOR_TYPE_TO_FIELD will only be used internally
_STORAGE_TENSOR_TYPE_TO_FIELD = {
int(TensorProto.FLOAT): "float_data",
int(TensorProto.INT32): "int32_data",
int(TensorProto.INT64): "int64_data",
int(TensorProto.UINT16): "int32_data",
int(TensorProto.DOUBLE): "double_data",
int(TensorProto.COMPLEX64): "float_data",
int(TensorProto.COMPLEX128): "double_data",
int(TensorProto.UINT32): "uint64_data",
int(TensorProto.UINT64): "uint64_data",
int(TensorProto.STRING): "string_data",
int(TensorProto.BOOL): "int32_data",
}
STORAGE_TENSOR_TYPE_TO_FIELD = DeprecatedWarningDict(
cast(Dict[int, Union[int, str, Any]], _STORAGE_TENSOR_TYPE_TO_FIELD),
"STORAGE_TENSOR_TYPE_TO_FIELD",
)
# This map will be removed and there is no replacement for it
STORAGE_ELEMENT_TYPE_TO_FIELD = DeprecatedWarningDict(
{
int(SequenceProto.TENSOR): "tensor_values",
int(SequenceProto.SPARSE_TENSOR): "sparse_tensor_values",
int(SequenceProto.SEQUENCE): "sequence_values",
int(SequenceProto.MAP): "map_values",
int(OptionalProto.OPTIONAL): "optional_value",
},
"STORAGE_ELEMENT_TYPE_TO_FIELD",
)
# This map will be removed and there is no replacement for it
OPTIONAL_ELEMENT_TYPE_TO_FIELD = DeprecatedWarningDict(
{
int(OptionalProto.TENSOR): "tensor_value",
int(OptionalProto.SPARSE_TENSOR): "sparse_tensor_value",
int(OptionalProto.SEQUENCE): "sequence_value",
int(OptionalProto.MAP): "map_value",
int(OptionalProto.OPTIONAL): "optional_value",
},
"OPTIONAL_ELEMENT_TYPE_TO_FIELD",
)
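# Minimal demonstration of the deprecation behaviour above (uses only this
# module's own imports):
if __name__ == "__main__":
    print(TENSOR_TYPE_MAP[int(TensorProto.FLOAT)].np_dtype)  # float32
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = TENSOR_TYPE_TO_NP_TYPE[int(TensorProto.FLOAT)]  # deprecated path warns
        assert caught and issubclass(caught[-1].category, DeprecationWarning)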
|
{
"content_hash": "75fe3665b8b0c57ee932b6be3d3b0936",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 121,
"avg_line_length": 38.21081081081081,
"alnum_prop": 0.6582260574338662,
"repo_name": "onnx/onnx",
"id": "0c2aaa6c784d8b34fe404f85bb84df6252452171",
"size": "7069",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnx/mapping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "546"
},
{
"name": "C",
"bytes": "2062"
},
{
"name": "C++",
"bytes": "2003844"
},
{
"name": "CMake",
"bytes": "32553"
},
{
"name": "Jupyter Notebook",
"bytes": "29310"
},
{
"name": "PowerShell",
"bytes": "1157"
},
{
"name": "Python",
"bytes": "2073844"
},
{
"name": "Shell",
"bytes": "2918"
}
],
"symlink_target": ""
}
|
__version__ = "0.3.0"
try:
__TRANSIT_SETUP__
except NameError:
__TRANSIT_SETUP__ = False
if not __TRANSIT_SETUP__:
__all__ = ["System", "Central", "Body", "SimpleSystem"]
from .transit import System, Central, Body
from .simple import SimpleSystem
|
{
"content_hash": "f747dad01236fbe22865b0df3ad5dabf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 24.454545454545453,
"alnum_prop": 0.6245353159851301,
"repo_name": "dfm/transit",
"id": "81c456fad1bcee5fb2f4cd81e9b78a377dc03cea",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "76028"
},
{
"name": "Python",
"bytes": "48162"
}
],
"symlink_target": ""
}
|
"""Utilties for testing video models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import video_generated # pylint: disable=unused-import
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import tensorflow as tf
def fill_hparams(hparams, in_frames, out_frames):
hparams.video_num_input_frames = in_frames
hparams.video_num_target_frames = out_frames
problem = registry.problem("video_stochastic_shapes10k")
p_hparams = problem.get_hparams(hparams)
hparams.problem = problem
hparams.problem_hparams = p_hparams
hparams.tiny_mode = True
hparams.reward_prediction = False
return hparams
def action_modalities(hparams):
hparams.problem_hparams.modality = {
"inputs": modalities.VideoModalityL2Raw(hparams, 256),
"input_action": modalities.SymbolModality(hparams, 5),
"targets": modalities.VideoModalityL2Raw(hparams, 256),
"target_action": modalities.SymbolModality(hparams, 5),
}
return hparams
def full_modalities(hparams):
"""Full modalities with actions and rewards."""
hparams.problem_hparams.modality = {
"inputs": modalities.VideoModalityL2Raw(hparams, 256),
"input_reward": modalities.SymbolModality(hparams, 3),
"input_action": modalities.SymbolModality(hparams, 5),
"targets": modalities.VideoModalityL2Raw(hparams, 256),
"target_reward": modalities.SymbolModality(hparams, 3),
"target_action": modalities.SymbolModality(hparams, 5),
}
hparams.force_full_predict = True
return hparams
def create_basic_features(in_frames, out_frames):
x = np.random.randint(0, 256, size=(8, in_frames, 64, 64, 3))
y = np.random.randint(0, 256, size=(8, out_frames, 64, 64, 3))
features = {
"inputs": tf.constant(x, dtype=tf.int32),
"targets": tf.constant(y, dtype=tf.int32),
}
return features
def create_action_features(in_frames, out_frames):
features = create_basic_features(in_frames, out_frames)
x = np.random.randint(0, 5, size=(8, in_frames, 1))
y = np.random.randint(0, 5, size=(8, out_frames, 1))
features["input_action"] = tf.constant(x, dtype=tf.int32)
features["target_action"] = tf.constant(y, dtype=tf.int32)
return features
def create_full_features(in_frames, out_frames):
features = create_basic_features(in_frames, out_frames)
x = np.random.randint(0, 5, size=(8, in_frames, 1))
y = np.random.randint(0, 5, size=(8, out_frames, 1))
features["input_reward"] = tf.constant(x, dtype=tf.int32)
features["target_reward"] = tf.constant(y, dtype=tf.int32)
return features
def get_tensor_shape(tensor):
return tuple([d.value for d in tensor.shape])
class BaseNextFrameTest(tf.test.TestCase):
"""Base helper class for next frame tests."""
def RunModel(self, model, hparams, features):
with tf.Session() as session:
model = model(hparams, tf.estimator.ModeKeys.TRAIN)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
return res
def InferModel(self, model, hparams, features):
with tf.Session() as session:
model = model(hparams, tf.estimator.ModeKeys.PREDICT)
output = model.infer(features)
session.run(tf.global_variables_initializer())
res = session.run(output)
return res
def TestVideoModel(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim,
upsample_method="conv2d_transpose"):
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams.upsample_method = upsample_method
features = create_basic_features(in_frames, out_frames)
output = self.RunModel(model, hparams, features)
targets = features["targets"]
expected_shape = get_tensor_shape(targets) + (expected_last_dim,)
self.assertEqual(output.shape, expected_shape)
def TestVideoModelInfer(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim,
upsample_method="conv2d_transpose"):
del expected_last_dim
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams.upsample_method = upsample_method
features = create_basic_features(in_frames, out_frames)
output = self.InferModel(model, hparams, features)
self.assertTrue(isinstance(output, dict))
self.assertTrue("outputs" in output.keys())
self.assertTrue("scores" in output.keys())
self.assertTrue("targets" in output.keys())
expected_shape = get_tensor_shape(features["targets"])
self.assertEqual(output["targets"].shape, expected_shape)
def TestVideoModelWithActions(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim):
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams = action_modalities(hparams)
hparams.reward_prediction = False
features = create_action_features(in_frames, out_frames)
output = self.RunModel(model, hparams, features)
targets = features["targets"]
expected_shape = get_tensor_shape(targets) + (expected_last_dim,)
self.assertEqual(output.shape, expected_shape)
def TestVideoModelWithActionsInfer(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim):
del expected_last_dim
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams = action_modalities(hparams)
hparams.reward_prediction = False
features = create_action_features(in_frames, out_frames)
output = self.InferModel(model, hparams, features)
self.assertTrue(isinstance(output, dict))
self.assertTrue("outputs" in output.keys())
self.assertTrue("scores" in output.keys())
self.assertTrue("targets" in output.keys())
expected_shape = get_tensor_shape(features["targets"])
self.assertEqual(output["targets"].shape, expected_shape)
def TestVideoModelWithActionAndRewards(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim):
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams = full_modalities(hparams)
hparams.reward_prediction = True
features = create_full_features(in_frames, out_frames)
res = self.RunModel(model, hparams, features)
output, targets = res["targets"], features["targets"]
expected_shape = get_tensor_shape(targets) + (expected_last_dim,)
self.assertEqual(output.shape, expected_shape)
output, targets = res["target_reward"], features["target_reward"]
# Assuming Symbol Modality
expected_shape = get_tensor_shape(targets)[:2] + (1, 1, 1, 1, 3,)
self.assertEqual(output.shape, expected_shape)
def TestVideoModelWithActionAndRewardsInfer(self,
in_frames,
out_frames,
hparams,
model,
expected_last_dim):
del expected_last_dim
hparams = fill_hparams(hparams, in_frames, out_frames)
hparams = full_modalities(hparams)
hparams.reward_prediction = True
features = create_full_features(in_frames, out_frames)
output = self.InferModel(model, hparams, features)
self.assertTrue(isinstance(output, dict))
self.assertTrue("outputs" in output.keys())
self.assertTrue("scores" in output.keys())
self.assertTrue("targets" in output.keys())
self.assertTrue("target_reward" in output.keys())
expected_shape = get_tensor_shape(features["targets"])
self.assertEqual(output["targets"].shape, expected_shape)
expected_shape = get_tensor_shape(features["target_reward"])[:2]
self.assertEqual(output["target_reward"].shape, expected_shape)
def TestOnVariousInputOutputSizes(
self, hparams, model, expected_last_dim, test_infer=True):
test_funcs = [self.TestVideoModel]
if test_infer:
test_funcs += [self.TestVideoModelInfer]
for test_func in test_funcs:
test_func(1, 1, hparams, model, expected_last_dim)
test_func(1, 6, hparams, model, expected_last_dim)
test_func(4, 1, hparams, model, expected_last_dim)
test_func(7, 5, hparams, model, expected_last_dim)
def TestWithActions(self, hparams, model, expected_last_dim, test_infer=True):
test_funcs = [self.TestVideoModelWithActions]
if test_infer:
test_funcs += [self.TestVideoModelWithActionsInfer]
for test_func in test_funcs:
test_func(1, 1, hparams, model, expected_last_dim)
test_func(1, 6, hparams, model, expected_last_dim)
test_func(4, 1, hparams, model, expected_last_dim)
test_func(7, 5, hparams, model, expected_last_dim)
def TestWithActionAndRewards(
self, hparams, model, expected_last_dim, test_infer=True):
test_funcs = [self.TestVideoModelWithActionAndRewards]
if test_infer:
test_funcs += [self.TestVideoModelWithActionAndRewardsInfer]
for test_func in test_funcs:
test_func(1, 1, hparams, model, expected_last_dim)
test_func(1, 6, hparams, model, expected_last_dim)
test_func(4, 1, hparams, model, expected_last_dim)
test_func(7, 5, hparams, model, expected_last_dim)
def TestOnVariousUpSampleLayers(self, hparams, model, expected_last_dim):
self.TestVideoModel(4, 1, hparams, model, expected_last_dim,
upsample_method="bilinear_upsample_conv")
self.TestVideoModel(4, 1, hparams, model, expected_last_dim,
upsample_method="nn_upsample_conv")
|
{
"content_hash": "89758bd99be1d660d2a345b328884391",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 90,
"avg_line_length": 39.10266159695818,
"alnum_prop": 0.6361338000777907,
"repo_name": "mlperf/training_results_v0.5",
"id": "7bda4b09355b7203dec9fdc5f02658c4929afb94",
"size": "10890",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/models/video/tests_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
import pyqtgraph as pg
from acq4.devices.DAQGeneric.taskGUI import DAQGenericTaskGui
from acq4.util import Qt
Ui_Form = Qt.importTemplate('.TaskTemplate')
class CameraTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev, taskRunner, ownUi=False) ## When initializing superclass, make sure it knows this class is creating the ui.
self.ui = Ui_Form()
self.ui.setupUi(self)
self.stateGroup = pg.WidgetGroup(self) ## create state group before DAQ creates its own interface
self.ui.horizSplitter.setStretchFactor(0, 0)
self.ui.horizSplitter.setStretchFactor(1, 1)
DAQGenericTaskGui.createChannelWidgets(self, self.ui.ctrlSplitter, self.ui.plotSplitter)
self.ui.plotSplitter.setStretchFactor(0, 10)
self.ui.plotSplitter.setStretchFactor(1, 1)
self.ui.plotSplitter.setStretchFactor(2, 1)
self.ui.fixedFrameEnabled.toggled.connect(self._setFixedFrameEnable)
self.ui.minFrames.setOpts(int=True, dec=True, step=0.1, minStep=1, compactHeight=False)
## plots should not be storing more than one trace at a time.
for p in self.plots.values():
p.plotItem.ctrl.maxTracesCheck.setChecked(True)
p.plotItem.ctrl.maxTracesSpin.setValue(1)
p.plotItem.ctrl.forgetTracesCheck.setChecked(True)
tModes = self.dev.listParams('triggerMode')[0]
for m in tModes:
self.ui.triggerModeCombo.addItem(m)
self.vLines = []
if 'trigger' in self.plots:
l = pg.InfiniteLine()
self.vLines.append(l)
self.plots['trigger'].addItem(l)
if 'exposure' in self.plots:
l = pg.InfiniteLine()
self.vLines.append(l)
self.plots['exposure'].addItem(l)
self.frameTicks = pg.VTickGroup()
self.frameTicks.setYRange([0.8, 1.0])
self.ui.imageView.sigTimeChanged.connect(self.timeChanged)
self.taskRunner.sigTaskPaused.connect(self.taskPaused)
def _setFixedFrameEnable(self, enable):
self.ui.minFrames.setEnabled(enable)
def timeChanged(self, i, t):
for l in self.vLines:
l.setValue(t)
def saveState(self):
s = self.currentState()
s['daqState'] = DAQGenericTaskGui.saveState(self)
return s
def restoreState(self, state):
self.stateGroup.setState(state)
if 'daqState' in state:
DAQGenericTaskGui.restoreState(self, state['daqState'])
def generateTask(self, params=None):
daqProt = DAQGenericTaskGui.generateTask(self, params)
if params is None:
params = {}
state = self.currentState()
task = {
'record': state['recordCheck'],
'triggerProtocol': state['triggerCheck'],
'params': {
'triggerMode': state['triggerModeCombo']
}
}
task['channels'] = daqProt
if state['releaseBetweenRadio']:
task['pushState'] = None
task['popState'] = None
if state['fixedFrameEnabled']:
task['minFrames'] = state['minFrames']
return task
def taskSequenceStarted(self):
DAQGenericTaskGui.taskSequenceStarted(self)
if self.ui.releaseAfterRadio.isChecked():
# For now, the task gui only changes triggerMode. If we allow
# other parameters to be changed from here, then they will have to be added
# to the list of parameters to push/pop
self.dev.pushState('cam_proto_state', params=['triggerMode'])
def taskFinished(self):
DAQGenericTaskGui.taskFinished(self)
if self.ui.releaseAfterRadio.isChecked():
self.dev.popState('cam_proto_state')
def taskPaused(self): ## If the task is paused, return the camera to its previous state until we start again
if self.ui.releaseAfterRadio.isChecked():
self.dev.popState('cam_proto_state')
self.dev.pushState('cam_proto_state')
def currentState(self):
return self.stateGroup.state()
def handleResult(self, result, params):
state = self.stateGroup.state()
if state['displayCheck']:
if result is None or len(result.frames()) == 0:
print("No images returned from camera task.")
self.ui.imageView.clear()
else:
frameTimes, precise = result.frameTimes()
if precise:
self.ui.imageView.setImage(result.asMetaArray(), xvals=frameTimes)
self.frameTicks.setXVals(frameTimes)
else:
self.ui.imageView.setImage(result.asMetaArray())
DAQGenericTaskGui.handleResult(self, result.daqResult(), params)
def quit(self):
self.ui.imageView.close()
DAQGenericTaskGui.quit(self)
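# For reference, generateTask() above returns a task description shaped
# roughly like this (illustrative values only; the trigger mode string
# depends on the camera's listParams('triggerMode'), and 'channels' comes
# from the DAQGeneric superclass):
#
#     {'record': True,
#      'triggerProtocol': False,
#      'params': {'triggerMode': 'Normal'},
#      'channels': {...},
#      'pushState': None,    # only when "release between" is selected
#      'popState': None,
#      'minFrames': 5}       # only when fixedFrameEnabled is checked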
|
{
"content_hash": "4c9c300cc19f780d5f7160b38cf67128",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 154,
"avg_line_length": 39.34108527131783,
"alnum_prop": 0.609064039408867,
"repo_name": "acq4/acq4",
"id": "a44b9b36de96d20c0e2def7fc120a005f637c721",
"size": "5099",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "acq4/devices/Camera/taskGUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "335"
},
{
"name": "C",
"bytes": "1301111"
},
{
"name": "C++",
"bytes": "340035"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "3503085"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from interfaces import *
from api import *
import declarative
__all__ = ['Schema']
class Schema(FancyValidator):
"""
A schema validates a dictionary of values, applying different
    validators (by key) to the different values.  If
allow_extra_fields=True, keys without validators will be allowed;
otherwise they will raise Invalid. If filter_extra_fields is
set to true, then extra fields are not passed back in the results.
Validators are associated with keys either with a class syntax, or
as keyword arguments (class syntax is usually easier). Something
like::
class MySchema(Schema):
name = Validators.PlainText()
phone = Validators.PhoneNumber()
These will not be available as actual instance variables, but will
be collected in a dictionary. To remove a validator in a subclass
that is present in a superclass, set it to None, like::
class MySubSchema(MySchema):
name = None
"""
# These validators will be applied before this schema:
pre_validators = []
# These validators will be applied after this schema:
chained_validators = []
# If true, then it is not an error when keys that aren't
# associated with a validator are present:
allow_extra_fields = False
# If true, then keys that aren't associated with a validator
# are removed:
filter_extra_fields = False
# If this is given, then any keys that aren't available but
# are expected will be replaced with this value (and then
# validated!) This does not override a present .if_missing
# attribute on validators:
if_key_missing = NoDefault
# If true, then missing keys will be missing in the result,
# if the validator doesn't have if_missing on it already:
ignore_key_missing = False
compound = True
fields = {}
order = []
messages = {
'notExpected': 'The input field %(name)s was not expected.',
'missingValue': "Missing value",
}
__mutableattributes__ = ('fields', 'chained_validators',
'pre_validators')
def __classinit__(cls, new_attrs):
FancyValidator.__classinit__(cls, new_attrs)
# Don't bother doing anything if this is the most parent
# Schema class (which is the only class with just
# FancyValidator as a superclass):
if cls.__bases__ == (FancyValidator,):
return cls
# Scan through the class variables we've defined *just*
# for this subclass, looking for validators (both classes
# and instances):
for key, value in new_attrs.items():
if key in ('pre_validators', 'chained_validators',
'view'):
continue
if is_validator(value):
cls.fields[key] = value
delattr(cls, key)
# This last case means we're overwriting a validator
# from a superclass:
elif cls.fields.has_key(key):
del cls.fields[key]
for name, value in cls.fields.items():
cls.add_field(name, value)
def __initargs__(self, new_attrs):
for key, value in new_attrs.items():
if key in ('pre_validators', 'chained_validators',
'view'):
continue
if is_validator(value):
self.fields[key] = value
delattr(self, key)
# This last case means we're overwriting a validator
# from a superclass:
elif self.fields.has_key(key):
del self.fields[key]
for name, value in self.fields.items():
self.add_field(name, value)
def _to_python(self, value_dict, state):
if not value_dict and self.if_empty is not NoDefault:
return self.if_empty
for validator in self.pre_validators:
value_dict = validator.to_python(value_dict, state)
new = {}
errors = {}
unused = self.fields.keys()
if state is not None:
previous_key = getattr(state, 'key', None)
previous_full_dict = getattr(state, 'full_dict', None)
state.full_dict = value_dict
try:
for name, value in value_dict.items():
try:
unused.remove(name)
except ValueError:
if not self.allow_extra_fields:
raise Invalid(
self.message('notExpected', state,
name=repr(name)),
value_dict, state)
else:
if not self.filter_extra_fields:
new[name] = value
continue
validator = self.fields[name]
try:
new[name] = validator.to_python(value, state)
except Invalid, e:
errors[name] = e
for name in unused:
validator = self.fields[name]
try:
if_missing = validator.if_missing
except AttributeError:
if_missing = NoDefault
if if_missing is NoDefault:
if self.ignore_key_missing:
continue
if self.if_key_missing is NoDefault:
errors[name] = Invalid(
self.message('missingValue', state),
None, state)
else:
try:
new[name] = validator.to_python(self.if_key_missing, state)
except Invalid, e:
errors[name] = e
else:
new[name] = validator.if_missing
if errors:
for validator in self.chained_validators:
if (not hasattr(validator, 'validate_partial')
or not getattr(validator, 'validate_partial_form', False)):
continue
try:
validator.validate_partial(value_dict, state)
except Invalid, e:
sub_errors = e.unpack_errors()
if not isinstance(sub_errors, dict):
# Can't do anything here
continue
merge_dicts(errors, sub_errors)
if errors:
raise Invalid(
format_compound_error(errors),
value_dict, state,
error_dict=errors)
for validator in self.chained_validators:
new = validator.to_python(new, state)
return new
finally:
if state is not None:
state.key = previous_key
state.full_dict = previous_full_dict
def _from_python(self, value_dict, state):
chained = self.chained_validators[:]
chained.reverse()
finished = []
for validator in chained:
__traceback_info__ = 'for_python chained_validator %s (finished %s)' % (validator, ', '.join(map(repr, finished)) or 'none')
finished.append(validator)
value_dict = validator.from_python(value_dict, state)
new = {}
errors = {}
unused = self.fields.keys()
if state is not None:
previous_key = getattr(state, 'key', None)
previous_full_dict = getattr(state, 'full_dict', None)
state.full_dict = value_dict
try:
__traceback_info__ = None
for name, value in value_dict.items():
__traceback_info__ = 'for_python in %s' % name
try:
unused.remove(name)
except ValueError:
if not self.allow_extra_fields:
raise Invalid(
self.message('notExpected', state,
name=repr(name)),
value_dict, state)
if not self.filter_extra_fields:
new[name] = value
else:
try:
new[name] = self.fields[name].from_python(value, state)
except Invalid, e:
errors[name] = e
del __traceback_info__
for name in unused:
validator = self.fields[name]
try:
new[name] = validator.from_python(None, state)
except Invalid, e:
errors[name] = e
if errors:
raise Invalid(
format_compound_error(errors),
value_dict, state,
error_dict=errors)
pre = self.pre_validators[:]
pre.reverse()
for validator in pre:
__traceback_info__ = 'for_python pre_validator %s' % validator
new = validator.from_python(new, state)
return new
finally:
if state is not None:
state.key = previous_key
state.full_dict = previous_full_dict
def add_chained_validator(self, cls, validator):
if self is not None:
if self.chained_validators is cls.chained_validators:
self.chained_validators = cls.chained_validators[:]
self.chained_validators.append(validator)
else:
cls.chained_validators.append(validator)
add_chained_validator = declarative.classinstancemethod(
add_chained_validator)
def add_field(self, cls, name, validator):
if self is not None:
if self.fields is cls.fields:
self.fields = cls.fields.copy()
self.fields[name] = validator
else:
cls.fields[name] = validator
add_field = declarative.classinstancemethod(add_field)
def add_pre_validator(self, cls, validator):
if self is not None:
if self.pre_validators is cls.pre_validators:
self.pre_validators = cls.pre_validators[:]
self.pre_validators.append(validator)
else:
cls.pre_validators.append(validator)
add_pre_validator = declarative.classinstancemethod(add_pre_validator)
def subvalidators(self):
result = []
result.extend(self.pre_validators)
result.extend(self.chained_validators)
result.extend(self.fields.values())
return result
def format_compound_error(v, indent=0):
if isinstance(v, Exception):
try:
return str(v)
except (UnicodeDecodeError, UnicodeEncodeError):
# There doesn't seem to be a better way to get a str()
# version if possible, and unicode() if necessary, because
# testing for the presence of a __unicode__ method isn't
# enough
return unicode(v)
elif isinstance(v, dict):
l = v.items()
l.sort()
return ('%s\n' % (' '*indent)).join(
["%s: %s" % (k, format_compound_error(value, indent=len(k)+2))
for k, value in l
if value is not None])
elif isinstance(v, list):
return ('%s\n' % (' '*indent)).join(
['%s' % (format_compound_error(value, indent=indent))
for value in v
if value is not None])
elif isinstance(v, basestring):
return v
else:
assert 0, "I didn't expect something like %s" % repr(v)
def merge_dicts(d1, d2):
for key in d2:
if key in d1:
d1[key] = merge_values(d1[key], d2[key])
else:
d1[key] = d2[key]
return d1
def merge_values(v1, v2):
if (isinstance(v1, (str, unicode))
and isinstance(v2, (str, unicode))):
return v1 + '\n' + v2
elif (isinstance(v1, (list, tuple))
and isinstance(v2, (list, tuple))):
return merge_lists(v1, v2)
elif isinstance(v1, dict) and isinstance(v2, dict):
return merge_dicts(v1, v2)
else:
# @@: Should we just ignore errors? Seems we do...
return v1
def merge_lists(l1, l2):
if len(l1) < len(l2):
l1 = l1 + [None]*(len(l2)-len(l1))
elif len(l2) < len(l1):
l2 = l2 + [None]*(len(l1)-len(l2))
result = []
for l1item, l2item in zip(l1, l2):
item = None
if l1item is None:
item = l2item
elif l2item is None:
item = l1item
else:
item = merge_values(l1item, l2item)
result.append(item)
return result
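# Small worked example of the merge helpers above (dict ordering may vary):
if __name__ == '__main__':
    print merge_dicts({'a': 'x', 'b': [1]}, {'a': 'y', 'b': [None, 2], 'c': 1})
    # -> {'a': 'x\ny', 'c': 1, 'b': [1, 2]}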
|
{
"content_hash": "3ffb3daecf27e42fbb1f32211128b209",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 136,
"avg_line_length": 36.54390934844193,
"alnum_prop": 0.5235658914728683,
"repo_name": "santisiri/popego",
"id": "b6affd1aeb5d35717216e638f1caadda2a6e2bc8",
"size": "12900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/FormEncode-0.7.1-py2.5.egg/formencode/schema.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
}
|
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
__copyright__ = \
"""
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import sys
import string
import os
import re
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos', 'win32', 'win16', 'os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
# Platform specific APIs
_libc_search = re.compile(
'(__libc_init)|(GLIBC_([0-9.]+))|(libc(_\\w+)?\\.so(?:\\.(\\d[0-9.]*))?)')
def libc_ver(
executable=sys.executable,
lib='',
version='',
chunksize=2048,
):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable, 'rb')
binary = f.read(chunksize)
pos = 0
    while 1:
m = _libc_search.search(binary, pos)
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
(
libcinit,
glibc,
glibcversion,
so,
threads,
soversion,
) = m.groups()
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return (lib, version)
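# Example (system dependent; the given defaults are returned when no libc
# markers are found in the executable):
#
#   libc_ver()    # -> ('glibc', '2.17') on a typical glibc-based system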
def _dist_try_harder(
distname,
version,
id,
prefix='/mnt/CrawlDisk',
):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists(prefix + '/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
info = open(prefix + '/var/adm/inst-log/info').readlines()
distname = 'SuSE'
for line in info:
tv = string.split(line)
            if len(tv) == 2:
(tag, value) = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = string.strip(value)
elif tag == 'DIST_IDENT':
values = string.split(value, '-')
                id = values[2]
return (distname, version, id)
if os.path.exists(prefix + '/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
info = open(prefix + '/etc/.installed').readlines()
for line in info:
pkg = string.split(line, '-')
            if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
                return ('OpenLinux', pkg[1], id)
if os.path.isdir(prefix + '/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir(prefix + '/usr/lib/setup')
        for n in range(len(verfiles) - 1, -1, -1):
if (verfiles[n])[:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
            version = verfiles[-1][14:]
return (distname, version, id)
return (distname, version, id)
_release_filename = re.compile(r'(\w+)[-_](release|version)')
_lsb_release_version = \
re.compile('(.+) release ([\\d.]+)[^(]*(?:\\((.+)\\))?')
_release_version = \
re.compile('([^0-9]+)(?: release )?([\\d.]+)[^(]*(?:\\((.+)\\))?')
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE',
'debian',
'fedora',
'redhat',
'centos',
'mandrake',
'mandriva',
'rocks',
'slackware',
'yellowdog',
'gentoo',
'UnitedLinux',
'turbolinux',
'Ubuntu',
)
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = string.split(string.strip(firstline))
if l:
version = l[0]
        if len(l) > 1:
            id = l[1]
return ('', version, id)
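# Worked examples (pure string parsing, no files touched):
#
#   _parse_release_file('CentOS release 6.5 (Final)')
#       -> ('CentOS', '6.5', 'Final')
#   _parse_release_file('Fedora release 20 (Heisenbug)')
#       -> ('Fedora', '20', 'Heisenbug')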
_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
_codename_file_re = re.compile("(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I)
def linux_distribution(
distname='',
version='',
id='',
supported_dists=_supported_dists,
    full_distribution_name=1,
prefix='/mnt/CrawlDisk',
):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
# check for the LSB /etc/lsb-release file first, needed so
# that the distribution doesn't get identified as Debian.
try:
with open(prefix + '/etc/lsb-release', 'rU') as etclsbrel:
for line in etclsbrel:
                m = _distributor_id_file_re.search(line)
                if m:
                    _u_distname = m.group(1).strip()
                m = _release_file_re.search(line)
                if m:
                    _u_version = m.group(1).strip()
                m = _codename_file_re.search(line)
                if m:
                    _u_id = m.group(1).strip()
if _u_distname and _u_version:
return (_u_distname, _u_version, _u_id)
except (EnvironmentError, UnboundLocalError):
pass
try:
etc = os.listdir(prefix + '/etc')
except os.error:
# Probably not a Unix system
return (distname, version, id)
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
(_distname, dummy) = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname, version, id, prefix)
# Read the first line
f = open(prefix + '/etc/' + file, 'r')
firstline = f.readline()
f.close()
(_distname, _version, _id) = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return (distname, version, id)
# To maintain backwards compatibility:
def dist(
distname='',
version='',
id='',
supported_dists=_supported_dists,
):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
class _popen:
""" Fairly portable (alternative) popen implementation.
This is mostly needed in case os.popen() is not available, or
doesn't work as advertised, e.g. in Win9X GUI programs like
PythonWin or IDLE.
Writing to the pipe is currently not supported.
"""
tmpfile = ''
pipe = None
bufsize = None
mode = 'r'
def __init__(
self,
cmd,
mode='r',
bufsize=None,
):
if mode != 'r':
raise ValueError('popen()-emulation only supports read mode')
import tempfile
self.tmpfile = tmpfile = tempfile.mktemp()
os.system(cmd + ' > %s' % tmpfile)
self.pipe = open(tmpfile, 'rb')
self.bufsize = bufsize
self.mode = mode
def read(self):
return self.pipe.read()
def readlines(self):
if self.bufsize is not None:
return self.pipe.readlines()
def close(self, remove=os.unlink, error=os.error):
if self.pipe:
rc = self.pipe.close()
else:
rc = 255
if self.tmpfile:
try:
remove(self.tmpfile)
except error:
pass
return rc
# Alias
__del__ = close
def popen(cmd, mode='r', bufsize=None):
""" Portable popen() interface.
"""
# Find a working popen implementation preferring win32pipe.popen
# over os.popen over _popen
popen = None
if os.environ.get('OS', '') == 'Windows_NT':
# On NT win32pipe should work; on Win9x it hangs due to bugs
# in the MS C lib (see MS KnowledgeBase article Q150956)
try:
import win32pipe
except ImportError:
pass
else:
popen = win32pipe.popen
if popen is None:
if hasattr(os, 'popen'):
popen = os.popen
# Check whether it works... it doesn't in GUI programs
# on Windows platforms
if sys.platform == 'win32': # XXX Others too ?
try:
popen('')
except os.error:
popen = _popen
else:
popen = _popen
if bufsize is None:
return popen(cmd, mode)
else:
return popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = string.split(version, '.')
if build:
l.append(build)
try:
ints = map(int, l)
except ValueError:
strings = l
else:
strings = map(str, ints)
version = string.join(strings[:3], '.')
return version
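# For example: _norm_version('5.00.2195') -> '5.0.2195' (numeric parts are
# normalized), while a non-numeric part disables normalization:
# _norm_version('6.1.x') -> '6.1.x'.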
_ver_output = re.compile('(?:([\\w ]+) ([\\w.]+) .*\\[.* ([\\d.]+)\\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(
system='',
release='',
version='',
supported_platforms=('win32', 'win16', 'dos', 'os2'),
):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
        to exist on Windows, DOS and OS/2.  XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return (system, release, version)
# Try some common cmd strings
for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error('command failed')
except os.error as why:
# XXX How can I suppress shell errors from being written
# to stderr ?
# print 'Command %s failed: %s' % (cmd,why)
continue
except IOError:
# print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return (system, release, version)
# Parse the output
info = string.strip(info)
m = _ver_output.match(info)
if m is not None:
(system, release, version) = m.groups()
# Strip trailing dots from version and release
        if release[-1] == '.':
            release = release[:-1]
        if version[-1] == '.':
            version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return (system, release, version)
def _win32_getvalue(key, name, default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using _winreg
import _winreg
RegQueryValueEx = _winreg.QueryValueEx
try:
return RegQueryValueEx(key, name)
except:
return default
def win32_ver(
release='',
version='',
csd='',
ptype='',
):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here:
# http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return (release, version, csd, ptype)
else:
# Emulation using _winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import _winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = _winreg.QueryValueEx
RegOpenKeyEx = _winreg.OpenKeyEx
RegCloseKey = _winreg.CloseKey
HKEY_LOCAL_MACHINE = _winreg.HKEY_LOCAL_MACHINE
            VER_PLATFORM_WIN32_WINDOWS = 1
            VER_PLATFORM_WIN32_NT = 2
            VER_NT_WORKSTATION = 1
            VER_NT_SERVER = 3
            REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
(maj, min, buildno, plat, csd) = winver
version = '%i.%i.%i' % (maj, min, buildno & 0xFFFF)
if hasattr(winver, 'service_pack'):
if winver.service_pack != '':
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
            elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
            elif min == 1:
                release = 'XP'
            elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, 'product_type'):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
(name, type) = RegQueryValueEx(key, 'ProductName')
# Discard any type that isn't REG_SZ
                if type == REG_SZ and name.find('Server') != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
            elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
            elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj, min)
return (release, version, csd, ptype)
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return (release, version, csd, ptype)
# Parse values
# subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
# if subversion:
# release = release + subversion # 95a, 95b, etc.
    build = _win32_getvalue(keyCurVer, 'CurrentBuildNumber', ('', 1))[0]
    ptype = _win32_getvalue(keyCurVer, 'CurrentType', (ptype, 1))[0]
# Normalize version
version = _norm_version(version, build)
# Close key
RegCloseKey(keyCurVer)
return (release, version, csd, ptype)
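# Hypothetical example of win32_ver() output -- the exact values depend on
# the machine, so these are illustrative only. On a Windows 7 SP1 box one
# would expect something like:
#
#     >>> win32_ver()
#     ('7', '6.1.7601', 'SP1', 'Multiprocessor Free')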
def _mac_ver_lookup(selectors, default=None):
from gestalt import gestalt
import MacOS
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, MacOS.Error):
append(default)
return l
def _bcd2str(bcd):
    return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import gestalt
import MacOS
except ImportError:
return None
# Get the infos
(sysv, sysa) = _mac_ver_lookup(('sysv', 'sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = sysv & 0x000F
        if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
            # higher than 9. Apple introduced 3 new
            # gestalt codes in 10.4 to deal with this
            # issue (needed because patch levels can
            # run higher than 9, such as 10.4.11)
(major, minor, patch) = _mac_ver_lookup(('sys1', 'sys2',
'sys3'))
release = '%i.%i.%i' % (major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major), minor, patch)
if sysa:
        machine = {1: '68k', 2: 'PowerPC', 10: 'i386'}.get(sysa, '')
versioninfo = ('', '', '')
return (release, versioninfo, machine)
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo = ('', '', '')
machine = os.uname()[4]
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return (release, versioninfo, machine)
def mac_ver(release='', versioninfo=('', '', ''), machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return (release, versioninfo, machine)
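# Hypothetical example (illustrative values): on an OS X 10.8.2 machine
# reading SystemVersion.plist would yield something like:
#
#     >>> mac_ver()
#     ('10.8.2', ('', '', ''), 'x86_64')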
def _java_getprop(name, default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(
release='',
vendor='',
vminfo=('', '', ''),
osinfo=('', '', ''),
):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return (release, vendor, vminfo, osinfo)
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
(vm_name, vm_release, vm_vendor) = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = (vm_name, vm_release, vm_vendor)
(os_name, os_version, os_arch) = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = (os_name, os_version, os_arch)
return (release, vendor, vminfo, osinfo)
# System name aliasing
def system_alias(system, release, version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return ('MacOS X Server', system + release, version)
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return (system, release, version)
# Modify release (marketing release = SunOS release - 3)
l = string.split(release, '.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = string.join(l, '.')
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
        # apps are also supported.
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32', 'win16'):
# In case one of the other tricks
system = 'Windows'
return (system, release, version)
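# Example (follows directly from the mapping above): SunOS 5.8 is reported
# under its Solaris marketing name, release shifted down by 3:
#
#     >>> system_alias('SunOS', '5.8', 'Generic_108528-12')
#     ('Solaris', '2.8', 'Generic_108528-12')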
# Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = string.join(map(string.strip, filter(len, args)), '-')
# Cleanup some possible filename obstacles...
replace = string.replace
platform = replace(platform, ' ', '_')
platform = replace(platform, '/', '-')
platform = replace(platform, '\\', '-')
platform = replace(platform, ':', '-')
platform = replace(platform, ';', '-')
platform = replace(platform, '"', '-')
platform = replace(platform, '(', '-')
platform = replace(platform, ')', '-')
# No need to report 'unknown' information...
platform = replace(platform, 'unknown', '')
# Fold '--'s and remove trailing '-'
    while 1:
cleaned = replace(platform, '--', '-')
if cleaned == platform:
break
platform = cleaned
    while platform[-1] == '-':
        platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
# os.path.abspath is new in Python 1.5.2:
if not hasattr(os.path, 'abspath'):
def _abspath(
path,
isabs=os.path.isabs,
join=os.path.join,
getcwd=os.getcwd,
normpath=os.path.normpath,
):
if not isabs(path):
path = join(getcwd(), path)
return normpath(path)
else:
_abspath = os.path.abspath
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = _abspath(filepath)
while os.path.islink(filepath):
filepath = \
os.path.normpath(os.path.join(os.path.dirname(filepath),
os.readlink(filepath)))
return filepath
def _syscmd_uname(option, default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos', 'win32', 'win16', 'os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError, os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target, default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
        omit the filename in its output and if possible the -L option
to have the command follow symlinks. It returns default in
case the command should fail.
"""
# We do the import here to avoid a bootstrap issue.
# See c73b90b6dadd changeset.
#
# [..]
# ranlib libpython2.7.a
# gcc -o python \
# Modules/python.o \
# libpython2.7.a -lsocket -lnsl -ldl -lm
# Traceback (most recent call last):
# File "./setup.py", line 8, in <module>
# from platform import machine as platform_machine
# File "[..]/build/Lib/platform.py", line 116, in <module>
# import sys,string,os,re,subprocess
# File "[..]/build/Lib/subprocess.py", line 429, in <module>
# import select
# ImportError: No module named select
import subprocess
if sys.platform in ('dos', 'win32', 'win16', 'os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except (AttributeError, os.error):
return default
output = proc.communicate()[0]
rc = proc.wait()
if not output or rc:
return default
else:
return output
# Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
    'win32': ('', 'WindowsPE'),
    'win16': ('', 'Windows'),
    'dos': ('', 'MSDOS'),
}
_architecture_split = re.compile(r'[\s,]').split
def architecture(executable=sys.executable, bits='', linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size * 8) + 'bit'
# Get data from the 'file' system command
if executable:
output = _syscmd_file(executable, '')
else:
output = ''
if not output and executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
(b, l) = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return (bits, linkage)
# Split the output into a list of strings omitting the filename
    fileout = _architecture_split(output)[1:]
if 'executable' not in fileout:
# Format not supported
return (bits, linkage)
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return (bits, linkage)
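# Illustrative example: on a typical 64-bit Linux build of Python the
# "file" command reports an ELF 64-bit executable, so one would expect:
#
#     >>> architecture()
#     ('64bit', 'ELF')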
# Portable uname() interface
_uname_cache = None
def uname(prefix='/mnt/CrawlDisk'):
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
Trivial implementation for out-of-band crawling. Does not rely on OS functions.
"""
global _uname_cache
# os attributes: ["boottime", "osdistro", "ipaddr", "osname", "osplatform", "osrelease", "ostype", "osversion"]
# os "linux" --> platform.system().lower()
# {"boottime":1394049039.0, --> psutil.BOOT_TIME
# "ipaddr":"10.154.163.164", --> get_host_ipaddr()
# "osdistro":"Ubuntu", --> platform_outofband.linux_distribution(prefix=mountpoint)[0],
# "osname":"Linux-3.11.0-12-generic-i686-with-Ubuntu-13.10-saucy", --> platform_outofband.platform(),
# "osplatform":"i686", --> platform_outofband.machine(prefix=mountpoint),
# "osrelease":"3.11.0-12-generic", --> platform_outofband.release(prefix=mountpoint),
# "ostype":"linux", --> platform_outofband.system(prefix=mountpoint).lower(),
# "osversion":"#19-Ubuntu SMP Wed Oct 9 16:12:00 UTC 2013"} --> platform_outofband.version(prefix=mountpoint)
# Some of the release info is in: /etc/issue /etc/os-release /etc/*release
# See below (unused) distrofind() method for files to check for different distros
# For now abandoning a custom impln as linux_distribution() checks some of
# the same
# defaults:
system = 'unknown'
node = 'unsupported'
release = 'unknown'
version = 'unknown'
machine = 'unknown'
processor = 'unsupported'
# Guesstimate Linux/Win/Other from linux_distribution info (system)
if linux_distribution(prefix=prefix)[0] in _supported_dists:
system = 'Linux'
elif os.path.exists(prefix + '/etc'):
system = 'Linux'
elif os.path.exists(prefix + '/Windows'):
system = 'Windows'
# Guesstimate kernel version from kernel image, i.e., 3.11.0-12-generic
# (release)
try:
        # WARNING: if there is more than one kernel image in /boot, this
        # will just pick the first one
kernel_images = [f for f in os.listdir(prefix + '/boot')
if re.match(r'.*linu.*', f)]
if len(kernel_images) > 0:
# parse sth like: (vmlinuz)-(3.11.0-12-generic)
match = re.search('(.*linu.*?)-(.*)', kernel_images[0])
if match:
                release = match.group(2)
    except (IOError, OSError):
# /boot doesn't even exist
pass
# Get distro version, i.e., 13.10 (version)
    if len(linux_distribution(prefix=prefix)[1]) > 0:
        version = linux_distribution(prefix=prefix)[1]
# Get machine type, i.e., i686 or x86_64 (machine)
machine = 'unknown'
    # If any unknowns still exist, replace them with ''s, which are more
    # portable. Note: node and processor stay 'unsupported' on purpose --
    # they cannot be determined from a mounted disk image.
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
_uname_cache = (
system,
node,
release,
version,
machine,
processor,
)
return _uname_cache
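# Hypothetical example for an Ubuntu 13.10 image mounted at the default
# prefix. Node and processor come back as 'unsupported' since they cannot
# be crawled out-of-band:
#
#     >>> uname()
#     ('Linux', 'unsupported', '3.11.0-12-generic', '13.10', '', 'unsupported')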
# Unused method, left for reference on different release files for
# different distros
def distrofind():
DISTROS = {
'Ubuntu': './etc/lsb-release',
'Debian': './etc/debian_version',
'RedHat': './etc/redhat-release',
'SUSE': './etc/SUSE-release',
'Fedora': './etc/fedora-release',
'Gentoo': './etc/gentoo-release',
'Slackware': './etc/slackware-version',
'Mandriva': './etc/mandriva-release',
'Mandrake': './etc/mandrake-release',
'YellowDog': './etc/yellowdog-release',
'SUN JDS': './etc/sun-release',
'UnitedLinux': './etc/UnitedLinux-release',
}
    for (distro, distrofile) in DISTROS.items():
        try:
            distfile = open('%s' % distrofile, 'r')
            distfile.readlines()
            distfile.close()
            return distro
        except IOError:
            # Release file absent; try the next known distro.
            continue
    # None of the known release files exist: fall back to the first word
    # of /etc/issue.
    issueFile = open('/etc/issue', 'r')
    issue = issueFile.readline()
    issueFile.close()
    qname = issue.split()
    return qname[0]
# Direct interfaces to some of the uname() return values
def system(prefix='/mnt/CrawlDisk'):
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname(prefix=prefix)[0]
def node(prefix='/mnt/CrawlDisk'):
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
    return uname(prefix=prefix)[1]
def release(prefix='/mnt/CrawlDisk'):
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
    return uname(prefix=prefix)[2]
def version(prefix='/mnt/CrawlDisk'):
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname(prefix=prefix)[3]
def machine(prefix='/mnt/CrawlDisk'):
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname(prefix=prefix)[4]
def processor(prefix='/mnt/CrawlDisk'):
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname(prefix=prefix)[5]
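# Illustrative use of the accessors above against a mounted guest image
# (the mount point '/mnt/guest' is hypothetical):
#
#     >>> system(prefix='/mnt/guest')
#     'Linux'
#     >>> release(prefix='/mnt/guest')
#     '3.11.0-12-generic'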
# Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
'([\\w.+]+)\\s*\\(#?([^,]+),\\s*([\\w ]+),\\s*([\\w :]+)\\)\\s*\\[([^\\]]+)\\]?')
_ironpython_sys_version_parser = re.compile(
'IronPython\\s*([\\d\\.]+)(?: \\(([\\d\\.]+)\\))? on (.NET [\\d\\.]+)')
_pypy_sys_version_parser = re.compile(
'([\\w.+]+)\\s*\\(#?([^,]+),\\s*([\\w ]+),\\s*([\\w :]+)\\)\\s*\\[PyPy [^\\]]+\\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
    if sys_version[:10] == 'IronPython':
# IronPython
name = 'IronPython'
match = _ironpython_sys_version_parser.match(sys_version)
if match is None:
raise ValueError('failed to parse IronPython sys.version: %s'
% repr(sys_version))
(version, alt_version, compiler) = match.groups()
buildno = ''
builddate = ''
elif sys.platform[:4] == 'java':
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError('failed to parse Jython sys.version: %s'
% repr(sys_version))
(version, buildno, builddate, buildtime, _) = match.groups()
compiler = sys.platform
elif 'PyPy' in sys_version:
# PyPy
name = 'PyPy'
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError('failed to parse PyPy sys.version: %s'
% repr(sys_version))
(version, buildno, builddate, buildtime) = match.groups()
compiler = ''
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError('failed to parse CPython sys.version: %s'
% repr(sys_version))
(version, buildno, builddate, buildtime, compiler) = \
match.groups()
name = 'CPython'
builddate = builddate + ' ' + buildtime
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
(_, branch, revision) = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = string.split(version, '.')
    if len(l) == 2:
        l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (
name,
version,
branch,
revision,
buildno,
builddate,
compiler,
)
_sys_version_cache[sys_version] = result
return result
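# Illustrative example (branch and revision depend on how the running
# interpreter was built and are shown empty here):
#
#     >>> _sys_version('2.7.6 (default, Mar 22 2014, 22:59:56) \n[GCC 4.8.2]')
#     ('CPython', '2.7.6', '', '', 'default', 'Mar 22 2014 22:59:56', 'GCC 4.8.2')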
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
    return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
    return tuple(string.split(_sys_version()[1], '.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
    return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
# The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0, prefix='/mnt/CrawlDisk'):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
VERY ROUGHLY IMITATED IN OUT-OF-BAND IMPLEMENTATION!
"""
# imitate sth like:
# "(Linux-3.11.0-12-generic)-(i686)-with-(Ubuntu-13.10-saucy)"
distro_details = '-'.join(linux_distribution(prefix=prefix))
    if len(distro_details) < 4:
        # All three distro parts were empty ('-'.join yields just '--')
        distro_details = '-'.join(['unknown', 'unknown', 'unknown'])
    system_info = system(prefix=prefix)
    if len(system_info) < 1:
        system_info = 'unknown'
    release_info = release(prefix=prefix)
    if len(release_info) < 1:
        release_info = 'unknown'
    machine_info = machine(prefix=prefix)
    if len(machine_info) < 1:
        machine_info = 'unknown'
# imitate sth like:
# "(Linux)-(3.11.0-12-generic)-(i686)-with-(Ubuntu-13.10-saucy)"
return system_info + '-' + release_info + '-' + machine_info \
+ '-with-' + distro_details
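# Hypothetical example for the same mounted Ubuntu image as above (machine
# type is not crawled, hence 'unknown'):
#
#     >>> platform()
#     'Linux-3.11.0-12-generic-unknown-with-Ubuntu-13.10-saucy'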
# Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = 'terse' in sys.argv or '--terse' in sys.argv
    aliased = 'nonaliased' not in sys.argv and '--nonaliased' not in sys.argv
print platform(aliased, terse)
sys.exit(0)
|
{
"content_hash": "c39c1054e4e79e80f46eb38e4a41695b",
"timestamp": "",
"source": "github",
"line_count": 1784,
"max_line_length": 128,
"avg_line_length": 27.931053811659194,
"alnum_prop": 0.5781171606895583,
"repo_name": "ricarkol/agentless-system-crawler",
"id": "07c87318cd54c39a90c17475303286482435dedd",
"size": "54036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/platform_outofband.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "411"
},
{
"name": "Python",
"bytes": "306342"
},
{
"name": "Shell",
"bytes": "98687"
}
],
"symlink_target": ""
}
|
from modeltranslation.translator import translator, TranslationOptions
from geotrek.common.models import Theme
class ThemeTO(TranslationOptions):
fields = ('label', )
translator.register(Theme, ThemeTO)
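# With this registration django-modeltranslation adds one database column
# per language in settings.LANGUAGES; e.g. with 'en' and 'fr' configured, a
# Theme instance exposes 'label_en' and 'label_fr' alongside 'label'.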
|
{
"content_hash": "d7c092c065ea60a2464cd53ffb30f0cd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 23.444444444444443,
"alnum_prop": 0.8009478672985783,
"repo_name": "johan--/Geotrek",
"id": "009ce74a152b5b475af5ba43838c8c3aa7656b38",
"size": "211",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "geotrek/common/translation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12725"
},
{
"name": "HTML",
"bytes": "94850"
},
{
"name": "JavaScript",
"bytes": "184551"
},
{
"name": "Makefile",
"bytes": "4038"
},
{
"name": "PLpgSQL",
"bytes": "73761"
},
{
"name": "Python",
"bytes": "2309827"
},
{
"name": "Shell",
"bytes": "16247"
}
],
"symlink_target": ""
}
|
from pydbus import SystemBus
from gi.repository import GLib


def msgRcv(timestamp, source, groupID, message, attachments):
    print("msgRcv called")
    print(message)
    return


bus = SystemBus()
loop = GLib.MainLoop()
signal = bus.get('org.asamk.Signal')
signal.onMessageReceived = msgRcv
loop.run()
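# Note: this listener assumes signal-cli is already registered and running
# as a system D-Bus service, e.g. via a (hypothetical) invocation like:
#
#     signal-cli -u +491234567890 daemon --system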
|
{
"content_hash": "dd22ba62d13b9b5313b38b50a8ae94a7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 20.4,
"alnum_prop": 0.7352941176470589,
"repo_name": "mh-g/python-signal-cli",
"id": "1428c73e147b3f274759abe8680e156b5709d0db",
"size": "341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "receive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1429"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from oscar.core.utils import slugify
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
Partner = orm['partner.Partner']
for partner in Partner.objects.all():
partner.code = slugify(partner.name)
partner.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'partner.abstractstockalert': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'AbstractStockAlert'},
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': "orm['partner.StockRecord']"}),
'threshold': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'partner.stockalert': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert', '_ormbases': ['partner.AbstractStockAlert']},
'abstractstockalert_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['partner.AbstractStockAlert']", 'unique': 'True', 'primary_key': 'True'})
},
'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'stockrecord'", 'unique': 'True', 'to': "orm['catalogue.Product']"})
}
}
complete_apps = ['partner']
symmetrical = True
|
{
"content_hash": "0f2da23e1edc6fa1bccf6db77add72d7",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 222,
"avg_line_length": 83.72946859903382,
"alnum_prop": 0.5608123701823217,
"repo_name": "michaelBenin/django-oscar",
"id": "53ca211eb3837becd04ff5ecabda1c2f1d1fe5be",
"size": "17356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/partner/migrations/0005_populate_slugs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import os.path
import logging
import hashlib
import shutil
import jsonHelper
##----------------------------------------------------------------##
class CacheManager(object):
_singleton = None
@staticmethod
def get():
return CacheManager._singleton
def __init__( self ):
assert not CacheManager._singleton
CacheManager._singleton = self
super(CacheManager, self).__init__()
def save( self ):
pass
def clearFreeCacheFiles( self ):
pass
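# Minimal usage sketch (assumes exactly one CacheManager is constructed at
# editor start-up; names below mirror the class above):
#
#     CacheManager()              # installs the singleton
#     cache = CacheManager.get()  # retrieve it from anywhere else
#     cache.save()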
|
{
"content_hash": "7eb151380c9b28c8ac2af4ca9aec9740",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 17.444444444444443,
"alnum_prop": 0.6178343949044586,
"repo_name": "cloudteampro/juma-editor",
"id": "4d36fe1f90a8c94ce509a44d1ea4bf5020b17dbd",
"size": "471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "editor/lib/juma/core/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "490405"
},
{
"name": "C++",
"bytes": "15076"
},
{
"name": "Lua",
"bytes": "223218"
},
{
"name": "Makefile",
"bytes": "6088"
},
{
"name": "Objective-C",
"bytes": "25470"
},
{
"name": "Python",
"bytes": "1033362"
},
{
"name": "Shell",
"bytes": "2792"
}
],
"symlink_target": ""
}
|
import pytest
import libqtile
import libqtile.confreader
import libqtile.manager
import libqtile.config
import libqtile.layout
import libqtile.bar
import libqtile.widget
class CallConfig(object):
keys = [
libqtile.config.Key(
["control"], "j",
libqtile.command._Call([("layout", None)], "down")
),
libqtile.config.Key(
["control"], "k",
libqtile.command._Call([("layout", None)], "up"),
),
]
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
]
layouts = [
libqtile.layout.Stack(num_stacks=1),
libqtile.layout.Max(),
]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
],
20
),
)
]
main = None
auto_fullscreen = True
call_config = pytest.mark.parametrize("qtile", [CallConfig], indirect=True)
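# The "indirect" parametrization hands CallConfig to the qtile fixture,
# which boots a test manager with that config, so every test marked with
# @call_config runs against the key bindings defined above.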
@call_config
def test_layout_filter(qtile):
qtile.testWindow("one")
qtile.testWindow("two")
assert qtile.c.groups()["a"]["focus"] == "two"
qtile.c.simulate_keypress(["control"], "j")
assert qtile.c.groups()["a"]["focus"] == "one"
qtile.c.simulate_keypress(["control"], "k")
assert qtile.c.groups()["a"]["focus"] == "two"
class TestCommands(libqtile.command.CommandObject):
@staticmethod
def cmd_one():
pass
def cmd_one_self(self):
pass
def cmd_two(self, a):
pass
def cmd_three(self, a, b=99):
pass
def _items(self, name):
return None
def _select(self, name, sel):
return None
def test_doc():
c = TestCommands()
assert "one()" in c.doc("one")
assert "one_self()" in c.doc("one_self")
assert "two(a)" in c.doc("two")
assert "three(a, b=99)" in c.doc("three")
def test_commands():
c = TestCommands()
assert len(c.cmd_commands()) == 9
def test_command():
c = TestCommands()
assert c.command("one")
assert not c.command("nonexistent")
class ConcreteCmdRoot(libqtile.command._CommandRoot):
def call(self, *args):
return args
def _items(self, name):
return None
def _select(self, name, sel):
return None
def test_selectors():
c = ConcreteCmdRoot()
s = c.layout.screen.info
assert s.selectors == [('layout', None), ('screen', None)]
assert isinstance(c.info, libqtile.command._Command)
g = c.group
assert isinstance(g, libqtile.command._TGroup)
assert g.myselector is None
g = c.group["one"]
assert isinstance(g, libqtile.command._TGroup)
assert g.myselector == "one"
cmd = c.group["one"].foo
assert cmd.name == "foo"
assert cmd.selectors == [('group', 'one')]
g = c.group["two"].layout["three"].screen
assert g.selectors == [('group', 'two'), ('layout', 'three')]
g = c.one
assert g.selectors == []
class ServerConfig(object):
auto_fullscreen = True
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
]
layouts = [
libqtile.layout.Stack(num_stacks=1),
libqtile.layout.Stack(num_stacks=2),
libqtile.layout.Stack(num_stacks=3),
]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.TextBox(name="one"),
],
20
),
),
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.TextBox(name="two"),
],
20
),
)
]
main = None
server_config = pytest.mark.parametrize("qtile", [ServerConfig], indirect=True)
@server_config
def test_cmd_commands(qtile):
assert qtile.c.commands()
assert qtile.c.layout.commands()
assert qtile.c.screen.bar["bottom"].commands()
@server_config
def test_call_unknown(qtile):
with pytest.raises(libqtile.command.CommandError):
qtile.c.nonexistent()
with pytest.raises(libqtile.command.CommandError):
qtile.c.layout.nonexistent()
@server_config
def test_items_qtile(qtile):
v = qtile.c.items("group")
assert v[0]
assert sorted(v[1]) == ["a", "b", "c"]
assert qtile.c.items("layout") == (True, [0, 1, 2])
v = qtile.c.items("widget")
assert not v[0]
assert sorted(v[1]) == ['one', 'two']
assert qtile.c.items("bar") == (False, ["bottom"])
t, lst = qtile.c.items("window")
assert t
assert len(lst) == 2
assert qtile.c.window[lst[0]]
assert qtile.c.items("screen") == (True, [0, 1])
@server_config
def test_select_qtile(qtile):
assert qtile.c.foo.selectors == []
assert qtile.c.layout.info()["group"] == "a"
assert len(qtile.c.layout.info()["stacks"]) == 1
assert len(qtile.c.layout[2].info()["stacks"]) == 3
with pytest.raises(libqtile.command.CommandError):
qtile.c.layout[99].info()
assert qtile.c.group.info()["name"] == "a"
assert qtile.c.group["c"].info()["name"] == "c"
with pytest.raises(libqtile.command.CommandError):
qtile.c.group["nonexistent"].info()
assert qtile.c.widget["one"].info()["name"] == "one"
with pytest.raises(libqtile.command.CommandError):
qtile.c.widget.info()
assert qtile.c.bar["bottom"].info()["position"] == "bottom"
qtile.testWindow("one")
wid = qtile.c.window.info()["id"]
assert qtile.c.window[wid].info()["id"] == wid
assert qtile.c.screen.info()["index"] == 0
assert qtile.c.screen[1].info()["index"] == 1
with pytest.raises(libqtile.command.CommandError):
qtile.c.screen[22].info()
with pytest.raises(libqtile.command.CommandError):
qtile.c.screen["foo"].info()
@server_config
def test_items_group(qtile):
g = qtile.c.group
assert g.items("layout") == (True, [0, 1, 2])
qtile.testWindow("test")
wid = qtile.c.window.info()["id"]
assert g.items("window") == (True, [wid])
assert g.items("screen") == (True, None)
@server_config
def test_select_group(qtile):
g = qtile.c.group
assert g.layout.info()["group"] == "a"
assert len(g.layout.info()["stacks"]) == 1
assert len(g.layout[2].info()["stacks"]) == 3
with pytest.raises(libqtile.command.CommandError):
qtile.c.group.window.info()
qtile.testWindow("test")
wid = qtile.c.window.info()["id"]
assert g.window.info()["id"] == wid
assert g.window[wid].info()["id"] == wid
with pytest.raises(libqtile.command.CommandError):
g.window["foo"].info()
assert g.screen.info()["index"] == 0
assert g["b"].screen.info()["index"] == 1
with pytest.raises(libqtile.command.CommandError):
g["b"].screen[0].info()
@server_config
def test_items_screen(qtile):
s = qtile.c.screen
assert s.items("layout") == (True, [0, 1, 2])
qtile.testWindow("test")
wid = qtile.c.window.info()["id"]
assert s.items("window") == (True, [wid])
assert s.items("bar") == (False, ["bottom"])
@server_config
def test_select_screen(qtile):
s = qtile.c.screen
assert s.layout.info()["group"] == "a"
assert len(s.layout.info()["stacks"]) == 1
assert len(s.layout[2].info()["stacks"]) == 3
with pytest.raises(libqtile.command.CommandError):
qtile.c.window.info()
with pytest.raises(libqtile.command.CommandError):
qtile.c.window[2].info()
qtile.testWindow("test")
wid = qtile.c.window.info()["id"]
assert s.window.info()["id"] == wid
assert s.window[wid].info()["id"] == wid
with pytest.raises(libqtile.command.CommandError):
s.bar.info()
with pytest.raises(libqtile.command.CommandError):
s.bar["top"].info()
assert s.bar["bottom"].info()["position"] == "bottom"
@server_config
def test_items_bar(qtile):
assert qtile.c.bar["bottom"].items("screen") == (True, None)
@server_config
def test_select_bar(qtile):
assert qtile.c.screen[1].bar["bottom"].screen.info()["index"] == 1
b = qtile.c.bar
assert b["bottom"].screen.info()["index"] == 0
with pytest.raises(libqtile.command.CommandError):
b.screen.info()
@server_config
def test_items_layout(qtile):
assert qtile.c.layout.items("screen") == (True, None)
assert qtile.c.layout.items("group") == (True, None)
@server_config
def test_select_layout(qtile):
assert qtile.c.layout.screen.info()["index"] == 0
with pytest.raises(libqtile.command.CommandError):
qtile.c.layout.screen[0].info()
assert qtile.c.layout.group.info()["name"] == "a"
with pytest.raises(libqtile.command.CommandError):
qtile.c.layout.group["a"].info()
@server_config
def test_items_window(qtile):
qtile.testWindow("test")
qtile.c.window.info()["id"]
assert qtile.c.window.items("group") == (True, None)
assert qtile.c.window.items("layout") == (True, [0, 1, 2])
assert qtile.c.window.items("screen") == (True, None)
@server_config
def test_select_window(qtile):
qtile.testWindow("test")
qtile.c.window.info()["id"]
assert qtile.c.window.group.info()["name"] == "a"
with pytest.raises(libqtile.command.CommandError):
qtile.c.window.group["a"].info()
assert len(qtile.c.window.layout.info()["stacks"]) == 1
assert len(qtile.c.window.layout[1].info()["stacks"]) == 2
assert qtile.c.window.screen.info()["index"] == 0
with pytest.raises(libqtile.command.CommandError):
qtile.c.window.screen[0].info()
@server_config
def test_items_widget(qtile):
assert qtile.c.widget["one"].items("bar") == (True, None)
@server_config
def test_select_widget(qtile):
w = qtile.c.widget["one"]
assert w.bar.info()["position"] == "bottom"
with pytest.raises(libqtile.command.CommandError):
w.bar["bottom"].info()
|
{
"content_hash": "353bdb452d4a25ae10547df1c2857a41",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 79,
"avg_line_length": 26.3359375,
"alnum_prop": 0.5981410066251359,
"repo_name": "cortesi/qtile",
"id": "4f82e1b36dca6a4ca61159b44de10c13fd2fc192",
"size": "11314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1170921"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5643"
}
],
"symlink_target": ""
}
|
import unittest
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
def setUpModule():
if not oeRuntimeTest.hasPackage("pax-utils"):
skipModule("pax-utils package not installed")
class ScanelfTest(oeRuntimeTest):
def setUp(self):
self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
@testcase(966)
@skipUnlessPassed('test_ssh')
def test_scanelf_textrel(self):
# print TEXTREL information
self.scancmd += " --textrel"
(status, output) = self.target.run(self.scancmd)
self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
@testcase(967)
@skipUnlessPassed('test_ssh')
def test_scanelf_rpath(self):
# print RPATH information
self.scancmd += " --rpath"
(status, output) = self.target.run(self.scancmd)
self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
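# For a manual spot check against a running target, the equivalent shell
# commands are simply the ones assembled above (illustrative; both should
# print nothing on a correctly built image):
#
#   scanelf --quiet --recursive --mount --ldpath --path --textrel
#   scanelf --quiet --recursive --mount --ldpath --path --rpath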
|
{
"content_hash": "e01ae03881962e3c35c942358cba07e8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.6530398322851153,
"repo_name": "wwright2/dcim3-angstrom1",
"id": "43a024ab9a3471b7e49a73b0cf43bc89d65ad121",
"size": "954",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sources/openembedded-core/meta/lib/oeqa/runtime/scanelf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "73541"
},
{
"name": "Awk",
"bytes": "286"
},
{
"name": "Batchfile",
"bytes": "19960"
},
{
"name": "BitBake",
"bytes": "2875212"
},
{
"name": "BlitzBasic",
"bytes": "6367"
},
{
"name": "C",
"bytes": "1598095"
},
{
"name": "C++",
"bytes": "2198121"
},
{
"name": "CMake",
"bytes": "7277"
},
{
"name": "CSS",
"bytes": "28636"
},
{
"name": "Groff",
"bytes": "502999"
},
{
"name": "HTML",
"bytes": "210823"
},
{
"name": "JavaScript",
"bytes": "23100"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32539"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "PHP",
"bytes": "829048"
},
{
"name": "Pascal",
"bytes": "17352"
},
{
"name": "Perl",
"bytes": "66339"
},
{
"name": "Python",
"bytes": "3672452"
},
{
"name": "QMake",
"bytes": "165"
},
{
"name": "Ruby",
"bytes": "10695"
},
{
"name": "Shell",
"bytes": "820076"
},
{
"name": "SourcePawn",
"bytes": "259600"
},
{
"name": "Tcl",
"bytes": "4897"
},
{
"name": "VimL",
"bytes": "8483"
},
{
"name": "XSLT",
"bytes": "9089"
}
],
"symlink_target": ""
}
|
import inspect
import mock
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova import i18n
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import utils
class RequestTest(test.NoDBTestCase):
header_name = 'X-OpenStack-Nova-API-Version'
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_cache_and_retrieve_instances(self):
request = wsgi.Request.blank('/foo')
instances = []
for x in xrange(3):
instances.append({'uuid': 'uuid%s' % x})
# Store 2
request.cache_db_instances(instances[:2])
# Store 1
request.cache_db_instance(instances[2])
self.assertEqual(request.get_db_instance('uuid0'),
instances[0])
self.assertEqual(request.get_db_instance('uuid1'),
instances[1])
self.assertEqual(request.get_db_instance('uuid2'),
instances[2])
self.assertIsNone(request.get_db_instance('uuid3'))
self.assertEqual(request.get_db_instances(),
{'uuid0': instances[0],
'uuid1': instances[1],
'uuid2': instances[2]})
def test_cache_and_retrieve_compute_nodes(self):
request = wsgi.Request.blank('/foo')
compute_nodes = []
for x in xrange(3):
compute_nodes.append({'id': 'id%s' % x})
# Store 2
request.cache_db_compute_nodes(compute_nodes[:2])
# Store 1
request.cache_db_compute_node(compute_nodes[2])
self.assertEqual(request.get_db_compute_node('id0'),
compute_nodes[0])
self.assertEqual(request.get_db_compute_node('id1'),
compute_nodes[1])
self.assertEqual(request.get_db_compute_node('id2'),
compute_nodes[2])
self.assertIsNone(request.get_db_compute_node('id3'))
self.assertEqual(request.get_db_compute_nodes(),
{'id0': compute_nodes[0],
'id1': compute_nodes[1],
'id2': compute_nodes[2]})
def test_from_request(self):
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_US')
def test_asterisk(self):
# asterisk should match first available if there
# are not any other available matches
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = '*,es;q=.5'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_GB')
def test_prefix(self):
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = 'zh'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'zh_CN')
def test_secondary(self):
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = 'nn,en-gb;q=.5'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_GB')
def test_none_found(self):
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = 'nb-no'
request.headers = {'Accept-Language': accepted}
self.assertIs(request.best_match_language(), None)
def test_no_lang_header(self):
self.stubs.Set(i18n, 'get_available_languages',
fakes.fake_get_available_languages)
request = wsgi.Request.blank('/')
accepted = ''
request.headers = {'Accept-Language': accepted}
self.assertIs(request.best_match_language(), None)
def test_api_version_request_header_none(self):
request = wsgi.Request.blank('/')
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION), request.api_version_request)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_api_version_request_header(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.14")
request = wsgi.Request.blank('/')
request.headers = {self.header_name: '2.14'}
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest("2.14"),
request.api_version_request)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_api_version_request_header_latest(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
request = wsgi.Request.blank('/')
request.headers = {self.header_name: 'latest'}
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest("3.5"),
request.api_version_request)
def test_api_version_request_header_invalid(self):
request = wsgi.Request.blank('/')
request.headers = {self.header_name: '2.1.3'}
self.assertRaises(exception.InvalidAPIVersionString,
request.set_api_version_request)
class ActionDispatcherTest(test.NoDBTestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class DictSerializerTest(test.NoDBTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(serializer.serialize({}, 'update'), '')
class JSONDictSerializerTest(test.NoDBTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class TextDeserializerTest(test.NoDBTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual(deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(test.NoDBTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
def test_json_valid_utf8(self):
data = """{"server": {"min_count": 1, "flavorRef": "1",
"name": "\xe6\xa6\x82\xe5\xbf\xb5",
"imageRef": "10bab10c-1304-47d",
"max_count": 1}} """
as_dict = {
'body': {
u'server': {
u'min_count': 1, u'flavorRef': u'1',
u'name': u'\u6982\u5ff5',
u'imageRef': u'10bab10c-1304-47d',
u'max_count': 1
}
}
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
def test_json_invalid_utf8(self):
"""Send invalid utf-8 to JSONDeserializer."""
data = """{"server": {"min_count": 1, "flavorRef": "1",
"name": "\xf0\x28\x8c\x28",
"imageRef": "10bab10c-1304-47d",
"max_count": 1}} """
deserializer = wsgi.JSONDeserializer()
self.assertRaises(exception.MalformedRequestBody,
deserializer.deserialize, data)
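# A short note on the byte sequence used above: "\xf0" opens a four-byte UTF-8
# sequence, so the next byte must be a continuation byte in the range
# \x80-\xbf. "\x28" (ASCII "(") is not, which is what makes the body
# undecodable and triggers MalformedRequestBody.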
class ResourceTest(test.NoDBTestCase):
header_name = 'X-OpenStack-Nova-API-Version'
def get_req_id_header_name(self, request):
header_name = 'x-openstack-request-id'
if utils.get_api_version(request) < 3:
header_name = 'x-compute-request-id'
return header_name
def test_resource_receives_api_version_request_default(self):
class Controller(object):
def index(self, req):
if req.api_version_request != \
api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION):
raise webob.exc.HTTPInternalServerError()
return 'success'
app = fakes.TestRouterV21(Controller())
req = webob.Request.blank('/tests')
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_resource_receives_api_version_request(self, mock_maxver):
version = "2.5"
mock_maxver.return_value = api_version.APIVersionRequest(version)
class Controller(object):
def index(self, req):
if req.api_version_request != \
api_version.APIVersionRequest(version):
raise webob.exc.HTTPInternalServerError()
return 'success'
app = fakes.TestRouterV21(Controller())
req = webob.Request.blank('/tests')
req.headers = {self.header_name: version}
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
def test_resource_receives_api_version_request_invalid(self):
invalid_version = "2.5.3"
class Controller(object):
def index(self, req):
return 'success'
app = fakes.TestRouterV21(Controller())
req = webob.Request.blank('/tests')
req.headers = {self.header_name: invalid_version}
response = req.get_response(app)
self.assertEqual(400, response.status_int)
def test_resource_call_with_method_get(self):
class Controller(object):
def index(self, req):
return 'success'
app = fakes.TestRouter(Controller())
# the default method is GET
req = webob.Request.blank('/tests')
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
req.body = '{"body": {"key": "value"}}'
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
req.content_type = 'application/json'
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
def test_resource_call_with_method_post(self):
class Controller(object):
@extensions.expected_errors(400)
def create(self, req, body):
if expected_body != body:
msg = "The request body invalid"
raise webob.exc.HTTPBadRequest(explanation=msg)
return "success"
# verify the method: POST
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests', method="POST",
content_type='application/json')
req.body = '{"body": {"key": "value"}}'
expected_body = {'body': {
"key": "value"
}
}
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'success')
# verify without body
expected_body = None
req.body = None
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'success')
# the body is validated in the controller
expected_body = {'body': None}
response = req.get_response(app)
        expected_unsupported_type_body = ('{"badRequest": '
            '{"message": "The request body is invalid", "code": 400}}')
self.assertEqual(response.status_int, 400)
self.assertEqual(expected_unsupported_type_body, response.body)
def test_resource_call_with_method_put(self):
class Controller(object):
def update(self, req, id, body):
if expected_body != body:
msg = "The request body invalid"
raise webob.exc.HTTPBadRequest(explanation=msg)
return "success"
# verify the method: PUT
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests/test_id', method="PUT",
content_type='application/json')
req.body = '{"body": {"key": "value"}}'
expected_body = {'body': {
"key": "value"
}
}
response = req.get_response(app)
self.assertEqual(response.body, 'success')
self.assertEqual(response.status_int, 200)
req.body = None
expected_body = None
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
# verify no content_type is contained in the request
req.content_type = None
req.body = '{"body": {"key": "value"}}'
response = req.get_response(app)
expected_unsupported_type_body = ('{"badRequest": '
'{"message": "Unsupported Content-Type", "code": 400}}')
self.assertEqual(response.status_int, 400)
self.assertEqual(expected_unsupported_type_body, response.body)
def test_resource_call_with_method_delete(self):
class Controller(object):
def delete(self, req, id):
return "success"
# verify the method: DELETE
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests/test_id', method="DELETE")
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'success')
# ignore the body
req.body = '{"body": {"key": "value"}}'
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'success')
def test_resource_not_authorized(self):
class Controller(object):
def index(self, req):
raise exception.Forbidden()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.status_int, 403)
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'index', None, '')
actual = resource.dispatch(method, None, {'pants': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_get_method_unknown_controller_method(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(AttributeError, resource.get_method,
None, 'create', None, '')
def test_get_method_action_json(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.MalformedRequestBody, resource.get_method,
None, 'action', 'application/json', '{}')
def test_get_method_unknown_controller_action(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(KeyError, resource.get_method,
None, 'action', 'application/json',
'{"barAction": true}')
def test_get_method_action_method(self):
class Controller(object):
def action(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/xml',
                                                 '<fooAction>true</fooAction>')
self.assertEqual(controller.action, method)
def test_get_action_args(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(resource.get_action_args(env), expected)
def test_get_body_bad_content(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/none'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual(body, '')
def test_get_body_no_content_type(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual(body, 'foo')
def test_get_body_no_content_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = ''
content_type, body = resource.get_body(request)
self.assertEqual('application/json', content_type)
self.assertEqual(body, '')
def test_get_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, 'application/json')
self.assertEqual(body, 'foo')
def test_get_request_id_with_dict_response_body(self):
class Controller(wsgi.Controller):
def index(self, req):
return {'foo': 'bar'}
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertIn('nova.context', req.environ)
self.assertEqual(response.body, '{"foo": "bar"}')
self.assertEqual(response.status_int, 200)
def test_no_request_id_with_str_response_body(self):
class Controller(wsgi.Controller):
def index(self, req):
return 'foo'
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
# NOTE(alaski): This test is really to ensure that a str response
# doesn't error. Not having a request_id header is a side effect of
# our wsgi setup, ideally it would be there.
expected_header = self.get_req_id_header_name(req)
self.assertFalse(hasattr(response.headers, expected_header))
self.assertEqual(response.body, 'foo')
self.assertEqual(response.status_int, 200)
def test_get_request_id_no_response_body(self):
class Controller(object):
def index(self, req):
pass
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertIn('nova.context', req.environ)
self.assertEqual(response.body, '')
self.assertEqual(response.status_int, 200)
def test_deserialize_badtype(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.InvalidContentType,
resource.deserialize,
controller.index, 'application/none', 'foo')
def test_deserialize_default(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/json', 'foo')
self.assertEqual(obj, 'json')
def test_register_actions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
@wsgi.action('barAction')
def _action_bar(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_actions)
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({
'fooAction': extended._action_foo,
'barAction': extended._action_bar,
}, resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp, id, body):
return None
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_extensions)
self.assertEqual({}, resource.wsgi_action_extensions)
extended = ControllerExtended()
resource.register_extensions(extended)
self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
self.assertEqual({'fooAction': [extended._action_foo]},
resource.wsgi_action_extensions)
def test_get_method_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'index', None, '')
self.assertEqual(method, controller.index)
self.assertEqual(extensions, [extended.index])
def test_get_method_action_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
class ControllerExtended(wsgi.Controller):
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(method, controller._action_foo)
self.assertEqual(extensions, [extended._action_foo])
def test_get_method_action_whitelist_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('create')
def _create(self, req, body):
pass
@wsgi.action('delete')
def _delete(self, req, id):
pass
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_actions(extended)
method, extensions = resource.get_method(None, 'create',
'application/json',
'{"create": true}')
self.assertEqual(method, extended._create)
self.assertEqual(extensions, [])
method, extensions = resource.get_method(None, 'delete', None, None)
self.assertEqual(method, extended._delete)
self.assertEqual(extensions, [])
def test_pre_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, [])
self.assertIsNone(response)
self.assertEqual(list(post), [extension2, extension1])
def test_pre_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield
called.append('post1')
def extension2(req):
called.append('pre2')
yield
called.append('post2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
post = list(post)
self.assertEqual(called, ['pre1', 'pre2'])
self.assertIsNone(response)
self.assertEqual(len(post), 2)
self.assertTrue(inspect.isgenerator(post[0]))
self.assertTrue(inspect.isgenerator(post[1]))
for gen in post:
try:
gen.send(None)
except StopIteration:
continue
self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
def test_pre_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield 'foo'
def extension2(req):
called.append('pre2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, ['pre1'])
self.assertEqual(response, 'foo')
self.assertEqual(post, [])
def test_post_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertIsNone(response)
def test_post_process_extensions_regular_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return 'foo'
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
def test_post_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
yield
called.append(1)
def extension2(req):
yield
called.append(2)
ext1 = extension1(None)
ext1.next()
ext2 = extension2(None)
ext2.next()
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertIsNone(response)
def test_post_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
yield
called.append(1)
def extension2(req):
yield
called.append(2)
yield 'foo'
ext1 = extension1(None)
ext1.next()
ext2 = extension2(None)
ext2.next()
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
def test_resource_exception_handler_type_error(self):
# A TypeError should be translated to a Fault/HTTP 400.
def foo(a,):
return a
try:
with wsgi.ResourceExceptionHandler():
foo() # generate a TypeError
self.fail("Should have raised a Fault (HTTP 400)")
except wsgi.Fault as fault:
self.assertEqual(400, fault.status_int)
def test_resource_headers_are_utf8(self):
resp = webob.Response(status_int=202)
resp.headers['x-header1'] = 1
resp.headers['x-header2'] = u'header2'
resp.headers['x-header3'] = u'header3'
class Controller(object):
def index(self, req):
return resp
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
for hdr, val in response.headers.iteritems():
# All headers must be utf8
self.assertIsInstance(hdr, str)
self.assertIsInstance(val, str)
self.assertEqual(response.headers['x-header1'], '1')
self.assertEqual(response.headers['x-header2'], 'header2')
self.assertEqual(response.headers['x-header3'], 'header3')
def test_resource_valid_utf8_body(self):
class Controller(object):
def update(self, req, id, body):
return body
req = webob.Request.blank('/tests/test_id', method="PUT")
body = """ {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
expected_body = '{"name": "\\u6982\\u5ff5"}'
req.body = body
req.headers['Content-Type'] = 'application/json'
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.body, expected_body)
self.assertEqual(response.status_int, 200)
def test_resource_invalid_utf8(self):
class Controller(object):
def update(self, req, id, body):
return body
req = webob.Request.blank('/tests/test_id', method="PUT")
body = """ {"name": "\xf0\x28\x8c\x28" } """
req.body = body
req.headers['Content-Type'] = 'application/json'
app = fakes.TestRouter(Controller())
self.assertRaises(UnicodeDecodeError, req.get_response, app)
class ResponseObjectTest(test.NoDBTestCase):
def test_default_code(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.code, 200)
def test_modified_code(self):
robj = wsgi.ResponseObject({})
robj._default_code = 202
self.assertEqual(robj.code, 202)
def test_override_default_code(self):
robj = wsgi.ResponseObject({}, code=404)
self.assertEqual(robj.code, 404)
def test_override_modified_code(self):
robj = wsgi.ResponseObject({}, code=404)
robj._default_code = 202
self.assertEqual(robj.code, 404)
def test_set_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj.headers, {'header': 'foo'})
def test_get_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj['hEADER'], 'foo')
def test_del_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
del robj['hEADER']
self.assertNotIn('header', robj.headers)
def test_header_isolation(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
hdrs = robj.headers
hdrs['hEADER'] = 'bar'
self.assertEqual(robj['hEADER'], 'foo')
def test_default_serializers(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.serializers, {})
def test_bind_serializers(self):
robj = wsgi.ResponseObject({}, json='foo')
robj._bind_method_serializers(dict(xml='bar', json='baz'))
self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
def test_get_serializer(self):
robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
_mtype, serializer = robj.get_serializer(content_type)
self.assertEqual(serializer, mtype)
def test_get_serializer_defaults(self):
robj = wsgi.ResponseObject({})
default_serializers = dict(json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
self.assertRaises(exception.InvalidContentType,
robj.get_serializer, content_type)
_mtype, serializer = robj.get_serializer(content_type,
default_serializers)
self.assertEqual(serializer, mtype)
def test_serialize(self):
class JSONSerializer(object):
def serialize(self, obj):
return 'json'
class AtomSerializer(object):
def serialize(self, obj):
return 'atom'
robj = wsgi.ResponseObject({}, code=202,
json=JSONSerializer,
atom=AtomSerializer)
robj['X-header1'] = 'header1'
robj['X-header2'] = 'header2'
robj['X-header3'] = 3
robj['X-header-unicode'] = u'header-unicode'
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
request = wsgi.Request.blank('/tests/123')
response = robj.serialize(request, content_type)
self.assertEqual(response.headers['Content-Type'], content_type)
for hdr, val in response.headers.iteritems():
# All headers must be utf8
self.assertIsInstance(hdr, str)
self.assertIsInstance(val, str)
self.assertEqual(response.headers['X-header1'], 'header1')
self.assertEqual(response.headers['X-header2'], 'header2')
self.assertEqual(response.headers['X-header3'], '3')
self.assertEqual(response.status_int, 202)
self.assertEqual(response.body, mtype)
class ValidBodyTest(test.NoDBTestCase):
def setUp(self):
super(ValidBodyTest, self).setUp()
self.controller = wsgi.Controller()
def test_is_valid_body(self):
body = {'foo': {}}
self.assertTrue(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_none(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body(None, 'foo'))
def test_is_valid_body_empty(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body({}, 'foo'))
def test_is_valid_body_no_entity(self):
wsgi.Resource(controller=None)
body = {'bar': {}}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_malformed_entity(self):
wsgi.Resource(controller=None)
body = {'foo': 'bar'}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
|
{
"content_hash": "b22136f83a957f1be445bae4654258f5",
"timestamp": "",
"source": "github",
"line_count": 1156,
"max_line_length": 79,
"avg_line_length": 36.13235294117647,
"alnum_prop": 0.5755464578993991,
"repo_name": "cloudbase/nova-virtualbox",
"id": "d431fbb7ce2ca785684ea82547640157e1d57576",
"size": "42342",
"binary": false,
"copies": "1",
"ref": "refs/heads/virtualbox_driver",
"path": "nova/tests/unit/api/openstack/test_wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16016453"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "497954"
}
],
"symlink_target": ""
}
|
"""
Example code to call Rosette API to get the complete morphological analysis of a piece of text.
"""
import argparse
import pprint
from rosette.api import API, DocumentParameters
parser = argparse.ArgumentParser(description="Get the complete morphological analysis of a piece of text")
parser.add_argument("--key", required=True, help="Rosette API key")
parser.add_argument("--service_url", nargs="?", help="Optional user service URL")
args = parser.parse_args()
# Create an API instance
if args.service_url:
api = API(service_url=args.service_url, user_key=args.key)
else:
api = API(user_key=args.key)
params = DocumentParameters()
params["content"] = u"The quick brown fox jumped over the lazy dog. Yes he did."
result = api.morphology(params)
pprint.pprint(result)
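# The call returns the API's JSON response decoded into a plain dict, so
# individual fields can be read directly once the schema is known. For
# example (the field name is illustrative; consult the Rosette API docs for
# the endpoint version in use):
#
#   print(result.get("tokens"))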
|
{
"content_hash": "10e7b5cf21f3c103a0a595e8b5031cd9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 106,
"avg_line_length": 31.4,
"alnum_prop": 0.7477707006369426,
"repo_name": "JamieMoon/rosette-api",
"id": "ae13be19e129e4726808d937429a0ab29f5f65e9",
"size": "810",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/target/github/rosette-api/python/examples/morphology_complete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "151815"
},
{
"name": "Shell",
"bytes": "171"
}
],
"symlink_target": ""
}
|
from PIL import Image
from six import StringIO
from django.core.files.uploadedfile import InMemoryUploadedFile
def gen_watermark_and_thumbnail_of_image(source, watermark_path=None, thumbnail_width=640):
"""
:source: an instance of django's InMemoryUploadedFile
"""
# TODO: need to specify the real watermark_path
with Image.open(source) as source_img, Image.open(watermark_path) as watermark_source_img:
file_ext = source.name.split('.')[-1].lower()
mimetype = 'image/{}'.format(file_ext)
        # PIL's save() expects the registered format name, e.g. 'JPEG' not 'jpg'
        pil_format = 'JPEG' if file_ext in ('jpg', 'jpeg') else file_ext
# use the copy version
watermark_img = watermark_source_img.copy()
ratio = 0.55
wm_ratio = watermark_img.size[1] / float(watermark_img.size[0])
wm_width = int(source_img.size[0] * ratio)
wm_height = int(wm_ratio * wm_width)
wm_left = int((source_img.size[0] - wm_width) / 2.0)
wm_top = int((source_img.size[1] - wm_height) / 2.0)
wm_right = wm_left + wm_width
wm_bottom = wm_top + wm_height
box = (wm_left, wm_top, wm_right, wm_bottom)
watermark_img.thumbnail((wm_width, wm_height))
# paste the watermark photo to the original one
source_img.paste(watermark_img, box, watermark_img)
# source_img.show()
        source_img_io = StringIO()
        source_img.save(source_img_io, pil_format)
watermarked_file = InMemoryUploadedFile(source_img_io, None, source.name, mimetype, source_img_io.len, None)
# gen thumbnail
t_width = thumbnail_width
source_ratio = source_img.size[1] / float(source_img.size[0])
t_height = int(t_width * source_ratio)
thumbnail = source_img.resize((t_width, t_height))
# thumbnail.show()
        thumbnail_io = StringIO()
        thumbnail.save(thumbnail_io, pil_format)
thumbnail_file = InMemoryUploadedFile(thumbnail_io, None, source.name, mimetype, thumbnail_io.len, None)
return watermarked_file, thumbnail_file
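# A minimal usage sketch (hypothetical Django view code; the "photo" form
# field, the watermark location and the model attributes are all assumptions):
#
#   def upload_view(request):
#       source = request.FILES["photo"]  # an InMemoryUploadedFile
#       watermarked, thumb = gen_watermark_and_thumbnail_of_image(
#           source,
#           watermark_path="/path/to/watermark.png",
#           thumbnail_width=640)
#       # Both return values are InMemoryUploadedFile instances, so they can
#       # be assigned straight to ImageField attributes before model.save().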
|
{
"content_hash": "71fb6c714ef91206dd11e4e3c689f420",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 116,
"avg_line_length": 38.88461538461539,
"alnum_prop": 0.6409495548961425,
"repo_name": "theo-l/django_common",
"id": "40a72f6f70fc2491ecae5c6befb8197e8ccf9993",
"size": "2168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/image_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44056"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, print_function)
# From system
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import boto3
from boto3.session import Session
from boto3.s3.transfer import (S3Transfer, TransferConfig)
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from yaml import load
try:
# LibYAML based parser and emitter
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import os
import time
import glob
import logging
import subprocess
import multiprocessing
from multiprocessing.dummy import Pool
# From package
from .timeout import timeout
from .utils import (add_s3_arguments, base_parser,
                    map_wrap, get_s3_connection_host)
DEFAULT_CONCURRENCY = max(multiprocessing.cpu_count() - 1, 1)
BUFFER_SIZE = 64 # Default bufsize is 64M
MBFACTOR = float(1<<20)
LZOP_BIN = 'lzop'
MAX_RETRY_COUNT = 3
SLEEP_TIME = 2
UPLOAD_TIMEOUT = 600
MULTI_PART_UPLOAD_THRESHOLD = 60 # If file size > 60M, use multi part upload
logger = logging.getLogger(__name__)
def check_lzop():
try:
subprocess.call([LZOP_BIN, '--version'])
except OSError:
print("{!s} not found on path".format(LZOP_BIN))
def transfer_data(path, size, compress):
"""
Returns a generator that yields compressed chunks of
the given file_path
compression is done with lzop
"""
if compress == 'True':
print("Use lzop")
lzop = subprocess.Popen(
(LZOP_BIN, '--stdout', path),
bufsize=size,
stdout=subprocess.PIPE
)
while True:
chunk = lzop.stdout.read(size)
if not chunk:
break
yield StringIO(chunk)
else:
print("Use file read")
with open(path, 'rb') as f:
while True:
chunk = f.read(size)
if not chunk:
break
yield StringIO(chunk)
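# A hedged usage sketch for transfer_data (the file name is illustrative, and
# `size` is consumed as a raw byte count, so scale it explicitly):
#
#   for i, chunk in enumerate(
#           transfer_data('/tmp/example.db', int(4 * MBFACTOR), 'True')):
#       print(i, len(chunk.getvalue()))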
def get_bucket(
s3_bucket, aws_access_key_id,
aws_secret_access_key, s3_connection_host):
connection = S3Connection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
host=s3_connection_host
)
return connection.get_bucket(s3_bucket, validate=False)
def destination_path(s3_base_path, file_path, compressed=False):
"""
Set destination file path in AWS S3
Note:
For files smaller than 20M, we do not compress
"""
    if os.path.getsize(file_path) <= int(MULTI_PART_UPLOAD_THRESHOLD * MBFACTOR):
        compressed = False
suffix = compressed and '.lzo' or ''
return '/'.join([s3_base_path, file_path + suffix])
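# Illustrative result (paths are made up): with s3_base_path 'backups/node1'
# and file_path '/var/lib/cassandra/data/ks/t/f.db', this returns
# 'backups/node1//var/lib/cassandra/data/ks/t/f.db'. Note the doubled slash,
# because the absolute file_path is joined in verbatim.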
@map_wrap
def upload_file(bucket, source, destination, s3_ssenc, bufsize, compress_data):
completed = False
retry_count = 0
#print("compress_data is {0}({1})".format(compress_data, type(compress_data)))
# If file size less than MULTI_PART_UPLOAD_THRESHOLD,
# use single part upload
source_size = os.path.getsize(source)
if source_size <= int(MULTI_PART_UPLOAD_THRESHOLD * MBFACTOR):
k = Key(bucket) # Initialize S3 bucket object
while not completed and retry_count < MAX_RETRY_COUNT:
try:
k.key = destination # Prepend S3 path prior to uploading
bytes_written = k.set_contents_from_filename(source, encrypt_key=s3_ssenc)
completed = True
print("[SU] - {0}({1}) upload completed, number of bytes wrriten={2}".format(source, source_size, bytes_written))
            except Exception:
                print("Error uploading file {!s} to {!s}. "
                      "Retry count: {}".format(source, destination, retry_count))
retry_count = retry_count + 1
if retry_count >= MAX_RETRY_COUNT:
print("Retried too many times uploading file")
raise
else: # Big file, use multi part upload
mp = bucket.initiate_multipart_upload(
destination,
encrypt_key=s3_ssenc)
while not completed and retry_count < MAX_RETRY_COUNT:
try:
for i, chunk in enumerate(transfer_data(source, bufsize, compress_data)):
mp.upload_part_from_file(chunk, i+1)
mp.complete_upload() # Finish the upload
completed = True
print("[MU] - {0} upload completed, xml={1}".format(source, k.to_xml()))
except Exception:
print("Error uploading file {!s} to {!s}.\
Retry count: {}".format(source, destination, retry_count))
cancel_upload(bucket, mp, destination)
retry_count = retry_count + 1
if retry_count >= MAX_RETRY_COUNT:
print("Retried too many times uploading file")
raise
@timeout(UPLOAD_TIMEOUT)
def upload_chunk(mp, chunk, index):
mp.upload_part_from_file(chunk, index)
def cancel_upload(bucket, mp, remote_path):
"""
Safe way to cancel a multipart upload
sleeps SLEEP_TIME seconds and then makes sure that there are not parts left
in storage
"""
while True:
try:
time.sleep(SLEEP_TIME)
mp.cancel_upload()
time.sleep(SLEEP_TIME)
for mp in bucket.list_multipart_uploads():
if mp.key_name == remote_path:
mp.cancel_upload()
return
except Exception:
logger.exception("Error while cancelling multipart upload")
def put_from_manifest(
s3_bucket, s3_connection_host, s3_ssenc, s3_base_path,
aws_access_key_id, aws_secret_access_key, manifest,
bufsize, compress_data, concurrency=None, incremental_backups=False):
"""
Uploads files listed in a manifest to amazon S3
to support larger than 5GB files multipart upload is used (chunks of 60MB)
files are uploaded compressed with lzop, the .lzo suffix is appended
"""
bucket = get_bucket(
s3_bucket, aws_access_key_id,
aws_secret_access_key, s3_connection_host)
# Create a boto3 session
session = Session(aws_access_key_id = aws_access_key_id, aws_secret_access_key = aws_secret_access_key, region_name='us-east-1')
client = session.client('s3')
    config = TransferConfig(
        multipart_threshold=int(MULTI_PART_UPLOAD_THRESHOLD * MBFACTOR),
        max_concurrency=4)
transfer = S3Transfer(client, config)
boto3.set_stream_logger('botocore', logging.INFO)
    with open(manifest, 'r') as manifest_fp:
        files = manifest_fp.read().splitlines()
for f in files:
file_path = s3_base_path + f
print("boto3, upload file {0} to {1}: {2}".format(f, s3_bucket, file_path))
transfer.upload_file(f, s3_bucket, file_path)
def get_data_path(conf_path):
"""Retrieve cassandra data_file_directories from cassandra.yaml"""
config_file_path = os.path.join(conf_path, 'cassandra.yaml')
cassandra_configs = {}
with open(config_file_path, 'r') as f:
cassandra_configs = load(f, Loader=Loader)
data_paths = cassandra_configs['data_file_directories']
return data_paths
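# The relevant cassandra.yaml fragment looks like this (directories are
# illustrative). data_file_directories is a YAML list, so get_data_path()
# returns a list of paths:
#
#   data_file_directories:
#       - /var/lib/cassandra/data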
def create_upload_manifest(
snapshot_name, snapshot_keyspaces, snapshot_table,
conf_path, manifest_path, exclude_tables, incremental_backups=False):
if snapshot_keyspaces:
keyspace_globs = snapshot_keyspaces.split()
else:
keyspace_globs = ['*']
if snapshot_table:
table_glob = snapshot_table
else:
table_glob = '*'
data_paths = get_data_path(conf_path)
files = []
    exclude_tables_list = exclude_tables.split(',') if exclude_tables else []
for data_path in data_paths:
for keyspace_glob in keyspace_globs:
logger.info("Creating data path: {0}/{1}".format(data_path, keyspace_glob))
path = [
data_path,
keyspace_glob,
table_glob
]
if incremental_backups:
path += ['backups']
else:
path += ['snapshots', snapshot_name]
path += ['*']
path = os.path.join(*path)
if len(exclude_tables_list) > 0:
for f in glob.glob(os.path.join(path)):
# Get the table name
# The current format of a file path looks like:
# /var/lib/cassandra/data03/system/compaction_history/snapshots/20151102182658/system-compaction_history-jb-6684-Summary.db
if f.split('/')[-4] not in exclude_tables_list:
files.append(f.strip())
else:
                files.extend(f.strip() for f in glob.glob(os.path.join(path)))
with open(manifest_path, 'w') as manifest:
for f in files:
manifest.write(f + '\n')
def main():
subparsers = base_parser.add_subparsers(
title='subcommands', dest='subcommand')
base_parser.add_argument(
'--incremental_backups', action='store_true', default=False)
put_parser = subparsers.add_parser(
'put', help="put files on s3 from a manifest")
manifest_parser = subparsers.add_parser(
'create-upload-manifest', help="put files on s3 from a manifest")
# put arguments
put_parser = add_s3_arguments(put_parser)
put_parser.add_argument(
'--bufsize',
required=False,
default=BUFFER_SIZE,
type=int,
help="Compress and upload buffer size")
put_parser.add_argument(
'--compress_data',
required=False,
default=False,
help="Compress data? Default no")
put_parser.add_argument(
'--manifest',
required=True,
help="The manifest containing the files to put on s3")
put_parser.add_argument(
'--concurrency',
required=False,
default=DEFAULT_CONCURRENCY,
type=int,
help="Compress and upload concurrent processes")
# create-upload-manifest arguments
manifest_parser.add_argument('--snapshot_name', required=True, type=str)
manifest_parser.add_argument('--conf_path', required=True, type=str)
manifest_parser.add_argument('--manifest_path', required=True, type=str)
manifest_parser.add_argument(
'--snapshot_keyspaces', default='', required=False, type=str)
manifest_parser.add_argument(
'--snapshot_table', required=False, default='', type=str)
manifest_parser.add_argument(
'--exclude_tables', required=False, type=str)
args = base_parser.parse_args()
subcommand = args.subcommand
if subcommand == 'create-upload-manifest':
create_upload_manifest(
args.snapshot_name,
args.snapshot_keyspaces,
args.snapshot_table,
args.conf_path,
args.manifest_path,
args.exclude_tables,
args.incremental_backups
)
if subcommand == 'put':
if args.compress_data:
check_lzop()
put_from_manifest(
args.s3_bucket_name,
get_s3_connection_host(args.s3_bucket_region),
args.s3_ssenc,
args.s3_base_path,
args.aws_access_key_id,
args.aws_secret_access_key,
args.manifest,
args.bufsize,
args.compress_data,
args.concurrency,
args.incremental_backups
)
if __name__ == '__main__':
main()
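# Illustrative invocations (bucket, paths and snapshot name are assumptions;
# the put subcommand additionally accepts the S3 flags registered by
# add_s3_arguments, which live elsewhere in this package):
#
#   python -m cassandra_snapshotter.agent create-upload-manifest \
#       --snapshot_name 20151102182658 --conf_path /etc/cassandra \
#       --manifest_path /tmp/manifest
#
#   python -m cassandra_snapshotter.agent put --manifest /tmp/manifest ...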
|
{
"content_hash": "0a990fe4899a2efe21678fc4f45d7436",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 143,
"avg_line_length": 33.971014492753625,
"alnum_prop": 0.6030716723549489,
"repo_name": "kikinteractive/cassandra_snapshotter",
"id": "23b878593c2f694a78b2d4a231f7e809222d82d6",
"size": "11720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cassandra_snapshotter/agent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53962"
}
],
"symlink_target": ""
}
|