function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Register the new auth type before any constraint references it.
    op.execute("INSERT INTO auth_type VALUES ('webauthn_auth')")
    # Drop the old check; it is replaced below by one that also allows
    # 'webauthn_auth'.
    op.drop_constraint('ck_users_mobile_or_email_auth', 'users', type_=None, schema=None)
    # NOT VALID skips re-checking existing rows (cheap on a large users
    # table); the constraint is enforced only for new/updated rows until
    # a later VALIDATE CONSTRAINT.
    op.execute("""
        ALTER TABLE users ADD CONSTRAINT "ck_user_has_mobile_or_other_auth"
        CHECK (auth_type in ('email_auth', 'webauthn_auth') or mobile_number is not null)
        NOT VALID
    """)
    # ### end Alembic commands ###
56,
23,
56,
6,
1447855037
] |
def post_install(install_path):
    """
    Post install script for pyCUDA applications to warm the cubin cache.

    Compiles every ``kernel/*.cu`` under *install_path* with pycuda's
    SourceModule so the compiled cubins are written to
    ``<install_path>/cache``.

    :param install_path: root directory of the installed package.
    """
    # Imported lazily so importing this module does not require a GPU.
    import pycuda.autoinit
    from pycuda.compiler import SourceModule
    CACHE_DIR = os.path.join(install_path, 'cache')
    if not os.path.exists(CACHE_DIR):
        os.mkdir(CACHE_DIR)
    for kernel in glob.glob(os.path.join(install_path, 'kernel', '*.cu')):
        # BUG FIX: the original `open(kernel).read()` leaked the file
        # handle until garbage collection; `with` closes it promptly.
        with open(kernel) as src:
            SourceModule(src.read(), cache_dir=CACHE_DIR)
1,
1,
1,
5,
1416963903
] |
def run(self):
    """Run the standard install step, then warm the cubin cache."""
    install.run(self)
    # Kernels live inside the installed package: <install_lib>/sciguppy
    post_install(os.path.join(self.install_lib, 'sciguppy'))
1,
1,
1,
5,
1416963903
] |
def DescribeMesh(self, request):
    """Query mesh details.

    :param request: Request instance for DescribeMesh.
    :type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshRequest`
    :rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshResponse`
    :raises TencentCloudSDKException: on an API error response or any
        transport/serialization failure.
    """
    try:
        params = request._serialize()
        body = self.call("DescribeMesh", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeMeshResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: exceptions have no `.message` attribute on
            # Python 3; `str(e)` works on both 2 and 3.
            raise TencentCloudSDKException(str(e), str(e))
42,
20,
42,
1,
1504755582
] |
def __init__(self, scores):
    # Raw sequence of game scores, read by the analysis methods
    # (e.g. personal_best) without copying.
    self.scores = scores
1,
1,
1,
3,
1506170251
] |
def personal_best(self):
    """Return the highest score recorded (raises ValueError when empty)."""
    return max(self.scores)
1,
1,
1,
3,
1506170251
] |
def main(argv=None):
    """
    Make a confidence report and save it to disk.

    argv layout: [script_name, model_filepath, adv_x_filepath, ...].
    NOTE(review): the default argv=None makes `len(argv)` raise TypeError;
    callers presumably always pass sys.argv — confirm.
    """
    assert len(argv) >= 3
    _name_of_script = argv[0]
    model_filepath = argv[1]
    adv_x_filepaths = argv[2:]
    sess = tf.Session()
    with sess.as_default():
        # Loading may build TF ops, so it runs inside the session context.
        model = serial.load(model_filepath)
    # Rebuild the dataset the model was trained on, restricted by FLAGS.
    factory = model.dataset_factory
    factory.kwargs['train_start'] = FLAGS.train_start
    factory.kwargs['train_end'] = FLAGS.train_end
    factory.kwargs['test_start'] = FLAGS.test_start
    factory.kwargs['test_end'] = FLAGS.test_end
    dataset = factory()
    adv_x_list = [np.load(filepath) for filepath in adv_x_filepaths]
    x, y = dataset.get_set(FLAGS.which_set)
    for adv_x in adv_x_list:
        assert adv_x.shape == x.shape, (adv_x.shape, x.shape)
        # Make sure these were made for the right dataset with right scaling
        # arguments, etc.
        assert adv_x.min() >= 0. - dataset.kwargs['center'] * dataset.max_val
        assert adv_x.max() <= dataset.max_val
        data_range = dataset.max_val * (1. + dataset.kwargs['center'])
        # Heuristic sanity check: examples should span most of the range.
        if adv_x.max() - adv_x.min() <= .8 * data_range:
            warnings.warn("Something is weird. Your adversarial examples use "
                          "less than 80% of the data range."
                          "This might mean you generated them for a model with "
                          "inputs in [0, 1] and are now using them for a model "
                          "with inputs in [0, 255] or something like that. "
                          "Or it could be OK if you're evaluating on a very small "
                          "batch.")
    # Default report path: <model>_bundled_examples_report.joblib
    report_path = FLAGS.report_path
    if report_path is None:
        suffix = "_bundled_examples_report.joblib"
        assert model_filepath.endswith('.joblib')
        report_path = model_filepath[:-len('.joblib')] + suffix
    goal = MaxConfidence()
    bundle_examples_with_goal(sess, model, adv_x_list, y, goal,
                              report_path, batch_size=FLAGS.batch_size)
5732,
1384,
5732,
39,
1473899284
] |
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline client plus the shared (de)serializers and configuration
    # used by every operation in this operation group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # LRO completion callback. `cls` is the caller-supplied custom
    # deserializer captured from the enclosing method; this operation
    # returns no body, hence the None payload (and implicit None when
    # no `cls` was given).
    if cls:
        return cls(pipeline_response, None, {})
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # LRO completion callback: deserialize the final RouteFilterRule,
    # honoring a caller-supplied `cls` override from the enclosing scope.
    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # Same LRO callback shape as its sibling: deserialize the final
    # RouteFilterRule, with optional caller-supplied `cls` override.
    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
3526,
2256,
3526,
986,
1335285972
] |
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
    """Build the GET request for one page of route-filter rules.

    First page: format the operation URL with path/query parameters.
    Later pages: *next_link* already embeds the full query string, so
    only headers are attached.
    """
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    if not next_link:
        # Construct URL
        url = self.list_by_route_filter.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
    else:
        # next_link is used verbatim; no extra query parameters.
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
3526,
2256,
3526,
986,
1335285972
] |
def setup_parser(self, parser):
    """Register this command's CLI arguments on *parser*."""
    parser.add_argument('where', help='Path to new module')
22,
6,
22,
14,
1441120492
] |
def __init__(self, data):
    # Binary-tree node: payload plus left/right children
    # (None marks a missing child).
    self.data = data
    self.left = None
    self.right = None
312,
131,
312,
1,
1474914708
] |
def __init__(self):
    # Empty tree; root is created by the first insert.
    self.root = None
312,
131,
312,
1,
1474914708
] |
def _insert(self, data, current_node):
    """Recursively place *data* in the subtree rooted at *current_node*.

    Duplicates (data == node.data) go to the left subtree.
    """
    if data <= current_node.data:
        if current_node.left is not None:
            self._insert(data, current_node.left)
        else:
            current_node.left = Node(data)
    else:
        if current_node.right is not None:
            self._insert(data, current_node.right)
        else:
            current_node.right = Node(data)
312,
131,
312,
1,
1474914708
] |
def _inorder(self, current_node):
    """Print the subtree of *current_node* in sorted (in-order) order."""
    if current_node is None:
        return
    self._inorder(current_node.left)
    # A trailing " -> " is printed after every element, including the last.
    print(current_node.data, " -> ", end='')
    self._inorder(current_node.right)
312,
131,
312,
1,
1474914708
] |
def lca_bst(root, value1, value2):
    """Return the data of the lowest common ancestor of two values in a BST.

    Walk down from the root: while both values lie strictly on the same
    side of the current node, descend that side; the first node that
    splits the two values (or matches one) is the LCA. Returns None for
    an empty tree.
    """
    node = root
    while node is not None:
        if node.data < value1 and node.data < value2:
            node = node.right
        elif node.data > value1 and node.data > value2:
            node = node.left
        else:
            return node.data
312,
131,
312,
1,
1474914708
] |
def assertTrue(self, expr, msg=None):
    # Compatibility shim: map assertTrue onto the legacy failUnless API.
    self.failUnless(expr,msg=msg)
75,
39,
75,
4,
1489383147
] |
def setUp(self):
    # Route the deadlock helper's debug output to stdout when the suite
    # runs in verbose mode.
    if verbose:
        dbutils._deadlock_VerboseFile = sys.stdout
75,
39,
75,
4,
1489383147
] |
def tearDown(self):
    """Close the DB and environment, then remove the test home directory."""
    self.d.close()
    self.env.close()
    test_support.rmtree(self.homeDir)
75,
39,
75,
4,
1489383147
] |
def setEnvOpts(self):
    # Hook for subclasses to configure the DB environment; base is a no-op.
    pass
75,
39,
75,
4,
1489383147
] |
def makeData(self, key):
    """Build a test record: the key repeated five times, DASH-separated."""
    return DASH.join(5 * [key])
75,
39,
75,
4,
1489383147
] |
def test01_1WriterMultiReaders(self):
    """Banner for the one-writer/multi-reader test (Python 2 print syntax)."""
    if verbose:
        print '\n', '-=' * 30
        print "Running %s.test01_1WriterMultiReaders..." % \
              self.__class__.__name__
75,
39,
75,
4,
1489383147
] |
def writerThread(self, d, keys, readers):
    """Writer-thread body (excerpt; the write loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
75,
39,
75,
4,
1489383147
] |
def readerThread(self, d, readerNum):
    """Reader-thread body (excerpt; the read loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
75,
39,
75,
4,
1489383147
] |
def setEnvOpts(self):
    # Enable automatic deadlock detection for this environment variant.
    self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
75,
39,
75,
4,
1489383147
] |
def test02_SimpleLocks(self):
    """Banner for the simple-locks test (Python 2 print syntax)."""
    if verbose:
        print '\n', '-=' * 30
        print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
75,
39,
75,
4,
1489383147
] |
def writerThread(self, d, keys, readers):
    """Writer-thread body (excerpt; the write loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
    if verbose:
        # NOTE(review): start/stop are not defined in this excerpt — they
        # come from the omitted part of the original function.
        print "%s: creating records %d - %d" % (name, start, stop)
75,
39,
75,
4,
1489383147
] |
def readerThread(self, d, readerNum):
    """Reader-thread body (excerpt; the read loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
75,
39,
75,
4,
1489383147
] |
def setEnvOpts(self):
    # Deliberately no automatic deadlock detection here; this variant
    # drives lock_detect manually from deadlockThread.
    #self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
    pass
75,
39,
75,
4,
1489383147
] |
def test03_ThreadedTransactions(self):
    """Banner for the threaded-transactions test (Python 2 print syntax)."""
    if verbose:
        print '\n', '-=' * 30
        print "Running %s.test03_ThreadedTransactions..." % \
              self.__class__.__name__
75,
39,
75,
4,
1489383147
] |
def writerThread(self, d, keys, readers):
    """Writer-thread body (excerpt; the write loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
75,
39,
75,
4,
1489383147
] |
def readerThread(self, d, readerNum):
    """Reader-thread body (excerpt; the read loop is outside this view)."""
    import sys
    # Thread-name API: Python 2 uses getName(); Python 3, the .name property.
    if sys.version_info[0] < 3 :
        name = currentThread().getName()
    else :
        name = currentThread().name
75,
39,
75,
4,
1489383147
] |
def deadlockThread(self):
    """Background thread: periodically run BDB deadlock detection.

    Loops until the test body clears self.doLockDetect.
    """
    self.doLockDetect = True
    while self.doLockDetect:
        time.sleep(0.05)
        try:
            # Abort a random lock holder whenever a deadlock cycle exists.
            aborted = self.env.lock_detect(
                db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
            if verbose and aborted:
                print "deadlock: Aborted %d deadlocked transaction(s)" \
                      % aborted
        except db.DBError:
            # Environment may be mid-teardown; detection errors are benign here.
            pass
75,
39,
75,
4,
1489383147
] |
def test_suite():
suite = unittest.TestSuite() | babyliynfg/cross | [
75,
39,
75,
4,
1489383147
] |
def getlines():
    """Return the EQ(...) definition lines from cffi's ``c/commontypes.c``.

    Skips the whole test when the file is absent (e.g. an installed cffi
    without its source tree).
    """
    try:
        f = open(os.path.join(os.path.dirname(cffi.__file__),
                              '..', 'c', 'commontypes.c'))
    except IOError:
        py.test.skip("cannot find ../c/commontypes.c")
    # BUG FIX: close the file even if readlines/filtering raises.
    with f:
        return [line for line in f.readlines()
                if line.strip().startswith('EQ(')]
142,
18,
142,
10,
1458321581
] |
def test_dependencies():
    """Every type an EQ() entry expands to must itself be an EQ() entry."""
    # group(1): the defined name; group(2): the (optional) target type.
    r = re.compile(r'EQ[(]"([^"]+)",(?:\s*"([A-Z0-9_]+)\s*[*]*"[)])?')
    lines = getlines()
    d = {}
    for line in lines:
        match = r.search(line)
        if match is not None:
            d[match.group(1)] = match.group(2)
    for value in d.values():
        if value:
            assert value in d
142,
18,
142,
10,
1458321581
] |
def format_msg(message, headline):
    """Prefix *message* with the current source location from PARAMS,
    under the given *headline*."""
    return "Line {0}:\n    {1}\n{2}:\n{3}".format(
        PARAMS["lineno"], PARAMS["source"], headline, message
    )
19,
6,
19,
76,
1465852536
] |
def jaccard_similariy(setA, setB, alternativeUnion=False):
    """
    Finds the jaccard similarity between two sets.
    Essentially, its intersection over union.
    The alternative way to calculate this is to take union as sum of the
    number of items in the two sets. This will lead to jaccard similarity
    of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]
    Parameters:
        :setA (set,list,tuple): A non-empty set/list
        :setB (set,list,tuple): A non-empty set/list
        :alternativeUnion (boolean): If True, use sum of number of
        items as union
    Output:
        (float) The jaccard similarity between the two sets.
    Raises:
        ValueError: if the two arguments are not both sets, or both
        lists/tuples (previously this case silently returned None).
    Examples:
    >>> setA = {'a', 'b', 'c', 'd', 'e'}
    >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
    >>> jaccard_similariy(setA,setB)
    0.375
    >>> jaccard_similariy(setA,setA)
    1.0
    >>> jaccard_similariy(setA,setA,True)
    0.5
    >>> setA = ['a', 'b', 'c', 'd', 'e']
    >>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
    >>> jaccard_similariy(setA,setB)
    0.375
    """
    if isinstance(setA, set) and isinstance(setB, set):
        intersection = len(setA.intersection(setB))
        if alternativeUnion:
            union = len(setA) + len(setB)
        else:
            union = len(setA.union(setB))
        return intersection / union
    if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
        # Order-preserving multiset-style intersection/union on sequences.
        intersection = [element for element in setA if element in setB]
        if alternativeUnion:
            union = len(setA) + len(setB)
        else:
            # BUG FIX: `setA + [...]` raised TypeError when setA was a
            # tuple; coerce to list so tuple/list mixes work both ways.
            union = list(setA) + [element for element in setB
                                  if element not in setA]
        return len(intersection) / len(union)
    # BUG FIX: previously fell off the end and returned None for mixed
    # input types (e.g. a set and a list).
    raise ValueError(
        "setA and setB must both be sets, or both be lists/tuples")
154959,
39275,
154959,
147,
1468662241
] |
def test_set(self):
    """A plain two-argument `set` action should parse without errors."""
    sieve = '''
    require "variables";
    set "honorific" "Mr";
    '''
    # Second argument: whether a parse failure is expected (False here).
    self.assertFalse(checksieve.parse_string(sieve, False))
24,
4,
24,
6,
1434172648
] |
def test_wrong_tag(self):
    """`set :mime ...` uses a tag invalid for `set`; parsing must fail."""
    sieve = '''
    require "variables";
    set :mime "b" "c";
    '''
    # Second argument True: a parse failure is the expected outcome.
    self.assertTrue(checksieve.parse_string(sieve, True))
24,
4,
24,
6,
1434172648
] |
def test_too_many_args(self):
    """`set` with four positional arguments must fail to parse."""
    sieve = '''
    require "variables";
    set "a" "b" "c" "d";
    '''
    # Second argument True: a parse failure is the expected outcome.
    self.assertTrue(checksieve.parse_string(sieve, True))
24,
4,
24,
6,
1434172648
] |
def test_numeral_varname(self):
    """A purely numeric variable name is legal and must parse cleanly."""
    sieve = '''
    require "variables";
    set "1" "${state} pending";
    '''
    # Second argument False: no parse failure expected.
    self.assertFalse(checksieve.parse_string(sieve, False))
24,
4,
24,
6,
1434172648
] |
def test_suggest_filename(self):
    """
    Testing some files. Not testing recursion in filenames. It is situation
    if there exist file0, file1, file2 and input file is file
    """
    # Consistency fix: use assertEqual everywhere (was a mix of
    # assertTrue(a == b) and assertEqual, losing useful failure output).
    # No suffix: "_2" is appended.
    self.assertEqual(
        misc.suggest_filename("mujsoubor", exists=True), "mujsoubor_2")
    # Existing "_<n>" suffix: incremented.
    self.assertEqual(
        misc.suggest_filename("mujsoubor_112", exists=True), "mujsoubor_113")
    # "_<n>" before an extension: incremented, extension kept.
    self.assertEqual(
        misc.suggest_filename("mujsoubor_2.txt", exists=True),
        "mujsoubor_3.txt")
    # Trailing digits without "_" are part of the name: "_2" is appended.
    self.assertEqual(
        misc.suggest_filename("mujsoubor27.txt", exists=True),
        "mujsoubor27_2.txt")
    # exists=False: the name is returned unchanged.
    self.assertEqual(
        misc.suggest_filename("mujsoubor-a24.txt", exists=False),
        "mujsoubor-a24.txt", "Rewrite")
7,
1,
7,
1,
1445161284
] |
def test_getVersionString(self):
    """
    getVersionString is not used anymore
    """
    vfn = "../__VERSION__"
    # BUG FIX: the original set `existed = False` in both branches, so a
    # version file created by this test was never cleaned up. Track
    # whether WE created it and remove it only in that case.
    created = False
    if not os.path.exists(vfn):
        with open(vfn, 'a') as the_file:
            the_file.write('1.1.1\n')
        created = True
    verstr = qmisc.getVersionString()
    self.assertTrue(type(verstr) == str)
    if created:
        os.remove(vfn)
7,
1,
7,
1,
1445161284
] |
def test_obj_to_and_from_file_pickle(self):
    """Round-trip a dict (including an ndarray) through the pickle helpers."""
    testdata = np.random.random([4, 4, 3])
    test_object = {'a': 1, 'data': testdata}
    filename = 'test_obj_to_and_from_file.pkl'
    try:
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')
        # assertEqual gives informative failure messages (was assertTrue).
        self.assertEqual(saved_object['a'], 1)
        self.assertEqual(saved_object['data'][1, 1, 1], testdata[1, 1, 1])
    finally:
        # BUG FIX: remove the temp file even when an assertion fails.
        if os.path.exists(filename):
            os.remove(filename)
7,
1,
7,
1,
1445161284
] |
def test_obj_to_and_from_file_with_directories(self):
    """obj_to_file must create missing intermediate directories."""
    import shutil
    testdata = np.random.random([4, 4, 3])
    test_object = {'a': 1, 'data': testdata}
    dirname = '__test_write_and_read'
    filename = '__test_write_and_read/test_obj_to_and_from_file.pkl'
    try:
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')
        # assertEqual gives informative failure messages (was assertTrue).
        self.assertEqual(saved_object['a'], 1)
        self.assertEqual(saved_object['data'][1, 1, 1], testdata[1, 1, 1])
    finally:
        # BUG FIX: clean up the directory even when an assertion fails.
        shutil.rmtree(dirname, ignore_errors=True)
7,
1,
7,
1,
1445161284
] |
def simple_extract_stack(f=None, limit=None, skips=[]):
"""This is traceback.extract_stack from python 2.7 with this change:
- Comment the update of the cache.
- Skip internal stack trace level.
The update of the cache call os.stat to verify is the cache is up
to date. This take too much time on cluster.
limit - The number of stack level we want to return. If None, mean
all what we can.
skips - partial path of stack level we don't want to keep and count.
When we find one level that isn't skipped, we stop skipping.
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
trace = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name | rizar/attention-lvcsr | [
259,
103,
259,
11,
1443211188
] |
def add_tag_trace(thing, user_line=None):
    """
    Add tag.trace to an node or variable.
    The argument is returned after being affected (inplace).
    Parameters
    ----------
    thing
        The object where we add .tag.trace.
    user_line
        The max number of user line to keep.
    Notes
    -----
    We also use config.traceback.limit for the maximum number of stack level
    we look.
    """
    if user_line is None:
        user_line = config.traceback.limit
    # -1 is the sentinel for "unlimited" in the config.
    if user_line == -1:
        user_line = None
    # Internal Theano frames (both path separators) are skipped so the
    # trace starts at user code.
    skips = ["theano/tensor/", "theano\\tensor\\",
             "theano/compile/", "theano\\compile\\",
             "theano/gof/", "theano\\gof\\",
             "theano/scalar/basic.py", "theano\\scalar\\basic.py",
             "theano/sandbox/", "theano\\sandbox\\",
             "theano/scan_module/", "theano\\scan_module\\",
             "theano/sparse/", "theano\\sparse\\",
             "theano/typed_list/", "theano\\typed_list\\"]
    tr = simple_extract_stack(limit=user_line, skips=skips)
    # Different python version use different sementic for
    # limit. python 2.7 include the call to extrack_stack. The -1 get
    # rid of it.
    if tr:
        thing.tag.trace = [tr]
    else:
        thing.tag.trace = tr
    return thing
259,
103,
259,
11,
1443211188
] |
def __hash__(self):
    # this fixes silent-error-prone new-style class behavior
    # NOTE(review): on Python 3 every object defines __eq__, so this
    # hasattr check is always true and the method raises unconditionally;
    # the code predates Python 3 — confirm intent before reuse.
    if hasattr(self, '__eq__') or hasattr(self, '__cmp__'):
        raise TypeError("unhashable object: %s" % self)
    return id(self)
259,
103,
259,
11,
1443211188
] |
def clear(self):
    """Remove every attribute stored on this scratchpad."""
    self.__dict__.clear()
259,
103,
259,
11,
1443211188
] |
def __str__(self):
    """Render as 'scratchpad' followed by the attribute dict."""
    return "scratchpad" + str(self.__dict__)
259,
103,
259,
11,
1443211188
] |
def info(self):
    """Print this scratchpad's identity and all stored attributes."""
    print("<theano.gof.utils.scratchpad instance at %i>" % id(self))
    # iteritems comes from the py2/py3 compat layer at module level.
    for k, v in iteritems(self.__dict__):
        print("  %s: %s" % (k, v))
259,
103,
259,
11,
1443211188
] |
def __init__(self, **d):
    # Seed the scratchpad with arbitrary keyword attributes.
    self.__dict__.update(d)
259,
103,
259,
11,
1443211188
] |
def rval(*args, **kwargs):
    """Memoizing wrapper: cache f's result keyed on (args, kwargs items).

    `cache` and `f` come from the enclosing memoize decorator's scope.
    """
    kwtup = tuple(kwargs.items())
    key = (args, kwtup)
    if key not in cache:
        val = f(*args, **kwargs)
        cache[key] = val
    else:
        val = cache[key]
    return val
259,
103,
259,
11,
1443211188
] |
def deprecated(filename, msg=''):
    """
    Decorator which will print a warning message on the first call.
    Use it like this::
        @deprecated('myfile', 'do something different...')
        def fn_name(...)
            ...
    And it will print::
        WARNING myfile.fn_name deprecated. do something different...
    """
    def _deprecated(f):
        # One-element list so the nested closure can mutate the flag
        # (pre-`nonlocal` idiom, kept for py2 compatibility).
        warned = [False]
        def g(*args, **kwargs):
            if not warned[0]:
                print('WARNING: %s.%s deprecated. %s' %
                      (filename, f.__name__, msg))
                warned[0] = True
            return f(*args, **kwargs)
        return g
    return _deprecated
259,
103,
259,
11,
1443211188
] |
def difference(seq1, seq2):
    r"""
    Returns all elements in seq1 which are not in seq2: i.e ``seq1\seq2``.
    """
    # For a non-tiny seq2, use an O(len(seq1) + len(seq2)) set-membership
    # algorithm; fall back to the O(len(seq1) * len(seq2)) scan when seq2
    # contains unhashable elements.
    # BUG FIX: the original raised/caught a bare Exception as control
    # flow, which also swallowed unrelated errors from the set() path.
    if len(seq2) >= 4:  # threshold guessed by the original author (-JB)
        try:
            set2 = set(seq2)
        except TypeError:
            pass  # some seq2 element is not hashable
        else:
            return [x for x in seq1 if x not in set2]
    return [x for x in seq1 if x not in seq2]
259,
103,
259,
11,
1443211188
] |
def from_return_values(values):
    """Normalize a return value: sequences pass through, scalars are
    wrapped in a one-element list."""
    return values if isinstance(values, (list, tuple)) else [values]
259,
103,
259,
11,
1443211188
] |
def __init__(self, name, nonzero=True):
    # Display name of the keyword and its truth value (see __bool__).
    self.name = name
    self.nonzero = nonzero
259,
103,
259,
11,
1443211188
] |
def __bool__(self):
    # Python 3.x truth protocol; mirrors __nonzero__ on Python 2.
    return self.nonzero
259,
103,
259,
11,
1443211188
] |
def __repr__(self):
    """Angle-bracketed keyword name, e.g. ``<ANY_TYPE>``."""
    return "<%s>" % self.name
259,
103,
259,
11,
1443211188
] |
def comm_guard(type1, type2):
    """Decorator factory for commutative binary dispatch.

    The decorated function handles (type1, type2) argument pairs in either
    order; anything else — or a FALL_THROUGH return — is forwarded to the
    previous binding of the same name (chained dispatch via the module
    globals).
    """
    def wrap(f):
        # The previously-registered handler with the same name; new_f
        # falls back to it for non-matching types or FALL_THROUGH.
        old_f = f.__globals__[f.__name__]
        def new_f(arg1, arg2, *rest):
            if ((type1 is ANY_TYPE or isinstance(arg1, type1)) and
                    (type2 is ANY_TYPE or isinstance(arg2, type2))):
                pass
            elif ((type1 is ANY_TYPE or isinstance(arg2, type1)) and
                  (type2 is ANY_TYPE or isinstance(arg1, type2))):
                # Commutativity: swap so f always sees (type1, type2) order.
                arg1, arg2 = arg2, arg1
            else:
                return old_f(arg1, arg2, *rest)
            variable = f(arg1, arg2, *rest)
            if variable is FALL_THROUGH:
                return old_f(arg1, arg2, *rest)
            else:
                return variable
        new_f.__name__ = f.__name__
        def typename(type):
            # Human-readable name for the dispatch signature in __doc__.
            if isinstance(type, Keyword):
                return str(type)
            elif isinstance(type, (tuple, list)):
                return "(" + ", ".join([x.__name__ for x in type]) + ")"
            else:
                return type.__name__
        # Accumulate the docs of every handler in the dispatch chain.
        new_f.__doc__ = (str(old_f.__doc__) + "\n" +
                         ", ".join([typename(type)
                                    for type in (type1, type2)]) +
                         "\n" + str(f.__doc__ or ""))
        return new_f
    return wrap
259,
103,
259,
11,
1443211188
] |
def wrap(f):
    """Inner decorator of a single-argument type guard.

    NOTE(review): `type1` is a free variable from the enclosing (not
    shown) guard factory. Dispatches to f when arg1 matches type1,
    otherwise — or on a FALL_THROUGH return — chains to the previous
    binding of the same name found in the module globals.
    """
    old_f = f.__globals__[f.__name__]
    def new_f(arg1, *rest):
        if (type1 is ANY_TYPE or isinstance(arg1, type1)):
            variable = f(arg1, *rest)
            if variable is FALL_THROUGH:
                return old_f(arg1, *rest)
            else:
                return variable
        else:
            return old_f(arg1, *rest)
    new_f.__name__ = f.__name__
    def typename(type):
        # Human-readable name for the dispatch signature in __doc__.
        if isinstance(type, Keyword):
            return str(type)
        elif isinstance(type, (tuple, list)):
            return "(" + ", ".join([x.__name__ for x in type]) + ")"
        else:
            return type.__name__
    # Accumulate the docs of every handler in the dispatch chain.
    new_f.__doc__ = (str(old_f.__doc__) + "\n" +
                     ", ".join([typename(type) for type in (type1,)]) +
                     "\n" + str(f.__doc__ or ""))
    return new_f
259,
103,
259,
11,
1443211188
] |
def flatten(a):
    """
    Recursively flatten tuple, list and set in a list.
    """
    # Non-containers are returned as a single-element list (base case).
    if not isinstance(a, (tuple, list, set)):
        return [a]
    out = []
    for item in a:
        out.extend(flatten(item))
    return out
259,
103,
259,
11,
1443211188
] |
def hist(coll):
    """Return a dict mapping each element of *coll* to its occurrence count."""
    counts = {}
    for item in coll:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
259,
103,
259,
11,
1443211188
] |
def bad_var(var):
    # True when the variable is unnamed, or its name is ambiguous
    # (appears more than once in the surrounding name histogram `h`).
    return not var.name or h[var.name] > 1
259,
103,
259,
11,
1443211188
] |
def remove(predicate, coll):
    """
    Return those items of collection for which predicate(item) is true.
    Examples
    --------
    >>> def even(x):
    ...     return x % 2 == 0
    >>> remove(even, [1, 2, 3, 4])
    [1, 3]
    """
    kept = []
    for item in coll:
        if not predicate(item):
            kept.append(item)
    return kept
259,
103,
259,
11,
1443211188
] |
def hash_from_code(msg):
    """Return 'm' + md5 hex digest of *msg* (str is encoded to bytes first).

    The 'm' prefix exists because the digest is used to build a module
    name and Python 3 module names may not start with a digit.
    """
    # hashlib.md5 needs a buffer-protocol object; unicode str is not one.
    data = msg.encode() if isinstance(msg, str) else msg
    return 'm' + hashlib.md5(data).hexdigest()
259,
103,
259,
11,
1443211188
] |
def hash_from_code(msg):
    """Return the md5 hex digest of *msg* (bytes-like, str, or ndarray).

    :param msg: bytes-like object, text string, or numpy.ndarray.
    :returns: 32-character lowercase hex digest.
    """
    # BUG FIX: the original fallback used numpy.getbuffer(), which was
    # removed in Python 3. md5() accepts any buffer-protocol object
    # (including C-contiguous ndarrays) directly; str must be encoded,
    # and non-contiguous arrays need a contiguous copy.
    if isinstance(msg, str):
        msg = msg.encode()
    try:
        return hashlib.md5(msg).hexdigest()
    except TypeError:
        assert isinstance(msg, numpy.ndarray)
        return hashlib.md5(
            numpy.ascontiguousarray(msg).tobytes()).hexdigest()
259,
103,
259,
11,
1443211188
] |
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline client plus the shared (de)serializers and configuration
    # used by every operation in this operation group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
    """Build the GET request for one page of VPN-connection results.

    First page: format the operation URL with path/query parameters.
    Later pages: *next_link* already embeds the full query string, so
    only headers are attached.
    """
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    if not next_link:
        # Construct URL
        url = self.list_by_vpn_connection.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
    else:
        # next_link is used verbatim; no extra query parameters.
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
3526,
2256,
3526,
986,
1335285972
] |
def __init__(
self, plotly_name="smoothing", parent_name="contourcarpet.line", **kwargs | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, fileDir):
    # Destination for logged text; the real stdout is kept so output can
    # still be mirrored/flushed (see flush()).
    self.fileDir = fileDir
    self.console = sys.stdout
181,
138,
181,
1,
1453081737
] |
def flush(self):
    """Flush the real stdout (file-like interface passthrough)."""
    self.console.flush()
181,
138,
181,
1,
1453081737
] |
def inPip(fileDir):
    """Wrap the original input function so every entered line is also
    appended to the log file *fileDir*.

    NOTE(review): the write omits a trailing newline, so consecutive
    inputs run together in the log — confirm this is intended.
    """
    def _input(hint):
        s = new_input(hint)
        with open(fileDir, 'a') as f: f.write(s)
        return s
    return _input
181,
138,
181,
1,
1453081737
] |
def problem_sinsin():
    """Manufactured-solution test problem: x**2 * sin(pi*x) * sin(pi*y)
    on the unit square, with the matching right-hand side derived
    symbolically for the axisymmetric convection-diffusion operator.
    """
    def mesh_generator(n):
        # n x n crossed-triangle mesh of the unit square.
        return UnitSquareMesh(n, n, "left/right")
    x = sympy.DeferredVector("x")
    # Choose the solution such that the boundary conditions are fulfilled
    # exactly. Also, multiply with x**2 to make sure that the right-hand side
    # doesn't contain the term 1/x. Although it looks like a singularity at
    # x=0, this terms is esentially harmless since the volume element 2*pi*x is
    # used throughout the code, canceling out with the 1/x. However, Dolfin has
    # problems with this, cf.
    # <https://bitbucket.org/fenics-project/dolfin/issues/831/some-problems-with-quadrature-expressions>.
    solution = {
        "value": x[0] ** 2 * sympy.sin(pi * x[0]) * sympy.sin(pi * x[1]),
        "degree": MAX_DEGREE,
    }
    # Produce a matching right-hand side.
    phi = solution["value"]
    # Fixed material parameters: conductivity, density, heat capacity,
    # and a constant convection field.
    kappa = 2.0
    rho = 3.0
    cp = 5.0
    conv = [1.0, 2.0]
    rhs_sympy = sympy.simplify(
        -1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[0]), x[0])
        - 1.0 / x[0] * sympy.diff(kappa * x[0] * sympy.diff(phi, x[1]), x[1])
        + rho * cp * conv[0] * sympy.diff(phi, x[0])
        + rho * cp * conv[1] * sympy.diff(phi, x[1])
    )
    rhs = {
        "value": Expression(helpers.ccode(rhs_sympy), degree=MAX_DEGREE),
        "degree": MAX_DEGREE,
    }
    return mesh_generator, solution, rhs, triangle, kappa, rho, cp, Constant(conv)
28,
7,
28,
2,
1400013996
] |
def test_order(problem, stabilization):
    """Assert the correct discretization order.
    """
    mesh_sizes = [16, 32, 64]
    errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
    # Compute the numerical order of convergence.
    order = helpers.compute_numerical_order_of_convergence(hmax, errors)
    # The test is considered passed if the numerical order of convergence
    # matches the expected order in at least the first step in the coarsest
    # spatial discretization, and is not getting worse as the spatial
    # discretizations are refining.
    tol = 0.1
    expected_order = 2.0
    # `.all()` requires every refinement step to meet the expected order.
    assert (order > expected_order - tol).all()
    return
28,
7,
28,
2,
1400013996
] |
def _show_order_info(problem, mesh_sizes, stabilization):
    """Performs consistency check for the given problem/method combination and
    show some information about it. Useful for debugging.
    """
    errors, hmax = _compute_errors(problem, mesh_sizes, stabilization)
    order = helpers.compute_numerical_order_of_convergence(hmax, errors)
    # Print the data
    print()
    print("hmax            ||u - u_h||     conv. order")
    print("{:e}    {:e}".format(hmax[0], errors[0]))
    for j in range(len(errors) - 1):
        print(32 * " " + "{:2.5f}".format(order[j]))
        print("{:e}    {:e}".format(hmax[j + 1], errors[j + 1]))
    # Plot the actual data.
    plt.loglog(hmax, errors, "-o")
    # Compare with order curves.
    plt.autoscale(False)
    e0 = errors[0]
    # Reference lines of slope 0..3 anchored at the first data point.
    for order in range(4):
        plt.loglog(
            [hmax[0], hmax[-1]], [e0, e0 * (hmax[-1] / hmax[0]) ** order], color="0.7"
        )
    plt.xlabel("hmax")
    plt.ylabel("||u-u_h||")
    plt.show()
    return
28,
7,
28,
2,
1400013996
] |
def __init__(self, links=None, delimiter=' | ', **kwargs):
    """Column that renders a delimiter-separated list of action links.

    :param links: link definitions; the class-level default is kept
        when None.
    :param delimiter: text placed between the rendered links.
    """
    super(ActionsColumn, self).__init__(**kwargs)
    # Action columns are presentation-only; never sort by them.
    self.orderable = False
    self.delimiter = delimiter
    if links is not None:
        self.links = links
6,
3,
6,
3,
1412887181
] |
def __init__(self, *args, **kwargs):
    """Table variant that renders with the fancy paged-tables template.

    NOTE(review): 'template' is read from kwargs *after* the same kwargs
    were passed to super().__init__, which may itself consume/apply a
    template — confirm the override order is intended.
    """
    super(PaginateTable, self).__init__(*args, **kwargs)
    self.template = kwargs.get('template', 'fancy_paged_tables/table.html')
6,
3,
6,
3,
1412887181
] |
def tokenize(stream):
    """Lazily tokenize the JSON text in *stream*.

    Yields (TOKEN_TYPE, value) pairs, reading one character at a time and
    driving a state machine over __TOKENIZER_STATE.  Raises ValueError,
    with the offending stream index appended, on malformed input.

    Fix vs. original: the post-string error message read "after strin." --
    corrected to "after string.".
    """
    def is_delimiter(char):
        return char.isspace() or char in "{}[]:,"
    token = []
    charcode = 0
    completed = False
    now_token = ""
    def process_char(char, charcode):
        # Feed one character to the state machine.  Returns (advance,
        # next_state, charcode): whether the stream cursor should move on,
        # the state to enter, and the accumulated \uXXXX code point.
        nonlocal token, completed, now_token
        advance = True
        add_char = False
        next_state = state
        if state == __TOKENIZER_STATE.WHITESPACE:
            if char == "{":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, "{")
            elif char == "}":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, "}")
            elif char == "[":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, "[")
            elif char == "]":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, "]")
            elif char == ",":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, ",")
            elif char == ":":
                completed = True
                now_token = (TOKEN_TYPE.OPERATOR, ":")
            elif char == "\"":
                next_state = __TOKENIZER_STATE.STRING
            elif char in "123456789":
                next_state = __TOKENIZER_STATE.INTEGER
                add_char = True
            elif char == "0":
                next_state = __TOKENIZER_STATE.INTEGER_0
                add_char = True
            elif char == "-":
                next_state = __TOKENIZER_STATE.INTEGER_SIGN
                add_char = True
            elif char == "f":
                next_state = __TOKENIZER_STATE.FALSE_1
            elif char == "t":
                next_state = __TOKENIZER_STATE.TRUE_1
            elif char == "n":
                next_state = __TOKENIZER_STATE.NULL_1
            elif not char.isspace():
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.INTEGER:
            if char in "0123456789":
                add_char = True
            elif char == ".":
                next_state = __TOKENIZER_STATE.FLOATING_POINT_0
                add_char = True
            elif char == "e" or char == 'E':
                next_state = __TOKENIZER_STATE.INTEGER_EXP_0
                add_char = True
            elif is_delimiter(char):
                next_state = __TOKENIZER_STATE.WHITESPACE
                completed = True
                now_token = (TOKEN_TYPE.NUMBER, int("".join(token)))
                advance = False
            else:
                raise ValueError("A number must contain only digits. Got '{}'".format(char))
        elif state == __TOKENIZER_STATE.INTEGER_0:
            if char == ".":
                next_state = __TOKENIZER_STATE.FLOATING_POINT_0
                add_char = True
            elif char == "e" or char == 'E':
                next_state = __TOKENIZER_STATE.INTEGER_EXP_0
                add_char = True
            elif is_delimiter(char):
                next_state = __TOKENIZER_STATE.WHITESPACE
                completed = True
                now_token = (TOKEN_TYPE.NUMBER, 0)
                advance = False
            else:
                raise ValueError("A 0 must be followed by a '.' or a 'e'. Got '{0}'".format(char))
        elif state == __TOKENIZER_STATE.INTEGER_SIGN:
            if char == "0":
                next_state = __TOKENIZER_STATE.INTEGER_0
                add_char = True
            elif char in "123456789":
                next_state = __TOKENIZER_STATE.INTEGER
                add_char = True
            else:
                raise ValueError("A - must be followed by a digit. Got '{0}'".format(char))
        elif state == __TOKENIZER_STATE.INTEGER_EXP_0:
            if char == "+" or char == "-" or char in "0123456789":
                next_state = __TOKENIZER_STATE.INTEGER_EXP
                add_char = True
            else:
                raise ValueError("An e in a number must be followed by a '+', '-' or digit. Got '{0}'".format(char))
        elif state == __TOKENIZER_STATE.INTEGER_EXP:
            if char in "0123456789":
                add_char = True
            elif is_delimiter(char):
                completed = True
                # Exponent form always yields a float.
                now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
                next_state = __TOKENIZER_STATE.WHITESPACE
                advance = False
            else:
                raise ValueError("A number exponent must consist only of digits. Got '{}'".format(char))
        elif state == __TOKENIZER_STATE.FLOATING_POINT:
            if char in "0123456789":
                add_char = True
            elif char == "e" or char == "E":
                next_state = __TOKENIZER_STATE.INTEGER_EXP_0
                add_char = True
            elif is_delimiter(char):
                completed = True
                now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
                next_state = __TOKENIZER_STATE.WHITESPACE
                advance = False
            else:
                raise ValueError("A number must include only digits")
        elif state == __TOKENIZER_STATE.FLOATING_POINT_0:
            if char in "0123456789":
                next_state = __TOKENIZER_STATE.FLOATING_POINT
                add_char = True
            else:
                raise ValueError("A number with a decimal point must be followed by a fractional part")
        elif state == __TOKENIZER_STATE.FALSE_1:
            if char == "a":
                next_state = __TOKENIZER_STATE.FALSE_2
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.FALSE_2:
            if char == "l":
                next_state = __TOKENIZER_STATE.FALSE_3
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.FALSE_3:
            if char == "s":
                next_state = __TOKENIZER_STATE.FALSE_4
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.FALSE_4:
            if char == "e":
                next_state = __TOKENIZER_STATE.WHITESPACE
                completed = True
                now_token = (TOKEN_TYPE.BOOLEAN, False)
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.TRUE_1:
            if char == "r":
                next_state = __TOKENIZER_STATE.TRUE_2
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.TRUE_2:
            if char == "u":
                next_state = __TOKENIZER_STATE.TRUE_3
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.TRUE_3:
            if char == "e":
                next_state = __TOKENIZER_STATE.WHITESPACE
                completed = True
                now_token = (TOKEN_TYPE.BOOLEAN, True)
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.NULL_1:
            if char == "u":
                next_state = __TOKENIZER_STATE.NULL_2
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.NULL_2:
            if char == "l":
                next_state = __TOKENIZER_STATE.NULL_3
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.NULL_3:
            if char == "l":
                next_state = __TOKENIZER_STATE.WHITESPACE
                completed = True
                now_token = (TOKEN_TYPE.NULL, None)
            else:
                raise ValueError("Invalid JSON character: '{0}'".format(char))
        elif state == __TOKENIZER_STATE.STRING:
            if char == "\"":
                completed = True
                now_token = (TOKEN_TYPE.STRING, "".join(token))
                next_state = __TOKENIZER_STATE.STRING_END
            elif char == "\\":
                next_state = __TOKENIZER_STATE.STRING_ESCAPE
            else:
                add_char = True
        elif state == __TOKENIZER_STATE.STRING_END:
            if is_delimiter(char):
                advance = False
                next_state = __TOKENIZER_STATE.WHITESPACE
            else:
                # Bug fix: the original message misspelled "string".
                raise ValueError("Expected whitespace or an operator after string. Got '{}'".format(char))
        elif state == __TOKENIZER_STATE.STRING_ESCAPE:
            next_state = __TOKENIZER_STATE.STRING
            if char == "\\" or char == "\"":
                add_char = True
            elif char == "b":
                char = "\b"
                add_char = True
            elif char == "f":
                char = "\f"
                add_char = True
            elif char == "n":
                char = "\n"
                add_char = True
            elif char == "t":
                char = "\t"
                add_char = True
            elif char == "r":
                char = "\r"
                add_char = True
            elif char == "/":
                char = "/"
                add_char = True
            elif char == "u":
                next_state = __TOKENIZER_STATE.UNICODE_1
                charcode = 0
            else:
                raise ValueError("Invalid string escape: {}".format(char))
        # The four UNICODE_* states accumulate a \uXXXX code point one hex
        # digit at a time (most significant nibble first).
        elif state == __TOKENIZER_STATE.UNICODE_1:
            if char in "0123456789":
                charcode = (ord(char) - 48) * 4096
            elif char in "abcdef":
                charcode = (ord(char) - 87) * 4096
            elif char in "ABCDEF":
                charcode = (ord(char) - 55) * 4096
            else:
                raise ValueError("Invalid character code: {}".format(char))
            next_state = __TOKENIZER_STATE.UNICODE_2
            char = ""
        elif state == __TOKENIZER_STATE.UNICODE_2:
            if char in "0123456789":
                charcode += (ord(char) - 48) * 256
            elif char in "abcdef":
                charcode += (ord(char) - 87) * 256
            elif char in "ABCDEF":
                charcode += (ord(char) - 55) * 256
            else:
                raise ValueError("Invalid character code: {}".format(char))
            next_state = __TOKENIZER_STATE.UNICODE_3
            char = ""
        elif state == __TOKENIZER_STATE.UNICODE_3:
            if char in "0123456789":
                charcode += (ord(char) - 48) * 16
            elif char in "abcdef":
                charcode += (ord(char) - 87) * 16
            elif char in "ABCDEF":
                charcode += (ord(char) - 55) * 16
            else:
                raise ValueError("Invalid character code: {}".format(char))
            next_state = __TOKENIZER_STATE.UNICODE_4
            char = ""
        elif state == __TOKENIZER_STATE.UNICODE_4:
            if char in "0123456789":
                charcode += ord(char) - 48
            elif char in "abcdef":
                charcode += ord(char) - 87
            elif char in "ABCDEF":
                charcode += ord(char) - 55
            else:
                raise ValueError("Invalid character code: {}".format(char))
            next_state = __TOKENIZER_STATE.STRING
            char = chr(charcode)
            add_char = True
        if add_char:
            token.append(char)
        return advance, next_state, charcode
    # Driver loop: pull characters and emit tokens as they complete.
    state = __TOKENIZER_STATE.WHITESPACE
    char = stream.read(1)
    index = 0
    while char:
        try:
            advance, state, charcode = process_char(char, charcode)
        except ValueError as e:
            raise ValueError("".join([e.args[0], " at index {}".format(index)]))
        if completed:
            completed = False
            token = []
            yield now_token
        if advance:
            char = stream.read(1)
            index += 1
    # Flush any token pending at end-of-stream (e.g. a trailing number).
    process_char(" ", charcode)
    if completed:
        yield now_token
59,
13,
59,
4,
1416428638
] |
def parse(file):
    """Parse one complete JSON document from *file* and return its value.

    Raises ValueError if the document is improperly closed or is followed
    by trailing content.
    """
    tokens = tokenize(file)
    value, lookahead_type, lookahead = __parse(tokens, next(tokens))
    if lookahead is not None:
        raise ValueError("Improperly closed JSON object")
    try:
        next(tokens)
    except StopIteration:
        # Stream exhausted exactly at the end of the document: success.
        return value
    raise ValueError("Additional string after end of JSON")
59,
13,
59,
4,
1416428638
] |
def __parse(token_stream, first_token):
    """Core stack-machine parser: build one JSON value from *token_stream*.

    *first_token* must be the '{' or '[' operator opening the value.
    Returns (value, lookahead_type, lookahead_token); the lookahead pair is
    (None, None) when the stream ended exactly at the closing delimiter.
    The stack holds partially-built lists, dicts, KVP entries, and -- for
    scalar array items -- bare token values.
    """
    class KVP:
        """Mutable holder for an object entry under construction; `set`
        records whether the value has been assigned yet."""
        def __init__(self, key):
            self.key = key
            self.value = None
            self.set = False
        def __str__(self):
            if self.set:
                return "{}: {}".format(self.key, self.value)
            else:
                return "{}: <NULL>".format(self.key)
    stack = []
    token_type, token = first_token
    if token_type == TOKEN_TYPE.OPERATOR:
        if token == "{":
            stack.append({})
        elif token == "[":
            stack.append([])
        else:
            raise ValueError("Expected object or array. Got '{}'".format(token))
    else:
        raise ValueError("Expected object or array. Got '{}'".format(token))
    last_type, last_token = token_type, token
    try:
        token_type, token = next(token_stream)
    except StopIteration as e:
        raise ValueError("Too many opening braces") from e
    try:
        while True:
            # ---- top of stack is a list being filled ----
            if isinstance(stack[-1], list):
                if last_type == TOKEN_TYPE.OPERATOR:
                    if last_token == "[":
                        if token_type == TOKEN_TYPE.OPERATOR:
                            if token == "{":
                                stack.append({})
                            elif token == "[":
                                stack.append([])
                            elif token != "]":
                                raise ValueError("Array must either be empty or contain a value. Got '{}'".
                                                 format(token))
                        else:
                            stack.append(token)
                    elif last_token == ",":
                        if token_type == TOKEN_TYPE.OPERATOR:
                            if token == "{":
                                stack.append({})
                            elif token == "[":
                                stack.append([])
                            else:
                                raise ValueError("Array value expected. Got '{}'".format(token))
                        else:
                            stack.append(token)
                    elif last_token == "]":
                        # Close the list and fold it into its parent container.
                        value = stack.pop()
                        if len(stack) == 0:
                            return value, token_type, token
                        if isinstance(stack[-1], list):
                            stack[-1].append(value)
                        elif isinstance(stack[-1], dict):
                            stack[-1][value.key] = value.value
                        elif isinstance(stack[-1], KVP):
                            # The list was the value of a pending key/value pair.
                            stack[-1].value = value
                            stack[-1].set = True
                            value = stack.pop()
                            if len(stack) == 0:
                                return value, token_type, token
                            if isinstance(stack[-1], list):
                                stack[-1].append(value)
                            elif isinstance(stack[-1], dict):
                                stack[-1][value.key] = value.value
                            else:
                                raise ValueError("Array items must be followed by a comma or closing bracket. "
                                                 "Got '{}'".format(value))
                        else:
                            raise ValueError("Array items must be followed by a comma or closing bracket. "
                                             "Got '{}'".format(value))
                    elif last_token == "}":
                        raise ValueError("Array closed with a '}'")
                    else:
                        raise ValueError("Array should not contain ':'")
                else:
                    raise ValueError("Unknown Error")
            # ---- top of stack is a dict being filled ----
            elif isinstance(stack[-1], dict):
                if last_type == TOKEN_TYPE.OPERATOR:
                    if last_token == "{":
                        if token_type == TOKEN_TYPE.OPERATOR:
                            if token == "{":
                                stack.append({})
                            elif token == "[":
                                stack.append([])
                            elif token != "}":
                                raise ValueError("Object must either be empty or contain key value pairs."
                                                 " Got '{}'".format(token))
                        elif token_type == TOKEN_TYPE.STRING:
                            stack.append(KVP(token))
                        else:
                            raise ValueError("Object keys must be strings. Got '{}'".format(token))
                    elif last_token == ",":
                        if token_type == TOKEN_TYPE.OPERATOR:
                            if token == "{":
                                stack.append({})
                            elif token == "[":
                                stack.append([])
                            else:
                                raise ValueError("Object key expected. Got '{}'".format(token))
                        elif token_type == TOKEN_TYPE.STRING:
                            stack.append(KVP(token))
                        else:
                            raise ValueError("Object keys must be strings. Got '{}'".format(token))
                    elif last_token == "}":
                        # Close the dict and fold it into its parent container.
                        value = stack.pop()
                        if len(stack) == 0:
                            return value, token_type, token
                        if isinstance(stack[-1], list):
                            stack[-1].append(value)
                        elif isinstance(stack[-1], dict):
                            stack[-1][value.key] = value.value
                        elif isinstance(stack[-1], KVP):
                            stack[-1].value = value
                            stack[-1].set = True
                            value = stack.pop()
                            if len(stack) == 0:
                                return value, token_type, token
                            if isinstance(stack[-1], list):
                                stack[-1].append(value)
                            elif isinstance(stack[-1], dict):
                                stack[-1][value.key] = value.value
                            else:
                                raise ValueError("Object key value pairs must be followed by a comma or "
                                                 "closing bracket. Got '{}'".format(value))
                    elif last_token == "]":
                        raise ValueError("Object closed with a ']'")
                    else:
                        raise ValueError("Object key value pairs should be separated by comma, not ':'")
            # ---- top of stack is a key/value pair in progress ----
            elif isinstance(stack[-1], KVP):
                if stack[-1].set:
                    # Value already assigned: expect ',' or '}' to finish the pair.
                    if token_type == TOKEN_TYPE.OPERATOR:
                        if token != "}" and token != ",":
                            raise ValueError("Object key value pairs should be followed by ',' or '}'. Got '"
                                             + token + "'")
                        value = stack.pop()
                        if len(stack) == 0:
                            return value, token_type, token
                        if isinstance(stack[-1], list):
                            stack[-1].append(value)
                        elif isinstance(stack[-1], dict):
                            stack[-1][value.key] = value.value
                        else:
                            raise ValueError("Object key value pairs must be followed by a comma or closing bracket. "
                                             "Got '{}'".format(value))
                        if token == "}" and len(stack) == 1:
                            return stack[0], None, None
                    else:
                        raise ValueError("Object key value pairs should be followed by ',' or '}'. Got '"
                                         + token + "'")
                else:
                    # No value yet: expect ':' then the value.
                    if token_type == TOKEN_TYPE.OPERATOR and token == ":" and last_type == TOKEN_TYPE.STRING:
                        pass
                    elif last_type == TOKEN_TYPE.OPERATOR and last_token == ":":
                        if token_type == TOKEN_TYPE.OPERATOR:
                            if token == "{":
                                stack.append({})
                            elif token == "[":
                                stack.append([])
                            else:
                                raise ValueError("Object property value expected. Got '{}'".format(token))
                        else:
                            stack[-1].value = token
                            stack[-1].set = True
                    else:
                        raise ValueError("Object keys must be separated from values by a single ':'. "
                                         "Got '{}'".format(token))
            # ---- top of stack is a bare scalar array item: fold it in ----
            else:
                value = stack.pop()
                if isinstance(stack[-1], list):
                    stack[-1].append(value)
                elif isinstance(stack[-1], dict):
                    stack[-1][value.key] = value.value
                else:
                    raise ValueError("Array items must be followed by a comma or closing bracket. "
                                     "Got '{}'".format(value))
            last_type, last_token = token_type, token
            token_type, token = next(token_stream)
    except StopIteration as e:
        # Stream ended: legal only if exactly one finished value remains.
        if len(stack) == 1:
            return stack[0], None, None
        else:
            raise ValueError("JSON Object not properly closed") from e
59,
13,
59,
4,
1416428638
] |
def process_token(token_type, token):
    """Consume one lookahead token while streaming items of a JSON array.

    Returns a (value, token_type, token) triple: the next array element
    (None when ']' terminates the array) plus any unconsumed lookahead.

    NOTE(review): relies on `token_stream` and `__parse` from an enclosing
    scope -- this function appears to have been lifted out of a larger
    streaming routine; confirm against the original context.
    """
    if token_type == TOKEN_TYPE.OPERATOR:
        if token == ']':
            # End of the array: nothing more to yield.
            return None, None, None
        elif token == ",":
            token_type, token = next(token_stream)
            if token_type == TOKEN_TYPE.OPERATOR:
                if token == "[" or token == "{":
                    # A nested container follows the comma.
                    return __parse(token_stream, (token_type, token))
                else:
                    raise ValueError("Expected an array value. Got '{}'".format(token))
            else:
                return token, None, None
        elif token == "[" or token == "{":
            return __parse(token_stream, (token_type, token))
        else:
            raise ValueError("Array entries must be followed by ',' or ']'. Got '{}'".format(token))
    else:
        # Scalar value: hand it back directly.
        return token, None, None
59,
13,
59,
4,
1416428638
] |
def run(ds):
    """Plot *data* downsampled by factor *ds* with each method, timing each.

    Overlays one colored curve per downsampling method on top of the raw
    trace and prints the wall-clock time each method took.
    """
    pw.plot(data, clear=True)
    for m, c in zip(methods, colors):
        d1 = data.copy()
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # recommended monotonic wall-clock replacement.
        start = time.perf_counter()
        d2 = NiDAQ.downsample(d1, ds, method=m)
        print("Method %d: %f" % (m, time.perf_counter() - start))
        p = pw.plot(y=d2, x=np.linspace(0, len(d2)*ds, len(d2)), pen=mkPen(c))
        # Draw the downsampled curve above the raw trace.
        p.setZValue(10000)
51,
39,
51,
31,
1385049034
] |
def showDownsample(**kwargs):
    """Overlay the raw trace and its downsampled version for comparison.

    Keyword arguments are forwarded to NiDAQ.downsample; 'ds' (the
    decimation factor) is also used to build the reduced x axis.
    """
    raw = data.copy()
    reduced = NiDAQ.downsample(raw, **kwargs)
    ds = kwargs['ds']
    xv = xVals[::ds][:len(reduced)]
    pw.plot(y=raw, x=xVals, clear=True)
    pw.plot(y=reduced[:len(xv)], x=xv, pen=mkPen((255, 0, 0)))
51,
39,
51,
31,
1385049034
] |
def showTransfer(**kwargs):
    """Examine downsampling transfer characteristics on a white-noise input.

    NOTE(review): the body appears truncated in this excerpt -- `data` is
    generated but never used here; confirm against the full source.
    `sr*dur` is used as the sample count, so it presumably evaluates to an
    integer -- verify.
    """
    xVals = np.linspace(0, dur, sr*dur)
    #data = sin(xVals* linspace(0, sampr*2, sampr*dur))
    data = np.random.normal(size=sr*dur)
51,
39,
51,
31,
1385049034
] |
def test_repeatable_zero_or_more():
    """Zero-or-more ('*') repetition: matches both empty and repeated `b`."""
    grammar = """
    S: "2" b* "3";
    terminals
    b: "1";
    """
    g = Grammar.from_string(grammar)
    # Both helper nonterminals must be generated for the '*' operator.
    for generated in ('b_0', 'b_1'):
        assert g.get_nonterminal(generated)
    parser = Parser(g)
    assert parser.parse('2 1 1 1 3') == ["2", ["1", "1", "1"], "3"]
    # The repetition may also match nothing at all.
    assert parser.parse('2 3') == ["2", [], "3"]
118,
29,
118,
49,
1481999547
] |
def test_repeatable_one_or_more():
    """One-or-more ('+') repetition: requires at least a single `b`."""
    grammar = """
    S: "2" b+ "3";
    terminals
    b: "1";
    """
    g = Grammar.from_string(grammar)
    assert g.get_nonterminal('b_1')
    parser = Parser(g)
    assert parser.parse('2 1 1 1 3') == ["2", ["1", "1", "1"], "3"]
    # An empty repetition must be rejected.
    with pytest.raises(ParseError) as e:
        parser.parse('2 3')
    assert 'Expected: b' in str(e.value)
118,
29,
118,
49,
1481999547
] |
def test_optional():
    """Optional ('?') symbols produce None in the parse result when absent."""
    grammar = """
    S: "2" b? "3"?;
    terminals
    b: "1";
    """
    g = Grammar.from_string(grammar)
    assert g.get_nonterminal('b_opt')
    parser = Parser(g)
    cases = [
        ('2 1 3', ["2", "1", "3"]),
        ('2 3', ["2", None, "3"]),
        ('2 1', ["2", "1", None]),
    ]
    for source, expected in cases:
        assert parser.parse(source) == expected
    # The mandatory leading "2" is still required.
    with pytest.raises(ParseError) as e:
        parser.parse(' 1 3')
    assert 'Expected: 2' in str(e.value)
118,
29,
118,
49,
1481999547
] |
def test_multiple_repetition_operators():
    """Mixing '*[separator]', '+' and '?' operators in one production."""
    grammar = """
    S: "2" b*[comma] c+ "3"?;
    terminals
    b: "b";
    c: "c";
    comma: ",";
    """
    g = Grammar.from_string(grammar)
    # One generated helper nonterminal per repetition operator.
    for generated in ('b_0_comma', 'c_1'):
        assert g.get_nonterminal(generated)
    assert Parser(g).parse('2 b, b c 3') == ["2", ["b", "b"], ["c"], "3"]
118,
29,
118,
49,
1481999547
] |
def test_live_customer(self):
        """Exercise the full live Customer lifecycle: create, read, update, delete."""
        created = Customer.create()
        Customer.create(FULL_CUSTOMER)
        Customer.create(CUSTOMER_WITH_CARD)
        # Reading details returns the payment profile IDs and address IDs
        # for the user.
        customer_id = created.customer_id
        Customer.details(customer_id)
        # Change a couple of fields on the record.
        Customer.update(customer_id, {
            'email': 'vincent@test.com',
            'description': 'Cool web developer guy'
        })
        Customer.delete(customer_id)
        # Deleting again must fail now that the record is gone.
        self.assertRaises(AuthorizeResponseError, Customer.delete, customer_id)
        Customer.list()
41,
38,
41,
8,
1366675320
] |
def __init__(self, default_factory=None, *a, **kw):
        """Initialise like dict, plus an optional factory for missing keys.

        default_factory must be callable, or None to disable defaulting.
        Remaining positional/keyword arguments go straight to dict.
        """
        if default_factory is not None and not callable(default_factory):
            raise TypeError('first argument must be callable')
        dict.__init__(self, *a, **kw)
        self.default_factory = default_factory
1884,
686,
1884,
41,
1500923597
] |
def __missing__(self, key):
        """Called by dict.__getitem__ for absent keys: store and return a default."""
        factory = self.default_factory
        if factory is None:
            raise KeyError(key)
        value = factory()
        self[key] = value
        return value
1884,
686,
1884,
41,
1500923597
] |
def copy(self):
        """Return a shallow copy by delegating to the __copy__ hook."""
        return self.__copy__()
1884,
686,
1884,
41,
1500923597
] |
def __deepcopy__(self, memo):
        """Deep copy: keep the factory reference, deep-copy all items.

        Fix vs. original: passing ``self.items()`` straight to
        ``copy.deepcopy`` fails on Python 3 (dict views are not picklable,
        so deepcopy raises TypeError), and the ``memo`` dict was dropped.
        Copy key/value pairs individually, threading ``memo`` through.
        """
        import copy
        return type(self)(self.default_factory,
                          ((copy.deepcopy(k, memo), copy.deepcopy(v, memo))
                           for k, v in self.items()))
1884,
686,
1884,
41,
1500923597
] |
def __init__(self, feat_extractor,num_classes=None):
        """Wrap a backbone and attach a fresh linear classification head.

        feat_extractor: backbone module; must expose an ``fc`` layer whose
            ``in_features`` sizes the new head.
        num_classes: number of output classes.  NOTE(review): defaults to
            None, which would make nn.Linear fail -- presumably callers
            always pass it; confirm.
        """
        super(Classifier,self).__init__()
        self.feat_extractor = feat_extractor
        # New head sized from the backbone's final fully-connected layer.
        self.class_fc = nn.Linear(feat_extractor.fc.in_features, num_classes)
463,
598,
463,
10,
1487404894
] |
def orchestrate_services(self):
    # Test stub: hands back a fresh Mock in place of the real services
    # object so callers can record and inspect interactions.
    return Mock()
66,
20,
66,
4,
1418876174
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.