hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a13dc2e5a4417dac9c88a4aea701e8ec3dc8c40
| 450
|
py
|
Python
|
Music Generator/General Tests/jazziness weighting equation.py
|
sambowyer/musicgenerator
|
8d4b74e136e9a1df944f7125ab1b1d39c74224b5
|
[
"MIT"
] | null | null | null |
Music Generator/General Tests/jazziness weighting equation.py
|
sambowyer/musicgenerator
|
8d4b74e136e9a1df944f7125ab1b1d39c74224b5
|
[
"MIT"
] | null | null | null |
Music Generator/General Tests/jazziness weighting equation.py
|
sambowyer/musicgenerator
|
8d4b74e136e9a1df944f7125ab1b1d39c74224b5
|
[
"MIT"
] | null | null | null |
# Quick numerical experiment: how the "jazziness" factor j changes the
# relative weighting of notes via the exponential weight (0.5 + j) ** x.
chord = [9, 6, 4, 3, 2]  # NOTE(review): unused in this script; kept for context
notes = [3, 4, 5, 6, 7]


def e(j):
    """Print the weight (0.5 + j) ** x for each x in `notes`, then the
    ratio between the first and last weights (and its inverse).

    Parameters
    ----------
    j : float
        Jazziness factor; shifts the exponential base away from 0.5.
    """
    coefficients = [(0.5 + j) ** x for x in notes]
    for c in coefficients:
        print(c)
    # [-1] instead of the original hard-coded [4] so the ratio still
    # works if the length of `notes` changes.
    comparison = coefficients[0] / coefficients[-1]
    print()
    print("3:7 = ", comparison)
    print("7:3 = ", 1 / comparison)


# Sweep j from 0.0 to 1.0 in steps of 0.1 and show the weights.
for i in range(11):
    j = float(i) / 10
    print("j=", j)
    e(j)
    print()
| 15.517241
| 47
| 0.515556
|
4a13dc8855b6859c9dc60a83252b31a48efe94de
| 2,984
|
py
|
Python
|
tests/test_smoketest.py
|
etesync/etebase-py
|
ee7bf21e9f57f0ce37f08d1f5cefd8ef0d3bb2f7
|
[
"BSD-3-Clause"
] | 50
|
2020-08-14T07:56:19.000Z
|
2022-03-27T12:33:10.000Z
|
tests/test_smoketest.py
|
etesync/etebase-py
|
ee7bf21e9f57f0ce37f08d1f5cefd8ef0d3bb2f7
|
[
"BSD-3-Clause"
] | 6
|
2020-11-26T11:53:38.000Z
|
2021-12-19T21:30:21.000Z
|
tests/test_smoketest.py
|
etesync/etebase-py
|
ee7bf21e9f57f0ce37f08d1f5cefd8ef0d3bb2f7
|
[
"BSD-3-Clause"
] | 5
|
2020-08-31T12:12:10.000Z
|
2021-04-11T23:28:40.000Z
|
import unittest
from etebase import Client, Account, FetchOptions
STORED_SESSION = "gqd2ZXJzaW9uAa1lbmNyeXB0ZWREYXRhxQGr_KWyDChQ6tXOJwJKf0Kw3QyR99itPIF3vZ5w6pVXSIq7AWul3fIXjIZOsBEwTVRumw7e9Af38D5oIL2VLNPLlmTOMjzIvuB00z3zDMFbH8pwrg2p_FvAhLHGjUGoXzU2XIxS4If7rQUfEz1zWkHPqWMrj4hACML5fks302dOUw7OsSMekcQaaVqMyj82MY3lG2qj8CL6ykSED7nW6OYWwMBJ1rSDGXhQRd5JuCGl6kgAHxKS6gkkIAWeUKjC6-Th2etk1XPKDiks0SZrQpmuXG8h_TBdd4igjRUqnIk09z5wvJFViXIU4M3pQomyFPk3Slh7KHvWhzxG0zbC2kUngQZ5h-LbVTLuT_TQWjYmHiOIihenrzl7z9MLebUq6vuwusZMRJ1Atau0Y2HcOzulYt4tLRP49d56qFEId3R4xomZ666hy-EFodsbzpxEKHeBUro3_gifOOKR8zkyLKTRz1UipZfKvnWk_RHFgZlSClRsXyaP34wstUavSiz-HNmTEmflNQKM7Awfel108FcSbW9NQAogW2Y2copP-P-R-DiHThrXmgDsWkTQFA"
SERVER_URL = "http://localhost:8033"
COL_TYPE = "some.coltype"
class TestStringMethods(unittest.TestCase):
    """End-to-end smoke test against a locally running Etebase server.

    Requires the server at SERVER_URL to be reachable and the account in
    STORED_SESSION to exist; exercises collection and item round-trips.
    """

    def test_main(self):
        # Connect and restore the saved account session.
        client = Client("python_test", SERVER_URL)
        self.assertTrue(Account.is_etebase_server(client))
        etebase = Account.restore(client, STORED_SESSION, None)
        etebase.force_server_url(SERVER_URL)
        etebase.fetch_token()

        # Collection create / meta update; content and type survive the edits.
        col_mgr = etebase.get_collection_manager()
        col_meta = {"name": "Name"}
        col = col_mgr.create(COL_TYPE, col_meta, b"Something")
        col_meta["bloop"] = "blap"
        col.meta = col_meta
        self.assertEqual(b"Something", bytes(col.content))
        self.assertEqual(COL_TYPE, col.collection_type)
        fetch_options = FetchOptions().prefetch(True)
        col_mgr.upload(col, fetch_options)

        # Listing: non-empty first, then empty when filtered by the stoken.
        col_list = col_mgr.list(COL_TYPE, None)
        self.assertNotEqual(0, len(list(col_list.data)))
        fetch_options = FetchOptions().stoken(col_list.stoken)
        col_list = col_mgr.list(COL_TYPE, fetch_options)
        self.assertEqual(0, len(list(col_list.data)))

        # Fetch back, modify content, save through a transaction.
        col2 = col_mgr.fetch(col.uid, None)
        self.assertEqual(b"Something", bytes(col2.content))
        col2.content = b"Something else"
        col_mgr.transaction(col2, None)

        # Item create / meta update / batch upload.
        it_mgr = col_mgr.get_item_manager(col)
        item_meta = {"type": "Bla"}
        item = it_mgr.create(item_meta, b"Something item")
        item_meta = {"type": "Bla", "bloop": "blap"}
        item.meta = item_meta
        self.assertNotEqual("", item.uid)
        self.assertIsNotNone(item.etag)
        self.assertEqual(b"Something item", bytes(item.content))
        it_mgr.batch([item], None, None)

        # Updating the content must change the etag.
        etag1 = item.etag
        self.assertIsNotNone(etag1)
        item.content = b"Something item2"
        it_mgr.transaction([item], None, None)
        self.assertNotEqual(item.etag, etag1)

        # Item listing mirrors the collection-level stoken checks above.
        item_list = it_mgr.list(None)
        self.assertEqual(1, len(list(item_list.data)))
        it_first = list(item_list.data)[0]
        self.assertEqual(b"Something item2", bytes(it_first.content))
        fetch_options = FetchOptions().stoken(item_list.stoken)
        item_list = it_mgr.list(fetch_options)
        self.assertEqual(0, len(list(item_list.data)))
        etebase.logout()
| 43.882353
| 625
| 0.72252
|
4a13dc9d462ff2ad819ddb39c3379b45a4db07d6
| 1,210
|
py
|
Python
|
preprocessing/mtat_read.py
|
maahhi/sota-music-tagging-models
|
14f0f9b89bb6f9ecb0fdb17eb6895905c0515027
|
[
"MIT"
] | null | null | null |
preprocessing/mtat_read.py
|
maahhi/sota-music-tagging-models
|
14f0f9b89bb6f9ecb0fdb17eb6895905c0515027
|
[
"MIT"
] | null | null | null |
preprocessing/mtat_read.py
|
maahhi/sota-music-tagging-models
|
14f0f9b89bb6f9ecb0fdb17eb6895905c0515027
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import glob
import librosa
import fire
import tqdm
class Processor:
    """Convert MagnaTagATune (mtat) mp3 files into resampled .npy arrays."""

    def __init__(self):
        # Target sampling rate (Hz) used when decoding audio.
        self.fs = 16000

    def get_paths(self, data_path):
        """Collect the mp3 file list under `data_path` and ensure the npy
        output directory exists."""
        self.files = glob.glob(os.path.join(data_path, 'mtat', 'mp3', '*/*.mp3'))
        self.npy_path = os.path.join(data_path, 'mtat', 'npy')
        # exist_ok avoids the race between the exists() check and makedirs().
        os.makedirs(self.npy_path, exist_ok=True)

    def get_npy(self, fn):
        """Load `fn` resampled to `self.fs` and return the waveform array."""
        # The sample rate returned by librosa is unused (forced to self.fs).
        x, _ = librosa.core.load(fn, sr=self.fs)
        return x

    def iterate(self, data_path):
        """Decode every mp3 under `data_path` into an .npy file, skipping
        files that already exist or that fail to decode."""
        self.get_paths(data_path)
        for fn in tqdm.tqdm(self.files):
            # os.path.basename is portable; '/'-splitting is not.
            npy_fn = os.path.join(self.npy_path, os.path.basename(fn)[:-3] + 'npy')
            if not os.path.exists(npy_fn):
                try:
                    x = self.get_npy(fn)
                    # Passing the path lets np.save open and close the file
                    # itself (the original open() leaked the handle).
                    np.save(npy_fn, x)
                except RuntimeError:
                    # some audio files are broken
                    print(fn)
                    continue
if __name__ == '__main__':
    # CLI entry point: exposes `iterate` as the command `run`,
    # e.g. `python mtat_read.py run <data_path>`.
    p = Processor()
    print(0)
    fire.Fire({'run': p.iterate})
| 26.888889
| 81
| 0.531405
|
4a13de84b0c64585eaa71cbd972d5f3ab2d7241c
| 9,709
|
py
|
Python
|
nephelae/array/DimensionHelper.py
|
pnarvor/nephelae_base
|
d5f1abeae0b0473b895b4735f182ddae0516a1bd
|
[
"BSD-3-Clause"
] | null | null | null |
nephelae/array/DimensionHelper.py
|
pnarvor/nephelae_base
|
d5f1abeae0b0473b895b4735f182ddae0516a1bd
|
[
"BSD-3-Clause"
] | null | null | null |
nephelae/array/DimensionHelper.py
|
pnarvor/nephelae_base
|
d5f1abeae0b0473b895b4735f182ddae0516a1bd
|
[
"BSD-3-Clause"
] | null | null | null |
"""DimensionHelper module
Contains a bunch of helper classes mostly used in the array.ScaledArray type.
The goal of the ScaledArray type is to be able to access a data array using
floating point indexes instead of integer indexes.
The idea behind this is that the data array represent a section of space.
For example a MesoNH data cube represent a cube of atmosphere. This cube has a size
in number of lines and columns,
"""
import numpy as np
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
from nephelae.types import Bounds
class AffineTransform:
    """Callable 1D affine map: f(x) = alpha * x + beta."""

    def __init__(self, alpha, beta):
        # slope and offset of the map
        self.alpha = alpha
        self.beta = beta

    def __call__(self, x):
        result = self.alpha * x + self.beta
        return result
class UnitsIndexConverter:

    """UnitsIndexConverter

    Base class to transform a tuple of index based indexing to a
    tuple of units based indexing and vice-versa.

    /!\ Is an abstract class. Concrete child classes must implement
    toUnit(key) and toIndex(key).
    """

    def __init__(self, dimSize):
        # Number of elements along this dimension.
        self.dimSize = dimSize

    def to_unit(self, key):
        """Convert an integer index (or slice of indexes) to units."""
        if isinstance(key, (int, float)):
            return float(self.toUnit(key))
        elif type(key) == slice:
            if key.start is None:
                key_start = self.to_unit(0)
            else:
                key_start = self.to_unit(key.start)
            if key.stop is None:
                key_stop = self.to_unit(self.dimSize - 1)  # -1 because python slice...
            else:
                key_stop = self.to_unit(key.stop - 1)  # -1 because python slice...
            return slice(key_start, key_stop, None)
        else:
            raise ValueError("key must be a slice or a numeric type.")

    def to_index(self, key):
        """Convert a units value (or slice of values) to integer index(es)."""
        if isinstance(key, (int, float)):
            return int(self.toIndex(key) + 0.5)  # rounding to closest integer
        elif type(key) == slice:
            if key.start is None:
                key_start = 0
            else:
                key_start = int(self.to_index(key.start))
            if key.stop is None:
                key_stop = self.dimSize
            else:
                key_stop = self.to_index(key.stop) + 1  # +1 because python slice...
            return slice(key_start, key_stop, None)
        else:
            raise ValueError("key must be a slice or a numeric type.")

    def linear_interpolation_indexes(self, key):
        """
        If key is a scalar, returns two pairs (key, weight) which are
        to be used to compute a weighted sum of two elements in an array,
        effectively computing a linear interpolation.

        If key is a slice, returns a single pair (key, weight), with
        the key being self.to_index(key) and the weight being 1.0
        (no interpolation if getting a non scalar subset of a dimension).

        /!\ returned key must be inside tuples to be able to concatenate
        keys cleanly.
        """
        if isinstance(key, slice):
            # No interpolation for a sub-range: single key, full weight.
            # (The original built this list twice; the dead assignment
            # was removed.)
            return [{'key':(self.to_index(key),), 'weight':1.0}]
        elif isinstance(key, (int, float)):
            lowIndex = int(self.toIndex(key))
            highIndex = lowIndex + 1
            try:
                lowUnit = self.to_unit(lowIndex)
                highUnit = self.to_unit(highIndex)
                lmbd = (key - lowUnit) / (highUnit - lowUnit)
                return [{'key':(lowIndex,), 'weight': 1.0-lmbd},
                        {'key':(highIndex,), 'weight': lmbd}]
            except Exception:
                # highIndex may fall outside the dimension (key exactly on
                # the last element): fall back to lowIndex with full weight.
                # Narrowed from a bare `except:` so that e.g.
                # KeyboardInterrupt is not swallowed.
                return [{'key':(lowIndex,), 'weight': 1.0}]
        else:
            raise ValueError("key must be a slice or a numeric type.")

    def bounds(self):
        """Return the units Bounds covered by the whole dimension."""
        maxSlice = self.to_unit(slice(None,None,None))
        return Bounds(maxSlice.start, maxSlice.stop)

    def span(self):
        """Return the units length covered by the whole dimension."""
        bounds = self.bounds()
        return bounds[-1] - bounds[0]
class AffineDimension(UnitsIndexConverter):

    """
    AffineDimension : maps input 1D indexes to output 1D scale through
    affine transformation.
    """

    def __init__(self, dimSpan, dimSize):
        super().__init__(dimSize)
        span = dimSpan[-1] - dimSpan[0]
        steps = self.dimSize - 1
        # forward map: index -> units; inverse map: units -> index
        self.toUnit = AffineTransform(span / steps, dimSpan[0])
        self.toIndex = AffineTransform(steps / span, -dimSpan[0] * steps / span)

    def subdimension(self, key):
        """Return an AffineDimension restricted to `key`.

        Returns None when `key` addresses a single element (scalar key,
        or a slice spanning at most one index).
        """
        index = self.to_index(key)
        if isinstance(index, int):
            return None
        if index.stop - index.start <= 1:
            return None
        units = self.to_unit(index)  # recompute units for clean borders
        return AffineDimension([units.start, units.stop],
                               index.stop - index.start)
class LookupTableDimension(UnitsIndexConverter):

    """
    LookupTableDimension : maps input 1D indexes to output 1D scale through
    an array defining a strictly monotonous function.
    """

    def __init__(self, inputToOutput):
        super().__init__(len(inputToOutput))
        indexes = np.linspace(0, self.dimSize - 1, self.dimSize)
        values = np.array(inputToOutput)
        # interpolators in both directions (values must be monotonous
        # for the inverse to be well defined)
        self.toUnit = interp1d(indexes, values)
        self.toIndex = interp1d(values, indexes)

    def subdimension(self, key):
        """Return a LookupTableDimension restricted to `key`.

        Returns None when `key` addresses a single element (scalar key,
        or a slice spanning at most one index).
        """
        index = self.to_index(key)
        if isinstance(index, int):
            return None
        if index.stop - index.start <= 1:
            return None
        # slice the stored lookup table (interp1d keeps it in .y)
        return LookupTableDimension(self.toUnit.y[index])
class DimensionHelper:

    """DimensionHelper

    Helper class to convert a tuple of indexes or units to
    their units or indexes counterpart. To be used in ScaledArray
    """

    def __init__(self):
        # One converter (AffineDimension / LookupTableDimension) per axis.
        self.dims = []

    def add_dimension(self, params, typ='linear', dimLen=None):
        """Append a new dimension converter.

        Parameters
        ----------
        params : sequence
            For 'linear': the dimension span (only first/last values used).
            For 'LUT': the full index -> units lookup table.
        typ : str
            One of 'linear', 'LUT', or 'empty' (no-op).
        dimLen : int, optional
            Element count for a 'linear' dimension (defaults to len(params)).
        """
        if typ == 'linear':
            if dimLen is None:
                dimLen = len(params)
            self.dims.append(AffineDimension([params[0], params[-1]], dimLen))
        elif typ == 'LUT':
            self.dims.append(LookupTableDimension(params))
        elif typ == 'empty':
            return
        else:
            raise ValueError("Invalid dimension type '" + typ + "'")

    def to_unit(self, keys):
        """Convert a tuple of index keys into units, one per dimension."""
        if len(keys) != len(self.dims):
            # Message fixed: was "Number or keys" (typo).
            raise ValueError("Number of keys must be equal to number of " +
                             "Dimension (" + str(len(keys)) + "/" +
                             str(len(self.dims)) + ")")
        res = []
        for key, dim in zip(keys, self.dims):
            res.append(dim.to_unit(key))
        return tuple(res)

    def to_index(self, keys):
        """Convert a tuple of units keys into indexes, one per dimension."""
        if len(keys) != len(self.dims):
            # Message fixed: was "Number or keys" (typo).
            raise ValueError("Number of keys must be equal to number of " +
                             "Dimension (" + str(len(keys)) + "/" +
                             str(len(self.dims)) + ")")
        res = []
        for key, dim in zip(keys, self.dims):
            res.append(dim.to_index(key))
        return tuple(res)

    def subarray_dimensions(self, keys):
        """Compute the new DimensionHelper object associated to the subarray
        corresponding to the keys.
        """
        if len(keys) != len(self.dims):
            raise ValueError("Number of keys must be equal to the number of" +
                             " dimensions. (Got " + str(len(keys)) + "/"
                             + str(len(self.dims)) + ")")
        newDims = DimensionHelper()
        for key, dim in zip(keys, self.dims):
            newDim = dim.subdimension(key)
            # Dimensions reduced to a single element are dropped.
            if newDim is not None:
                newDims.dims.append(newDim)
        return newDims

    def linear_interpolation_keys(self, keys):
        """ Returns a list of pairs of keys and weights to compute a linear
        interpolation. The interpolation computation should read in the
        main array using generated keys and compute a weighted sum of the
        resulting subarrays using the associated weights.
        """
        if len(keys) != len(self.dims):
            raise ValueError("Number of keys must be equal to the number of" +
                             " dimensions. (Got " + str(len(keys)) + "/"
                             + str(len(self.dims)) + ")")
        weightedKeys = []
        for key, dim in zip(keys, self.dims):
            weightedKeys.append(dim.linear_interpolation_indexes(key))
        # Fold the per-dimension pairs into their cartesian product,
        # concatenating keys and multiplying weights.
        while len(weightedKeys) > 1:
            newKeys = []
            for key1 in weightedKeys[-2]:
                for key2 in weightedKeys[-1]:
                    newKeys.append({'key':key1['key'] + key2['key'],
                                    'weight':key1['weight']*key2['weight']})
            weightedKeys.pop(-1)
            weightedKeys[-1] = newKeys
        return weightedKeys[0]

    def bounds(self):
        """Return the list of units Bounds, one per dimension."""
        return [dim.bounds() for dim in self.dims]

    def span(self):
        """Return the list of units spans, one per dimension."""
        return [dim.span() for dim in self.dims]
| 33.136519
| 99
| 0.564322
|
4a13dfc18b6f468a9f2aec81c693e4c62a952a4d
| 2,420
|
py
|
Python
|
vulture/lines.py
|
rahulatdeepsource/vulture
|
91044108b651c580339738e6812a91f0951fbef5
|
[
"MIT"
] | null | null | null |
vulture/lines.py
|
rahulatdeepsource/vulture
|
91044108b651c580339738e6812a91f0951fbef5
|
[
"MIT"
] | 1
|
2020-02-05T18:34:58.000Z
|
2020-02-05T18:34:58.000Z
|
vulture/lines.py
|
rahulatdeepsource/vulture
|
91044108b651c580339738e6812a91f0951fbef5
|
[
"MIT"
] | null | null | null |
import ast
def _get_last_child_with_lineno(node):
"""
Return the last direct child of `node` that has a lineno attribute,
or None if `node` has no such children.
Almost all node._field lists are sorted by the order in which they
appear in source code. For some nodes however, we have to skip some
fields that either don't have line numbers (e.g., "ctx" and "names")
or that are in the wrong position (e.g., "decorator_list" and
"returns"). Then we choose the first field (i.e., the field with the
highest line number) that actually contains a node. If it contains a
list of nodes, we return the last one.
"""
ignored_fields = {"ctx", "decorator_list", "names", "returns"}
fields = node._fields
# The fields of ast.Call are in the wrong order.
if isinstance(node, ast.Call):
fields = ("func", "args", "starargs", "keywords", "kwargs")
for name in reversed(fields):
if name in ignored_fields:
continue
try:
last_field = getattr(node, name)
except AttributeError:
continue
# Ignore non-AST objects like "is_async", "level" and "nl".
if isinstance(last_field, ast.AST):
return last_field
elif isinstance(last_field, list) and last_field:
return last_field[-1]
return None
def get_last_line_number(node):
    """Estimate last line number of the given AST node.

    The estimate follows the chain of last children that carry a lineno
    attribute, so it underestimates code that ends with, e.g., multiline
    strings and comments. Because children with and without line numbers
    can alternate along the way, the maximum lineno seen so far is kept
    and reported at the end. A more accurate (but slower) estimate would
    visit every child instead of only the last one, since the last-child
    path may end on a node without a line number.
    """
    max_seen = node.lineno
    child = _get_last_child_with_lineno(node)
    while child is not None:
        lineno = getattr(child, "lineno", None)
        if lineno is not None:
            max_seen = max(max_seen, lineno)
        child = _get_last_child_with_lineno(child)
    return max_seen
| 36.666667
| 72
| 0.658678
|
4a13e0a006324e97183a9b93612c6b7d94d4ab6f
| 150
|
py
|
Python
|
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-maciej-witkowski
|
9ebeb58814ab4921b7c1006d4b70e7a706627077
|
[
"MIT"
] | null | null | null |
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-maciej-witkowski
|
9ebeb58814ab4921b7c1006d4b70e7a706627077
|
[
"MIT"
] | null | null | null |
src/car.py
|
TestowanieAutomatyczneUG/laboratorium-9-maciej-witkowski
|
9ebeb58814ab4921b7c1006d4b70e7a706627077
|
[
"MIT"
] | null | null | null |
class Car:
    """Car interface with unimplemented stub methods (all bodies are `pass`);
    presumably meant to be mocked or overridden in tests — TODO confirm."""

    def needsFuel(self):
        # Stub: should report whether the car needs refueling.
        pass

    def getEngineTemperature(self):
        # Stub: should report the current engine temperature.
        pass

    def driveTo(self, destination):
        # Stub: should drive the car to `destination`.
        pass
| 13.636364
| 35
| 0.586667
|
4a13e0b22ad98e68cf268aecf9f53a9289df631a
| 28,237
|
py
|
Python
|
pyNastran/op2/writer/geom2_writer.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/writer/geom2_writer.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/writer/geom2_writer.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
from struct import pack, Struct
from .geom1_writer import write_geom_header, close_geom_table
integer_types = int
def write_geom2(op2, op2_ascii, obj, endian=b'<'):
    """Write the GEOM2 (element connectivity) table of an OP2 file.

    Parameters
    ----------
    op2 : file-like (binary)
        the op2 file object to receive the packed records
    op2_ascii : file-like (text)
        debug mirror of the binary output in readable form
    obj : model object
        holds the ``elements``, ``spoints`` and ``plotels`` dicts and a
        ``log`` attribute
    endian : bytes; default b'<'
        endianness prefix for struct packing
    """
    if not hasattr(obj, 'elements'):
        return
    #if not hasattr(obj, 'nodes'):
        #return
    nspoints = len(obj.spoints)
    nplotels = len(obj.plotels)
    nelements = len(obj.elements)
    # Nothing to write -> don't even open the GEOM2 header.
    if nelements == 0 and nplotels == 0 and nspoints == 0:
        return
    write_geom_header(b'GEOM2', op2, op2_ascii)
    itable = -3

    #etypes = [
        #'CROD', 'CONROD',
        #'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
        #'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4',
        #'CTRIA3', 'CQUAD4',
        #'CTETRA', 'CHEXA', 'CPENTA',
    #]
    # Element types with no writer implemented yet.
    etypes_to_skip = [
        'CHBDYE', 'CBEND',
        #'CHBDYP',
    ]

    # Group element ids by element type name.
    out = defaultdict(list)
    for eid, element in obj.elements.items():
        out[element.type].append(eid)
    if nspoints:
        out['SPOINT'] = list(obj.spoints.keys())
    if nplotels:
        out['PLOTEL'] = list(obj.plotels.keys())

    # elements with fixed lengths
    mapper = {
        # key, spack, nfields
        'CHBDYP' : ((10908, 109, 407), b'12i 3f', 15),
        'CHBDYG' : ((10808, 108, 406), b'16i', 16),
        'PLOTEL' : ((5201, 52, 11), b'3i', 3),
        'CTUBE' : ((3701, 37, 49), b'4i', 4),
        'CSHEAR' : ((3101, 31, 61), b'6i', 6),
        'CQUAD4' : ((2958, 51, 177), b'6iffii4f', 14),
        'CTRIA3' : ((5959, 59, 282), b'5iff3i3f', 13),
        'CQUADR' : ((8009, 80, 367), b'6iffii4f', 14),  # same as CQUAD4
        'CTRIAR' : ((9200, 92, 385), b'5iff3i3f', 13),  # same as CTRIA3
        'CQUAD8' : ((4701, 47, 326), b'10i 6f i', 17),  # current; not 2001
        'CTRIA6' : ((4801, 48, 327), b'8i 5f i', 14),  # current; not 2001
        'CTRIAX' : ((10108, 101, 512), b'9i', 9),
        'CTRIAX6' : ((6108, 61, 107), b'8i f ii', 11),
        'CQUAD' : ((9108, 91, 507), b'11i', 11),
        'CQUADX' : ((9008, 90, 508), b'11i', 11),  # same as CQUAD
        'CROD' : ((3001, 30, 48), b'4i', 4),
        'CONROD' : ((1601, 16, 47), b'4i4f', 8),
        'CDAMP1' : ((201, 2, 69), b'6i', 6),
        'CDAMP2' : ((301, 3, 70), b'if4i', 6),
        'CDAMP3' : ((401, 4, 71), b'4i', 4),
        'CDAMP4' : ((501, 5, 72), b'ifii', 4),
        'CDAMP5' : ((10608, 106, 404), b'ifii', 4),
        'CELAS1' : ((601, 6, 73), b'6i', 6),
        'CELAS2' : ((701, 7, 74), b'if4iff', 8),
        'CELAS3' : ((801, 8, 75), b'4i', 4),
        'CELAS4' : ((901, 9, 76), b'ifii', 4),
        'CVISC' : ((3901, 39, 50), b'4i', 4),
        'CTRAX3' : ((6111, 61, 996), b'5if', 6),
        'CQUADX4' : ((6112, 61, 997), b'6if', 7),
        'CQUADX8' : ((6114, 61, 999), b'10if', 11),
        'CTRAX6' : ((6113, 61, 998), b'8if', 9),
    }
    for name, eids in sorted(out.items()):
        nelements = len(eids)
        if name in etypes_to_skip:
            obj.log.warning('skipping GEOM2-%s' % name)
            continue

        #if nelements == 0:
            #continue
        #if name not in etypes:
            #obj.log.warning('skipping GEOM2-%s' % name)
            #continue

        # Variable-length / special-format element types delegate to their
        # own writers, which manage the whole record block themselves.
        if name in ['CTETRA', 'CHEXA', 'CPENTA', 'CPYRAM']:
            itable = _write_solid(obj, name, eids, nelements, itable, op2, op2_ascii, endian)
            continue
        elif name in mapper:
            key, spacki, nfields = mapper[name]
            spack = Struct(endian + spacki)
            #print(name, spacki)
        elif name == 'CBAR':
            itable = _write_cbar(obj, name, eids, nelements, itable, op2, op2_ascii, endian)
            continue
        elif name == 'CBEAM':
            itable = _write_cbeam(obj, name, eids, nelements, itable, op2, op2_ascii, endian)
            continue
        elif name == 'CBUSH':
            # spack=None: write_card picks the int/float struct per element.
            key = (2608, 26, 60)
            spack = None
            nfields = 14
        elif name == 'CBUSH1D':
            key = (5608, 56, 218)
            spack = Struct(endian + b'8i')
            nfields = 8
        elif name == 'CGAP':
            key = (1908, 19, 104)
            spack = None
            nfields = 9
        elif name == 'SPOINT':
            key = (5551, 49, 105)
            spack = None
            nfields = 1
        else:
            obj.log.warning('skipping %s' % name)
            continue
        #else:  # pragma: no cover
            #raise NotImplementedError(name)

        #if self.is_debug_file:
            #self.binary_debug.write('ndata=%s\n' % (nelements * 44))

        # Fixed-length types share the generic block writer below.
        nbytes = _write_intermediate_block(name, key, nfields, nelements, op2, op2_ascii)
        try:
            write_card(name, eids, spack, obj, op2, op2_ascii, endian)
        except:
            # Log which element type failed, then re-raise unchanged.
            obj.log.error('failed GEOM2-%s' % name)
            raise
        itable = _write_end_block(nbytes, itable, op2, op2_ascii)
    #-------------------------------------
    #print('itable', itable)
    close_geom_table(op2, op2_ascii, itable)
#-------------------------------------
def _write_intermediate_block(name, key, nfields, nelements, op2, op2_ascii):
"""writes the start of the geometry block; goes in the middle of the writer"""
nvalues = nfields * nelements + 3 # +3 comes from the keys
nbytes = nvalues * 4
op2.write(pack('3i', *[4, nvalues, 4]))
op2.write(pack('i', nbytes)) #values, nbtyes))
op2.write(pack('3i', *key))
op2_ascii.write('%s %s\n' % (name, str(key)))
return nbytes
def _write_end_block(nbytes, itable, op2, op2_ascii):
"""closes off the geometry block"""
op2.write(pack('i', nbytes))
itable -= 1
data = [
4, itable, 4,
4, 1, 4,
4, 0, 4]
op2.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
return itable
def _write_cbeam(obj, name, eids, nelements, itable, op2, op2_ascii, endian):
    """Write the CBEAM element records of the GEOM2 table."""
    key = (5408, 54, 261)
    nfields = 18
    nbytes = _write_intermediate_block(name, key, nfields, nelements, op2, op2_ascii)
    # Two record layouts: orientation vector (x1,x2,x3 floats, fe=0) or
    # reference node (g0 int, fe=2).  Per DMAP: F = FE bit-wise AND with 3.
    struct_x = Struct(endian + b'6i3f3i6f')
    struct_g0 = Struct(endian + b'12i6f')
    for eid in sorted(eids):
        elem = obj.elements[eid]
        ga, gb = elem.node_ids
        pid = elem.pid
        w1a, w2a, w3a = elem.wa
        w1b, w2b, w3b = elem.wb
        if elem.g0 is None:
            # orientation given by the x vector -> fe=0
            x1, x2, x3 = elem.x
            record = [eid, pid, ga, gb, elem.sa, elem.sb, x1, x2, x3, 0,
                      elem.pa, elem.pb, w1a, w2a, w3a, w1b, w2b, w3b]
            op2.write(struct_x.pack(*record))
        else:
            # orientation given by reference grid g0 -> fe=2
            record = [eid, pid, ga, gb, elem.sa, elem.sb, elem.g0, 0, 0, 2,
                      elem.pa, elem.pb, w1a, w2a, w3a, w1b, w2b, w3b]
            op2.write(struct_g0.pack(*record))
    itable = _write_end_block(nbytes, itable, op2, op2_ascii)
    return itable
def _write_cbar(obj, name, eids, nelements, itable, op2, op2_ascii, endian):
    """Write the CBAR element records of the GEOM2 table."""
    key = (2408, 24, 180)
    nfields = 16
    nbytes = _write_intermediate_block(name, key, nfields, nelements, op2, op2_ascii)
    # Two record layouts: orientation vector (x1,x2,x3 floats, fe=0) or
    # reference node (g0 int, fe=2).  Per DMAP: F = FE bit-wise AND with 3.
    struct_x = Struct(endian + b'4i3f3i6f')
    struct_g0 = Struct(endian + b'7ii2i6f')
    for eid in sorted(eids):
        elem = obj.elements[eid]
        ga, gb = elem.node_ids
        pid = elem.pid
        w1a, w2a, w3a = elem.wa
        w1b, w2b, w3b = elem.wb
        if elem.g0 is None:
            # orientation given by the x vector -> fe=0
            x1, x2, x3 = elem.x
            data = [eid, pid, ga, gb, x1, x2, x3, 0, elem.pa, elem.pb,
                    w1a, w2a, w3a, w1b, w2b, w3b]
            assert None not in data, 'CBAR-1; data=%s' % (data)
            op2.write(struct_x.pack(*data))
        else:
            # orientation given by reference grid g0 -> fe=2
            data = [eid, pid, ga, gb, elem.g0, 0, 0, 2, elem.pa, elem.pb,
                    w1a, w2a, w3a, w1b, w2b, w3b]
            assert None not in data, 'CBAR-1; data=%s' % (data)
            op2.write(struct_g0.pack(*data))
        op2_ascii.write(' eid=%s pid=%s nids=[%s, %s]\n' % (eid, pid, ga, gb))
    itable = _write_end_block(nbytes, itable, op2, op2_ascii)
    return itable
def _write_solid(model, name, eids, nelements, itable, op2, op2_ascii, endian):
    """Write the solid element (CTETRA/CHEXA/CPENTA/CPYRAM) records."""
    # record key and maximum node count per solid element type
    solid_info = {
        'CTETRA': ((5508, 55, 217), 10),
        'CHEXA': ((7308, 73, 253), 20),
        'CPENTA': ((4108, 41, 280), 15),
        'CPYRAM': ((17200, 172, 1000), 13),
    }
    if name not in solid_info:  # pragma: no cover
        raise NotImplementedError(name)
    key, nnodes = solid_info[name]
    nfields = nnodes + 2  # eid + pid + zero-padded node list
    spack = Struct(endian + b'%ii' % (nfields))
    nbytes = _write_intermediate_block(name, key, nfields, nelements, op2, op2_ascii)
    for eid in sorted(eids):
        elem = model.elements[eid]
        nids = elem.node_ids
        pid = elem.pid
        # missing mid-side nodes are written as 0
        if None in nids:
            nids = [nid if nid is not None else 0 for nid in nids]
        data = [eid, pid] + nids + [0] * (nnodes - len(nids))
        op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
        op2.write(spack.pack(*data))
    itable = _write_end_block(nbytes, itable, op2, op2_ascii)
    return itable
def write_card(name, eids, spack, obj, op2, op2_ascii, endian):
"""writes the GEOM2 elements"""
op2_ascii.write('GEOM2-%s\n' % name)
if name == 'CHBDYP':
surface_type_str_to_int = {
'POINT' : 1,
'LINE' : 2,
'ELCYL' : 6,
'FTUBE' : 7,
'TUBE' : 10,
}
for eid in sorted(eids):
elem = obj.elements[eid]
pid = elem.pid
#print(elem.get_stats())
surface_type_int = surface_type_str_to_int[elem.surface_type]
#(eid, pid, Type, iviewf, iviewb, g1, g2, g0, radmidf, radmidb,
#dislin, ce, e1, e2, e3) = out
nids = elem.node_ids
dislin = 0 if elem.gmid is None else elem.gmid
g0 = 0 if elem.g0 is None else elem.g0
e1 = 0. if elem.e1 is None else elem.e1
e2 = 0. if elem.e2 is None else elem.e2
e3 = 0. if elem.e3 is None else elem.e3
data = (eid, pid, surface_type_int, elem.iview_front, elem.iview_back,
elem.g1, elem.g2, g0, elem.rad_mid_front, elem.rad_mid_back,
dislin, elem.ce, e1, e2, e3)
#data = [eid, 0, surface_type_int,
#elem.iview_front, elem.iview_back,
#elem.rad_mid_front, elem.rad_mid_back, 0] + all_nids
assert None not in data, data
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CHBDYG':
surface_type_str_to_int = {
'REV' : 3,
'AREA3' : 4,
'AREA4' : 5,
'AREA6' : 8,
'AREA8' : 9,
}
for eid in sorted(eids):
elem = obj.elements[eid]
#print(elem.get_stats())
nids = elem.node_ids
#if None in nids:
#nids = [nid if nid is not None else 0 for nid in nids]
all_nids = [0] * 8
nnodes = len(nids)
all_nids[:nnodes] = nids
assert None not in nids, nids
surface_type_int = surface_type_str_to_int[elem.surface_type]
#(eid, unused_blank, Type, iviewf, iviewb, radmidf, radmidb, unused_blank2,
#g1, g2, g3, g4, g5, g6, g7, g8) = out
data = [eid, 0, surface_type_int,
elem.iview_front, elem.iview_back,
elem.rad_mid_front, elem.rad_mid_back, 0] + all_nids
assert None not in data, data
op2_ascii.write(' eid=%s nids=%s\n' % (eid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'PLOTEL':
for eid in sorted(eids):
elem = obj.plotels[eid]
nids = elem.node_ids
#(eid, n1, n2) = out
data = [eid] + nids
op2_ascii.write(' eid=%s nids=%s\n' % (eid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CBUSH':
spacki = Struct(endian + b'4i iii i ifi3f')
spackf = Struct(endian + b'4i fff i ifi3f')
for eid in sorted(eids):
elem = obj.elements[eid]
pid = elem.pid
ga, gb = elem.node_ids
s = elem.s
s1, s2, s3 = elem.si
cid = elem.cid
ocid = elem.ocid
if cid is None:
cid = -1
# not 100%
s1 = 0.0 if s1 is None else s1
s2 = 0.0 if s2 is None else s2
s3 = 0.0 if s3 is None else s3
if elem.x[0] is None and elem.g0 is None:
# Use Element CID below for orientation
f = -1
data = [eid, pid, ga, gb, 0, 0, 0,
f, cid, s, ocid, s1, s2, s3]
assert None not in data, 'CBUSH-1 %s' % (data)
op2.write(spacki.pack(*data))
elif elem.x[0] is not None:
f = 0
x1, x2, x3 = elem.x
data = [eid, pid, ga, gb, x1, x2, x3,
f, cid, s, ocid, s1, s2, s3]
assert None not in data, 'CBUSH-2 %s x=%s' % (data, elem.x)
op2.write(spackf.pack(*data))
elif elem.g0 is not None:
f = 2
g0 = elem.g0
data = [eid, pid, ga, gb, g0, 0, 0,
f, cid, s, ocid, s1, s2, s3]
assert None not in data, 'CBUSH-3 %s' % (data)
op2.write(spacki.pack(*data))
else:
raise RuntimeError('invalid CBBUSH')
elif name == 'CBUSH1D':
for eid in sorted(eids):
elem = obj.elements[eid]
#(eid, pid, g1, g2, cid, unused_a, unused_b, unused_c) = out
g1, g2 = elem.node_ids
cid = elem.cid
if cid is None:
cid = -1
data = [eid, elem.pid, g1, g2, cid, 0, 0, 0]
op2.write(spack.pack(*data))
elif name == 'CGAP':
structf = Struct(endian + b'4i3fii')
structi = Struct(endian + b'4i3iii')
for eid in sorted(eids):
elem = obj.elements[eid]
#(eid, pid, ga, gb, x1, x2, x3, f, cid) = out # f=0,1
pid = elem.pid
ga, gb = elem.node_ids
cid = elem.cid
#print(elem.get_stats())
if cid is None:
cid = -1
if elem.x[0] is not None and elem.g0 is None:
f = 1
x1, x2, x3 = elem.x
data = [eid, pid, ga, gb, x1, x2, x3, f, cid]
op2.write(structf.pack(*data))
elif elem.x[0] is None and elem.g0 is None:
f = 1
data = [eid, pid, ga, gb, 1., 0., 0., f, cid]
op2.write(structf.pack(*data))
elif elem.x[0] is not None:
f = 1
x1, x2, x3 = elem.x
data = [eid, pid, ga, gb, x1, x2, x3, f, cid]
#print('CGAP x; x=%s data=%s' % (elem.x, data))
op2.write(structf.pack(*data))
else:
f = 2
g0 = elem.g0
data = [eid, pid, ga, gb, g0, 0, 0, f, cid]
print('CGAP g0; x=%s gab0=%s data=%s' % (g0, [ga, gb, g0], data))
op2.write(structi.pack(*data))
elif name in ['CQUAD4', 'CQUADR']:
for eid in sorted(eids):
elem = obj.elements[eid]
nids = elem.node_ids
pid = elem.pid
#(eid, pid, n1, n2, n3, n4, theta, zoffs, blank, tflag,
#t1, t2, t3, t4) = out
theta = get_theta_from_theta_mcid(elem.theta_mcid)
tflag = elem.tflag
#if tflag is None:
#tflag =
t1 = elem.T1 if elem.T1 is not None else -1.
t2 = elem.T2 if elem.T2 is not None else -1.
t3 = elem.T3 if elem.T3 is not None else -1.
t4 = elem.T4 if elem.T4 is not None else -1.
data = [eid, pid] + nids + [theta, elem.zoffset, 0,
tflag, t1, t2, t3, t4]
assert tflag in [0, 1], elem.get_stats()
#print(' CQUAD4 eid=%s pid=%s nids=%s data=%s\n' % (eid, pid, str(nids), data[6:]))
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
assert None not in data, ' %s eid=%s pid=%s nids=%s\n%s' % (name, eid, pid, str(nids), data)
op2.write(spack.pack(*data))
elif name == 'CQUAD8': # current; not 2001
for eid in sorted(eids):
elem = obj.elements[eid]
nids = [nid if nid is not None else 0
for nid in elem.node_ids]
pid = elem.pid
#(eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, t1, t2,
#t3, t4, theta, zoffs, tflag) = out # current
#(eid, pid, n1, n2, n3, n4, n5, n6, n7, n8,
#t1, t2, t3, t4, theta, zoffs) = out # cquad8; 2001
theta = get_theta_from_theta_mcid(elem.theta_mcid)
tflag = elem.tflag if elem.tflag is not None else 0
t1 = elem.T1 if elem.T1 is not None else -1.
t2 = elem.T2 if elem.T2 is not None else -1.
t3 = elem.T3 if elem.T3 is not None else -1.
t4 = elem.T4 if elem.T4 is not None else -1.
data = [eid, pid] + nids + [t1, t2, t3, t4,
theta, elem.zoffset, tflag]
assert None not in data, '%s data=%s' % (name, data)
assert isinstance(elem.tflag, int), elem.get_stats()
assert elem.tflag in [-1, 0, 1], elem.get_stats()
#print(' CQUAD8 eid=%s pid=%s nids=%s data=%s\n' % (eid, pid, str(nids), data[6:]))
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CTRIA6': # current; not 2001
for eid in sorted(eids):
elem = obj.elements[eid]
nids = [nid if nid is not None else 0
for nid in elem.node_ids]
pid = elem.pid
#(eid, pid, n1, n2, n3, n4, n5, n6, theta, zoffs, t1, t2, t3, tflag) = out
theta = get_theta_from_theta_mcid(elem.theta_mcid)
t1 = elem.T1 if elem.T1 is not None else -1.
t2 = elem.T2 if elem.T2 is not None else -1.
t3 = elem.T3 if elem.T3 is not None else -1.
data = [eid, pid] + nids + [t1, t2, t3,
theta, elem.zoffset, elem.tflag]
assert None not in data, '%s data=%s' % (name, data)
assert elem.tflag in [-1, 0, 1], elem.get_stats()
#print(' CQUAD4 eid=%s pid=%s nids=%s data=%s\n' % (eid, pid, str(nids), data[6:]))
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CTRIAX':
for eid in sorted(eids):
elem = obj.elements[eid]
nids = [nid if nid is not None else 0
for nid in elem.node_ids]
pid = elem.pid
#eid, pid, n1, n2, n3, n4, n5, n6, unused_undef1 = data
data = [eid, pid] + nids + [0]
assert None not in data, '%s data=%s' % (name, data)
#print(' CTRIAX eid=%s mid=%s nids=%s data=%s\n' % (eid, pid, str(nids), data[6:]))
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CTRIAX6':
for eid in sorted(eids):
elem = obj.elements[eid]
nids = [nid if nid is not None else 0
for nid in elem.node_ids]
mid = elem.mid
#eid, mid, n1, n2, n3, n4, n5, n6, theta, unused_undef1, unused_undef2 = data
data = [eid, mid] + nids + [elem.theta, 0, 0]
assert None not in data, '%s data=%s' % (name, data)
#print(' CTRIAX6 eid=%s mid=%s nids=%s data=%s\n' % (eid, mid, str(nids), data[6:]))
op2_ascii.write(' eid=%s mid=%s nids=%s\n' % (eid, mid, str(nids)))
op2.write(spack.pack(*data))
elif name in ['CQUAD', 'CQUADX']:
for eid in sorted(eids):
elem = obj.elements[eid]
nids = [nid if nid is not None else 0
for nid in elem.node_ids]
pid = elem.pid
#(eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, n9) = out
data = [eid, pid] + nids
assert None not in data, '%s data=%s' % (name, data)
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name in ['CTRIA3', 'CTRIAR']:
for eid in sorted(eids):
elem = obj.elements[eid]
nids = elem.node_ids
pid = elem.pid
theta = get_theta_from_theta_mcid(elem.theta_mcid)
t1 = elem.T1 if elem.T1 is not None else -1.
t2 = elem.T2 if elem.T2 is not None else -1.
t3 = elem.T3 if elem.T3 is not None else -1.
#eid, pid, n1, n2, n3, theta_mcid, zoffs, blank1, blank2, tflag, t1, t2, t3
data = [eid, pid] + nids + [theta, elem.zoffset, 0, 0,
elem.tflag, t1, t2, t3]
assert elem.tflag in [0, 1], elem.get_stats()
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name in ['CTRAX3', 'CTRAX6', 'CQUADX4', 'CQUADX8']:
for eid in sorted(eids):
elem = obj.elements[eid]
nids = elem.node_ids
pid = elem.pid
data = [eid, pid] + nids + [elem.theta]
assert None not in data, ' eid=%s pid=%s nids=%s theta=%r\n' % (eid, pid, str(nids), elem.theta)
#print(' eid=%s pid=%s nids=%s theta=%r\n' % (eid, pid, str(nids), elem.theta))
op2_ascii.write(' eid=%s pid=%s nids=%s theta=%r\n' % (eid, pid, str(nids), elem.theta))
op2.write(spack.pack(*data))
elif name in ['CROD', 'CTUBE', 'CVISC', 'CSHEAR']:
for eid in sorted(eids):
elem = obj.elements[eid]
nids = elem.node_ids
pid = elem.pid
data = [eid, pid] + nids
#print(data)
op2_ascii.write(' eid=%s pid=%s nids=%s\n' % (eid, pid, str(nids)))
op2.write(spack.pack(*data))
elif name == 'CONROD':
for eid in sorted(eids):
elem = obj.elements[eid]
nids = elem.node_ids
#(eid, n1, n2, mid, a, j, c, nsm) = out
data = [eid] + nids + [elem.mid, elem.A, elem.j, elem.c, elem.nsm]
op2_ascii.write(' eid=%s nids=%s\n' % (eid, str(nids)))
op2.write(spack.pack(*data))
elif name in ['CELAS1', 'CDAMP1']:
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
pid = elem.pid
#(eid, pid, g1, g2, c1, c2)
data = [eid, pid, n1, n2, elem.c1, elem.c2]
#print(name, data)
op2_ascii.write(' eid=%s pid=%s nids=[%s, %s]\n' % (eid, pid, n1, n2))
op2.write(spack.pack(*data))
elif name == 'CELAS2':
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
#(eid, k, g1, g2, c1, c2, ge, s) = out
c2 = elem.c2 if elem.c2 is not None else 0
data = [eid, elem.k, n1, n2, elem.c1, c2, elem.ge, elem.s]
#print('CELAS2', data)
op2_ascii.write(' eid=%s nids=[%s, %s]\n' % (eid, n1, n2))
op2.write(spack.pack(*data))
elif name in ['CELAS3', 'CDAMP3', 'CDAMP5']:
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
pid = elem.pid
#(eid, pid, s1, s2) = out
data = [eid, pid, n1, n2]
#print(name, data)
op2_ascii.write(' eid=%s pid=%s nids=[%s, %s]\n' % (eid, pid, n1, n2))
op2.write(spack.pack(*data))
elif name == 'CELAS4':
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
#(eid, k, s1, s2) = out
data = [eid, elem.k, n1, n2]
#print(data)
op2_ascii.write(' eid=%s nids=[%s, %s]\n' % (eid, n1, n2))
op2.write(spack.pack(*data))
elif name == 'CDAMP2':
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
#(eid, bdamp, g1, g2, c1, c2) = out
c1 = elem.c1 if elem.c1 is not None else 0
c2 = elem.c2 if elem.c2 is not None else 0
data = [eid, elem.b, n1, n2, c1, c2]
#print(name, data)
op2_ascii.write(' eid=%s nids=[%s, %s]\n' % (eid, n1, n2))
op2.write(spack.pack(*data))
elif name == 'CDAMP4':
for eid in sorted(eids):
elem = obj.elements[eid]
n1, n2 = [nid if nid else 0 for nid in elem.node_ids]
#(eid, b, s1, s2) = out
data = [eid, elem.b, n1, n2]
#print(name, data)
op2_ascii.write(' eid=%s nids=[%s, %s]\n' % (eid, n1, n2))
op2.write(spack.pack(*data))
elif name == 'SPOINT':
nids = eids
nids.sort()
spack = Struct('%ii' % len(nids))
op2_ascii.write(' spoints%s\n' % str(nids))
op2.write(spack.pack(*nids))
else: # pragma: no cover
raise NotImplementedError(name)
def get_theta_from_theta_mcid(theta_mcid):
    """Encode the combined theta/mcid field for the OP2 writer.

    An integer value is an MCID coordinate-system id and is stored as
    ``512. * (mcid + 1)``; any other value is a theta angle and passes
    through unchanged.
    """
    if not isinstance(theta_mcid, integer_types):
        return theta_mcid
    return 512. * (theta_mcid + 1)
| 40.39628
| 109
| 0.487446
|
4a13e581461b5b1bd962fb9b2e16c814a425e981
| 4,627
|
py
|
Python
|
tflib/ops/linear.py
|
SAP-samples/security-research-differentially-private-generative-models
|
c0eced81da3bc0064beb538557f042732cda459f
|
[
"Apache-2.0"
] | 19
|
2019-08-23T08:08:38.000Z
|
2022-03-24T01:31:32.000Z
|
tflib/ops/linear.py
|
SAP-samples/security-research-differentially-private-generative-models
|
c0eced81da3bc0064beb538557f042732cda459f
|
[
"Apache-2.0"
] | 2
|
2020-04-23T15:12:48.000Z
|
2020-09-29T13:44:15.000Z
|
tflib/ops/linear.py
|
SAP-samples/security-research-differentially-private-generative-models
|
c0eced81da3bc0064beb538557f042732cda459f
|
[
"Apache-2.0"
] | 5
|
2019-12-03T17:24:38.000Z
|
2022-03-24T11:35:40.000Z
|
# SPDX-FileCopyrightText: 2020 SAP SE
#
# SPDX-License-Identifier: Apache-2.0
import tflib as lib
import numpy as np
import tensorflow as tf
# Module-level flag: when True, Linear() applies weight normalization
# unless the caller explicitly passes `weightnorm=`.
_default_weightnorm = False
def enable_default_weightnorm():
    """Enable weight normalization by default for subsequent Linear layers."""
    global _default_weightnorm
    _default_weightnorm = True
def disable_default_weightnorm():
    """Disable default weight normalization (callers may still opt in)."""
    global _default_weightnorm
    _default_weightnorm = False
# Module-level override for the weight initializer's stdev; None means
# "use the stdev implied by the chosen initialization scheme".
_weights_stdev = None
def set_weights_stdev(weights_stdev):
    """Force every subsequent weight initialization to use this stdev."""
    global _weights_stdev
    _weights_stdev = weights_stdev
def unset_weights_stdev():
    """Revert to per-scheme initialization stdevs."""
    global _weights_stdev
    _weights_stdev = None
def Linear(
        name,
        input_dim,
        output_dim,
        inputs,
        biases=True,
        initialization=None,
        weightnorm=None,
        gain=1.
):
    """Apply a fully-connected layer: ``inputs @ W (+ b)``.

    Parameters
    ----------
    name : str
        Parameter-name prefix; weights register as ``name + '.W'``,
        weightnorm scales as ``name + '.g'``, biases as ``name + '.b'``.
    input_dim, output_dim : int
        Size of the trailing input axis / of the output axis.
    inputs : tf.Tensor
        Rank-2 tensor ``(batch, input_dim)`` or higher rank with
        ``input_dim`` as the trailing axis.
    biases : bool
        Whether to add a trainable bias vector.
    initialization : None, 'lecun', 'glorot', 'he', 'glorot_he',
        'orthogonal', or ``("uniform", range)``.
        None falls back to Glorot.
    weightnorm : bool or None
        None defers to the module-level default (_default_weightnorm).
    gain : float
        Multiplier applied to the initial weight values.
    """
    with tf.name_scope(name):
        def uniform(stdev, size):
            # The module-level _weights_stdev override wins over the
            # scheme-specific stdev when set.
            if _weights_stdev is not None:
                stdev = _weights_stdev
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')
        if initialization == 'lecun':  # and input_dim != output_dim):
            # disabling orth. init for now because it's too slow
            weight_values = uniform(
                np.sqrt(1. / input_dim),
                (input_dim, output_dim)
            )
        elif initialization == 'glorot' or (initialization is None):
            weight_values = uniform(
                np.sqrt(2. / (input_dim + output_dim)),
                (input_dim, output_dim)
            )
        elif initialization == 'he':
            weight_values = uniform(
                np.sqrt(2. / input_dim),
                (input_dim, output_dim)
            )
        elif initialization == 'glorot_he':
            weight_values = uniform(
                np.sqrt(4. / (input_dim + output_dim)),
                (input_dim, output_dim)
            )
        elif initialization == 'orthogonal' or \
                (initialization is None and input_dim == output_dim):
            # NOTE(review): the `initialization is None` half of this test is
            # unreachable -- the 'glorot' branch above already captures None.
            # Kept for parity with the original control flow.
            # From lasagne
            def sample(shape):
                if len(shape) < 2:
                    raise RuntimeError("Only shapes of length 2 or more are "
                                       "supported.")
                flat_shape = (shape[0], np.prod(shape[1:]))
                # TODO: why normal and not uniform?
                a = np.random.normal(0.0, 1.0, flat_shape)
                u, _, v = np.linalg.svd(a, full_matrices=False)
                # pick the one with the correct shape
                q = u if u.shape == flat_shape else v
                q = q.reshape(shape)
                return q.astype('float32')
            weight_values = sample((input_dim, output_dim))
        elif initialization[0] == 'uniform':
            weight_values = np.random.uniform(
                low=-initialization[1],
                high=initialization[1],
                size=(input_dim, output_dim)
            ).astype('float32')
        else:
            raise Exception('Invalid initialization!')
        weight_values *= gain
        weight = lib.param(
            name + '.W',
            weight_values
        )
        if weightnorm is None:
            weightnorm = _default_weightnorm
        if weightnorm:
            norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
            # norm_values = np.linalg.norm(weight_values, axis=0)
            target_norms = lib.param(
                name + '.g',
                norm_values
            )
            with tf.name_scope('weightnorm'):
                # Rescale each column of W to the learned target norm.
                norms = tf.sqrt(tf.reduce_sum(tf.square(weight), reduction_indices=[0]))
                weight = weight * (target_norms / norms)
        # if 'Discriminator' in name:
        #     print "WARNING weight constraint on {}".format(name)
        #     weight = tf.nn.softsign(10.*weight)*.1
        if inputs.get_shape().ndims == 2:
            result = tf.matmul(inputs, weight)
        else:
            # Flatten leading axes, multiply, then restore them.
            reshaped_inputs = tf.reshape(inputs, [-1, input_dim])
            result = tf.matmul(reshaped_inputs, weight)
            # BUGFIX: tf.pack/tf.unpack were removed in TensorFlow 1.0; the
            # renamed ops are tf.stack/tf.unstack, so the old calls raised
            # AttributeError on any modern TF.
            result = tf.reshape(
                result,
                tf.stack(tf.unstack(tf.shape(inputs))[:-1] + [output_dim]))
        if biases:
            result = tf.nn.bias_add(
                result,
                lib.param(
                    name + '.b',
                    np.zeros((output_dim,), dtype='float32')
                )
            )
        return result
| 30.440789
| 98
| 0.528852
|
4a13e64cf4e9f965c93a14d8f0588a9193bacc05
| 1,008
|
py
|
Python
|
src/Main/DispatcherBase.py
|
mozafari/vprofiler
|
23e44f04ba7476d35080bd3bc3a03c8e941e762a
|
[
"Apache-2.0"
] | 107
|
2016-03-11T04:45:42.000Z
|
2022-03-06T05:06:38.000Z
|
src/Main/DispatcherBase.py
|
mozafari/vprofiler
|
23e44f04ba7476d35080bd3bc3a03c8e941e762a
|
[
"Apache-2.0"
] | 14
|
2016-03-02T21:30:54.000Z
|
2019-12-22T18:56:17.000Z
|
src/Main/DispatcherBase.py
|
mozafari/vprofiler
|
23e44f04ba7476d35080bd3bc3a03c8e941e762a
|
[
"Apache-2.0"
] | 12
|
2017-05-31T02:05:21.000Z
|
2021-11-27T13:59:57.000Z
|
import subprocess
class Dispatcher(object):
    """Base class for command dispatchers.

    Holds three option dictionaries:
      * disallowedOptions: option name -> value that must NOT appear
      * requiredOptions:   option name -> value (filled from `options`;
                           must be truthy)
      * optionalOptions:   option name -> value (filled from `options`)
    """

    def __init__(self, disallowedOptions, optionalOptions, requiredOptions):
        self.disallowedOptions = disallowedOptions
        self.optionalOptions = optionalOptions
        self.requiredOptions = requiredOptions

    def ParseOptions(self, options):
        """Validate `options` against the three dicts.

        Returns True when no disallowed option is set and every required
        option is truthy; fills requiredOptions/optionalOptions with the
        values read from `options`. Returns False otherwise.
        """
        # BUGFIX (Python 3 compatibility): dict.iteritems() and the bare
        # `print` statement are Python-2-only; .items() and print() behave
        # identically on both interpreter lines.
        for barredOpt, barredOptVal in self.disallowedOptions.items():
            if getattr(options, barredOpt) == barredOptVal:
                print('Barred opt ' + barredOpt + ' found')
                return False

        for reqOpt in self.requiredOptions:
            self.requiredOptions[reqOpt] = getattr(options, reqOpt)
            if not self.requiredOptions[reqOpt]:
                print('Required opt ' + reqOpt + ' not found')
                return False

        for optionalOpt in self.optionalOptions:
            self.optionalOptions[optionalOpt] = getattr(options, optionalOpt)

        return True

    def Dispatch(self, options):
        """Subclass hook: perform the actual dispatch. Base class is a no-op."""
        pass
| 34.758621
| 77
| 0.649802
|
4a13e68ae09bad8ded0afbe850d79c5ac3cff80a
| 5,052
|
py
|
Python
|
modules/io/python/drivers/io/tests/test_serialize.py
|
TREiop/v6d
|
9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
modules/io/python/drivers/io/tests/test_serialize.py
|
TREiop/v6d
|
9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
modules/io/python/drivers/io/tests/test_serialize.py
|
TREiop/v6d
|
9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import itertools
import numpy as np
import vineyard
import vineyard.io
@pytest.fixture(scope='module')
def global_obj(vineyard_ipc_socket):
    """Build and persist a global vineyard::Tuple with four ndarray members,
    each put through its own client connection. Returns the tuple's object id.
    """
    clients = [vineyard.connect(vineyard_ipc_socket) for _ in range(4)]
    payload = np.ones((1, 2, 3, 4, 5))
    object_ids = [client.put(payload) for client in clients]
    # Persist in reverse creation order (o4 first), matching the original flow.
    for client, obj_id in reversed(list(zip(clients, object_ids))):
        client.persist(obj_id)
    meta = vineyard.ObjectMeta()
    meta['typename'] = 'vineyard::Tuple'
    meta['size_'] = 4
    meta.set_global(True)
    for index, obj_id in enumerate(object_ids):
        meta.add_member('__elements_-%d' % index, obj_id)
    meta['__elements_-size'] = 4
    tup = clients[0].create_metadata(meta)
    clients[0].persist(tup)
    return tup.id
def test_seriarialize_round_trip(vineyard_ipc_socket, vineyard_endpoint, global_obj):
    """Serialize a global object to a local path and deserialize it back."""
    path = '/tmp/seri-test'
    conn_kwargs = dict(vineyard_ipc_socket=vineyard_ipc_socket,
                       vineyard_endpoint=vineyard_endpoint)
    vineyard.io.serialize(path, global_obj, **conn_kwargs)
    ret = vineyard.io.deserialize(path, **conn_kwargs)
    client = vineyard.connect(vineyard_ipc_socket)
    # Dump both metadata trees for eyeballing; no structural assertion here.
    print('old meta', client.get_meta(global_obj))
    print('new meta', client.get_meta(ret))
@pytest.mark.skip("require oss")
def test_seriarialize_round_trip_on_oss(vineyard_ipc_socket, vineyard_endpoint, global_obj):
    """Round-trip serialization against an Aliyun OSS bucket.

    Requires ACCESS_KEY_ID / SECRET_ACCESS_KEY (and optionally ENDPOINT)
    in the environment; skipped by default.
    """
    storage_options = {
        "key": os.environ["ACCESS_KEY_ID"],
        "secret": os.environ["SECRET_ACCESS_KEY"],
        "endpoint": os.environ.get("ENDPOINT", "http://oss-cn-hangzhou.aliyuncs.com"),
    }
    path = 'oss://grape-uk/tmp/seri-test'
    vineyard.io.serialize(path,
                          global_obj,
                          vineyard_ipc_socket=vineyard_ipc_socket,
                          vineyard_endpoint=vineyard_endpoint,
                          storage_options=storage_options)
    ret = vineyard.io.deserialize(path,
                                  vineyard_ipc_socket=vineyard_ipc_socket,
                                  vineyard_endpoint=vineyard_endpoint,
                                  storage_options=storage_options)
    client = vineyard.connect(vineyard_ipc_socket)
    print('old meta', client.get_meta(global_obj))
    print('new meta', client.get_meta(ret))
@pytest.mark.skip(reason="require s3")
def test_seriarialize_round_trip_on_s3(vineyard_ipc_socket, vineyard_endpoint, global_obj):
    """Round-trip serialization against an S3 bucket.

    Requires ACCESS_KEY_ID / SECRET_ACCESS_KEY (and optionally REGION)
    in the environment; skipped by default.
    """
    storage_options = {
        "key": os.environ["ACCESS_KEY_ID"],
        "secret": os.environ["SECRET_ACCESS_KEY"],
        "client_kwargs": {
            "region_name": os.environ.get("REGION", "us-east-1")
        },
    }
    path = "s3://test-bucket/tmp/seri-test"
    vineyard.io.serialize(
        path,
        global_obj,
        vineyard_ipc_socket=vineyard_ipc_socket,
        vineyard_endpoint=vineyard_endpoint,
        storage_options=storage_options,
    )
    ret = vineyard.io.deserialize(
        path,
        vineyard_ipc_socket=vineyard_ipc_socket,
        vineyard_endpoint=vineyard_endpoint,
        storage_options=storage_options,
    )
    client = vineyard.connect(vineyard_ipc_socket)
    print('old meta', client.get_meta(global_obj))
    print('new meta', client.get_meta(ret))
| 36.085714
| 92
| 0.621932
|
4a13e6f872eb959dde254d7286bccba073f7c130
| 5,779
|
py
|
Python
|
PyEntrezId/Conversion.py
|
lwgray/pyEntrezId
|
28286cf21b876dd4894bf21a222dfd1022441b75
|
[
"MIT"
] | 28
|
2015-10-19T14:22:36.000Z
|
2022-01-30T19:52:52.000Z
|
PyEntrezId/Conversion.py
|
lwgray/pyEntrezId
|
28286cf21b876dd4894bf21a222dfd1022441b75
|
[
"MIT"
] | 8
|
2016-05-06T20:55:54.000Z
|
2017-09-30T12:14:41.000Z
|
PyEntrezId/Conversion.py
|
lwgray/pyEntrezId
|
28286cf21b876dd4894bf21a222dfd1022441b75
|
[
"MIT"
] | 6
|
2017-02-15T15:15:23.000Z
|
2018-12-15T12:38:41.000Z
|
#!/usr/bin/python
import requests
import sys
import xmltodict
import re
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
class Conversion(object):
    """Convert between gene/protein identifier namespaces (Ensembl, Entrez,
    HGNC, UniProt, GenBank accession) via NCBI eutils, genenames.org and
    uniprot.org web services.

    Every method performs a network request and parses the XML response
    with xmltodict.
    """
    def __init__(self, email):
        """Must Include Email.

        NCBI eutils requests identify the caller via `tool` and `email`
        query parameters, so a syntactically valid email is required.
        """
        self.params = {}
        self.email = email
        self.params['tool'] = 'PyEntrez'
        # Lightweight email sanity check (not full RFC 5322 validation).
        if re.match(r"[^@]+@[^@]+\.[^@]+", self.email):
            pass
        else:
            raise ValueError("Enter a valid Email Address")
        self.params["email"] = email
        # Pre-encoded query string reused by every eutils request below.
        self.options = urlencode(self.params, doseq=True)
        return
    def convert_ensembl_to_entrez(self, ensembl):
        """Convert Ensembl Id to Entrez Gene Id.

        Only Ensembl transcript ids (containing 'ENST') are accepted;
        anything else raises IndexError.
        """
        if 'ENST' in ensembl:
            pass
        else:
            raise (IndexError)
        # Submit request to NCBI eutils/Gene database
        server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(
            ensembl)
        r = requests.get(server, headers={"Content-Type": "text/xml"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        # Process Request
        response = r.text
        info = xmltodict.parse(response)
        try:
            # An empty IdList parses to None, which raises TypeError on
            # subscripting; re-raise it for the caller.
            geneId = info['eSearchResult']['IdList']['Id']
        except TypeError:
            raise (TypeError)
        return geneId
    def convert_hgnc_to_entrez(self, hgnc):
        """Convert HGNC Id to Entrez Gene Id.

        Returns a dict with 'entrez_id' and 'symbol' keys extracted from
        the genenames.org record.
        """
        entrezdict = {}
        server = "http://rest.genenames.org/fetch/hgnc_id/{0}".format(hgnc)
        r = requests.get(server, headers={"Content-Type": "application/json"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        # NOTE(review): the Accept header asks for JSON but the body is
        # parsed as XML -- genenames.org appears to return XML here; confirm.
        response = r.text
        info = xmltodict.parse(response)
        # Pull just the entrez_id and symbol fields out of the record.
        for data in info['response']['result']['doc']['str']:
            if data['@name'] == 'entrez_id':
                entrezdict[data['@name']] = data['#text']
            if data['@name'] == 'symbol':
                entrezdict[data['@name']] = data['#text']
        return entrezdict
    def convert_entrez_to_uniprot(self, entrez):
        """Convert Entrez Id to Uniprot Id.

        Returns the first accession of the first matching UniProt entry.
        """
        server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez)
        r = requests.get(server, headers={"Content-Type": "text/xml"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        response = r.text
        info = xmltodict.parse(response)
        try:
            # Single-entry responses: 'entry' is a dict.
            data = info['uniprot']['entry']['accession'][0]
            return data
        except TypeError:
            # Multi-entry responses: 'entry' is a list; take the first.
            data = info['uniprot']['entry'][0]['accession'][0]
            return data
    def convert_uniprot_to_entrez(self, uniprot):
        """Convert Uniprot Id to Entrez Id."""
        # Submit request to NCBI eutils/Gene Database
        server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(
            uniprot)
        r = requests.get(server, headers={"Content-Type": "text/xml"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        # Process Request
        response = r.text
        info = xmltodict.parse(response)
        geneId = info['eSearchResult']['IdList']['Id']
        # check to see if more than one result is returned
        # if you have more than more result then check which Entrez Id returns the same uniprot Id entered.
        if len(geneId) > 1:
            for x in geneId:
                c = self.convert_entrez_to_uniprot(x)
                c = c.lower()
                u = uniprot.lower()
                if c == u:
                    return x
        else:
            return geneId
    def convert_accession_to_taxid(self, accessionid):
        """Convert Accession Id to Tax Id.

        Scans the GenBank feature qualifiers for the 'db_xref' entry and
        returns the numeric part after 'taxon:'.
        """
        # Submit request to NCBI eutils/Taxonomy Database
        server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?" + self.options + "&db=nuccore&id={0}&retmode=xml".format(
            accessionid)
        r = requests.get(server, headers={"Content-Type": "text/xml"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        # Process Request
        response = r.text
        records = xmltodict.parse(response)
        try:
            # Single-feature responses: 'GBFeature' is a dict.
            for i in records['GBSet']['GBSeq']['GBSeq_feature-table']['GBFeature']['GBFeature_quals']['GBQualifier']:
                for key, value in i.items():
                    if value == 'db_xref':
                        taxid = i['GBQualifier_value']
                        taxid = taxid.split(':')[1]
                        return taxid
        except:
            # Fallback for multi-feature responses, where 'GBFeature' is a
            # list -- the dict path above raises on subscripting.
            for i in records['GBSet']['GBSeq']['GBSeq_feature-table']['GBFeature'][0]['GBFeature_quals']['GBQualifier']:
                for key, value in i.items():
                    if value == 'db_xref':
                        taxid = i['GBQualifier_value']
                        taxid = taxid.split(':')[1]
                        return taxid
        return
    def convert_symbol_to_entrezid(self, symbol):
        """Convert Symbol to Entrez Gene Id.

        Returns a dict with 'entrez_id' and 'symbol' keys from the
        genenames.org record (same shape as convert_hgnc_to_entrez).
        """
        entrezdict = {}
        server = "http://rest.genenames.org/fetch/symbol/{0}".format(symbol)
        r = requests.get(server, headers={"Content-Type": "application/json"})
        if not r.ok:
            r.raise_for_status()
            sys.exit()
        response = r.text
        info = xmltodict.parse(response)
        for data in info['response']['result']['doc']['str']:
            if data['@name'] == 'entrez_id':
                entrezdict[data['@name']] = data['#text']
            if data['@name'] == 'symbol':
                entrezdict[data['@name']] = data['#text']
        return entrezdict
| 38.271523
| 134
| 0.545077
|
4a13e834e2862101aa1bcdd45be044958ed47ec7
| 958
|
py
|
Python
|
tests/frontend/verifier/test_ransac.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | null | null | null |
tests/frontend/verifier/test_ransac.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | null | null | null |
tests/frontend/verifier/test_ransac.py
|
yuancaimaiyi/gtsfm
|
cc5781c35af23498d45cd96a1818e4786c5cca80
|
[
"Apache-2.0"
] | 1
|
2021-09-23T13:08:49.000Z
|
2021-09-23T13:08:49.000Z
|
"""Tests for frontend's RANSAC verifier.
Authors: Ayush Baid
"""
import unittest
import tests.frontend.verifier.test_verifier_base as test_verifier_base
from gtsfm.frontend.verifier.ransac import Ransac
class TestRansacForEssentialMatrix(test_verifier_base.TestVerifierBase):
    """Unit tests for the RANSAC verifier w/ intrinsics in verification.
    All unit test functions defined in TestVerifierBase are run automatically.
    """
    def setUp(self):
        super().setUp()
        # With intrinsics enabled, verification runs on the essential matrix.
        self.verifier = Ransac(use_intrinsics_in_verification=True)
class TestRansacForFundamentalMatrix(test_verifier_base.TestVerifierBase):
    """Unit tests for the RANSAC verifier w/o intrinsics in verification.
    All unit test functions defined in TestVerifierBase are run automatically.
    """
    def setUp(self):
        super().setUp()
        # Without intrinsics, verification runs on the fundamental matrix.
        self.verifier = Ransac(use_intrinsics_in_verification=False)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 27.371429
| 78
| 0.75261
|
4a13e8c4288fcc20b116fc1a5e2fa77fb244d601
| 12,324
|
py
|
Python
|
tensorflow/python/lib/io/tf_record_test.py
|
sbalk/tensorflow
|
2997f48954e0877258579efa10e03ad783c164bf
|
[
"Apache-2.0"
] | 8
|
2017-03-20T12:04:21.000Z
|
2021-06-24T20:34:30.000Z
|
tensorflow/python/lib/io/tf_record_test.py
|
sbalk/tensorflow
|
2997f48954e0877258579efa10e03ad783c164bf
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/lib/io/tf_record_test.py
|
sbalk/tensorflow
|
2997f48954e0877258579efa10e03ad783c164bf
|
[
"Apache-2.0"
] | 2
|
2017-03-20T12:10:56.000Z
|
2017-11-12T00:15:54.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_record.TFRecordWriter and tf_record.tf_record_iterator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
import six
from tensorflow.python.framework import errors_impl
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import test
from tensorflow.python.util import compat
prefix_path = "third_party/tensorflow/core/lib"
# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class TFCompressionTestCase(test.TestCase):
  """Shared fixtures for TFRecord compression tests: record generation plus
  zlib/gzip (de)compression helpers rooted in the test temp directory."""

  def setUp(self):
    super(TFCompressionTestCase, self).setUp()
    self._num_files = 2
    self._num_records = 7

  def _Record(self, f, r):
    """Deterministic record payload for file `f`, record `r`."""
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _CreateFiles(self, options=None, prefix=""):
    """Write `_num_files` record files; returns their paths."""
    return [
        self._WriteRecordsToFile(
            [self._Record(i, j) for j in range(self._num_records)],
            prefix + "tfrecord.%d.txt" % i,
            options,
        )
        for i in range(self._num_files)
    ]

  def _WriteRecordsToFile(self, records, name="tfrecord", options=None):
    """Write `records` to a new TFRecord file; returns its path."""
    path = os.path.join(self.get_temp_dir(), name)
    with tf_record.TFRecordWriter(path, options=options) as writer:
      for record in records:
        writer.write(record)
    return path

  def _ZlibCompressFile(self, infile, name="tfrecord.z"):
    """zlib-compress `infile` into the temp dir; returns the new path."""
    with open(infile, "rb") as f:
      compressed = zlib.compress(f.read())
    out_path = os.path.join(self.get_temp_dir(), name)
    with open(out_path, "wb") as f:
      f.write(compressed)
    return out_path

  def _GzipCompressFile(self, infile, name="tfrecord.gz"):
    """gzip-compress `infile` into the temp dir; returns the new path."""
    with open(infile, "rb") as f:
      raw = f.read()
    out_path = os.path.join(self.get_temp_dir(), name)
    with gzip.GzipFile(out_path, "wb") as f:
      f.write(raw)
    return out_path

  def _ZlibDecompressFile(self, infile, name="tfrecord"):
    """zlib-decompress `infile` into the temp dir; returns the new path."""
    with open(infile, "rb") as f:
      raw = zlib.decompress(f.read())
    out_path = os.path.join(self.get_temp_dir(), name)
    with open(out_path, "wb") as f:
      f.write(raw)
    return out_path

  def _GzipDecompressFile(self, infile, name="tfrecord"):
    """gzip-decompress `infile` into the temp dir; returns the new path."""
    with gzip.GzipFile(infile, "rb") as f:
      raw = f.read()
    out_path = os.path.join(self.get_temp_dir(), name)
    with open(out_path, "wb") as f:
      f.write(raw)
    return out_path
class TFRecordWriterTest(TFCompressionTestCase):
  """Checks that TFRecordWriter's compressed output matches what the stock
  zlib/gzip libraries produce."""

  def setUp(self):
    super(TFRecordWriterTest, self).setUp()

  def _AssertFilesEqual(self, a, b, equal):
    """Pairwise byte-compare two lists of files for (in)equality."""
    for left, right in zip(a, b):
      with open(left, "rb") as lf, open(right, "rb") as rf:
        if equal:
          self.assertEqual(lf.read(), rf.read())
        else:
          self.assertNotEqual(lf.read(), rf.read())

  def testWriteReadZLibFiles(self):
    # Write uncompressed, then compress manually with zlib.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.NONE)
    plain_files = self._CreateFiles(options, prefix="uncompressed")
    zlib_files = [
        self._ZlibCompressFile(fn, "tfrecord_%s.z" % i)
        for i, fn in enumerate(plain_files)
    ]
    self._AssertFilesEqual(plain_files, zlib_files, False)
    # Writing with ZLIB enabled must produce identical bytes.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    compressed_files = self._CreateFiles(options, prefix="compressed")
    self._AssertFilesEqual(compressed_files, zlib_files, True)
    # Decompressing must recover the uncompressed originals.
    roundtripped = [
        self._ZlibDecompressFile(fn, "tfrecord_%s.z" % i)
        for i, fn in enumerate(compressed_files)
    ]
    self._AssertFilesEqual(roundtripped, plain_files, True)

  def testWriteReadGzipFiles(self):
    # Write uncompressed, then compress manually with gzip.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.NONE)
    plain_files = self._CreateFiles(options, prefix="uncompressed")
    gzip_files = [
        self._GzipCompressFile(fn, "tfrecord_%s.gz" % i)
        for i, fn in enumerate(plain_files)
    ]
    self._AssertFilesEqual(plain_files, gzip_files, False)
    # Now write with GZIP compression enabled.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
    compressed_files = self._CreateFiles(options, prefix="compressed")
    # Note: gzip output written by TFRecordWriter embeds the filename, so
    # compressed_files can't be byte-compared with gzip_files directly.
    # Decompressing must recover the uncompressed originals.
    roundtripped = [
        self._GzipDecompressFile(fn, "tfrecord_%s.gz" % i)
        for i, fn in enumerate(compressed_files)
    ]
    self._AssertFilesEqual(roundtripped, plain_files, True)
class TFRecordWriterZlibTest(TFCompressionTestCase):
  """Exercises zlib/gzip interoperability of TFRecord streams."""

  def testZLibFlushRecord(self):
    original = [b"small record"]
    fn = self._WriteRecordsToFile(original, "small_record")
    with open(fn, "rb") as h:
      buff = h.read()
    # Re-compress with extra Z_FULL_FLUSH blocks and trailing blocks;
    # neither should break reads.
    compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
    pieces = []
    for c in buff:
      if isinstance(c, int):
        # Python 3 iterates bytes as ints; convert back to a single byte.
        c = six.int2byte(c)
      pieces.append(compressor.compress(c))
    for _ in range(3):
      pieces.append(compressor.flush(zlib.Z_FULL_FLUSH))
    pieces.append(compressor.flush(zlib.Z_FINISH))
    # Overwrite the original file with the compressed stream.
    with open(fn, "wb") as h:
      h.write(b"".join(pieces))
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    actual = list(tf_record.tf_record_iterator(fn, options=options))
    self.assertEqual(actual, original)

  def testZlibReadWrite(self):
    """Verify that files produced are zlib compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
    zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")
    # Reading the manually-compressed file back must yield the originals.
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    self.assertEqual(
        list(tf_record.tf_record_iterator(zfn, options=options)), original)

  def testZlibReadWriteLarge(self):
    """Verify that writing large contents also works."""
    original = [_TEXT * 10240]  # roughly 5MB
    fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
    zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    self.assertEqual(
        list(tf_record.tf_record_iterator(zfn, options=options)), original)

  def testGzipReadWrite(self):
    """Verify that files produced are gzip compatible."""
    original = [b"foo", b"bar"]
    fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
    gzfn = self._GzipCompressFile(fn, "tfrecord.gz")
    options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
    self.assertEqual(
        list(tf_record.tf_record_iterator(gzfn, options=options)), original)
class TFRecordIteratorTest(TFCompressionTestCase):
  """Tests for `tf_record.tf_record_iterator` over compressed record files."""

  def setUp(self):
    super(TFRecordIteratorTest, self).setUp()
    self._num_records = 7

  def testIterator(self):
    """Iterating a compressed file yields every record, then StopIteration."""
    records = [self._Record(0, i) for i in range(self._num_records)]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    fn = self._WriteRecordsToFile(records, "compressed_records", options)

    reader = tf_record.tf_record_iterator(fn, options)
    for expected in records:
      record = next(reader)
      self.assertAllEqual(expected, record)
    with self.assertRaises(StopIteration):
      record = next(reader)

  def testWriteZlibRead(self):
    """Verify compression with TFRecordWriter is zlib library compatible."""
    original = [b"foo", b"bar"]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    fn = self._WriteRecordsToFile(original, "write_zlib_read.tfrecord.z",
                                  options)

    zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
    actual = list(tf_record.tf_record_iterator(zfn))
    self.assertEqual(actual, original)

  def testWriteZlibReadLarge(self):
    """Verify compression for large records is zlib library compatible."""
    # Make it large (about 5MB)
    original = [_TEXT * 10240]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
    fn = self._WriteRecordsToFile(original, "write_zlib_read_large.tfrecord.z",
                                  options)
    zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tfrecord")
    actual = list(tf_record.tf_record_iterator(zfn))
    self.assertEqual(actual, original)

  def testWriteGzipRead(self):
    """Verify compression with TFRecordWriter is gzip library compatible."""
    original = [b"foo", b"bar"]
    options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
    fn = self._WriteRecordsToFile(original, "write_gzip_read.tfrecord.gz",
                                  options)

    gzfn = self._GzipDecompressFile(fn, "write_gzip_read.tfrecord")
    actual = list(tf_record.tf_record_iterator(gzfn))
    self.assertEqual(actual, original)

  def testBadFile(self):
    """Verify that tf_record_iterator throws an exception on bad TFRecords."""
    fn = os.path.join(self.get_temp_dir(), "bad_file")
    with tf_record.TFRecordWriter(fn) as writer:
      writer.write(b"123")
    fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
    with open(fn, "rb") as f:
      with open(fn_truncated, "wb") as f2:
        # DataLossError requires that we've written the header, so this must
        # be at least 12 bytes.
        f2.write(f.read(14))
    with self.assertRaises(errors_impl.DataLossError):
      for _ in tf_record.tf_record_iterator(fn_truncated):
        pass
class TFRecordWriterCloseAndFlushTests(test.TestCase):
  """Close/flush behavior of `TFRecordWriter` (uncompressed by default)."""

  def setUp(self, compression_type=TFRecordCompressionType.NONE):
    # `compression_type` lets the GZIP/ZLIB subclasses below reuse these tests.
    super(TFRecordWriterCloseAndFlushTests, self).setUp()
    self._fn = os.path.join(self.get_temp_dir(), "tf_record_writer_test.txt")
    self._options = tf_record.TFRecordOptions(compression_type)
    self._writer = tf_record.TFRecordWriter(self._fn, self._options)
    self._num_records = 20

  def _Record(self, r):
    # type: (int) -> bytes
    return compat.as_bytes("Record %d" % r)

  def testWriteAndLeaveOpen(self):
    records = list(map(self._Record, range(self._num_records)))
    for record in records:
      self._writer.write(record)

    # Verify no segfault if writer isn't explicitly closed.
class TFRecordWriterCloseAndFlushGzipTests(TFRecordWriterCloseAndFlushTests):
  """Runs the close/flush tests with GZIP compression enabled."""

  def setUp(self):
    super(TFRecordWriterCloseAndFlushGzipTests,
          self).setUp(TFRecordCompressionType.GZIP)
class TFRecordWriterCloseAndFlushZlibTests(TFRecordWriterCloseAndFlushTests):
  """Runs the close/flush tests with ZLIB compression enabled."""

  def setUp(self):
    super(TFRecordWriterCloseAndFlushZlibTests,
          self).setUp(TFRecordCompressionType.ZLIB)
if __name__ == "__main__":
  # Run every test case defined in this module.
  test.main()
| 34.424581
| 80
| 0.705128
|
4a13e8d601e0f9158382f7efb96ecd9b1b91baac
| 236
|
py
|
Python
|
intermediate_algorithms/maior_e_menor_peso.py
|
Yta-ux/python_algorithms
|
62dd2d897e2f2de8783e68df3022170a86e9132e
|
[
"MIT"
] | 1
|
2022-01-26T22:15:17.000Z
|
2022-01-26T22:15:17.000Z
|
intermediate_algorithms/maior_e_menor_peso.py
|
Yta-ux/python_algorithms
|
62dd2d897e2f2de8783e68df3022170a86e9132e
|
[
"MIT"
] | null | null | null |
intermediate_algorithms/maior_e_menor_peso.py
|
Yta-ux/python_algorithms
|
62dd2d897e2f2de8783e68df3022170a86e9132e
|
[
"MIT"
] | null | null | null |
# Read five weights from the user, then report the heaviest and the lightest.
pesos = [float(input('Peso:')) for _ in range(5)]
m = max(pesos)
me = min(pesos)
print(f'''Maior: {m}
Menor: {me}''')
| 16.857143
| 28
| 0.334746
|
4a13e9810eb664970dd18577c07783f37da0ef8b
| 20,082
|
py
|
Python
|
KeyboardGuitar/venv/lib/python3.6/site-packages/serial/meta.py
|
Rezillien/stmmidi
|
5d259cc30dcd08cba4e5225f8ced679c0d1675f7
|
[
"Unlicense"
] | null | null | null |
KeyboardGuitar/venv/lib/python3.6/site-packages/serial/meta.py
|
Rezillien/stmmidi
|
5d259cc30dcd08cba4e5225f8ced679c0d1675f7
|
[
"Unlicense"
] | null | null | null |
KeyboardGuitar/venv/lib/python3.6/site-packages/serial/meta.py
|
Rezillien/stmmidi
|
5d259cc30dcd08cba4e5225f8ced679c0d1675f7
|
[
"Unlicense"
] | 1
|
2018-01-24T19:54:13.000Z
|
2018-01-24T19:54:13.000Z
|
# region Backwards Compatibility
from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, \
with_statement
from future import standard_library
standard_library.install_aliases()
from builtins import *
from future.utils import native_str
# endregion
import operator
import re
from collections import Callable, OrderedDict, Sequence
from copy import copy, deepcopy
from itertools import chain
from numbers import Number
try:
import typing
from typing import Optional, Dict
except ImportError:
typing = Optional = None
import serial
from serial.utilities import qualified_name, properties_values
# Matches dotted, digits-only version strings such as "1", "1.2" or "1.2.3".
_DOT_SYNTAX_RE = re.compile(
    r'^\d+(\.\d+)*$'
)
class Meta(object):
    """Base class for metadata objects attached to serial model classes/instances."""

    def __copy__(self):
        # Shallow copy: carry over every public, non-callable attribute.
        new_instance = self.__class__()
        for a in dir(self):
            if a[0] != '_':
                v = getattr(self, a)
                if not isinstance(v, Callable):
                    setattr(new_instance, a, v)
        return new_instance

    def __deepcopy__(self, memo=None):
        # type: (Memo) -> Meta
        # Deep copy every public property value (see `properties_values`).
        new_instance = self.__class__()
        for a, v in properties_values(self):
            setattr(new_instance, a, deepcopy(v, memo=memo))
        return new_instance

    def __bool__(self):
        # Metadata objects are always truthy, even when they hold no values.
        return True

    def __repr__(self):
        # Multi-line, constructor-style representation listing each property.
        return ('\n'.join(
            ['%s(' % qualified_name(type(self))] +
            [
                '    %s=%s,' % (p, repr(v))
                for p, v in properties_values(self)
            ] +
            [')']
        ))
class Version(Meta):
    """Metadata describing a specification version constraint.

    A constraint may be given either as a combined string such as
    ``'swagger<3.0'`` (parsed in ``__init__``) or as explicit keyword
    arguments, one per comparison operator.
    """

    def __init__(
        self,
        version=None,  # type: Optional[str]
        specification=None,  # type: Optional[Sequence[str]]
        equals=None,  # type: Optional[Sequence[Union[str, Number]]]
        not_equals=None,  # type: Optional[Sequence[Union[str, Number]]]
        less_than=None,  # type: Optional[Sequence[Union[str, Number]]]
        less_than_or_equal_to=None,  # type: Optional[Sequence[Union[str, Number]]]
        greater_than=None,  # type: Optional[Sequence[Union[str, Number]]]
        greater_than_or_equal_to=None,  # type: Optional[Sequence[Union[str, Number]]]
    ):
        # Parse a combined specification string (e.g. 'swagger<3.0&swagger>=2.0')
        # only when no explicit constraint keyword was supplied.
        if isinstance(version, str) and (
            (specification is None) and
            (equals is None) and
            (not_equals is None) and
            (less_than is None) and
            (less_than_or_equal_to is None) and
            (greater_than is None) and
            (greater_than_or_equal_to is None)
        ):
            specification = None
            for s in version.split('&'):
                # Two-character operators are tested before their
                # single-character prefixes ('<=' before '<', etc.).
                if '==' in s:
                    s, equals = s.split('==')
                elif '<=' in s:
                    s, less_than_or_equal_to = s.split('<=')
                elif '>=' in s:
                    s, greater_than_or_equal_to = s.split('>=')
                elif '<' in s:
                    s, less_than = s.split('<')
                elif '>' in s:
                    s, greater_than = s.split('>')
                elif '!=' in s:
                    s, not_equals = s.split('!=')
                elif '=' in s:
                    s, equals = s.split('=')
                # Every '&'-joined clause must name the same specification.
                if specification:
                    if s != specification:
                        raise ValueError(
                            'Multiple specifications cannot be associated with an instance of ``serial.meta.Version``: ' +
                            repr(version)
                        )
                elif s:
                    specification = s
        self.specification = specification
        self.equals = equals
        self.not_equals = not_equals
        self.less_than = less_than
        self.less_than_or_equal_to = less_than_or_equal_to
        self.greater_than = greater_than
        self.greater_than_or_equal_to = greater_than_or_equal_to

    def __eq__(self, other):
        # type: (Any) -> bool
        # True if `other` (a dotted version string, an int, or a sequence of
        # components) satisfies *every* constraint held by this instance.
        compare_properties_functions = (
            ('equals', operator.eq),
            ('not_equals', operator.ne),
            ('less_than', operator.lt),
            ('less_than_or_equal_to', operator.le),
            ('greater_than', operator.gt),
            ('greater_than_or_equal_to', operator.ge),
        )
        if (isinstance(other, str) and _DOT_SYNTAX_RE.match(other)) or isinstance(other, (Sequence, int)):
            # Normalize `other` into a tuple of integer version components.
            other_components = tuple(
                int(n)
                for n in (
                    other
                    if isinstance(other, Sequence) and not isinstance(other, (str, bytes)) else
                    (other,)
                    if isinstance(other, int) else
                    other.split('.')
                )
            )
            for compare_property, compare_function in compare_properties_functions:
                compare_value = getattr(self, compare_property)
                if compare_value is not None:
                    compare_values = tuple(int(n) for n in compare_value.split('.'))
                    other_values = copy(other_components)
                    # Zero-pad the shorter tuple so '1.2' compares equal to '1.2.0'.
                    ld = len(other_values) - len(compare_values)
                    if ld < 0:
                        other_values = tuple(chain(other_values, [0] * (-ld)))
                    elif ld > 0:
                        compare_values = tuple(chain(compare_values, [0] * ld))
                    if not compare_function(other_values, compare_values):
                        return False
        else:
            # Non-version operands are compared directly against each constraint.
            for compare_property, compare_function in compare_properties_functions:
                compare_value = getattr(self, compare_property)
                if (compare_value is not None) and not compare_function(other, compare_value):
                    return False
        return True

    def __str__(self):
        # Render the constraints back into '&'-joined specification syntax.
        # (`property`/`operator` shadow the builtin/module locally only.)
        representation = []
        for property, operator in (
            ('equals', '=='),
            ('not_equals', '!='),
            ('greater_than', '>'),
            ('greater_than_or_equal_to', '>='),
            ('less_than', '<'),
            ('less_than_or_equal_to', '<='),
        ):
            v = getattr(self, property)
            if v is not None:
                representation.append(
                    self.specification + operator + v
                )
        return '&'.join(representation)
class Object(Meta):
    """Metadata for ``serial.model.Object`` subclasses: an ordered mapping of
    property names to ``Property`` definitions."""

    def __init__(
        self,
        properties=None,  # type: Optional[Properties]
    ):
        self._properties = None  # type: Optional[Properties]
        self.properties = properties

    @property
    def properties(self):
        # type: () -> Optional[Properties]
        return self._properties

    @properties.setter
    def properties(
        self,
        properties_
        # type: Optional[Union[typing.Dict[str, properties.Property], Sequence[Tuple[str, properties.Property]]]]
    ):
        # type: (...) -> Properties
        # Always wrap in a `Properties` mapping (which validates its values).
        self._properties = Properties(properties_)
class Dictionary(Meta):
    """Metadata for ``serial.model.Dictionary`` subclasses: the types
    permitted for the dictionary's values."""

    def __init__(
        self,
        value_types=None,  # type: Optional[typing.Sequence[properties.Property, type]]
    ):
        self._value_types = None  # type: Optional[typing.Tuple]
        self.value_types = value_types

    @property
    def value_types(self):
        # type: () -> Optional[typing.Dict[str, Union[type, Property, model.Object]]]
        return self._value_types

    @value_types.setter
    def value_types(self, value_types):
        # type: (Optional[Sequence[Union[type, Property, model.Object]]]) -> None
        # Normalize to a tuple (or a callable producing types); on Python 2,
        # wherever `str` (unicode, via `future`) is accepted, also accept the
        # native byte string type.
        if value_types is not None:
            if isinstance(value_types, (type, serial.properties.Property)):
                value_types = (value_types,)
            if native_str is not str:
                if isinstance(value_types, Callable):
                    _types = value_types

                    # Wrap the callable so the `native_str` augmentation is
                    # applied to whatever type tuple it returns.
                    def value_types(d):
                        # type: (Any) -> Any
                        ts = _types(d)
                        if (ts is not None) and (str in ts) and (native_str not in ts):
                            ts = tuple(chain(*(
                                ((t, native_str) if (t is str) else (t,))
                                for t in ts
                            )))
                        return ts

                elif (str in value_types) and (native_str is not str) and (native_str not in value_types):
                    value_types = chain(*(
                        ((t, native_str) if (t is str) else (t,))
                        for t in value_types
                    ))
            if not isinstance(value_types, Callable):
                value_types = tuple(value_types)
        self._value_types = value_types
class Array(Meta):
    """Metadata for ``serial.model.Array`` subclasses: the types permitted
    for the array's items (mirrors ``Dictionary.value_types``)."""

    def __init__(
        self,
        item_types=None,  # type: Optional[typing.Sequence[properties.Property, type]]
    ):
        self._item_types = None  # type: Optional[typing.Tuple]
        self.item_types = item_types

    @property
    def item_types(self):
        return self._item_types

    @item_types.setter
    def item_types(self, item_types):
        # type: (Optional[Sequence[Union[type, Property, model.Object]]]) -> None
        # Normalize to a tuple (or a callable producing types); on Python 2,
        # wherever `str` (unicode, via `future`) is accepted, also accept the
        # native byte string type.
        if item_types is not None:
            if isinstance(item_types, (type, serial.properties.Property)):
                item_types = (item_types,)
            if native_str is not str:
                if isinstance(item_types, Callable):
                    _types = item_types

                    # Wrap the callable so the `native_str` augmentation is
                    # applied to whatever type tuple it returns.
                    def item_types(d):
                        # type: (Any) -> Any
                        ts = _types(d)
                        if (ts is not None) and (str in ts) and (native_str not in ts):
                            ts = tuple(chain(*(
                                ((t, native_str) if (t is str) else (t,))
                                for t in ts
                            )))
                        return ts

                elif (str in item_types) and (native_str is not str) and (native_str not in item_types):
                    item_types = chain(*(
                        ((t, native_str) if (t is str) else (t,))
                        for t in item_types
                    ))
            if not isinstance(item_types, Callable):
                item_types = tuple(item_types)
        self._item_types = item_types
class Properties(OrderedDict):
    """An ordered mapping of property names to ``serial.properties.Property``
    instances, used as ``Object`` metadata."""

    def __init__(
        self,
        items=(
            None
        )  # type: Optional[Union[typing.Dict[str, properties.Property], Sequence[Tuple[str, properties.Property]]]]
    ):
        if items is None:
            super().__init__()
        else:
            if isinstance(items, OrderedDict):
                items = items.items()
            elif isinstance(items, dict):
                # Plain dicts carry no meaningful order; sort for determinism.
                items = sorted(items.items())
            super().__init__(items)

    def __setitem__(self, key, value):
        # type: (str, Property) -> None
        # Only `Property` instances may be stored as values.
        if not isinstance(value, serial.properties.Property):
            raise ValueError(value)
        super().__setitem__(key, value)

    def __copy__(self):
        # type: () -> Properties
        return self.__class__(self)

    def __deepcopy__(self, memo=None):
        # type: (dict) -> Properties
        return self.__class__(
            tuple(
                (k, deepcopy(v, memo=memo))
                for k, v in self.items()
            )
        )

    def __repr__(self):
        # Constructor-style representation; multi-line when non-empty.
        representation = [
            qualified_name(type(self)) + '('
        ]
        items = tuple(self.items())
        if len(items) > 0:
            representation[0] += '['
            for k, v in items:
                rv = (
                    qualified_name(v) if isinstance(v, type) else
                    repr(v)
                )
                rvls = rv.split('\n')
                if len(rvls) > 1:
                    # Re-indent nested multi-line representations.
                    rvs = [rvls[0]]
                    for rvl in rvls[1:]:
                        rvs.append('        ' + rvl)
                    rv = '\n'.join(rvs)
                    representation += [
                        '    (',
                        '        %s,' % repr(k),
                        '        %s' % rv,
                        '    ),'
                    ]
                else:
                    representation.append(
                        '    (%s, %s),' % (repr(k), rv)
                    )
            # Drop the trailing comma from the last item.
            representation[-1] = representation[-1][:-1]
            representation.append(
                ']'
            )
        representation[-1] += ')'
        if len(representation) > 2:
            return '\n'.join(representation)
        else:
            return ''.join(representation)
def read(
    o  # type: Union[type, serial.model.Object]
):
    # type: (...) -> Union[Object, typing.Mapping, str]
    """Return the metadata associated with a model class or instance.

    For a class, metadata of the matching kind (`Object`/`Array`/`Dictionary`)
    is lazily created and cached on the class.  For an instance, the
    instance's own metadata is returned when present, otherwise the class's.
    """
    if isinstance(o, type):
        if o._meta is None:
            o._meta = (
                Object()
                if issubclass(o, serial.model.Object) else
                Array()
                if issubclass(o, serial.model.Array) else
                Dictionary()
                if issubclass(o, serial.model.Dictionary)
                else None
            )
        return o._meta
    elif isinstance(
        o,
        (
            serial.model.Object,
            serial.model.Array,
            serial.model.Dictionary
        )
    ):
        return o._meta or read(type(o))
def writable(
    o  # type: Union[type, serial.model.Object]
):
    # type: (...) -> Union[Object, typing.Mapping, str]
    """Return metadata for *o* that is safe to modify.

    For a class: create metadata if absent, and if the metadata is shared
    with a base class, replace it with a deep copy so edits do not leak into
    the base (copy-on-write).  For an instance: give the instance its own
    deep copy of the class metadata on first use.
    """
    if isinstance(o, type):
        if o._meta is None:
            o._meta = (
                Object()
                if issubclass(o, serial.model.Object) else
                Array()
                if issubclass(o, serial.model.Array) else
                Dictionary()
                if issubclass(o, serial.model.Dictionary)
                else None
            )
        else:
            for b in o.__bases__:
                # Copy-on-write: don't mutate metadata shared with a base.
                if hasattr(b, '_meta') and (o._meta is b._meta):
                    o._meta = deepcopy(o._meta)
                    break
    elif isinstance(
        o,
        (
            serial.model.Object,
            serial.model.Array,
            serial.model.Dictionary
        )
    ):
        if o._meta is None:
            o._meta = deepcopy(writable(type(o)))
    return o._meta
def write(
    o,  # type: Union[type, serial.model.Object]
    meta  # type: Meta
):
    # type: (...) -> Union[Object, typing.Mapping, str]
    """Attach *meta* to the model class or instance *o*, validating that the
    metadata type matches the model kind (`Object`/`Array`/`Dictionary`)."""
    if isinstance(o, type):
        t = o
        mt = (
            Object
            if issubclass(o, serial.model.Object) else
            Array
            if issubclass(o, serial.model.Array) else
            Dictionary
            if issubclass(o, serial.model.Dictionary)
            else None
        )
    elif isinstance(
        o,
        (
            serial.model.Object,
            serial.model.Array,
            serial.model.Dictionary
        )
    ):
        t = type(o)
        mt = (
            Object
            if isinstance(o, serial.model.Object) else
            Array
            if isinstance(o, serial.model.Array) else
            Dictionary
            if isinstance(o, serial.model.Dictionary)
            else None
        )
    # NOTE(review): if `o` is neither a type nor a model instance, `mt` is
    # never bound and the check below raises NameError rather than a clear
    # TypeError -- worth confirming/fixing upstream.
    if not isinstance(meta, mt):
        raise ValueError(
            'Metadata assigned to `%s` must be of type `%s`' % (
                qualified_name(t),
                qualified_name(mt)
            )
        )
    o._meta = meta
_UNIDENTIFIED = None
def xpath(o, xp=_UNIDENTIFIED):
    # type: (serial.model.Model, Optional[str]) -> Optional[str]
    """
    Return the xpath at which the element represented by this object was found,
    relative to the root document. If the parameter `xp` is provided--set the
    value and propagate child xpaths recursively.

    Raises:
        TypeError: If `o` is not a model instance or `xp` is not a string.
    """
    if not isinstance(o, serial.model.Model):
        raise TypeError(
            '`o` must be an instance of `%s`, not %s.' % (qualified_name(serial.model.Model), repr(o))
        )
    if xp is not _UNIDENTIFIED:
        if not isinstance(xp, str):
            raise TypeError(
                '`xp` must be a `str`, not %s.' % repr(xp)
            )
        o._xpath = xp
        # Propagation happens only when setting; doing it on a plain get would
        # stamp children with paths built from an unset (`None`) parent path.
        if isinstance(o, serial.model.Dictionary):
            for k, v in o.items():
                if isinstance(v, serial.model.Model):
                    xpath(v, '%s/%s' % (xp, k))
        elif isinstance(o, serial.model.Object):
            for pn, prop in read(o).properties.items():
                # Prefer the property's serialized name over the attribute name.
                k = prop.name or pn
                v = getattr(o, pn)
                if isinstance(v, serial.model.Model):
                    xpath(v, '%s/%s' % (xp, k))
        elif isinstance(o, serial.model.Array):
            for i in range(len(o)):
                v = o[i]
                if isinstance(v, serial.model.Model):
                    # BUG FIX: `'%s[%s]' % str(i)` supplied one argument for two
                    # placeholders and raised TypeError; format with the parent
                    # path and the index.
                    xpath(v, '%s[%s]' % (xp, i))
    return o._xpath
def pointer(o, p=_UNIDENTIFIED):
    # type: (serial.model.Model, Optional[str]) -> Optional[str]
    """
    Return the JSON pointer of this object relative to the root document. If
    the parameter `p` is provided--set the value and propagate child pointers
    recursively.

    Raises:
        TypeError: If `o` is not a model instance or `p` is not a string.
    """
    if not isinstance(o, serial.model.Model):
        raise TypeError(
            '`o` must be an instance of `%s`, not %s.' % (qualified_name(serial.model.Model), repr(o))
        )
    if p is not _UNIDENTIFIED:
        if not isinstance(p, str):
            raise TypeError(
                '`p` must be a `str`, not %s.' % repr(p)
            )
        o._pointer = p
        # Propagation happens only when setting (see `xpath`).
        if isinstance(o, serial.model.Dictionary):
            for k, v in o.items():
                if isinstance(v, serial.model.Model):
                    # '~' and '/' in keys must be escaped per RFC 6901.
                    pointer(v, '%s/%s' % (p, k.replace('~', '~0').replace('/', '~1')))
        elif isinstance(o, serial.model.Object):
            # BUG FIX: the loop variable previously shadowed the parameter `p`,
            # so child pointers were built from the `Property` object instead
            # of the parent pointer string.
            for pn, prop in read(o).properties.items():
                k = prop.name or pn
                v = getattr(o, pn)
                if isinstance(v, serial.model.Model):
                    pointer(v, '%s/%s' % (p, k.replace('~', '~0').replace('/', '~1')))
        elif isinstance(o, serial.model.Array):
            for i in range(len(o)):
                v = o[i]
                if isinstance(v, serial.model.Model):
                    # BUG FIX: `'%s[%s]' % str(i)` raised TypeError; JSON
                    # pointers address array items as '<parent>/<index>'
                    # (RFC 6901), not with xpath-style brackets.
                    pointer(v, '%s/%s' % (p, i))
    return o._pointer
def url(o, u=_UNIDENTIFIED):
    # type: (serial.model.Model, Optional[str]) -> Optional[str]
    """Get (or, when `u` is given, set) the source URL associated with a model,
    recursing into all nested models."""
    if not isinstance(o, serial.model.Model):
        raise TypeError(
            '`o` must be an instance of `%s`, not %s.' % (qualified_name(serial.model.Model), repr(o))
        )
    if u is not _UNIDENTIFIED:
        if not isinstance(u, str):
            raise TypeError(
                '`u` must be a `str`, not %s.' % repr(u)
            )
        o._url = u
    # The same URL is propagated verbatim to every nested model.
    if isinstance(o, serial.model.Dictionary):
        for v in o.values():
            if isinstance(v, serial.model.Model):
                url(v, u)
    elif isinstance(o, serial.model.Object):
        for pn in read(o).properties.keys():
            v = getattr(o, pn)
            if isinstance(v, serial.model.Model):
                url(v, u)
    elif isinstance(o, serial.model.Array):
        for v in o:
            if isinstance(v, serial.model.Model):
                url(v, u)
    return o._url
def format_(o, f=_UNIDENTIFIED):
    # type: (serial.model.Model, Optional[str]) -> Optional[str]
    """Get (or, when `f` is given, set) the serialization format associated
    with a model, recursing into all nested models."""
    if not isinstance(o, serial.model.Model):
        raise TypeError(
            '`o` must be an instance of `%s`, not %s.' % (qualified_name(serial.model.Model), repr(o))
        )
    if f is not _UNIDENTIFIED:
        if not isinstance(f, str):
            raise TypeError(
                '`f` must be a `str`, not %s.' % repr(f)
            )
        o._format = f
    # The same format is propagated verbatim to every nested model.
    if isinstance(o, serial.model.Dictionary):
        for v in o.values():
            if isinstance(v, serial.model.Model):
                format_(v, f)
    elif isinstance(o, serial.model.Object):
        for pn in read(o).properties.keys():
            v = getattr(o, pn)
            if isinstance(v, serial.model.Model):
                format_(v, f)
    elif isinstance(o, serial.model.Array):
        for v in o:
            if isinstance(v, serial.model.Model):
                format_(v, f)
    return o._format
| 34.328205
| 122
| 0.501693
|
4a13eaf01e9957acee52f227fb58f92b392fdcf4
| 1,427
|
py
|
Python
|
great_expectations/dataset/autoinspect.py
|
mastratton3/great_expectations
|
151970d776c942bfc23cdd90c7ed00b57a34559d
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/autoinspect.py
|
mastratton3/great_expectations
|
151970d776c942bfc23cdd90c7ed00b57a34559d
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/autoinspect.py
|
mastratton3/great_expectations
|
151970d776c942bfc23cdd90c7ed00b57a34559d
|
[
"Apache-2.0"
] | null | null | null |
"""
Autoinspect utilities to automatically generate expectations by evaluating a data_asset.
"""
from __future__ import division
import warnings
from six import string_types
from .util import create_multiple_expectations
class AutoInspectError(Exception):
    """Exception raised for errors in autoinspection.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        # Pass the message to Exception.__init__ so that str(exc) and
        # tracebacks display it (previously they rendered empty).
        super(AutoInspectError, self).__init__(message)
        self.message = message
def columns_exist(inspect_dataset):
    """
    This function will take a dataset and add expectations that each column present exists.

    Args:
        inspect_dataset (great_expectations.dataset): The dataset to inspect and to which to add expectations.

    Raises:
        NotImplementedError: If the dataset does not expose a usable
            ``get_table_columns`` result.
    """
    # Both failure modes (missing accessor, or accessor returning None) are
    # reported identically, so funnel them through one check.
    table_columns = None
    if hasattr(inspect_dataset, 'get_table_columns'):
        table_columns = inspect_dataset.get_table_columns()
    if table_columns is None:
        # Warn as well as raise so interactive callers see the reason even if
        # the exception is swallowed upstream.
        warnings.warn(
            "No columns list found in dataset; no autoinspection performed.")
        # NOTE: "assests" typo fixed in the error message below.
        raise NotImplementedError(
            "columns_exist autoinspection is not implemented for data assets "
            "without the table_columns property")
    create_multiple_expectations(inspect_dataset, table_columns, "expect_column_to_exist")
| 35.675
| 136
| 0.749124
|
4a13eb01f15ac9552c64065af2a83377c17f00c2
| 2,245
|
py
|
Python
|
backend/homefieldtest_dev_22864/urls.py
|
crowdbotics-apps/homefieldtest-dev-22864
|
1dc12ae100cb170bde4aff0a8790451f61076a24
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/homefieldtest_dev_22864/urls.py
|
crowdbotics-apps/homefieldtest-dev-22864
|
1dc12ae100cb170bde4aff0a8790451f61076a24
|
[
"FTL",
"AML",
"RSA-MD"
] | 28
|
2021-08-15T15:11:19.000Z
|
2022-03-06T17:22:23.000Z
|
backend/homefieldtest_dev_22864/urls.py
|
crowdbotics-apps/homefieldtest-dev-22864
|
1dc12ae100cb170bde4aff0a8790451f61076a24
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""homefieldtest_dev_22864 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Route table: app URLConfs first, then admin, auth, and API docs.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "HomefieldTest"
admin.site.site_title = "HomefieldTest Admin Portal"
admin.site.index_title = "HomefieldTest Admin"

# swagger
api_info = openapi.Info(
    title="HomefieldTest API",
    default_version="v1",
    description="API documentation for HomefieldTest App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Serve the SPA index for the root URL and any route not matched above
# (client-side routing handles the rest). The catch-all must stay last.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| 35.634921
| 87
| 0.716704
|
4a13ec6f73b137a21c5fcfe02bebd235068fe3e0
| 1,206
|
py
|
Python
|
qiskit/qasm/node/format.py
|
TheGupta2012/qiskit-terra
|
5ea6e9557655b144228c29d7099375f5d2c91120
|
[
"Apache-2.0"
] | 1,599
|
2018-07-10T10:59:12.000Z
|
2022-03-31T23:56:25.000Z
|
qiskit/qasm/node/format.py
|
TheGupta2012/qiskit-terra
|
5ea6e9557655b144228c29d7099375f5d2c91120
|
[
"Apache-2.0"
] | 5,244
|
2018-07-10T06:20:13.000Z
|
2022-03-31T22:18:48.000Z
|
qiskit/qasm/node/format.py
|
TheGupta2012/qiskit-terra
|
5ea6e9557655b144228c29d7099375f5d2c91120
|
[
"Apache-2.0"
] | 1,409
|
2018-07-10T02:16:12.000Z
|
2022-03-31T09:01:32.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Node for an OPENQASM file identifier/version statement."""
import re
from .node import Node
class Format(Node):
    """Node for an OPENQASM file identifier/version statement."""

    def __init__(self, value):
        """Create the version node from a string like ``"OPENQASM 2.0"``."""
        super().__init__("format", None, None)
        parsed = re.match(r"(\w+)\s+(\d+)\.(\d+)", value)
        # Unpack language identifier, major and minor version in one step.
        self.language, self.majorversion, self.minorversion = parsed.groups()

    def version(self):
        """Return the version."""
        return "{}.{}".format(self.majorversion, self.minorversion)

    def qasm(self):
        """Return the corresponding format string."""
        return "{} {};".format(self.language, self.version())
| 31.736842
| 77
| 0.669154
|
4a13eca57f1c8d1f678d11d4ca459ae292baf2fe
| 11,351
|
py
|
Python
|
salt/modules/openbsdpkg.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 2
|
2015-06-18T19:07:20.000Z
|
2017-09-27T18:54:29.000Z
|
salt/modules/openbsdpkg.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-04-15T22:17:42.000Z
|
2016-03-22T08:46:27.000Z
|
salt/modules/openbsdpkg.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 4
|
2015-04-16T03:24:08.000Z
|
2015-04-22T15:33:28.000Z
|
# -*- coding: utf-8 -*-
'''
Package support for OpenBSD
.. note::
The package repository is configured on each host using ``/etc/installurl``
from OpenBSD 6.1 onwards. Earlier releases relied on ``/etc/pkg.conf``.
.. versionchanged:: 2016.3.5
Package versions on OpenBSD are not normally specified explicitly; instead
packages may be available in multiple *flavors*, and *branches* which are
specified by the format of the package name. This module allows you to use
the same formatting as ``pkg_add(1)``, and will select the empty flavor and
default branch by default. Examples:
.. code-block:: yaml
- rsync
- vim--no_x11
- ruby%2.3
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
import re
import logging
# Import Salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError
log = logging.getLogger(__name__)
# FIXME: replace guesswork with `pkg_info -z` to correctly identify package
# flavors and branches
__PKG_RE = re.compile('^((?:[^-]+|-(?![0-9]))+)-([0-9][^-]*)(?:-(.*))?$')
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
    '''
    Set the virtual pkg module if the os is OpenBSD
    '''
    # Guard clause: bail out early on any OS other than OpenBSD.
    if __grains__.get('os') != 'OpenBSD':
        return (False, 'The openbsdpkg execution module cannot be loaded: '
                       'only available on OpenBSD systems.')
    return __virtualname__
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the packages currently installed as a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # not yet implemented or not applicable
    if any([salt.utils.data.is_true(kwargs.get(x))
            for x in ('removed', 'purge_desired')]):
        return {}

    # Serve cached results when available to avoid re-running pkg_info.
    if 'pkg.list_pkgs' in __context__:
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        else:
            ret = copy.deepcopy(__context__['pkg.list_pkgs'])
            __salt__['pkg_resource.stringify'](ret)
            return ret

    ret = {}
    cmd = 'pkg_info -q -a'
    out = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace')
    for line in out.splitlines():
        try:
            pkgname, pkgver, flavor = __PKG_RE.match(line).groups()
        except AttributeError:
            # Line did not match the package pattern; skip it.
            continue
        # Encode the flavor into the package name, pkg_add(1)-style.
        pkgname += '--{0}'.format(flavor) if flavor else ''
        __salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver)

    __salt__['pkg_resource.sort_pkglist'](ret)
    # Cache the result for subsequent calls within this run.
    __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
def latest_version(*names, **kwargs):
    '''
    Return the latest version of the named package available for upgrade or
    installation. If more than one package name is specified, a dict of
    name/version pairs is returned.

    If the latest version of a given package is already installed, an empty
    string will be returned for that package.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.latest_version <package name>
    '''
    kwargs.pop('refresh', True)

    pkgs = list_pkgs()
    ret = {}
    # Initialize the dict with empty strings
    for name in names:
        ret[name] = ''

    # NOTE(review): `name` below is the *last* element of `names` after the
    # loop above, so only one package is actually queried; upstream versions
    # run this query inside that loop. Looks like an indentation regression --
    # confirm before relying on multi-name queries.
    # Query the repository for the package name
    cmd = 'pkg_info -Q {0}'.format(name)
    out = __salt__['cmd.run_stdout'](cmd, python_shell=False, output_loglevel='trace')

    # Since we can only query instead of request the specific package
    # we'll have to go through the returned list and find what we
    # were looking for.
    # Keep in mind the match may be flavored.
    for line in out.splitlines():
        try:
            pkgname, pkgver, flavor = __PKG_RE.match(line).groups()
        except AttributeError:
            continue

        match = re.match(r'.*\(installed\)$', pkgver)
        if match:
            # Package is explicitly marked as installed already,
            # so skip any further comparison and move on to the
            # next package to compare (if provided).
            break

        # First check if we need to look for flavors before
        # looking at unflavored packages.
        if "{0}--{1}".format(pkgname, flavor) == name:
            pkgname += '--{0}'.format(flavor)
        elif pkgname == name:
            pass
        else:
            # No match just move on.
            continue

        cur = pkgs.get(pkgname, '')
        if not cur or salt.utils.versions.compare(
                ver1=cur,
                oper='<',
                ver2=pkgver):
            ret[pkgname] = pkgver

    # Return a string if only one package name passed
    if len(names) == 1:
        return ret[names[0]]
    return ret
def version(*names, **kwargs):
    '''
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3> ...
    '''
    # Delegate to the generic pkg_resource implementation.
    return __salt__['pkg_resource.version'](*names, **kwargs)
def install(name=None, pkgs=None, sources=None, **kwargs):
    '''
    Install the passed package

    Return a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example, Install one package:

    .. code-block:: bash

        salt '*' pkg.install <package name>

    CLI Example, Install more than one package:

    .. code-block:: bash

        salt '*' pkg.install pkgs='["<package name>", "<package name>"]'

    CLI Example, Install more than one package from a alternate source (e.g.
    salt file-server, HTTP, FTP, local filesystem):

    .. code-block:: bash

        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
    '''
    try:
        pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
            name, pkgs, sources, **kwargs
        )
    except MinionError as exc:
        raise CommandExecutionError(exc)

    if not pkg_params:
        return {}

    old = list_pkgs()
    errors = []
    for pkg in pkg_params:
        # A special case for OpenBSD package "branches" is also required in
        # salt/states/pkg.py
        if pkg_type == 'repository':
            # Split 'base--flavor%branch' and re-normalize so pkg_add always
            # receives an explicit (possibly empty) flavor and branch.
            stem, branch = (pkg.split('%') + [''])[:2]
            base, flavor = (stem.split('--') + [''])[:2]
            pkg = '{0}--{1}%{2}'.format(base, flavor, branch)
        cmd = 'pkg_add -x -I {0}'.format(pkg)
        out = __salt__['cmd.run_all'](
            cmd,
            python_shell=False,
            output_loglevel='trace'
        )
        if out['retcode'] != 0 and out['stderr']:
            errors.append(out['stderr'])

    # Invalidate the cached package list; it is now stale.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            'Problem encountered installing package(s)',
            info={'errors': errors, 'changes': ret}
        )

    return ret
def remove(name=None, pkgs=None, purge=False, **kwargs):
    '''
    Remove a single package with pkg_delete
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    .. versionadded:: 0.16.0
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    try:
        parsed = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)
    # Strip any '--<flavor>' suffix; pkg_delete operates on the base name.
    requested = [entry.split('--')[0] for entry in parsed]
    old = list_pkgs()
    targets = [base for base in requested if base in old]
    if not targets:
        # None of the requested packages are installed.
        return {}
    cmd = ['pkg_delete', '-Ix', '-Ddependencies']
    if purge:
        # Also remove extra configuration files.
        cmd.append('-cqq')
    cmd.extend(targets)
    out = __salt__['cmd.run_all'](
        cmd,
        python_shell=False,
        output_loglevel='trace'
    )
    errors = []
    if out['retcode'] != 0 and out['stderr']:
        errors.append(out['stderr'])
    # Invalidate the cached package list so list_pkgs() re-reads it.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)
    if errors:
        raise CommandExecutionError(
            'Problem encountered removing package(s)',
            info={'errors': errors, 'changes': ret}
        )
    return ret
def purge(name=None, pkgs=None, **kwargs):
    '''
    Remove a package and extra configuration files.
    name
        The name of the package to be deleted.
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    .. versionadded:: 0.16.0
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.purge <package name>
        salt '*' pkg.purge <package1>,<package2>,<package3>
        salt '*' pkg.purge pkgs='["foo", "bar"]'
    '''
    # Thin wrapper: purge is remove() with config-file cleanup enabled
    # (pkg_delete -cqq).
    return remove(name=name, pkgs=pkgs, purge=True)
def upgrade_available(name, **kwargs):
    '''
    Check whether or not an upgrade is available for a given package
    .. versionadded:: 2019.2.0
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade_available <package name>
    '''
    # latest_version() yields an empty string when no newer version exists.
    candidate = latest_version(name)
    return candidate != ''
def upgrade(name=None,
            pkgs=None,
            **kwargs):
    '''
    Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
    if ``name`` or ``pkgs`` is provided.
    ``name`` is ignored when ``pkgs`` is specified.
    Returns a dictionary containing the changes:
    .. versionadded:: 2019.2.0
    .. code-block:: python
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade
        salt '*' pkg.upgrade python%2.7
    '''
    # Snapshot the installed packages so changes can be computed afterwards.
    old = list_pkgs()
    cmd = ['pkg_add', '-Ix', '-u']
    if kwargs.get('noop', False):
        # Dry run: report what would change without modifying anything.
        cmd.append('-n')
    if pkgs:
        cmd.extend(pkgs)
    elif name:
        cmd.append(name)
    # Now run the upgrade, compare the list of installed packages before and
    # after and we have all the info we need.
    result = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
                                     python_shell=False)
    # Invalidate the cached package list so list_pkgs() re-reads it.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered upgrading packages',
            info={'changes': ret, 'result': result}
        )
    return ret
| 27.417874
| 90
| 0.592547
|
4a13eca929294d81b9eafc4b074db6f1eeeeba5c
| 3,970
|
py
|
Python
|
src/appengine/appengine_config.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 4
|
2019-11-26T01:50:51.000Z
|
2021-08-14T20:32:43.000Z
|
src/appengine/appengine_config.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 22
|
2019-12-26T17:02:34.000Z
|
2022-03-21T22:16:52.000Z
|
src/appengine/appengine_config.py
|
ABHIsHEk122811/clusterfuzz
|
7cac0ee869787e6f547a4b3dac18196c60f03383
|
[
"Apache-2.0"
] | 2
|
2019-02-09T09:09:20.000Z
|
2019-02-15T05:25:13.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""appengine_config initialises before the server starts."""
import os
import sys
from google.appengine.ext import ndb
from google.appengine.ext import vendor
from webob import multidict
def from_fieldstorage(cls, fs):
    """Create a dict from a cgi.FieldStorage instance.

    Patched replacement for webob's ``MultiDict.from_fieldstorage`` that
    decodes base64/quoted-printable transfer encodings and normalises each
    part's charset to UTF-8.
    See this for more details:
    http://code.google.com/p/googleappengine/issues/detail?id=2749

    Args:
        cls: the MultiDict class being populated (this function is installed
            as a classmethod on webob's MultiDict below).
        fs: a cgi.FieldStorage instance (or anything with the same
            ``list``/``filename``/``value``/``headers``/``type_options``
            attributes).

    Returns:
        A new ``cls`` instance with one entry added per form field.
    """
    import base64
    import quopri
    obj = cls()
    if fs.list:
        # fs.list can be None when there's nothing to parse
        for field in fs.list:
            if field.filename:
                # File upload: store the FieldStorage object itself.
                obj.add(field.name, field)
            else:
                # first, set a common charset to utf-8.
                common_charset = 'utf-8'
                # second, check Content-Transfer-Encoding and decode
                # the value appropriately
                field_value = field.value
                transfer_encoding = field.headers.get('Content-Transfer-Encoding', None)
                if transfer_encoding == 'base64':
                    field_value = base64.b64decode(field_value)
                if transfer_encoding == 'quoted-printable':
                    field_value = quopri.decodestring(field_value)
                # Fixed: use the `in` operator instead of the Python-2-only
                # dict.has_key() (same semantics, also valid on Python 3).
                if ('charset' in field.type_options and
                        field.type_options['charset'] != common_charset):
                    # decode with a charset specified in each
                    # multipart, and then encode it again with a
                    # charset specified in top level FieldStorage
                    field_value = field_value.decode(
                        field.type_options['charset']).encode(common_charset)
                obj.add(field.name, field_value)
    return obj
# True if the app is running inside the dev appserver, false otherwise. This
# is not the opposite of IS_RUNNING_IN_PRODUCTION; it is possible (in tests,
# for example) for both IS_RUNNING_IN_DEV_APPSERVER and IS_RUNNING_IN_PRODUCTION
# to be false.
IS_RUNNING_IN_DEV_APPSERVER = (
    os.getenv('SERVER_SOFTWARE') and
    os.getenv('SERVER_SOFTWARE').startswith('Development/') and
    'testbed' not in os.getenv('SERVER_SOFTWARE'))
# True if the app is running inside an AppEngine production environment, such
# as prom.corp or appspot.com. False if it's running inside dev_appserver or
# unsupported (such as from unit tests).
IS_RUNNING_IN_PRODUCTION = (
    os.getenv('SERVER_SOFTWARE') and
    os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/'))
# Monkey-patch webob's MultiDict so form parsing handles transfer encodings
# and per-part charsets (see from_fieldstorage in this module).
multidict.MultiDict.from_fieldstorage = classmethod(from_fieldstorage)
# Add necessary directories to path.
if IS_RUNNING_IN_PRODUCTION or IS_RUNNING_IN_DEV_APPSERVER:
    # On App Engine, register bundled libraries via the vendor mechanism.
    vendor.add('third_party')
    vendor.add('python')
else:
    # Outside App Engine (e.g. unit tests), plain sys.path entries suffice.
    sys.path.insert(0, 'third_party')
    sys.path.insert(0, 'python')
# Adding the protobuf module to the google module. Otherwise, we couldn't
# import google.protobuf because google.appengine already took the name.
import google
google.__path__.append(os.path.join('third_party', 'google'))
if IS_RUNNING_IN_DEV_APPSERVER:
    from base import modules
    modules.disable_known_module_warnings()
# In tests this is done in test_utils.with_cloud_emulators.
if IS_RUNNING_IN_PRODUCTION or IS_RUNNING_IN_DEV_APPSERVER:
    # Disable NDB caching, as NDB on GCE VMs do not use memcache and therefore
    # can't invalidate the memcache cache.
    ndb.get_context().set_memcache_policy(False)
    # Disable the in-context cache, as it can use up a lot of memory for longer
    # running tasks such as cron jobs.
    ndb.get_context().set_cache_policy(False)
| 35.765766
| 80
| 0.733249
|
4a13eea02daac82ec05633c28b4384ad9eefdd65
| 3,765
|
py
|
Python
|
Legacy/run.py
|
puat133/MCMC-MultiSPDE
|
2beca39f32c0cdd7664baeacd495b193850d8e7d
|
[
"Apache-2.0"
] | 1
|
2020-06-23T09:32:43.000Z
|
2020-06-23T09:32:43.000Z
|
Legacy/run.py
|
puat133/MCMC-MultiSPDE
|
2beca39f32c0cdd7664baeacd495b193850d8e7d
|
[
"Apache-2.0"
] | null | null | null |
Legacy/run.py
|
puat133/MCMC-MultiSPDE
|
2beca39f32c0cdd7664baeacd495b193850d8e7d
|
[
"Apache-2.0"
] | null | null | null |
#%%
import mcmc.simulation as s
import mcmc.plotting as p
import matplotlib.pyplot as plt
import numpy as np
import argparse
import parser_help as ph
#%%
# n = 2**6
# kappa_default =1e17
# sigma_0_default = 5e6#5e6
# sigma_v_default = 1e1#1e2
# kappa_factor = 1
# kappa = kappa_default/kappa_factor
# sigma_0 = sigma_0_default*np.sqrt(kappa_factor)
# sigma_v = sigma_v_default*np.sqrt(kappa_factor)
#https://stackoverflow.com/a/36194213/11764120
#%%
if __name__=='__main__':
    # CLI entry point: build a multilayer-SPDE MCMC simulation from the
    # command-line flags below, run it, and plot the results.
    # n_samples = 1000,n = 2**6,beta = 2e-1,num = 2**8,uHalfInit=None,
    # kappa = 1e17,sigma_u = 5e6,sigma_v = 10,printInterval = 100,
    # seed=1,burnPercentage = 5,useLaTeX=True,randVectInitiated=True,
    # showFigures=True
    parser = argparse.ArgumentParser()
    parser.add_argument('--n-layers',default=2,type=int,help='number SPDE layers, Default=2')
    parser.add_argument('--n',default=2**6,type=int,help='number of Fourier basis, Default=64')
    parser.add_argument('--seed',default=1,type=int,help='random generator seed, Default=1')
    parser.add_argument('--num',default=2**8,type=int,help='number measurement points, Default=256')
    parser.add_argument('--n-samples',default=1000,type=int,help='number of MCMC samples per computer core, Default=10000')
    parser.add_argument('--evaluation-interval',default=100,type=int,help='interval to print and reevaluate beta, Default=100')
    parser.add_argument('--beta',default=1,type=float,help='preconditioned Crank Nicholson beta parameter, Default=1')
    parser.add_argument('--kappa',default=1e17,type=float,help='kappa constant for u_t, Default=1e17')
    parser.add_argument('--sigma-0',default=5e6,type=float,help='Sigma_u constant, Default=5e6')
    # NOTE(review): default is 1e2 (=100) but the help text says 10.0 --
    # confirm which is intended and make them agree.
    parser.add_argument('--sigma-v',default=1e2,type=float,help='Sigma_v constant, Default=10.0')
    # NOTE(review): default is 1e-1 but the help text says 1e-4 -- confirm.
    parser.add_argument('--sigma-scaling',default=1e-1,type=float,help='Sigma_scaling constant, Default=1e-4')
    parser.add_argument('--burn-percentage',default=25.0,type=float,help='Burn Percentage, Default=25.0')
    parser.add_argument('--variant',default="dunlop",type=str,help='preconditioned Crank Nicholson multilayered algorithm variant, Default=dunlop')
    parser.add_argument('--measurement-signal-type',default="smooth_discontinuous",type=str,help='Test measurement signal type, Default=smooth_discontinuous')
    ph.add_boolean_argument(parser,'include-history',default=False,messages='Whether to include Layer simulation history in hdf5, Default=False')
    ph.add_boolean_argument(parser,'enable-beta-feedback',default=True,messages='Whether beta-feedback will be enabled, Default=True')
    ph.add_boolean_argument(parser,'print-progress',default=False,messages='Whether progress is printed, Default=False')
    ph.add_boolean_argument(parser,'use-latex',default=False,messages='Whether latex is used during results plotting, Default=False')
    ph.add_boolean_argument(parser,'show-figure',default=False,messages='Whether plot will be shown, Default=False')
    args = parser.parse_args()
    # Construct the simulation object directly from the parsed arguments.
    sim = s.Simulation(n_layers=args.n_layers,n_samples = args.n_samples,n = args.n,beta = args.beta,num = args.num,
                    kappa = args.kappa,sigma_0 = args.sigma_0,sigma_v = args.sigma_v,sigma_scaling=args.sigma_scaling,evaluation_interval = args.evaluation_interval,printProgress=args.print_progress,
                    seed=args.seed,burn_percentage = args.burn_percentage,enable_beta_feedback=args.enable_beta_feedback,pcn_variant=args.variant,measurement_signal_type=args.measurement_signal_type)
    # Hard-coded feedback gain for the pCN beta adaptation.
    sim.pcn.beta_feedback_gain = 2.1
    sim.run()
    sim.analyze()
    p.plotResult(sim,include_history=args.include_history,useLaTeX=args.use_latex,showFigures=args.show_figure)
| 61.721311
| 199
| 0.747676
|
4a13eec9ef09f9d7fdb8dac0dad1b508f1e50084
| 5,711
|
py
|
Python
|
core/sawtooth_poet/poet_consensus/consensus_state_store.py
|
blockchaintp/sawtooth-poet
|
724b9fd723cfcc6fccdb8a5b4a22f18b8a6b9c80
|
[
"Apache-2.0"
] | 14
|
2018-07-27T06:06:31.000Z
|
2021-12-03T00:30:39.000Z
|
core/sawtooth_poet/poet_consensus/consensus_state_store.py
|
blockchaintp/sawtooth-poet
|
724b9fd723cfcc6fccdb8a5b4a22f18b8a6b9c80
|
[
"Apache-2.0"
] | 20
|
2018-07-24T00:58:10.000Z
|
2021-12-05T22:42:14.000Z
|
core/sawtooth_poet/poet_consensus/consensus_state_store.py
|
blockchaintp/sawtooth-poet
|
724b9fd723cfcc6fccdb8a5b4a22f18b8a6b9c80
|
[
"Apache-2.0"
] | 38
|
2018-07-20T19:54:48.000Z
|
2022-03-20T07:27:46.000Z
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import threading
import logging
import os
# pylint: disable=no-name-in-module
from collections.abc import MutableMapping
from sawtooth_poet.poet_consensus.consensus_state import ConsensusState
from sawtooth_poet.database.lmdb_nolock_database \
import LMDBNoLockDatabase
LOGGER = logging.getLogger(__name__)
class ConsensusStateStore(MutableMapping):
    """Manages access to the underlying database holding per-block consensus
    state information. Note that because of the architectural model around
    the consensus objects, all ConsensusStateStore objects actually reference
    a single underlying database. Provides a dict-like interface to the
    consensus state, mapping block IDs to their corresponding consensus state.
    """
    # Class-level cache mapping validator ID -> LMDB database handle, so every
    # store instance for the same validator shares one underlying database.
    _store_dbs = {}
    # Guards creation/lookup of _store_dbs entries across threads.
    _lock = threading.Lock()
    def __init__(self, data_dir, validator_id):
        """Initialize the consensus state store
        Args:
            data_dir (str): The directory where underlying database file will
                be stored
            validator_id (str): A unique ID for the validator for which the
                consensus state store is being created
        Returns:
            None
        """
        with ConsensusStateStore._lock:
            # Create an underlying LMDB database file for the validator if
            # there already isn't one.  We will create the LMDB with the 'c'
            # flag so that it will open if already exists.
            self._store_db = ConsensusStateStore._store_dbs.get(validator_id)
            if self._store_db is None:
                # Only the first 8 chars of the validator ID go in the name.
                db_file_name = \
                    os.path.join(
                        data_dir,
                        'poet_consensus_state-{}.lmdb'.format(
                            validator_id[:8]))
                LOGGER.debug('Create consensus store: %s', db_file_name)
                self._store_db = LMDBNoLockDatabase(db_file_name, 'c')
                ConsensusStateStore._store_dbs[validator_id] = self._store_db
    def __setitem__(self, block_id, consensus_state):
        """Adds/updates an item in the consensus state store
        Args:
            block_id (str): The ID of the block that this consensus state
                corresponds to
            consensus_state (ConsensusState): The consensus state
        Returns:
            None
        """
        # States are stored serialized; deserialized on the way out.
        self._store_db[block_id] = consensus_state.serialize_to_bytes()
    def __getitem__(self, block_id):
        """Return the consensus state corresponding to the block ID
        Args:
            block_id (str): The ID of the block for which consensus state
                is being requested
        Returns:
            ConsensusState object
        Raises:
            KeyError if the block ID is not in the store
        """
        serialized_consensus_state = self._store_db[block_id]
        if serialized_consensus_state is None:
            raise KeyError('Block ID {} not found'.format(block_id))
        try:
            consensus_state = ConsensusState()
            consensus_state.parse_from_bytes(
                buffer=serialized_consensus_state)
            return consensus_state
        except ValueError as error:
            # Corrupt/unparseable state is surfaced as a KeyError to keep the
            # mapping contract.
            raise \
                KeyError(
                    'Cannot return block with ID {}: {}'.format(
                        block_id,
                        error)) from error
    def __delitem__(self, block_id):
        # Remove the serialized state for the block from the database.
        del self._store_db[block_id]
    def __contains__(self, block_id):
        return block_id in self._store_db
    def __iter__(self):
        # Required by abstract base class, but implementing is non-trivial
        raise NotImplementedError('ConsensusState is not iterable')
    def __len__(self):
        return len(self._store_db)
    def __str__(self):
        # Render each stored entry as 'abcd1234...wxyz5678: {state}', skipping
        # entries that fail to deserialize.
        out = []
        for block_id in self._store_db.keys():
            try:
                serialized_consensus_state = self._store_db[block_id]
                consensus_state = ConsensusState()
                consensus_state.parse_from_bytes(
                    buffer=serialized_consensus_state)
                out.append(
                    '{}...{}: {{{}}}'.format(
                        block_id[:8],
                        block_id[-8:],
                        consensus_state))
            except ValueError:
                pass
        return ', '.join(out)
    # pylint: disable=arguments-differ
    def get(self, block_id, default=None):
        """Return the consensus state corresponding to block ID or the default
        value if none exists
        Args:
            block_id (str): The ID of the block for which consensus state
                is being requested
            default (ConsensusState): The default value to return if there
                is no consensus state associated with the block ID
        Returns:
            ConsensusState object or default if no state for key
        """
        try:
            return self.__getitem__(block_id)
        except KeyError:
            pass
        return default
| 35.253086
| 80
| 0.611627
|
4a13ef7c70899048bb65823287937e4f8867bc5f
| 357
|
py
|
Python
|
RaspberryPiOS Scripts/learningMode.py
|
joy-it/MultimediaCase-for-Raspberry-Pi
|
909522521b2d53175b8460d9ef5bb1a51cfaec44
|
[
"MIT"
] | 9
|
2020-12-29T22:17:13.000Z
|
2021-12-14T09:12:47.000Z
|
RaspberryPiOS Scripts/learningMode.py
|
joy-it/MultimediaCase-for-Raspberry-Pi
|
909522521b2d53175b8460d9ef5bb1a51cfaec44
|
[
"MIT"
] | null | null | null |
RaspberryPiOS Scripts/learningMode.py
|
joy-it/MultimediaCase-for-Raspberry-Pi
|
909522521b2d53175b8460d9ef5bb1a51cfaec44
|
[
"MIT"
] | 2
|
2021-12-07T15:55:35.000Z
|
2022-03-21T11:43:43.000Z
|
#!/usr/bin/env python
# Sends a short command sequence to a device attached to the Raspberry Pi
# UART (/dev/serial0) at 38400 baud, 8N1, with a 1-second read timeout.
import serial
import os
import time
ser = serial.Serial(
    port='/dev/serial0',
    baudrate = 38400,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)
# NOTE(review): the byte sequence below (CR, 'X04', CR, 100 ms pause, '000\r')
# is device-specific -- presumably it puts the attached module into a learning
# mode, per the script name; confirm against the device's serial protocol docs.
ser.write(str.encode('\x0D'))
ser.write(str.encode('X04'))
ser.write(str.encode('\x0D'))
time.sleep(.1)
ser.write(str.encode('000\r'))
| 17.85
| 30
| 0.722689
|
4a13ef8acbce72cd9a53aa3b1ddaf336143bd322
| 515
|
py
|
Python
|
characters/character_classes.py
|
kovitikus/hecate
|
b40526e393b3ac8011480d7359ef7145e6425a47
|
[
"Unlicense"
] | 10
|
2020-09-21T04:47:23.000Z
|
2022-02-12T09:50:45.000Z
|
characters/character_classes.py
|
kovitikus/hecate
|
b40526e393b3ac8011480d7359ef7145e6425a47
|
[
"Unlicense"
] | 41
|
2020-12-14T10:21:23.000Z
|
2021-09-02T07:43:27.000Z
|
characters/character_classes.py
|
kovitikus/hecate
|
b40526e393b3ac8011480d7359ef7145e6425a47
|
[
"Unlicense"
] | 1
|
2020-08-30T19:00:21.000Z
|
2020-08-30T19:00:21.000Z
|
# Main Classes
# Lookup table: class name -> configuration dict. Each entry provides:
#   skillsets          - skillset names associated with the class
#   char_armor_type    - armor category for the class ('cloth'/'plate')
#   base_health_bonus  - flat bonus (presumably applied to base health --
#                        confirm against character creation code)
#   base_energy_bonus  - flat bonus (presumably applied to base energy)
main_classes = {
    'mage': {
        'skillsets': ['fire', 'staves'],
        'char_armor_type': 'cloth',
        'base_health_bonus': 0,
        'base_energy_bonus': 5
    },
    'paladin': {
        'skillsets': ['holy'],
        'char_armor_type': 'plate',
        'base_health_bonus': 5,
        'base_energy_bonus': 0
    },
    'priest': {
        'skillsets': ['holy'],
        'char_armor_type': 'cloth',
        'base_health_bonus': 0,
        'base_energy_bonus': 5
    }
}
| 21.458333
| 40
| 0.491262
|
4a13f0954b943ad6526068a2663710b6db8eb29d
| 9,087
|
py
|
Python
|
tensorpack/tfutils/sessinit.py
|
ChriPo92/tensorpack
|
45d2155850d3870bbf110c94c73508c707e1ae42
|
[
"Apache-2.0"
] | null | null | null |
tensorpack/tfutils/sessinit.py
|
ChriPo92/tensorpack
|
45d2155850d3870bbf110c94c73508c707e1ae42
|
[
"Apache-2.0"
] | null | null | null |
tensorpack/tfutils/sessinit.py
|
ChriPo92/tensorpack
|
45d2155850d3870bbf110c94c73508c707e1ae42
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# File: sessinit.py
import os
import numpy as np
import six
import tensorflow as tf
from ..utils import logger
from .common import get_op_tensor_name
from .varmanip import SessionUpdate, get_checkpoint_path, get_savename_from_varname, is_training_name
__all__ = ['SessionInit', 'ChainInit',
'SaverRestore', 'SaverRestoreRelaxed', 'DictRestore',
'JustCurrentSession', 'get_model_loader']
class SessionInit(object):
    """Base class for utilities that load variables into an (existing) session."""

    def init(self, sess):
        """
        Initialize a session.

        Args:
            sess (tf.Session): the session
        """
        # Template method: subclasses override the two hooks below.
        self._setup_graph()
        self._run_init(sess)

    def _run_init(self, sess):
        # Hook for subclasses that perform the actual loading. No-op here.
        pass

    def _setup_graph(self):
        # Hook for subclasses that need to add ops to the graph. No-op here.
        pass
class JustCurrentSession(SessionInit):
    """A no-op placeholder: keep whatever is already in the current session."""
    # Inherits the do-nothing _setup_graph/_run_init hooks from SessionInit.
    pass
class CheckpointReaderAdapter(object):
    """
    An adapter to work around old checkpoint format, where the keys are op
    names instead of tensor names (with :0).
    """

    def __init__(self, reader):
        self._reader = reader
        shape_map = self._reader.get_variable_to_shape_map()
        # Normalize every key to a tensor name by appending ':0' if missing.
        self._map = {}
        for key, shape in shape_map.items():
            tensor_name = key if key.endswith(':0') else key + ':0'
            self._map[tensor_name] = shape

    def get_variable_to_shape_map(self):
        return self._map

    def get_tensor(self, name):
        # Prefer the name as-is; otherwise retry with the ':0' stripped, since
        # old checkpoints store plain op names.
        if self._reader.has_tensor(name):
            return self._reader.get_tensor(name)
        if name in self._map:
            assert name.endswith(':0'), name
            return self._reader.get_tensor(name[:-2])

    def has_tensor(self, name):
        return name in self._map

    # some checkpoint might not have ':0'
    def get_real_name(self, name):
        """Return the name under which the underlying reader knows this tensor."""
        if self._reader.has_tensor(name):
            return name
        assert self.has_tensor(name)
        return name[:-2]
class MismatchLogger(object):
    """Collects variable names present in one place (`exists`) but missing in
    another (`nonexists`) and emits a single consolidated warning."""

    def __init__(self, exists, nonexists):
        self._exists = exists
        self._nonexists = nonexists
        self._names = []

    def add(self, name):
        # Store the op name (tensor name with any ':0' suffix stripped).
        op_name = get_op_tensor_name(name)[0]
        self._names.append(op_name)

    def log(self):
        # Stay silent when nothing was collected.
        if not self._names:
            return
        logger.warn("The following variables are in the {}, but not found in the {}: {}".format(
            self._exists, self._nonexists, ', '.join(self._names)))
class SaverRestore(SessionInit):
    """
    Restore a tensorflow checkpoint saved by :class:`tf.train.Saver` or :class:`ModelSaver`.
    """
    def __init__(self, model_path, prefix=None, ignore=[]):
        """
        Args:
            model_path (str): a model name (model-xxxx) or a ``checkpoint`` file.
            prefix (str): during restore, add a ``prefix/`` for every variable in this checkpoint.
            ignore (list[str]): list of tensor names that should be ignored during loading, e.g. learning-rate
        """
        # NOTE(review): mutable default `ignore=[]` -- never mutated here, so
        # harmless, but a None default would be the safer idiom.
        if model_path.endswith('.npy') or model_path.endswith('.npz'):
            logger.warn("SaverRestore expect a TF checkpoint, but got a model path '{}'.".format(model_path) +
                        " To load from a dict, use 'DictRestore'.")
        model_path = get_checkpoint_path(model_path)
        self.path = model_path  # attribute used by AutoResumeTrainConfig!
        self.prefix = prefix
        # Normalize ignore entries to tensor names (with the ':0' suffix).
        self.ignore = [i if i.endswith(':0') else i + ':0' for i in ignore]
    def _setup_graph(self):
        dic = self._get_restore_dict()
        # id(dic) gives the Saver a unique name per restore dict.
        self.saver = tf.train.Saver(var_list=dic, name=str(id(dic)))
    def _run_init(self, sess):
        logger.info("Restoring checkpoint from {} ...".format(self.path))
        self.saver.restore(sess, self.path)
    @staticmethod
    def _read_checkpoint_vars(model_path):
        """ return a set of strings """
        reader = tf.train.NewCheckpointReader(model_path)
        reader = CheckpointReaderAdapter(reader)    # use an adapter to standardize the name
        ckpt_vars = reader.get_variable_to_shape_map().keys()
        return reader, set(ckpt_vars)
    def _match_vars(self, func):
        """Invoke ``func(reader, name, var)`` for every graph variable found in
        the checkpoint, logging variables missing on either side."""
        reader, chkpt_vars = SaverRestore._read_checkpoint_vars(self.path)
        graph_vars = tf.global_variables()
        chkpt_vars_used = set()
        mismatch = MismatchLogger('graph', 'checkpoint')
        for v in graph_vars:
            name = get_savename_from_varname(v.name, varname_prefix=self.prefix)
            if name in self.ignore and reader.has_tensor(name):
                logger.info("Variable {} in the graph will not be loaded from the checkpoint!".format(name))
            else:
                if reader.has_tensor(name):
                    func(reader, name, v)
                    chkpt_vars_used.add(name)
                else:
                    # use tensor name (instead of op name) for logging, to be consistent with the reverse case
                    if not is_training_name(v.name):
                        mismatch.add(v.name)
        mismatch.log()
        # Second pass: report checkpoint variables no graph variable consumed.
        mismatch = MismatchLogger('checkpoint', 'graph')
        if len(chkpt_vars_used) < len(chkpt_vars):
            unused = chkpt_vars - chkpt_vars_used
            for name in sorted(unused):
                if not is_training_name(name):
                    mismatch.add(name)
        mismatch.log()
    def _get_restore_dict(self):
        """Build the {checkpoint-name: graph-variable} dict for tf.train.Saver."""
        var_dict = {}
        def f(reader, name, v):
            name = reader.get_real_name(name)
            assert name not in var_dict, "Restore conflict: {} and {}".format(v.name, var_dict[name].name)
            var_dict[name] = v
        self._match_vars(f)
        return var_dict
class SaverRestoreRelaxed(SaverRestore):
    """ Same as :class:`SaverRestore`, but has more relaxed constraints.
    It allows upcasting certain variables, or reshape certain
    variables when there is a mismatch that can be fixed.
    Another advantage is that it doesn't add any new ops to the graph.
    But it is also slower than :class:`SaverRestore`.
    """

    def _run_init(self, sess):
        logger.info("Restoring checkpoint from {} ...".format(self.path))

        def _load_value(reader, name, v):
            # Load each value directly (no Saver op), coercing dtype/shape
            # where SessionUpdate deems it safe.
            value = reader.get_tensor(name)
            v.load(SessionUpdate.relaxed_value_for_var(value, v))

        with sess.as_default():
            self._match_vars(_load_value)
class DictRestore(SessionInit):
    """
    Restore variables from a dictionary.
    """

    def __init__(self, variable_dict):
        """
        Args:
            variable_dict (dict): a dict of {name: value}
        """
        assert isinstance(variable_dict, dict), type(variable_dict)
        # Normalize keys to tensor names (with the ':0' suffix) for consistency.
        self._prms = {get_op_tensor_name(name)[1]: value
                      for name, value in variable_dict.items()}

    def _run_init(self, sess):
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        variable_names = set(v.name for v in variables)
        param_names = set(self._prms.keys())
        intersect = variable_names & param_names
        logger.info("Variables to restore from dict: {}".format(', '.join(map(str, intersect))))

        # Warn about graph variables the dict does not provide (skipping
        # training-only variables) ...
        mismatch = MismatchLogger('graph', 'dict')
        for name in sorted(variable_names - param_names):
            if not is_training_name(name):
                mismatch.add(name)
        mismatch.log()

        # ... and about dict entries no graph variable consumes.
        mismatch = MismatchLogger('dict', 'graph')
        for name in sorted(param_names - variable_names):
            mismatch.add(name)
        mismatch.log()

        upd = SessionUpdate(sess, [v for v in variables if v.name in intersect])
        logger.info("Restoring {} variables from dict ...".format(len(intersect)))
        upd.update({name: value for name, value in self._prms.items()
                    if name in intersect})
class ChainInit(SessionInit):
    """
    Initialize a session by a list of :class:`SessionInit` instance, executed one by one.
    This can be useful for, e.g., loading several models from different files
    to form a composition of models.
    """

    def __init__(self, sess_inits):
        """
        Args:
            sess_inits (list[SessionInit]): list of :class:`SessionInit` instances.
        """
        self.inits = sess_inits

    def _setup_graph(self):
        # Run each child's graph-setup phase, in order.
        for child in self.inits:
            child._setup_graph()

    def _run_init(self, sess):
        # Run each child's load phase, in order.
        for child in self.inits:
            child._run_init(sess)
def get_model_loader(filename):
    """
    Get a corresponding model loader by looking at the file name.

    Returns:
        SessInit: a :class:`DictRestore` when the name ends with '.npy'/'.npz',
        otherwise a :class:`SaverRestore`.
    """
    assert isinstance(filename, six.string_types), filename
    filename = os.path.expanduser(filename)
    if filename.endswith('.npy'):
        assert tf.gfile.Exists(filename), filename
        # .npy files store a pickled dict; .item() unwraps the 0-d array.
        params = np.load(filename, encoding='latin1').item()
        return DictRestore(params)
    if filename.endswith('.npz'):
        assert tf.gfile.Exists(filename), filename
        archive = np.load(filename)
        return DictRestore(dict(archive))
    return SaverRestore(filename)
| 34.420455
| 110
| 0.619676
|
4a13f09f5f12bd8b00a6aed1003ccf6067ab4e68
| 6,322
|
py
|
Python
|
emacs-the-best-python-editor/PyEval/pyeval_expression.py
|
TheTwoCentsRespository/materials
|
ebb5eefb2ac86dadff5f99856cc1542858e4dcfd
|
[
"MIT"
] | 2
|
2021-02-11T09:09:00.000Z
|
2021-02-25T15:05:54.000Z
|
emacs-the-best-python-editor/PyEval/pyeval_expression.py
|
TheTwoCentsRespository/materials
|
ebb5eefb2ac86dadff5f99856cc1542858e4dcfd
|
[
"MIT"
] | 12
|
2021-05-01T05:30:36.000Z
|
2022-02-01T22:01:27.000Z
|
emacs-the-best-python-editor/PyEval/pyeval_expression.py
|
Greentash/materials
|
c18a5b4384fcea51b7138871b6567e17bf706720
|
[
"MIT"
] | 5
|
2021-08-09T04:20:23.000Z
|
2022-03-11T06:18:48.000Z
|
"""
Expression - defines an infix expression
Uses Operator to break the infix expression down, and
outputs an RPN string using the shunting yard approach.
Algorithm outlined at https://en.wikipedia.org/wiki/Shunting-yard_algorithm
"""
from pyeval_operator import Operator
class Expression:
"""
Defines and parses an infix expression string, returning
an RPN expression string, or raising an exception if the input string
is invalid.
"""
# The _operator_stack variable uses standard Python lists to implement
# a simple stack. As operators are parsed from the string,
# they are appended to the stack. As the input string is processed, the
# grows as needed. In the end, it should be empty.
_operator_stack = [] # Holds the current stack of operators
# Store the string, and where we are in our parsing run.
_expr_string = ""
_output_string = ""
_current_position = 0
# Have we evaluated this expressions yet?
_evaluated = False
def __init__(self, expression_string):
"""
Create a new expression. Does no error checking yet, just sets
up a new expression and gets us ready to parse.
"""
# Add '$' as an end of line marker
self._expr_string = expression_string.strip() + "$"
# Start parsing at the first character
self._current_position = 0
# No output string yet
self._output_string = ""
# Clear the stack
self._operator_stack.clear()
# Reset the evaluated flag
self._evaluated = False
def result(self):
"""
Returns the result of the evaluation.
If the expression is not yet evaluated, we try to parse the expression.
If this is unsuccessful, we raise a ValueError exception.
Else we return the output string.
"""
if not self._evaluated:
self.parse()
if not self._evaluated:
raise ValueError
return self._output_string
def parse(self):
""" Parses the current infix expression, and return the RPN version."""
# If we've already evaluated, just return the result
if self._evaluated:
return self._output_string
# Let's start evaluating
# Right now, every expression starts with an operand
# This is not universally true for functions and parentheses, but we're
# not supporting them yet
# TODO: Add support for functions and parentheses
expecting_operand = True
# Get the current character to inspect
current_char = self._expr_string[self._current_position]
# Loop until we're past the end of the string
while (
self._current_position < len(self._expr_string)
and current_char != "$"
):
# Skip any leading whitespace characters
while current_char.isspace():
self._current_position += 1
current_char = self._expr_string[self._current_position]
# Store whatever is next in the current_token string
current_token = ""
# If we are looking for an operand
if expecting_operand:
# First, we need to check for a leading '-' or '+' sign
if current_char == "-" or current_char == "+":
current_token += current_char
self._current_position += 1
current_char = self._expr_string[self._current_position]
# Now we loop for as long as we have numbers
while current_char in "0123456789":
current_token += current_char
self._current_position += 1
current_char = self._expr_string[self._current_position]
# We should have a number now - add it to the output string,
# space delimited
self._output_string += current_token + " "
# And after every operand, we need to look for an operator
expecting_operand = False
else:
# Here, we just need a single operator, so
# Get that operator, validate it, then
# Create a new operator object
if current_char not in "+-*/%^":
raise SyntaxError
current_operator = Operator(current_char)
# Now comes the shunting yard part
# - If the operator stack is empty, push the current operator
# - Else
# - While the top of stack operator is higher precedence
# - Pop it and output it.
# - Push the current operator
if not self._operator_stack:
self._operator_stack.append(current_operator)
else:
top_operator = self._operator_stack[-1]
while (
self._operator_stack
and top_operator.precedence
> current_operator.precedence
):
self._output_string += top_operator.op_string + " "
self._operator_stack.pop()
if self._operator_stack:
top_operator = self._operator_stack[-1]
self._operator_stack.append(current_operator)
# Get the next character
self._current_position += 1
current_char = self._expr_string[self._current_position]
# Skip any trailing whitespace characters
while current_char.isspace():
self._current_position += 1
current_char = self._expr_string[self._current_position]
# After every operator, look for an operand
expecting_operand = True
# At this point, we're done with the string, so we just need to pop
# the remaining operators off the stack
while self._operator_stack:
top_operator = self._operator_stack.pop()
self._output_string += top_operator.op_string + " "
self._evaluated = True
return self._output_string
| 36.333333
| 79
| 0.584941
|
4a13f300deb89c54eed42acfd7d0c86232c6b40d
| 8,234
|
py
|
Python
|
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | null | null | null |
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | null | null | null |
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | 1
|
2019-07-27T19:23:35.000Z
|
2019-07-27T19:23:35.000Z
|
# encoding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _, override
from userena import forms
from userena import settings as userena_settings
from userena.utils import get_user_model
class SignupFormTests(TestCase):
""" Test the signup form. """
fixtures = ['users']
def test_signup_form(self):
"""
Test that the ``SignupForm`` checks for unique usernames and unique
e-mail addresses.
"""
invalid_data_dicts = [
# Non-alphanumeric username.
{'data': {'username': 'foo@bar',
'email': 'foo@example.com',
'password': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('username', [_('Username must contain only letters, numbers, dots and underscores.')])},
# Password is not the same
{'data': {'username': 'katy-',
'email': 'katy@newexample.com',
'password1': 'foo',
'password2': 'foo2',
'tos': 'on'},
'error': ('__all__', [_('The two password fields didn\'t match.')])},
# Already taken username
{'data': {'username': 'john',
'email': 'john@newexample.com',
'password1': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('username', [_('This username is already taken.')])},
# Forbidden username
{'data': {'username': 'SignUp',
'email': 'foo@example.com',
'password': 'foo',
'password2': 'foo2',
'tos': 'on'},
'error': ('username', [_('This username is not allowed.')])},
# Already taken email
{'data': {'username': 'alice',
'email': 'john@example.com',
'password': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('email', [_('This email is already in use. Please supply a different email.')])},
]
# Override locale settings since we are checking for existence of error
# messaged written in english. Note: it should not be necessasy but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.SignupForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
# And finally, a valid form.
form = forms.SignupForm(data={'username': 'foo.bla',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'tos': 'on'})
self.failUnless(form.is_valid())
class AuthenticationFormTests(TestCase):
""" Test the ``AuthenticationForm`` """
fixtures = ['users',]
def test_signin_form(self):
"""
Check that the ``SigninForm`` requires both identification and password
"""
invalid_data_dicts = [
{'data': {'identification': '',
'password': 'inhalefish'},
'error': ('identification', ['Either supply us with your email or username.'])},
{'data': {'identification': 'john',
'password': 'inhalefish'},
'error': ('__all__', ['Please enter a correct username or email and password. Note that both fields are case-sensitive.'])}
]
# Override locale settings since we are checking for existence of error
# messaged written in english. Note: it should not be necessasy but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.AuthenticationForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'identification': 'john',
'password': 'blowfish'},
{'identification': 'john@example.com',
'password': 'blowfish'}
]
for valid_dict in valid_data_dicts:
form = forms.AuthenticationForm(valid_dict)
self.failUnless(form.is_valid())
def test_signin_form_email(self):
"""
Test that the signin form has a different label is
``USERENA_WITHOUT_USERNAME`` is set to ``True``
"""
userena_settings.USERENA_WITHOUT_USERNAMES = True
form = forms.AuthenticationForm(data={'identification': "john",
'password': "blowfish"})
correct_label = "Email"
self.assertEqual(form.fields['identification'].label,
correct_label)
# Restore default settings
userena_settings.USERENA_WITHOUT_USERNAMES = False
class SignupFormOnlyEmailTests(TestCase):
"""
Test the :class:`SignupFormOnlyEmail`.
This is the same form as :class:`SignupForm` but doesn't require an
username for a successfull signup.
"""
fixtures = ['users']
def test_signup_form_only_email(self):
"""
Test that the form has no username field. And that the username is
generated in the save method
"""
valid_data = {'email': 'hans@gretel.com',
'password1': 'blowfish',
'password2': 'blowfish'}
form = forms.SignupFormOnlyEmail(data=valid_data)
# Should have no username field
self.failIf(form.fields.get('username', False))
# Form should be valid.
self.failUnless(form.is_valid())
# Creates an unique username
user = form.save()
self.failUnless(len(user.username), 5)
class ChangeEmailFormTests(TestCase):
""" Test the ``ChangeEmailForm`` """
fixtures = ['users']
def test_change_email_form(self):
user = get_user_model().objects.get(pk=1)
invalid_data_dicts = [
# No change in e-mail address
{'data': {'email': 'john@example.com'},
'error': ('email', ['You\'re already known under this email.'])},
# An e-mail address used by another
{'data': {'email': 'jane@example.com'},
'error': ('email', ['This email is already in use. Please supply a different email.'])},
]
# Override locale settings since we are checking for existence of error
# messaged written in english. Note: it should not be necessasy but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.ChangeEmailForm(user, data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
# Test a valid post
form = forms.ChangeEmailForm(user,
data={'email': 'john@newexample.com'})
self.failUnless(form.is_valid())
def test_form_init(self):
""" The form must be initialized with a ``User`` instance. """
self.assertRaises(TypeError, forms.ChangeEmailForm, None)
class EditAccountFormTest(TestCase):
""" Test the ``EditAccountForm`` """
pass
| 37.9447
| 136
| 0.546393
|
4a13f3af69aac6787069234fbf7ca4895d7e3699
| 1,561
|
py
|
Python
|
problems/test_0640.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | 1
|
2017-06-17T23:47:17.000Z
|
2017-06-17T23:47:17.000Z
|
problems/test_0640.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | null | null | null |
problems/test_0640.py
|
chrisxue815/leetcode_python
|
dec3c160d411a5c19dc8e9d96e7843f0e4c36820
|
[
"Unlicense"
] | null | null | null |
import unittest
# O(n)
class Solution:
def solveEquation(self, equation):
"""
:type equation: str
:rtype: str
"""
coeff = const = num = 0
sign = side = 1
for i, ch in enumerate(equation):
if ch.isdigit():
num = num * 10 + ord(ch) - ord('0')
elif ch == '+':
const -= side * sign * num
num = 0
sign = 1
elif ch == '-':
const -= side * sign * num
num = 0
sign = -1
elif ch == 'x':
if num == 0 and (i == 0 or equation[i - 1] != '0'):
num = 1
coeff += side * sign * num
num = 0
elif ch == '=':
const -= side * sign * num
num = 0
side = -1
sign = 1
const -= side * sign * num
if coeff:
return 'x=' + str(const // coeff)
return 'No solution' if const else 'Infinite solutions'
class Test(unittest.TestCase):
def test(self):
self._test('x+5-3+x=6+x-2', 'x=2')
self._test('x=x', 'Infinite solutions')
self._test('2x=x', 'x=0')
self._test('2x+3x-6x=x+2', 'x=-1')
self._test('x=x+2', 'No solution')
self._test('0x=x+2', 'x=-2')
def _test(self, equation, expected):
actual = Solution().solveEquation(equation)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| 26.913793
| 67
| 0.429212
|
4a13f457b250522e49e9fa74777565b440214a37
| 410
|
py
|
Python
|
PythonExecicios/ex017.py
|
lucasohara98/Python_CursoemVideo
|
e5266abaf67ef3e27fe8928458b3860feb0ed46d
|
[
"MIT"
] | null | null | null |
PythonExecicios/ex017.py
|
lucasohara98/Python_CursoemVideo
|
e5266abaf67ef3e27fe8928458b3860feb0ed46d
|
[
"MIT"
] | null | null | null |
PythonExecicios/ex017.py
|
lucasohara98/Python_CursoemVideo
|
e5266abaf67ef3e27fe8928458b3860feb0ed46d
|
[
"MIT"
] | null | null | null |
'''from math import hypot
op = float(input('Qual o comprimento do cateto oposto: '))
ad = float(input('Qual o comprimento do cateto adjacente: '))
print(f'A hipotenusa é: {hypot(op,ad):.2f}')'''
#sem importação
co = float(input('Qual o comprimento do cateto oposto: '))
ca = float (input('Qual o compriemento do cateto adjacente: '))
hi = (co**2 + ca**2)**(1/2)
print(f'a hipotenusa é: {hi:.2f}')
| 34.166667
| 64
| 0.653659
|
4a13f69fed51e27fda345129f8940f3e4581555d
| 10,345
|
py
|
Python
|
python/src/main/python/pyalink/alink/common/types/vector.py
|
Yankee24/Alink
|
fb06bd7f0e3f7e298679e13a2e0e6f38db9e2242
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/common/types/vector.py
|
Yankee24/Alink
|
fb06bd7f0e3f7e298679e13a2e0e6f38db9e2242
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/common/types/vector.py
|
Yankee24/Alink
|
fb06bd7f0e3f7e298679e13a2e0e6f38db9e2242
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC
from py4j.java_gateway import JavaObject
from .bases.j_obj_wrapper import JavaObjectWrapperWithAutoTypeConversion
from .conversion.java_method_call import auto_convert_java_type, call_java_method
from .data_type_display import DataTypeDisplay
__all__ = ['Vector', 'DenseVector', 'SparseVector', 'DenseMatrix']
class Vector(JavaObjectWrapperWithAutoTypeConversion, ABC):
"""
Vector
"""
_j_cls_name = 'com.alibaba.alink.common.linalg.Vector'
def __init__(self, j_obj):
self._j_obj = j_obj
def get_j_obj(self):
return self._j_obj
def add(self, i, val):
return self.add(i, val)
def get(self, i):
return self.get(i)
def append(self, v):
return self.append(v)
def size(self):
return self.size()
def iterator(self):
return self.iterator()
def set(self, i, val):
return self.set(i, val)
def scale(self, v):
return self.scale(v)
def slice(self, indexes):
return self.slice(indexes)
def prefix(self, v):
return self.prefix(v)
def scaleEqual(self, v):
return self.scaleEqual(v)
def normL1(self):
return self.normL1()
def normInf(self):
return self.normInf()
def normL2(self):
return self.normL2()
def normL2Square(self):
return self.normL2Square()
def normalizeEqual(self, p):
return self.normalizeEqual(p)
def standardizeEqual(self, mean, stdvar):
return self.standardizeEqual(mean, stdvar)
def plus(self, vec):
return self.plus(vec)
def minus(self, vec):
return self.minus(vec)
def dot(self, vec):
return self.dot(vec)
def outer(self, other=None):
if other is None:
return self.outer()
else:
return self.outer(other)
def clone(self):
return self.clone()
_unsupported_j_methods = ['toBytes']
class DenseVector(Vector, DataTypeDisplay):
"""
DenseVector
"""
_j_cls_name = 'com.alibaba.alink.common.linalg.DenseVector'
def __init__(self, *args):
"""
Construct `DenseVector` from arguments with a wrapped Java instance.
Different combinations of arguments are supported:
1. j_obj: JavaObject -> directly wrap the instance;
2. no arguments: call DenseVector();
3. n: int -> : call `DenseVector(n)` of Java side;
4. data: List[Double] -> call `DenseVector(double[] data)` of Java side;
:param args: arguments, see function description.
"""
if len(args) == 1 and isinstance(args[0], JavaObject):
j_obj = args[0]
else:
j_obj = call_java_method(self._j_cls(), *args).get_j_obj()
super(DenseVector, self).__init__(j_obj)
def setData(self, data):
return self.setData(data)
def setEqual(self, other):
return self.setEqual(other)
def plusEqual(self, other):
return self.plusEqual(other)
def minusEqual(self, other):
return self.minusEqual(other)
def plusScaleEqual(self, other, alpha):
return self.plusScaleEqual(other, alpha)
@classmethod
@auto_convert_java_type
def zeros(cls, n):
return cls._j_cls().zeros(n)
@classmethod
@auto_convert_java_type
def ones(cls, n):
return cls._j_cls().ones(n)
@classmethod
@auto_convert_java_type
def rand(cls, n):
return cls._j_cls().rand(n)
def getData(self):
return self.getData()
def toSparseVector(self):
return self.toSparseVector()
def toDisplayData(self, n: int = None):
if n is None:
return self.toDisplayData()
else:
return self.toDisplayData(n)
def toDisplaySummary(self) -> str:
return self.toDisplaySummary()
def toShortDisplayData(self) -> str:
return self.toShortDisplayData()
_unsupported_j_methods = ['toBytes']
class SparseVector(Vector, DataTypeDisplay):
"""
SparseVector
"""
_j_cls_name = 'com.alibaba.alink.common.linalg.SparseVector'
def __init__(self, *args):
"""
Construct `SparseVector` from arguments with a wrapped Java instance.
Different combinations of arguments are supported:
1. j_obj: JavaObject -> directly wrap the instance;
2. no arguments -> call SparseVector();
3. n: int -> call `SparseVector(n)` of Java side;
4. n: int, indices: List[int], values: List[int] -> call `SparseVector(int n, int[] indices, double[] values)` of Java side
:param args: arguments, see function description.
"""
if len(args) == 1 and isinstance(args[0], JavaObject):
j_obj = args[0]
elif len(args) == 3:
j_obj = call_java_method(self._j_cls(), args[0], list(args[1]), list(args[2])).get_j_obj()
else:
j_obj = call_java_method(self._j_cls(), *args).get_j_obj()
super(SparseVector, self).__init__(j_obj)
def forEach(self, action):
for (index, value) in zip(self.getIndices(), self.getValues()):
action(index, value)
return None
def setSize(self, n):
return self.setSize(n)
def getIndices(self):
return self.getIndices()
def getValues(self):
return self.getValues()
def numberOfValues(self):
return self.numberOfValues()
def removeZeroValues(self):
return self.removeZeroValues()
def toDenseVector(self):
return self.toDenseVector()
def toDisplayData(self, n: int = None):
if n is None:
return self.toDisplayData()
else:
return self.toDisplayData(n)
def toDisplaySummary(self) -> str:
return self.toDisplaySummary()
def toShortDisplayData(self) -> str:
return self.toShortDisplayData()
_unsupported_j_methods = ['toBytes']
class DenseMatrix(JavaObjectWrapperWithAutoTypeConversion):
"""
DenseMatrix
"""
_j_cls_name = 'com.alibaba.alink.common.linalg.DenseMatrix'
def __init__(self, *args):
"""
Construct `DenseMatrix` from arguments with a wrapped Java instance.
Different combinations of arguments are supported:
1. j_obj: JavaObject -> directly wrap the instance;
2. no arguments -> call DenseMatrix();
3. m: int, n: int -> call `DenseMatrix(m, n)` of Java side;
4. m: int, n: int, data: List[Double] -> call `DenseMatrix(m, n, data)` of Java side;
5. m: int, n: int, data: List[Double], inRowMajor: bool -> call `DenseMatrix(m, n, data, inRowMajor)` of Java side;
6. data: List[List[Double]] -> call `DenseMatrix(data)` of Java side.
:param args: arguments, see function description.
"""
if len(args) == 1 and isinstance(args[0], JavaObject):
j_obj = args[0]
else:
j_obj = call_java_method(self._j_cls(), *args).get_j_obj()
self._j_obj = j_obj
def get_j_obj(self):
return self._j_obj
def add(self, i, j, s):
return self.add(i, j, s)
def get(self, i, j):
return self.get(i, j)
def clone(self):
return self.clone()
def set(self, i, j, s):
return self.set(i, j, s)
def sum(self):
return self.sum()
def scale(self, v):
return self.scale(v)
@classmethod
@auto_convert_java_type
def eye(cls, m, n=None):
if n is None:
return cls._j_cls().eye(m)
else:
return cls._j_cls().eye(m, n)
@classmethod
@auto_convert_java_type
def zeros(cls, m, n):
return cls._j_cls().zeros(m, n)
@classmethod
@auto_convert_java_type
def ones(cls, m, n):
return cls._j_cls().ones(m, n)
@classmethod
@auto_convert_java_type
def rand(cls, m, n):
return cls._j_cls().rand(m, n)
@classmethod
@auto_convert_java_type
def randSymmetric(cls, n):
return cls._j_cls().randSymmetric(n)
def getArrayCopy2D(self):
return self.getArrayCopy2D()
def getArrayCopy1D(self, inRowMajor):
return self.getArrayCopy1D(inRowMajor)
def getRow(self, row):
return self.getRow(row)
def getColumn(self, col):
return self.getColumn(col)
def selectRows(self, rows):
return self.selectRows(rows)
def getSubMatrix(self, m0, m1, n0, n1):
return self.getSubMatrix(m0, m1, n0, n1)
def setSubMatrix(self, sub, m0, m1, n0, n1):
return self.setSubMatrix(sub, m0, m1, n0, n1)
def isSquare(self):
return self.isSquare()
def isSymmetric(self):
return self.isSymmetric()
def numRows(self):
return self.numRows()
def numCols(self):
return self.numCols()
def plusEquals(self, alpha_or_mat):
return self.plusEquals(alpha_or_mat)
def minusEquals(self, mat):
return self.minusEquals(mat)
def multiplies(self, vec_or_mat):
return self.multiplies(vec_or_mat)
def transpose(self):
return self.transpose()
def norm2(self):
return self.norm2()
def cond(self):
return self.cond()
def det(self):
return self.det()
def rank(self):
return self.rank()
def solve(self, vec_or_mat):
return self.solve(vec_or_mat)
def solveLS(self, vec_or_mat):
return self.solveLS(vec_or_mat)
def inverse(self):
return self.inverse()
def pseudoInverse(self):
return self.pseudoInverse()
def scaleEqual(self, v):
return self.scaleEqual(v)
def plus(self, alpha_or_mat):
return self.plus(alpha_or_mat)
def minus(self, mat):
return self.minus(mat)
def getData(self):
return self.getData()
class VectorIterator(JavaObjectWrapperWithAutoTypeConversion):
"""
VectorIterator
"""
_j_cls_name = 'com.alibaba.alink.common.linalg.VectorIterator'
def __init__(self, j_obj):
self._j_obj = j_obj
def get_j_obj(self):
return self._j_obj
def getValue(self):
return self.getValue()
def hasNext(self):
return self.hasNext()
def next(self):
return self.next()
def getIndex(self):
return self.getIndex()
| 25.109223
| 131
| 0.616336
|
4a13f781a9901807b000192cfdc1ce6b4d6d2594
| 8,027
|
py
|
Python
|
tensor2tensor/trax/rl/ppo_trainer_test.py
|
levskaya/tensor2tensor
|
4643800137f802693f880a1fab9e10de7ba32e66
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/trax/rl/ppo_trainer_test.py
|
levskaya/tensor2tensor
|
4643800137f802693f880a1fab9e10de7ba32e66
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/trax/rl/ppo_trainer_test.py
|
levskaya/tensor2tensor
|
4643800137f802693f880a1fab9e10de7ba32e66
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.trax.rl.ppo's training_loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import itertools
import os
import tempfile
import gin
import gym
import numpy as np
from tensor2tensor.envs import gym_env_problem
from tensor2tensor.rl import gym_utils
from tensor2tensor.trax import inputs as trax_inputs
from tensor2tensor.trax import layers
from tensor2tensor.trax import learning_rate as lr
from tensor2tensor.trax import models
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensor2tensor.trax.rl import envs # pylint: disable=unused-import
from tensor2tensor.trax.rl import ppo_trainer
from tensor2tensor.trax.rl import simulated_env_problem
from tensorflow import test
from tensorflow.io import gfile
class PpoTrainerTest(test.TestCase):
def get_wrapped_env(self, name="CartPole-v0", max_episode_steps=2):
wrapper_fn = functools.partial(
gym_utils.gym_env_wrapper,
**{
"rl_env_max_episode_steps": max_episode_steps,
"maxskip_env": False,
"rendered_env": False,
"rendered_env_resize_to": None, # Do not resize frames
"sticky_actions": False,
"output_dtype": None,
})
return gym_env_problem.GymEnvProblem(base_env_name=name,
batch_size=1,
env_wrapper_fn=wrapper_fn,
discrete_rewards=False)
@contextlib.contextmanager
def tmp_dir(self):
tmp = tempfile.mkdtemp(dir=self.get_temp_dir())
yield tmp
gfile.rmtree(tmp)
def _make_trainer(self, train_env, eval_env, output_dir, model=None):
if model is None:
model = lambda: [layers.Dense(1)]
return ppo_trainer.PPO(
train_env=train_env,
eval_env=eval_env,
policy_and_value_model=model,
n_optimizer_steps=1,
output_dir=output_dir,
random_seed=0,
boundary=2,
save_every_n=1,
)
def test_training_loop_cartpole(self):
with self.tmp_dir() as output_dir:
trainer = self._make_trainer(
train_env=self.get_wrapped_env("CartPole-v0", 2),
eval_env=self.get_wrapped_env("CartPole-v0", 2),
output_dir=output_dir,
)
trainer.training_loop(n_epochs=2)
def test_training_loop_cartpole_transformer(self):
with self.tmp_dir() as output_dir:
trainer = self._make_trainer(
train_env=self.get_wrapped_env("CartPole-v0", 2),
eval_env=self.get_wrapped_env("CartPole-v0", 2),
output_dir=output_dir,
model=functools.partial(
models.TransformerDecoder,
d_model=1,
d_ff=1,
n_layers=1,
n_heads=1,
max_len=64,
mode="train",
),
)
trainer.training_loop(n_epochs=2)
def test_training_loop_onlinetune(self):
with self.tmp_dir() as output_dir:
gin.bind_parameter("OnlineTuneEnv.model", functools.partial(
models.MLP,
n_hidden_layers=0,
n_output_classes=1,
))
gin.bind_parameter("OnlineTuneEnv.inputs", functools.partial(
trax_inputs.random_inputs,
input_shape=(1, 1),
input_dtype=np.float32,
output_shape=(1, 1),
output_dtype=np.float32,
))
gin.bind_parameter("OnlineTuneEnv.train_steps", 2)
gin.bind_parameter("OnlineTuneEnv.eval_steps", 2)
gin.bind_parameter(
"OnlineTuneEnv.output_dir", os.path.join(output_dir, "envs"))
trainer = self._make_trainer(
train_env=self.get_wrapped_env("OnlineTuneEnv-v0", 2),
eval_env=self.get_wrapped_env("OnlineTuneEnv-v0", 2),
output_dir=output_dir,
)
trainer.training_loop(n_epochs=2)
def test_training_loop_simulated(self):
n_actions = 5
history_shape = (3, 2, 3)
action_shape = (3,)
obs_shape = (3, 3)
reward_shape = (3, 1)
def model(mode):
del mode
return layers.Serial(
layers.Parallel(
layers.Flatten(), # Observation stack.
layers.Embedding(d_feature=1, vocab_size=n_actions), # Action.
),
layers.Concatenate(),
layers.Dense(n_units=1),
layers.Dup(),
layers.Parallel(
layers.Dense(n_units=obs_shape[1]), # New observation.
None, # Reward.
)
)
def inputs(n_devices):
del n_devices
stream = itertools.repeat((
(np.zeros(history_shape), np.zeros(action_shape, dtype=np.int32)),
(np.zeros(obs_shape), np.zeros(reward_shape)),
))
return trax_inputs.Inputs(
train_stream=lambda: stream,
train_eval_stream=lambda: stream,
eval_stream=lambda: stream,
input_shape=(history_shape[1:], action_shape[1:]),
input_dtype=(np.float32, np.int32),
target_shape=(obs_shape[1:], reward_shape[1:]),
target_dtype=(np.float32, np.float32),
)
def loss(params, batch, model_predict, state, rng, **kwargs):
del params, batch, model_predict, rng, kwargs
return 0.0, state
with self.tmp_dir() as output_dir:
# Run fake training just to save the parameters.
trainer = trax.Trainer(
model=model,
loss_fn=loss,
inputs=inputs,
optimizer=trax_opt.SM3,
lr_schedule=lr.MultifactorSchedule,
output_dir=output_dir,
)
trainer.train_epoch(epoch_steps=1, eval_steps=1)
# Repeat the history over and over again.
stream = itertools.repeat(np.zeros(history_shape))
env_fn = functools.partial(
simulated_env_problem.RawSimulatedEnvProblem,
model=model,
history_length=history_shape[1],
trajectory_length=3,
batch_size=history_shape[0],
observation_space=gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(obs_shape[1],)),
action_space=gym.spaces.Discrete(n=n_actions),
reward_range=(-1, 1),
discrete_rewards=False,
history_stream=stream,
output_dir=output_dir,
)
trainer = self._make_trainer(
train_env=env_fn(),
eval_env=env_fn(),
output_dir=output_dir,
)
trainer.training_loop(n_epochs=2)
def test_restarts(self):
with self.tmp_dir() as output_dir:
train_env = self.get_wrapped_env("CartPole-v0", 2)
eval_env = self.get_wrapped_env("CartPole-v0", 2)
# Train for 1 epoch and save.
trainer = self._make_trainer(
train_env=train_env,
eval_env=eval_env,
output_dir=output_dir,
)
self.assertEqual(trainer.epoch, 0)
trainer.training_loop(n_epochs=1)
self.assertEqual(trainer.epoch, 1)
# Restore from the saved state.
trainer = self._make_trainer(
train_env=train_env,
eval_env=eval_env,
output_dir=output_dir,
)
self.assertEqual(trainer.epoch, 1)
# Check that we can continue training from the restored checkpoint.
trainer.training_loop(n_epochs=2)
self.assertEqual(trainer.epoch, 2)
if __name__ == "__main__":
test.main()
| 32.630081
| 77
| 0.645696
|
4a13f7e2e16d614459714645c2e64963fe214d03
| 34,846
|
py
|
Python
|
synapse/api/auth.py
|
iot-factory/synapse
|
d3ac8fd87d85bd40d40b475d7a6f12f74ea0ddb0
|
[
"Apache-2.0"
] | null | null | null |
synapse/api/auth.py
|
iot-factory/synapse
|
d3ac8fd87d85bd40d40b475d7a6f12f74ea0ddb0
|
[
"Apache-2.0"
] | null | null | null |
synapse/api/auth.py
|
iot-factory/synapse
|
d3ac8fd87d85bd40d40b475d7a6f12f74ea0ddb0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains classes for authenticating the user."""
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
from synapse.types import RoomID, UserID, EventID
from synapse.util.logutils import log_function
from unpaddedbase64 import decode_base64
import logging
import pymacaroons
logger = logging.getLogger(__name__)
AuthEventTypes = (
EventTypes.Create, EventTypes.Member, EventTypes.PowerLevels,
EventTypes.JoinRules, EventTypes.RoomHistoryVisibility,
EventTypes.ThirdPartyInvite,
)
class Auth(object):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
self._KNOWN_CAVEAT_PREFIXES = set([
"gen = ",
"guest = ",
"type = ",
"time < ",
"user_id = ",
])
def check(self, event, auth_events):
""" Checks if this event is correctly authed.
Args:
event: the event being checked.
auth_events (dict: event-key -> event): the existing room state.
Returns:
True if the auth checks pass.
"""
self.check_size_limits(event)
try:
if not hasattr(event, "room_id"):
raise AuthError(500, "Event has no room_id: %s" % event)
if auth_events is None:
# Oh, we don't know what the state of the room was, so we
# are trusting that this is allowed (at least for now)
logger.warn("Trusting event: %s", event.event_id)
return True
if event.type == EventTypes.Create:
# FIXME
return True
creation_event = auth_events.get((EventTypes.Create, ""), None)
if not creation_event:
raise SynapseError(
403,
"Room %r does not exist" % (event.room_id,)
)
creating_domain = RoomID.from_string(event.room_id).domain
originating_domain = UserID.from_string(event.sender).domain
if creating_domain != originating_domain:
if not self.can_federate(event, auth_events):
raise AuthError(
403,
"This room has been marked as unfederatable."
)
# FIXME: Temp hack
if event.type == EventTypes.Aliases:
return True
logger.debug(
"Auth events: %s",
[a.event_id for a in auth_events.values()]
)
if event.type == EventTypes.Member:
allowed = self.is_membership_change_allowed(
event, auth_events
)
if allowed:
logger.debug("Allowing! %s", event)
else:
logger.debug("Denying! %s", event)
return allowed
self.check_event_sender_in_room(event, auth_events)
self._can_send_event(event, auth_events)
if event.type == EventTypes.PowerLevels:
self._check_power_levels(event, auth_events)
if event.type == EventTypes.Redaction:
self.check_redaction(event, auth_events)
logger.debug("Allowing! %s", event)
except AuthError as e:
logger.info(
"Event auth check failed on event %s with msg: %s",
event, e.msg
)
logger.info("Denying! %s", event)
raise
def check_size_limits(self, event):
def too_big(field):
raise EventSizeError("%s too large" % (field,))
if len(event.user_id) > 255:
too_big("user_id")
if len(event.room_id) > 255:
too_big("room_id")
if event.is_state() and len(event.state_key) > 255:
too_big("state_key")
if len(event.type) > 255:
too_big("type")
if len(event.event_id) > 255:
too_big("event_id")
if len(encode_canonical_json(event.get_pdu_json())) > 65536:
too_big("event")
@defer.inlineCallbacks
def check_joined_room(self, room_id, user_id, current_state=None):
"""Check if the user is currently joined in the room
Args:
room_id(str): The room to check.
user_id(str): The user to check.
current_state(dict): Optional map of the current state of the room.
If provided then that map is used to check whether they are a
member of the room. Otherwise the current membership is
loaded from the database.
Raises:
AuthError if the user is not in the room.
Returns:
A deferred membership event for the user if the user is in
the room.
"""
if current_state:
member = current_state.get(
(EventTypes.Member, user_id),
None
)
else:
member = yield self.state.get_current_state(
room_id=room_id,
event_type=EventTypes.Member,
state_key=user_id
)
self._check_joined_room(member, user_id, room_id)
defer.returnValue(member)
@defer.inlineCallbacks
def check_user_was_in_room(self, room_id, user_id):
"""Check if the user was in the room at some point.
Args:
room_id(str): The room to check.
user_id(str): The user to check.
Raises:
AuthError if the user was never in the room.
Returns:
A deferred membership event for the user if the user was in the
room. This will be the join event if they are currently joined to
the room. This will be the leave event if they have left the room.
"""
member = yield self.state.get_current_state(
room_id=room_id,
event_type=EventTypes.Member,
state_key=user_id
)
membership = member.membership if member else None
if membership not in (Membership.JOIN, Membership.LEAVE):
raise AuthError(403, "User %s not in room %s" % (
user_id, room_id
))
defer.returnValue(member)
@defer.inlineCallbacks
def check_host_in_room(self, room_id, host):
curr_state = yield self.state.get_current_state(room_id)
for event in curr_state.values():
if event.type == EventTypes.Member:
try:
if UserID.from_string(event.state_key).domain != host:
continue
except:
logger.warn("state_key not user_id: %s", event.state_key)
continue
if event.content["membership"] == Membership.JOIN:
defer.returnValue(True)
defer.returnValue(False)
def check_event_sender_in_room(self, event, auth_events):
key = (EventTypes.Member, event.user_id, )
member_event = auth_events.get(key)
return self._check_joined_room(
member_event,
event.user_id,
event.room_id
)
def _check_joined_room(self, member, user_id, room_id):
if not member or member.membership != Membership.JOIN:
raise AuthError(403, "User %s not in room %s (%s)" % (
user_id, room_id, repr(member)
))
def can_federate(self, event, auth_events):
creation_event = auth_events.get((EventTypes.Create, ""))
return creation_event.content.get("m.federate", True) is True
@log_function
def is_membership_change_allowed(self, event, auth_events):
membership = event.content["membership"]
# Check if this is the room creator joining:
if len(event.prev_events) == 1 and Membership.JOIN == membership:
# Get room creation event:
key = (EventTypes.Create, "", )
create = auth_events.get(key)
if create and event.prev_events[0][0] == create.event_id:
if create.content["creator"] == event.state_key:
return True
target_user_id = event.state_key
creating_domain = RoomID.from_string(event.room_id).domain
target_domain = UserID.from_string(target_user_id).domain
if creating_domain != target_domain:
if not self.can_federate(event, auth_events):
raise AuthError(
403,
"This room has been marked as unfederatable."
)
# get info about the caller
key = (EventTypes.Member, event.user_id, )
caller = auth_events.get(key)
caller_in_room = caller and caller.membership == Membership.JOIN
caller_invited = caller and caller.membership == Membership.INVITE
# get info about the target
key = (EventTypes.Member, target_user_id, )
target = auth_events.get(key)
target_in_room = target and target.membership == Membership.JOIN
target_banned = target and target.membership == Membership.BAN
key = (EventTypes.JoinRules, "", )
join_rule_event = auth_events.get(key)
if join_rule_event:
join_rule = join_rule_event.content.get(
"join_rule", JoinRules.INVITE
)
else:
join_rule = JoinRules.INVITE
user_level = self._get_user_power_level(event.user_id, auth_events)
target_level = self._get_user_power_level(
target_user_id, auth_events
)
# FIXME (erikj): What should we do here as the default?
ban_level = self._get_named_level(auth_events, "ban", 50)
logger.debug(
"is_membership_change_allowed: %s",
{
"caller_in_room": caller_in_room,
"caller_invited": caller_invited,
"target_banned": target_banned,
"target_in_room": target_in_room,
"membership": membership,
"join_rule": join_rule,
"target_user_id": target_user_id,
"event.user_id": event.user_id,
}
)
if Membership.INVITE == membership and "third_party_invite" in event.content:
if not self._verify_third_party_invite(event, auth_events):
raise AuthError(403, "You are not invited to this room.")
return True
if Membership.JOIN != membership:
if (caller_invited
and Membership.LEAVE == membership
and target_user_id == event.user_id):
return True
if not caller_in_room: # caller isn't joined
raise AuthError(
403,
"%s not in room %s." % (event.user_id, event.room_id,)
)
if Membership.INVITE == membership:
# TODO (erikj): We should probably handle this more intelligently
# PRIVATE join rules.
# Invites are valid iff caller is in the room and target isn't.
if target_banned:
raise AuthError(
403, "%s is banned from the room" % (target_user_id,)
)
elif target_in_room: # the target is already in the room.
raise AuthError(403, "%s is already in the room." %
target_user_id)
else:
invite_level = self._get_named_level(auth_events, "invite", 0)
if user_level < invite_level:
raise AuthError(
403, "You cannot invite user %s." % target_user_id
)
elif Membership.JOIN == membership:
# Joins are valid iff caller == target and they were:
# invited: They are accepting the invitation
# joined: It's a NOOP
if event.user_id != target_user_id:
raise AuthError(403, "Cannot force another user to join.")
elif target_banned:
raise AuthError(403, "You are banned from this room")
elif join_rule == JoinRules.PUBLIC:
pass
elif join_rule == JoinRules.INVITE:
if not caller_in_room and not caller_invited:
raise AuthError(403, "You are not invited to this room.")
else:
# TODO (erikj): may_join list
# TODO (erikj): private rooms
raise AuthError(403, "You are not allowed to join this room")
elif Membership.LEAVE == membership:
# TODO (erikj): Implement kicks.
if target_banned and user_level < ban_level:
raise AuthError(
403, "You cannot unban user &s." % (target_user_id,)
)
elif target_user_id != event.user_id:
kick_level = self._get_named_level(auth_events, "kick", 50)
if user_level < kick_level or user_level <= target_level:
raise AuthError(
403, "You cannot kick user %s." % target_user_id
)
elif Membership.BAN == membership:
if user_level < ban_level or user_level <= target_level:
raise AuthError(403, "You don't have permission to ban")
else:
raise AuthError(500, "Unknown membership %s" % membership)
return True
    def _verify_third_party_invite(self, event, auth_events):
        """
        Validates that the invite event is authorized by a previous third-party invite.
        Checks that the public key, and keyserver, match those in the third party invite,
        and that the invite event has a signature issued using that public key.
        Args:
            event: The m.room.member join event being validated.
            auth_events: All relevant previous context events which may be used
                for authorization decisions.
        Return:
            True if the event fulfills the expectations of a previous third party
            invite event.
        """
        if "third_party_invite" not in event.content:
            return False
        if "signed" not in event.content["third_party_invite"]:
            return False
        signed = event.content["third_party_invite"]["signed"]
        # Both the invitee mxid and the invite token must be in the signed bundle.
        for key in {"mxid", "token"}:
            if key not in signed:
                return False
        token = signed["token"]
        # Look up the original m.room.third_party_invite event by its token.
        invite_event = auth_events.get(
            (EventTypes.ThirdPartyInvite, token,)
        )
        if not invite_event:
            return False
        # The sender of this event must match the sender of the original
        # third-party invite event.
        if event.user_id != invite_event.user_id:
            return False
        try:
            public_key = invite_event.content["public_key"]
            if signed["mxid"] != event.state_key:
                return False
            if signed["token"] != token:
                return False
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                    # Only ed25519 signatures are understood; anything else fails.
                    if not key_name.startswith("ed25519:"):
                        return False
                    verify_key = decode_verify_key_bytes(
                        key_name,
                        decode_base64(public_key)
                    )
                    verify_signed_json(signed, server, verify_key)
                    # We got the public key from the invite, so we know that the
                    # correct server signed the signed bundle.
                    # The caller is responsible for checking that the signing
                    # server has not revoked that public key.
                    return True
            return False
        except (KeyError, SignatureVerifyException,):
            return False
def _get_power_level_event(self, auth_events):
key = (EventTypes.PowerLevels, "", )
return auth_events.get(key)
def _get_user_power_level(self, user_id, auth_events):
power_level_event = self._get_power_level_event(auth_events)
if power_level_event:
level = power_level_event.content.get("users", {}).get(user_id)
if not level:
level = power_level_event.content.get("users_default", 0)
if level is None:
return 0
else:
return int(level)
else:
key = (EventTypes.Create, "", )
create_event = auth_events.get(key)
if (create_event is not None and
create_event.content["creator"] == user_id):
return 100
else:
return 0
def _get_named_level(self, auth_events, name, default):
power_level_event = self._get_power_level_event(auth_events)
if not power_level_event:
return default
level = power_level_event.content.get(name, None)
if level is not None:
return int(level)
else:
return default
    @defer.inlineCallbacks
    def get_user_by_req(self, request, allow_guest=False):
        """ Get a registered user's ID.
        Args:
            request - An HTTP request with an access_token query parameter.
            allow_guest (bool): whether guest users may pass this check.
        Returns:
            deferred tuple of:
                UserID
                Access token ID (str; "" for application services)
                is_guest (bool)
        Raises:
            AuthError if no user by that token exists or the token is invalid.
        """
        # Can optionally look elsewhere in the request (e.g. headers)
        try:
            access_token = request.args["access_token"][0]
            # Check for application service tokens with a user_id override
            try:
                app_service = yield self.store.get_app_service_by_token(
                    access_token
                )
                if not app_service:
                    # Not an AS token; KeyError drops through to the normal
                    # user path below.
                    raise KeyError
                user_id = app_service.sender
                if "user_id" in request.args:
                    user_id = request.args["user_id"][0]
                    if not app_service.is_interested_in_user(user_id):
                        raise AuthError(
                            403,
                            "Application service cannot masquerade as this user."
                        )
                if not user_id:
                    raise KeyError
                request.authenticated_entity = user_id
                # App services have no token id and are never guests.
                defer.returnValue((UserID.from_string(user_id), "", False))
                return
            except KeyError:
                pass  # normal users won't have the user_id query parameter set.
            user_info = yield self._get_user_by_access_token(access_token)
            user = user_info["user"]
            token_id = user_info["token_id"]
            is_guest = user_info["is_guest"]
            ip_addr = self.hs.get_ip_from_request(request)
            user_agent = request.requestHeaders.getRawHeaders(
                "User-Agent",
                default=[""]
            )[0]
            if user and access_token and ip_addr:
                # NOTE: the resulting deferred is not yielded on; the IP/UA
                # record is best-effort and does not block the request.
                self.store.insert_client_ip(
                    user=user,
                    access_token=access_token,
                    ip=ip_addr,
                    user_agent=user_agent
                )
            if is_guest and not allow_guest:
                raise AuthError(
                    403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
                )
            request.authenticated_entity = user.to_string()
            defer.returnValue((user, token_id, is_guest,))
        except KeyError:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
                errcode=Codes.MISSING_TOKEN
            )
@defer.inlineCallbacks
def _get_user_by_access_token(self, token):
""" Get a registered user's ID.
Args:
token (str): The access token to get the user by.
Returns:
dict : dict that includes the user and the ID of their access token.
Raises:
AuthError if no user by that token exists or the token is invalid.
"""
try:
ret = yield self._get_user_from_macaroon(token)
except AuthError:
# TODO(daniel): Remove this fallback when all existing access tokens
# have been re-issued as macaroons.
ret = yield self._look_up_user_by_access_token(token)
defer.returnValue(ret)
    @defer.inlineCallbacks
    def _get_user_from_macaroon(self, macaroon_str):
        """Deserialize and validate a macaroon access token.
        Args:
            macaroon_str (str): serialized macaroon.
        Returns:
            deferred dict with "user", "is_guest" and "token_id" keys.
        Raises:
            AuthError: if the macaroon is malformed, fails validation, has no
                user caveat, or disagrees with the database record.
        """
        try:
            macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
            self.validate_macaroon(macaroon, "access", False)
            user_prefix = "user_id = "
            user = None
            guest = False
            # Pull the user id and guest flag out of the macaroon's caveats.
            for caveat in macaroon.caveats:
                if caveat.caveat_id.startswith(user_prefix):
                    user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
                elif caveat.caveat_id == "guest = true":
                    guest = True
            if user is None:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
                    errcode=Codes.UNKNOWN_TOKEN
                )
            if guest:
                ret = {
                    "user": user,
                    "is_guest": True,
                    "token_id": None,
                }
            else:
                # This codepath exists so that we can actually return a
                # token ID, because we use token IDs in place of device
                # identifiers throughout the codebase.
                # TODO(daniel): Remove this fallback when device IDs are
                # properly implemented.
                ret = yield self._look_up_user_by_access_token(macaroon_str)
                if ret["user"] != user:
                    logger.error(
                        "Macaroon user (%s) != DB user (%s)",
                        user,
                        ret["user"]
                    )
                    raise AuthError(
                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
                        "User mismatch in macaroon",
                        errcode=Codes.UNKNOWN_TOKEN
                    )
            defer.returnValue(ret)
        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
                errcode=Codes.UNKNOWN_TOKEN
            )
    def validate_macaroon(self, macaroon, type_string, verify_expiry):
        """
        validate that a Macaroon is understood by and was signed by this server.
        Args:
            macaroon(pymacaroons.Macaroon): The macaroon to validate
            type_string(str): The kind of token this is (e.g. "access", "refresh")
            verify_expiry(bool): Whether to verify whether the macaroon has expired.
                This should really always be True, but no clients currently implement
                token refresh, so we can't enforce expiry yet.
        """
        # First pass: every caveat on the macaroon must be satisfied by one of
        # these conditions, and the signature must match our secret key.
        v = pymacaroons.Verifier()
        v.satisfy_exact("gen = 1")
        v.satisfy_exact("type = " + type_string)
        v.satisfy_general(lambda c: c.startswith("user_id = "))
        v.satisfy_exact("guest = true")
        if verify_expiry:
            v.satisfy_general(self._verify_expiry)
        else:
            # Accept any "time < ..." caveat without consulting the clock.
            v.satisfy_general(lambda c: c.startswith("time < "))
        v.verify(macaroon, self.hs.config.macaroon_secret_key)
        # Second pass: reject macaroons carrying caveat prefixes this server
        # does not recognise at all.
        v = pymacaroons.Verifier()
        v.satisfy_general(self._verify_recognizes_caveats)
        v.verify(macaroon, self.hs.config.macaroon_secret_key)
def _verify_expiry(self, caveat):
prefix = "time < "
if not caveat.startswith(prefix):
return False
expiry = int(caveat[len(prefix):])
now = self.hs.get_clock().time_msec()
return now < expiry
def _verify_recognizes_caveats(self, caveat):
first_space = caveat.find(" ")
if first_space < 0:
return False
second_space = caveat.find(" ", first_space + 1)
if second_space < 0:
return False
return caveat[:second_space + 1] in self._KNOWN_CAVEAT_PREFIXES
    @defer.inlineCallbacks
    def _look_up_user_by_access_token(self, token):
        """Look a plain (non-macaroon) access token up in the datastore.
        Args:
            token (str): the access token.
        Returns:
            deferred dict with "user", "token_id" and "is_guest" keys.
        Raises:
            AuthError: if the token is not in the database.
        """
        ret = yield self.store.get_user_by_access_token(token)
        if not ret:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
                errcode=Codes.UNKNOWN_TOKEN
            )
        # Tokens stored in the DB are never guest tokens.
        user_info = {
            "user": UserID.from_string(ret.get("name")),
            "token_id": ret.get("token_id", None),
            "is_guest": False,
        }
        defer.returnValue(user_info)
    @defer.inlineCallbacks
    def get_appservice_by_req(self, request):
        """Authenticate a request made with an application service token.
        Args:
            request: HTTP request with an access_token query parameter.
        Returns:
            deferred application service record for the token.
        Raises:
            AuthError: if the token is missing or not a known AS token.
        """
        try:
            token = request.args["access_token"][0]
            service = yield self.store.get_app_service_by_token(token)
            if not service:
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN
                )
            request.authenticated_entity = service.sender
            defer.returnValue(service)
        except KeyError:
            # request.args contained no access_token at all.
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
            )
def is_server_admin(self, user):
return self.store.is_server_admin(user)
    @defer.inlineCallbacks
    def add_auth_events(self, builder, context):
        """Compute the auth events for an event being built and attach their
        (hashed) entries to the builder.
        Args:
            builder: event builder to populate.
            context: event context supplying `current_state`.
        """
        auth_ids = self.compute_auth_events(builder, context.current_state)
        auth_events_entries = yield self.store.add_event_hashes(
            auth_ids
        )
        builder.auth_events = auth_events_entries
    def compute_auth_events(self, event, current_state):
        """Select the event IDs that should be cited as this event's auth
        events: the create and power-levels events, plus — for membership
        events — the join rules, the relevant member event and any
        third-party invite.
        Returns:
            list of event IDs (empty for the create event itself).
        """
        # The create event has no auth events.
        if event.type == EventTypes.Create:
            return []
        auth_ids = []
        key = (EventTypes.PowerLevels, "", )
        power_level_event = current_state.get(key)
        if power_level_event:
            auth_ids.append(power_level_event.event_id)
        key = (EventTypes.JoinRules, "", )
        join_rule_event = current_state.get(key)
        key = (EventTypes.Member, event.user_id, )
        member_event = current_state.get(key)
        key = (EventTypes.Create, "", )
        create_event = current_state.get(key)
        if create_event:
            auth_ids.append(create_event.event_id)
        if join_rule_event:
            join_rule = join_rule_event.content.get("join_rule")
            is_public = join_rule == JoinRules.PUBLIC if join_rule else False
        else:
            is_public = False
        if event.type == EventTypes.Member:
            e_type = event.content["membership"]
            # Joins and invites depend on the room's join rules.
            if e_type in [Membership.JOIN, Membership.INVITE]:
                if join_rule_event:
                    auth_ids.append(join_rule_event.event_id)
            if e_type == Membership.JOIN:
                # A join into a public room needs no prior member event.
                if member_event and not is_public:
                    auth_ids.append(member_event.event_id)
            else:
                if member_event:
                    auth_ids.append(member_event.event_id)
                if e_type == Membership.INVITE:
                    if "third_party_invite" in event.content:
                        key = (
                            EventTypes.ThirdPartyInvite,
                            event.content["third_party_invite"]["token"]
                        )
                        third_party_invite = current_state.get(key)
                        if third_party_invite:
                            auth_ids.append(third_party_invite.event_id)
        elif member_event:
            # Non-membership events are authed by the sender's join.
            if member_event.content["membership"] == Membership.JOIN:
                auth_ids.append(member_event.event_id)
        return auth_ids
    @log_function
    def _can_send_event(self, event, auth_events):
        """Enforce the power level required to send this event.
        Raises:
            AuthError: if the sender's power level is below the required send
                level, or if a state event's state_key names a different user
                (for "@..."-style keys) or a different server domain.
        Returns:
            True if the event may be sent.
        """
        key = (EventTypes.PowerLevels, "", )
        send_level_event = auth_events.get(key)
        send_level = None
        if send_level_event:
            # Per-event-type override first, then the state/message default.
            send_level = send_level_event.content.get("events", {}).get(
                event.type
            )
            if send_level is None:
                if hasattr(event, "state_key"):
                    send_level = send_level_event.content.get(
                        "state_default", 50
                    )
                else:
                    send_level = send_level_event.content.get(
                        "events_default", 0
                    )
        if send_level:
            send_level = int(send_level)
        else:
            send_level = 0
        user_level = self._get_user_power_level(event.user_id, auth_events)
        if user_level < send_level:
            raise AuthError(
                403,
                "You don't have permission to post that to the room. " +
                "user_level (%d) < send_level (%d)" % (user_level, send_level)
            )
        # Check state_key
        if hasattr(event, "state_key"):
            if event.state_key.startswith("@"):
                # User-scoped state may only be set by that user.
                if event.state_key != event.user_id:
                    raise AuthError(
                        403,
                        "You are not allowed to set others state"
                    )
            else:
                # Otherwise the state_key is treated as a server domain and
                # must match the sender's domain.
                sender_domain = UserID.from_string(
                    event.user_id
                ).domain
                if sender_domain != event.state_key:
                    raise AuthError(
                        403,
                        "You are not allowed to set others state"
                    )
        return True
def check_redaction(self, event, auth_events):
"""Check whether the event sender is allowed to redact the target event.
Returns:
True if the the sender is allowed to redact the target event if the
target event was created by them.
False if the sender is allowed to redact the target event with no
further checks.
Raises:
AuthError if the event sender is definitely not allowed to redact
the target event.
"""
user_level = self._get_user_power_level(event.user_id, auth_events)
redact_level = self._get_named_level(auth_events, "redact", 50)
if user_level > redact_level:
return False
redacter_domain = EventID.from_string(event.event_id).domain
redactee_domain = EventID.from_string(event.redacts).domain
if redacter_domain == redactee_domain:
return True
raise AuthError(
403,
"You don't have permission to redact events"
)
def _check_power_levels(self, event, auth_events):
user_list = event.content.get("users", {})
# Validate users
for k, v in user_list.items():
try:
UserID.from_string(k)
except:
raise SynapseError(400, "Not a valid user_id: %s" % (k,))
try:
int(v)
except:
raise SynapseError(400, "Not a valid power level: %s" % (v,))
key = (event.type, event.state_key, )
current_state = auth_events.get(key)
if not current_state:
return
user_level = self._get_user_power_level(event.user_id, auth_events)
# Check other levels:
levels_to_check = [
("users_default", None),
("events_default", None),
("state_default", None),
("ban", None),
("redact", None),
("kick", None),
("invite", None),
]
old_list = current_state.content.get("users")
for user in set(old_list.keys() + user_list.keys()):
levels_to_check.append(
(user, "users")
)
old_list = current_state.content.get("events")
new_list = event.content.get("events")
for ev_id in set(old_list.keys() + new_list.keys()):
levels_to_check.append(
(ev_id, "events")
)
old_state = current_state.content
new_state = event.content
for level_to_check, dir in levels_to_check:
old_loc = old_state
new_loc = new_state
if dir:
old_loc = old_loc.get(dir, {})
new_loc = new_loc.get(dir, {})
if level_to_check in old_loc:
old_level = int(old_loc[level_to_check])
else:
old_level = None
if level_to_check in new_loc:
new_level = int(new_loc[level_to_check])
else:
new_level = None
if new_level is not None and old_level is not None:
if new_level == old_level:
continue
if dir == "users" and level_to_check != event.user_id:
if old_level == user_level:
raise AuthError(
403,
"You don't have permission to remove ops level equal "
"to your own"
)
if old_level > user_level or new_level > user_level:
raise AuthError(
403,
"You don't have permission to add ops level greater "
"than your own"
)
| 36.564533
| 89
| 0.558142
|
4a13f8cc2a9a2d4216a8c31d30199dcf11cc710b
| 1,737
|
py
|
Python
|
scripts/scrape_bus_stops.py
|
yi-jiayu/bus-eta-bot
|
3f0d21f540c5ce162960698f698ce85bc4f76f06
|
[
"MIT"
] | 19
|
2017-05-09T18:19:18.000Z
|
2021-07-30T14:42:18.000Z
|
scripts/scrape_bus_stops.py
|
yi-jiayu/bus-eta-bot
|
3f0d21f540c5ce162960698f698ce85bc4f76f06
|
[
"MIT"
] | 20
|
2017-02-12T15:28:59.000Z
|
2020-02-08T04:22:49.000Z
|
scripts/scrape_bus_stops.py
|
yi-jiayu/bus-eta-bot
|
3f0d21f540c5ce162960698f698ce85bc4f76f06
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import os
import sys
import urllib.request
endpoint = 'http://datamall2.mytransport.sg/ltaodataservice/BusStops?$skip='
def get_bus_stops(account_key: str, offset: int):
    """Request one page of bus stops from the LTA DataMall BusStops endpoint.

    Args:
        account_key: DataMall API key, sent as the AccountKey header.
        offset: value appended as the $skip paging parameter.

    Returns:
        The decoded JSON response body.
    """
    request = urllib.request.Request(
        endpoint + str(offset),
        headers={'AccountKey': account_key},
    )
    with urllib.request.urlopen(request) as response:
        return json.load(response)
def main():
    """Scrape all bus stops from LTA DataMall into a single JSON file.

    Usage: scrape_bus_stops.py [OUT_FILE]   (default ./bus_stops.json)
    Requires the DATAMALL_ACCOUNT_KEY environment variable.
    """
    account_key = os.environ.get('DATAMALL_ACCOUNT_KEY')
    if account_key is None:
        print('Error: DATAMALL_ACCOUNT_KEY environment variable not set.')
        sys.exit(1)
    if len(sys.argv) > 1:
        out_file = sys.argv[1]
    else:
        out_file = './bus_stops.json'
    print('[START] Fetch bus stops')
    # Phase 1: page through the API, appending one "offset: <json>" line per
    # page so a partial run leaves inspectable intermediate output.
    with open(out_file, 'w') as f:
        offset = 0
        count = 0
        while True:
            bus_stops = get_bus_stops(account_key, offset)
            if len(bus_stops['value']) == 0:
                break
            count += len(bus_stops['value'])
            f.write('{:04d}: {}\n'.format(offset, json.dumps(bus_stops['value'])))
            sys.stdout.write('\rOffset: {}'.format(offset))
            offset += len(bus_stops['value'])
        print('\rFetched {} bus stops.'.format(count))
    print('[END] Fetch bus stops')
    print('[START] Collect bus stops')
    all_bus_stops = []
    with open(out_file) as f:
        for line in f:
            # Split on the first ': ' rather than a fixed line[6:] slice:
            # '{:04d}' only zero-pads up to 9999, so a five-digit offset would
            # shift the prefix and make the old slice cut into it.
            _, _, payload = line.partition(': ')
            all_bus_stops.extend(json.loads(payload))
    print('[END] Collect bus stops')
    print('[START] Write bus stops')
    # Phase 2: rewrite the file as one flat JSON array.
    with open(out_file, 'w') as f:
        json.dump(all_bus_stops, f)
    print('[END] Write bus stops')
if __name__ == "__main__":
main()
| 27.571429
| 82
| 0.599885
|
4a13f9023194a8d4542f0eb96c6357471b291b71
| 51,883
|
py
|
Python
|
python/federatedml/util/data_transform.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 715
|
2019-01-24T10:52:03.000Z
|
2019-10-31T12:19:22.000Z
|
python/federatedml/util/data_transform.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 270
|
2019-02-11T02:57:36.000Z
|
2019-08-29T11:22:33.000Z
|
python/federatedml/util/data_transform.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 200
|
2019-01-26T14:21:35.000Z
|
2019-11-01T01:14:36.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
import functools
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformImputerMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformOutlierMeta
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformImputerParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformOutlierParam
from federatedml.statistic import data_overview
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
# =============================================================================
# DenseFeatureTransformer
# =============================================================================
class DenseFeatureTransformer(object):
    def __init__(self, data_transform_param):
        """Capture the DataTransformParam settings used to parse dense input."""
        # Raw-format options.
        self.delimitor = data_transform_param.delimitor
        self.data_type = data_transform_param.data_type
        self.exclusive_data_type = data_transform_param.exclusive_data_type
        # Missing-value imputation options.
        self.missing_fill = data_transform_param.missing_fill
        self.default_value = data_transform_param.default_value
        self.missing_fill_method = data_transform_param.missing_fill_method
        self.missing_impute = data_transform_param.missing_impute
        # Outlier replacement options.
        self.outlier_replace = data_transform_param.outlier_replace
        self.outlier_replace_method = data_transform_param.outlier_replace_method
        self.outlier_impute = data_transform_param.outlier_impute
        self.outlier_replace_value = data_transform_param.outlier_replace_value
        # Label handling (names are lower-cased to match the header parsing).
        self.with_label = data_transform_param.with_label
        self.label_name = data_transform_param.label_name.lower() if self.with_label else None
        self.label_type = data_transform_param.label_type if self.with_label else None
        self.output_format = data_transform_param.output_format
        # State populated during fit/transform.
        self.missing_impute_rate = None
        self.outlier_replace_rate = None
        self.label_idx = None
        self.header = None
        self.sid_name = None
        self.exclusive_data_type_fid_map = {}
        self.match_id_name = None
        self.with_match_id = data_transform_param.with_match_id
    def generate_header(self, input_data, mode="fit"):
        """Parse the input table's header, locating the match-id and label
        columns and recording the feature header (without them).

        Side effects: sets self.match_id_name, self.label_idx, self.header,
        self.sid_name and self.exclusive_data_type_fid_map.
        """
        header = input_data.schema["header"].lower()
        sid_name = input_data.schema["sid"].lower()
        LOGGER.debug("header is {}".format(header))
        LOGGER.debug("sid_name is {}".format(sid_name))
        if not header and not sid_name:
            raise ValueError("dense input-format should have header schema")
        header_gen = None
        if self.with_match_id:
            # The match id is always the first column of the raw header.
            self.match_id_name = header.split(self.delimitor, -1)[0]
            if self.with_label and self.label_name == self.match_id_name:
                raise ValueError("Match id column name equals to label column name")
        if self.with_label:
            if mode == "fit":
                if not header:
                    raise ValueError("dense input-format for fit stage should not be None if with_label is true")
                # label_idx is an index into the FULL header (including any
                # match-id column); extract_feature_value relies on this.
                self.label_idx = header.split(self.delimitor, -1).index(self.label_name)
                header_list = header.split(self.delimitor, -1)
                header_gen = header_list[:self.label_idx] + header_list[self.label_idx + 1:]
            elif header:
                # transform stage: the label column may legitimately be absent.
                header_list = header.split(self.delimitor, -1)
                if self.label_name in header_list:
                    self.label_idx = header_list.index(self.label_name)
                    header_gen = header_list[:self.label_idx] + header_list[self.label_idx + 1:]
                else:
                    self.label_idx = None
                    header_gen = header_list
        elif header:
            header_gen = header.split(self.delimitor, -1)
        if self.with_match_id:
            # Drop the match-id column from the feature header.
            header_gen = header_gen[1:]
        self.header = header_gen
        self.sid_name = sid_name
        if header_gen:
            # Map feature index -> overriding dtype for exclusive columns.
            for i in range(len(header_gen)):
                col_name = header_gen[i]
                if self.exclusive_data_type is not None and col_name in self.exclusive_data_type:
                    self.exclusive_data_type_fid_map[i] = self.exclusive_data_type[col_name]
def get_schema(self):
schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
return schema
def extract_feature_value(self, value):
value = value.split(self.delimitor, -1)
if not self.header:
return []
elif self.with_match_id and self.label_idx is not None:
if len(value) == 2:
return []
elif len(value) < 2:
raise ValueError("Only {} column is found, can not extract match_id and label")
else:
return value[1: self.label_idx] + value[self.label_idx + 1:]
elif self.with_match_id:
if len(value) < 1:
raise ValueError("Only 0 column is found, can not extract match_id")
elif len(value) == 1:
return []
else:
return value[1:]
elif self.label_idx is not None:
if len(value) < 1:
raise ValueError("Only 0 column is found, can not extract label")
elif len(value) == 1:
return []
else:
return value[:self.label_idx] + value[self.label_idx + 1:]
else:
return value
    def read_data(self, input_data, mode="fit"):
        """Convert a raw dense table into a table of Instance objects.

        Args:
            input_data: table of delimited string rows.
            mode (str): "fit" to learn imputation values, anything else to
                apply the previously fitted transform.
        Returns:
            table of Instance objects.
        """
        LOGGER.info("start to read dense data and change data to instance")
        abnormal_detection.empty_table_detection(input_data)
        input_data_labels = None
        input_data_match_id = None
        fit_header = None
        if mode == "transform":
            # Remember the header learnt at fit time so the transformed
            # output can be re-aligned to it below.
            fit_header = self.header
        self.generate_header(input_data, mode=mode)
        input_data_features = input_data.mapValues(self.extract_feature_value)
        if self.label_idx is not None:
            data_shape = data_overview.get_data_shape(input_data)
            if not data_shape or self.label_idx >= data_shape:
                raise ValueError("input data's value is empty or it does not contain a label")
            input_data_labels = input_data.mapValues(lambda value: value.split(self.delimitor, -1)[self.label_idx])
        if self.with_match_id:
            # Match id is always the first column of the raw row.
            input_data_match_id = input_data.mapValues(
                lambda value: value.split(self.delimitor, -1)[0])
        if mode == "fit":
            data_instance = self.fit(input_data, input_data_features, input_data_labels, input_data_match_id)
        else:
            data_instance = self.transform(input_data_features, input_data_labels, input_data_match_id)
            data_instance = data_overview.header_alignment(data_instance, fit_header)
        return data_instance
def fit(self, input_data, input_data_features, input_data_labels, input_data_match_id):
schema = self.get_schema()
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "fit")
input_data_features = self.replace_outlier_value(input_data_features, "fit")
data_instance = self.gen_data_instance(input_data_features, input_data_labels, input_data_match_id)
set_schema(data_instance, schema)
return data_instance
@assert_io_num_rows_equal
def transform(self, input_data_features, input_data_labels, input_data_match_id):
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "transform")
input_data_features = self.replace_outlier_value(input_data_features, "transform")
data_instance = self.gen_data_instance(input_data_features, input_data_labels, input_data_match_id)
set_schema(data_instance, schema)
return data_instance
    def fill_missing_value(self, input_data_features, mode="fit"):
        """Impute missing feature values if missing_fill is enabled.

        In "fit" mode this learns the replacement values (stored in
        self.default_value); otherwise it reuses them. Also records the
        imputation rate in self.missing_impute_rate.
        """
        if self.missing_fill:
            from federatedml.feature.imputer import Imputer
            imputer_processor = Imputer(self.missing_impute)
            if mode == "fit":
                input_data_features, self.default_value = imputer_processor.fit(input_data_features,
                                                                                replace_method=self.missing_fill_method,
                                                                                replace_value=self.default_value)
                if self.missing_impute is None:
                    # Remember which tokens the imputer treated as missing.
                    self.missing_impute = imputer_processor.get_missing_value_list()
            else:
                input_data_features = imputer_processor.transform(input_data_features,
                                                                  transform_value=self.default_value)
                if self.missing_impute is None:
                    self.missing_impute = imputer_processor.get_missing_value_list()
            self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
        return input_data_features
    def replace_outlier_value(self, input_data_features, mode="fit"):
        """Replace outlier values if outlier_replace is enabled.

        In "fit" mode this learns the replacement values (stored in
        self.outlier_replace_value); otherwise it reuses them. Also records
        the replacement rate in self.outlier_replace_rate.
        """
        if self.outlier_replace:
            from federatedml.feature.imputer import Imputer
            imputer_processor = Imputer(self.outlier_impute)
            if mode == "fit":
                input_data_features, self.outlier_replace_value = \
                    imputer_processor.fit(input_data_features,
                                          replace_method=self.outlier_replace_method,
                                          replace_value=self.outlier_replace_value)
                if self.outlier_impute is None:
                    # Remember which tokens the imputer treated as outliers.
                    self.outlier_impute = imputer_processor.get_missing_value_list()
            else:
                input_data_features = imputer_processor.transform(input_data_features,
                                                                  transform_value=self.outlier_replace_value)
            self.outlier_replace_rate = imputer_processor.get_impute_rate(mode)
        return input_data_features
def gen_data_instance(self, input_data_features, input_data_labels, input_data_match_id):
if self.label_idx is not None:
data_instance = input_data_features.join(input_data_labels,
lambda features, label: self.to_instance(features, label))
else:
data_instance = input_data_features.mapValues(lambda features: self.to_instance(features))
if self.with_match_id:
data_instance = data_instance.join(input_data_match_id, self.append_match_id)
return data_instance
def append_match_id(self, inst, match_id):
inst.inst_id = match_id
return inst
def to_instance(self, features, label=None):
if self.header is None and len(features) != 0:
raise ValueError("features shape {} not equal to header shape 0".format(len(features)))
elif self.header is not None and len(self.header) != len(features):
raise ValueError("features shape {} not equal to header shape {}".format(len(features), len(self.header)))
if self.label_idx is not None:
if self.label_type == 'int':
label = int(label)
elif self.label_type in ["float", "float64"]:
label = float(label)
format_features = DenseFeatureTransformer.gen_output_format(features, self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
else:
format_features = DenseFeatureTransformer.gen_output_format(features, self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
return Instance(inst_id=None,
features=format_features,
label=label)
    @staticmethod
    def gen_output_format(features, data_type='float', exclusive_data_type_fid_map=None,
                          output_format='dense', missing_impute=None):
        """Convert one row of raw feature strings into a dense ndarray or a SparseVector.

        Values found in *missing_impute* (or, when it is None, the built-in null
        markers '', 'NULL', 'null', 'NA') become NaN; in sparse mode, zero values
        are dropped for numeric dtypes.
        """
        if output_format not in ["dense", "sparse"]:
            raise ValueError("output format {} is not define".format(output_format))
        if output_format == "dense":
            format_features = copy.deepcopy(features)
            if data_type in ["int", "int64", "long", "float", "float64", "double"]:
                for i in range(len(features)):
                    if (missing_impute is not None and features[i] in missing_impute) or \
                            (missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
                        format_features[i] = np.nan
            if exclusive_data_type_fid_map:
                for fid in range(len(features)):
                    if fid in exclusive_data_type_fid_map:
                        dtype = exclusive_data_type_fid_map[fid]
                    else:
                        dtype = data_type
                    format_features[fid] = getattr(np, dtype)(features[fid])
                # mixed per-column dtypes force an object array
                return np.asarray(format_features, dtype=object)
            else:
                return np.asarray(format_features, dtype=data_type)
        # sparse path: keep only missing markers (as NaN) and non-zero values
        indices = []
        data = []
        column_shape = len(features)
        non_zero = 0
        for i in range(column_shape):
            if (missing_impute is not None and features[i] in missing_impute) or \
                    (missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
                indices.append(i)
                data.append(np.nan)
                non_zero += 1
            elif data_type in ['float', 'float64', "double"]:
                # treat values within FLOAT_ZERO of 0 as exact zeros and skip them
                if np.fabs(float(features[i])) < consts.FLOAT_ZERO:
                    continue
                indices.append(i)
                data.append(float(features[i]))
                non_zero += 1
            elif data_type in ['int', "int64", "long"]:
                if int(features[i]) == 0:
                    continue
                indices.append(i)
                data.append(int(features[i]))
            else:
                indices.append(i)
                data.append(features[i])
        return SparseVector(indices, data, column_shape)
    def get_summary(self):
        """Collect missing-fill and outlier-replace statistics for reporting."""
        if not self.missing_fill and not self.outlier_replace:
            return {}
        summary_buf = {}
        if self.missing_fill:
            missing_summary = dict()
            missing_summary["missing_value"] = list(self.missing_impute)
            missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
            missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
            summary_buf["missing_fill_info"] = missing_summary
        if self.outlier_replace:
            outlier_replace_summary = dict()
            outlier_replace_summary["outlier_value"] = list(self.outlier_impute)
            outlier_replace_summary["outlier_replace_value"] = dict(zip(self.header, self.outlier_replace_value))
            outlier_replace_summary["outlier_replace_rate"] = dict(zip(self.header, self.outlier_replace_rate))
            # NOTE(review): key name looks inconsistent with "missing_fill_info";
            # "outlier_replace_info" was probably intended — confirm consumers before renaming.
            summary_buf["outlier_replace_rate"] = outlier_replace_summary
        return summary_buf
    def save_model(self):
        """Serialize transformer settings plus imputer/outlier sub-models into protobuf."""
        transform_meta, transform_param = save_data_transform_model(input_format="dense",
                                                                    delimitor=self.delimitor,
                                                                    data_type=self.data_type,
                                                                    exclusive_data_type=self.exclusive_data_type,
                                                                    with_label=self.with_label,
                                                                    label_type=self.label_type,
                                                                    output_format=self.output_format,
                                                                    header=self.header,
                                                                    sid_name=self.sid_name,
                                                                    label_name=self.label_name,
                                                                    with_match_id=self.with_match_id,
                                                                    model_name="DenseFeatureTransformer")
        missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
                                                                                 self.missing_fill_method,
                                                                                 self.missing_impute,
                                                                                 self.default_value,
                                                                                 self.missing_impute_rate,
                                                                                 self.header,
                                                                                 "Imputer")
        transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
        transform_param.imputer_param.CopyFrom(missing_imputer_param)
        outlier_meta, outlier_param = save_outlier_model(self.outlier_replace,
                                                         self.outlier_replace_method,
                                                         self.outlier_impute,
                                                         self.outlier_replace_value,
                                                         self.outlier_replace_rate,
                                                         self.header,
                                                         "Outlier")
        transform_meta.outlier_meta.CopyFrom(outlier_meta)
        transform_param.outlier_param.CopyFrom(outlier_param)
        return {"DataTransformMeta": transform_meta,
                "DataTransformParam": transform_param
                }
    def load_model(self, model_meta, model_param):
        """Restore transformer settings and imputer/outlier sub-models from protobuf."""
        # _1/_2 discard tag_with_value / tag_value_delimitor (dense input has no tags)
        self.delimitor, self.data_type, self.exclusive_data_type, _1, _2, self.with_label, \
            self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = \
            load_data_transform_model("DenseFeatureTransformer", model_meta, model_param)
        self.missing_fill, self.missing_fill_method, \
            self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
                                                                                 "Imputer",
                                                                                 model_meta.imputer_meta,
                                                                                 model_param.imputer_param)
        self.outlier_replace, self.outlier_replace_method, \
            self.outlier_impute, self.outlier_replace_value = load_outlier_model(self.header,
                                                                                 "Outlier",
                                                                                 model_meta.outlier_meta,
                                                                                 model_param.outlier_param)
# =============================================================================
# SparseFeatureTransformer: mainly for libsvm input format
# =============================================================================
class SparseFeatureTransformer(object):
    """Transformer for sparse (libsvm-style) input lines: "[match_id] [label] fid:val fid:val ..."."""
    def __init__(self, data_transform_param):
        """Capture parsing settings from *data_transform_param*."""
        # column separator of the raw line
        self.delimitor = data_transform_param.delimitor
        self.data_type = data_transform_param.data_type
        self.label_type = data_transform_param.label_type
        self.output_format = data_transform_param.output_format
        # header is synthesized from the max feature id during fit
        self.header = None
        self.sid_name = "sid"
        self.with_match_id = data_transform_param.with_match_id
        self.match_id_name = "match_id" if self.with_match_id else None
        self.with_label = data_transform_param.with_label
        self.label_name = data_transform_param.label_name if self.with_label else None
def get_max_feature_index(self, line, delimitor=' '):
if line.strip() == '':
raise ValueError("find an empty line, please check!!!")
cols = line.split(delimitor, -1)
offset = 0
if self.with_match_id:
offset += 1
if self.with_label:
offset += 1
if len(cols) <= offset:
return -1
return max([int(fid_value.split(":", -1)[0]) for fid_value in cols[offset:]])
def generate_header(self, max_feature):
self.header = [str(i) for i in range(max_feature + 1)]
    def read_data(self, input_data, mode="fit"):
        """Parse a libsvm-style table into Instances; "fit" mode also infers the header."""
        LOGGER.info("start to read sparse data and change data to instance")
        abnormal_detection.empty_table_detection(input_data)
        if not data_overview.get_data_shape(input_data):
            raise ValueError("input data's value is empty, it does not contain a label")
        if mode == "fit":
            data_instance = self.fit(input_data)
        else:
            data_instance = self.transform(input_data)
        schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
        set_schema(data_instance, schema)
        return data_instance
    def fit(self, input_data):
        """Scan the table for the max feature id, build the header, then convert rows."""
        get_max_fid = functools.partial(self.get_max_feature_index, delimitor=self.delimitor)
        max_feature = input_data.mapValues(get_max_fid).reduce(lambda max_fid1, max_fid2: max(max_fid1, max_fid2))
        if max_feature == -1:
            raise ValueError("no feature value in input data, please check!")
        self.generate_header(max_feature)
        data_instance = self.gen_data_instance(input_data, max_feature)
        return data_instance
    def transform(self, input_data):
        """Convert rows using the feature dimension learned at fit time."""
        # header was generated during fit, so its length fixes the feature count
        max_feature = len(self.header) - 1
        data_instance = self.gen_data_instance(input_data, max_feature)
        return data_instance
    def gen_data_instance(self, input_data, max_feature):
        """Map every raw line to an Instance via the static to_instance parser."""
        # settings are packed into a list because to_instance is a staticmethod
        params = [self.delimitor, self.data_type,
                  self.label_type, self.with_match_id,
                  self.output_format, self.with_label, max_feature]
        to_instance_with_param = functools.partial(self.to_instance, params)
        data_instance = input_data.mapValues(to_instance_with_param)
        return data_instance
    @staticmethod
    def to_instance(param_list, value):
        """Parse one libsvm-style line into an Instance (dense or sparse features)."""
        delimitor = param_list[0]
        data_type = param_list[1]
        label_type = param_list[2]
        with_match_id = param_list[3]
        output_format = param_list[4]
        with_label = param_list[5]
        max_fid = param_list[6]
        if output_format not in ["dense", "sparse"]:
            raise ValueError("output format {} is not define".format(output_format))
        cols = value.split(delimitor, -1)
        # next_idx tracks the first unread column (after optional match id / label)
        next_idx = 0
        if with_match_id:
            match_id = cols[0]
            next_idx = 1
        else:
            match_id = None
        label = None
        if with_label:
            label = cols[next_idx]
            if label_type == 'int':
                label = int(label)
            elif label_type in ["float", "float64"]:
                label = float(label)
            next_idx += 1
        # collect (feature id, value) pairs from the remaining "fid:val" tokens
        fid_value = []
        for i in range(next_idx, len(cols)):
            fid, val = cols[i].split(":", -1)
            fid = int(fid)
            if data_type in ["float", "float64"]:
                val = float(val)
            elif data_type in ["int", "int64"]:
                val = int(val)
            fid_value.append((fid, val))
        if output_format == "dense":
            # absent feature ids default to 0 in the dense representation
            features = [0 for i in range(max_fid + 1)]
            for fid, val in fid_value:
                features[fid] = val
            features = np.asarray(features, dtype=data_type)
        else:
            indices = []
            data = []
            for fid, val in fid_value:
                indices.append(fid)
                data.append(val)
            features = SparseVector(indices, data, max_fid + 1)
        return Instance(inst_id=match_id,
                        features=features,
                        label=label)
    def save_model(self):
        """Serialize settings to protobuf; imputer/outlier sub-models are saved disabled."""
        transform_meta, transform_param = save_data_transform_model(input_format="sparse",
                                                                    delimitor=self.delimitor,
                                                                    data_type=self.data_type,
                                                                    label_type=self.label_type,
                                                                    output_format=self.output_format,
                                                                    header=self.header,
                                                                    sid_name=self.sid_name,
                                                                    label_name=self.label_name,
                                                                    with_match_id=self.with_match_id,
                                                                    with_label=self.with_label,
                                                                    model_name="SparseFeatureTransformer")
        # sparse input performs no imputation/outlier handling; store empty sub-models
        missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(missing_fill=False,
                                                                                 model_name="Imputer")
        transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
        transform_param.imputer_param.CopyFrom(missing_imputer_param)
        outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
                                                         model_name="Outlier")
        transform_meta.outlier_meta.CopyFrom(outlier_meta)
        transform_param.outlier_param.CopyFrom(outlier_param)
        return {"DataTransformMeta": transform_meta,
                "DataTransformParam": transform_param
                }
    def load_model(self, model_meta, model_param):
        """Restore settings from protobuf; tag-related fields (_0/_1/_2) are discarded."""
        self.delimitor, self.data_type, _0, _1, _2, self.with_label, \
            self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = \
            load_data_transform_model(
                "SparseFeatureTransformer",
                model_meta,
                model_param)
# =============================================================================
# SparseTagTransformer: mainly for tag data
# =============================================================================
class SparseTagTransformer(object):
    """Transformer for tag data: lines of delimiter-separated tags or tag:value pairs."""
    def __init__(self, data_transform_param):
        """Capture parsing / missing-fill settings from *data_transform_param*."""
        self.delimitor = data_transform_param.delimitor
        self.data_type = data_transform_param.data_type
        self.tag_with_value = data_transform_param.tag_with_value
        self.tag_value_delimitor = data_transform_param.tag_value_delimitor
        self.with_label = data_transform_param.with_label
        self.label_type = data_transform_param.label_type if self.with_label else None
        self.output_format = data_transform_param.output_format
        # header (sorted tag vocabulary) is learned during fit
        self.header = None
        self.sid_name = "sid"
        # fix: the original statement assigned self.label_name twice in one line
        self.label_name = data_transform_param.label_name
        self.missing_fill = data_transform_param.missing_fill
        self.missing_fill_method = data_transform_param.missing_fill_method
        self.default_value = data_transform_param.default_value
        self.with_match_id = data_transform_param.with_match_id
        self.match_id_name = "match_id" if self.with_match_id else None
        self.missing_impute_rate = None
        self.missing_impute = None
@staticmethod
def agg_tag(kvs, delimitor=' ', with_label=True, with_match_id=False, tag_with_value=False,
tag_value_delimitor=":"):
tags_set = set()
offset = 1 if with_match_id else 0
for key, value in kvs:
if with_label:
cols = value.split(delimitor, -1)[1 + offset:]
else:
cols = value.split(delimitor, -1)[0 + offset:]
if tag_with_value is False:
tags = cols
else:
tags = [fea_value.split(tag_value_delimitor, -1)[0] for fea_value in cols]
tags_set |= set(tags)
return tags_set
    def generate_header(self, tags):
        """Use the (sorted) tag list discovered during fit as the feature header."""
        self.header = tags
    def read_data(self, input_data, mode="fit"):
        """Parse tag-formatted lines into Instances; "fit" mode also learns the tag header."""
        LOGGER.info("start to read sparse data and change data to instance")
        abnormal_detection.empty_table_detection(input_data)
        if mode == "fit":
            data_instance = self.fit(input_data)
            if self.with_label:
                # tag input carries no label column name, so use a fixed one
                self.label_name = "label"
        else:
            data_instance = self.transform(input_data)
        schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
        set_schema(data_instance, schema)
        return data_instance
@staticmethod
def change_tag_to_str(value, tags_dict=None, delimitor=",", with_label=False, with_match_id=False,
tag_value_delimitor=":"):
vals = value.split(delimitor, -1)
ret = [''] * len(tags_dict)
offset = 0
if with_label:
offset += 1
if with_match_id:
offset += 1
vals = vals[2:]
for i in range(len(vals)):
tag, value = vals[i].split(tag_value_delimitor, -1)
idx = tags_dict.get(tag, None)
if idx is not None:
ret[idx] = value
return ret
@staticmethod
def change_str_to_tag(value, tags_dict=None, delimitor=",", tag_value_delimitor=":"):
ret = [None] * len(tags_dict)
tags = sorted(list(tags_dict.keys()))
for i in range(len(value)):
tag, val = tags[i], value[i]
ret[i] = tag_value_delimitor.join([tag, val])
return delimitor.join(ret)
    def fill_missing_value(self, input_data, tags_dict, mode="fit"):
        """Impute missing tag values: expand lines to dense string rows, run the Imputer, re-pack."""
        str_trans_method = functools.partial(self.change_tag_to_str,
                                             tags_dict=tags_dict,
                                             delimitor=self.delimitor,
                                             with_label=self.with_label,
                                             with_match_id=self.with_match_id,
                                             tag_value_delimitor=self.tag_value_delimitor)
        input_data = input_data.mapValues(str_trans_method)
        schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
        set_schema(input_data, schema)
        # imported lazily to avoid a module-level dependency cycle
        from federatedml.feature.imputer import Imputer
        imputer_processor = Imputer()
        if mode == "fit":
            data, self.default_value = imputer_processor.fit(input_data,
                                                             replace_method=self.missing_fill_method,
                                                             replace_value=self.default_value)
            LOGGER.debug("self.default_value is {}".format(self.default_value))
        else:
            data = imputer_processor.transform(input_data,
                                               transform_value=self.default_value)
        if self.missing_impute is None:
            self.missing_impute = imputer_processor.get_missing_value_list()
        LOGGER.debug("self.missing_impute is {}".format(self.missing_impute))
        self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
        # convert the imputed dense string rows back into tag:value lines
        str_trans_tag_method = functools.partial(self.change_str_to_tag,
                                                 tags_dict=tags_dict,
                                                 delimitor=self.delimitor,
                                                 tag_value_delimitor=self.tag_value_delimitor)
        data = data.mapValues(str_trans_tag_method)
        return data
    def fit(self, input_data):
        """Discover the global tag vocabulary, build the header, then convert rows."""
        tag_aggregator = functools.partial(SparseTagTransformer.agg_tag,
                                           delimitor=self.delimitor,
                                           with_label=self.with_label,
                                           with_match_id=self.with_match_id,
                                           tag_with_value=self.tag_with_value,
                                           tag_value_delimitor=self.tag_value_delimitor)
        tags_set_list = list(input_data.applyPartitions(tag_aggregator).collect())
        tags_set = set()
        for _, _tags_set in tags_set_list:
            tags_set |= _tags_set
        tags = list(tags_set)
        # sort so that column order is deterministic across runs
        tags = sorted(tags)
        tags_dict = dict(zip(tags, range(len(tags))))
        self.generate_header(tags)
        if self.tag_with_value and self.missing_fill:
            input_data = self.fill_missing_value(input_data, tags_dict, mode="fit")
        data_instance = self.gen_data_instance(input_data, tags_dict)
        return data_instance
    def transform(self, input_data):
        """Convert rows using the tag vocabulary learned at fit time."""
        tags_dict = dict(zip(self.header, range(len(self.header))))
        if self.tag_with_value and self.missing_fill:
            input_data = self.fill_missing_value(input_data, tags_dict, mode="transform")
        data_instance = self.gen_data_instance(input_data, tags_dict)
        return data_instance
    def gen_data_instance(self, input_data, tags_dict):
        """Map every raw tag line to an Instance via the static to_instance parser."""
        # settings are packed into a list because to_instance is a staticmethod
        params = [self.delimitor,
                  self.data_type,
                  self.tag_with_value,
                  self.tag_value_delimitor,
                  self.with_label,
                  self.with_match_id,
                  self.label_type,
                  self.output_format,
                  tags_dict]
        to_instance_with_param = functools.partial(self.to_instance, params)
        data_instance = input_data.mapValues(to_instance_with_param)
        return data_instance
def get_summary(self):
if not self.missing_fill:
return {}
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf = {"missing_fill_info": missing_summary}
return summary_buf
    @staticmethod
    def to_instance(param_list, value):
        """Parse one tag (or tag:value) line into an Instance (dense or sparse)."""
        delimitor = param_list[0]
        data_type = param_list[1]
        tag_with_value = param_list[2]
        tag_value_delimitor = param_list[3]
        with_label = param_list[4]
        with_match_id = param_list[5]
        label_type = param_list[6]
        output_format = param_list[7]
        tags_dict = param_list[8]
        if output_format not in ["dense", "sparse"]:
            raise ValueError("output format {} is not define".format(output_format))
        cols = value.split(delimitor, -1)
        # offset tracks how many leading columns (match id / label) to skip
        offset = 0
        label = None
        match_id = None
        if with_match_id:
            offset += 1
            match_id = cols[0]
        if with_label:
            label = cols[offset]
            offset += 1
            if label_type == 'int':
                label = int(label)
            elif label_type in ["float", "float64"]:
                label = float(label)
        if output_format == "dense":
            # absent tags stay 0; present bare tags become 1, tag:value uses the value
            features = [0 for i in range(len(tags_dict))]
            for fea in cols[offset:]:
                if tag_with_value:
                    _tag, _val = fea.split(tag_value_delimitor, -1)
                    if _tag in tags_dict:
                        features[tags_dict.get(_tag)] = _val
                else:
                    if fea in tags_dict:
                        features[tags_dict.get(fea)] = 1
            features = np.asarray(features, dtype=data_type)
        else:
            indices = []
            data = []
            for fea in cols[offset:]:
                if tag_with_value:
                    _tag, _val = fea.split(tag_value_delimitor, -1)
                else:
                    _tag = fea
                    _val = 1
                # tags not seen during fit are silently dropped
                if _tag not in tags_dict:
                    continue
                indices.append(tags_dict.get(_tag))
                if data_type in ["float", "float64"]:
                    _val = float(_val)
                elif data_type in ["int", "int64", "long"]:
                    _val = int(_val)
                elif data_type == "str":
                    _val = str(_val)
                data.append(_val)
            features = SparseVector(indices, data, len(tags_dict))
        return Instance(inst_id=match_id,
                        features=features,
                        label=label)
    def save_model(self):
        """Serialize settings and the imputer sub-model; outlier handling is saved disabled."""
        transform_meta, transform_param = save_data_transform_model(input_format="tag",
                                                                    delimitor=self.delimitor,
                                                                    data_type=self.data_type,
                                                                    tag_with_value=self.tag_with_value,
                                                                    tag_value_delimitor=self.tag_value_delimitor,
                                                                    with_label=self.with_label,
                                                                    label_type=self.label_type,
                                                                    with_match_id=self.with_match_id,
                                                                    output_format=self.output_format,
                                                                    header=self.header,
                                                                    sid_name=self.sid_name,
                                                                    label_name=self.label_name,
                                                                    model_name="Transformer")
        missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
                                                                                 self.missing_fill_method,
                                                                                 self.missing_impute,
                                                                                 self.default_value,
                                                                                 self.missing_impute_rate,
                                                                                 self.header,
                                                                                 "Imputer")
        transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
        transform_param.imputer_param.CopyFrom(missing_imputer_param)
        # tag input performs no outlier replacement; store an empty sub-model
        outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
                                                         model_name="Outlier")
        transform_meta.outlier_meta.CopyFrom(outlier_meta)
        transform_param.outlier_param.CopyFrom(outlier_param)
        return {"DataTransformMeta": transform_meta,
                "DataTransformParam": transform_param
                }
    def load_model(self, model_meta, model_param):
        """Restore settings and the imputer sub-model from protobuf (_0 discards exclusive_data_type)."""
        self.delimitor, self.data_type, _0, self.tag_with_value, self.tag_value_delimitor, self.with_label, \
            self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = load_data_transform_model(
                "SparseTagTransformer",
                model_meta,
                model_param)
        self.missing_fill, self.missing_fill_method, \
            self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
                                                                                 "Imputer",
                                                                                 model_meta.imputer_meta,
                                                                                 model_param.imputer_param)
class DataTransform(ModelBase):
    """Dispatch wrapper that selects the concrete transformer from the input format."""
    def __init__(self):
        super(DataTransform, self).__init__()
        self.transformer = None
        # imported lazily to avoid a module-level dependency cycle
        from federatedml.param.data_transform_param import DataTransformParam
        self.model_param = DataTransformParam()
    def _init_model(self, model_param):
        """Instantiate the transformer matching model_param.input_format."""
        # fix: replaced a stray debug print() with the module logger
        LOGGER.debug("model_param is {}".format(model_param))
        if model_param.input_format == "dense":
            self.transformer = DenseFeatureTransformer(self.model_param)
        elif model_param.input_format == "sparse":
            self.transformer = SparseFeatureTransformer(self.model_param)
        elif model_param.input_format == "tag":
            self.transformer = SparseTagTransformer(self.model_param)
        self.model_param = model_param
    def load_model(self, model_dict):
        """Locate the saved Meta/Param pair, rebuild the transformer, and restore it."""
        input_model_param = None
        input_model_meta = None
        for _, value in model_dict["model"].items():
            for model in value:
                if model.endswith("Meta"):
                    input_model_meta = value[model]
                if model.endswith("Param"):
                    input_model_param = value[model]
        if input_model_meta.input_format == "dense":
            self.transformer = DenseFeatureTransformer(self.model_param)
        elif input_model_meta.input_format == "sparse":
            self.transformer = SparseFeatureTransformer(self.model_param)
        elif input_model_meta.input_format == "tag":
            self.transformer = SparseTagTransformer(self.model_param)
        self.transformer.load_model(input_model_meta, input_model_param)
    def fit(self, data_inst):
        """Fit the underlying transformer and publish its summary (if any)."""
        data_inst = self.transformer.read_data(data_inst, "fit")
        if isinstance(self.transformer, (DenseFeatureTransformer, SparseTagTransformer)):
            summary_buf = self.transformer.get_summary()
            if summary_buf:
                self.set_summary(summary_buf)
        return data_inst
    def transform(self, data_inst):
        """Apply the already-fitted transformer to new data."""
        return self.transformer.read_data(data_inst, "transform")
    def export_model(self):
        """Serialize the underlying transformer, stamping the need_run flag on the meta."""
        model_dict = self.transformer.save_model()
        model_dict["DataTransformMeta"].need_run = self.need_run
        return model_dict
def make_schema(header=None, sid_name=None, label_name=None, match_id_name=None):
    """Assemble a schema dict, including only the fields that are truthy."""
    schema = {}
    for key, val in (("header", header),
                     ("sid_name", sid_name),
                     ("label_name", label_name),
                     ("match_id_name", match_id_name)):
        if val:
            schema[key] = val
    ModelBase.check_schema_content(schema)
    return schema
def set_schema(data_instance, schema):
    """Attach *schema* to the table/instance object in place."""
    setattr(data_instance, "schema", schema)
def save_data_transform_model(input_format="dense",
                              delimitor=",",
                              data_type="str",
                              exclusive_data_type=None,
                              tag_with_value=False,
                              tag_value_delimitor=":",
                              with_label=False,
                              label_name='',
                              label_type="int",
                              output_format="dense",
                              header=None,
                              sid_name=None,
                              with_match_id=False,
                              model_name="DataTransform"):
    """Pack shared data-transform settings into a (DataTransformMeta, DataTransformParam) pair."""
    model_meta = DataTransformMeta()
    model_param = DataTransformParam()
    model_meta.input_format = input_format
    model_meta.delimitor = delimitor
    model_meta.data_type = data_type
    model_meta.tag_with_value = tag_with_value
    model_meta.tag_value_delimitor = tag_value_delimitor
    model_meta.with_label = with_label
    if with_label:
        # label metadata is meaningful only when a label column exists
        model_meta.label_name = label_name
        model_meta.label_type = label_type
    model_meta.output_format = output_format
    model_meta.with_match_id = with_match_id
    if header is not None:
        model_param.header.extend(header)
    if sid_name:
        model_param.sid_name = sid_name
    if label_name:
        model_param.label_name = label_name
    if exclusive_data_type is not None:
        model_meta.exclusive_data_type.update(exclusive_data_type)
    return model_meta, model_param
def load_data_transform_model(model_name="DataTransform",
                              model_meta=None,
                              model_param=None):
    """Unpack a (DataTransformMeta, DataTransformParam) pair into a settings tuple."""
    delimitor = model_meta.delimitor
    data_type = model_meta.data_type
    tag_with_value = model_meta.tag_with_value
    tag_value_delimitor = model_meta.tag_value_delimitor
    with_label = model_meta.with_label
    label_name = model_meta.label_name if with_label else None
    label_type = model_meta.label_type if with_label else None
    with_match_id = model_meta.with_match_id
    output_format = model_meta.output_format
    # an empty protobuf repeated field normalizes to None
    header = list(model_param.header) or None
    sid_name = None
    if model_param.sid_name:
        sid_name = model_param.sid_name
    exclusive_data_type = None
    if model_meta.exclusive_data_type:
        exclusive_data_type = {}
        for col_name in model_meta.exclusive_data_type:
            exclusive_data_type[col_name] = model_meta.exclusive_data_type.get(col_name)
    return delimitor, data_type, exclusive_data_type, tag_with_value, tag_value_delimitor, with_label, \
        label_type, output_format, header, sid_name, label_name, with_match_id
def save_missing_imputer_model(missing_fill=False,
                               missing_replace_method=None,
                               missing_impute=None,
                               missing_fill_value=None,
                               missing_replace_rate=None,
                               header=None,
                               model_name="Imputer"):
    """Pack imputer settings into (DataTransformImputerMeta, DataTransformImputerParam)."""
    model_meta = DataTransformImputerMeta()
    model_param = DataTransformImputerParam()
    model_meta.is_imputer = missing_fill
    if missing_fill:
        if missing_replace_method:
            model_meta.strategy = str(missing_replace_method)
        if missing_impute is not None:
            # protobuf stores missing markers as strings
            model_meta.missing_value.extend(map(str, missing_impute))
        if missing_fill_value is not None:
            feature_value_dict = dict(zip(header, map(str, missing_fill_value)))
            model_param.missing_replace_value.update(feature_value_dict)
        if missing_replace_rate is not None:
            missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
            model_param.missing_value_ratio.update(missing_replace_rate_dict)
    return model_meta, model_param
def load_missing_imputer_model(header=None,
                               model_name="Imputer",
                               model_meta=None,
                               model_param=None):
    """Rebuild imputer settings (enabled, method, markers, fill values) from protobuf."""
    enabled = model_meta.is_imputer
    method = model_meta.strategy
    markers = model_meta.missing_value
    fill_map = model_param.missing_replace_value
    if not enabled:
        # imputation disabled: every other field is irrelevant
        return enabled, None, None, None
    method = method or None
    markers = list(markers) if markers else None
    # reorder the per-feature fill values to match the header order
    fill = [fill_map.get(col) for col in header] if fill_map else None
    return enabled, method, markers, fill
def save_outlier_model(outlier_replace=False,
                       outlier_replace_method=None,
                       outlier_impute=None,
                       outlier_replace_value=None,
                       outlier_replace_rate=None,
                       header=None,
                       model_name="Outlier"):
    """Pack outlier settings into (DataTransformOutlierMeta, DataTransformOutlierParam)."""
    model_meta = DataTransformOutlierMeta()
    model_param = DataTransformOutlierParam()
    model_meta.is_outlier = outlier_replace
    if outlier_replace:
        if outlier_replace_method:
            model_meta.strategy = str(outlier_replace_method)
        if outlier_impute:
            # protobuf stores outlier markers as strings
            model_meta.outlier_value.extend(map(str, outlier_impute))
        if outlier_replace_value:
            outlier_value_dict = dict(zip(header, map(str, outlier_replace_value)))
            model_param.outlier_replace_value.update(outlier_value_dict)
        if outlier_replace_rate:
            outlier_value_ratio_dict = dict(zip(header, outlier_replace_rate))
            model_param.outlier_value_ratio.update(outlier_value_ratio_dict)
    return model_meta, model_param
def load_outlier_model(header=None,
                       model_name="Outlier",
                       model_meta=None,
                       model_param=None):
    """Rebuild outlier settings (enabled, method, markers, replacements) from protobuf."""
    outlier_replace = model_meta.is_outlier
    outlier_replace_method = model_meta.strategy
    outlier_value = model_meta.outlier_value
    outlier_replace_value = model_param.outlier_replace_value
    if outlier_replace:
        if not outlier_replace_method:
            outlier_replace_method = None
        if not outlier_value:
            outlier_value = None
        else:
            outlier_value = list(outlier_value)
        if outlier_replace_value:
            # reorder the per-feature replacements to match the header order
            outlier_replace_value = [outlier_replace_value.get(head) for head in header]
        else:
            outlier_replace_value = None
    else:
        outlier_replace_method = None
        outlier_value = None
        outlier_replace_value = None
    return outlier_replace, outlier_replace_method, outlier_value, outlier_replace_value
| 43.308013
| 141
| 0.570823
|
4a13f95263e1c00cea2eae418a8f4b3f3e691536
| 3,216
|
py
|
Python
|
alexandria/api.py
|
HarkonenBade/alexandria
|
2e16dbf2d11c7928d0a661b28bfc2552b68cb3fe
|
[
"MIT"
] | null | null | null |
alexandria/api.py
|
HarkonenBade/alexandria
|
2e16dbf2d11c7928d0a661b28bfc2552b68cb3fe
|
[
"MIT"
] | null | null | null |
alexandria/api.py
|
HarkonenBade/alexandria
|
2e16dbf2d11c7928d0a661b28bfc2552b68cb3fe
|
[
"MIT"
] | null | null | null |
import datetime
import functools
from flask import (abort, g, jsonify, redirect,
request, render_template, url_for)
from . import app, db
def render(template, **kwargs):
    """Render *template*, injecting the current user (if logged in) and common app URLs."""
    if hasattr(g, "user"):
        kwargs['user'] = db.obj_to_dict(g.user)
    kwargs['urls'] = {'root': url_for('root'),
                      'add_quote': url_for('add_quote'),
                      'add_user': url_for('add_user'),
                      'login': url_for('login')}
    return render_template(template, **kwargs)
@app.before_request
def before_request():
    """Open a fresh DB session for this request (stored on flask.g)."""
    g.sesh = db.Session()
@app.teardown_request
def teardown_request(exception):
    """Close the request's DB session, if one was opened."""
    sesh = getattr(g, 'sesh', None)
    if sesh is not None:
        sesh.close()
def token_check(admin=False):
    """Decorator factory: require a valid auth-token cookie (optionally admin) before the view runs."""
    def wrapper(func):
        @functools.wraps(func)
        def wrap(*args, **kwargs):
            if 'token' not in request.cookies:
                return redirect(url_for('login'))
            user = db.check_token(g.sesh,
                                  request.cookies['token'],
                                  admin)
            if user is None:
                # a valid non-admin token hitting an admin route is a hard 401;
                # anything else goes back to the login page
                if admin and db.check_token(g.sesh,
                                            request.cookies['token'],
                                            False):
                    abort(401)
                return redirect(url_for('login'))
            g.user = user
            return func(*args, **kwargs)
        return wrap
    return wrapper
@app.route("/login")
def login():
    """Serve the login page."""
    return render("login.html")
@app.route("/user", methods=['POST'])
@token_check(admin=True)
def add_user():
    """Create a user (admin only): 422 on missing/empty name, 201 with the new user JSON."""
    if('name' not in request.json or
       request.json['name'] == ""):
        abort(422)
    user = db.User()
    user.name = request.json['name']
    # mere presence of the 'admin' key grants admin rights — its value is ignored
    user.admin = 'admin' in request.json
    user.token = db.new_token()
    user.created_by = g.user.id
    g.sesh.add(user)
    g.sesh.commit()
    return jsonify(db.obj_to_dict(user)), 201
@app.route("/quote", methods=['POST'])
@token_check()
def add_quote():
    """Submit a quote: 422 when text/person are missing or empty, 201 with the new id."""
    if('text' not in request.json or
       'person' not in request.json or
       request.json['text'] == "" or
       request.json['person'] == ""):
        abort(422)
    quote = db.Quote()
    quote.text = request.json['text']
    quote.person = request.json['person']
    quote.date_added = datetime.datetime.now()
    quote.submitter = g.user.id
    g.sesh.add(quote)
    g.sesh.commit()
    return jsonify({'id': quote.id}), 201
@app.route("/", methods=['GET'])
@token_check()
def root():
    """List quotes newest-first, optionally filtered by a ?user=<id> query arg."""
    query = (g.sesh.query(db.Quote, db.User).
             join(db.User))
    if "user" in request.args:
        query = query.filter(db.User.id == int(request.args['user']))
    result = query.order_by(db.Quote.id.desc()).all()
    quotes = [{'id': q.id,
               'text': q.text,
               'person': q.person,
               'date': q.date_added.isoformat(),
               'submitter': {'id': u.id,
                             'name': u.name}}
              for q, u in result]
    return render("index.html", quotes=quotes)
| 26.360656
| 69
| 0.52643
|
4a13fa09997acc0602b445a7a17b76d60dc822ca
| 4,831
|
py
|
Python
|
src/sklearn_evaluation/util.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 351
|
2016-01-27T19:15:27.000Z
|
2022-03-09T15:40:56.000Z
|
src/sklearn_evaluation/util.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 37
|
2016-03-16T03:57:59.000Z
|
2021-06-26T14:02:33.000Z
|
src/sklearn_evaluation/util.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 30
|
2016-01-27T19:27:08.000Z
|
2022-03-31T06:09:59.000Z
|
from copy import copy
from inspect import signature, _empty
import re
from collections.abc import Iterable
from collections import defaultdict
from itertools import product
from six import string_types
def isiter(obj):
    """Return True when *obj* supports iteration (i.e. iter(obj) succeeds)."""
    try:
        iter(obj)
        return True
    except TypeError:
        return False
def estimator_type(model):
    """Return the bare class name of *model*, parsed from its type() repr."""
    type_repr = str(type(model))
    dotted = re.search(".*'(.+?)'.*", type_repr).group(1)
    return dotted.split(".")[-1]
def class_name(obj):
    """Return the (possibly dotted) class name of *obj* as shown in its type() repr."""
    raw = str(type(obj))
    return re.search(".*'(.+?)'.*", raw).group(1)
def _can_iterate(obj):
is_string = isinstance(obj, string_types)
is_iterable = isinstance(obj, Iterable)
return is_iterable and not is_string
def is_column_vector(x):
    """True when *x* is a 2-D array with exactly one column."""
    shape = x.shape
    return len(shape) == 2 and shape[1] == 1
def is_row_vector(x):
    """True when *x* is a 1-D array."""
    shape = x.shape
    return len(shape) == 1
def _group_by(data, criteria):
"""
Group objects in data using a function or a key
"""
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res
def _get_params_value(params):
"""
Given an iterator (k1, k2), returns a function that when called
with an object obj returns a tuple of the form:
((k1, obj.parameters[k1]), (k2, obj.parameters[k2]))
"""
# sort params for consistency
ord_params = sorted(params)
def fn(obj):
keys = []
for p in ord_params:
try:
keys.append((p, obj.parameters[p]))
except KeyError:
raise ValueError('{} is not a valid parameter'.format(p))
return tuple(keys)
return fn
def _sorted_map_iter(d):
ord_keys = sorted(d.keys())
for k in ord_keys:
yield (k, d[k])
def _product(k, v):
    """
    Perform the product between two objects
    even if they don't support iteration
    """
    # wrap scalars (and strings) as singletons so product() always works
    left = k if _can_iterate(k) else [k]
    right = v if _can_iterate(v) else [v]
    return list(product(left, right))
def _mapping_to_tuple_pairs(d):
    """
    Convert a mapping object (such as a dictionary) to tuple pairs,
    using its keys and values to generate the pairs and then generating
    all possible combinations between those
    e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),))
    """
    # iterate keys in sorted order so the result is deterministic across
    # Python implementations regardless of dict iteration order
    per_key = [_product(key, d[key]) for key in sorted(d.keys())]
    return tuple(product(*per_key))
def _flatten_list(elements):
return [item for sublist in elements for item in sublist]
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """
    http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib # noqa
    """
    import matplotlib.colors as colors
    import numpy as np

    name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    # Sample the [minval, maxval] slice of the original colormap.
    samples = cmap(np.linspace(minval, maxval, n))
    return colors.LinearSegmentedColormap.from_list(name, samples)
def default_heatmap():
    """Return the default heatmap colormap: a truncated OrRd."""
    import matplotlib.pyplot as plt

    return truncate_colormap(plt.cm.OrRd, 0.1, 0.7)
def map_parameters_in_fn_call(args, kwargs, func):
    """
    Based on function signature, parse args to convert them to key-value
    pairs and merge them with kwargs.

    Any parameter found in args that does not match the function signature
    is still passed. Missing parameters are filled with their default values.
    """
    sig = signature(func)

    # Parameters not already supplied as keyword arguments may come from args
    args_spec = list(sig.parameters)
    params_missing = set(args_spec) - set(kwargs.keys())

    # 'self' occupies slot 0 of the signature but is not present in args
    offset = 1 if 'self' in args_spec else 0

    parsed = copy(kwargs)
    for name in params_missing:
        idx = args_spec.index(name)
        try:
            parsed[name] = args[idx - offset]
        except IndexError:
            # parameter was not passed positionally either; maybe a default
            pass

    # Fill default values for anything still missing.
    # NOTE: compare against the sentinel with an identity check — '!=' would
    # invoke __ne__ on the default value (e.g. a numpy array default), which
    # can raise or return a non-bool.
    default = {
        k: v.default
        for k, v in sig.parameters.items() if v.default is not _empty
    }
    for k, v in default.items():
        parsed.setdefault(k, v)
    return parsed
| 25.426316
| 124
| 0.628027
|
4a13fac25fdb0e1045ab625574d9b38e9f6e1ad3
| 1,707
|
py
|
Python
|
tests/test_keys.py
|
rcarmo/miniredis
|
39f49249cddd11a4b333932fdb8157273a8a1dc4
|
[
"MIT"
] | 1
|
2020-11-15T13:35:55.000Z
|
2020-11-15T13:35:55.000Z
|
tests/test_keys.py
|
rcarmo/miniredis
|
39f49249cddd11a4b333932fdb8157273a8a1dc4
|
[
"MIT"
] | null | null | null |
tests/test_keys.py
|
rcarmo/miniredis
|
39f49249cddd11a4b333932fdb8157273a8a1dc4
|
[
"MIT"
] | null | null | null |
# vim :set ts=4 sw=4 sts=4 et :
import os, sys, signal, time
from nose.tools import ok_, eq_, istest
sys.path.append('..')
import miniredis.server
from miniredis.client import RedisClient
pid = None  # pid of the forked miniredis server (set by setup_module)
r = None    # RedisClient connected to that server (set by setup_module)
def setup_module(module):
    """Nose module fixture: fork a miniredis server and connect a client."""
    global pid, r
    pid = miniredis.server.fork()
    print("Launched server with pid %d." % pid)
    time.sleep(1)  # give the forked server time to start listening
    r = RedisClient()
def teardown_module(module):
    """Nose module fixture: kill the forked server after the tests."""
    global pid
    os.kill(pid, signal.SIGKILL)
    print("Killed server.")
def test_put():
    # SET replies 'OK' on success
    eq_(r.set('test:key', 'value'), 'OK')
def test_get():
    # GET returns the value stored by test_put (tests run in file order)
    eq_(r.get('test:key'), 'value')
def test_del():
    """DEL returns the number of keys removed; removed keys read as None."""
    # single key
    eq_(r.delete('test:key'), 1)
    eq_(r.get('test:key'), None)
    # multiple keys
    r.set('test:key1', 'value')
    r.set('test:key2', 'value')
    eq_(r.delete('test:key1', 'test:key2'), 2)
def test_dump():
    # DUMP of a freshly-set key returns its raw value in this implementation
    eq_(r.set('test:key', 'value'), 'OK')
    eq_(r.dump('test:key'), 'value')
def test_exists():
    # EXISTS replies 1 for present keys and 0 for absent ones
    eq_(r.exists('test:key'), 1)
    eq_(r.exists('test:notthere'), 0)
def test_expire():
    """EXPIRE: 0 for missing keys, 1 on success; SET clears the ttl (-1)."""
    # missing key
    eq_(r.expire('test:notthere', 2), 0)
    # valid setting
    eq_(r.expire('test:key', 2), 1)
    eq_(r.ttl('test:key'), 2)
    # reset ttl
    eq_(r.set('test:key', 'value'), 'OK')
    eq_(r.ttl('test:key'), -1)
def test_expireat():
    """EXPIREAT with an absolute timestamp; SET clears the ttl (-1)."""
    # missing key
    at = int(time.time() + 2)
    eq_(r.expireat('test:notthere', at), 0)
    # valid setting (timestamp recomputed so the ttl check still sees 2s)
    at = int(time.time() + 2)
    eq_(r.expireat('test:key', at), 1)
    eq_(r.ttl('test:key'), 2)
    # reset ttl
    eq_(r.set('test:key', 'value'), 'OK')
    eq_(r.ttl('test:key'), -1)
def test_keys():
    """KEYS with a glob pattern returns the matching key names."""
    # place a test key
    eq_(r.set('test:key', 'value'), 'OK')
    eq_(r.keys('*:key'), ['test:key'])
| 22.460526
| 47
| 0.588166
|
4a13fc45f84ebec2881ee5fc2831a856bfaef01b
| 3,994
|
py
|
Python
|
everything_at_once/model/utils/layers.py
|
ninatu/everything_at_once
|
b4cd3a70076ea3ea2b40832aa3e2afab50495c47
|
[
"BSD-3-Clause"
] | null | null | null |
everything_at_once/model/utils/layers.py
|
ninatu/everything_at_once
|
b4cd3a70076ea3ea2b40832aa3e2afab50495c47
|
[
"BSD-3-Clause"
] | null | null | null |
everything_at_once/model/utils/layers.py
|
ninatu/everything_at_once
|
b4cd3a70076ea3ea2b40832aa3e2afab50495c47
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch as th
from torch import nn as nn
from torch.nn import functional as F
from timm.models.vision_transformer import DropPath, Mlp, Attention
class GatedEmbeddingUnit(nn.Module):
    """Linear projection followed by context gating."""

    def __init__(self, input_dimension, output_dimension):
        super().__init__()
        self.fc = nn.Linear(input_dimension, output_dimension)
        self.cg = ContextGating(output_dimension)

    def forward(self, x):
        projected = self.fc(x)
        return self.cg(projected)
class FusedGatedUnit(nn.Module):
    """Project audio and text separately, sum them, then apply context gating."""

    def __init__(self, input_dimension, output_dimension):
        super(FusedGatedUnit, self).__init__()
        self.fc_audio = nn.Linear(input_dimension, output_dimension)
        self.fc_text = nn.Linear(input_dimension, output_dimension)
        self.cg = ContextGating(output_dimension)

    def forward(self, audio, text):
        fused = self.fc_audio(audio) + self.fc_text(text)
        return self.cg(fused)
class ContextGating(nn.Module):
    """Context gating: scales each input by a learned sigmoid gate via GLU."""

    def __init__(self, dimension):
        super(ContextGating, self).__init__()
        self.fc = nn.Linear(dimension, dimension)

    def forward(self, x):
        gate = self.fc(x)
        # GLU over the concatenation computes x * sigmoid(gate)
        stacked = th.cat((x, gate), 1)
        return F.glu(stacked, 1)
class SentenceMaxpool(nn.Module):
    """Project word embeddings, apply ReLU, then max-pool over the word axis."""

    def __init__(self, word_dimension, output_dim):
        super(SentenceMaxpool, self).__init__()
        self.fc = nn.Linear(word_dimension, output_dim)

    def forward(self, x):
        activated = F.relu(self.fc(x))
        pooled, _ = torch.max(activated, dim=1)
        return pooled
class FusionBlock(nn.Module):
    """
    Adopted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    Copyright 2020, Ross Wightman

    Pre-norm transformer block (attention + MLP, each wrapped in a residual
    connection) whose attention accepts an optional padding mask.
    """
    def __init__(
            self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None,
            drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        # NOTE(review): init_values is accepted but never used — confirm intended.
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = FusionAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, attention_mask=None):
        # attention_mask: positions equal to 0 are excluded from attention
        # (see FusionAttention.forward).
        x = x + self.drop_path(self.attn(self.norm1(x), attention_mask))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class FusionAttention(Attention):
    """
    Adopted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    Copyright 2020, Ross Wightman

    timm Attention whose forward additionally supports an attention mask:
    key positions where attention_mask == 0 are set to -inf before softmax.
    """
    def forward(self, x, attention_mask=None):
        B, N, C = x.shape
        # (3, B, num_heads, N, head_dim): one slice each for q, k, v
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if attention_mask is not None:
            zero_attention_mask = (attention_mask == 0).view(B, 1, 1, N).expand_as(attn)  # (bs, n_heads, q_length, k_length)
            attn.masked_fill_(zero_attention_mask, -float("inf"))  # (bs, n_heads, q_length, k_length)
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def get_projection(input_dim, output_dim, projection_type):
    """Build a projection layer.

    projection_type: 'minimal' -> plain Linear, 'gated' -> GatedEmbeddingUnit,
    '' -> Identity. Anything else raises NotImplementedError.
    """
    # Use a single consistent if/elif chain (the original mixed bare ifs
    # with elif) and name the offending value in the error.
    if projection_type == 'minimal':
        return nn.Linear(input_dim, output_dim)
    elif projection_type == 'gated':
        return GatedEmbeddingUnit(input_dim, output_dim)
    elif projection_type == '':
        return nn.Identity()
    else:
        raise NotImplementedError(
            'Unknown projection type: {!r}'.format(projection_type))
| 35.035088
| 125
| 0.646219
|
4a13fd68c16215e96b21d46451d11dbc3b68ac5e
| 7,547
|
py
|
Python
|
ogb_examples/nodeproppred/unimp/main_arxiv.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 1,389
|
2019-06-11T03:29:20.000Z
|
2022-03-29T18:25:43.000Z
|
ogb_examples/nodeproppred/unimp/main_arxiv.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 232
|
2019-06-21T06:52:10.000Z
|
2022-03-29T08:20:31.000Z
|
ogb_examples/nodeproppred/unimp/main_arxiv.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 229
|
2019-06-20T12:13:58.000Z
|
2022-03-25T12:04:48.000Z
|
import math
import torch
import paddle
import pgl
import numpy as np
import paddle.fluid as F
import paddle.fluid.layers as L
from pgl.contrib.ogb.nodeproppred.dataset_pgl import PglNodePropPredDataset
from ogb.nodeproppred import Evaluator
from utils import to_undirected, add_self_loop, linear_warmup_decay
from model import Arxiv_baseline_model, Arxiv_label_embedding_model
import argparse
from tqdm import tqdm
# Single OGB evaluator shared by eval_test for all accuracy computations.
evaluator = Evaluator(name='ogbn-arxiv')
# place=F.CUDAPlace(6)
def get_config():
    """Parse the command-line options for the arxiv training script."""
    parser = argparse.ArgumentParser()

    # base model hyper-parameters
    model_group = parser.add_argument_group('model_base_arg')
    model_group.add_argument('--num_layers', default=3, type=int)
    model_group.add_argument('--hidden_size', default=128, type=int)
    model_group.add_argument('--num_heads', default=2, type=int)
    model_group.add_argument('--dropout', default=0.3, type=float)
    model_group.add_argument('--attn_dropout', default=0, type=float)

    # label-embedding options
    embed_group = parser.add_argument_group('embed_arg')
    embed_group.add_argument('--use_label_e', action='store_true')
    embed_group.add_argument('--label_rate', default=0.625, type=float)

    # training options
    train_group = parser.add_argument_group('train_arg')
    train_group.add_argument('--runs', default=10, type=int)
    train_group.add_argument('--epochs', default=2000, type=int)
    train_group.add_argument('--lr', default=0.001, type=float)
    train_group.add_argument('--place', default=-1, type=int)
    train_group.add_argument('--log_file', default='result_arxiv.txt', type=str)

    return parser.parse_args()
# def optimizer_func(lr=0.01):
# return F.optimizer.AdamOptimizer(learning_rate=lr, regularization=F.regularizer.L2Decay(
# regularization_coeff=0.001))
def optimizer_func(lr=0.01):
    """Return an Adam optimizer with L2 weight decay (coeff 0.0005)."""
    return F.optimizer.AdamOptimizer(learning_rate=lr, regularization=F.regularizer.L2Decay(
        regularization_coeff=0.0005))
def eval_test(parser, program, model, test_exe, graph, y_true, split_idx):
    """Run the test program once and return (train_acc, valid_acc, test_acc)."""
    feed_dict = model.gw.to_feed(graph)
    # feed_dict={}
    if parser.use_label_e:
        # At evaluation time the whole training split is fed as labelled nodes.
        feed_dict['label'] = y_true
        feed_dict['label_idx'] = split_idx['train']
    avg_cost_np = test_exe.run(
        program=program,
        feed=feed_dict,
        fetch_list=[model.out_feat])
    # argmax over class logits -> predicted label, reshaped to (n, 1)
    # because the OGB evaluator expects a column vector.
    y_pred = avg_cost_np[0].argmax(axis=-1)
    y_pred = np.expand_dims(y_pred, 1)
    train_acc = evaluator.eval({
        'y_true': y_true[split_idx['train']],
        'y_pred': y_pred[split_idx['train']],
    })['acc']
    val_acc = evaluator.eval({
        'y_true': y_true[split_idx['valid']],
        'y_pred': y_pred[split_idx['valid']],
    })['acc']
    test_acc = evaluator.eval({
        'y_true': y_true[split_idx['test']],
        'y_pred': y_pred[split_idx['test']],
    })['acc']
    return train_acc, val_acc, test_acc
def train_loop(parser, start_program, main_program, test_program,
               model, graph, label, split_idx, exe, run_id, wf=None):
    """Run one full training run.

    Trains for parser.epochs epochs, logging metrics to *wf*, and returns
    the test accuracy observed at the epoch of best validation accuracy.
    """
    # build up training program
    exe.run(start_program)

    max_acc = 0       # best test_acc seen so far
    max_step = 0      # epoch of best test_acc
    max_val_acc = 0   # best val_acc seen so far
    max_cor_acc = 0   # test_acc at the epoch of best val_acc
    max_cor_step = 0  # epoch of best val_acc

    # training loop
    for epoch_id in tqdm(range(parser.epochs)):
        feed_dict = model.gw.to_feed(graph)
        if parser.use_label_e:
            # Split the training ids into a labelled part (fed as label
            # embeddings) and an unlabelled part (used for the loss).
            # Copy before shuffling: the original shuffled split_idx['train']
            # in place, mutating the caller's split.
            train_idx_temp = np.array(split_idx['train'])
            np.random.shuffle(train_idx_temp)
            cut = int(parser.label_rate * len(train_idx_temp))
            feed_dict['label'] = label
            feed_dict['label_idx'] = train_idx_temp[:cut]
            feed_dict['train_idx'] = train_idx_temp[cut:]
        else:
            feed_dict['label'] = label
            feed_dict['train_idx'] = split_idx['train']

        loss = exe.run(main_program,
                       feed=feed_dict,
                       fetch_list=[model.avg_cost])
        loss = loss[0]

        # eval result
        train_acc, valid_acc, test_acc = eval_test(
            parser, test_program, model, exe, graph, label, split_idx)

        # Track the best test accuracy (the original duplicated this
        # bookkeeping a second time with identical effect).
        max_acc = max(test_acc, max_acc)
        if max_acc == test_acc:
            max_step = epoch_id
        # Track the test accuracy at the best validation accuracy.
        max_val_acc = max(valid_acc, max_val_acc)
        if max_val_acc == valid_acc:
            max_cor_acc = test_acc
            max_cor_step = epoch_id

        result_t = (f'Run: {run_id:02d}, '
                    f'Epoch: {epoch_id:02d}, '
                    f'Loss: {loss[0]:.4f}, '
                    f'Train: {100 * train_acc:.2f}%, '
                    f'Valid: {100 * valid_acc:.2f}%, '
                    f'Test: {100 * test_acc:.2f}% \n'
                    f'max_Test: {100 * max_acc:.2f}%, '
                    f'max_step: {max_step}\n'
                    f'max_val: {100 * max_val_acc:.2f}%, '
                    f'max_val_Test: {100 * max_cor_acc:.2f}%, '
                    f'max_val_step: {max_cor_step}\n')
        if (epoch_id + 1) % 100 == 0:
            print(result_t)
        wf.write(result_t)
        wf.write('\n')
        wf.flush()
    return max_cor_acc
if __name__ == '__main__':
    parser = get_config()
    print('===========args==============')
    print(parser)
    print('=============================')
    startup_prog = F.default_startup_program()
    train_prog = F.default_main_program()
    # place < 0 selects CPU; otherwise the given CUDA device index.
    place = F.CPUPlace() if parser.place < 0 else F.CUDAPlace(parser.place)
    dataset = PglNodePropPredDataset(name="ogbn-arxiv")
    split_idx = dataset.get_idx_split()
    graph, label = dataset[0]
    print(label.shape)
    # ogbn-arxiv is directed; make it undirected and add self loops
    # before building the graph wrapper.
    graph = to_undirected(graph)
    graph = add_self_loop(graph)
    with F.program_guard(train_prog, startup_prog):
        with F.unique_name.guard():
            gw = pgl.graph_wrapper.GraphWrapper(
                name="arxiv", node_feat=graph.node_feat_info(), place=place)
            # gw = pgl.graph_wrapper.StaticGraphWrapper(name="graph",
            #                                           graph=graph,
            #                                           place=place)
            # gw.initialize(place)
            # (gw, hidden_size, num_heads, dropout, num_layers)
            if parser.use_label_e:
                model = Arxiv_label_embedding_model(gw, parser.hidden_size, parser.num_heads,
                                                    parser.dropout, parser.num_layers)
            else:
                model = Arxiv_baseline_model(gw, parser.hidden_size, parser.num_heads,
                                             parser.dropout, parser.num_layers)
            test_prog = train_prog.clone(for_test=True)
            model.train_program()
            adam_optimizer = optimizer_func(parser.lr)  # optimizer
            adam_optimizer.minimize(model.avg_cost)
    exe = F.Executor(place)
    wf = open(parser.log_file, 'w', encoding='utf-8')
    total_test_acc = 0.0
    # Average the best test accuracy over parser.runs independent runs.
    for run_i in range(parser.runs):
        total_test_acc += train_loop(parser, startup_prog, train_prog, test_prog, model,
                                     graph, label, split_idx, exe, run_i, wf)
    wf.write(f'average: {100 * (total_test_acc/parser.runs):.2f}%')
    wf.close()
| 36.635922
| 94
| 0.610706
|
4a140054186605cc7c0d95d5c1eb54658b66007c
| 4,970
|
py
|
Python
|
out_feature.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
out_feature.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
out_feature.py
|
WilliamGong/traffic-analyze
|
687481f1f53d334a534f8cbdcc66bc2420d18f6c
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Use the KaiTi font so Chinese labels render correctly in matplotlib.
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['font.serif'] = ['KaiTi']
# plt.rcParams['axes.unicode_minus'] = False  # fix minus signs rendering as boxes in saved figures
class OutFeature:
    """Aggregate travel-survey answers from a dataframe into count dicts.

    Each num* attribute maps a human-readable answer label to the number of
    respondents who gave that answer. Fix over the original: the counters
    were mutable *class* attributes mutated by the instance methods, so
    counts accumulated across every OutFeature instance ever created; they
    are now fresh per-instance dicts built in __init__.
    """

    # Answer-code (1-5) -> label tables for the single-choice questions.
    _OUT_DAILY_LABELS = {1: '每天6次以上', 2: '每天4-6次', 3: '每天2-3次',
                         4: '每天一次', 5: '几乎不出门'}
    _OUT_AVG_LABELS = {1: '10km+', 2: '5-10km', 3: '3-5km',
                       4: '1-3km', 5: '1km-'}
    _IS_PUBLIC_LABELS = {1: '每天使用', 2: '经常使用', 3: '偶尔使用',
                         4: '几乎不使用', 5: '不使用'}

    def __init__(self, df):
        # count of out daily
        self.outDaily = df['num_out_daily']
        # average distance daily
        self.outAvg = df['dis_out_avg']
        # order of out aims (rank given by the respondent; 1 = first choice)
        self.orderAimDaily = df['order_daily']
        self.orderAimLife = df['order_life']
        self.orderAimPlay = df['order_play']
        self.orderAimNoOut = df['order_no_out']
        # order of out methods
        self.orderWayPublic = df['order_public']
        self.orderWayPrivate = df['order_private']
        self.orderWayElec = df['order_elec_bike']
        self.orderWayFoot = df['order_foot']
        self.orderWayOthers = df['order_others']
        # use of public transportation
        self.isPublic = df['is_public']

        # Fresh per-instance counters (see class docstring).
        self.numOutDaily = {label: 0 for label in self._OUT_DAILY_LABELS.values()}
        self.numOutAvg = {label: 0 for label in self._OUT_AVG_LABELS.values()}
        self.numFirstOrderAim = {'通勤': 0, '生活': 0, '娱乐': 0, '几乎不出行': 0}
        self.numFirstOrderWay = {'公共交通': 0, '私家车': 0, '自行车电动车': 0,
                                 '步行': 0, '其他': 0}
        self.numIsPublic = {label: 0 for label in self._IS_PUBLIC_LABELS.values()}

        self.putNumOutDaily()
        self.putNumOutAvg()
        self.putNumFirstOrderAim()
        self.putNumFirstOrderWay()
        self.putNumIsPublic()

    # --- counting helpers -------------------------------------------------

    def _count_coded(self, answers, labels, counter):
        """Tally coded answers (1-5) into *counter* via the *labels* table."""
        for code in answers:
            if code in labels:
                counter[labels[code]] += 1

    def _count_first_rank(self, answers, counter, label):
        """Count respondents who ranked *label* first (answer == 1)."""
        for rank in answers:
            if rank == 1:
                counter[label] += 1

    def putNumOutDaily(self):
        self._count_coded(self.outDaily, self._OUT_DAILY_LABELS, self.numOutDaily)

    def putNumOutAvg(self):
        self._count_coded(self.outAvg, self._OUT_AVG_LABELS, self.numOutAvg)

    def putNumFirstOrderAim(self):
        self._count_first_rank(self.orderAimDaily, self.numFirstOrderAim, '通勤')
        self._count_first_rank(self.orderAimLife, self.numFirstOrderAim, '生活')
        self._count_first_rank(self.orderAimPlay, self.numFirstOrderAim, '娱乐')
        self._count_first_rank(self.orderAimNoOut, self.numFirstOrderAim, '几乎不出行')

    def putNumFirstOrderWay(self):
        self._count_first_rank(self.orderWayPublic, self.numFirstOrderWay, '公共交通')
        self._count_first_rank(self.orderWayPrivate, self.numFirstOrderWay, '私家车')
        self._count_first_rank(self.orderWayElec, self.numFirstOrderWay, '自行车电动车')
        self._count_first_rank(self.orderWayFoot, self.numFirstOrderWay, '步行')
        self._count_first_rank(self.orderWayOthers, self.numFirstOrderWay, '其他')

    def putNumIsPublic(self):
        self._count_coded(self.isPublic, self._IS_PUBLIC_LABELS, self.numIsPublic)

    # --- plotting ---------------------------------------------------------

    def drawNumOutDaily(self):
        """Bar chart: number of trips per day."""
        pd.Series(self.numOutDaily, name='每日出行次数').plot.bar()
        plt.show()

    def drawNumOutAvg(self):
        """Bar chart: average daily travel distance."""
        pd.Series(self.numOutAvg, name='每日平均出行距离').plot.bar()
        plt.show()

    def drawNumFirstOrderAim(self):
        """Bar chart: first-choice travel purpose."""
        pd.Series(self.numFirstOrderAim).plot.bar()
        plt.show()

    def drawNumFirstOrderWay(self):
        """Bar chart: first-choice travel mode."""
        pd.Series(self.numFirstOrderWay).plot.bar()
        plt.show()

    def drawNumIsPublic(self):
        """Pie chart: frequency of public-transport use."""
        pd.Series(self.numIsPublic, name='使用公共交通的频率').plot.pie(autopct='%.2f')
        plt.show()
| 28.238636
| 78
| 0.502414
|
4a1400883fa783f1b08216737c25aef2ecd73c10
| 2,811
|
py
|
Python
|
Assignment_3/2016csb1032.py
|
atlkdr/Social_Networks
|
54b7c3face9f2d54248dc39d30bfd15ae96eb257
|
[
"MIT"
] | null | null | null |
Assignment_3/2016csb1032.py
|
atlkdr/Social_Networks
|
54b7c3face9f2d54248dc39d30bfd15ae96eb257
|
[
"MIT"
] | null | null | null |
Assignment_3/2016csb1032.py
|
atlkdr/Social_Networks
|
54b7c3face9f2d54248dc39d30bfd15ae96eb257
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import networkx as nx
import random as rd
def random_path(G, cwl, show_teleport):  # CRAWLS THE GRAPH BY TAKING RANDOM PATHS GENERATED FROM RANDOM NODES WITH PROBABILITY OF 0.2
    """Estimate PageRank by a random walk with teleport probability 0.2.

    NOTE: Python 2 code (print statements, iteritems, tuple-unpacking lambda).
    """
    dictionary = {}  # key = node, value = times visited while crawling in total
    temp = list(G.nodes())
    for i in temp:
        dictionary[i] = 0  # setting initial visit counts to 0
    for i in range(0, cwl):  # max random start nodes chosen to crawl the graph
        rand_node = rd.choice(list(G.node()))
        dictionary[rand_node] += 1
        crawl_probability = rd.uniform(0, 1)  # a draw in (0, 0.2) from (0, 1) has probability 0.2
        while(crawl_probability > 0.2):  # crawls with probability 0.8
            if show_teleport:
                print "\n", "Crawling_from_selected_node_with_P:", crawl_probability, "-----------------"  # probability of crawling becomes 0.8
            rand_node = rd.choice(list(G.neighbors(rand_node)))
            dictionary[rand_node] += 1
            # Exit while loop to choose a new node with probability 0.2 or continue crawling with probability 0.8
            crawl_probability = rd.uniform(0, 1)
            if show_teleport:
                print "\n", "-------------Teleporting_to_random_node_with_P:", crawl_probability  # probability of teleporting becomes 0.2
    max_til = 0     # highest visit count observed
    max_node = -1   # node with the highest visit count
    total_visit = 0
    for i in dictionary:
        total_visit += dictionary[i]
    print "\nCALCULATED_NODE_VISIT_RATIO_THROUGH_RANDOM_WALK_WITH_TELEPORT_PROB_OF_0.2:\n"
    rank = 1
    # Sort nodes by (visit count, node) descending to rank them.
    for i in sorted(dictionary.iteritems(), key=lambda (k, v): (v, k), reverse=True):
        if i[1] > 0:
            print "NODE:", i[0], " CAL_RANK:", rank, " VISITED", i[1], " PRECENTAGE-OF-TOTAL-VISITS:", float(i[1])/total_visit, "%";
            rank += 1
            if i[1] > max_til:
                max_til = i[1]
                max_node = i[0]
    print "\n", "HIGHEST_VISITED_NODE:", max_node, " TIMES_VISITED:", max_til, " PERCENTAGE_OF_VISIT:", float(max_til)/total_visit, "%";
def Pagerank(G):
    """Print networkx's built-in PageRank (alpha=0.9) for comparison."""
    pr = nx.pagerank(G, alpha=0.9)
    print "\n", "RESULTS_FROM_BUILT IN PAGE_RANK_FUNCTION", "\n"
    rank = 1
    for i in sorted(pr.items(), key=lambda x: x[1], reverse=True):
        print "NODE:", i[0], " ACTUAL_RANK:", rank, " PAGERANK:", i[1]
        rank += 1
def Graph_maker(filename, cwl):
    """Load an edge-list graph and run both the random walk and PageRank."""
    G = nx.read_edgelist(filename)
    random_path(G, cwl, show_teleport=False)
    Pagerank(G)
# Interactive entry point: pick the edge-list file and the walk limit.
# NOTE: Python 2 input() evaluates what the user types (hence the
# "with-in ' '" hint for the filename).
f = input("Enter_Filename[1] OR Default[0]: ")
if f == 0:
    cwl = input("ENTER_WALK_LIMIT(different_walks)[1] OR DEFAULT(50000)[0]: ")
    if cwl == 1:
        cwl = input("ENTER_WALK_LIMIT: ")
        Graph_maker('stanford_web_graph.txt', cwl)
    else:
        Graph_maker('stanford_web_graph.txt', 50000)
else:
    name = input("Enter_Filename(with-in ' '): ")
    cwl = input("ENTER_WALK_LIMIT(different_walks)[1] OR DEFAULT(50000)[0]: ")
    if cwl == 1:
        cwl = input("ENTER_WALK_LIMIT: ")
        Graph_maker(name, cwl)
    else:
        Graph_maker(name, 50000)
| 34.280488
| 132
| 0.712914
|
4a1401af54706e4e30252e371cf5b39414a4be25
| 2,118
|
py
|
Python
|
python_client/comm.py
|
slaclab/pabv_control
|
329193bfe219d3472d9d9ab9e9b28a18ff341c2c
|
[
"BSD-3-Clause"
] | 2
|
2020-04-17T22:25:47.000Z
|
2021-06-05T07:44:26.000Z
|
python_client/comm.py
|
slaclab/pabv_control
|
329193bfe219d3472d9d9ab9e9b28a18ff341c2c
|
[
"BSD-3-Clause"
] | 85
|
2020-04-24T14:55:52.000Z
|
2020-09-13T16:36:21.000Z
|
python_client/comm.py
|
slaclab/pabv_control
|
329193bfe219d3472d9d9ab9e9b28a18ff341c2c
|
[
"BSD-3-Clause"
] | 6
|
2020-04-24T05:21:41.000Z
|
2020-12-12T23:25:52.000Z
|
import serial
import serial.tools.list_ports
import io
import message
import traceback
class Comm:
    """Serial transport that auto-discovers the controller's USB-serial port.

    Packets are '-'-terminated ASCII strings decoded by message.Message.
    The connection is lazy: read/write failures drop the handle and the
    next call re-runs port discovery.
    """

    def __init__(self):
        self._ser = None     # open serial.Serial handle, or None when disconnected
        self.port = None     # device path of the identified port
        self.id = None       # 4-int CPU id reported by the device
        self.connects = 0    # number of connection attempts (diagnostics)

    def connect(self):
        """Scan USB-serial ports and latch onto the one that speaks our protocol."""
        self.connects = self.connects + 1
        ports = list(serial.tools.list_ports.comports())
        for port in ports:
            port_no = port.device
            description = port.description
            vid = port.vid
            pid = port.pid
            if(vid == 0x2341): continue  # skip nano USB (Arduino vendor id)
            if (
                'USB-Serial' in description or
                'USB-to-Serial' in description or
                'USB Serial' in description
            ):
                try:
                    ser = serial.Serial(port=port_no, baudrate=57600, timeout=1)
                except:
                    # NOTE(review): a single unopenable port aborts the whole scan
                    return
                # Read up to 1000 packets looking for a valid CPU_ID message
                # carrying exactly 4 ints — that identifies our device.
                for i in range(1000):
                    m = message.Message()
                    self._ser = ser  # install temporarily so readPacket() can use it
                    line = self.readPacket()
                    self._ser = None
                    if line is None: continue
                    try:
                        m.decode(line)
                    except: continue
                    if(m.status != m.ERR_OK): continue
                    if(m.id == m.CPU_ID and m.nInt == 4):
                        self._ser = ser
                        self.port = port_no
                        self.id = m.intData
                        break
                if self._ser:
                    return

    def readPacket(self):
        """Return one '-'-terminated packet (terminator stripped) or None."""
        if(self._ser is None): self.connect()
        if(self._ser is None): return
        l = ''
        while(True):
            try:
                c = self._ser.read().decode('UTF-8')
            except:
                self._ser = None  # drop the handle; next call reconnects
                return None
            if(c != '-'):
                l = l + c
            else:
                return l

    def write(self, data):
        """Best-effort write; on failure the connection is dropped (data lost)."""
        if self._ser is None: self.connect()
        else:
            try:
                self._ser.write(data)
            except:
                self._ser = None
| 29.013699
| 78
| 0.435788
|
4a1402fcbf066e4fdfa139e3f69b3d7c5fb2d9d9
| 11,698
|
py
|
Python
|
src/olympia/amo/templatetags/jinja_helpers.py
|
aki21j/addons-server
|
fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/templatetags/jinja_helpers.py
|
aki21j/addons-server
|
fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/templatetags/jinja_helpers.py
|
aki21j/addons-server
|
fbabd4f2933de12507f1df9c9b3f5dd4183c3ae6
|
[
"BSD-3-Clause"
] | null | null | null |
import json as jsonlib
import os
import random
from urllib.parse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms import CheckboxInput
from django.template import defaultfilters, Library, loader
from django.templatetags.static import static
from django.urls import reverse
from django.utils.encoding import smart_str
from django.utils.html import (
format_html as django_format_html,
strip_spaces_between_tags,
)
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, to_locale, trim_whitespace, gettext
import jinja2
import waffle
from jinja2.ext import Extension
from babel.support import Format
from django_jinja import library
from rest_framework.reverse import reverse as drf_reverse
from rest_framework.settings import api_settings
from olympia import amo
from olympia.amo import urlresolvers, utils
from olympia.amo.reverse import get_url_prefix
from olympia.lib.jingo_minify_helpers import _build_html, get_css_urls, get_js_urls
register = Library()  # Django template library (used by dj_paginator below)
# Registering some utils as filters:
library.filter(utils.epoch)
library.filter(utils.isotime)
library.global_function(dict)
library.global_function(utils.randslice)
library.global_function(static)
@library.filter
def urlparams(*args, **kwargs):
    # Jinja filter: delegate to utils.urlparams and mark the result safe.
    return jinja2.Markup(utils.urlparams(*args, **kwargs))
@library.global_function
def switch_is_active(switch_name):
    """Expose waffle's feature-switch check to templates."""
    return waffle.switch_is_active(switch_name)
@library.filter
def link(item):
    """Render *item* as an anchor: href from get_url_path(), escaped name."""
    html = """<a href="%s">%s</a>""" % (item.get_url_path(), jinja2.escape(item.name))
    return jinja2.Markup(html)
@library.filter
def xssafe(value):
    """
    Like |safe but for strings with interpolation.
    By using |xssafe you assert that you have written tests proving an
    XSS can't happen here.
    """
    # No escaping is performed — the value is only marked as safe.
    return jinja2.Markup(value)
@library.global_function
def locale_url(url):
    """Take a URL and give it the locale prefix."""
    prefixer = get_url_prefix()
    script = prefixer.request.META['SCRIPT_NAME']
    # e.g. script + '/en-US/' + url
    parts = [script, prefixer.locale, url.lstrip('/')]
    return '/'.join(parts)
@library.global_function
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates."""
    # Pop helper-only kwargs before forwarding the rest to reverse().
    add_prefix = kwargs.pop('add_prefix', True)
    host = kwargs.pop('host', '')
    url = '%s%s' % (
        host,
        reverse(viewname, args=args, kwargs=kwargs, add_prefix=add_prefix),
    )
    return url
@library.global_function
@jinja2.pass_context
def drf_url(context, viewname, *args, **kwargs):
    """Helper for DjangoRestFramework's ``reverse`` in templates."""
    request = context.get('request')
    if request:
        # DRF reverse() needs a versioned request; attach the default
        # versioning scheme lazily if the request doesn't have one yet.
        if not hasattr(request, 'versioning_scheme'):
            request.versioning_scheme = api_settings.DEFAULT_VERSIONING_CLASS()
        request.version = request.versioning_scheme.determine_version(
            request, *args, **kwargs
        )
    return drf_reverse(viewname, request=request, args=args, kwargs=kwargs)
@library.global_function
def services_url(viewname, *args, **kwargs):
    """Helper for ``url`` with host=SERVICES_URL."""
    kwargs.update({'host': settings.SERVICES_URL})
    return url(viewname, *args, **kwargs)
@library.filter
def paginator(pager):
    """Jinja filter: render a paginator page as HTML."""
    return PaginationRenderer(pager).render()
@register.filter(needs_autoescape=True)
def dj_paginator(pager, autoescape=True):
    # Django-template counterpart of the Jinja |paginator filter.
    # NOTE(review): *autoescape* is accepted for the needs_autoescape
    # protocol but is unused by the renderer.
    return mark_safe(PaginationRenderer(pager).render())
@library.filter
def impala_paginator(pager):
    """Render a paginator page using the Impala template variant."""
    t = loader.get_template('amo/impala/paginator.html')
    return jinja2.Markup(t.render({'pager': pager}))
class PaginationRenderer(object):
    """Render a Django paginator page with a bounded window of page links."""

    def __init__(self, pager):
        self.pager = pager
        self.max = 10                    # maximum number of page links shown
        self.span = (self.max - 1) // 2  # links on each side of the current page
        self.page = pager.number
        self.num_pages = pager.paginator.num_pages
        self.count = pager.paginator.count
        pager.page_range = self.range()
        # "dotted" flags tell the template to show ellipses at either end.
        pager.dotted_upper = self.num_pages not in pager.page_range
        pager.dotted_lower = 1 not in pager.page_range

    def range(self):
        """Return a list of page numbers to show in the paginator."""
        page, total, span = self.page, self.num_pages, self.span
        if total < self.max:
            # few pages: show them all
            lower, upper = 0, total
        elif page < span + 1:
            # near the start: anchor the window at page 1
            lower, upper = 0, span * 2
        elif page > total - span:
            # near the end: anchor the window at the last page
            lower, upper = total - span * 2, total
        else:
            # middle: centre the window on the current page
            lower, upper = page - span, page + span - 1
        return range(max(lower + 1, 1), min(total, upper) + 1)

    def render(self):
        c = {'pager': self.pager, 'num_pages': self.num_pages, 'count': self.count}
        t = loader.get_template('amo/paginator.html').render(c)
        return jinja2.Markup(t)
def _get_format():
    """Return a babel Format for the locale of the active language."""
    lang = get_language()
    return Format(utils.get_locale_from_lang(lang))
@library.filter
def numberfmt(num, format=None):
    """Format a number with the active locale's decimal conventions."""
    return _get_format().decimal(num, format)
@library.global_function
@jinja2.pass_context
def page_title(context, title):
    """Build '<title> :: Add-ons for Firefox' for the <title> tag."""
    title = smart_str(title)
    base_title = gettext('Add-ons for {0}').format(amo.FIREFOX.pretty)
    # The following line doesn't use string formatting because we want to
    # preserve the type of `title` in case it's a jinja2 `Markup` (safe,
    # escaped) object.
    return django_format_html('{} :: {}', title, base_title)
@library.filter
def json(s):
    # Jinja filter: serialize to JSON (jsonlib is the stdlib json module).
    return jsonlib.dumps(s)
@library.filter
def absolutify(url, site=None):
    """Take an URL and prepend the EXTERNAL_SITE_URL."""
    # Already-absolute http(s) URLs pass through untouched.
    if url and url.startswith(('http://', 'https://')):
        return url
    return urljoin(site or settings.EXTERNAL_SITE_URL, url)
@library.filter
def strip_controls(s):
    """
    Strips control characters from a string.

    Newline (10) and carriage return (13) are preserved. Markup input
    comes back as Markup; anything else as a plain string.
    """
    # Translation table mapping control characters (except \n and \r) to None,
    # i.e. deletion. Dict comprehension + tuple membership replace the
    # original dict()-over-generator and list membership test.
    control_trans = {n: None for n in range(32) if n not in (10, 13)}
    rv = str(s).translate(control_trans)
    return jinja2.Markup(rv) if isinstance(s, jinja2.Markup) else rv
@library.filter
def external_url(url):
    """Bounce a URL off outgoing.prod.mozaws.net."""
    return urlresolvers.get_outgoing_url(str(url))
@library.filter
def shuffle(sequence):
    """Shuffle a sequence."""
    # NOTE: shuffles *in place* and also returns the same sequence.
    random.shuffle(sequence)
    return sequence
@library.global_function
def field(field, label=None, **attrs):
    """Render a form field as errors + <p>label widget</p>; *attrs* go on the widget."""
    if label is not None:
        field.label = label
    # HTML from Django is already escaped.
    return jinja2.Markup(
        '%s<p>%s%s</p>'
        % (field.errors, field.label_tag(), field.as_widget(attrs=attrs))
    )
@library.filter
def timesince(time):
    """Render a datetime as a relative '... ago' string; '' for falsy input."""
    if not time:
        return ''
    ago = defaultfilters.timesince(time)
    # L10n: relative time in the past, like '4 days ago'
    return gettext('{0} ago').format(ago)
@library.filter
def timeuntil(time):
    """Expose Django's timeuntil filter to Jinja templates."""
    return defaultfilters.timeuntil(time)
@library.global_function
@library.render_with('amo/recaptcha.html')
@jinja2.pass_context
def recaptcha(context, form):
    """Render the recaptcha template with the current context plus *form*."""
    d = dict(context.items())
    d.update(form=form)
    return d
@library.filter
def is_choice_field(value):
    """True when the bound field's widget is a CheckboxInput.

    Returns None (falsy) for objects without the expected attributes.
    """
    try:
        return isinstance(value.field.widget, CheckboxInput)
    except AttributeError:
        pass
@library.global_function
@jinja2.evalcontextfunction
def attrs(ctx, *args, **kw):
    # Build an XML/HTML attribute string from dict items and keyword args.
    return jinja2.filters.do_xmlattr(ctx, dict(*args, **kw))
@library.global_function
def loc(s):
    """A noop function for strings that are not ready to be localized."""
    return trim_whitespace(s)
@library.global_function
@jinja2.pass_context
def remora_url(context, url, lang=None, app=None, prefix=''):
    """Wrapper for urlresolvers.remora_url"""
    # Default lang/app from the template context when not given explicitly.
    if lang is None:
        _lang = context['LANG']
        if _lang:
            lang = to_locale(_lang).replace('_', '-')
    if app is None:
        try:
            app = context['APP'].short
        except (AttributeError, KeyError):
            pass
    return urlresolvers.remora_url(url=url, lang=lang, app=app, prefix=prefix)
@library.global_function
@jinja2.pass_context
def hasOneToOne(context, obj, attr):
    """True when *obj*'s one-to-one related *attr* exists in the database."""
    try:
        getattr(obj, attr)
        return True
    except ObjectDoesNotExist:
        return False
@library.global_function
def no_results_amo():
    # This prints a "No results found" message. That's all. Carry on.
    t = loader.get_template('amo/no_results.html').render()
    return jinja2.Markup(t)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_path(what):
    """Make it possible to override storage paths in settings.

    By default, all storage paths are in the MEDIA_ROOT.
    This is backwards compatible.
    """
    default = os.path.join(settings.MEDIA_ROOT, what)
    # e.g. what='addons' -> settings.ADDONS_PATH overrides the default.
    key = '{0}_PATH'.format(what.upper())
    return getattr(settings, key, default)
# A (temporary?) copy of this is in services/utils.py. See bug 1055654.
def user_media_url(what):
    """
    Generate default media url, and make possible to override it from
    settings.
    """
    default = '%s%s/' % (settings.MEDIA_URL, what)
    # e.g. what='collection-icons' -> settings.COLLECTION_ICONS_URL override.
    key = '{0}_URL'.format(what.upper().replace('-', '_'))
    return getattr(settings, key, default)
@library.filter
def hidden_field(field):
    # Re-render a form field with an inline style that hides it while
    # keeping it present in the DOM (so its value still submits).
    return field.as_widget(attrs={'style': 'display:none'})
@library.filter
def format_html(string, *args, **kwargs):
    """Uses ``str.format`` for string interpolation.
    Uses ``django.utils.html:format_html`` internally.
    >>> {{ "{0} arguments, {x} arguments"|format_html('args', x='kwargs') }}
    "positional args, kwargs arguments"
    Checks both, *args and **kwargs for potentially unsafe arguments (
    not marked as `mark_safe`) and escapes them appropriately.
    """
    # smart_str normalizes the template string before Django interpolates
    # and escapes the arguments.
    return django_format_html(smart_str(string), *args, **kwargs)
@library.global_function
def js(bundle, debug=None):
    """
    Emit <script> tags for a JS bundle: one tag per source file in debug
    mode, a single tag for bundle-min.js otherwise.
    Copied from jingo-minify until we switch to something better...
    """
    # The original joined a one-element attribute list; the resulting
    # template string is identical to this literal.
    script_template = '<script src="%s"></script>'
    return _build_html(get_js_urls(bundle, debug), script_template)
@library.global_function
def css(bundle, media=False, debug=None):
    """
    Emit <link> tags for a CSS bundle: one tag per source file in debug
    mode, a single tag for bundle-min.css otherwise. ``media`` defaults
    to ``'all'`` when falsy.
    """
    link_template = '<link rel="stylesheet" media="%s" href="%%s" />' % (media or 'all')
    return _build_html(get_css_urls(bundle, debug), link_template)
@library.filter
def nl2br(string):
    """Turn newlines into <br/>."""
    if not string:
        return ''
    # Escape first so user content cannot inject markup, then rejoin the
    # escaped lines with <br/> and mark the result safe.
    escaped_lines = jinja2.escape(string).splitlines()
    return jinja2.Markup('<br/>'.join(escaped_lines))
@library.filter(name='date')
def format_date(value, format='DATE_FORMAT'):
    # Expose Django's date formatting to Jinja2 under the familiar
    # ``date`` filter name; ``format`` may be a named settings format.
    return defaultfilters.date(value, format)
@library.filter(name='datetime')
def format_datetime(value, format='DATETIME_FORMAT'):
    # Same underlying Django filter as ``date`` above, but defaulting to
    # the datetime-flavoured format name.
    return defaultfilters.date(value, format)
@library.filter
def class_selected(a, b):
    """Return ``'class="selected"'`` if ``a == b``, else an empty safe string."""
    if a == b:
        return mark_safe('class="selected"')
    return mark_safe('')
class Spaceless(Extension):
    # Jinja2 ``{% spaceless %}...{% endspaceless %}`` tag: strips
    # whitespace between HTML tags in the wrapped block using Django's
    # strip_spaces_between_tags.
    tags = {'spaceless'}
    def parse(self, parser):
        # Consume the ``spaceless`` token, parse everything up to (and
        # including) ``endspaceless``, and wrap the body in a CallBlock
        # that routes the rendered output through _strip_spaces.
        lineno = next(parser.stream).lineno
        body = parser.parse_statements(['name:endspaceless'], drop_needle=True)
        return jinja2.nodes.CallBlock(
            self.call_method('_strip_spaces'),
            [],
            [],
            body,
        ).set_lineno(lineno)
    def _strip_spaces(self, *, caller):
        # ``caller()`` renders the wrapped block body at runtime.
        return strip_spaces_between_tags(caller().strip())
| 27.654846
| 87
| 0.68345
|
4a140369aaeffc195770c49d6ce4b42e788e91b0
| 1,281
|
py
|
Python
|
src/biotite/sequence/align/__init__.py
|
danijoo/biotite
|
22072e64676e4e917236eac8493eed4c6a22cc33
|
[
"BSD-3-Clause"
] | 2
|
2020-11-06T13:06:14.000Z
|
2021-11-08T09:46:18.000Z
|
src/biotite/sequence/align/__init__.py
|
danielmuthama/biotite
|
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
|
[
"BSD-3-Clause"
] | null | null | null |
src/biotite/sequence/align/__init__.py
|
danielmuthama/biotite
|
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
|
[
"BSD-3-Clause"
] | null | null | null |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
This subpackage provides functionality for sequence alignments.
The two central classes involved are :class:`SubstitutionMatrix` and
:class:`Alignment`:
Every function that performs an alignment requires a
:class:`SubstitutionMatrix` that provides similarity scores for each
symbol combination of two alphabets (usually both alphabets are equal).
The alphabets in the :class:`SubstitutionMatrix` must match or extend
the alphabets of the sequences to be aligned.
An alignment cannot be directly represented as list of :class:`Sequence`
objects, since a gap indicates the absence of any symbol.
Instead, the aligning functions return one or more :class:`Alignment`
instances.
These objects contain the original sequences and a trace, that describe
which positions (indices) in the sequences are aligned.
Optionally they also contain the similarity score.
The aligning functions are usually C-accelerated, reducing the
computation time substantially.
"""
__name__ = "biotite.sequence.align"
__author__ = "Patrick Kunzmann"
from .alignment import *
from .pairwise import *
from .multiple import *
from .matrix import *
| 36.6
| 72
| 0.800937
|
4a1403897ff25d312f6d958b7b52cc05c0e2034d
| 2,100
|
py
|
Python
|
google/ads/googleads/v9/errors/types/conversion_adjustment_upload_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/errors/types/conversion_adjustment_upload_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/errors/types/conversion_adjustment_upload_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"ConversionAdjustmentUploadErrorEnum",},
)
class ConversionAdjustmentUploadErrorEnum(proto.Message):
    r"""Container for enum describing possible conversion adjustment
    upload errors.
    """
    class ConversionAdjustmentUploadError(proto.Enum):
        r"""Enum describing possible conversion adjustment upload errors."""
        # NOTE(review): generated code — values mirror the upstream proto.
        # Value 19 is absent here, presumably reserved/removed upstream;
        # do not reuse it.
        UNSPECIFIED = 0
        UNKNOWN = 1
        TOO_RECENT_CONVERSION_ACTION = 2
        INVALID_CONVERSION_ACTION = 3
        CONVERSION_ALREADY_RETRACTED = 4
        CONVERSION_NOT_FOUND = 5
        CONVERSION_EXPIRED = 6
        ADJUSTMENT_PRECEDES_CONVERSION = 7
        MORE_RECENT_RESTATEMENT_FOUND = 8
        TOO_RECENT_CONVERSION = 9
        CANNOT_RESTATE_CONVERSION_ACTION_THAT_ALWAYS_USES_DEFAULT_CONVERSION_VALUE = (
            10
        )
        TOO_MANY_ADJUSTMENTS_IN_REQUEST = 11
        TOO_MANY_ADJUSTMENTS = 12
        RESTATEMENT_ALREADY_EXISTS = 13
        DUPLICATE_ADJUSTMENT_IN_REQUEST = 14
        CUSTOMER_NOT_ACCEPTED_CUSTOMER_DATA_TERMS = 15
        CONVERSION_ACTION_NOT_ELIGIBLE_FOR_ENHANCEMENT = 16
        INVALID_USER_IDENTIFIER = 17
        UNSUPPORTED_USER_IDENTIFIER = 18
        GCLID_DATE_TIME_PAIR_AND_ORDER_ID_BOTH_SET = 20
        CONVERSION_ALREADY_ENHANCED = 21
        DUPLICATE_ENHANCEMENT_IN_REQUEST = 22
__all__ = tuple(sorted(__protobuf__.manifest))
| 34.42623
| 86
| 0.725238
|
4a1403cdccac1399e1d5251a9854850992749fe8
| 189
|
py
|
Python
|
codeforces.com/734A/solution.py
|
zubtsov/competitive-programming
|
919d63130144347d7f6eddcf8f5bc2afb85fddf3
|
[
"MIT"
] | null | null | null |
codeforces.com/734A/solution.py
|
zubtsov/competitive-programming
|
919d63130144347d7f6eddcf8f5bc2afb85fddf3
|
[
"MIT"
] | null | null | null |
codeforces.com/734A/solution.py
|
zubtsov/competitive-programming
|
919d63130144347d7f6eddcf8f5bc2afb85fddf3
|
[
"MIT"
] | null | null | null |
# Codeforces 734A: Anton wins if he has strictly more 'A' game results
# than Danik's 'D' results; a tie is "Friendship".
input()  # game count — unused, the result string carries the length
score = sum(1 if outcome == 'A' else -1 for outcome in input())
if score > 0:
    print('Anton')
elif score < 0:
    print('Danik')
else:
    print('Friendship')
| 17.181818
| 65
| 0.613757
|
4a1403ec32929fcb4173dd77c1c11cefe3376511
| 2,443
|
py
|
Python
|
src/sentry/tagstore/v2/models/grouptagkey.py
|
apragacz/sf-sentry
|
2fdd6c1195c29a1d401d1cd538c22ea68556699a
|
[
"BSD-3-Clause"
] | 1
|
2018-03-05T15:40:12.000Z
|
2018-03-05T15:40:12.000Z
|
src/sentry/tagstore/v2/models/grouptagkey.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 1
|
2018-08-22T16:49:48.000Z
|
2018-08-22T16:49:48.000Z
|
src/sentry/tagstore/v2/models/grouptagkey.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 1
|
2018-07-02T09:46:44.000Z
|
2018-07-02T09:46:44.000Z
|
"""
sentry.tagstore.v2.models.grouptagkey
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.db import router, transaction, DataError
from sentry.api.serializers import Serializer, register
from sentry.db.models import (
Model, BoundedPositiveIntegerField, BaseManager, FlexibleForeignKey, sane_repr
)
class GroupTagKey(Model):
    """
    Stores a unique tag key name for a group.
    An example key might be "url" or "server_name".
    """
    __core__ = False
    # project_id / group_id are plain indexed integers, not ForeignKeys —
    # denormalized references with no DB-level constraint.
    project_id = BoundedPositiveIntegerField(db_index=True)
    group_id = BoundedPositiveIntegerField(db_index=True)
    _key = FlexibleForeignKey('tagstore.TagKey', db_column='key_id')
    values_seen = BoundedPositiveIntegerField(default=0)
    objects = BaseManager()
    class Meta:
        app_label = 'tagstore'
        unique_together = (('project_id', 'group_id', '_key'), )
    __repr__ = sane_repr('project_id', 'group_id', '_key')
    @property
    def key(self):
        # Convenience accessor for the related TagKey's string key.
        return self._key.key
    def merge_counts(self, new_group):
        # Recompute values_seen for this tag key on *new_group* from the
        # actual GroupTagValue rows (used after groups are merged).
        from sentry.tagstore.v2.models import GroupTagValue
        try:
            with transaction.atomic(using=router.db_for_write(GroupTagKey)):
                GroupTagKey.objects.filter(
                    group_id=new_group.id,
                    _key_id=self._key_id,
                ).update(
                    values_seen=GroupTagValue.objects.filter(
                        group_id=new_group.id,
                        _key_id=self._key_id,
                    ).count()
                )
        except DataError:
            # it's possible to hit an out of range value for counters
            pass
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
    """API serializer for GroupTagKey rows (standardized key + label)."""
    def get_attrs(self, item_list, user):
        from sentry import tagstore
        # Precompute label and standardized key per item for serialize().
        return {
            item: {
                'name': tagstore.get_tag_key_label(item.key),
                'key': tagstore.get_standardized_key(item.key),
            }
            for item in item_list
        }
    def serialize(self, obj, attrs, user):
        obj_id = six.text_type(obj.id)
        return {
            'id': obj_id,
            'name': attrs['name'],
            'key': attrs['key'],
            'uniqueValues': obj.values_seen,
        }
| 28.406977
| 82
| 0.605813
|
4a140412c253bc4fc7a7f2eec4ef2f0b40a005b7
| 889
|
py
|
Python
|
app/dao/letter_branding_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 41
|
2019-11-28T16:58:41.000Z
|
2022-01-28T21:11:16.000Z
|
app/dao/letter_branding_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 1,083
|
2019-07-08T12:57:24.000Z
|
2022-03-08T18:53:40.000Z
|
app/dao/letter_branding_dao.py
|
tlwr/notifications-api
|
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
|
[
"MIT"
] | 9
|
2020-01-24T19:56:43.000Z
|
2022-01-27T21:36:53.000Z
|
from app import db
from app.dao.dao_utils import transactional
from app.models import LetterBranding
def dao_get_letter_branding_by_id(letter_branding_id):
    # ``.one()`` raises if there is not exactly one matching row.
    return LetterBranding.query.filter(LetterBranding.id == letter_branding_id).one()
def dao_get_letter_branding_by_name(letter_branding_name):
    # ``.first()`` returns None when no branding has this name.
    return LetterBranding.query.filter_by(name=letter_branding_name).first()
def dao_get_all_letter_branding():
    # All rows, sorted alphabetically by branding name.
    return LetterBranding.query.order_by(LetterBranding.name).all()
@transactional
def dao_create_letter_branding(letter_branding):
    # @transactional commits (or rolls back) the session around this add.
    db.session.add(letter_branding)
@transactional
def dao_update_letter_branding(letter_branding_id, **kwargs):
    """Update the given LetterBranding's attributes from **kwargs;
    falsy values are stored as None. Returns the updated row."""
    branding = LetterBranding.query.get(letter_branding_id)
    for attr_name, new_value in kwargs.items():
        # Coerce falsy values ('' etc.) to None, matching the original.
        setattr(branding, attr_name, new_value if new_value else None)
    db.session.add(branding)
    return branding
| 29.633333
| 85
| 0.807649
|
4a1404a292d30a5a79c43e9fbe09aa129b85b775
| 410
|
py
|
Python
|
tests/parser/functions/test_length.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 1,471
|
2017-12-25T05:47:57.000Z
|
2019-11-19T07:47:53.000Z
|
tests/parser/functions/test_length.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 915
|
2019-11-21T05:48:16.000Z
|
2022-03-31T23:51:03.000Z
|
tests/parser/functions/test_length.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 321
|
2017-12-25T16:37:21.000Z
|
2019-11-15T17:44:06.000Z
|
def test_test_length(get_contract_with_gas_estimation):
    # Contract packs three lengths into one number: len(inp)*100 + len(x)*10 + len(self.y).
    contract_source = """
y: Bytes[10]
@external
def foo(inp: Bytes[10]) -> uint256:
    x: Bytes[5] = slice(inp,1, 5)
    self.y = slice(inp, 2, 4)
    return len(inp) * 100 + len(x) * 10 + len(self.y)
    """
    contract = get_contract_with_gas_estimation(contract_source)
    result = contract.foo(b"badminton")
    # "badminton" is 9 bytes, x is 5, self.y is 4 -> 954.
    assert result == 954, result
    print("Passed length test")
| 27.333333
| 58
| 0.643902
|
4a140513ab80401a2810d4684d789f9bf0cc60bb
| 4,226
|
py
|
Python
|
tests/test_tracing.py
|
xvillaneau/sentry-python
|
8cc48dc1b9e944d1842271af69d8a2aef43cc4ee
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_tracing.py
|
xvillaneau/sentry-python
|
8cc48dc1b9e944d1842271af69d8a2aef43cc4ee
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_tracing.py
|
xvillaneau/sentry-python
|
8cc48dc1b9e944d1842271af69d8a2aef43cc4ee
|
[
"BSD-2-Clause"
] | null | null | null |
import weakref
import gc
import pytest
from sentry_sdk import Hub, capture_message
from sentry_sdk.tracing import Span
@pytest.mark.parametrize("sample_rate", [0.0, 1.0])
def test_basic(sentry_init, capture_events, sample_rate):
    # With sampling off (0.0) no transaction event is captured; with
    # sampling on (1.0) the transaction carries both child spans.
    sentry_init(traces_sample_rate=sample_rate)
    events = capture_events()
    with Hub.current.start_span(transaction="hi"):
        with pytest.raises(ZeroDivisionError):
            with Hub.current.start_span(op="foo", description="foodesc"):
                1 / 0
        with Hub.current.start_span(op="bar", description="bardesc"):
            pass
    if sample_rate:
        event, = events
        span1, span2 = event["spans"]
        parent_span = event
        # The span that raised gets a failure status; the clean one none.
        assert span1["tags"]["status"] == "failure"
        assert span1["op"] == "foo"
        assert span1["description"] == "foodesc"
        assert "status" not in span2.get("tags", {})
        assert span2["op"] == "bar"
        assert span2["description"] == "bardesc"
        assert parent_span["transaction"] == "hi"
    else:
        assert not events
@pytest.mark.parametrize("sampled", [True, False, None])
def test_continue_from_headers(sentry_init, capture_events, sampled):
    # A span continued from ``sentry-trace`` headers must keep the original
    # trace id and sampling decision, and its own transaction/scope changes
    # must not leak back.
    sentry_init(traces_sample_rate=1.0, traceparent_v2=True)
    events = capture_events()
    with Hub.current.start_span(transaction="hi"):
        with Hub.current.start_span() as old_span:
            old_span.sampled = sampled
            headers = dict(Hub.current.iter_trace_propagation_headers())
    header = headers["sentry-trace"]
    # The header's trailing flag encodes the sampling decision.
    if sampled is True:
        assert header.endswith("-1")
    if sampled is False:
        assert header.endswith("-0")
    if sampled is None:
        assert header.endswith("-")
    span = Span.continue_from_headers(headers)
    # The scope's transaction name (set below) should win over this one.
    span.transaction = "WRONG"
    assert span is not None
    assert span.sampled == sampled
    assert span.trace_id == old_span.trace_id
    with Hub.current.start_span(span):
        with Hub.current.configure_scope() as scope:
            scope.transaction = "ho"
        capture_message("hello")
    if sampled is False:
        # The continued (unsampled) transaction is not reported.
        trace1, message = events
        assert trace1["transaction"] == "hi"
    else:
        trace1, message, trace2 = events
        assert trace1["transaction"] == "hi"
        assert trace2["transaction"] == "ho"
        # All three events share the propagated trace id.
        assert (
            trace1["contexts"]["trace"]["trace_id"]
            == trace2["contexts"]["trace"]["trace_id"]
            == span.trace_id
            == message["contexts"]["trace"]["trace_id"]
        )
    assert message["message"] == "hello"
def test_sampling_decided_only_for_transactions(sentry_init, capture_events):
    # Sampling is decided at the transaction root: child spans inherit it,
    # while a span started with no transaction has no decision at all.
    sentry_init(traces_sample_rate=0.5)
    with Hub.current.start_span(transaction="hi") as trace:
        assert trace.sampled is not None
        with Hub.current.start_span() as span:
            assert span.sampled == trace.sampled
    with Hub.current.start_span() as span:
        assert span.sampled is None
@pytest.mark.parametrize(
    "args,expected_refcount",
    [({"traces_sample_rate": 1.0}, 100), ({"traces_sample_rate": 0.0}, 0)],
)
def test_memory_usage(sentry_init, capture_events, args, expected_refcount):
    # When tracing is disabled, spans must not retain their tag values:
    # the weakly-referenced ``foo`` functions should all be collected.
    sentry_init(**args)
    references = weakref.WeakSet()
    with Hub.current.start_span(transaction="hi"):
        for i in range(100):
            with Hub.current.start_span(
                op="helloworld", description="hi {}".format(i)
            ) as span:
                def foo():
                    pass
                references.add(foo)
                span.set_tag("foo", foo)
                pass
            # Drop the only strong references held by this frame.
            del foo
            del span
    # required only for pypy (cpython frees immediately)
    gc.collect()
    assert len(references) == expected_refcount
def test_span_trimming(sentry_init, capture_events):
    """With max_spans=3, only the earliest spans survive trimming."""
    sentry_init(traces_sample_rate=1.0, _experiments={"max_spans": 3})
    events = capture_events()
    with Hub.current.start_span(transaction="hi"):
        for index in range(10):
            with Hub.current.start_span(op="foo{}".format(index)):
                pass
    [event] = events
    first_span, second_span = event["spans"]
    assert first_span["op"] == "foo0"
    assert second_span["op"] == "foo1"
| 29.144828
| 77
| 0.622101
|
4a14064f41d19634c8b26199dad07601a75a30da
| 1,867
|
py
|
Python
|
tests/iterators/compact_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | null | null | null |
tests/iterators/compact_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | 21
|
2022-01-05T04:51:33.000Z
|
2022-01-28T05:45:57.000Z
|
tests/iterators/compact_test.py
|
SSouik/pyutil
|
d2250fb585679e49eb9056a3051bf239a58c2e8b
|
[
"MIT"
] | null | null | null |
import pytest
from pyutil import compact
def test_compact_with_false():
lst = [1, 2, False]
actual = list(compact(lst))
expected = [1, 2]
assert actual == expected
def test_compact_with_true():
lst = [1, 2, True]
actual = list(compact(lst))
expected = [1, 2, True]
assert actual == expected
def test_compact_with_empty_dict():
lst = [1, 2, {}]
actual = list(compact(lst))
expected = [1, 2]
assert actual == expected
def test_compact_with_dict():
lst = [1, 2, {"a": 1}]
actual = list(compact(lst))
expected = [1, 2, {"a": 1}]
assert actual == expected
def test_compact_with_None():
lst = [1, 2, None]
actual = list(compact(lst))
expected = [1, 2]
assert actual == expected
def test_compact_with_empty_string():
lst = [1, 2, ""]
actual = list(compact(lst))
expected = [1, 2]
assert actual == expected
def test_compact_with_empty_string():
lst = [1, 2, "foo"]
actual = list(compact(lst))
expected = [1, 2, "foo"]
assert actual == expected
def test_compact_with_zero():
lst = [1, 2, 0]
actual = list(compact(lst))
expected = [1, 2]
assert actual == expected
def test_compact_when_seq_is_empty():
actual = list(compact([]))
expected = []
assert actual == expected
def test_compact_when_seq_is_tuple():
tup = (1, 2, False)
actual = list(compact(tup))
expected = [1, 2]
assert actual == expected
def test_compact_when_seq_is_set():
set = {1, 2, False}
actual = list(compact(set))
expected = [1, 2]
assert actual == expected
def test_compact_when_seq_is_tuple():
tup = (1, 2, False)
actual = list(compact(tup))
expected = [1, 2]
assert actual == expected
def test_compact_when_seq_is_not_valid():
with pytest.raises(TypeError):
list(compact("foo"))
| 20.516484
| 41
| 0.619711
|
4a14076d61754255ffd5f17337bda00dde6cb01c
| 14,259
|
py
|
Python
|
sdk/python/pulumi_azure_native/databoxedge/v20200901preview/device.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/databoxedge/v20200901preview/device.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/databoxedge/v20200901preview/device.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Device']
class Device(pulumi.CustomResource):
    """Pulumi resource wrapper for an Azure Data Box Edge/Gateway device
    (generated SDK code — see the warning at the top of the file)."""
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_box_edge_device_status: Optional[pulumi.Input[Union[str, 'DataBoxEdgeDeviceStatus']]] = None,
                 device_name: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ResourceIdentityArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        The Data Box Edge/Gateway device.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union[str, 'DataBoxEdgeDeviceStatus']] data_box_edge_device_status: The status of the Data Box Edge/Gateway device.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[str] etag: The etag for the devices.
        :param pulumi.Input[pulumi.InputType['ResourceIdentityArgs']] identity: Msi identity of the resource
        :param pulumi.Input[str] location: The location of the device. This is a supported and registered Azure geographical region (for example, West US, East US, or Southeast Asia). The geographical region of a device cannot be changed once it is created, but if an identical geographical region is specified on update, the request will succeed.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU type.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The list of tags that describe the device. These tags can be used to view and group this device (across resource groups).
        """
        # Legacy positional arguments kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['data_box_edge_device_status'] = data_box_edge_device_status
            __props__['device_name'] = device_name
            __props__['etag'] = etag
            __props__['identity'] = identity
            __props__['location'] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['sku'] = sku
            __props__['tags'] = tags
            # Output-only properties start as None; the provider fills them in.
            __props__['configured_role_types'] = None
            __props__['culture'] = None
            __props__['description'] = None
            __props__['device_hcs_version'] = None
            __props__['device_local_capacity'] = None
            __props__['device_model'] = None
            __props__['device_software_version'] = None
            __props__['device_type'] = None
            __props__['edge_profile'] = None
            __props__['friendly_name'] = None
            __props__['kind'] = None
            __props__['model_description'] = None
            __props__['name'] = None
            __props__['node_count'] = None
            __props__['resource_move_details'] = None
            __props__['serial_number'] = None
            __props__['system_data'] = None
            __props__['time_zone'] = None
            __props__['type'] = None
        # Aliases for older/alternate type tokens so existing stacks keep working.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Device"), pulumi.Alias(type_="azure-native:databoxedge:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge:Device"), pulumi.Alias(type_="azure-native:databoxedge/latest:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/latest:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Device"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Device"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Device")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Device, __self__).__init__(
            'azure-native:databoxedge/v20200901preview:Device',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Device':
        """
        Get an existing Device resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties are resolved from the provider; none are set locally.
        __props__ = dict()
        __props__["configured_role_types"] = None
        __props__["culture"] = None
        __props__["data_box_edge_device_status"] = None
        __props__["description"] = None
        __props__["device_hcs_version"] = None
        __props__["device_local_capacity"] = None
        __props__["device_model"] = None
        __props__["device_software_version"] = None
        __props__["device_type"] = None
        __props__["edge_profile"] = None
        __props__["etag"] = None
        __props__["friendly_name"] = None
        __props__["identity"] = None
        __props__["kind"] = None
        __props__["location"] = None
        __props__["model_description"] = None
        __props__["name"] = None
        __props__["node_count"] = None
        __props__["resource_move_details"] = None
        __props__["serial_number"] = None
        __props__["sku"] = None
        __props__["system_data"] = None
        __props__["tags"] = None
        __props__["time_zone"] = None
        __props__["type"] = None
        return Device(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="configuredRoleTypes")
    def configured_role_types(self) -> pulumi.Output[Sequence[str]]:
        """
        Type of compute roles configured.
        """
        return pulumi.get(self, "configured_role_types")
    @property
    @pulumi.getter
    def culture(self) -> pulumi.Output[str]:
        """
        The Data Box Edge/Gateway device culture.
        """
        return pulumi.get(self, "culture")
    @property
    @pulumi.getter(name="dataBoxEdgeDeviceStatus")
    def data_box_edge_device_status(self) -> pulumi.Output[Optional[str]]:
        """
        The status of the Data Box Edge/Gateway device.
        """
        return pulumi.get(self, "data_box_edge_device_status")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        The Description of the Data Box Edge/Gateway device.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="deviceHcsVersion")
    def device_hcs_version(self) -> pulumi.Output[str]:
        """
        The device software version number of the device (eg: 1.2.18105.6).
        """
        return pulumi.get(self, "device_hcs_version")
    @property
    @pulumi.getter(name="deviceLocalCapacity")
    def device_local_capacity(self) -> pulumi.Output[float]:
        """
        The Data Box Edge/Gateway device local capacity in MB.
        """
        return pulumi.get(self, "device_local_capacity")
    @property
    @pulumi.getter(name="deviceModel")
    def device_model(self) -> pulumi.Output[str]:
        """
        The Data Box Edge/Gateway device model.
        """
        return pulumi.get(self, "device_model")
    @property
    @pulumi.getter(name="deviceSoftwareVersion")
    def device_software_version(self) -> pulumi.Output[str]:
        """
        The Data Box Edge/Gateway device software version.
        """
        return pulumi.get(self, "device_software_version")
    @property
    @pulumi.getter(name="deviceType")
    def device_type(self) -> pulumi.Output[str]:
        """
        The type of the Data Box Edge/Gateway device.
        """
        return pulumi.get(self, "device_type")
    @property
    @pulumi.getter(name="edgeProfile")
    def edge_profile(self) -> pulumi.Output['outputs.EdgeProfileResponse']:
        """
        The details of Edge Profile for this resource
        """
        return pulumi.get(self, "edge_profile")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        The etag for the devices.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> pulumi.Output[str]:
        """
        The Data Box Edge/Gateway device name.
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ResourceIdentityResponse']]:
        """
        Msi identity of the resource
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        The etag for the devices.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The location of the device. This is a supported and registered Azure geographical region (for example, West US, East US, or Southeast Asia). The geographical region of a device cannot be changed once it is created, but if an identical geographical region is specified on update, the request will succeed.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="modelDescription")
    def model_description(self) -> pulumi.Output[str]:
        """
        The description of the Data Box Edge/Gateway device model.
        """
        return pulumi.get(self, "model_description")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The object name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nodeCount")
    def node_count(self) -> pulumi.Output[int]:
        """
        The number of nodes in the cluster.
        """
        return pulumi.get(self, "node_count")
    @property
    @pulumi.getter(name="resourceMoveDetails")
    def resource_move_details(self) -> pulumi.Output['outputs.ResourceMoveDetailsResponse']:
        """
        The details of the move operation on this resource.
        """
        return pulumi.get(self, "resource_move_details")
    @property
    @pulumi.getter(name="serialNumber")
    def serial_number(self) -> pulumi.Output[str]:
        """
        The Serial Number of Data Box Edge/Gateway device.
        """
        return pulumi.get(self, "serial_number")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """
        The SKU type.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        DataBoxEdge Resource
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The list of tags that describe the device. These tags can be used to view and group this device (across resource groups).
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="timeZone")
    def time_zone(self) -> pulumi.Output[str]:
        """
        The Data Box Edge/Gateway device timezone.
        """
        return pulumi.get(self, "time_zone")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")
    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 40.856734
| 1,162
| 0.641349
|
4a14081aa75584c2aeef6b284d51475074eda36b
| 9,960
|
py
|
Python
|
syntrain.py
|
ShichengChen/self-supervised-hand-pose-estimation
|
c086b0c3cea2ee92f3ff6177ee37af91f815343e
|
[
"MIT"
] | 2
|
2021-06-09T07:57:44.000Z
|
2021-07-15T19:13:08.000Z
|
syntrain.py
|
ShichengChen/self-supervised-hand-pose-estimation
|
c086b0c3cea2ee92f3ff6177ee37af91f815343e
|
[
"MIT"
] | null | null | null |
syntrain.py
|
ShichengChen/self-supervised-hand-pose-estimation
|
c086b0c3cea2ee92f3ff6177ee37af91f815343e
|
[
"MIT"
] | null | null | null |
import os
import platform
if (platform.node()=='csc-G7-7590'):
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
else:
os.environ["CUDA_VISIBLE_DEVICES"] = '2'
import torchvision.models as models
from torch.optim.lr_scheduler import MultiStepLR
import torch
from cscPy.Nets.AligningHandnet import encoderRGB,decPoseNet
from cscPy.dataloader.MVdataloader import MVDataloader
from cscPy.mano.dataloader.synthesizer import ManoSynthesizer
from cscPy.Loss.utils import pose3d_loss,LossHelper,cloud_dis,cloud_dis2
import numpy as np
from cscPy.mano.network.manoArmLayer import MANO_SMPL
from cscPy.mano.network.biomechanicalLoss import BiomechanicalLayer
from cscPy.globalCamera.camera import CameraIntrinsics,perspective_projection
from cscPy.globalCamera.util import fetch_all_sequences,load_rgb_maps,load_depth_maps,get_cameras_from_dir,visualize_better_qulity_depth_map
from tqdm import tqdm
import cv2
from cscPy.mano.network.utils import *
#if not os.path.exists('/mnt/data/shicheng/RHD_published_v2/'):
# NOTE: these assignments deliberately shadow the imported class names — from
# here on `encoderRGB`/`decoderPose` refer to instantiated CUDA modules.
encoderRGB=encoderRGB().cuda()
decoderPose=decPoseNet().cuda()
# checkpoint = torch.load('./pretrain/rgb2pose.pt')
# decoderPose.load_state_dict(checkpoint['decoderCloud'])
# encoderRGB.load_state_dict(checkpoint['encoderRGB'])
# Dataset selection: with onlysyn the synthetic set is concatenated with
# itself — presumably to keep epoch length comparable to the synthetic+real
# mix; confirm intent before changing.
onlysyn=True
train_dataset1 = ManoSynthesizer()
train_dataset2 = MVDataloader()
if(onlysyn):
    train_dataset = torch.utils.data.ConcatDataset( [train_dataset1, train_dataset1])
else:train_dataset = torch.utils.data.ConcatDataset( [train_dataset1, train_dataset2])
def _init_fn(worker_id):np.random.seed(worker_id)
def _init_fn2(worker_id):np.random.seed(worker_id**2+2)
train_loader = torch.utils.data.DataLoader(train_dataset,batch_size=16,num_workers=4, shuffle=True,worker_init_fn=_init_fn)
print('train_loader',len(train_loader))
# Resolve the MANO right-hand model path across the two machines this script
# runs on (dev laptop vs server).
manoPath='/home/csc/MANO-hand-model-toolkit/mano/models/MANO_RIGHT.pkl'
if not os.path.exists(manoPath):
    manoPath = '/home/shicheng/MANO-hand-model-toolkit/mano/models/MANO_RIGHT.pkl'
mano_right = MANO_SMPL(manoPath, ncomps=45, oriorder=True,device='cuda',userotJoints=True)
# Encoder and pose decoder are optimized jointly by one Adam optimizer.
mylist=[]
mylist.append({'params':encoderRGB.parameters()})
mylist.append({'params':decoderPose.parameters()})
lr=1e-4
optimizer = torch.optim.Adam(mylist, lr=lr)
# NOTE(review): scheduler.step() is commented out in the training loop below,
# so this schedule never actually decays the learning rate.
scheduler = MultiStepLR(optimizer, milestones=[30,60], gamma=0.1)
def getLatentLoss(z_mean, z_stddev, goalStd=1.0, eps=1e-9):
    """Per-sample KL-style latent loss for a diagonal-Gaussian posterior.

    Computes 0.5 * sum(mean^2 + std^2 - log(std^2 + eps) - goalStd) over the
    feature dimension; with goalStd=1.0 this is the standard VAE
    KL(N(mu, sigma^2) || N(0, 1)) term (up to the eps stabilizer).

    Args:
        z_mean: 2-D tensor of posterior means, one row per sample.
        z_stddev: 2-D tensor of posterior standard deviations (same shape).
        goalStd: constant subtracted per feature dimension (default 1.0).
        eps: small constant added inside the log for numerical stability.

    Returns:
        1-D tensor with one loss value per sample (reduced over dim 1).
    """
    # Fix: the original accepted `eps` but never used it, so z_stddev == 0
    # produced log(0) = -inf and NaN gradients; eps guards the log.
    var = z_stddev ** 2
    latent_loss = 0.5 * torch.sum(z_mean ** 2 + var - torch.log(var + eps) - goalStd, 1)
    return latent_loss
# Anatomical-constraint loss on predicted joints (plane/flex/abduction terms).
biolayer = BiomechanicalLayer(fingerPlaneLoss=True,fingerFlexLoss=True, fingerAbductionLoss=True)
# Run-summary string used as the logging/TensorBoard tag.
summary="lr:"+str(lr)+" "+__file__+" "
print('summary',summary)
losshelp=LossHelper(useBar=True,usetb=True,summary=summary)
# Main training loop: 80 epochs over mixed synthetic/real batches.
for epoch in tqdm(range(80)):
    losshelp.initForEachEpoch(lenFordataloader=len(train_loader))
    for idx, inp in enumerate(train_loader):
        # `mask` flags sample origin per batch element; the code below treats
        # mask==1 as synthetic samples and mask!=1 as real ones.
        img,cloud,pose_gt,scale,root,mask=inp['img'].cuda(),inp['cloud'].cuda(),inp['pose3d'].cuda(),inp['scale'].cuda(),\
                                          inp['root'].cuda(),inp['mask'].cuda().reshape(-1)
        # img, cloud, pose_gt, scale, root, mask = inp['img'], inp['cloud'], inp['pose3d'], inp[
        #     'scale'],inp['root'], inp['mask'].reshape(-1)
        # cur=img[0].permute(1,2,0).cpu().numpy()
        # cur=(cur*255).astype(np.uint8)
        # cv2.imshow('cur',cur)
        # cv2.waitKey(1)
        encoderRGB.train()
        decoderPose.train()
        N=img.shape[0]
        z_rgb, mn_rgb, sd_rgb = encoderRGB(img,training=False) # encode rgb
        # print('img',img)
        # z_rgb = encoderRGB(img,training=False) # encode rgb
        # print("z_rgb",z_rgb)
        # Decode the latent into 21 3D joints per sample.
        pose_rgb = decoderPose(z_rgb,).reshape(N,21,3)
        # VAE-style KL term on the encoder's latent distribution.
        latent_loss_rgb = getLatentLoss(mn_rgb, sd_rgb)
        latent_loss_sum = torch.mean(latent_loss_rgb)
        # Supervised 3D pose losses, split by sample origin.
        pose_loss_syn,eucLoss_syn=pose3d_loss(pose_rgb[mask==1],pose_gt[mask==1],scale[mask==1])
        pose_loss_real,eucLoss_real=pose3d_loss(pose_rgb[mask!=1],pose_gt[mask!=1],scale[mask!=1])
        #pose_loss_sum = torch.mean(eucLoss_syn)
        # De-normalize predicted joints, then greedily fit MANO rotations.
        joints=pose_rgb*scale+root
        wrist_trans,local_trans,outjoints=mano_right.matchTemplate2JointsGreedy(joints)
        _, Greedymatchloss = pose3d_loss(joints, outjoints, scale)
        #print('Greedymatchloss',Greedymatchloss)
        # Forward MANO with the fitted rotations (zero shape/translation, unit
        # scale) to get a mesh + joints consistent with the prediction.
        vertexPre, joint_pre = mano_right.get_mano_vertices(wrist_trans.reshape([N, 1, 3, 3]),
                                                            local_trans.view(N, 15, 3, 3),
                                                            torch.zeros([N, 10], dtype=torch.float32).view(N, 10),
                                                            torch.ones([N, 1], dtype=torch.float32).view(N, 1),
                                                            torch.zeros([N,3], dtype=torch.float32).view(N,3),
                                                            pose_type='rot_matrix', mmcp_center=False,
                                                            external_transition=None)
        # Re-center on the MMCP joint (index 4) and re-normalize by scale so
        # MANO outputs live in the same frame as pose_rgb/cloud.
        mmcp = joint_pre[:, 4:5, :].clone()
        joint_pre = (joint_pre - mmcp) / scale
        vertexPre = (vertexPre - mmcp) / scale
        # Debug visualization (dev machine only): project predicted mesh and
        # ground-truth cloud to depth maps for the first real sample.
        if (platform.node()=='csc-G7-7590'):
            for i in range(N):
                if(mask[i]==0):
                    id=int(inp['idx'][i])
                    image = np.ones([480, 640]) * 2000
                    image2 = np.ones([480, 640]) * 2000
                    v=(vertexPre[i]*scale[i]+root[i]).detach().cpu().numpy()*1000
                    v2=(cloud[i]*scale[i]+root[i]).detach().cpu().numpy()*1000
                    # import trimesh
                    # tmesh = trimesh.Trimesh(v,mano_right.faces)
                    # tmesh.show()
                    vertex_uvd=perspective_projection(v,train_dataset2.demo.camera[id%4]).astype(int)
                    vertex_uvd2=perspective_projection(v2,train_dataset2.demo.camera[id%4]).astype(int)
                    # NOTE(review): this inner loop reuses `i`, shadowing the
                    # batch index — harmless only because of the break below.
                    for i in range(vertex_uvd.shape[0]):
                        c = 3
                        u0 = np.clip(vertex_uvd[i, 0] - c, 0, 640)
                        u1 = np.clip(vertex_uvd[i, 0] + c, 0, 640)
                        v0 = np.clip(vertex_uvd[i, 1] - c, 0, 480)
                        v1 = np.clip(vertex_uvd[i, 1] + c, 0, 480)
                        image[v0:v1, u0:u1] = np.minimum(image[v0:v1, u0:u1], vertex_uvd[i, 2])
                        u0 = np.clip(vertex_uvd2[i, 0] - c, 0, 640)
                        u1 = np.clip(vertex_uvd2[i, 0] + c, 0, 640)
                        v0 = np.clip(vertex_uvd2[i, 1] - c, 0, 480)
                        v1 = np.clip(vertex_uvd2[i, 1] + c, 0, 480)
                        image2[v0:v1, u0:u1] = np.minimum(image2[v0:v1, u0:u1], vertex_uvd2[i, 2])
                    image = 255 - visualize_better_qulity_depth_map(image)
                    image2 = 255 - visualize_better_qulity_depth_map(image2)
                    cv2.imshow("dep",image)
                    cv2.imshow("dep2",image2)
                    cv2.waitKey(1)
                    break
        # Biomechanical and bone-consistency losses, split by sample origin.
        synBioLoss,synBioEudloss=biolayer(joint_pre[mask==1],scale.reshape(N)[mask==1])
        realBioLoss,realBioEudloss=biolayer(joint_pre[mask!=1],scale.reshape(N)[mask!=1])
        synpose_loss_bone, syneucLoss_bone = pose3d_loss(pose_rgb[mask==1], joint_pre[mask==1], scale[mask==1])
        realpose_loss_bone, realeucLoss_bone = pose3d_loss(pose_rgb[mask!=1], joint_pre[mask!=1], scale[mask!=1])
        # Cloud-distance loss on the synthetic split only; the real-split
        # variant is deliberately disabled here.
        syncdloss,syncdlossEud=cloud_dis2(vertexPre[mask==1],cloud[mask==1],scale[mask==1])
        realcdloss, realcdlossEud = None,None#,cloud_dis2(vertexPre[mask != 1], cloud[mask != 1], scale[mask != 1])
        # Total objective: weighted KL + synthetic pose/bone/bio/cloud terms.
        # Real-data losses are computed above but NOT added to `loss`.
        loss = 0.0001*latent_loss_sum + pose_loss_syn+\
               synpose_loss_bone+\
               synBioLoss+\
               syncdloss
        #loss = 0.0001*latent_loss_sum + cloudRec_loss_sum
        # Scalar log dictionary ("_d*" keys are mm-scale distances, "_l*" are
        # raw loss values — presumably; confirm with LossHelper conventions).
        dicloss={'epoch':int(epoch),'iter':int(idx),
                 "loss":float(loss),"_epe":float(eucLoss_syn)*1000,"_loss":float(pose_loss_syn),
                 "_dcd":float(syncdlossEud)*1000,"_lcd":float(syncdloss),
                 "_dbio":float(synBioEudloss)*1000,"_lbio":float(synBioLoss),
                 "_dbone":float(syneucLoss_bone)*1000,"_lbone":float(synpose_loss_bone),
                 "_dgmatch":float(Greedymatchloss)*1000,
                 }
        losshelp.add(dicloss)
        # if(idx%5==0):
        #     print('epoch:{} iteration:{}'.format(epoch,idx))
        losshelp.showcurrent()
        # Standard optimization step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # print('scheduler.step()')
        # scheduler.step()
    losshelp.show()
    losshelp.finish()
    # Checkpoint both networks plus optimizer state once per epoch.
    print('save model')
    torch.save({
        'epoch': epoch,
        'encoderRGB': encoderRGB.state_dict(),
        'decoderPose': decoderPose.state_dict(),
        'optimizer': optimizer.state_dict()}, './models/' + platform.node() + 'rgb2poseSyn.pt')
#print(epoch, 'epoch mean epeloss', np.mean(epe)*1000)
# aveloss,epe=[],[]
# #aveCD,aveEMD=[],[]
# with torch.no_grad():
# for idx,(image, depth, cloud, heatmap, pose_gt, viewRotation, scale) in enumerate(test_loader):
# # image, depth, cloud, heatmap, pose_gt, viewRotation, scale = image.cuda(), depth.cuda(), cloud.cuda(), heatmap.cuda(), \
# # pose_gt.cuda(), viewRotation.cuda(), scale.cuda()
# pose_gt, scale, image =pose_gt.cuda(), scale.cuda(), image.cuda()
# encoderRGB.eval()
# decoderPose.eval()
# z_rgb, mn_rgb, sd_rgb = encoderRGB(image,training=False) # encode rgb
# pose_rgb = decoderPose(z_rgb, )
# pose_loss_rgb, eucLoss_rgb = utils.pose3d_loss(pose_rgb, pose_gt, scale)
# epe.append(float(eucLoss_rgb))
#
# print('len(epe)',len(epe[:len(epe)//2]))
# print(epoch, 'epoch test mean rgb epeloss', np.mean(epe[:len(epe)//2])*1000)
| 48.115942
| 140
| 0.607631
|
4a140847022acc877c91c0d1da4231daad62b057
| 5,266
|
py
|
Python
|
tests/report_tests/test_image_report.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 185
|
2017-12-15T09:24:07.000Z
|
2022-01-20T11:20:13.000Z
|
tests/report_tests/test_image_report.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 191
|
2017-12-15T09:14:52.000Z
|
2022-02-17T14:09:19.000Z
|
tests/report_tests/test_image_report.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 29
|
2017-12-15T09:40:45.000Z
|
2022-03-13T11:21:11.000Z
|
import os
import unittest
import warnings
import numpy as np
import pytest
import six
# Feature-detect optional dependencies: chainer first, then the image_report
# backend (which itself reports whether PIL is usable via `_available`).
try:
    import chainer  # NOQA
    _chainer_installed = True
except (ImportError, TypeError):
    # TypeError is caught alongside ImportError — presumably to tolerate a
    # broken/partial chainer installation; confirm before narrowing.
    _chainer_installed = False
if _chainer_installed:
    from chainerui.report import image_report
    _image_report_available = image_report._available
else:
    _image_report_available = False
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_available():
    """check_available() returns True and emits no warnings when PIL works."""
    with warnings.catch_warnings(record=True) as caught:
        assert image_report.check_available()
    assert not caught
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_available_not_installed():
    """check_available() is False, with exactly one warning, when PIL fails."""
    import sys

    def assert_unavailable():
        with warnings.catch_warnings(record=True) as caught:
            assert not image_report.check_available()
            assert len(caught) == 1

    if 'PIL' not in sys.modules:
        assert_unavailable()
        return
    saved_pil = sys.modules['PIL']
    try:
        # Poison the module entry so reloading image_report hits a failed import.
        sys.modules['PIL'] = ImportError()
        six.moves.reload_module(image_report)
        assert_unavailable()
    finally:
        # Restore PIL and reload so later tests see the working module.
        sys.modules['PIL'] = saved_pil
        six.moves.reload_module(image_report)
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_report_error(func_dir):
    """An unbatched 1-D array is rejected with a dimensionality error."""
    bad = np.zeros(10)
    with pytest.raises(ValueError) as excinfo:
        image_report.report(bad, func_dir, 'test', batched=False)
    assert 'must be 2 or 3' in str(excinfo.value)
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_report_error_batch(func_dir):
    """A batched 2-D array is rejected with a dimensionality error."""
    bad = np.zeros((2, 5))
    with pytest.raises(ValueError) as excinfo:
        image_report.report(bad, func_dir, 'test')
    assert 'must be 3 or 4' in str(excinfo.value)
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bchw_row0(func_dir):
    """Batched (B, C, H, W) float input is written with the default row layout."""
    batch = np.zeros((10, 3, 5, 5), dtype=np.float32)
    filename, created_at = image_report.report(batch, func_dir, 'test')
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bchw_row2(func_dir):
    """Batched (B, C, H, W) float input is written with an explicit 2-row grid."""
    batch = np.zeros((10, 3, 5, 5), dtype=np.float32)
    filename, created_at = image_report.report(batch, func_dir, 'test', row=2)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhwc_row0(func_dir):
    """Channels-last (B, H, W, C) input works when ch_axis=-1 is passed."""
    batch = np.zeros((10, 5, 5, 3), dtype=np.float32)
    filename, created_at = image_report.report(
        batch, func_dir, 'test', ch_axis=-1)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhwc_row2(func_dir):
    """Channels-last input combined with an explicit 2-row grid works."""
    batch = np.zeros((10, 5, 5, 3), dtype=np.float32)
    filename, created_at = image_report.report(
        batch, func_dir, 'test', ch_axis=-1, row=2)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_chw_chainer_variable(func_dir):
    """A chainer.Variable wrapping a (C, H, W) image is accepted unbatched."""
    var = chainer.Variable(np.zeros((3, 5, 5), dtype=np.float32))
    filename, created_at = image_report.report(
        var, func_dir, 'test', ch_axis=0, batched=False)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_hwc_hsv(func_dir):
    """An unbatched (H, W, C) image is accepted with an explicit HSV mode."""
    img = np.zeros((5, 5, 3), dtype=np.float32)
    filename, created_at = image_report.report(
        img, func_dir, 'test', ch_axis=-1, mode='HSV', batched=False)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhw_uint8(func_dir):
    """A batched grayscale (B, H, W) uint8 array is written out successfully."""
    batch = np.zeros((8, 5, 10), dtype=np.uint8)
    filename, created_at = image_report.report(batch, func_dir, 'test')
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_hw(func_dir):
    """An unbatched grayscale (H, W) float image is written out successfully."""
    img = np.zeros((5, 10), dtype=np.float32)
    filename, created_at = image_report.report(
        img, func_dir, 'test', batched=False)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
| 34.644737
| 78
| 0.707938
|
4a1408a343d120bf40fab6c48ffc2e7cb606f534
| 1,216
|
py
|
Python
|
apps/identifyabill/support_functions.py
|
joelmpiper/bill_taxonomy
|
9284dfae905ca8efa558b4fd93469d03cf4b8074
|
[
"MIT"
] | null | null | null |
apps/identifyabill/support_functions.py
|
joelmpiper/bill_taxonomy
|
9284dfae905ca8efa558b4fd93469d03cf4b8074
|
[
"MIT"
] | null | null | null |
apps/identifyabill/support_functions.py
|
joelmpiper/bill_taxonomy
|
9284dfae905ca8efa558b4fd93469d03cf4b8074
|
[
"MIT"
] | null | null | null |
def formatted_query(query_results, col):
    """Aggregate per-bill scores from query results and sort by mean score.

    Args:
        query_results: DataFrame-like object (rows accessed via ``.iloc``,
            length via ``.shape[0]``) with 'bill_num' and 'bill_name' columns
            plus a numeric score column named by *col*.
        col: name of the score column to average per bill name.

    Returns:
        A list of dicts with keys 'bill_num', 'bill_name', 'score' — one per
        unique bill name, sorted by descending mean score.  'score' is
        formatted with '{0:2.2g}' and 'bill_num' is the comma-joined list of
        that bill's numbers.
    """
    # Single pass: the original built an intermediate `bills` list and kept a
    # redundant `seen` set that merely duplicated dict membership.
    uniq_bills = {}
    for i in range(query_results.shape[0]):
        row = query_results.iloc[i]
        name = row['bill_name']
        entry = uniq_bills.get(name)
        if entry is None:
            uniq_bills[name] = dict(bill_num=[row['bill_num']],
                                    bill_name=name,
                                    score=row[col])
        else:
            count = len(entry['bill_num'])
            # Incremental mean: fold the new score into the running average.
            entry['score'] = (entry['score'] * count + row[col]) / (count + 1)
            entry['bill_num'].append(row['bill_num'])

    sorted_bills = sorted(uniq_bills.values(), key=lambda b: -b['score'])
    for bill in sorted_bills:
        bill['score'] = "{0:2.2g}".format(bill['score'])
        bill['bill_num'] = ", ".join(bill['bill_num'])
    return sorted_bills
def create_histogram(query_frame):
    """Placeholder for histogram creation; currently always returns 0.

    NOTE(review): stub — *query_frame* is ignored.  Intended behavior is not
    derivable from this file; confirm before relying on the return value.
    """
    return 0
| 39.225806
| 77
| 0.519737
|
4a140a5d7451028abe313a9d08b5ffcf432f2a66
| 35,363
|
py
|
Python
|
post_optimization_studies/mad_analyses/pre_select_two_signal/Output/Histos/MadAnalysis5job_0/selection_8.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/pre_select_two_signal/Output/Histos/MadAnalysis5job_0/selection_8.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/pre_select_two_signal/Output/Histos/MadAnalysis5job_0/selection_8.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
def selection_8():
    """Build and save the stacked step-histogram of Delta-eta(j1, j2).

    MadAnalysis5-generated plot: 18 samples (2 signals + 16 binned
    backgrounds) drawn as cumulative step outlines over 100 uniform bins
    on [2.4, 8.0], then written to the HTML/PDF/DVI output directories.

    Fixes over the generated original:
    - the ``normed`` keyword (removed in matplotlib >= 3.1) is no longer
      passed to ``hist``; ``normed=False`` was the no-op default, so the
      drawn output is unchanged;
    - the x-axis label is wrapped in ``$...$`` so mathtext renders the
      Greek symbols (``usetex`` is off);
    - the PDF output is saved with a ``.pdf`` extension (was ``.png``).
    """
    # Library import (kept function-local as in the generated original)
    import numpy
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    # Histo binning: 100 uniform bins over [2.4, 8.0]
    xBinning = numpy.linspace(2.4,8.0,101,endpoint=True)
    # Creating data sequence: middle of each bin
    xData = numpy.array([2.428,2.484,2.54,2.596,2.652,2.708,2.764,2.82,2.876,2.932,2.988,3.044,3.1,3.156,3.212,3.268,3.324,3.38,3.436,3.492,3.548,3.604,3.66,3.716,3.772,3.828,3.884,3.94,3.996,4.052,4.108,4.164,4.22,4.276,4.332,4.388,4.444,4.5,4.556,4.612,4.668,4.724,4.78,4.836,4.892,4.948,5.004,5.06,5.116,5.172,5.228,5.284,5.34,5.396,5.452,5.508,5.564,5.62,5.676,5.732,5.788,5.844,5.9,5.956,6.012,6.068,6.124,6.18,6.236,6.292,6.348,6.404,6.46,6.516,6.572,6.628,6.684,6.74,6.796,6.852,6.908,6.964,7.02,7.076,7.132,7.188,7.244,7.3,7.356,7.412,7.468,7.524,7.58,7.636,7.692,7.748,7.804,7.86,7.916,7.972])
    # Per-sample event weights, one array per stacked sample (generated data).
    y9_sdETA_0_weights = numpy.array([100.116712404,94.7534770962,90.4546808574,85.5663651344,82.2870080036,78.2174915642,74.61057472,71.3803375463,67.0242613576,62.9547449182,59.7531477194,57.4809497075,53.6325130746,51.646874812,48.7687173302,46.8322390245,43.7739617003,42.0708031905,39.9091290818,36.7894398114,34.8897814735,33.73934648,32.3432637015,30.7957010555,28.7077148824,27.0782723081,26.1652891069,25.1499579952,23.7620632096,22.5870602376,21.4857532012,20.2657142687,19.4714629636,18.1408841278,18.3537799415,16.8103092919,16.4868775749,14.9270309397,14.1491556203,14.1368716311,13.3876562866,12.9946206305,12.4828610782,11.7131737516,11.4429659881,10.5095148048,10.3785029194,10.325278966,9.47780370747,9.13799600478,8.6385164418,8.1595088609,7.90567708299,7.46351346986,7.60271334806,6.86168599642,6.64879418269,6.2844185015,5.99783475224,5.88319885254,5.81359891344,5.26089939702,5.13807550448,5.0520995797,4.74504384836,4.46664409194,4.29469224239,4.1595883606,3.95897893612,3.80749786866,3.54547649791,3.58641766209,3.39399543045,3.18929120955,3.11559767403,2.98049299224,2.82082393194,2.61611971105,2.35000434388,2.37866271881,2.23127564776,2.39503910448,2.00200704836,1.8054908203,1.85461997731,1.74817367045,1.68676252418,1.58850461015,1.38380038925,1.28554247522,1.34695362149,1.19956655045,1.15043739343,1.18319016478,1.01123871522,1.01533271164,0.970297951045,0.888416022687,0.84338126209,0.708276580299])
    y9_sdETA_1_weights = numpy.array([0.520152518205,0.530569320573,0.543069723413,0.526402519626,0.516680117416,0.543764123571,0.526402519626,0.542375323256,0.572236930042,0.526402519626,0.528485720099,0.53473612152,0.53126372073,0.501401713944,0.53473612152,0.534041721362,0.49654051284,0.545152923887,0.529874920415,0.53473612152,0.477790108578,0.54098612294,0.50904091568,0.537513722151,0.459734104475,0.492373711893,0.485429310315,0.459039704317,0.475706908105,0.474317707789,0.467373306211,0.471540107158,0.429177697531,0.440289300056,0.425010896584,0.427094497058,0.430566497847,0.421538495795,0.41042729327,0.412510493744,0.380565406484,0.404871692008,0.353481360329,0.3819543268,0.357648121276,0.340286557331,0.345842278593,0.316674871965,0.319452712596,0.311119150702,0.317369312123,0.306257909598,0.302091148651,0.293757586757,0.25139537713,0.264590180128,0.244450775552,0.233339373027,0.256256618235,0.218755649713,0.238895054289,0.209033167503,0.218061209555,0.194449484189,0.180560241033,0.190282683242,0.170837758823,0.173615599454,0.167365438034,0.150698354246,0.124308788249,0.126392148723,0.129169989354,0.142364792353,0.133336790301,0.115975226356,0.120141987302,0.13055894967,0.111113985251,0.111113985251,0.0958358217788,0.0895856603585,0.0736130167287,0.0756964172021,0.0715296162552,0.072224096413,0.0729185365708,0.0708351760974,0.0680572954661,0.056945932941,0.0548625324676,0.0513902116785,0.0631960943614,0.0430566497847,0.0513902116785,0.0527791319941,0.0291674186283,0.0409732893112,0.0423622096269,0.0319452712596])
    y9_sdETA_2_weights = numpy.array([34.6386142067,33.6689260013,31.8709891579,30.2175762342,29.7910183719,27.8848888715,28.1297904213,26.4768501645,25.5620353841,26.7919387465,26.3527991317,26.6938603604,26.341439103,26.5351964897,26.5737468834,25.6972782083,25.9897548854,25.9879483365,26.0623332917,27.0704116648,25.4420580992,26.5007679119,26.5002471771,26.5374877226,26.4642644064,27.3861892189,27.6910834169,27.0955551416,26.8766182271,26.3424525329,26.9501099221,25.7958012217,25.2987919543,26.6463813688,26.756885287,25.1394030606,25.9423560068,26.3783551906,25.8096247264,26.0622091165,24.383456439,25.2245151516,24.5924633425,24.0190662938,23.4385752356,23.5458746323,23.983636303,24.0938558199,22.8937665243,22.4295675513,22.782573634,21.3214480228,21.9559673134,21.568797026,21.0308099398,20.3623788004,20.8747096863,19.3561310103,20.0836174752,19.0040822784,18.373128018,18.2256759669,17.9341726631,17.4110705793,16.0746689505,16.001782109,15.6365547825,14.7381070956,14.8730374789,14.1075493946,13.9723866835,13.2921588993,12.9402984331,12.7218582195,11.6536870571,10.7298275147,11.0826172921,10.1100690512,9.62433969939,9.36848269164,9.1131824695,8.3840256477,8.116512193,7.87413019814,7.31520556966,7.01202178502,6.29412485064,6.59739275397,5.55280286411,5.2000771771,4.62972442382,4.38674158119,4.48336992133,3.82714119924,3.66987970726,3.2442046906,3.12274251054,2.70948501469,2.79483063519,2.43008078239])
    y9_sdETA_3_weights = numpy.array([45.5722579985,46.4548346101,45.8016312262,46.607144053,47.2483643283,47.6997556343,50.0300157325,50.5413225817,50.9919287875,50.9860611964,51.1843196627,51.6175297005,52.8818303038,52.3996548049,53.0127354354,51.5155079928,52.7429502068,52.4291580447,52.890962259,52.8405092395,54.0570840136,51.9563624274,52.6895220707,52.8740619437,52.0780942828,53.1723173856,52.0363186868,51.7074030152,52.0869369905,52.3178804187,51.1126276162,52.1169360832,50.0787745883,49.1959087292,49.7586189824,49.8892348665,49.3471851455,47.9202448332,46.7666846822,47.4613744146,46.4664458292,46.0863746801,43.7841715846,43.4022823086,43.4019930612,42.8505634586,41.6868796468,42.1269489812,40.7320159653,39.3071664989,38.9766186453,37.4896181661,36.3848912488,36.0844292621,34.7285942372,35.412346284,33.9251102747,32.7110891424,31.284843024,31.5379386748,30.1500839572,29.66836299,27.4287819674,26.313766105,25.1915521133,24.0560947205,23.5746671328,21.8173277228,20.6531521903,21.1848551169,18.965802399,18.7344168354,16.4149291387,16.7267048328,16.2245836562,14.5279448896,13.5351071501,11.8370675994,11.2152062427,10.2007658508,10.4321472823,9.35701037904,8.1119158064,7.6801520059,7.12856951535,6.32536653606,5.4812185142,5.53180789318,4.48779401557,4.08675821129,3.84530270437,3.2331394015,3.24294323721,2.82125392538,2.55031253342,2.26925781098,1.92754748509,1.85761943482,1.5964715483,1.52624433353])
    y9_sdETA_4_weights = numpy.array([33.1165506516,33.9267195404,34.3058949769,34.0209791723,35.5149143043,34.2129191156,35.5594643915,35.9233239582,36.230490431,35.454341461,36.1207118011,36.2748658279,36.5667773694,36.8457780795,35.5377093562,35.9446036733,35.9173276123,36.1698363364,35.8841770824,35.8579613508,35.4669516626,35.1038152325,34.7684789343,34.4278775519,34.3550642005,33.7829128665,33.2506599829,33.1073245666,32.8640906782,31.8192680382,31.945837249,31.6654715171,31.1039153525,30.6044554932,29.8136609765,28.6455923038,28.3327016769,28.261488976,27.3035971006,26.3793066999,25.2784937898,25.0642909905,23.8052289882,23.6799719858,22.9587163933,22.6842048175,21.633101452,20.8353883873,20.0491358182,19.5973501585,18.7230293066,18.1244062995,17.1502194761,17.051909467,15.955606004,14.8673017314,14.0534643463,13.4972463921,13.0091373392,11.9969533459,11.5957466197,10.7973835446,10.054604483,9.35616831781,8.86656017843,8.27891828438,7.27191812397,7.11190181594,6.23181212182,5.87994118431,5.55568756329,5.10998762582,4.51036116519,3.88273916326,3.3061657274,3.19035337603,2.82726122788,2.48079147046,2.04623636741,1.62804527595,1.79837399883,1.45792049383,1.02869872604,1.20996387375,0.891175621344,0.819485975312,0.709429465948,0.709461153954,0.489455386939,0.478484430308,0.335496119495,0.373998184989,0.318971474082,0.291486597208,0.176064333316,0.15945380528,0.16515159324,0.104464673098,0.0990225829762,0.0495038976199])
    y9_sdETA_5_weights = numpy.array([7.28097384333,7.44872615569,7.56220177068,7.48805180947,7.52267570076,7.56338823419,7.61654260121,7.82278683624,7.64313381385,7.68649584857,7.57519274447,7.57989049865,7.43882159035,7.52072765594,7.30287932676,7.22860109923,7.14472293889,7.06472082658,7.07376761086,7.01757894357,6.74437968655,6.57641493309,6.37309677361,6.32276827408,6.22796342612,6.1114414859,5.99611402583,5.81467729985,5.67630599272,5.55702632649,5.29630898628,5.15051426555,4.99362851597,4.80690080405,4.61352729314,4.40829716375,4.33433158539,4.10144002121,3.77865300268,3.78676264112,3.47774140516,3.26344645404,3.00101275006,2.93295503849,2.83915828369,2.51746954557,2.44641040198,2.29345320946,2.00923988338,1.91063434386,1.80488757666,1.59481419098,1.52761506213,1.30461243082,1.17825486841,1.10824911202,0.940467538908,0.84573003077,0.784561824221,0.736215039419,0.623749521745,0.580284473131,0.478573686032,0.393713165969,0.33262644862,0.284186189733,0.245699477884,0.265511334214,0.188467724787,0.19241600279,0.135183888943,0.100656758568,0.0858657272236,0.0828953597037,0.063165673989,0.0660963991976,0.0364981108188,0.038498143585,0.0325704917104,0.0207230001823,0.019736223289,0.0148049762989,0.0177677841222,0.00295924682943,0.00690557839133,0.00295972822898,0.00296205866778,0.0029616277731,0.000985643740472,0.0019738608241,0.0,0.000987667542592,0.000983916955063,0.000986323551998,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_6_weights = numpy.array([2.0660214254,2.1673484884,2.12874093892,2.11115138818,2.12243097816,2.08091230862,2.08294801215,2.05141741313,2.02571745631,2.01880054543,1.99944775856,2.02032972367,1.9279040624,1.91222418402,1.89890168874,1.84971232186,1.79082455612,1.77312217748,1.71388472524,1.64128636821,1.63220852283,1.51322749316,1.53546139266,1.44315576108,1.39651302412,1.3327512528,1.26011888736,1.2482903616,1.14793833977,1.1156959647,1.05770642112,0.994439572221,0.946819793584,0.869685514606,0.847479622033,0.778871451968,0.718179639995,0.670544657598,0.609014238197,0.574199626888,0.508417755901,0.48200722332,0.443657337568,0.380866966522,0.36602573558,0.314843435762,0.281066681163,0.248794618747,0.223098342835,0.182760925472,0.181737792403,0.143427316399,0.134364274678,0.102592415986,0.0960287124695,0.0879757605914,0.0640252764971,0.053192236971,0.0491636805173,0.0363011389988,0.0310109984954,0.0234432786382,0.0183986348578,0.0178974388882,0.0103344802087,0.0115946038972,0.00882122990986,0.00629995827943,0.00428640423171,0.00630064244866,0.00252250593574,0.00251949919203,0.00226949655448,0.00176421877524,0.00126035094475,0.000504566403278,0.000756567535148,0.000756113422825,0.000756363884776,0.0,0.000252073989891,0.000252297285123,0.0,0.000503939848301,0.000252040301558,0.000252122441876,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_7_weights = numpy.array([1.05537189438,1.0180994003,0.992929945329,1.00758492819,0.998029734213,0.988522024305,0.964052834376,0.935701147128,0.909222031583,0.85316304088,0.884123352864,0.82113478722,0.820481906279,0.78623039858,0.742966514775,0.741479113838,0.68548860016,0.683937220689,0.638736986582,0.611903190041,0.575134526916,0.554796950719,0.520824949238,0.483022732897,0.467329198499,0.417785021902,0.379573342947,0.36990328751,0.320646114615,0.299188514216,0.290611891903,0.265389754608,0.232752105416,0.218168198708,0.198955145204,0.169517622284,0.15228360476,0.139117722267,0.122583669886,0.103964217212,0.0847204240218,0.0818871246698,0.0681502537591,0.0624184969175,0.047247707125,0.0423740623637,0.0386502717955,0.0317705101404,0.0237669955345,0.0200326884948,0.0163084180876,0.0143133974706,0.0100205678303,0.00829436501772,0.00629712114669,0.00571425271471,0.00372153332559,0.0031502640018,0.00286251354981,0.00257804999497,0.00143118979738,0.00171930112828,0.00228822223997,0.000572739030669,0.000287115465048,0.000575022564482,0.000284455857418,0.000284761754781,0.0,0.0,0.000569994851415,0.0,0.0,0.0,0.0,0.000286302037975,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_8_weights = numpy.array([0.184011618051,0.180702107982,0.174907866972,0.166838107288,0.163867812781,0.155521115064,0.146022702144,0.141226326532,0.135312475785,0.128028812752,0.119006617577,0.112724256521,0.106632080815,0.102592926203,0.0923850272131,0.0860733295016,0.078524085913,0.0731351098506,0.0668425647845,0.0593794452359,0.0559473917642,0.0493924931748,0.042366238343,0.0375926070213,0.0337969089352,0.0291334828571,0.0266030160693,0.0215072976018,0.0195004230678,0.0160858207155,0.0139919755694,0.0120272117096,0.0107125146183,0.00907084792343,0.00671477498255,0.00647503163907,0.00490135893083,0.00393189141596,0.00289202430868,0.0024588073955,0.00200826718697,0.00142544003389,0.0014687237552,0.000777449881496,0.000669479671984,0.000432300352231,0.000475325491867,0.000322282192675,0.000345603241605,0.00017288795439,0.000259163036265,0.000129550421277,0.000172811762903,0.000108005036314,6.47898370572e-05,4.31969267114e-05,8.64389480539e-05,2.15977676578e-05,0.000108021883937,4.31577832304e-05,4.32119722249e-05,2.15940125659e-05,4.31577832304e-05,0.0,4.31895925474e-05,2.15977676578e-05,0.0,2.16179764229e-05,0.0,0.0,0.0,2.15983460091e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_9_weights = numpy.array([0.0542632171968,0.0497148859268,0.0462666321281,0.0443328354908,0.0414030129319,0.0363670040627,0.0359831107191,0.0341607013621,0.0297427055353,0.0255666700265,0.024408218552,0.0200932050995,0.0177851822456,0.0181145327975,0.0145701749858,0.0133790899042,0.0106914801074,0.0109661170965,0.00867702161598,0.00804722091549,0.0061131168903,0.00586145974604,0.00442380501275,0.00347931928205,0.00223858288586,0.0025205140267,0.00198465225904,0.00164620332206,0.0011625740364,0.000790674909618,0.000879071504807,0.000590967806864,0.000511183418991,0.000340443097473,0.000284026872996,0.000170225335399,0.000198748705864,0.00019868128842,0.000139776974143,0.00011371212781,0.0,0.0,8.51037282057e-05,0.0,8.51542170409e-05,0.0,0.0,5.68217983203e-05,0.0,2.82185960966e-05,0.0,2.81447487502e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_10_weights = numpy.array([16058.9919082,16043.6648799,15196.2717091,15152.1940059,14458.917065,13989.6239163,13700.2483924,13108.5312785,12466.872508,11940.3779363,11511.0019678,11257.9214313,10916.7700831,10514.9066302,9709.9108035,9224.46006008,8807.85789693,8536.85096656,8132.26510127,7942.69598611,7397.89049906,7342.8000992,6584.03914144,6704.16751318,5917.04424723,5672.20785231,5207.98269745,5192.27114766,4647.25032855,4493.89929647,4360.85869169,3785.15151625,3807.94979787,3474.72882175,3398.86003188,3318.23086545,2856.79195361,2594.04685287,2486.40697144,2361.69894826,2043.47200475,2147.94381328,1949.75334086,1905.315341,1681.47997406,1605.61464488,1504.01598124,1295.39805392,1219.82995989,1112.92874433,1039.98539427,925.400284937,938.449407639,821.038449566,735.113268079,625.523093442,568.242843463,594.296099847,492.677056563,539.576378749,427.492197456,404.018696028,343.987963533,351.929755893,310.150530164,299.785329301,224.169823767,185.064445413,190.294092054,169.469290293,148.526363518,122.480066973,114.605642785,96.4503433066,106.883989002,78.1989903495,62.5591922796,44.2879211071,62.5523862485,44.3399468703,57.3379282084,26.058785804,33.8608315043,33.887667262,20.8514875549,20.8477500056,33.8740898067,23.4353684275,23.4533140474,13.014338884,7.8173765737,7.82582451175,15.634245579,13.0169882374,7.8223215206,0.0,7.81907615887,0.0,2.60389560286,0.0])
    y9_sdETA_11_weights = numpy.array([6323.47552141,6047.68663025,5746.41502715,5675.71137103,5463.17716569,5150.27039411,4966.07767392,4528.9008389,4411.86427968,4324.71979189,4146.70633116,3847.56822704,3791.61399383,3593.61874721,3324.11223142,3151.15221985,3111.29599637,2757.21904295,2701.69843523,2624.59697547,2416.12661437,2305.52979772,2171.69111639,2090.66124823,1896.8914454,1802.12061255,1765.23974482,1622.97518536,1451.33374928,1377.65896977,1286.09704251,1154.3610793,1139.53547374,1074.31196657,995.345186158,960.456994934,842.579733234,784.64244253,769.907255772,634.055505611,629.847760816,582.406361685,521.337117559,517.258652414,456.013187812,422.362002761,365.446559956,354.925158737,294.909466805,285.415069281,270.671533212,242.221045313,238.030153045,201.179527516,177.992029554,176.969566037,153.787339299,145.332065437,115.847802053,101.126659066,75.8386746021,91.6451895067,87.408780028,69.5285195017,50.553614321,51.610629363,41.083225875,34.7574071599,43.2015268045,34.755391013,28.4438276879,28.4394298712,23.1797450306,15.7899240163,14.7429935565,9.48106940763,11.5858690785,5.2670760968,6.32419117662,5.26835735047,6.31763485152,8.43165339267,1.05314704104,1.05386615909,4.21069975784,4.21387788257,3.15805521449,1.05386615909,1.0532270713,0.0,2.10401552825,0.0,0.0,0.0,0.0,0.0,3.15992553704,0.0,1.05386615909,2.10794432142])
    y9_sdETA_12_weights = numpy.array([1284.33575125,1228.36641599,1165.2784403,1113.91679845,1059.7429728,1004.92863473,967.179898969,917.915003523,848.320466783,831.282913308,786.361360619,721.879012074,710.840451031,672.347460589,604.171503241,577.470934288,533.90379448,507.375360839,472.86799446,444.789958789,410.908504065,393.871719052,358.645879865,339.723137352,307.037254577,290.906562176,270.657993538,244.611538767,219.03392218,210.983176742,201.766137727,185.178864098,161.46860825,166.54644623,150.180835237,124.37648417,125.996669361,108.026665055,99.7430374478,94.6661984677,79.6938460705,70.2448854624,65.1882954351,56.6571080494,54.1231456343,52.747369531,41.6900964576,39.1650866157,35.4756006069,31.3215260047,26.7189889677,31.101396449,22.340393054,19.5769988602,14.7419177766,12.6705382711,13.1275921024,12.6682751529,9.21212086343,7.59995840754,6.21989044843,3.91664314228,5.06701036128,4.14385406807,5.98595164474,3.68259483287,3.91766519569,1.38190189179,1.15204323289,1.38260195995,1.15258077151,1.84227893352,0.690229552932,1.15101772141,0.921170205207,0.691319615158,1.1527751922,0.23081493202,0.230104989132,0.460502300908,0.691215488664,0.46056723588,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230090580484,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_13_weights = numpy.array([140.36643055,133.470725328,126.076891367,119.679891954,111.925426329,107.582073051,102.068994389,96.4201498702,87.0297999243,80.6122182691,76.6483476811,70.030560438,69.0052185226,61.226785165,58.3758945265,56.2414198098,50.8128629918,47.0461202732,44.3583487147,38.5730458521,36.803396191,33.0901326783,32.1499512336,28.9379942031,25.4193194421,24.4509153188,20.8509079994,19.327822061,17.9444414592,16.1982978732,12.9600494325,14.592490556,10.4675167936,10.8813429748,11.492312414,7.81038776986,7.31100486175,7.11687776791,6.39747645915,5.34517757692,4.70768589866,3.73824727723,3.84832149436,3.04600106343,2.6024283483,2.1320067712,2.13150625723,1.68922310614,1.52315057049,1.21850260563,1.21820868351,1.10767050032,0.886064381644,0.581874996833,0.498303398349,0.30452312491,0.63690152574,0.304552055464,0.360081060386,0.331981644445,0.276731326933,0.221497859929,0.110815985756,0.0831133646725,0.110666177809,0.0277207449983,0.0,0.0276982007104,0.0,0.0831080556081,0.0277272966916,0.0,0.0,0.0,0.0,0.0276899024119,0.0276605255889,0.0,0.0,0.0,0.0,0.0276409743822,0.0,0.0276859436965,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_14_weights = numpy.array([29.4910646312,28.3801456294,25.6080638352,24.6615413543,22.5336468204,21.9267675152,21.676801789,19.205131562,17.9755832151,16.605935851,15.3450932419,13.8135312947,12.6326629113,11.6940595718,11.2705523852,10.2547783781,9.24473892161,8.40810135446,7.97550991144,6.80529140787,5.79687340319,5.58530064547,4.88013174855,4.45700383122,4.74899319426,3.61933404495,3.23732376616,3.33752002007,2.67202935915,2.35927788853,1.90570466519,2.12746186353,1.60327902676,1.3512870927,1.15910864603,1.06871574303,0.897111286416,0.816258369776,0.786392587151,0.56478570079,0.484107551409,0.494034331693,0.352755878148,0.363062837903,0.231938908226,0.121035428995,0.171364697785,0.131082847235,0.131167864224,0.0605028168024,0.0504043276863,0.0705390390144,0.131018887273,0.0705957170073,0.0402651141579,0.0402994668472,0.0201979006151,0.0403418175658,0.0201574978231,0.0100786032722,0.0100441049436,0.0,0.0,0.0100703200323,0.0101024274478,0.0,0.0201061660093,0.0100912678301,0.0,0.0100796470211,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_15_weights = numpy.array([12.1686064206,11.2349466497,10.60912236,9.75481166567,9.37596830233,8.42244048289,7.57114616147,7.05584654814,6.55232387059,6.11920503093,5.4176867085,4.92310926965,4.47028012552,4.24648756644,3.76000739614,3.51952398348,3.07539461301,2.76115357124,2.54907908249,1.99163642972,1.90137993302,1.63800094143,1.40342061683,1.17110949731,1.13185894408,0.893974844658,0.854376485166,0.837410541455,0.540413611464,0.506555594402,0.489476151963,0.449909341269,0.339517246899,0.322514214113,0.243286292595,0.195236163779,0.206532903753,0.127322603394,0.135799611756,0.101849256746,0.0735913438558,0.0848556501255,0.0678982861495,0.0254615196954,0.0396075163394,0.0509435384153,0.0226314847082,0.0141470239036,0.011328234909,0.0113161732645,0.0311458783976,0.0141362857698,0.0056596621485,0.0084882312708,0.0,0.0,0.00283439102991,0.00283100684412,0.0,0.00566819956127,0.0,0.00565636106686,0.0,0.0,0.00282583784274,0.0,0.0,0.0,0.0,0.0,0.00282706132055,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_16_weights = numpy.array([1.67400086491,1.62839888973,1.51879557279,1.31325700603,1.29446178896,1.08140619885,1.00857632793,0.889717790452,0.798517621355,0.668683036332,0.645957319417,0.549815991028,0.495241798875,0.451064885422,0.383798072612,0.298596397162,0.266513011252,0.269687259436,0.223973146317,0.213297236145,0.149363366204,0.120296597273,0.133899319538,0.103677848851,0.0594593770059,0.0440993267593,0.0746093074549,0.0304758902887,0.0334421338166,0.0335694795065,0.0182613152138,0.0183258093007,0.0152444737231,0.0152349969445,0.00153120637154,0.00608146981473,0.00457343901168,0.00151695220936,0.00610721427558,0.00304353953755,0.0,0.00303214258869,0.00459134326977,0.0,0.00151695220936,0.0,0.00152062711978,0.00153788856395,0.0,0.0,0.0,0.0,0.0,0.00152228378358,0.0,0.0,0.0,0.00152612057911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y9_sdETA_17_weights = numpy.array([0.455018647635,0.413863210226,0.367625637427,0.317963130556,0.278961323118,0.259104716137,0.21612993896,0.180550754833,0.167028292483,0.143724028939,0.124944814634,0.098215578504,0.0857534862181,0.0660897893325,0.0594084142329,0.0440537873866,0.0373746036594,0.0310600966526,0.0272701585016,0.0180518750783,0.015349007844,0.0115557730063,0.0101098985556,0.0077657077635,0.0054178582653,0.00342913465248,0.00289026616269,0.00180458825027,0.00126345136267,0.0014449767198,0.00108404574435,0.000360835348423,0.000181037285724,0.000541730368248,0.000361456904827,0.00036062418331,0.000180614031193,0.000180410530106,0.000360690232584,0.0,0.000361805829854,0.000361022635665,0.0,0.0,0.0,0.000180821999753,0.0,0.000180183921402,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Samples ordered bottom-of-stack first.  cumulative[k-1] is the
    # left-to-right sum of the first k samples, matching the generated
    # original's chained additions exactly.
    stack_order = [y9_sdETA_0_weights, y9_sdETA_1_weights, y9_sdETA_2_weights,
                   y9_sdETA_3_weights, y9_sdETA_4_weights, y9_sdETA_5_weights,
                   y9_sdETA_6_weights, y9_sdETA_7_weights, y9_sdETA_8_weights,
                   y9_sdETA_9_weights, y9_sdETA_10_weights, y9_sdETA_11_weights,
                   y9_sdETA_12_weights, y9_sdETA_13_weights, y9_sdETA_14_weights,
                   y9_sdETA_15_weights, y9_sdETA_16_weights, y9_sdETA_17_weights]
    cumulative = numpy.cumsum(numpy.vstack(stack_order), axis=0)
    # (label, edge colour) pairs ordered top-of-stack first, i.e. the k-th
    # entry is drawn with the cumulative sum of the first len-k+1 samples.
    layers = [
        ("$bg\_dip\_1600\_inf$", "#f2f2f2"),
        ("$bg\_dip\_1200\_1600$", "#ccc6aa"),
        ("$bg\_dip\_800\_1200$", "#ccc6aa"),
        ("$bg\_dip\_600\_800$", "#c1bfa8"),
        ("$bg\_dip\_400\_600$", "#bab5a3"),
        ("$bg\_dip\_200\_400$", "#b2a596"),
        ("$bg\_dip\_100\_200$", "#b7a39b"),
        ("$bg\_dip\_0\_100$", "#ad998c"),
        ("$bg\_vbf\_1600\_inf$", "#9b8e82"),
        ("$bg\_vbf\_1200\_1600$", "#876656"),
        ("$bg\_vbf\_800\_1200$", "#afcec6"),
        ("$bg\_vbf\_600\_800$", "#84c1a3"),
        ("$bg\_vbf\_400\_600$", "#89a8a0"),
        ("$bg\_vbf\_200\_400$", "#829e8c"),
        ("$bg\_vbf\_100\_200$", "#adbcc6"),
        ("$bg\_vbf\_0\_100$", "#7a8e99"),
        ("$signal2$", "#758991"),
        ("$signal1$", "#688296"),
    ]
    # Creating a new Canvas
    fig = plt.figure(figsize=(12,6),dpi=80)
    frame = gridspec.GridSpec(1,1,right=0.7)
    pad = fig.add_subplot(frame[0])
    # Creating a new Stack: draw from the full cumulative sum down to the
    # first sample so each step outline sits in front of the previous one.
    # NOTE: the 'normed' keyword of the original generated code was removed
    # (dropped from matplotlib in 3.1); normed=False was the default no-op.
    for depth, (label, edge_colour) in zip(range(len(stack_order), 0, -1), layers):
        pad.hist(x=xData, bins=xBinning, weights=cumulative[depth - 1],
                 label=label, histtype="step", rwidth=1.0,
                 color=None, edgecolor=edge_colour, linewidth=1,
                 linestyle="solid", bottom=None, cumulative=False,
                 align="mid", orientation="vertical")
    # Axis (mathtext delimiters added so \Delta\eta renders with usetex off)
    plt.rc('text',usetex=False)
    plt.xlabel(r"$\Delta\eta ( j_{1} , j_{2} )$ ",\
               fontsize=16,color="black")
    plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
               fontsize=16,color="black")
    # Boundary of y-axis: 10% headroom above the tallest stacked bin
    ymax = cumulative[-1].max() * 1.1
    ymin = 0  # linear scale
    plt.gca().set_ylim(ymin,ymax)
    # Log/Linear scale for X-axis and Y-axis
    plt.gca().set_xscale("linear")
    plt.gca().set_yscale("linear")
    # Legend placed outside the axes on the right
    plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
    # Saving the image (fix: PDF output now carries a .pdf extension,
    # the generated original wrote a .png into the PDF directory)
    plt.savefig('../../HTML/MadAnalysis5job_0/selection_8.png')
    plt.savefig('../../PDF/MadAnalysis5job_0/selection_8.pdf')
    plt.savefig('../../DVI/MadAnalysis5job_0/selection_8.eps')
# Running!
if __name__ == '__main__':
selection_8()
| 175.064356
| 1,545
| 0.799451
|
4a140ac72a4fbbcbc6796a97ff95340ff9cffe53
| 814
|
py
|
Python
|
foreshadow/intents/intent_type.py
|
adithyabsk/foreshadow
|
ca2e927c396ae0d61923b287d6e32e142f3ba96f
|
[
"Apache-2.0"
] | 25
|
2018-07-26T17:30:31.000Z
|
2021-02-23T22:54:01.000Z
|
foreshadow/intents/intent_type.py
|
adithyabsk/foreshadow
|
ca2e927c396ae0d61923b287d6e32e142f3ba96f
|
[
"Apache-2.0"
] | 150
|
2018-11-02T18:09:12.000Z
|
2020-05-15T01:01:35.000Z
|
foreshadow/intents/intent_type.py
|
adithyabsk/foreshadow
|
ca2e927c396ae0d61923b287d6e32e142f3ba96f
|
[
"Apache-2.0"
] | 1
|
2019-02-20T22:24:00.000Z
|
2019-02-20T22:24:00.000Z
|
"""A utility class for the intents."""
class IntentType:
    """A utility class enumerating the supported intent types."""

    NUMERIC = "Numeric"
    CATEGORICAL = "Categorical"
    TEXT = "Text"
    DROPPABLE = "Droppable"

    # All intent types currently recognized by the framework.
    _registered_types = [NUMERIC, CATEGORICAL, TEXT, DROPPABLE]

    @classmethod
    def is_valid(cls, intent):
        """Check if an intent is valid.

        Args:
            intent: user provided intent type

        Returns:
            bool: whether it's a valid intent

        """
        # The membership test already yields the boolean answer; no need
        # for an explicit if/else returning True/False.
        return intent in cls._registered_types

    @classmethod
    def list_intents(cls):
        """List all the registered/valid intent types.

        Returns:
            a list of registered intents.

        """
        return cls._registered_types
| 20.871795
| 63
| 0.579853
|
4a140ace888b1d71375e73e89bf32fc66787eb96
| 15,080
|
py
|
Python
|
utils/render.py
|
Yosshi999/Tokyo2020-Pictogram-using-MediaPipe
|
edd5a8721d12376d3ea2155de8cbd139f59aea76
|
[
"Apache-2.0"
] | null | null | null |
utils/render.py
|
Yosshi999/Tokyo2020-Pictogram-using-MediaPipe
|
edd5a8721d12376d3ea2155de8cbd139f59aea76
|
[
"Apache-2.0"
] | null | null | null |
utils/render.py
|
Yosshi999/Tokyo2020-Pictogram-using-MediaPipe
|
edd5a8721d12376d3ea2155de8cbd139f59aea76
|
[
"Apache-2.0"
] | null | null | null |
import math
import cv2 as cv
import numpy as np
__all__ = ['draw_stick_figure', 'draw_landmarks']
def draw_stick_figure(
        image,
        landmarks,
        color=(100, 33, 3),
        bg_color=(255, 255, 255),
        visibility_th=0.5,
):
    """Render a pictogram-style stick figure from pose landmarks.

    The image is cleared to ``bg_color``, then a head circle and
    capsule-shaped limbs are drawn in ``color``. Limb segments are drawn
    back-to-front (sorted by landmark z) so nearer limbs overdraw farther
    ones. Landmarks below ``visibility_th`` are skipped.

    Args:
        image: BGR image buffer (numpy array); drawn on in place.
        landmarks: pose result with a ``.landmark`` sequence of normalized
            (x, y, z, visibility) entries — presumably MediaPipe Pose
            output (TODO confirm against caller).
        color: fill color for the figure.
        bg_color: background fill color.
        visibility_th: minimum visibility for a landmark to be drawn.

    Returns:
        The image with the stick figure drawn.
    """
    image_width, image_height = image.shape[1], image.shape[0]
    # Convert each normalized landmark to pixel coordinates, keeping its
    # index, visibility and depth: [index, visibility, (x, y), z].
    landmark_point = []
    for index, landmark in enumerate(landmarks.landmark):
        landmark_x = min(int(landmark.x * image_width), image_width - 1)
        landmark_y = min(int(landmark.y * image_height), image_height - 1)
        landmark_z = landmark.z
        landmark_point.append(
            [index, landmark.visibility, (landmark_x, landmark_y), landmark_z])
    # Snap both leg-root landmarks (23/24) to the midpoint of the hips so
    # the legs emerge from a single point.
    right_leg = landmark_point[23]
    left_leg = landmark_point[24]
    leg_x = int((right_leg[2][0] + left_leg[2][0]) / 2)
    leg_y = int((right_leg[2][1] + left_leg[2][1]) / 2)
    landmark_point[23][2] = (leg_x, leg_y)
    landmark_point[24][2] = (leg_x, leg_y)
    # Sort by depth (z), farthest first, so nearer limbs are drawn last.
    sorted_landmark_point = sorted(landmark_point,
                                   reverse=True,
                                   key=lambda x: x[3])
    # Derive the head radius from the face landmarks and scale the three
    # stick thicknesses from it (each joint tapers by 3/4).
    face_visible, (face_x, face_y), face_radius = min_enclosing_face_circle(landmark_point, visibility_th)
    face_x = int(face_x)
    face_y = int(face_y)
    face_radius = int(face_radius * 1.5)
    stick_radius01 = int(face_radius * (4 / 5))
    stick_radius02 = int(stick_radius01 * (3 / 4))
    stick_radius03 = int(stick_radius02 * (3 / 4))
    # Root landmark indices of the four limbs to draw (labels follow the
    # original source; MediaPipe's left/right naming may be mirrored —
    # TODO confirm).
    draw_list = [
        11,  # right arm
        12,  # left arm
        23,  # right leg
        24,  # left leg
    ]
    # Fill the background.
    cv.rectangle(image, (0, 0), (image_width, image_height),
                 bg_color,
                 thickness=-1)
    # Draw the face circle.
    if face_visible:
        cv.circle(image, (face_x, face_y), face_radius, color, -1)
    # Draw arms/legs: each limb is two capsule segments, root -> middle
    # joint (index + 2) -> end joint (index + 4).
    for landmark_info in sorted_landmark_point:
        index = landmark_info[0]
        if index in draw_list:
            point01 = [p for p in landmark_point if p[0] == index][0]
            point02 = [p for p in landmark_point if p[0] == (index + 2)][0]
            point03 = [p for p in landmark_point if p[0] == (index + 4)][0]
            if point01[1] > visibility_th and point02[1] > visibility_th:
                image = draw_stick(
                    image,
                    point01[2],
                    stick_radius01,
                    point02[2],
                    stick_radius02,
                    color=color,
                    bg_color=bg_color,
                )
            if point02[1] > visibility_th and point03[1] > visibility_th:
                image = draw_stick(
                    image,
                    point02[2],
                    stick_radius02,
                    point03[2],
                    stick_radius03,
                    color=color,
                    bg_color=bg_color,
                )
    return image
def min_enclosing_face_circle(landmark_point, visibility_th):
    """Compute the smallest circle enclosing the visible face landmarks.

    Args:
        landmark_point: list of [index, visibility, (x, y), z] entries as
            built by ``draw_stick_figure``.
        visibility_th: landmarks with visibility <= this are ignored.

    Returns:
        Tuple ``(found, center, radius)``: ``(True, (cx, cy), r)`` when at
        least one face landmark is visible, otherwise ``(False, (0, 0), 0)``.
    """
    # Indices of the face landmarks used to size the head circle.
    face_indices = [1, 4, 7, 8, 9, 10]
    # Collect visible points first, then build the array once — avoids the
    # O(n^2) repeated np.append of the original implementation.
    visible_points = [
        landmark_point[index][2]
        for index in face_indices
        if landmark_point[index][1] > visibility_th
    ]
    if not visible_points:
        return False, (0, 0), 0
    center, radius = cv.minEnclosingCircle(
        points=np.array(visible_points, dtype=int))
    return True, center, radius
def draw_stick(
        image,
        point01,
        point01_radius,
        point02,
        point02_radius,
        color=(100, 33, 3),
        bg_color=(255, 255, 255),
):
    """Draw a capsule ("stick") between two points.

    A filled circle is drawn at each endpoint and the two circles are
    joined by a filled quadrilateral whose corners sit on the circles,
    perpendicular to the segment. ``bg_color`` is accepted for interface
    symmetry but not used.

    Returns the image with the stick drawn.
    """
    cv.circle(image, point01, point01_radius, color, -1)
    cv.circle(image, point02, point02_radius, color, -1)
    # Angle of the segment; corner offsets are at +90° and +270° from it.
    base_angle = math.atan2(point02[1] - point01[1], point02[0] - point01[0])
    corners = []
    for half in range(2):
        angle = base_angle + (math.pi / 2) + (math.pi * half)
        corners.append([
            int(point01_radius * math.cos(angle)) + point01[0],
            int(point01_radius * math.sin(angle)) + point01[1],
        ])
        corners.append([
            int(point02_radius * math.cos(angle)) + point02[0],
            int(point02_radius * math.sin(angle)) + point02[1],
        ])
    # Corner order: p01-side, p02-side, then the opposite pair reversed so
    # the polygon is convex.
    quad = np.array((corners[0], corners[1], corners[3], corners[2]))
    cv.fillConvexPoly(image, points=quad, color=color)
    return image
def draw_landmarks(
        image,
        landmarks,
        # upper_body_only,
        visibility_th=0.5,
):
    """Draw pose landmarks and their skeleton connections onto *image*.

    Every landmark whose visibility exceeds ``visibility_th`` gets a green
    circle plus a small "z:<depth>" label; connected landmark pairs that
    are both visible get a green line segment. Returns the annotated image.
    """
    image_width, image_height = image.shape[1], image.shape[0]
    # Per-landmark record: [visibility, (pixel_x, pixel_y)].
    landmark_point = []
    for index, landmark in enumerate(landmarks.landmark):
        pixel_x = min(int(landmark.x * image_width), image_width - 1)
        pixel_y = min(int(landmark.y * image_height), image_height - 1)
        depth = landmark.z
        landmark_point.append([landmark.visibility, (pixel_x, pixel_y)])
        if landmark.visibility < visibility_th:
            continue
        # Indices 0-32 cover the full set of pose landmarks; every one is
        # rendered with the identical marker, so a single range test
        # replaces the original per-index branches.
        if 0 <= index <= 32:
            cv.circle(image, (pixel_x, pixel_y), 5, (0, 255, 0), 2)
        # Label each visible landmark with its (relative) depth value.
        cv.putText(image, "z:" + str(round(depth, 3)),
                   (pixel_x - 10, pixel_y - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
                   cv.LINE_AA)

    def _connect(a, b):
        # Draw a segment only when both endpoints are sufficiently visible.
        if (landmark_point[a][0] > visibility_th
                and landmark_point[b][0] > visibility_th):
            cv.line(image, landmark_point[a][1], landmark_point[b][1],
                    (0, 255, 0), 2)

    # Upper-body skeleton, in the same drawing order as before.
    upper_body_pairs = [
        (1, 2), (2, 3),                            # right eye
        (4, 5), (5, 6),                            # left eye
        (9, 10),                                   # mouth
        (11, 12),                                  # shoulders
        (11, 13), (13, 15),                        # right arm
        (12, 14), (14, 16),                        # left arm
        (15, 17), (17, 19), (19, 21), (21, 15),    # right hand
        (16, 18), (18, 20), (20, 22), (22, 16),    # left hand
        (11, 23), (12, 24), (23, 24),              # torso
    ]
    for a, b in upper_body_pairs:
        _connect(a, b)

    # Legs exist only in full-body results (more than 25 landmarks).
    if len(landmark_point) > 25:
        leg_pairs = [
            (23, 25), (25, 27), (27, 29), (29, 31),    # right leg
            (24, 26), (26, 28), (28, 30), (30, 32),    # left leg
        ]
        for a, b in leg_pairs:
            _connect(a, b)
    return image
| 39.067358
| 106
| 0.5437
|
4a140b1a501f10649df140d49b2e67da9a015d68
| 3,612
|
py
|
Python
|
train.py
|
vpekar/forecastml
|
55f667c33c5c0b17062117fb271748ef2a0a784e
|
[
"MIT"
] | 2
|
2020-02-10T17:30:34.000Z
|
2021-04-05T22:02:21.000Z
|
train.py
|
vpekar/forecastml
|
55f667c33c5c0b17062117fb271748ef2a0a784e
|
[
"MIT"
] | 1
|
2020-06-25T05:01:35.000Z
|
2020-06-25T08:49:09.000Z
|
train.py
|
vpekar/forecastml
|
55f667c33c5c0b17062117fb271748ef2a0a784e
|
[
"MIT"
] | 1
|
2020-06-24T14:57:42.000Z
|
2020-06-24T14:57:42.000Z
|
# -*- coding: utf-8 -*-
"""
Run multiple configurations
Created on Mon May 21 19:03:56 2018
@author: user
"""
import settings
import importlib
import json
import time
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from utils import run_config_space
from get_logger import get_logger
from learner_configs import ConfigSpace
from run import get_val_results
LOGGER = get_logger('main', 'logs/all-learners.log')
START = time.time()
TOTAL_RUNS = 0
N_RUNS = 0
def save(result):
    """Append *result* to the accumulated ``results.json`` in the cwd.

    Loads the existing list (or starts a fresh one), appends the new
    result dict, and rewrites the file.

    Args:
        result: JSON-serializable object describing one completed run.
    """
    if os.path.exists("results.json"):
        # Context managers guarantee the handles are closed; the original
        # json.load(open(...)) leaked its file handle.
        with open("results.json") as infile:
            all_results = json.load(infile)
    else:
        all_results = []
    all_results.append(result)
    with open("results.json", "w") as out:
        json.dump(all_results, out)
def log_time():
    """Log progress (runs completed so far) and elapsed wall-clock time."""
    elapsed = time.time() - START
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    LOGGER.info("Completed %d of %d, %d hrs %02d min %02d sec" %
                (N_RUNS, TOTAL_RUNS, hours, minutes, seconds))
def do_one_config(LearnerConfig, learner_config_settings, preproc_config):
    """Evaluate one learner configuration space, persist and log the result.

    Builds the hyper-parameter space, runs the validation sweep, appends
    the outcome to results.json via ``save``, and bumps the run counter.
    """
    global N_RUNS
    space = ConfigSpace(LearnerConfig,
                        learner_config_settings,
                        preproc_config)
    outcome = run_config_space(preproc_config, space, get_val_results)
    save(outcome)
    N_RUNS += 1
    log_time()
def get_learner_config(learner):
    """Resolve a learner name to its Config class and parameter settings.

    "BiLSTM" is an alias for the LSTM learner with bidirectionality
    switched on; the LSTM settings get a ``bidirectional`` entry either way.
    """
    is_bilstm = (learner == "BiLSTM")
    if is_bilstm:
        learner = "LSTM"
    bidirectional = [is_bilstm]
    # Look up the Config<Learner> class and its parameter grid.
    LearnerConfig = getattr(importlib.import_module("learner_configs"),
                            "Config%s" % learner)
    learner_config_settings = settings.__dict__[learner]
    if learner == "LSTM":
        learner_config_settings["bidirectional"] = bidirectional
    return LearnerConfig, learner_config_settings
def do_one_learner(learner):
    """Run the full experiment grid (data files x rfe steps x horizons) for
    one learner, covering a no-exogenous baseline plus feature-selection
    variants.

    NOTE(review): ``preproc_config`` is the shared settings.PREPROCESSING
    dict mutated in place across all nested loops — each ``do_one_config``
    call sees the keys set most recently, so the statement order below is
    load-bearing.
    """
    # parameters of data preprocessing
    preproc_config = settings.PREPROCESSING
    LearnerConfig, learner_config_settings = get_learner_config(learner)
    data_files = [
        "data/macroeconomy/macroeconomy.csv",
    ]
    # Commented alternatives preserved from the original sweep setup.
    horizons = [1]#[3, 7, 14]
    n_features_settings = [0]#[0.05, 0.1, 0.2, 0.3, 0.5, 0.7]
    rfe_steps = [0]
    # Fixed preprocessing choices for this experiment.
    preproc_config['difference'] = 1
    preproc_config['deseason'] = 0
    preproc_config['date_format'] = '%d/%m/%Y'
    for data_file in data_files:
        print(data_file)
        preproc_config['data_file'] = data_file
        for rfe_step in rfe_steps:
            preproc_config['rfe_step'] = rfe_step
            for horizon in horizons:
                preproc_config['horizon'] = horizon
                # Baseline: no exogenous features, no feature selection.
                preproc_config['use_exog'] = 0
                preproc_config['feature_selection'] = 0
                do_one_config(LearnerConfig, learner_config_settings,
                              preproc_config)
                # Exogenous features with each feature-selection setting.
                preproc_config['use_exog'] = 1
                for n_features in n_features_settings:
                    preproc_config['feature_selection'] = n_features
                    do_one_config(LearnerConfig, learner_config_settings,
                                  preproc_config)
def main():
    """Run the experiment for each selected learner.

    Other available learners: AdaBoost, GB, LSTM, BiLSTM, XGBoost, Lasso,
    LSVR, SVRrbf, SVRsigmoid, SVRpoly, KNN, ElasticNet, KernelRidge.
    """
    for learner in ['RFR']:
        do_one_learner(learner)
if __name__ == "__main__":
    main()
| 26.173913
| 74
| 0.620709
|
4a140b97f29928c17796b441cd18064b423638c5
| 14,217
|
py
|
Python
|
stor/test.py
|
anujkumar93/stor
|
dd51ea53784f30b12c4d43fefe646cda13246084
|
[
"MIT"
] | null | null | null |
stor/test.py
|
anujkumar93/stor
|
dd51ea53784f30b12c4d43fefe646cda13246084
|
[
"MIT"
] | 6
|
2018-09-11T18:04:42.000Z
|
2018-12-04T01:30:57.000Z
|
stor/test.py
|
anujkumar93/stor
|
dd51ea53784f30b12c4d43fefe646cda13246084
|
[
"MIT"
] | null | null | null |
import inspect
import mock
import unittest
import os
import sys
import uuid
import dxpy
import vcr
from stor import Path
from stor import s3
from stor.s3 import S3Path
from stor.swift import SwiftPath
from stor import settings
class SwiftTestMixin(object):
    """A mixin with helpers for mocking out swift.
    SwiftTestMixin should be used to create base test classes for anything
    that accesses swift.
    """
    def disable_get_swift_service_mock(self):
        """Disables the mock for getting the swift service.
        """
        try:
            self._get_swift_patcher.stop()
        except RuntimeError:
            # If the user disables the mock, the mock will try
            # to be stopped on test cleanup. Disable errors from that
            pass
    def setup_swift_mocks(self):
        """Sets all of the relevant mocks for Swift communication.
        If you are testing outside of this library, you should either mock
        swift object methods or you should focus on manipulating return value
        of mock_swift.
        The following variables are set up when calling this:
        - mock_swift_service: A mock of the SwiftService class defined in
          swiftclient.service.
        - mock_swift_get_conn: A mock of the get_conn function in the
          swiftclient.service module
        - mock_swift_conn: A mock of the SwiftConnection returned by
          get_conn
        - mock_swift_get_auth_keystone: mock of the get_keystone_auth function
          that caches identity credentials
        - mock_get_swift_service: A mock of the _get_swift_service method of
          SwiftPath
        - mock_swift: A mock of the SwiftService instance returned by
          _get_swift_service in SwiftPath
        """
        # Ensure that SwiftService will never be instantiated in tests
        swift_service_patcher = mock.patch('swiftclient.service.SwiftService',
                                           autospec=True)
        self.addCleanup(swift_service_patcher.stop)
        self.mock_swift_service = swift_service_patcher.start()
        # Ensure that SwiftConnections will never be instantiated in tests
        swift_get_conn_patcher = mock.patch('swiftclient.service.get_conn',
                                            autospec=True)
        self.addCleanup(swift_get_conn_patcher.stop)
        self.mock_swift_get_conn = swift_get_conn_patcher.start()
        self.mock_swift_conn = mock.Mock()
        self.mock_swift_get_conn.return_value = self.mock_swift_conn
        # Ensure that no keystone auth calls will go out
        swift_keystone_mock_patcher = mock.patch('swiftclient.client.get_auth_keystone',
                                                 autospec=True)
        self.addCleanup(swift_keystone_mock_patcher.stop)
        self.mock_swift_get_auth_keystone = swift_keystone_mock_patcher.start()
        self.mock_swift_get_auth_keystone.return_value = ('dummy_storage_url', 'dummy_auth_token')
        # This is the mock that will always be returned by _get_swift_service.
        # The user can mock out any swift methods on this mock
        self.mock_swift = mock.Mock()
        self._get_swift_patcher = mock.patch.object(SwiftPath,
                                                    '_get_swift_service',
                                                    autospec=True)
        self.addCleanup(self.disable_get_swift_service_mock)
        self.mock_get_swift_service = self._get_swift_patcher.start()
        self.mock_get_swift_service.return_value = self.mock_swift
        # ensures we never cache data between tests
        _cache_patcher = mock.patch.dict('stor.swift._cached_auth_token_map', clear=True)
        self.addCleanup(_cache_patcher.stop)
        _cache_patcher.start()
    def assertSwiftListResultsEqual(self, r1, r2):
        """
        Swift list resolves duplicates, so the ordering of the results are not
        always the same as what the swift client returns. Compare results as
        sorted lists
        """
        # assertEqual, not the deprecated assertEquals alias — the alias was
        # removed in Python 3.12's unittest.
        self.assertEqual(sorted(r1), sorted(r2))
class S3TestMixin(object):
    """A mixin with helpers for mocking out S3.
    S3TestMixin should be used to create base test classes for anything
    that accesses S3.
    """
    def disable_get_s3_client_mock(self):
        """Disables the mock for getting the S3 client."""
        try:
            self._get_s3_client_patcher.stop()
        except RuntimeError:
            # If the user disables the mock, the mock will try
            # to be stopped on test cleanup. Disable errors from that
            pass
    def disable_get_s3_iterator_mock(self):
        """Disables the mock for getting the S3 iterator."""
        try:
            self._get_s3_iterator_patcher.stop()
        except RuntimeError:
            # Same rationale as disable_get_s3_client_mock: ignore a stop()
            # on an already-stopped patcher during cleanup.
            pass
    def setup_s3_mocks(self):
        """Sets all of the relevant mocks for S3 communication.
        If you are testing outside of this library, you should either mock
        S3 client methods or you should focus on manipulating return value
        of mock_s3.
        Tests of methods that directly make API calls via _s3_client_call should
        mock the return values of the API calls on mock_s3. Tests of methods that
        do not directly make the API calls should mock any S3Path methods being called.
        The following variables are set up when calling this:
        - mock_s3_client: A mock of the Client instance returned by boto3.client
        - mock_s3: A mock of the Client instance returned by _get_s3_client in S3Path.
        - mock_get_s3_client: A mock of the _get_s3_client method in S3Path.
        - mock_get_s3_iterator: A mock of the _get_s3_iterator method in S3Path.
        - mock_s3_iterator: A mock of the iterable object returned by _get_s3_iterator in S3Path.
        - mock_s3_transfer: A mock of the Transfer instance returned by S3Transfer
        - mock_get_s3_transfer: A mock of the boto3.s3.transfer.S3Transfer object
        """
        # Ensure that the S3 session will never be instantiated in tests
        s3_session_patcher = mock.patch('boto3.session.Session', autospec=True)
        self.addCleanup(s3_session_patcher.stop)
        self.mock_s3_session = s3_session_patcher.start()
        # This is the mock returned by _get_s3_client.
        # User can mock s3 methods on this mock.
        self.mock_s3 = mock.Mock()
        self._get_s3_client_patcher = mock.patch('stor.s3._get_s3_client',
                                                 autospec=True)
        self.addCleanup(self.disable_get_s3_client_mock)
        self.mock_get_s3_client = self._get_s3_client_patcher.start()
        self.mock_get_s3_client.return_value = self.mock_s3
        # This is the mock returned by _get_s3_iterator.
        # User should modify the __iter__.return_value property to specify return values.
        self.mock_s3_iterator = mock.MagicMock()
        self._get_s3_iterator_patcher = mock.patch.object(S3Path, '_get_s3_iterator',
                                                          autospec=True)
        self.addCleanup(self.disable_get_s3_iterator_mock)
        self.mock_get_s3_iterator = self._get_s3_iterator_patcher.start()
        self.mock_get_s3_iterator.return_value = self.mock_s3_iterator
        # Ensure that an S3Transfer object will never be instantiated in tests.
        # User can mock methods associated with S3Transfer on this mock.
        self.mock_s3_transfer = mock.Mock()
        s3_transfer_patcher = mock.patch('stor.s3.S3Transfer', autospec=True)
        self.addCleanup(s3_transfer_patcher.stop)
        self.mock_get_s3_transfer = s3_transfer_patcher.start()
        self.mock_get_s3_transfer.return_value = self.mock_s3_transfer
        # Mock the TransferConfig object
        s3_transfer_config_patcher = mock.patch('stor.s3.TransferConfig',
                                                autospec=True)
        self.addCleanup(s3_transfer_config_patcher.stop)
        self.mock_get_s3_transfer_config = s3_transfer_config_patcher.start()
class DXTestMixin(object):
    """A mixin with helpers for testing dxpy.
    DXTestMixin should be used to create base test classes for anything
    that accesses DNAnexus. This Mixin introduces vcrpy into the test case
    which records all http interactions for playback.
    """
    vcr_enabled = True  # switch this to False to deactivate vcr recording
    def setUp(self):  # pragma: no cover
        """Sets us vcr cassettes if enabled, and starts patcher for time.sleep.
        To update the cassettes, the easiest error-free way is to delete
        the cassettes and rerecord them.
        Note that changing the record_mode to 'all' temporarily updates the cassettes,
        but playback from two same set of requests errors in certain scenarios.
        """
        super(DXTestMixin, self).setUp()
        self.cassette = None
        if self.vcr_enabled:
            # Auth headers are filtered so recorded cassettes never contain
            # real credentials.
            myvcr = vcr.VCR(cassette_library_dir=self._get_cassette_library_dir(),
                            filter_headers=['authorization'])
            cm = myvcr.use_cassette(self._get_cassette_name())
            self.cassette = cm.__enter__()
            self.addCleanup(cm.__exit__, None, None, None)
        # A rewound cassette means we are in playback mode: patch out
        # time.sleep so replayed polling loops don't actually wait.
        if self.cassette and self.cassette.rewound:
            patcher = mock.patch('time.sleep')
            self.addCleanup(patcher.stop)
            patcher.start()
    def _get_cassette_library_dir(self):
        """Sets up different directories for Python 2 and 3, as well as by TestClass
        subdir, because cassette recording and playback are in different formats
        (unicode/binary) in Python 2 vs 3, making them incompatible with each other.
        """
        testdir = os.path.dirname(inspect.getfile(self.__class__))
        cassette_dir = os.path.join(testdir, 'cassettes_py{}'.format(sys.version_info[0]))
        return os.path.join(cassette_dir, self.__class__.__name__)
    def _get_cassette_name(self):
        # One cassette per test method, named after the test.
        return '{}.yaml'.format(self._testMethodName)
    def assert_dx_lists_equal(self, r1, r2):
        # DNAnexus listings are unordered; compare as sorted lists.
        self.assertEqual(sorted(r1), sorted(r2))
class SwiftTestCase(unittest.TestCase, SwiftTestMixin):
    """A TestCase class that sets up swift mocks and provides additional assertions"""
    def setUp(self):
        super(SwiftTestCase, self).setUp()
        self.setup_swift_mocks()
        # make sure swift credentials aren't included
        dummy_credentials = {
            'username': '__dummy__',
            'password': '__dummy__',
            'auth_url': '__dummy__',
        }
        settings.update({'swift': dummy_credentials})
class S3TestCase(unittest.TestCase, S3TestMixin):
    """A TestCase class that sets up S3 mocks"""
    def setUp(self):
        super(S3TestCase, self).setUp()
        self.setup_s3_mocks()
        try:
            # Clear cached transfer state so it can't leak between tests.
            # NOTE(review): if s3_transfer is absent, s3_transfer_config is
            # never deleted either — presumably both are always set together.
            del s3._thread_local.s3_transfer
            del s3._thread_local.s3_transfer_config
        except AttributeError:
            # Nothing cached on a fresh thread-local; nothing to clear.
            pass
class DXTestCase(DXTestMixin, unittest.TestCase):
    """A TestCase class that sets up DNAnexus vars and provides additional assertions.
    Since DXTestCase inherits from DXTestMixin, all the tests under DXTestCase are
    auto-wrapped with VCRpy, and hence use cassettes for playback.
    Look into `DXTestMixin` to turn off VCRpy and additional details.
    """
    def new_proj_name(self):
        """Output a unique project name for each test case.
        Should only be called once within a test case, and the result reused
        everywhere within a test case.
        """
        return '{0}.{1}.{2}'.format(self.__class__.__name__,
                                    self._testMethodName,
                                    str(uuid.uuid4())[:8])
    def setup_temporary_project(self):
        """Create a throwaway DX project and register its teardown."""
        self.project_handler = self.setup_project()
        self.project = self.project_handler.name
        self.proj_id = self.project_handler.get_id()
        self.addCleanup(self.teardown_project)
    def setup_project(self):
        """Create and return a new uniquely-named DXProject."""
        test_proj = dxpy.DXProject()
        test_proj.new(self.new_proj_name())
        return test_proj
    def setup_files(self, files):
        """Sets up files for testing.
        This does not assume the files will be closed by the end of this function.
        Args:
            files (List[str]): list of files relative to project root to be created on DX
                Only virtual paths are allowed. Path must start with '/'
        """
        for i, curr_file in enumerate(files):
            dx_p = Path(curr_file)
            self.project_handler.new_folder(dx_p.parent, parents=True)
            with dxpy.new_dxfile(name=dx_p.name,
                                 folder='/'+dx_p.parent.lstrip('/'),
                                 project=self.proj_id) as f:
                # Each file gets distinct content ("data0", "data1", ...).
                f.write('data{}'.format(i).encode())
    def setup_file(self, obj):
        """Set up a closed file for testing.
        Args:
            obj (str): file relative to project root to be created on DX
                Only virtual paths are allowed. Path must start with '/'
        """
        dx_p = Path(obj)
        self.project_handler.new_folder(dx_p.parent, parents=True)
        with dxpy.new_dxfile(name=dx_p.name,
                             folder='/'+dx_p.parent.lstrip('/'),
                             project=self.proj_id) as f:
            f.write('data'.encode())
            # to allow for max of 20s for file state to go to closed
            f.wait_on_close(20)
        return f
    def setup_posix_files(self, files):
        """Sets up posix files for testing
        Args:
            files (List[Str]): list of relative posix files to be created.
        """
        for i, curr_file in enumerate(files):
            posix_p = Path('./{test_folder}/{path}'.format(
                test_folder=self.project, path=curr_file))
            posix_p.open(mode='w').write('data'+str(i))
        self.addCleanup(self.teardown_posix_files)
    def teardown_posix_files(self):
        """Remove the local scratch folder created by setup_posix_files."""
        posix_p = Path('./{test_folder}'.format(
            test_folder=self.project))
        posix_p.rmtree()
    def teardown_project(self):
        """Destroy the temporary DX project and drop the handle."""
        self.project_handler.destroy()
        self.project_handler = None
| 40.853448
| 98
| 0.650489
|
4a140bb97029c8fcac912d9476db9eb28380e5e2
| 2,499
|
py
|
Python
|
data/p4VQE/R2/benchmark/startQiskit_noisy96.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_noisy96.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_noisy96.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the benchmark circuit on ``n`` qubits.

    NOTE(review): relies on the module-level globals E, V, gamma and beta,
    and hard-codes qubits 0-3, so callers must pass n >= 4.
    """
    # circuit begin
    qubits = QuantumRegister(n, "qc")
    circuit = QuantumCircuit(qubits)
    # Hadamard on the first four qubits (generator gate numbers 1-4).
    for idx in range(4):
        circuit.h(qubits[idx])
    # One controlled-phase plus two phase gates per weighted edge.
    for k, l, _weight in E:
        circuit.cp(-2 * gamma, qubits[k - 1], qubits[l - 1])
        circuit.p(gamma, k)
        circuit.p(gamma, l)
    circuit.rx(2 * beta, range(len(V)))
    # Fixed tail of gates emitted by the benchmark generator.
    circuit.cx(qubits[0], qubits[2])  # number=7
    circuit.x(qubits[2])  # number=8
    circuit.cx(qubits[0], qubits[2])  # number=9
    circuit.cx(qubits[0], qubits[2])  # number=10
    circuit.x(qubits[2])  # number=11
    circuit.cx(qubits[0], qubits[2])  # number=12
    # circuit end
    return circuit
if __name__ == '__main__':
    # Problem instance: a 4-node weighted graph.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search the closed-form objective F1 over (gamma, beta) in [0, pi).
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    # Take the first maximizer and map its grid indices back to angles.
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    # Build the circuit, run it on the FakeYorktown noisy simulator, and
    # dump counts, depth, and the circuit diagram to the benchmark CSV.
    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit_noisy96.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 27.461538
| 118
| 0.633854
|
4a140c3c79d11b64e2f60eddaba9639b92aea32e
| 1,651
|
py
|
Python
|
parser/fase2/team25/analizer/statement/pl/codeblock.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team25/analizer/statement/pl/codeblock.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team25/analizer/statement/pl/codeblock.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from analizer.abstract import instruction
from analizer.reports.Nodo import Nodo
from analizer.reports.AST import AST
class CodeBlock(instruction.Instruction):
    """PL code block: an optional declarations section plus an instruction list."""

    def __init__(self, lista_instrucciones, row, column, lista_declaraciones=None):
        instruction.Instruction.__init__(self, row, column)
        self.lista_instrucciones = lista_instrucciones
        self.lista_declaraciones = lista_declaraciones

    def execute(self, environment):
        # The block itself is a no-op here; its parts are handled elsewhere.
        pass

    def generate3d(self, environment, instanciaAux):
        # Declarations are translated before the instructions.
        if self.lista_declaraciones:
            for decl in self.lista_declaraciones:
                decl.generate3d(environment, instanciaAux)
        # Instructions emit 3-address code via side effects only.
        for instr in self.lista_instrucciones:
            if instr != None:  # noqa: E711 - preserve original comparison semantics
                instr.generate3d(environment, instanciaAux)
            else:
                print('venia un null en codeblock')

    def dot(self):
        root = Nodo("CODEBLOCK")
        if self.lista_declaraciones:
            decl_node = Nodo("DECLARACIONES")
            for decl in self.lista_declaraciones:
                decl_node.addNode(decl.dot())
            root.addNode(decl_node)
        instr_node = Nodo("INSTRUCCIONES")
        for instr in self.lista_instrucciones:
            if instr != None:  # noqa: E711
                instr_node.addNode(instr.dot())
        root.addNode(instr_node)
        return root
| 38.395349
| 89
| 0.67656
|
4a140d89fbc983352c6ca42614ed7896e977c541
| 881
|
py
|
Python
|
tests/routes/authentication_test.py
|
ajorpheus/sbr-ui
|
ea79ace6abb0b2c4aea8c4040c7134da9b873a4c
|
[
"MIT"
] | 1
|
2019-01-11T12:34:16.000Z
|
2019-01-11T12:34:16.000Z
|
tests/routes/authentication_test.py
|
ajorpheus/sbr-ui
|
ea79ace6abb0b2c4aea8c4040c7134da9b873a4c
|
[
"MIT"
] | 2
|
2017-10-11T09:40:12.000Z
|
2018-10-17T14:06:19.000Z
|
tests/routes/authentication_test.py
|
ajorpheus/sbr-ui
|
ea79ace6abb0b2c4aea8c4040c7134da9b873a4c
|
[
"MIT"
] | 4
|
2017-10-19T09:24:57.000Z
|
2021-04-11T08:10:09.000Z
|
from sbr_ui import app
app.testing = True
# TODO: fix tests below, the response is 200 even if the login failed, due to rendering of a template returning 200
def test_login():
    with app.test_client() as client:
        credentials = {
            "username": "admin",
            "password": "admin",
        }
        form_headers = {"content-type": "application/x-www-form-urlencoded"}
        login_response = client.post('/Login', data=credentials,
                                     follow_redirects=True, headers=form_headers)
        assert login_response.status_code == 200
        logout_response = client.post('/Logout', follow_redirects=True)
        assert logout_response.status_code == 200
def test_logout():
    """This really shouldn't pass: a fresh context is used here, and logging
    out normally requires being logged in first."""
    with app.test_client() as client:
        response = client.post('/Logout', follow_redirects=True)
        assert response.status_code == 200
| 33.884615
| 116
| 0.668558
|
4a140e53ca915217379ec66d0341919746bb8c0d
| 58
|
py
|
Python
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 27
|
2020-05-03T11:01:27.000Z
|
2022-03-17T05:33:10.000Z
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 54
|
2020-05-09T01:18:41.000Z
|
2022-01-22T10:31:15.000Z
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 9
|
2020-09-29T11:31:32.000Z
|
2022-03-09T01:37:50.000Z
|
from .network import NetworkViewSet, NetworkViewSetForElo
| 29
| 57
| 0.87931
|
4a140ecc339e32205dccfb6627fb537694ab936a
| 40,679
|
py
|
Python
|
test/base/driver_util.py
|
nzahara1/galaxy
|
fcaeb857b8bbf41d11c40fb7c27e58827d7038c8
|
[
"CC-BY-3.0"
] | null | null | null |
test/base/driver_util.py
|
nzahara1/galaxy
|
fcaeb857b8bbf41d11c40fb7c27e58827d7038c8
|
[
"CC-BY-3.0"
] | null | null | null |
test/base/driver_util.py
|
nzahara1/galaxy
|
fcaeb857b8bbf41d11c40fb7c27e58827d7038c8
|
[
"CC-BY-3.0"
] | 1
|
2020-06-30T17:53:16.000Z
|
2020-06-30T17:53:16.000Z
|
"""Scripts for drivers of Galaxy functional tests."""
import fcntl
import logging
import os
import random
import shutil
import signal
import socket
import string
import struct
import subprocess
import sys
import tempfile
import threading
import time
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
from paste import httpserver
from six.moves import (
http_client,
shlex_quote
)
from six.moves.urllib.parse import urlparse
from sqlalchemy_utils import (
create_database,
database_exists,
)
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication
from .api_util import get_master_api_key, get_user_api_key
from .instrument import StructuredTestDataPlugin
from .nose_util import run
from .test_logging import logging_config_file
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
DEFAULT_WEB_HOST = socket.gethostbyname('localhost')
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(GALAXY_TEST_DIRECTORY, "shed_functional", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Return the tool shed test temp dir, creating and exporting it if unset.

    The directory (exported as TOOL_SHED_TEST_TMP_DIR) is where everything
    happens: per-test subdirectories hold the hgweb.config file, the
    database, new repositories, etc. Because the tool shed browses
    repository contents via HTTP, the full path must not contain characters
    that are invalid in URLs.
    """
    tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None)
    if tmp_dir is None:
        tmp_dir = tempfile.mkdtemp()
    os.environ['TOOL_SHED_TEST_TMP_DIR'] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Return GALAXY_TEST_TMP_DIR, or a fresh temp directory if unset."""
    configured = os.environ.get('GALAXY_TEST_TMP_DIR', None)
    return tempfile.mkdtemp() if configured is None else configured
def configure_environment():
    """Prepare environment variables expected by the test cases."""
    # no op remove if unused
    os.environ.setdefault('HTTP_ACCEPT_LANGUAGE', DEFAULT_LOCALES)
    # Used by get_filename in the tool shed's twilltestcase.
    os.environ.setdefault("TOOL_SHED_TEST_FILE_DIR", TOOL_SHED_TEST_DATA)
    os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Return the module-level logger used by the test driver script."""
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR is set for the test data resolver.

    Returns the first directory of the (comma-separated) value for
    backward compatibility.
    """
    file_dirs = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
    os.environ['GALAXY_TEST_FILE_DIR'] = file_dirs
    return file_dirs.split(",")[0]
def setup_galaxy_config(
    tmpdir,
    use_test_file_dir=False,
    default_install_db_merged=True,
    default_tool_data_table_config_path=None,
    default_shed_tool_data_table_config=None,
    default_job_config_file=None,
    enable_tool_shed_check=False,
    default_tool_conf=None,
    shed_tool_conf=None,
    datatypes_conf=None,
    update_integrated_tool_panel=False,
    prefer_template_database=False,
    log_format=None,
    conda_auto_init=False,
    conda_auto_install=False
):
    """Setup environment and build config for test Galaxy instance."""
    # NOTE(review): the log_format parameter is never referenced below.
    # For certain docker operations this needs to be evaluated out - e.g. for cwltool.
    tmpdir = os.path.realpath(tmpdir)
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    # Scratch locations for datasets, compiled templates, uploads and jobs.
    file_path = os.path.join(tmpdir, 'files')
    template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
    new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
    job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
    if use_test_file_dir:
        first_test_file_dir = ensure_test_file_dir_set()
        if not os.path.isabs(first_test_file_dir):
            first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
        library_import_dir = first_test_file_dir
        # Only enable user library imports when a users/ subdirectory exists.
        import_dir = os.path.join(first_test_file_dir, 'users')
        if os.path.exists(import_dir):
            user_library_import_dir = import_dir
        else:
            user_library_import_dir = None
    else:
        user_library_import_dir = None
        library_import_dir = None
    job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
    tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
    tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR', None)
    if tool_dependency_dir is None:
        tool_dependency_dir = tempfile.mkdtemp(dir=tmpdir, prefix="tool_dependencies")
    tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
    # Prepend a real data manager config (when present) to the test sample one.
    default_data_manager_config = None
    for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
        if os.path.exists(data_manager_config):
            default_data_manager_config = data_manager_config
    data_manager_config_file = "test/functional/tools/sample_data_manager_conf.xml"
    if default_data_manager_config is not None:
        data_manager_config_file = "%s,%s" % (default_data_manager_config, data_manager_config_file)
    master_api_key = get_master_api_key()
    cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ or
                              "TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
    # Data Manager testing temp path
    # For storing Data Manager outputs and .loc files so that real ones don't get clobbered
    galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
    tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
    conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
    conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
    conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
    if tool_conf is None:
        # As a fallback always at least allow upload.
        tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
    if shed_tool_conf is not None:
        tool_conf = "%s,%s" % (tool_conf, shed_tool_conf)
    shed_tool_data_table_config = default_shed_tool_data_table_config
    config = dict(
        admin_users='test@bx.psu.edu',
        allow_library_path_paste=True,
        allow_user_creation=True,
        allow_user_deletion=True,
        api_allow_run_as='test@bx.psu.edu',
        auto_configure_logging=logging_config_file is None,
        check_migrate_tools=False,
        chunk_upload_size=100,
        conda_prefix=conda_prefix,
        conda_auto_init=conda_auto_init,
        conda_auto_install=conda_auto_install,
        cleanup_job=cleanup_job,
        data_manager_config_file=data_manager_config_file,
        enable_beta_tool_formats=True,
        expose_dataset_path=True,
        file_path=file_path,
        ftp_upload_purge=False,
        galaxy_data_manager_data_path=galaxy_data_manager_data_path,
        id_secret='changethisinproductiontoo',
        job_config_file=job_config_file,
        job_working_directory=job_working_directory,
        library_import_dir=library_import_dir,
        log_destination="stdout",
        new_file_path=new_file_path,
        override_tempdir=False,
        master_api_key=master_api_key,
        running_functional_tests=True,
        shed_tool_data_table_config=shed_tool_data_table_config,
        template_cache_path=template_cache_path,
        template_path='templates',
        tool_config_file=tool_conf,
        tool_data_table_config_path=tool_data_table_config_path,
        tool_parse_help=False,
        tool_path=tool_path,
        update_integrated_tool_panel=update_integrated_tool_panel,
        use_tasked_jobs=True,
        use_heartbeat=False,
        user_library_import_dir=user_library_import_dir,
        webhooks_dir=TEST_WEBHOOKS_DIR,
        logging=LOGGING_CONFIG_DEFAULT,
        monitor_thread_join_timeout=5,
        object_store_store_by="uuid",
    )
    config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
    config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
    if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
        object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
        with open(object_store_config, "w") as f:
            # NOTE(review): the YAML indentation below was reconstructed from a
            # whitespace-mangled copy of this file - verify against upstream.
            contents = """
type: hierarchical
backends:
   - id: files1
     type: disk
     weight: 1
     files_dir: "${temp_directory}/files1"
     extra_dirs:
     - type: temp
       path: "${temp_directory}/tmp1"
     - type: job_work
       path: "${temp_directory}/job_working_directory1"
   - id: files2
     type: disk
     weight: 1
     files_dir: "${temp_directory}/files2"
     extra_dirs:
     - type: temp
       path: "${temp_directory}/tmp2"
     - type: job_work
       path: "${temp_directory}/job_working_directory2"
"""
            contents_template = string.Template(contents)
            expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
            f.write(expanded_contents)
        config["object_store_config_file"] = object_store_config
    if datatypes_conf is not None:
        config['datatypes_config_file'] = datatypes_conf
    if enable_tool_shed_check:
        config["enable_tool_shed_check"] = enable_tool_shed_check
        config["hours_between_check"] = 0.001
    if tool_dependency_dir:
        config["tool_dependency_dir"] = tool_dependency_dir
        # Used by shed's twill dependency stuff - todo read from
        # Galaxy's config API.
        os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
    return config
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherise find whatever Galaxy would use as the default and
# the sample data for fucntional tests to that.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv).

    Fixes over the previous version: the mutable default ``ignore_files=[]``
    (shared across calls) is replaced by ``None``, and the ``env`` argument
    is now forwarded to nose instead of being silently ignored.
    """
    if env is None:
        env = os.environ
    if ignore_files is None:
        ignore_files = []
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    test_config = nose.config.Config(
        env=env,  # was hard-coded to os.environ, ignoring the parameter
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
"""Copy a 'clean' sqlite template database.
From file or URL to specified path for sqlite database.
"""
db_path_dir = os.path.dirname(db_path)
if not os.path.exists(db_path_dir):
os.makedirs(db_path_dir)
if os.path.exists(source):
shutil.copy(source, db_path)
assert os.path.exists(db_path)
elif source.lower().startswith(("http://", "https://", "ftp://")):
try:
download_to_file(source, db_path)
except Exception as e:
# We log the exception but don't fail startup, since we can
# do all migration steps instead of downloading a template.
log.exception(e)
else:
raise Exception("Failed to copy database template from source %s" % source)
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
    """Find (and populate if needed) the Galaxy database connection."""
    database_auto_migrate = False
    check_migrate_databases = True
    template_name = None
    dburi_var = "%s_TEST_DBURI" % prefix
    if dburi_var in os.environ:
        database_connection = os.environ[dburi_var]
        # Templating is only supported for postgres - not mysql or sqlite.
        if prefer_template_database and database_connection.startswith("p"):
            parsed = urlparse(database_connection)
            template_name = parsed.path[1:]  # drop / from e.g. /galaxy
            suffix = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
            database_connection = parsed._replace(path="/%s" % ("gxtest" + suffix)).geturl()
            if not database_exists(database_connection):
                # Bypass migrations and instantiate the current schema directly.
                create_database(database_connection)
                mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True)
                toolshed_mapping.init(database_connection, create_tables=True)
                check_migrate_databases = False
    else:
        # No explicit URI - fall back to a sqlite database under db_path.
        db_path = os.path.join(db_path, "%s.sqlite" % prefix.lower())
        template_var = "%s_TEST_DB_TEMPLATE" % prefix
        if template_var in os.environ:
            # Middle ground between recreating a completely new database and
            # pointing at an existing one via <PREFIX>_TEST_DBURI: the former
            # is slow to set up, the latter breaks tests expecting a clean
            # database (namely tool shed tests).
            copy_database_template(os.environ[template_var], db_path)
            database_auto_migrate = True
        database_connection = 'sqlite:///%s' % db_path
    config = {
        "check_migrate_databases": check_migrate_databases,
        "database_connection": database_connection,
        "database_auto_migrate": database_auto_migrate,
    }
    if not database_connection.startswith("sqlite://"):
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    if template_name:
        config["database_template"] = template_name
    return config
def install_database_conf(db_path, default_merged=False):
    """Build the install-database connection settings for a test server."""
    if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
        connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
    elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
        # Merged with the main Galaxy database - no separate connection.
        connection = None
    else:
        connection = 'sqlite:///%s' % os.path.join(db_path, 'install.sqlite')
    return {} if connection is None else {"install_database_connection": connection}
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Return a mock database/ directory like the one in GALAXY_ROOT.

    Honors <PREFIX>_TEST_DBPATH (e.g. GALAXY_TEST_DBPATH or
    TOOL_SHED_TEST_DBPATH) when set in the environment.
    """
    override = os.environ.get("%s_TEST_DBPATH" % prefix)
    if override is not None:
        return override
    return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), 'database')
def _get_static_settings():
    """Return the settings needed to wrap a Galaxy app in the static middleware.

    These are mainly the filesystem locations of url-mapped static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")
    join = os.path.join
    # TODO: these should be copied from config/galaxy.ini
    return dict(
        static_enabled=True,
        static_cache_time=360,
        static_dir=static_dir,
        static_images_dir=join(static_dir, 'images', ''),
        static_favicon_dir=join(static_dir, 'favicon.ico'),
        static_scripts_dir=join(static_dir, 'scripts', ''),
        static_style_dir=join(static_dir, 'style', 'blue'),
        static_robots_txt=join(static_dir, 'robots.txt'),
    )
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # Nothing here for now except the static-middleware settings.
    return dict(_get_static_settings())
def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150):
    """Wait for an HTTP server to boot up.

    Polls ``http://host:port/`` up to ``sleep_tries`` times, sleeping
    ``sleep_amount`` seconds between attempts; raises when the server never
    answers with 200.
    """
    for _ in range(sleep_tries):
        # Directly test the app, not the proxy.
        conn = http_client.HTTPConnection(host, port)
        try:
            conn.request("GET", "/")
            response = conn.getresponse()
            if response.status == 200:
                break
        except socket.error as e:
            # ECONNREFUSED (61 on macOS/BSD, 111 on Linux): not up yet.
            if e.errno not in [61, 111]:
                raise
        finally:
            conn.close()  # previously leaked a connection per attempt
        time.sleep(sleep_amount)
    else:
        # The previous message hard-coded "10 tries"; report the real count.
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %d tries"
        message = template % (host, port, sleep_tries)
        raise Exception(message)
def attempt_ports(port):
    """Yield candidate ports for the test server to bind.

    With an explicit ``port``, yield it once and raise if the caller comes
    back for another (i.e. that port was already bound). Otherwise yield up
    to nine random ports between 8000 and 10000 before giving up.
    """
    if port is not None:
        yield port
        raise Exception("An existing process seems bound to specified test server port [%s]" % port)
    random.seed()
    for _ in range(9):
        yield str(random.randint(8000, 10000))
    raise Exception("Unable to open a port between %s and %s to start Galaxy server" % (8000, 10000))
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on the recommended port or a free one.

    Returns ``(server, port)`` with the port the webapp is running on.
    """
    server = None
    for port in attempt_ports(port):
        try:
            server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
            break
        except socket.error as e:
            # 98 == EADDRINUSE (Linux): try the next candidate port.
            # The previous code checked e[0], which fails on Python 3 where
            # socket.error is OSError and is not indexable; e.errno works on
            # both Python 2 and 3.
            if e.errno == 98:
                continue
            raise
    t = threading.Thread(target=server.serve_forever)
    t.start()
    return server, port
def cleanup_directory(tempdir):
    """Remove a test temp directory unless cleanup is disabled.

    GALAXY_TEST_NO_CLEANUP disables removal; TOOL_SHED_TEST_NO_CLEANUP is
    also respected for legacy reasons.
    """
    skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
    if skip_cleanup:
        log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir)
        return
    try:
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort cleanup: never fail a test run over a leftover dir.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests."""
    # NOTE(review): only testing_installed_tools is acted upon below; the
    # tmpdir and testing_migrated_tools parameters are currently unused.
    if testing_installed_tools:
        # TODO: Do this without modifying app - that is a pretty violation
        # of Galaxy's abstraction - we shouldn't require app at all let alone
        # be modifying it.
        tool_configs = app.config.tool_configs
        # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
        # and reload the app's toolbox.
        relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
        if relative_migrated_tool_panel_config in tool_configs:
            tool_configs.remove(relative_migrated_tool_panel_config)
        for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
            tool_configs.append(installed_tool_panel_config)
        from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
        app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
    """Build a Galaxy app object from simple keyword arguments.

    Constructs a paste-style complex dictionary and uses load_app_properties
    so Galaxy override variables are respected. Also publishes the module
    "global" sqlalchemy contexts for the Galaxy and install databases.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
    simple_kwargs = load_app_properties(kwds=simple_kwargs)
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    log.info("Embedded Galaxy application started")
    # Legacy twill-era tool shed tests read these module-level contexts.
    global galaxy_context, install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context
    return app
def build_shed_app(simple_kwargs):
    """Build a Tool Shed app object from simple keyword arguments.

    Constructs a paste-style complex dictionary and publishes the module
    "global" sqlalchemy context for the tool shed database.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")
    global tool_shed_context
    tool_shed_context = app.model.context
    return app
class classproperty(object):
    """Descriptor exposing a function as a read-only class-level property."""

    def __init__(self, f):
        self.f = f

    def __get__(self, obj, owner):
        # Always invoked with the class (owner), never the instance.
        return self.f(owner)
def get_ip_address(ifname):
    """Return the IPv4 address bound to a network interface (e.g. 'eth0').

    Uses the SIOCGIFADDR ioctl, so it only works where that ioctl exists
    (Linux).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed_name = struct.pack('256s', ifname[:15].encode('utf-8'))
    info = fcntl.ioctl(sock.fileno(), 0x8915, packed_name)  # SIOCGIFADDR
    return socket.inet_ntoa(info[20:24])
def explicitly_configured_host_and_port(prefix, config_object):
    """Resolve the (host, port) a test server for ``prefix`` should use.

    Host comes from <PREFIX>_TEST_HOST (falling back to
    config_object.default_web_host). Port comes from <PREFIX>_TEST_PORT
    unless <PREFIX>_TEST_PORT_RANDOM marks it as randomly assigned, in
    which case None is returned so a fresh random port gets picked.
    """
    host_env_key = "%s_TEST_HOST" % prefix
    port_env_key = "%s_TEST_PORT" % prefix
    port_random_env_key = "%s_TEST_PORT_RANDOM" % prefix
    default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(host_env_key, default_web_host)

    if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explictly configured.
        port = None
    else:
        port = os.environ.get(port_env_key, None)

    # If an explicit port wasn't assigned for this test or test case, set this
    # environment variable so we know it is random. We can then randomly re-assign
    # for new tests.
    if port is None:
        # Previously this always wrote the hard-coded GALAXY_TEST_PORT_RANDOM
        # key even for other prefixes (e.g. TOOL_SHED), so the prefixed read
        # above could never observe the flag it was meant to set.
        os.environ[port_random_env_key] = "1"

    return host, port
def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150):
    """Export <PREFIX>_TEST_HOST/PORT and block until the server responds."""
    os.environ["%s_TEST_HOST" % prefix] = host
    os.environ["%s_TEST_PORT" % prefix] = port
    wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper(object):
    """Base handle on a running test server: its name, host and port."""

    def __init__(self, name, host, port):
        self.name = name
        self.host = host
        self.port = port

    @property
    def app(self):
        # Subclasses that embed a Galaxy app override this accessor.
        raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")

    def stop(self):
        raise NotImplementedError()
class PasteServerWrapper(ServerWrapper):
    # Wraps an embedded paste httpserver together with the app it serves.
    def __init__(self, app, server, name, host, port):
        super(PasteServerWrapper, self).__init__(name, host, port)
        self._app = app
        self._server = server

    @property
    def app(self):
        return self._app

    def stop(self):
        """Shut down the web server first, then the application."""
        if self._server is not None:
            log.info("Shutting down embedded %s web server" % self.name)
            self._server.server_close()
            log.info("Embedded web server %s stopped" % self.name)
        if self._app is not None:
            log.info("Stopping application %s" % self.name)
            self._app.shutdown()
            log.info("Application %s stopped." % self.name)
class UwsgiServerWrapper(ServerWrapper):
    # Wraps a uwsgi subprocess serving Galaxy; a background thread reaps the
    # process exit status so the child never becomes a zombie.
    def __init__(self, p, name, host, port):
        super(UwsgiServerWrapper, self).__init__(name, host, port)
        self._p = p
        self._r = None  # process return code, filled in by wait()
        self._t = threading.Thread(target=self.wait)
        self._t.start()

    def __del__(self):
        # Ensure the reaper thread is joined before the wrapper goes away.
        self._t.join()

    def wait(self):
        self._r = self._p.wait()

    def stop(self):
        """Terminate the uwsgi process group, escalating SIGTERM -> SIGKILL."""
        try:
            os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
        except Exception:
            pass
        time.sleep(.1)
        try:
            os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
        except Exception:
            pass
        self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a uwsgi-hosted Galaxy for testing.

    Writes the supplied config kwargs to a galaxy.yml in ``tempdir``, spawns
    a uwsgi subprocess per candidate port, and returns the first
    UwsgiServerWrapper whose HTTP server comes up.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    config = {}
    config["galaxy"] = kwargs.copy()
    yaml_config_path = os.path.join(tempdir, "galaxy.yml")
    with open(yaml_config_path, "w") as f:
        import yaml
        yaml.dump(config, f)

    def attempt_port_bind(port):
        # Build and spawn the uwsgi command line bound to this candidate port.
        uwsgi_command = [
            "uwsgi",
            "--http",
            "%s:%s" % (host, port),
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]
        for p in sys.path:
            uwsgi_command.append('--pythonpath')
            uwsgi_command.append(p)
        # Give the test config object a chance to adjust the command line.
        handle_uwsgi_cli_command = getattr(
            config_object, "handle_uwsgi_cli_command", None
        )
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)
        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", ' '.join([shlex_quote(x) for x in uwsgi_command]))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(
            p, name, host, port
        )

    for port in attempt_ports(port):
        server_wrapper = attempt_port_bind(port)
        try:
            set_and_wait_for_http_target(prefix, host, port, sleep_tries=50)
            log.info("Test-managed uwsgi web server for %s started at %s:%s" % (name, host, port))
            return server_wrapper
        except Exception:
            # Server never answered on this port; stop it and try the next.
            server_wrapper.stop()
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Serve ``app`` with an embedded Paste server built by ``webapp_factory``.
    Reads either the GALAXY_TEST_HOST/GALAXY_TEST_PORT pair or the
    TOOL_SHED_TEST_HOST/TOOL_SHED_TEST_PORT pair (depending on ``prefix``)
    and guarantees they are populated by the time this returns.
    """
    server_name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    # Build the WSGI webapp, then hand it to the embedded Paste server.
    wsgi_webapp = webapp_factory(
        kwargs['global_conf'],
        app=app,
        use_translogger=False,
        static_enabled=True,
    )
    server, port = serve_webapp(wsgi_webapp, host=host, port=port)
    set_and_wait_for_http_target(prefix, host, port)
    log.info("Embedded paste web server for %s started at %s:%s" % (server_name, host, port))
    return PasteServerWrapper(app, server, server_name, host, port)
class TestDriver(object):
    """Own the full life-cycle of a Galaxy-style functional test run.
    Comparable to a Python TestCase, but designed to back a main()
    entry point: it launches servers, builds the nose tests, runs
    them, and tears everything down afterwards.
    """
    def __init__(self):
        """Initialize the lists of tracked servers and temp directories."""
        self.server_wrappers = []
        self.temp_directories = []
    def setup(self):
        """Hook invoked before tests are built."""
    def build_tests(self):
        """Hook for constructing nose tests once the environment is ready."""
    def tear_down(self):
        """Release every resource this driver is tracking."""
        self.stop_servers()
        for directory in self.temp_directories:
            cleanup_directory(directory)
    def stop_servers(self):
        """Stop all tracked server wrappers and forget them."""
        for wrapper in self.server_wrappers:
            wrapper.stop()
        self.server_wrappers = []
    def mkdtemp(self):
        """Create and track a temp directory (cleanup honors the config)."""
        created = tempfile.mkdtemp()
        self.temp_directories.append(created)
        return created
    def run(self):
        """Drive the whole test: setup, build, run nose, then clean up."""
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            return 0 if nose_config_and_run() else 1
        except Exception as e:
            log.info("Failure running tests")
            raise e
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
    """Instantiate a Galaxy-style nose TestDriver for testing Galaxy."""
    testing_shed_tools = False
    def _configure(self, config_object=None):
        """Setup various variables used to launch a Galaxy server."""
        config_object = self._ensure_config_object(config_object)
        # When set, tests run against an externally managed Galaxy instead
        # of a server launched here.
        self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
        # Allow a particular test to force uwsgi or any test to use uwsgi with
        # the GALAXY_TEST_UWSGI environment variable.
        use_uwsgi = os.environ.get('GALAXY_TEST_UWSGI', None)
        if not use_uwsgi:
            if getattr(config_object, "require_uwsgi", None):
                use_uwsgi = True
        self.use_uwsgi = use_uwsgi
        # Allow controlling the log format
        log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
        if not log_format and use_uwsgi:
            # Default uwsgi format includes process/worker/mule identifiers.
            log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
                "[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
                "[%(threadName)s] %(message)s"
        self.log_format = log_format
        self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
        self.temp_directories.append(self.galaxy_test_tmp_dir)
        self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
        # Framework tests use the bundled sample tool/datatype configs.
        if getattr(config_object, "framework_tool_and_types", False):
            default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
            datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
        else:
            default_tool_conf = getattr(config_object, "default_tool_conf", None)
            datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
        self.default_tool_conf = default_tool_conf
        self.datatypes_conf_override = datatypes_conf_override
    def setup(self, config_object=None):
        """Setup a Galaxy server for functional test (if needed).
        Configuration options can be specified as attributes on the supplied
        ```config_object``` (defaults to self).
        """
        self._saved_galaxy_config = None
        self._configure(config_object)
        self._register_and_run_servers(config_object)
    def restart(self, config_object=None, handle_config=None):
        """Stop the managed servers and relaunch them (config may be tweaked
        via ``handle_config``)."""
        self.stop_servers()
        self._register_and_run_servers(config_object, handle_config=handle_config)
    def _register_and_run_servers(self, config_object=None, handle_config=None):
        """Build (or reuse) the Galaxy config and start a server for it.
        Skips server startup entirely when an external Galaxy is targeted.
        """
        config_object = self._ensure_config_object(config_object)
        self.app = None
        if self.external_galaxy is None:
            if self._saved_galaxy_config is not None:
                # Reuse the config built on a previous (pre-restart) launch.
                galaxy_config = self._saved_galaxy_config
            else:
                tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
                # Configure the database path.
                galaxy_db_path = database_files_path(tempdir)
                # Allow config object to specify a config dict or a method to produce
                # one - other just read the properties above and use the default
                # implementation from this file.
                galaxy_config = getattr(config_object, "galaxy_config", None)
                if hasattr(galaxy_config, '__call__'):
                    galaxy_config = galaxy_config()
                if galaxy_config is None:
                    setup_galaxy_config_kwds = dict(
                        use_test_file_dir=not self.testing_shed_tools,
                        default_install_db_merged=True,
                        default_tool_conf=self.default_tool_conf,
                        datatypes_conf=self.datatypes_conf_override,
                        prefer_template_database=getattr(config_object, "prefer_template_database", False),
                        log_format=self.log_format,
                        conda_auto_init=getattr(config_object, "conda_auto_init", False),
                        conda_auto_install=getattr(config_object, "conda_auto_install", False),
                    )
                    galaxy_config = setup_galaxy_config(
                        galaxy_db_path,
                        **setup_galaxy_config_kwds
                    )
                    isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
                    if isolate_galaxy_config:
                        galaxy_config["config_dir"] = tempdir
                    self._saved_galaxy_config = galaxy_config
            if galaxy_config is not None:
                # Final hook for the config object to mutate the kwds.
                handle_galaxy_config_kwds = handle_config or getattr(
                    config_object, "handle_galaxy_config_kwds", None
                )
                if handle_galaxy_config_kwds is not None:
                    handle_galaxy_config_kwds(galaxy_config)
            if self.use_uwsgi:
                server_wrapper = launch_uwsgi(
                    galaxy_config,
                    tempdir=tempdir,
                    config_object=config_object,
                )
            else:
                # ---- Build Application --------------------------------------------------
                self.app = build_galaxy_app(galaxy_config)
                server_wrapper = launch_server(
                    self.app,
                    buildapp.app_factory,
                    galaxy_config,
                    config_object=config_object,
                )
                log.info("Functional tests will be run against external Galaxy server %s:%s" % (server_wrapper.host, server_wrapper.port))
            self.server_wrappers.append(server_wrapper)
        else:
            log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy)
            # Ensure test file directory setup even though galaxy config isn't built.
            ensure_test_file_dir_set()
    def _ensure_config_object(self, config_object):
        # Fall back to self so attribute lookups always have a target.
        if config_object is None:
            config_object = self
        return config_object
    def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
        """Install tool-shed tools into the running app for tests."""
        setup_shed_tools_for_test(
            self.app,
            self.galaxy_test_tmp_dir,
            testing_migrated_tools,
            testing_installed_tools
        )
    def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
        """Create nose test classes for every tool in the app's toolbox."""
        if self.app is None:
            return
        if testing_shed_tools is None:
            testing_shed_tools = getattr(self, "testing_shed_tools", False)
        # We must make sure that functional.test_toolbox is always imported after
        # database_contexts.galaxy_content is set (which occurs in this method above).
        # If functional.test_toolbox is imported before database_contexts.galaxy_content
        # is set, sa_session will be None in all methods that use it.
        import functional.test_toolbox
        functional.test_toolbox.toolbox = self.app.toolbox
        # When testing data managers, do not test toolbox.
        test_classes = functional.test_toolbox.build_tests(
            app=self.app,
            testing_shed_tools=testing_shed_tools,
            master_api_key=get_master_api_key(),
            user_api_key=get_user_api_key(),
        )
        if return_test_classes:
            return test_classes
        return functional.test_toolbox
    def run_tool_test(self, tool_id, index=0, resource_parameters={}):
        """Run one of a tool's tests (by index) against the managed server.
        NOTE(review): mutable default argument; safe only as long as callees
        never mutate ``resource_parameters`` — confirm.
        """
        host, port, url = target_url_parts()
        galaxy_interactor_kwds = {
            "galaxy_url": url,
            "master_api_key": get_master_api_key(),
            "api_key": get_user_api_key(),
            "keep_outputs_dir": None,
        }
        galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
        verify_tool(
            tool_id=tool_id,
            test_index=index,
            galaxy_interactor=galaxy_interactor,
            resource_parameters=resource_parameters
        )
def drive_test(test_driver_class):
    """Build a driver from ``test_driver_class``, execute it, and exit the
    process with its status code."""
    exit_code = test_driver_class().run()
    sys.exit(exit_code)
def setup_keep_outdir():
    """Return the directory for saving test outputs (``GALAXY_TEST_SAVE``).
    Returns an empty string when the variable is unset; otherwise ensures
    the directory exists (best effort) and returns its path.
    """
    keep_outdir = os.environ.get('GALAXY_TEST_SAVE', '')
    # Was `keep_outdir > ''` — a Python-2-era string comparison; plain
    # truthiness expresses the same check idiomatically.
    if keep_outdir:
        try:
            os.makedirs(keep_outdir)
        except OSError:
            # Directory may already exist; keep the best-effort semantics.
            pass
    return keep_outdir
def target_url_parts():
    """Resolve the (host, port, url) triple the functional tests target.
    Host and port come from ``GALAXY_TEST_HOST``/``GALAXY_TEST_PORT``;
    ``GALAXY_TEST_EXTERNAL`` overrides the composed URL entirely.
    """
    env = os.environ
    resolved_host = socket.gethostbyname(env.get('GALAXY_TEST_HOST', DEFAULT_WEB_HOST))
    resolved_port = env.get('GALAXY_TEST_PORT')
    url = env.get('GALAXY_TEST_EXTERNAL', "http://%s:%s" % (resolved_host, resolved_port))
    return resolved_host, resolved_port, url
# Explicit public API of this module.
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_keep_outdir",
    "setup_galaxy_config",
    "target_url_parts",
    "TestDriver",
    "wait_for_http_server",
)
| 38.160413
| 138
| 0.683571
|
4a140f0da6d6360fa1bec14486cd86f9d90bba50
| 715
|
py
|
Python
|
detection/configs/retinanet_pcpvt_s_fpn_1x_coco_pvt_setting.py
|
dumpmemory/Twins
|
4700293a2d0a91826ab357fc5b9bc1468ae0e987
|
[
"Apache-2.0"
] | 394
|
2021-04-29T02:20:32.000Z
|
2022-03-24T12:12:15.000Z
|
detection/configs/retinanet_pcpvt_s_fpn_1x_coco_pvt_setting.py
|
dumpmemory/Twins
|
4700293a2d0a91826ab357fc5b9bc1468ae0e987
|
[
"Apache-2.0"
] | 27
|
2021-05-13T10:03:46.000Z
|
2022-03-17T05:22:24.000Z
|
detection/configs/retinanet_pcpvt_s_fpn_1x_coco_pvt_setting.py
|
dumpmemory/Twins
|
4700293a2d0a91826ab357fc5b9bc1468ae0e987
|
[
"Apache-2.0"
] | 49
|
2021-04-30T03:28:40.000Z
|
2022-02-28T06:51:01.000Z
|
# mmdetection-style config: RetinaNet with a PCPVT-Small (Twins) backbone,
# trained on COCO with the PVT schedule (AdamW, 1x / 12 epochs).
_base_ = [
    '_base_/models/retinanet_r50_fpn.py',
    '_base_/datasets/coco_detection.py',
    '_base_/default_runtime.py'
]
model = dict(
    pretrained='pretrained/pcpvt_small.pth',
    backbone=dict(
        type='pcpvt_small',
        style='pytorch'),
    neck=dict(
        type='FPN',
        # Channel widths of the four PCPVT-Small stages.
        in_channels=[64, 128, 320, 512],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5))
# optimizer
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: linear warmup, then step decay at epochs 8 and 11
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
total_epochs = 12
| 24.655172
| 62
| 0.639161
|
4a140f6e64f1093dd7f74f2520edc17c74b5a114
| 2,039
|
py
|
Python
|
BackEnd/site_receitas/migrations/0004_descricaoreceita_ingrediente_receita.py
|
GutoSantos/TrabSiteReceita
|
477dce89a0b6b8ac9396f9656de02b56c01c75d3
|
[
"Unlicense"
] | null | null | null |
BackEnd/site_receitas/migrations/0004_descricaoreceita_ingrediente_receita.py
|
GutoSantos/TrabSiteReceita
|
477dce89a0b6b8ac9396f9656de02b56c01c75d3
|
[
"Unlicense"
] | null | null | null |
BackEnd/site_receitas/migrations/0004_descricaoreceita_ingrediente_receita.py
|
GutoSantos/TrabSiteReceita
|
477dce89a0b6b8ac9396f9656de02b56c01c75d3
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-16 00:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration adding the DescricaoReceita,
    # Ingrediente and Receita tables (recipe description, ingredient, and
    # the join table linking them with quantity/unit).
    # NOTE(review): several column names carry typos ('descrição_receita'
    # with accents, 'coximento' presumably for 'cozimento'); they must stay
    # as-is here because applied migrations are immutable.
    dependencies = [
        ('site_receitas', '0003_usuario_is_staff'),
    ]
    operations = [
        migrations.CreateModel(
            name='DescricaoReceita',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_usuario', models.CharField(max_length=100)),
                ('nome_receita', models.CharField(max_length=100)),
                ('descrição_receita', models.CharField(max_length=100)),
                ('categoria', models.CharField(max_length=100)),
                ('porcoes', models.IntegerField()),
                ('calorias', models.IntegerField()),
                ('tempo_preparo', models.TimeField()),
                ('preparo', models.CharField(max_length=500)),
                ('coximento', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Ingrediente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_ingrediente', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Receita',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantidade', models.IntegerField()),
                ('unidade', models.CharField(max_length=20)),
                ('fk_descricaoReceita', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='site_receitas.DescricaoReceita')),
                ('fk_ingrediente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='site_receitas.Ingrediente')),
            ],
        ),
    ]
| 41.612245
| 141
| 0.590486
|
4a1412598ef0e9d9c57321e362a438df9b8a75fa
| 3,808
|
py
|
Python
|
frappe/utils/autodoc.py
|
badili/frappe
|
8177d7e745f511fcc4da50fed9e291a58172a613
|
[
"MIT"
] | null | null | null |
frappe/utils/autodoc.py
|
badili/frappe
|
8177d7e745f511fcc4da50fed9e291a58172a613
|
[
"MIT"
] | 7
|
2016-05-30T04:03:38.000Z
|
2019-02-03T03:10:03.000Z
|
frappe/utils/autodoc.py
|
badili/frappe
|
8177d7e745f511fcc4da50fed9e291a58172a613
|
[
"MIT"
] | 5
|
2015-07-22T04:52:43.000Z
|
2016-10-07T01:51:21.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
frappe.utils.autodoc
~~~~~~~~~~~~~~~~~~~~
Inspect elements of a given module and return its objects
"""
from __future__ import unicode_literals
import inspect, importlib, re, frappe
from frappe.model.document import get_controller
def automodule(name):
    """Returns a list of attributes for given module string.
    Attribute Format:
    {
    "name": [__name__],
    "type": ["function" or "class"]
    "args": [inspect.getargspec(value) (for function)]
    "docs": [__doc__ as markdown]
    }
    :param name: Module name as string."""
    attributes = []
    obj = importlib.import_module(name)
    for attrname in dir(obj):
        value = getattr(obj, attrname)
        # Only document members defined in this module itself.
        if getattr(value, "__module__", None) != name:
            # imported member, ignore
            continue
        if inspect.isclass(value):
            attributes.append(get_class_info(value, name))
        if inspect.isfunction(value):
            attributes.append(get_function_info(value))
    return {
        # filter(None, ...) drops falsy entries (this module is Python 2,
        # where filter returns a list rather than an iterator).
        "members": filter(None, attributes),
        "docs": get_obj_doc(obj)
    }
# Cache of installed apps, populated lazily by get_version().
installed = None
def get_version(name):
    """Return the docs version for an app, module path or DocType name.
    Prefers ``<app>.config.docs.docs_version`` when defined, otherwise falls
    back to the app module's ``__version__`` (or "0.0.0").
    :param name: App/module name (dotted path allowed) or a DocType name.
    """
    # Removed a stray debug `print name` that was left in here (it was also
    # a syntax error under Python 3).
    global installed
    if not installed:
        installed = frappe.get_installed_apps()
    def _for_module(m):
        app_name = m.split(".")[0]
        try:
            docs_version = frappe.get_attr(app_name + ".config.docs.docs_version")
        except AttributeError:
            docs_version = None
        if docs_version:
            return docs_version
        return getattr(importlib.import_module(m.split(".")[0]), "__version__", "0.0.0")
    if "." in name or name in installed:
        return _for_module(name)
    else:
        # Treat `name` as a DocType and resolve via its controller module.
        return _for_module(get_controller(name).__module__)
def get_class_info(class_obj, module_name):
    """Return a metadata dict for a class: own methods, bases and parsed docs.
    :param class_obj: Class to inspect.
    :param module_name: Module the methods must belong to; methods inherited
        from other modules are skipped.
    """
    members = []
    for attrname in dir(class_obj):
        member = getattr(class_obj, attrname)
        if inspect.ismethod(member):
            if getattr(member, "__module__", None) != module_name:
                # inherited member, ignore
                continue
            members.append(get_function_info(member))
    return {
        "name": class_obj.__name__,
        "type": "class",
        "bases": [b.__module__ + "." + b.__name__ for b in class_obj.__bases__],
        # filter(None, ...) drops falsy entries (Python 2 list semantics).
        "members": filter(None, members),
        "docs": parse(get_obj_doc(class_obj))
    }
def get_function_info(value):
    """Return a metadata dict (name/type/args/docs/whitelisted) for a function."""
    docs = get_obj_doc(value)
    return {
        "name": value.__name__,
        "type": "function",
        "args": inspect.getargspec(value),
        "docs": parse(docs) if docs else '<span class="text-muted">No docs</span>',
        # Whether the function is exposed over HTTP via frappe's whitelist.
        "whitelisted": value in frappe.whitelisted
    }
def parse(docs):
    """Parse ``__doc__`` text into markdown.
    Converts ``:param name: description`` directives into a bulleted
    "**Parameters:**" list; all other lines pass through unchanged.
    :param docs: Raw docstring text (may be ``None`` or empty)."""
    if not docs:
        return ""
    if ":param" in docs:
        out, title_set = [], False
        for line in docs.splitlines():
            if ":param" in line:
                if not title_set:
                    # First parameter: emit the section title and open the list.
                    out.append("")
                    out.append("**Parameters:**")
                    out.append("")
                    title_set = True
                # Raw strings: the originals were plain strings relying on
                # lenient handling of the invalid \s and \g escapes.
                line = re.sub(r"\s*:param\s([^:]+):(.*)", r"- **`\g<1>`** - \g<2>", line)
            elif title_set:
                # First non-param line after the list: close it with a blank
                # marker line. (The original also re-tested `":param" not in
                # line`, which is always true in this branch.)
                out.append("")
                title_set = False
            out.append(line)
        docs = "\n".join(out)
    return docs
def strip_leading_tabs(docs):
    """Strip the common leading indentation from ``__doc__`` text.
    The indent width is measured on the first non-empty line after the
    summary line and removed from every line but the first.
    :param docs: Docstring text."""
    lines = docs.splitlines()
    if len(lines) > 1:
        start_line = 1
        ref_line = lines[start_line]
        while not ref_line:
            start_line += 1
            # Bug fix: was `start_line > len(lines)`, which still allowed
            # `lines[len(lines)]` (IndexError) when every line after the
            # first was empty.
            if start_line >= len(lines):
                break
            ref_line = lines[start_line]
        strip_left = len(ref_line) - len(ref_line.lstrip())
        if strip_left:
            docs = "\n".join([lines[0]] + [l[strip_left:] for l in lines[1:]])
    return docs
def automodel(doctype):
    """return doctype template"""
    # Placeholder: DocType documentation rendering is not implemented yet.
    pass
def get_obj_doc(obj):
    '''Return `__doc__` of the given object as unicode'''
    # Python 2: coerce byte-string docstrings to unicode. `unicode` does not
    # exist on Python 3, so this helper (and module) is py2-only as written.
    doc = getattr(obj, "__doc__", "") or ''
    if not isinstance(doc, unicode):
        doc = unicode(doc, 'utf-8')
    return doc
| 23.361963
| 87
| 0.675158
|
4a1413959d92254a8bc735376cc11e5efee06ef9
| 10,816
|
py
|
Python
|
pyecsca/ec/curve.py
|
Tomko10/pyecsca
|
900503e602c3079c6293e17f297e3b111ba9611a
|
[
"MIT"
] | 24
|
2019-07-01T00:27:24.000Z
|
2022-02-17T00:46:28.000Z
|
pyecsca/ec/curve.py
|
Tomko10/pyecsca
|
900503e602c3079c6293e17f297e3b111ba9611a
|
[
"MIT"
] | 18
|
2020-12-10T15:08:56.000Z
|
2022-03-01T11:44:37.000Z
|
pyecsca/ec/curve.py
|
Tomko10/pyecsca
|
900503e602c3079c6293e17f297e3b111ba9611a
|
[
"MIT"
] | 7
|
2020-02-20T18:44:29.000Z
|
2021-11-30T21:16:44.000Z
|
"""This module provides an elliptic curve class."""
from ast import Module
from copy import copy
from typing import MutableMapping, Union, List, Optional
from public import public
from .coordinates import CoordinateModel, AffineCoordinateModel
from .mod import Mod
from .model import CurveModel
from .point import Point, InfinityPoint
@public
class EllipticCurve:
    """Elliptic curve."""
    model: CurveModel
    """The model of the curve."""
    coordinate_model: CoordinateModel
    """The coordinate system of the curve."""
    prime: int
    """The prime specifying the base prime field of the curve."""
    parameters: MutableMapping[str, Mod]
    """The values of the parameters defining the curve, these cover the curve model and coordinate system parameters."""
    neutral: Point
    """The neutral point on the curve."""
    def __init__(
        self,
        model: CurveModel,
        coordinate_model: CoordinateModel,
        prime: int,
        neutral: Point,
        parameters: MutableMapping[str, Union[Mod, int]],
    ):
        # The coordinate model must belong to this curve model (the affine
        # model is always acceptable).
        if coordinate_model not in model.coordinates.values() and not isinstance(
            coordinate_model, AffineCoordinateModel
        ):
            raise ValueError
        # The supplied parameter names must exactly cover the union of model
        # and coordinate-system parameters — no extras, none missing.
        if (
            set(model.parameter_names)
            .union(coordinate_model.parameters)
            .symmetric_difference(parameters.keys())
        ):
            raise ValueError
        self.model = model
        self.coordinate_model = coordinate_model
        self.prime = prime
        self.parameters = {}
        # Normalize all parameter values to Mod instances over `prime`.
        for name, value in parameters.items():
            if isinstance(value, Mod):
                if value.n != prime:
                    raise ValueError(f"Parameter {name} has wrong modulus.")
            else:
                value = Mod(value, prime)
            self.parameters[name] = value
        self.neutral = neutral
    def _execute_base_formulas(self, formulas: List[Module], *points: Point) -> Point:
        # Execute compiled affine formula statements with x1/y1, x2/y2, ...
        # bound to the points' coordinates and the curve parameters in scope;
        # the formulas are expected to leave `x` and `y` in the namespace.
        for point in points:
            if not isinstance(point.coordinate_model, AffineCoordinateModel):
                raise ValueError("Coordinate model of point is not affine.")
            if point.coordinate_model.curve_model != self.model:
                raise ValueError("Curve model of point does not match the curve.")
        locls = {
            var + str(i + 1): point.coords[var]
            for i, point in enumerate(points)
            for var in point.coords
        }
        locls.update(self.parameters)
        for line in formulas:
            exec(compile(line, "", mode="exec"), None, locls)
        if not isinstance(locls["x"], Mod):
            locls["x"] = Mod(locls["x"], self.prime)
        if not isinstance(locls["y"], Mod):
            locls["y"] = Mod(locls["y"], self.prime)
        return Point(AffineCoordinateModel(self.model), x=locls["x"], y=locls["y"])
    def affine_add(self, one: Point, other: Point) -> Point:
        """
        Add two affine points using the affine addition formula.
        Handles the case of point at infinity gracefully (short-circuits).
        :param one: One point.
        :param other: Another point.
        :return: The addition of the two points.
        """
        if isinstance(one, InfinityPoint):
            return other
        if isinstance(other, InfinityPoint):
            return one
        # The generic addition formula is undefined for equal points; use
        # the doubling formula instead.
        if one == other:
            return self.affine_double(one)
        return self._execute_base_formulas(self.model.base_addition, one, other)
    def affine_double(self, one: Point) -> Point:
        """
        Double an affine point using the affine doubling formula.
        Handles the case of point at infinity gracefully (short-circuits).
        :param one: A point.
        :return: The doubling of the point.
        """
        if isinstance(one, InfinityPoint):
            return one
        return self._execute_base_formulas(self.model.base_doubling, one)
    def affine_negate(self, one: Point) -> Point:
        """
        Negate an affine point using the affine negation formula.
        Handles the case of point at infinity gracefully (short-circuits).
        :param one: A point.
        :return: The negation of the point.
        """
        if isinstance(one, InfinityPoint):
            return one
        return self._execute_base_formulas(self.model.base_negation, one)
    def affine_multiply(self, point: Point, scalar: int) -> Point:
        """
        Multiply an affine point by a scalar using the affine doubling and addition formulas.
        Handles the case of point at infinity gracefully (short-circuits).
        :param point: The point to multiply.
        :param scalar: The scalar to use.
        :return: The scalar multiplication of `point`.
        """
        if isinstance(point, InfinityPoint):
            return point
        if not isinstance(point.coordinate_model, AffineCoordinateModel):
            raise ValueError("Coordinate model of point is not affine.")
        if point.coordinate_model.curve_model != self.model:
            raise ValueError("Curve model of point does not match the curve.")
        # MSB-first double-and-add, starting from the top bit of `scalar`.
        # NOTE(review): scalar <= 1 returns a copy of `point` (the loop body
        # never runs) — callers appear to pass scalars >= 1; confirm.
        q = copy(point)
        r = copy(point)
        for i in range(scalar.bit_length() - 2, -1, -1):
            r = self.affine_double(r)
            if scalar & (1 << i) != 0:
                r = self.affine_add(r, q)
        return r
    @property
    def affine_neutral(self) -> Optional[Point]:
        """
        Get the neutral point in affine form, if it has one, otherwise ``None``.
        :return: The affine neutral point or ``None``.
        """
        if not self.neutral_is_affine:
            return None
        # Evaluate the model's neutral-point formulas with the curve
        # parameters in scope; they leave `x` and `y` in the namespace.
        locls = {**self.parameters}
        for line in self.model.base_neutral:
            exec(compile(line, "", mode="exec"), None, locls)
        if not isinstance(locls["x"], Mod):
            locls["x"] = Mod(locls["x"], self.prime)
        if not isinstance(locls["y"], Mod):
            locls["y"] = Mod(locls["y"], self.prime)
        return Point(AffineCoordinateModel(self.model), x=locls["x"], y=locls["y"])
    @property
    def neutral_is_affine(self):
        """Whether the neutral point is an affine point."""
        return bool(self.model.base_neutral)
    def is_neutral(self, point: Point) -> bool:
        """
        Check whether the point is the neutral point.
        :param point: The point to test.
        :return: Whether it is the neutral point.
        """
        return self.neutral == point
    def is_on_curve(self, point: Point) -> bool:
        """
        Check whether the point is on the curve.
        :param point: The point to test.
        :return: Whether it is on the curve.
        """
        if point.coordinate_model.curve_model != self.model:
            return False
        if self.is_neutral(point):
            return True
        # Evaluate the curve equation on the point's affine coordinates.
        if isinstance(point.coordinate_model, AffineCoordinateModel):
            loc = {**self.parameters, **point.coords}
        else:
            loc = {**self.parameters, **point.to_affine().coords}
        return eval(compile(self.model.equation, "", mode="eval"), loc)
    def to_affine(self) -> "EllipticCurve":
        """
        Convert this curve into the affine coordinate model, if possible.
        :return: The transformed elliptic curve.
        """
        coord_model = AffineCoordinateModel(self.model)
        return EllipticCurve(self.model, coord_model, self.prime, self.neutral.to_affine(), self.parameters)  # type: ignore[arg-type]
    def decode_point(self, encoded: bytes) -> Point:
        """
        Decode a point encoded as a sequence of bytes (ANSI X9.62).
        This decoding is the same as ANSI X9.63 for the affine coordinate system and for others it
        only implements the uncompressed variant.
        .. warning::
            The point is not validated to be on the curve (if the uncompressed encoding is used).
        :param encoded: The encoded representation of a point.
        :return: The decoded point.
        """
        # 0x00: the point at infinity.
        if encoded[0] == 0x00 and len(encoded) == 1:
            return InfinityPoint(self.coordinate_model)
        coord_len = (self.prime.bit_length() + 7) // 8
        # 0x04/0x06: uncompressed — one big-endian coordinate per variable,
        # in sorted variable order.
        if encoded[0] in (0x04, 0x06):
            data = encoded[1:]
            if len(data) != coord_len * len(self.coordinate_model.variables):
                raise ValueError("Encoded point has bad length")
            coords = {}
            for var in sorted(self.coordinate_model.variables):
                coords[var] = Mod(int.from_bytes(data[:coord_len], "big"), self.prime)
                data = data[coord_len:]
            return Point(self.coordinate_model, **coords)
        # 0x02/0x03: compressed — x only; y is recovered from the curve
        # equation with its parity taken from the low bit of the tag byte.
        elif encoded[0] in (0x02, 0x03):
            if isinstance(self.coordinate_model, AffineCoordinateModel):
                data = encoded[1:]
                if len(data) != coord_len:
                    raise ValueError("Encoded point has bad length")
                x = Mod(int.from_bytes(data, "big"), self.prime)
                loc = {**self.parameters, "x": x}
                rhs = eval(compile(self.model.ysquared, "", mode="eval"), loc)
                if not rhs.is_residue():
                    raise ValueError("Point not on curve")
                sqrt = rhs.sqrt()
                yp = encoded[0] & 0x01
                if int(sqrt) & 0x01 == yp:
                    y = sqrt
                else:
                    y = -sqrt
                return Point(self.coordinate_model, x=x, y=y)
            else:
                raise NotImplementedError
        else:
            raise ValueError(
                f"Wrong encoding type: {hex(encoded[0])}, should be one of 0x04, 0x06, 0x02, 0x03 or 0x00"
            )
    def affine_random(self) -> Point:
        """Generate a random affine point on the curve."""
        # Rejection sampling: pick x until y^2 is a quadratic residue, then
        # pick one of the two square roots at random.
        while True:
            x = Mod.random(self.prime)
            loc = {**self.parameters, "x": x}
            ysquared = eval(compile(self.model.ysquared, "", mode="eval"), loc)
            if ysquared.is_residue():
                y = ysquared.sqrt()
                b = Mod.random(2)
                if b == 1:
                    y = -y
                return Point(AffineCoordinateModel(self.model), x=x, y=y)
    def __eq__(self, other):
        if not isinstance(other, EllipticCurve):
            return False
        return (
            self.model == other.model
            and self.coordinate_model == other.coordinate_model
            and self.prime == other.prime
            and self.parameters == other.parameters
        )
    def __str__(self):
        return "EllipticCurve"
    def __repr__(self):
        params = ", ".join((f"{key}={val}" for key, val in self.parameters.items()))
        return f"{self.__class__.__name__}([{params}] on {self.model} using {self.coordinate_model})"
| 37.950877
| 134
| 0.588018
|
4a1413fbf280e430621d2995bfeb782f15631737
| 800
|
py
|
Python
|
baseCoverter.py
|
oof2win2/cs_igcse
|
8c5a6a23f2eeca9414d8d720c1dce4df7f62f324
|
[
"MIT"
] | null | null | null |
baseCoverter.py
|
oof2win2/cs_igcse
|
8c5a6a23f2eeca9414d8d720c1dce4df7f62f324
|
[
"MIT"
] | null | null | null |
baseCoverter.py
|
oof2win2/cs_igcse
|
8c5a6a23f2eeca9414d8d720c1dce4df7f62f324
|
[
"MIT"
] | null | null | null |
import random
def convertQues(rng):
    """Quiz the user: convert a number from a random base (hex or binary) to decimal.
    Loops until the user answers correctly. Note: despite the name, ``rng``
    is used directly as the quiz number (the random choice is commented out).
    """
    # num = random.randint(0, rng)
    num = rng
    base = random.choice(["hex", "bin"])
    # Single prompt/check loop replaces the two duplicated hex/bin loops of
    # the original; prompts are unchanged.
    if base == "hex":
        prompt = f"What is hex {hex(num)[2:]} in decimal: "
    else:
        prompt = f"What is binary {bin(num)[2:]} in decimal: "
    while True:
        if int(input(prompt), 10) == num:
            break
        print("Wrong answer. Please try again")
if __name__ == "__main__":
    # Guard the endless quiz loop so importing this module has no side
    # effects; behavior when run as a script is unchanged.
    while True:
        r = random.randint(0, 256)
        print(f"{r}, {bin(r)}, {hex(r)}")
        convertQues(r)
| 26.666667
| 70
| 0.50375
|
4a141600267458a9a686d062305cd303823da220
| 5,207
|
py
|
Python
|
fhir/immunizations_demo/scripts/assemble_training_data.py
|
mevans845/healthcare
|
55a1d08cddb344821feb3abe207ca4d62c6419ed
|
[
"Apache-2.0"
] | null | null | null |
fhir/immunizations_demo/scripts/assemble_training_data.py
|
mevans845/healthcare
|
55a1d08cddb344821feb3abe207ca4d62c6419ed
|
[
"Apache-2.0"
] | null | null | null |
fhir/immunizations_demo/scripts/assemble_training_data.py
|
mevans845/healthcare
|
55a1d08cddb344821feb3abe207ca4d62c6419ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Loads resources exported from a FHIR Store to GCS (specifically in this
demo, Patient, QuestionnaireResponse and RiskAssessment) and extracts relevant
features and labels from the resources for training. The genereated tensorflow
record dataset is stored in a specified location on GCS.
Example usage:
python assemble_training_data.py --src_bucket=my-bucket \
--src_folder=export \
--dst_bucket=my-bucket \
--dst_folder=tfrecords
"""
import sys
sys.path.insert(0, '..') # Set up sys path.
import datetime
import json
import random
import tensorflow as tf
import math
from absl import app
from absl import flags
from functools import reduce
from google.cloud import storage
from io import StringIO
from shared.utils import *
# Command-line flags: source bucket/folder of the FHIR Store export and
# destination bucket/folder for the generated TFRecord files.
FLAGS = flags.FLAGS
flags.DEFINE_string('src_bucket', None,
                    # Typo fix in help text: was "GCS bucekt".
                    'GCS bucket where exported resources are stored.')
flags.DEFINE_string('src_folder', None,
                    'GCS folder in the src_bucket where exported resources are stored.')
flags.DEFINE_string('dst_bucket', None,
                    # Typo fix in help text: was "tensowflow".
                    'GCS bucket to save the tensorflow record file.')
flags.DEFINE_string('dst_folder', None,
                    'GCS folder in the dst_bucket to save the tensorflow record file.')
flags.mark_flag_as_required('src_bucket')
flags.mark_flag_as_required('dst_bucket')
def load_resources_by_type(res_type):
    """Download exported resources of ``res_type`` from the GCS source bucket.
    Scans blobs under FLAGS.src_folder and returns the parsed JSON resources
    (one per line, as produced by the FHIR Store GCS export) from the first
    blob whose name ends with ``res_type``; returns [] when none matches.
    """
    client = storage.Client()
    bucket = client.get_bucket(FLAGS.src_bucket)
    for blob in bucket.list_blobs(prefix=FLAGS.src_folder):
        if blob.name.endswith(res_type):
            lines = blob.download_as_string().splitlines()
            return [json.loads(r.decode("utf-8")) for r in lines]
    return []
def build_examples():
    """Assemble tf.train.Example protos from the exported FHIR resources.
    Joins Patients, QuestionnaireResponses and Conditions (grouped by the
    questionnaire response they evidence) and emits one example per
    (response, disease) pair with features age/gender/country/duration/
    disease and the binary label ``risk``.
    """
    def map_by_id(result, item):
        result[item['id']] = item
        return result
    patients = reduce(map_by_id, load_resources_by_type(PATIENT_TYPE), {})
    questionnaire_responses = load_resources_by_type(QUESTIONNAIRERESPONSE_TYPE)
    def map_by_qid(result, item):
        # Group conditions by the questionnaire response they reference.
        qid = extract_uuid(extract_evidence_id(item))
        result.setdefault(qid, []).append(item)
        return result
    conditions = reduce(map_by_qid, load_resources_by_type(CONDITION_TYPE), {})
    examples = []
    for questionnaire_response in questionnaire_responses:
        pid = extract_uuid(questionnaire_response['subject']['reference'])
        if pid not in patients:
            continue
        patient = patients[pid]
        diseases = set()
        qid = questionnaire_response['id']
        if qid in conditions:
            # Bug fix: this was a lazy `map(...)` iterator, which the
            # `disease in diseases` membership tests below consumed on first
            # use, producing wrong labels for later diseases. Materialize a
            # set for repeated O(1) membership checks.
            diseases = {extract_condition_disease(c) for c in conditions[qid]}
        age = calculate_age(patient['birthDate'])
        gender = 1 if patient['gender'] == 'male' else 0
        country = COUNTRY_MAP[extract_country(questionnaire_response)]
        duration = calculate_duration(
            *extract_start_end_date(questionnaire_response))
        for disease in DISEASE_MAP:
            risk = 1 if disease in diseases else 0
            feature = {
                'age': tf.train.Feature(int64_list=tf.train.Int64List(value=[age])),
                'gender': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[gender])),
                'country': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[country])),
                'duration': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[duration])),
                'disease': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[DISEASE_MAP[disease]])),
                'risk': tf.train.Feature(int64_list=tf.train.Int64List(value=[risk])),
            }
            examples.append(tf.train.Example(
                features=tf.train.Features(feature=feature)))
    return examples
def save_examples(examples):
    """Splits examples into training and evaluate groups and saves to GCS.
    Shuffles in place, writes the first 80% to ``training.tfrecord`` and the
    remaining 20% to ``eval.tfrecord`` under the destination bucket/folder.
    """
    random.shuffle(examples)
    # First 80% as training data, rest for evaluation.
    idx = int(math.ceil(len(examples) * .8))
    training_folder_path = "%s/%s" % (FLAGS.dst_folder, 'training.tfrecord') \
        if FLAGS.dst_folder is not None else 'training.tfrecord'
    record_path = "gs://%s/%s" % (FLAGS.dst_bucket, training_folder_path)
    with tf.python_io.TFRecordWriter(record_path) as w:
        for example in examples[:idx]:
            w.write(example.SerializeToString())
    eval_folder_path = "%s/%s" % (FLAGS.dst_folder, 'eval.tfrecord') \
        if FLAGS.dst_folder is not None else 'eval.tfrecord'
    record_path = "gs://%s/%s" % (FLAGS.dst_bucket, eval_folder_path)
    with tf.python_io.TFRecordWriter(record_path) as w:
        # Bug fix: was `examples[idx+1:]`, which silently dropped
        # examples[idx] from both datasets.
        for example in examples[idx:]:
            w.write(example.SerializeToString())
def main(_):
  """absl.app entry point: builds every example and writes both splits to GCS."""
  save_examples(build_examples())

if __name__ == '__main__':
  app.run(main)
| 34.03268
| 78
| 0.710966
|
4a1416244345f6066c435cd61203af14687c52df
| 5,351
|
py
|
Python
|
setup.py
|
hollerith/pattern
|
302c38f63e4f3580eb546864a2c1c90381a9c263
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
hollerith/pattern
|
302c38f63e4f3580eb546864a2c1c90381a9c263
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
hollerith/pattern
|
302c38f63e4f3580eb546864a2c1c90381a9c263
|
[
"BSD-3-Clause"
] | null | null | null |
#### PATTERN #######################################################################################
from __future__ import print_function
import sys
import os
from io import open
from setuptools import setup
from pattern import __version__
#---------------------------------------------------------------------------------------------------
# "python setup.py zip" will create the zipped distribution and checksum.
if sys.argv[-1] == "zip":
    import zipfile
    import hashlib
    import re
    n = "pattern-%s.zip" % __version__
    p = os.path.join(os.path.dirname(os.path.realpath(__file__)))
    z = zipfile.ZipFile(os.path.join(p, "..", n), "w", zipfile.ZIP_DEFLATED)
    for root, folders, files in os.walk(p):
        for f in files:
            f = os.path.join(root, f)
            # Exclude private settings.
            if f.endswith(os.path.join("web", "api.py")):
                d = "#--- PRIVATE"
                s = open(f, "r", encoding="utf-8").read().split(d)
                x = open(f, "w", encoding="utf-8")
                x.write(s[0])
                x.close()
            # Exclude revision history (.git).
            # Exclude development files (.dev).
            if not re.search(r"\.DS|\.git[^i]|\.pyc|\.dev|tmp", f):
                z.write(f, os.path.join("pattern-" + __version__, os.path.relpath(f, p)))
            if f.endswith(os.path.join("web", "api.py")):
                # Restore the private section stripped above.
                x = open(f, "w", encoding="utf-8")
                x.write(d.join(s))
                x.close()
    z.close()
    print(n)
    # Open the archive in binary mode: `io.open` without "b" decodes text and
    # fails on zip bytes under Python 3, and hashlib requires bytes anyway.
    with open(z.filename, "rb") as archive:
        print(hashlib.sha256(archive.read()).hexdigest())
    sys.exit(0)
#---------------------------------------------------------------------------------------------------
# "python setup.py install" will install /pattern in /site-packages.
setup(
            name = "Pattern",
            # Use the package's own __version__ (imported above and already used
            # for the zip archive name) so the distribution version cannot drift
            # from the code; it was previously hard-coded as "3.6".
         version = __version__,
     description = "Web mining module for Python.",
         license = "BSD",
          author = "Tom De Smedt",
    author_email = "tom@organisms.be",
             url = "http://www.clips.ua.ac.be/pages/pattern",
        packages = [
        "pattern",
        "pattern.web",
        "pattern.web.cache",
        "pattern.web.imap",
        "pattern.web.locale",
        "pattern.web.oauth",
        "pattern.db",
        "pattern.text",
        "pattern.text.de",
        "pattern.text.en",
        "pattern.text.en.wordlist",
        "pattern.text.en.wordnet",
        "pattern.text.ru",
        "pattern.text.ru.wordlist",
        "pattern.text.es",
        "pattern.text.fr",
        "pattern.text.it",
        "pattern.text.nl",
        "pattern.vector",
        "pattern.vector.svm",
        "pattern.graph",
        "pattern.server"
    ],
    # Non-Python data files shipped inside each package.
    package_data = {
        "pattern"                 : ["*.js"],
        "pattern.web.cache"       : ["tmp/*"],
        "pattern.web.locale"      : ["*"],
        "pattern.text.de"         : ["*.txt", "*.xml"],
        "pattern.text.en"         : ["*.txt", "*.xml", "*.slp"],
        "pattern.text.en.wordlist": ["*.txt"],
        "pattern.text.en.wordnet" : ["*.txt", "dict/*"],
        "pattern.text.ru"         : ["*.txt", "*.xml", "*.slp"],
        "pattern.text.ru.wordlist": ["*.txt"],
        "pattern.text.es"         : ["*.txt", "*.xml"],
        "pattern.text.fr"         : ["*.txt", "*.xml"],
        "pattern.text.it"         : ["*.txt", "*.xml"],
        "pattern.text.nl"         : ["*.txt", "*.xml"],
        "pattern.vector"          : ["*.txt"],
        "pattern.vector.svm"      : ["*.txt"],
        "pattern.graph"           : ["*.js", "*.csv"],
        "pattern.server"          : ["static/*"],
    },
      py_modules = [
        "pattern.metrics",
        "pattern.helpers",
        "pattern.text.search",
        "pattern.text.tree"
    ],
     classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: Dutch",
        "Natural Language :: English",
        "Natural Language :: French",
        "Natural Language :: German",
        "Natural Language :: Italian",
        "Natural Language :: Spanish",
        "Operating System :: OS Independent",
        "Programming Language :: JavaScript",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Topic :: Internet :: WWW/HTTP :: Indexing/Search",
        "Topic :: Multimedia :: Graphics",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Visualization",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Text Processing :: Markup :: HTML"
    ],
    install_requires = [
        "future",
        "backports.csv",
        "beautifulsoup4",
        "lxml",
        "feedparser",
        # pdfminer never shipped for py3; pdfminer.six is its py3 fork.
        "pdfminer" if sys.version < "3" else "pdfminer.six",
        "numpy",
        "scipy",
        "nltk",
        "python-docx",
        "requests"
    ],
    zip_safe = False
)
| 35.203947
| 100
| 0.484209
|
4a1416e6acb9f0b154e7dd9e740fe385db9f4456
| 1,944
|
py
|
Python
|
tests/homework/test_homework6.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
tests/homework/test_homework6.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | 1
|
2018-03-08T19:46:08.000Z
|
2018-03-08T20:00:47.000Z
|
tests/homework/test_homework6.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
import unittest
#write import statement for homework 6 file
from src.homework.homework6 import get_point_mutations
from src.homework.homework6 import get_dna_complement
from src.homework.homework6 import transcribe_dna_into_rna
from src.homework.homework6 import get_gc_content
class TestHomework6(unittest.TestCase):
    """Unit tests for the homework 6 DNA string-processing functions."""

    def test_sample(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual(1, 1)

    # create a test case for function find_motif_in_dna with arguments GATATATGCATATACTT and ATAT
    # the result should be 2 4 10 (three different integers)

    # create a test case for function get_point_mutations with arguments GAGCCTACTAACGGGAT and CATCGTAATGACGGCCT
    # the result should be 7 (the Hamming distance between the two strands)
    def test_get_point_mutations_w_GAGCCTACTAACGGGAT_and_CATCGTAATGACGGCCT(self):
        self.assertEqual(7, get_point_mutations('GAGCCTACTAACGGGAT', 'CATCGTAATGACGGCCT'))

    # create a test case for function get_dna_complement with argument AAAACCCGGT the result should be ACCGGGTTTT
    def test_get_dna_complement_w_AAAACCCGGT(self):
        self.assertEqual('ACCGGGTTTT', get_dna_complement('AAAACCCGGT'))

    # create a test case for function transcribe_dna_to_rna with argument GATGGAACTTGACTACGTAAATT
    # the result should be GAUGGAACUUGACUACGUAAAUU (T replaced with U)
    def test_transcribe_dna_to_rna_w_GATGGAACTTGACTACGTAAATT(self):
        self.assertEqual('GAUGGAACUUGACUACGUAAAUU', transcribe_dna_into_rna('GATGGAACTTGACTACGTAAATT'))

    # create a test case for function get_gc_content with arguments
    # CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT
    # the result should be 60.919540
    # NOTE(review): exact float equality presumably relies on the implementation
    # rounding to six decimals — confirm against the homework spec.
    def test_get_gc_content_w_CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT(self):
        self.assertEqual(60.919540, get_gc_content('CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT'))

if __name__ == '__main__':
    # Run with verbose output when executed directly.
    unittest.main(verbosity = 2)
| 46.285714
| 142
| 0.817901
|
4a1416eaa4366e01fc19c26a95c3dd9bb2d80782
| 12,760
|
py
|
Python
|
data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
|
kbrwn/quay
|
733c44922b039464641926107bc75c583973468e
|
[
"Apache-2.0"
] | 1
|
2021-03-02T21:15:04.000Z
|
2021-03-02T21:15:04.000Z
|
data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
|
kbrwn/quay
|
733c44922b039464641926107bc75c583973468e
|
[
"Apache-2.0"
] | 20
|
2019-12-26T17:32:34.000Z
|
2022-03-21T22:18:06.000Z
|
data/migrations/versions/703298a825c2_backfill_new_encrypted_fields.py
|
kbrwn/quay
|
733c44922b039464641926107bc75c583973468e
|
[
"Apache-2.0"
] | 1
|
2020-05-31T16:28:40.000Z
|
2020-05-31T16:28:40.000Z
|
"""Backfill new encrypted fields
Revision ID: 703298a825c2
Revises: c13c8052f7a6
Create Date: 2019-08-19 16:07:48.109889
"""
# revision identifiers, used by Alembic.
revision = '703298a825c2'
down_revision = 'c13c8052f7a6'
import logging
import uuid
from datetime import datetime
from peewee import (JOIN, IntegrityError, DateTimeField, CharField, ForeignKeyField,
BooleanField, TextField, IntegerField)
from alembic import op as original_op
from data.migrations.progress import ProgressWrapper
import sqlalchemy as sa
from data.database import (BaseModel, User, Repository, AccessTokenKind, Role,
random_string_generator, QuayUserField, BuildTriggerService,
uuid_generator, DisableReason)
from data.fields import Credential, DecryptedValue, EncryptedCharField, EncryptedTextField, EnumField, CredentialField
from data.model.token import ACCESS_TOKEN_NAME_PREFIX_LENGTH
from data.model.appspecifictoken import TOKEN_NAME_PREFIX_LENGTH as AST_TOKEN_NAME_PREFIX_LENGTH
from data.model.oauth import ACCESS_TOKEN_PREFIX_LENGTH as OAUTH_ACCESS_TOKEN_PREFIX_LENGTH
from data.model.oauth import AUTHORIZATION_CODE_PREFIX_LENGTH
BATCH_SIZE = 10
logger = logging.getLogger(__name__)
def _iterate(model_class, clause):
    """Yields every row of `model_class` matching `clause`, in batches.

    The query is re-issued after each batch of BATCH_SIZE rows, so callers are
    expected to mutate yielded rows out of the clause; otherwise this loops
    forever over the same rows.
    """
    while True:
        batch = list(model_class.select().where(clause).limit(BATCH_SIZE))
        if not batch:
            return
        for row in batch:
            yield row
def _decrypted(value):
if value is None:
return None
assert isinstance(value, basestring)
return DecryptedValue(value)
# NOTE: As per standard migrations involving Peewee models, we copy them here, as they will change
# after this call.
class AccessToken(BaseModel):
    # Legacy plaintext credential; `upgrade` splits it at
    # ACCESS_TOKEN_NAME_PREFIX_LENGTH into token_name + token_code.
    code = CharField(default=random_string_generator(length=64), unique=True, index=True)
    # Lookup prefix of the credential (first characters of `code`).
    token_name = CharField(default=random_string_generator(length=32), unique=True, index=True)
    # Encrypted remainder of the credential.
    token_code = EncryptedCharField(default_token_length=32)

class RobotAccountToken(BaseModel):
    robot_account = QuayUserField(index=True, allows_robots=True, unique=True)
    # Backfilled by `upgrade` from the robot user's `email` column.
    token = EncryptedCharField(default_token_length=64)
    fully_migrated = BooleanField(default=False)

class RepositoryBuildTrigger(BaseModel):
    uuid = CharField(default=uuid_generator, index=True)
    # Legacy plaintext columns...
    auth_token = CharField(null=True)
    private_key = TextField(null=True)
    # ...and their encrypted replacements, backfilled by `upgrade`.
    secure_auth_token = EncryptedCharField(null=True)
    secure_private_key = EncryptedTextField(null=True)
    fully_migrated = BooleanField(default=False)

class AppSpecificAuthToken(BaseModel):
    # `upgrade` splits legacy `token_code` at AST_TOKEN_NAME_PREFIX_LENGTH
    # into token_name (plaintext prefix) + token_secret (encrypted remainder).
    token_name = CharField(index=True, unique=True, default=random_string_generator(60))
    token_secret = EncryptedCharField(default_token_length=60)
    token_code = CharField(default=random_string_generator(length=120), unique=True, index=True)

class OAuthAccessToken(BaseModel):
    # `upgrade` splits legacy `access_token` at OAUTH_ACCESS_TOKEN_PREFIX_LENGTH
    # into token_name + hashed token_code credential.
    token_name = CharField(index=True, unique=True)
    token_code = CredentialField()
    access_token = CharField(index=True)

class OAuthAuthorizationCode(BaseModel):
    # `upgrade` splits legacy `code` at AUTHORIZATION_CODE_PREFIX_LENGTH
    # into code_name + hashed code_credential.
    code = CharField(index=True, unique=True, null=True)
    code_name = CharField(index=True, unique=True)
    code_credential = CredentialField()

class OAuthApplication(BaseModel):
    # `upgrade` wraps legacy `client_secret` into the encrypted column.
    secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
    fully_migrated = BooleanField(default=False)
    client_secret = CharField(default=random_string_generator(length=40))
def upgrade(tables, tester, progress_reporter):
    """Backfills the new encrypted/prefixed credential columns from the legacy
    plaintext columns, then flips nullability so the new columns become
    required and the legacy ones optional.

    Processes, in order: access tokens, robot account tokens, repository build
    triggers, app-specific auth tokens, OAuth access tokens, OAuth
    authorization codes and OAuth application secrets. Each backfill iterates
    in batches (via `_iterate`) until no unmigrated rows remain, then asserts
    completion. `tables` and `tester` are part of the migration signature but
    unused here.
    """
    op = ProgressWrapper(original_op, progress_reporter)

    # Empty all access token names to fix the bug where we put the wrong name and code
    # in for some tokens.
    # NOTE(review): the filter `token_name >> None` (IS NULL) makes this look
    # like a no-op; presumably the intent was to clear bad non-null names so
    # the loop below regenerates them — confirm against the revision history.
    AccessToken.update(token_name=None).where(AccessToken.token_name >> None).execute()

    # AccessToken: split legacy `code` into name prefix + encrypted remainder.
    logger.info('Backfilling encrypted credentials for access tokens')
    for access_token in _iterate(AccessToken, ((AccessToken.token_name >> None) |
                                               (AccessToken.token_name == ''))):
        logger.info('Backfilling encrypted credentials for access token %s', access_token.id)
        assert access_token.code is not None
        assert access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
        assert access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:]

        token_name = access_token.code[:ACCESS_TOKEN_NAME_PREFIX_LENGTH]
        token_code = _decrypted(access_token.code[ACCESS_TOKEN_NAME_PREFIX_LENGTH:])

        # Matching on both id and the original `code` guards against a
        # concurrent change of the row between select and update.
        (AccessToken
         .update(token_name=token_name, token_code=token_code)
         .where(AccessToken.id == access_token.id, AccessToken.code == access_token.code)
         .execute())

    assert AccessToken.select().where(AccessToken.token_name >> None).count() == 0

    # Robots: create a RobotAccountToken row for every robot user lacking one.
    logger.info('Backfilling encrypted credentials for robots')
    while True:
        has_row = False
        query = (User
                 .select()
                 .join(RobotAccountToken, JOIN.LEFT_OUTER)
                 .where(User.robot == True, RobotAccountToken.id >> None)
                 .limit(BATCH_SIZE))

        for robot_user in query:
            logger.info('Backfilling encrypted credentials for robot %s', robot_user.id)
            has_row = True
            try:
                RobotAccountToken.create(robot_account=robot_user,
                                         token=_decrypted(robot_user.email),
                                         fully_migrated=False)
            except IntegrityError:
                # Another worker created the row first; restart the batch.
                break

        if not has_row:
            break

    # RepositoryBuildTrigger: encrypt the plaintext auth token and private key.
    logger.info('Backfilling encrypted credentials for repo build triggers')
    for repo_build_trigger in _iterate(RepositoryBuildTrigger,
                                       (RepositoryBuildTrigger.fully_migrated == False)):
        logger.info('Backfilling encrypted credentials for repo build trigger %s',
                    repo_build_trigger.id)

        (RepositoryBuildTrigger
         .update(secure_auth_token=_decrypted(repo_build_trigger.auth_token),
                 secure_private_key=_decrypted(repo_build_trigger.private_key),
                 fully_migrated=True)
         .where(RepositoryBuildTrigger.id == repo_build_trigger.id,
                RepositoryBuildTrigger.uuid == repo_build_trigger.uuid)
         .execute())

    assert (RepositoryBuildTrigger
            .select()
            .where(RepositoryBuildTrigger.fully_migrated == False)
            .count()) == 0

    # AppSpecificAuthToken: split legacy `token_code` into name + encrypted secret.
    logger.info('Backfilling encrypted credentials for app specific auth tokens')
    for token in _iterate(AppSpecificAuthToken, ((AppSpecificAuthToken.token_name >> None) |
                                                 (AppSpecificAuthToken.token_name == '') |
                                                 (AppSpecificAuthToken.token_secret >> None))):
        logger.info('Backfilling encrypted credentials for app specific auth %s',
                    token.id)
        assert token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:]

        token_name = token.token_code[:AST_TOKEN_NAME_PREFIX_LENGTH]
        token_secret = _decrypted(token.token_code[AST_TOKEN_NAME_PREFIX_LENGTH:])
        assert token_name
        assert token_secret

        (AppSpecificAuthToken
         .update(token_name=token_name,
                 token_secret=token_secret)
         .where(AppSpecificAuthToken.id == token.id,
                AppSpecificAuthToken.token_code == token.token_code)
         .execute())

    assert (AppSpecificAuthToken
            .select()
            .where(AppSpecificAuthToken.token_name >> None)
            .count()) == 0

    # OAuthAccessToken: split legacy `access_token` into name + hashed credential.
    logger.info('Backfilling credentials for OAuth access tokens')
    for token in _iterate(OAuthAccessToken, ((OAuthAccessToken.token_name >> None) |
                                             (OAuthAccessToken.token_name == ''))):
        logger.info('Backfilling credentials for OAuth access token %s', token.id)
        token_name = token.access_token[:OAUTH_ACCESS_TOKEN_PREFIX_LENGTH]
        token_code = Credential.from_string(token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:])
        assert token_name
        assert token.access_token[OAUTH_ACCESS_TOKEN_PREFIX_LENGTH:]

        (OAuthAccessToken
         .update(token_name=token_name,
                 token_code=token_code)
         .where(OAuthAccessToken.id == token.id,
                OAuthAccessToken.access_token == token.access_token)
         .execute())

    assert (OAuthAccessToken
            .select()
            .where(OAuthAccessToken.token_name >> None)
            .count()) == 0

    # OAuthAuthorizationCode: split legacy `code` into name + hashed credential.
    logger.info('Backfilling credentials for OAuth auth code')
    for code in _iterate(OAuthAuthorizationCode, ((OAuthAuthorizationCode.code_name >> None) |
                                                  (OAuthAuthorizationCode.code_name == ''))):
        logger.info('Backfilling credentials for OAuth auth code %s', code.id)
        # Rows with a NULL legacy code get a freshly generated random one.
        user_code = code.code or random_string_generator(AUTHORIZATION_CODE_PREFIX_LENGTH * 2)()
        code_name = user_code[:AUTHORIZATION_CODE_PREFIX_LENGTH]
        code_credential = Credential.from_string(user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:])
        assert code_name
        assert user_code[AUTHORIZATION_CODE_PREFIX_LENGTH:]

        (OAuthAuthorizationCode
         .update(code_name=code_name, code_credential=code_credential)
         .where(OAuthAuthorizationCode.id == code.id)
         .execute())

    assert (OAuthAuthorizationCode
            .select()
            .where(OAuthAuthorizationCode.code_name >> None)
            .count()) == 0

    # OAuthApplication: wrap the plaintext client secret into the encrypted column.
    logger.info('Backfilling secret for OAuth applications')
    for app in _iterate(OAuthApplication, OAuthApplication.fully_migrated == False):
        logger.info('Backfilling secret for OAuth application %s', app.id)
        client_secret = app.client_secret or str(uuid.uuid4())
        secure_client_secret = _decrypted(client_secret)

        (OAuthApplication
         .update(secure_client_secret=secure_client_secret, fully_migrated=True)
         .where(OAuthApplication.id == app.id, OAuthApplication.fully_migrated == False)
         .execute())

    assert (OAuthApplication
            .select()
            .where(OAuthApplication.fully_migrated == False)
            .count()) == 0

    # Adjust existing fields to be nullable.
    op.alter_column('accesstoken', 'code', nullable=True, existing_type=sa.String(length=255))
    op.alter_column('oauthaccesstoken', 'access_token', nullable=True, existing_type=sa.String(length=255))
    op.alter_column('oauthauthorizationcode', 'code', nullable=True, existing_type=sa.String(length=255))
    op.alter_column('appspecificauthtoken', 'token_code', nullable=True, existing_type=sa.String(length=255))

    # Adjust new fields to be non-nullable.
    op.alter_column('accesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('accesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('appspecificauthtoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('appspecificauthtoken', 'token_secret', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('oauthaccesstoken', 'token_name', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('oauthaccesstoken', 'token_code', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('oauthauthorizationcode', 'code_name', nullable=False, existing_type=sa.String(length=255))
    op.alter_column('oauthauthorizationcode', 'code_credential', nullable=False, existing_type=sa.String(length=255))
def downgrade(tables, tester, progress_reporter):
    """Reverts the nullability changes applied by `upgrade`.

    Legacy credential columns become required again; the new name/code columns
    become optional. `tables` and `tester` are part of the migration signature
    but unused here.
    """
    op = ProgressWrapper(original_op, progress_reporter)

    # (table, column, nullable) triples, applied in the same order the
    # original statements ran.
    column_nullability = [
        ('accesstoken', 'code', False),
        ('oauthaccesstoken', 'access_token', False),
        ('oauthauthorizationcode', 'code', False),
        ('appspecificauthtoken', 'token_code', False),
        ('accesstoken', 'token_name', True),
        ('accesstoken', 'token_code', True),
        ('appspecificauthtoken', 'token_name', True),
        ('appspecificauthtoken', 'token_secret', True),
        ('oauthaccesstoken', 'token_name', True),
        ('oauthaccesstoken', 'token_code', True),
        ('oauthauthorizationcode', 'code_name', True),
        ('oauthauthorizationcode', 'code_credential', True),
    ]
    for table, column, nullable in column_nullability:
        op.alter_column(table, column, nullable=nullable,
                        existing_type=sa.String(length=255))
| 44
| 122
| 0.728918
|
4a1417629c281856b7d2c6a41fd84e3c19768de3
| 9,910
|
py
|
Python
|
rank-change-precompute-1.py
|
stressosaurus/a-statistical-model-of-word-rank-evolution
|
4a06a872b5c84b561510958aed18e76d931443f4
|
[
"MIT"
] | null | null | null |
rank-change-precompute-1.py
|
stressosaurus/a-statistical-model-of-word-rank-evolution
|
4a06a872b5c84b561510958aed18e76d931443f4
|
[
"MIT"
] | null | null | null |
rank-change-precompute-1.py
|
stressosaurus/a-statistical-model-of-word-rank-evolution
|
4a06a872b5c84b561510958aed18e76d931443f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
print('Initializing...')
import pickle as pkl
import os
import numpy as np
import pandas as pd
import googleNgram as gn
import wf2020 as wf20 # module for the wright-fisher inspired model
import languageCompute as lc # module for time-series computations
from scipy.stats import variation
# make directory for precomputed values and simulations;
# os.makedirs(..., exist_ok=True) creates 'save/' and the subdirectory in one
# call, tolerates pre-existing directories, and — unlike the previous bare
# try/except pass — still surfaces real OSErrors (e.g. permissions).
os.makedirs('save/rank-change-computations', exist_ok=True)

# directories holding the precomputed values and simulations
precomp_gn_dir = 'save/google-1gram-computations'
precomp_wf_dir = 'save/wright-fisher-simulations'
# Helpers shared by the language and Wright-Fisher sections below; previously
# the same ~20-line rank-computation/save body was copy-pasted seven times.

def _rank_statistics(pscore):
    """Computes rank trajectories and rank-change statistics for a p-score table.

    Parameters
    ----------
    pscore : pandas.DataFrame
        words (index) x years (columns) table of p-scores.

    Returns
    -------
    dict with keys 'ranks' (word x year ranks), 'dranks' (year-to-year rank
    changes via lc.generate_delta), 'olist' (rank -> word per year),
    'drank-variance' and 'drank-sum' (per-word statistics over dranks).
    """
    ranks = pd.DataFrame(np.zeros(pscore.shape, dtype=int),
                         index=pscore.index, columns=pscore.columns)
    # olist is the inverse map: rank (1..n_words) -> word, per year.
    olist = pd.DataFrame(np.zeros(pscore.shape, dtype=str),
                         index=range(1, pscore.shape[0] + 1), columns=pscore.columns)
    for t in pscore.columns:
        for word, rank in lc.return_ranks(pscore[t]).items():
            ranks[t][word] = rank
            olist[t][rank] = word
    dranks = lc.generate_delta(ranks)
    return {'ranks': ranks, 'dranks': dranks, 'olist': olist,
            'drank-variance': np.var(dranks, axis=1),
            'drank-sum': np.sum(dranks, axis=1)}

def _save_ranks(result, name):
    """Pickles a rank-statistics dict under save/rank-change-computations/."""
    print('saving rank-change-computations/' + name + '...')
    with open('save/rank-change-computations/' + name, 'wb') as f:
        pkl.dump(result, f)

def _wf_name(c_, a_, alpha_, beta_, T_):
    """Builds the canonical Wright-Fisher simulation file name (str() formatting,
    matching the names used by the simulation precompute script)."""
    return 'wf_c%s_a%s_alpha%s_beta%s_T%s.pkl' % (c_, a_, alpha_, beta_, T_)

def _rank_wf_run(c_, a_, alpha_, beta_, T_):
    """Loads one Wright-Fisher simulation, ranks it and saves the result.

    Returns the number of columns of the simulation's p-score table; the
    original script rebound T to that value before naming the output file, so
    callers assign the return value back to T to preserve that behavior.
    """
    src = precomp_wf_dir + '/' + _wf_name(c_, a_, alpha_, beta_, T_)
    with open(src, 'rb') as fh:
        pscore = pkl.load(fh)['pscore']
    # NOTE(review): as in the original, T becomes the column count before the
    # save name is built — confirm the simulations have exactly T columns so
    # load and save names agree.
    T_out = pscore.shape[1]
    _save_ranks(_rank_statistics(pscore), 'ranks_' + _wf_name(c_, a_, alpha_, beta_, T_out))
    return T_out

# Languages
print('ranking languages...')
n = '1'
l_codes = ['eng','eng-us','eng-gb','eng-fiction','chi-sim','fre','ger','ita','heb','rus','spa']
for l in l_codes:
    print(l + '...')
    D = gn.read(n, l, ignore_case=True, restriction=True, annotation=False)
    # Transpose-sort-transpose orders the year columns; some entries are not
    # DataFrames, so failures are skipped as before.
    for k in D.keys():
        try:
            D[k] = D[k].T.sort_index().T
        except Exception:
            pass
    _save_ranks(_rank_statistics(D['pscore']), 'ranks_' + l + '.pkl')

# Wright-Fisher Simulations
print('ranking Wright-Fisher simulations...')
# fixed and varied parameters
c = 1000       # vocabulary words
a = 1          # Zipf parameter
alpha = 0.01   # 0.024 # corpus size rate of change
beta = 100000  # initial corpus size
T = 109        # total time elapsed (years)

# alpha varies
print('alpha varies...')
alpha_vect = [0.01, 0.015, 0.020, 0.025, 0.030]
for i in alpha_vect:
    T = _rank_wf_run(c, a, i, beta, T)

# beta varies
print('beta varies...')
beta_vect = [100000, 200000, 300000, 400000, 800000]
for i in beta_vect:
    T = _rank_wf_run(c, a, alpha, i, T)

# c varies
print('c varies...')
c_vect = [1000, 2000, 3000, 4000, 8000]
for i in c_vect:
    T = _rank_wf_run(i, a, alpha, beta, T)

# ratio c/beta = 0.01 (c_vect paired with beta_vect element-wise)
print('ratio c/beta = 0.01...')
for i, j in enumerate(c_vect):
    T = _rank_wf_run(j, a, alpha, int(beta_vect[i]), T)

# ratio c/beta = 0.05
print('ratio c/beta = 0.05...')
beta2_vect = [i / 0.05 for i in c_vect]
for i, j in enumerate(c_vect):
    T = _rank_wf_run(j, a, alpha, int(beta2_vect[i]), T)

# Zipf parameter varies
print('Zipf parameter varies...')
a_vect = [0.70, 0.80, 0.90, 1, 1.10]
for i in a_vect:
    T = _rank_wf_run(c, i, alpha, beta, T)
| 44.241071
| 151
| 0.638951
|
4a1417c543098e5d445f7fa867a6883e22a04bc8
| 9,779
|
py
|
Python
|
gammapy/utils/gauss.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/utils/gauss.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | 1
|
2017-02-22T23:12:30.000Z
|
2017-02-22T23:12:30.000Z
|
gammapy/utils/gauss.py
|
Jaleleddine/gammapy
|
de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Multi-Gaussian distribution utilities (Gammapy internal)."""
import numpy as np
import scipy.optimize
class Gauss2DPDF:
    """2D symmetric Gaussian PDF.

    Reference: http://en.wikipedia.org/wiki/Multivariate_normal_distribution#Bivariate_case

    Parameters
    ----------
    sigma : float
        Gaussian width.
    """

    def __init__(self, sigma=1):
        self.sigma = np.asarray(sigma, np.float64)

    @property
    def _sigma2(self):
        """Sigma squared (float)"""
        return self.sigma * self.sigma

    @property
    def amplitude(self):
        """PDF amplitude at the center (float)"""
        # Bug fix: ``self.__call(0, 0)`` underwent double-underscore name
        # mangling to ``_Gauss2DPDF__call`` (which doesn't exist) and raised
        # AttributeError; invoke the instance directly instead.
        return self(0, 0)

    def __call__(self, x, y=0):
        """dp / (dx dy) at position (x, y)

        Parameters
        ----------
        x : `~numpy.ndarray`
            x coordinate
        y : `~numpy.ndarray`, optional
            y coordinate

        Returns
        -------
        dpdxdy : `~numpy.ndarray`
            dp / (dx dy)
        """
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        theta2 = x * x + y * y
        amplitude = 1 / (2 * np.pi * self._sigma2)
        exponent = -0.5 * theta2 / self._sigma2
        return amplitude * np.exp(exponent)

    def dpdtheta2(self, theta2):
        """dp / dtheta2 at position theta2 = theta ^ 2

        Parameters
        ----------
        theta2 : `~numpy.ndarray`
            Offset squared

        Returns
        -------
        dpdtheta2 : `~numpy.ndarray`
            dp / dtheta2
        """
        theta2 = np.asarray(theta2, dtype=np.float64)
        amplitude = 1 / (2 * self._sigma2)
        exponent = -0.5 * theta2 / self._sigma2
        return amplitude * np.exp(exponent)

    def containment_fraction(self, theta):
        """Containment fraction.

        Parameters
        ----------
        theta : `~numpy.ndarray`
            Offset

        Returns
        -------
        containment_fraction : `~numpy.ndarray`
            Containment fraction
        """
        theta = np.asarray(theta, dtype=np.float64)
        return 1 - np.exp(-0.5 * theta ** 2 / self._sigma2)

    def containment_radius(self, containment_fraction):
        """Containment angle for a given containment fraction.

        Inverse of `containment_fraction` (analytical for a 2D Gaussian).

        Parameters
        ----------
        containment_fraction : `~numpy.ndarray`
            Containment fraction

        Returns
        -------
        containment_radius : `~numpy.ndarray`
            Containment radius
        """
        containment_fraction = np.asarray(containment_fraction, dtype=np.float64)
        return self.sigma * np.sqrt(-2 * np.log(1 - containment_fraction))

    def gauss_convolve(self, sigma):
        """Convolve with another Gaussian 2D PDF.

        Widths add in quadrature: sigma_new = sqrt(sigma1^2 + sigma2^2).

        Parameters
        ----------
        sigma : `~numpy.ndarray` or float
            Gaussian width of the new Gaussian 2D PDF to covolve with.

        Returns
        -------
        gauss_convolve : `~gammapy.modeling.models.Gauss2DPDF`
            Convolution of both Gaussians.
        """
        sigma = np.asarray(sigma, dtype=np.float64)
        new_sigma = np.sqrt(self._sigma2 + sigma ** 2)
        return Gauss2DPDF(new_sigma)
class MultiGauss2D:
    """Sum of multiple 2D Gaussians.

    Parameters
    ----------
    sigmas : `~numpy.ndarray`
        widths of the Gaussians to add
    norms : `~numpy.ndarray`, optional
        normalizations of the Gaussians to add

    Notes
    -----
    * This sum is no longer a PDF, it is not normalized to 1.
    * The "norm" of each component represents the 2D integral,
      not the amplitude at the origin.
    """

    def __init__(self, sigmas, norms=None):
        # If no norms are given, each component keeps norm 1, i.e. a PDF.
        sigmas = np.asarray(sigmas, dtype=np.float64)
        self.components = [Gauss2DPDF(sigma) for sigma in sigmas]
        if norms is None:
            self.norms = np.ones(len(self.components))
        else:
            self.norms = np.asarray(norms, dtype=np.float64)

    def __call__(self, x, y=0):
        """dp / (dx dy) at position (x, y)

        Parameters
        ----------
        x : `~numpy.ndarray`
            x coordinate
        y : `~numpy.ndarray`, optional
            y coordinate

        Returns
        -------
        total : `~numpy.ndarray`
            dp / (dx dy)
        """
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        # NOTE(review): the accumulator takes the shape of ``x`` only, so a
        # ``y`` broader than ``x`` would not broadcast into it — confirm
        # callers pass compatible shapes.
        total = np.zeros_like(x)
        for norm, component in zip(self.norms, self.components):
            total += norm * component(x, y)
        return total

    @property
    def n_components(self):
        """Number of components (int)"""
        return len(self.components)

    @property
    def sigmas(self):
        """Array of Gaussian widths (`~numpy.ndarray`)"""
        return np.array([_.sigma for _ in self.components])

    @property
    def integral(self):
        """Integral as sum of norms (`~numpy.ndarray`)"""
        return np.nansum(self.norms)

    @property
    def amplitude(self):
        """Amplitude at the center (float)"""
        # Calling the instance directly dispatches to ``__call__``.
        return self(0, 0)

    @property
    def max_sigma(self):
        """Largest Gaussian width (float)"""
        return self.sigmas.max()

    @property
    def eff_sigma(self):
        r"""Effective Gaussian width for single-Gauss approximation (float)

        Notes
        -----
        The effective Gaussian width is given by:

        .. math:: \sigma_\mathrm{eff} = \sqrt{\sum_i N_i \sigma_i^2}

        where ``N`` is normalization and ``sigma`` is width.
        """
        sigma2s = np.array([component._sigma2 for component in self.components])
        return np.sqrt(np.sum(self.norms * sigma2s))

    def dpdtheta2(self, theta2):
        """dp / dtheta2 at position theta2 = theta ^ 2

        Parameters
        ----------
        theta2 : `~numpy.ndarray`
            Offset squared

        Returns
        -------
        dpdtheta2 : `~numpy.ndarray`
            dp / dtheta2
        """
        # Actually this is only a PDF if sum(norms) == 1
        theta2 = np.asarray(theta2, dtype=np.float64)
        total = np.zeros_like(theta2)
        for norm, component in zip(self.norms, self.components):
            total += norm * component.dpdtheta2(theta2)
        return total

    def normalize(self):
        """Normalize the norms in place so they sum to 1.

        Returns
        -------
        norm_multigauss : `~gammapy.modeling.models.MultiGauss2D`
            normalized function (self, for call chaining)
        """
        # Renamed local from ``sum`` to avoid shadowing the builtin.
        total_norm = self.integral
        if total_norm != 0:
            self.norms /= total_norm
        return self

    def containment_fraction(self, theta):
        """Containment fraction.

        Parameters
        ----------
        theta : `~numpy.ndarray`
            Offset

        Returns
        -------
        containment_fraction : `~numpy.ndarray`
            Containment fraction
        """
        theta = np.asarray(theta, dtype=np.float64)
        total = np.zeros_like(theta)
        for norm, component in zip(self.norms, self.components):
            total += norm * component.containment_fraction(theta)
        return total

    def containment_radius(self, containment_fraction):
        """Containment angle for a given containment fraction.

        Parameters
        ----------
        containment_fraction : `~numpy.ndarray`
            Containment fraction

        Returns
        -------
        containment_radius : `~numpy.ndarray`
            Containment radius
        """
        # Bracket for the root search; treated as "infinite" if reached.
        theta_max = 1e3

        def f(theta):
            # positive if theta too large
            return self.containment_fraction(theta) - containment_fraction

        theta = scipy.optimize.brentq(f, a=0, b=theta_max)
        if np.allclose(theta, theta_max):
            theta = np.inf
        return theta

    def match_sigma(self, containment_fraction):
        """Compute equivalent Gauss width.

        Find the sigma of a single-Gaussian distribution that
        approximates this one, such that theta matches for a given
        containment.

        Parameters
        ----------
        containment_fraction : `~numpy.ndarray`
            Containment fraction

        Returns
        -------
        sigma : `~numpy.ndarray`
            Equivalent containment radius
        """
        theta1 = self.containment_radius(containment_fraction)
        # Radius of a unit-width Gaussian at the same containment; the ratio
        # is then the matching sigma.
        theta2 = Gauss2DPDF(sigma=1).containment_radius(containment_fraction)
        return theta1 / theta2

    def gauss_convolve(self, sigma, norm=1):
        """Convolve with another Gauss.

        Compute new norms and sigmas of all the components such that
        the new distribution represents the convolved old distribution
        by a Gaussian of width sigma and then multiplied by norm.

        This MultiGauss2D is unchanged, a new one is created and returned.
        This is useful if you need to e.g. compute theta for one PSF
        and many sigmas.

        Parameters
        ----------
        sigma : `~numpy.ndarray` or float
            Gaussian width of the new Gaussian 2D PDF to covolve with.
        norm : `~numpy.ndarray` or float
            Normalization of the new Gaussian 2D PDF to covolve with.

        Returns
        -------
        new_multi_gauss_2d : `~gammapy.modeling.models.MultiGauss2D`
            Convolution as new MultiGauss2D
        """
        sigma = np.asarray(sigma, dtype=np.float64)
        norm = np.asarray(norm, dtype=np.float64)
        sigmas, norms = [], []
        for ii in range(self.n_components):
            # Component-wise convolution; each norm picks up the extra factor.
            sigmas.append(self.components[ii].gauss_convolve(sigma).sigma)
            norms.append(self.norms[ii] * norm)
        return MultiGauss2D(sigmas, norms)
| 27.94
| 91
| 0.568054
|
4a14182229060d0e5d55c98f056dc5f915f500ef
| 8,230
|
py
|
Python
|
docs/conf.py
|
leckronz/django-dynamic-attachments
|
e77fa6a59375bb19c71d1bcc58dbacfde191c45c
|
[
"BSD-2-Clause"
] | 1
|
2021-10-15T14:02:14.000Z
|
2021-10-15T14:02:14.000Z
|
docs/conf.py
|
leckronz/django-dynamic-attachments
|
e77fa6a59375bb19c71d1bcc58dbacfde191c45c
|
[
"BSD-2-Clause"
] | 6
|
2018-07-02T20:16:05.000Z
|
2021-04-14T14:02:22.000Z
|
docs/conf.py
|
leckronz/django-dynamic-attachments
|
e77fa6a59375bb19c71d1bcc58dbacfde191c45c
|
[
"BSD-2-Clause"
] | 14
|
2018-06-29T12:46:46.000Z
|
2022-01-03T19:08:10.000Z
|
# -*- coding: utf-8 -*-
#
# Attachments documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 03 15:34:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE: ``os`` and ``sys`` come from the sphinx-quickstart template; they are
# only needed if the sys.path tweak below is enabled.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Only autodoc is enabled: API docs are pulled from the package docstrings.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Attachments'
copyright = u'2014, Dan Watson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'default' is the classic Sphinx theme shipped with Sphinx itself.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Attachmentsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Attachments.tex', u'Attachments Documentation',
     u'Dan Watson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'attachments', u'Attachments Documentation',
     [u'Dan Watson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Attachments', u'Attachments Documentation',
     u'Dan Watson', 'Attachments', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.412214
| 79
| 0.715431
|
4a141825741197b3531f854c917157e35f05077d
| 115
|
py
|
Python
|
tests/test_methods/test_ratings.py
|
NationalJournal/py-votesmart
|
9194f28564d8d005386f498b1fc915cc7772f38b
|
[
"BSD-3-Clause"
] | 7
|
2018-03-08T16:33:02.000Z
|
2020-12-22T08:12:31.000Z
|
tests/test_methods/test_ratings.py
|
NationalJournal/py-votesmart
|
9194f28564d8d005386f498b1fc915cc7772f38b
|
[
"BSD-3-Clause"
] | 7
|
2018-02-13T02:40:29.000Z
|
2018-03-08T17:04:53.000Z
|
tests/test_methods/test_ratings.py
|
NationalJournal/py-votesmart
|
9194f28564d8d005386f498b1fc915cc7772f38b
|
[
"BSD-3-Clause"
] | 4
|
2018-02-13T13:41:28.000Z
|
2021-12-21T22:14:54.000Z
|
import pytest
from votesmart.methods.ratings import *
def test_Rating():
    """Smoke test: constructing the Rating method wrapper must not raise."""
    _ = Rating(api_instance='test')
| 19.166667
| 40
| 0.756522
|
4a1418440f655da964689ff78f7877549a33a469
| 7,197
|
py
|
Python
|
ILSVRC15-curation/gen_image_crops_VID.py
|
ralphc1212/SiamFC-PyTorch
|
b99de369c5802a717168e509518043c3a02084ce
|
[
"Apache-2.0"
] | 131
|
2018-10-26T21:04:52.000Z
|
2022-02-24T20:17:03.000Z
|
ILSVRC15-curation/gen_image_crops_VID.py
|
ralphc1212/SiamFC-PyTorch
|
b99de369c5802a717168e509518043c3a02084ce
|
[
"Apache-2.0"
] | 19
|
2018-10-30T15:43:32.000Z
|
2020-03-25T23:24:15.000Z
|
ILSVRC15-curation/gen_image_crops_VID.py
|
ralphc1212/SiamFC-PyTorch
|
b99de369c5802a717168e509518043c3a02084ce
|
[
"Apache-2.0"
] | 36
|
2018-11-03T04:31:39.000Z
|
2022-02-21T16:00:30.000Z
|
'''
Written by Heng Fan
'''
import numpy as np
import os
import glob
import xml.etree.ElementTree as ET
import cv2
import datetime
'''
# default setting for cropping
'''
examplar_size = 127.0  # exemplar (template) crop side in pixels ("examplar" typo kept for callers)
instance_size = 255.0  # search-region crop side in pixels
context_amount = 0.5  # fraction of (w + h) added as context around the target box
def get_subwindow_avg(im, pos, model_sz, original_sz):
    '''
    # obtain image patch, padding with avg channel if area goes outside of border
    # im: H x W x 3 image array; pos: (y, x) crop center;
    # model_sz: output side length; original_sz: side of the square window
    # cut around pos before resizing (defaults to model_sz).
    # The index arithmetic below is ported from 1-based MATLAB code.
    '''
    # Per-channel mean used as the fill value for out-of-image regions.
    avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
    if original_sz is None:
        original_sz = model_sz
    sz = original_sz
    im_sz = im.shape
    # make sure the size is not too small
    assert (im_sz[0] > 2) & (im_sz[1] > 2), "The size of image is too small!"
    # Half-window offset in 1-based (MATLAB-style) coordinates.
    # NOTE(review): Python 3 round() uses banker's rounding — assumed
    # acceptable for the original training pipeline; confirm if porting.
    c = (sz + 1) / 2
    # check out-of-bounds coordinates, and set them to black
    context_xmin = round(pos[1] - c)  # floor(pos(2) - sz(2) / 2);
    context_xmax = context_xmin + sz - 1
    context_ymin = round(pos[0] - c)  # floor(pos(1) - sz(1) / 2);
    context_ymax = context_ymin + sz - 1
    left_pad = max(0, 1 - context_xmin)  # in python, index starts from 0
    top_pad = max(0, 1 - context_ymin)
    right_pad = max(0, context_xmax - im_sz[1])
    bottom_pad = max(0, context_ymax - im_sz[0])
    # Shift window coordinates into the padded image frame.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    im_R = im[:, :, 0]
    im_G = im[:, :, 1]
    im_B = im[:, :, 2]
    # padding
    # Bitwise | on bools is intentional here (eager evaluation, same result
    # as ``or`` for scalars).
    if (top_pad != 0) | (bottom_pad != 0) | (left_pad != 0) | (right_pad != 0):
        im_R = np.pad(im_R, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
                      constant_values=avg_chans[0])
        im_G = np.pad(im_G, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
                      constant_values=avg_chans[1])
        im_B = np.pad(im_B, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
                      constant_values=avg_chans[2])
        im = np.stack((im_R, im_G, im_B), axis=2)
    # Slice with -1 to convert the 1-based window back to 0-based indexing.
    im_patch_original = im[int(context_ymin) - 1:int(context_ymax), int(context_xmin) - 1:int(context_xmax), :]
    if model_sz != original_sz:
        im_patch = cv2.resize(im_patch_original, (int(model_sz), int(model_sz)), interpolation=cv2.INTER_CUBIC)
    else:
        im_patch = im_patch_original
    return im_patch
def get_crops(img, bbox, size_z, size_x, context_amount):
    '''
    # get examplar and search region crops
    # bbox is (x, y, w, h); size_z / size_x are the exemplar and search
    # output sides; context_amount is the context fraction around the box.
    '''
    # Box center and dimensions.
    center_x = bbox[0] + bbox[2] / 2
    center_y = bbox[1] + bbox[3] / 2
    box_w = bbox[2]
    box_h = bbox[3]
    # Exemplar: pad width/height with context, take the geometric-mean side.
    padded_w = box_w + context_amount * (box_w + box_h)
    padded_h = box_h + context_amount * (box_w + box_h)
    exemplar_side = np.sqrt(padded_w * padded_h)
    exemplar_scale = size_z / exemplar_side
    center = np.array([center_y, center_x])
    im_crop_z = get_subwindow_avg(img, center, size_z, round(exemplar_side))
    # Search region: grow the exemplar window by the extra margin, expressed
    # in original-image pixels.
    margin = ((size_x - size_z) / 2) / exemplar_scale
    search_side = exemplar_side + 2 * margin
    im_crop_x = get_subwindow_avg(img, center, size_x, round(search_side))
    return im_crop_z, im_crop_x
def generate_image_crops(vid_root_path, vid_curated_path):
    '''
    # save image crops to the vid_curated_path
    # Walks Annotations/VID/train/<split>/<video>/*.xml, reads each frame's
    # bounding boxes, and writes exemplar (.crop.z) and search (.crop.x)
    # JPEG crops mirroring the source directory layout.
    '''
    anno_str = "Annotations/VID/train/"
    data_str = "Data/VID/train/"
    vid_anno_path = os.path.join(vid_root_path, anno_str)
    vid_data_path = os.path.join(vid_root_path, data_str)
    # Typo ("procesed_fraem") kept as-is; it is a local counter only.
    cur_procesed_fraem = 0
    start_time = datetime.datetime.now()
    total_time = 0
    # dirs of level1: e.g., a/, b/, ...
    all_dirs_level1 = os.listdir(vid_anno_path)
    for i in range(len(all_dirs_level1)):
        all_dirs_level2 = os.listdir(os.path.join(vid_anno_path, all_dirs_level1[i]))
        # dirs of level2: e.g., a/ILSVRC2015_train_00000000/, a/ILSVRC2015_train_00001000/, ...
        for j in range(len(all_dirs_level2)):
            frame_list = glob.glob(os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], "*.xml"))
            frame_list.sort()
            # level3: frame level
            for k in range(len(frame_list)):
                # NOTE(review): glob already returns full paths, so this join
                # relies on os.path.join discarding the prefix when the second
                # argument is absolute — confirm vid_root_path is absolute.
                frame_xml_name = os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], frame_list[k])
                frame_xml_tree = ET.parse(frame_xml_name)
                frame_xml_root = frame_xml_tree.getroot()
                # image file path
                frame_img_name = (frame_list[k].replace(".xml", ".JPEG")).replace(vid_anno_path, vid_data_path)
                img = cv2.imread(frame_img_name)
                if img is None:
                    print("Cannot find %s!"%frame_img_name)
                    exit(0)
                # image file name
                frame_filename = frame_xml_root.find('filename').text
                # process (all objects in) each frame
                for object in frame_xml_root.iter("object"):
                    # get trackid
                    id = object.find("trackid").text
                    # get bounding box
                    bbox_node = object.find("bndbox")
                    xmax = float(bbox_node.find('xmax').text)
                    xmin = float(bbox_node.find('xmin').text)
                    ymax = float(bbox_node.find('ymax').text)
                    ymin = float(bbox_node.find('ymin').text)
                    # Inclusive pixel extents, hence the +1.
                    width = xmax - xmin + 1
                    height = ymax - ymin + 1
                    bbox = np.array([xmin, ymin, width, height])
                    # print("processing %s, %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename+".JPEG", id))
                    # get crops
                    im_crop_z, im_crop_x = get_crops(img, bbox, examplar_size, instance_size, context_amount)
                    # save crops
                    save_path = os.path.join(vid_curated_path, data_str, all_dirs_level1[i], all_dirs_level2[j])
                    if not os.path.exists(save_path):
                        os.makedirs(save_path)
                    savename_crop_z = os.path.join(save_path, '{}.{:02d}.crop.z.jpg'.format(frame_filename, int(id)))
                    savename_crop_x = os.path.join(save_path, '{}.{:02d}.crop.x.jpg'.format(frame_filename, int(id)))
                    # JPEG quality 90 for both crops.
                    cv2.imwrite(savename_crop_z, im_crop_z, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                    cv2.imwrite(savename_crop_x, im_crop_x, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                cur_procesed_fraem = cur_procesed_fraem + 1
                if cur_procesed_fraem % 1000 == 0:
                    end_time = datetime.datetime.now()
                    total_time = total_time + int((end_time-start_time).seconds)
                    # NOTE(review): if 1000 frames take < 1 s, .seconds is 0
                    # and the FPS division below raises ZeroDivisionError.
                    print("finished processing %d frames in %d seconds (FPS: %d ) ..." % (cur_procesed_fraem, total_time, int(1000/(end_time-start_time).seconds)))
                    start_time = datetime.datetime.now()
if __name__ == "__main__":
    # path to your VID dataset
    vid_root_path = "/home/hfan/Dataset/ILSVRC2015"
    # Output root; created if missing (only the top level — subdirs are
    # created by generate_image_crops via os.makedirs).
    vid_curated_path = "/home/hfan/Dataset/ILSVRC2015_crops"
    if not os.path.exists(vid_curated_path):
        os.mkdir(vid_curated_path)
    generate_image_crops(vid_root_path, vid_curated_path)
| 38.486631
| 167
| 0.593442
|
4a1418e10c51cac77f5eee3cc7affbb8e116cee7
| 5,345
|
py
|
Python
|
examples/pendulum/analyze_pend_SO3.py
|
thaipduong/SE3HamiltonianDynsLearning
|
caf385cf810055e88314e6e4b39b566f9a0be419
|
[
"MIT"
] | 6
|
2021-06-25T03:08:19.000Z
|
2022-03-24T08:54:28.000Z
|
examples/pendulum/analyze_pend_SO3.py
|
nishr/SE3HamDL
|
72dc5070178948f7c307678421bea80fa6d25796
|
[
"MIT"
] | null | null | null |
examples/pendulum/analyze_pend_SO3.py
|
nishr/SE3HamDL
|
72dc5070178948f7c307678421bea80fa6d25796
|
[
"MIT"
] | 4
|
2021-07-15T18:31:00.000Z
|
2022-02-23T14:00:15.000Z
|
# Hamiltonian-based Neural ODE Networks on the SE(3) Manifold For Dynamics Learning and Control, RSS 2021
# Thai Duong, Nikolay Atanasov
# code structure follows the style of HNN by Greydanus et al. and SymODEM by Zhong et al.
# https://github.com/greydanus/hamiltonian-nn
# https://github.com/Physics-aware-AI/Symplectic-ODENet
import torch
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
from se3hamneuralode import compute_rotation_matrix_from_quaternion, from_pickle, SO3HamNODE
# Shorthand alias for the SciPy ODE solver.
solve_ivp = scipy.integrate.solve_ivp
# Font type 42 = TrueType, so text in saved PDF/PS figures stays editable.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['text.usetex'] = True
# GPU index to use; fall back to CPU when CUDA is unavailable.
gpu=0
device = torch.device('cuda:' + str(gpu) if torch.cuda.is_available() else 'cpu')
def get_model():
    """Load the trained SO(3) Hamiltonian NODE for the pendulum.

    Returns the model with its checkpoint weights applied, plus the pickled
    training statistics dictionary.
    """
    model = SO3HamNODE(device=device, u_dim=1).to(device)
    ckpt_path = './data/pendulum-so3ham-rk4-5p.tar'
    model.load_state_dict(torch.load(ckpt_path, map_location=device))
    stats = from_pickle('./data/pendulum-so3ham-rk4-5p-stats.pkl')
    return model, stats
if __name__ == "__main__":
    # Figure and font size
    figsize = (12, 7.8)
    fontsize = 24
    fontsize_ticks = 32
    line_width = 4
    # Load trained model
    model, stats = get_model()
    # Scale factor for M^-1, V, g neural networks
    # (learned quantities are only identifiable up to this constant).
    beta = 2.32
    # Load train/test data
    train_x_hat = stats['train_x_hat']
    test_x_hat = stats['test_x_hat']
    train_x = stats['train_x']
    test_x = stats['test_x']
    t_eval = stats['t_eval']
    print("Loaded data!")
    # Plot loss
    fig = plt.figure(figsize=figsize, linewidth=5)
    ax = fig.add_subplot(111)
    train_loss = stats['train_loss']
    test_loss = stats['test_loss']
    ax.plot(train_loss[0:], 'b', linewidth=line_width, label='train loss')
    ax.plot(test_loss[0:], 'r', linewidth=line_width, label='test loss')
    plt.xlabel("iterations", fontsize=fontsize_ticks)
    plt.xticks(fontsize=fontsize_ticks)
    plt.yticks(fontsize=fontsize_ticks)
    plt.yscale('log')
    plt.legend(fontsize=fontsize)
    plt.savefig('./png/loss_log.png', bbox_inches='tight')
    plt.show()
    # Get state q from a range of pendulum angle theta
    theta = np.linspace(-5.0, 5.0, 40)
    q_tensor = torch.tensor(theta, dtype=torch.float32).view(40, 1).to(device)
    q_zeros = torch.zeros(40,2).to(device)
    # Quaternion for a rotation of theta about the z-axis: (cos(t/2), 0, 0, sin(t/2)).
    quat = torch.cat((torch.cos(q_tensor/2), q_zeros, torch.sin(q_tensor/2)), dim=1)
    rotmat = compute_rotation_matrix_from_quaternion(quat)
    # This is the generalized coordinates q = R
    rotmat = rotmat.view(rotmat.shape[0], 9)
    # Calculate the M^-1, V, g for the q.
    M_q_inv = model.M_net(rotmat)
    V_q = model.V_net(rotmat)
    g_q = model.g_net(rotmat)
    # Plot g(q)
    fig = plt.figure(figsize=figsize)
    plt.plot(theta, beta*g_q.detach().cpu().numpy()[:,0], 'b--', linewidth=line_width, label=r'$\beta g(q)[1]$')
    plt.plot(theta, beta * g_q.detach().cpu().numpy()[:, 1], 'r--', linewidth=line_width, label=r'$\beta g(q)[2]$')
    plt.plot(theta, beta * g_q.detach().cpu().numpy()[:, 2], 'g--', linewidth=line_width, label=r'$\beta g(q)[3]$')
    plt.xlabel("pendulum angle", fontsize=fontsize_ticks)
    plt.xticks(fontsize=fontsize_ticks)
    plt.yticks(fontsize=fontsize_ticks)
    plt.xlim(-5, 5)
    plt.ylim(-0.5, 2.5)
    plt.legend(fontsize=fontsize)
    plt.savefig('./png/g_x.png', bbox_inches='tight')
    plt.show()
    # Plot V(q)
    # Ground truth for the pendulum potential is 5 - 5*cos(theta).
    fig = plt.figure(figsize=figsize)
    plt.plot(theta, 5. - 5. * np.cos(theta), 'k--', label='Ground Truth', color='k', linewidth=line_width)
    plt.plot(theta, beta*V_q.detach().cpu().numpy(), 'b', label=r'$\beta V(q)$', linewidth=line_width)
    plt.xlabel("pendulum angle", fontsize=fontsize_ticks)
    plt.xticks(fontsize=fontsize_ticks)
    plt.yticks(fontsize=fontsize_ticks)
    plt.xlim(-5, 5)
    plt.ylim(-8, 12)
    plt.legend(fontsize=fontsize)
    plt.savefig('./png/V_x.png', bbox_inches='tight')
    plt.show()
    # Plot M^-1(q)
    # Only the [3,3] entry is expected to be non-trivial (rotation about z);
    # ground truth for it is the constant 3.
    fig = plt.figure(figsize=figsize)
    plt.plot(theta, 3 * np.ones_like(theta), label='Ground Truth', color='k', linewidth=line_width-1)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 2, 2] / beta, 'b--', linewidth=line_width,
             label=r'$M^{-1}(q)[3, 3]/\beta$')
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 0, 0] / beta, 'g--', linewidth=line_width,
             label=r'Other $M^{-1}(q)[i,j]/\beta$')
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 0, 1] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 0, 2] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 1, 0] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 1, 1] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 1, 2] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 2, 0] / beta, 'g--', linewidth=line_width)
    plt.plot(theta, M_q_inv.detach().cpu().numpy()[:, 2, 1] / beta, 'g--', linewidth=line_width)
    plt.xlabel("pendulum angle", fontsize=fontsize_ticks)
    plt.xticks(fontsize=fontsize_ticks)
    plt.yticks(fontsize=fontsize_ticks)
    plt.xlim(-5, 5)
    plt.ylim(-0.5, 6.0)
    plt.legend(fontsize=fontsize)
    plt.savefig('./png/M_x_all.png', bbox_inches='tight')
    plt.show()
| 41.434109
| 115
| 0.658934
|
4a1418ed35077a879f566c6512bebed54544561c
| 251
|
py
|
Python
|
CircuitPython_Quick_Starts/CircuitPython_AnalogIn/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
CircuitPython_Quick_Starts/CircuitPython_AnalogIn/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
CircuitPython_Quick_Starts/CircuitPython_AnalogIn/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
# CircuitPython AnalogIn Demo
import time
import board
from analogio import AnalogIn
analog_in = AnalogIn(board.A1)  # ADC input on pin A1
def get_voltage(pin):
    """Convert a 16-bit ADC reading (0-65535) to volts at a 3.3 V reference."""
    return pin.value / 65536 * 3.3
while True:
    # Print as a 1-tuple so serial plotters (e.g. Mu) can graph the value.
    print((get_voltage(analog_in),))
    time.sleep(0.1)  # ~10 readings per second
| 13.944444
| 36
| 0.713147
|
4a141905ac5fc26d42c0c679ff6ad40114ac21fa
| 1,824
|
py
|
Python
|
cluster/cluster_clean.py
|
pelagia/votca-scripts
|
5c10cabe6458b4682cd9c214bc665d389f34a939
|
[
"Apache-2.0"
] | 2
|
2017-10-17T16:46:28.000Z
|
2020-01-09T15:06:14.000Z
|
cluster/cluster_clean.py
|
pelagia/votca-scripts
|
5c10cabe6458b4682cd9c214bc665d389f34a939
|
[
"Apache-2.0"
] | null | null | null |
cluster/cluster_clean.py
|
pelagia/votca-scripts
|
5c10cabe6458b4682cd9c214bc665d389f34a939
|
[
"Apache-2.0"
] | 2
|
2018-05-25T09:15:47.000Z
|
2020-06-25T07:18:31.000Z
|
import os
def checkmethod():
check=raw_input("Are you sure?\nEnter \"yes\" or \"no\" \n")
checklist={'yes':True,'no':False}
if check not in checklist:
print 'enter either \"yes\" or \"no\" '
check=checkmethod()
else:
print check
return checklist[check]
def getusername():
    # Prompt for the cluster username on stdin and ask for confirmation
    # (Python 2: raw_input/print statement). Any answer other than "yes"
    # restarts the prompt; the confirmed name is returned.
    username=raw_input("Enter your username on the cluster\n")
    print username
    confirm=raw_input("Ist this your username? Enter \"yes\" or \"no\" \n")
    if confirm != "yes":
        print 'enter either \"yes\" or \"no\" '
        # Recursive re-prompt; the re-entered name propagates to the return.
        username=getusername()
    else:
        print "Using",username
    return username
# Entry point (Python 2): confirm, then wipe the user's scratch directories
# on nodes thinc200..thinc309.
print "Running this script removes all files from your folder on the cluster"
check=checkmethod()
if check==True:
    username=getusername()
    print "Starting cleanup"
    for i in range(200,310):
        mach1 = "thinc%03d" % i
        #mach2 = "thop%3d" % i
        wpath1 = "/scratch/%s/%s" % (mach1,username)
        #wpath2 = "/scratch/%s/%s" % (mach2,username)
        if os.path.exists(wpath1):
            if len(os.listdir(wpath1)) > 0:
                print wpath1
                # NOTE(review): the existence check is on /scratch/... but the
                # remote rm targets /usr/scratch/... — confirm these are the
                # same mount on the cluster nodes.
                os.system("ssh %s rm -rf /usr/scratch/%s/*" % (mach1, username))
                #os.chdir(wpath1)
                #print wpath1, os.listdir("./")
                #delcmds = ["rm -rf %s" % d for d in os.listdir("./") if "job" in d]
                #for cmd in delcmds:
                #    print cmd
                #    os.system(cmd)
        #if os.path.exists(wpath2):
        #    if len(os.listdir(wpath2)) > 0:
        #        print wpath2
        #        os.system("ssh %s rm -rf /usr/scratch/%s/*" % (mach2, username))
                #os.chdir(wpath1)
                #print wpath1, os.listdir("./")
                #delcmds = ["rm -rf %s" % d for d in os.listdir("./") if "job" in d]
                #for cmd in delcmds:
                #    print cmd
                #    os.system(cmd)
else:
    print "Aborted"
| 29.419355
| 79
| 0.560855
|
4a141a4b3d699cf75ba14f7b2288a970a8519ce1
| 6,681
|
py
|
Python
|
CLN/pl_testTube_CLN.py
|
KirillShmilovich/coarse2fine_VAE
|
e4c1022f9570934a2be59ea0989c80102dc46ad4
|
[
"MIT"
] | null | null | null |
CLN/pl_testTube_CLN.py
|
KirillShmilovich/coarse2fine_VAE
|
e4c1022f9570934a2be59ea0989c80102dc46ad4
|
[
"MIT"
] | null | null | null |
CLN/pl_testTube_CLN.py
|
KirillShmilovich/coarse2fine_VAE
|
e4c1022f9570934a2be59ea0989c80102dc46ad4
|
[
"MIT"
] | null | null | null |
"""
Runs a model on a single node across multiple gpus.
"""
import os
from pl_vae_CLN import VAE
from pytorch_lightning import Trainer, seed_everything, loggers
from pl_callbacks import CheckpointEveryNSteps
from test_tube import HyperOptArgumentParser, SlurmCluster
def main(args, cluster_manager):
    """Main training routine specific for this project.

    Builds the VAE from the parsed hyperparameters, attaches a TestTube
    logger versioned by the cluster experiment number, and runs training
    on a single GPU.
    """
    seed_everything(42)
    model = VAE(**vars(args))
    logger = loggers.TestTubeLogger(
        "./", name="CLN_single", version=cluster_manager.hpc_exp_number
    )
    trainer = Trainer.from_argparse_args(
        args,
        logger=logger,
        gpus=1,
        val_check_interval=0.25,
        terminate_on_nan=True,
        callbacks=[CheckpointEveryNSteps(2000)],
    )
    # Start training.
    trainer.fit(model)
def remove_options(parser, options):
    """Unregister previously added options from an argparse-based parser.

    Used so options defined by ``VAE.add_model_specific_args`` can be
    re-added below as tunable hyperparameters without a conflict error.

    Parameters
    ----------
    parser : argparse.ArgumentParser (or test_tube.HyperOptArgumentParser)
        Parser whose registered actions are scanned in place.
    options : iterable of str
        Option strings (e.g. ``"--sigma"``) to remove.

    Notes
    -----
    Relies on argparse internals (``_actions``,
    ``_handle_conflict_resolve``) — private but stable API.
    """
    for option in options:
        for action in parser._actions:
            # BUG FIX: the original compared only option_strings[0], so an
            # option registered with a short alias first (e.g. "-s",
            # "--sigma") was never found. Match any alias instead.
            if option in action.option_strings:
                parser._handle_conflict_resolve(None, [(option, action)])
                break
if __name__ == "__main__":
root_dir = os.path.dirname(os.path.realpath(__file__))
# parent_parser = ArgumentParser(add_help=False)
# Set up our argparser and make the y_val tunable.
parser = HyperOptArgumentParser(strategy="random_search")
parser = VAE.add_model_specific_args(parser, root_dir)
parser.add_argument("--log_path", default="./")
remove_options(
parser,
[
"--sigma",
"--learning_rate",
"--latent_dim",
"--resolution",
"--fac_encoder",
"--fac_decoder",
"--batch_size",
"--length",
"--noise",
"--beta",
"--use_cg_loss",
"--use_edm_loss",
"--bonds_edm_weight",
"--cg_coord_weight",
"--coord_weight",
"--use_edm_loss",
"--learning_gamma",
],
)
parser.opt_range(
"--sigma",
default=0.01,
low=0.01,
high=1.0,
log_base=10,
nb_samples=16,
type=float,
tunable=True,
)
parser.opt_range(
"--learning_rate",
default=1e-4,
low=1e-4,
high=1e-3,
log_base=10,
nb_samples=16,
type=float,
tunable=True,
)
parser.opt_list("--latent_dim", default=32, options=[64], type=int, tunable=True)
parser.opt_list("--resolution", default=12, options=[12], type=int, tunable=True)
parser.opt_list("--fac_encoder", default=16, options=[16], type=int, tunable=True)
parser.opt_list("--fac_decoder", default=16, options=[8], type=int, tunable=True)
parser.opt_list("--batch_size", default=32, options=[32], type=int, tunable=True)
parser.opt_list(
"--learning_gamma", default=1.0, options=[1.0], type=float, tunable=True
)
parser.opt_list("--length", default=5.5, options=[5.5], type=float, tunable=True)
parser.opt_list("--noise", default=0.00, options=[0.0], type=float, tunable=True)
# parser.opt_range(
# "--beta",
# default=1.0,
# low=1e-4,
# high=1.0,
# log_base=10,
# nb_samples=12,
# type=float,
# tunable=False,
# )
parser.opt_list(
"--beta",
default=1.0,
options=[1.0],
# default="250000",
# options=["250000"],
# default="0.0025,10000,4,25000",
# options=['10000,4,15000'],
# options=["0.0025,10000,4,25000"],
# options=["0.0025,35000,4,80000"],
type=str,
tunable=True,
)
parser.opt_list("--use_cg_loss", default=1, options=[0, 1], type=int, tunable=False)
parser.opt_list(
"--use_edm_loss", default=1, options=[0, 1], type=int, tunable=False
)
parser.opt_range(
"--bonds_edm_weight",
default=1.0,
low=0.01,
high=1.0,
nb_samples=8,
type=float,
tunable=False,
)
parser.opt_range(
"--coord_weight",
default=1.0,
low=0.01,
high=1.0,
nb_samples=8,
type=float,
tunable=False,
)
parser.opt_range(
"--cg_coord_weight",
default=1.0,
low=0.01,
high=1.0,
nb_samples=6,
type=float,
tunable=False,
)
hyperparams = parser.parse_args()
# Enable cluster training.
cluster = SlurmCluster(
hyperparam_optimizer=hyperparams,
log_path=hyperparams.log_path,
python_cmd="/project/andrewferguson/Kirill/torch_kirill/bin/python",
)
# Email results if your hpc supports it.
cluster.notify_job_status(email="kirills@uchicago.edu", on_done=True, on_fail=True)
# SLURM Module to load.
cluster.load_modules(["python"])
# Add commands to the non-SLURM portion.
# cluster.add_command(
# "conda activate /project2/andrewferguson/Kirill/conda_env")
# Add custom SLURM commands which show up as:
# #comment
# #SBATCH --cmd=value
# ############
cluster.add_slurm_cmd(
cmd="partition", value="andrewferguson-gpu", comment="partition"
)
cluster.add_slurm_cmd(cmd="account", value="pi-andrewferguson", comment="account")
# cluster.add_slurm_cmd(cmd="partition", value="gpu", comment="partition")
# cluster.add_slurm_cmd(cmd="partition",
# value="gm4-pmext",
# comment="partition")
# cluster.add_slurm_cmd(cmd="qos", value="gm4", comment="qos")
# cluster.add_slurm_cmd(cmd="account",
# value="early-users",
# comment="partition")
# Set job compute details (this will apply PER set of hyperparameters.)
cluster.per_experiment_nb_gpus = 1
cluster.per_experiment_nb_nodes = 1
cluster.per_experiment_nb_cpus = 10
cluster.memory_mb_per_node = 17000
cluster.minutes_to_checkpoint_before_walltime = 0
# cluster.job_time = "12:00:00"
cluster.job_time = "168:00:00"
# Each hyperparameter combination will use 8 gpus.
cluster.optimize_parallel_cluster_gpu(
# Function to execute:
main,
# Number of hyperparameter combinations to search:
nb_trials=4,
# This is w will display in the slurm queue:
job_name="CLN_single",
)
| 31.074419
| 88
| 0.580901
|
4a141da62e7e54023546ab5a2eef1c90e8e183f3
| 169
|
py
|
Python
|
project/settings/components/locale.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
project/settings/components/locale.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
project/settings/components/locale.py
|
gyukebox/drf-basic-settings
|
53c3df89b8f11169ee924c1c8000624384781c3a
|
[
"MIT"
] | null | null | null |
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

# Default language for this installation.
LANGUAGE_CODE = "en-us"

# Datetimes are stored and handled as UTC (USE_TZ below makes them aware).
TIME_ZONE = "UTC"

# Enable Django's translation machinery and locale-aware formatting.
USE_I18N = True
USE_L10N = True

USE_TZ = True
| 13
| 52
| 0.721893
|
4a141f847b694cb56d9487370048f6fafebebfcb
| 1,626
|
py
|
Python
|
robotsscan.py
|
Moxin1044/Easy-Bird-CMS-Scan-tools
|
639fa05ec3b7d6d10ed4dd0c0f1652ae6f564bf9
|
[
"MulanPSL-1.0"
] | 1
|
2020-12-09T23:58:45.000Z
|
2020-12-09T23:58:45.000Z
|
robotsscan.py
|
Moxin1044/Easy-Bird-CMS-Scan-tools
|
639fa05ec3b7d6d10ed4dd0c0f1652ae6f564bf9
|
[
"MulanPSL-1.0"
] | null | null | null |
robotsscan.py
|
Moxin1044/Easy-Bird-CMS-Scan-tools
|
639fa05ec3b7d6d10ed4dd0c0f1652ae6f564bf9
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2020-2021 Moxin
[Software Name] is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
By:M0x1n Time:2020.12.23Updated
Ver:1.2(Third edition) 1.2第三版
https://bbs.moxinwangluo.cn/
此脚本可以通过一些网站特征来进行CMS识别,当然,这个脚本也是开源的。
This script can be used for CMS recognition based on some website features, and of course, this script is also open source.
"""
import auxiliary
import files
import os
def discuzrobots(url):
    """Fingerprint Discuz! X3 from markers found in the site's robots.txt."""
    x3_hit = auxiliary.searchrobots('Discuz! X3', url)
    if x3_hit == "Pass":
        print("从robots.txt检测中得到", url, "可能是Discuz X3站点!")
    uc_hit = auxiliary.searchrobots('/uc_server/', url)
    if uc_hit == "Pass":
        print("从robots.txt检测中得到", url, "可能是Discuz X3站点!(存在uc_server)")
def dederobots(url, uurl):
    """Fingerprint DedeCMS from robots.txt; on a hit, probe version files."""
    # robots.txt paths characteristic of DedeCMS installs.
    for marker in ('/plus/feedback_js.php', '/plus/shops_buyaction.php'):
        if auxiliary.searchrobots(marker, url) == "Pass":
            print("从robots.txt检测中得到", url, "可能是dedecms站点!")
            # Only run the version-file probe once a marker matched.
            files.dedever(uurl)
def allrobots(url, uurl):
    """Run every robots.txt-based CMS check, then delete the downloaded file."""
    discuzrobots(url)
    dederobots(url, uurl)
    # NOTE(review): hard-coded Windows-style path; "date" may be a typo for
    # "data" — confirm against the code that saves robots.txt.
    os.remove("date\\robots.txt")
| 38.714286
| 205
| 0.686962
|
4a142260fe091c8dc12654ea8c1b8a5d284e77c0
| 464
|
py
|
Python
|
Mundo 1/ex_015.py
|
Shock3/Python_Exercicios
|
4420569e881b883728168aabe76b0e9f3a42597f
|
[
"MIT"
] | null | null | null |
Mundo 1/ex_015.py
|
Shock3/Python_Exercicios
|
4420569e881b883728168aabe76b0e9f3a42597f
|
[
"MIT"
] | null | null | null |
Mundo 1/ex_015.py
|
Shock3/Python_Exercicios
|
4420569e881b883728168aabe76b0e9f3a42597f
|
[
"MIT"
] | null | null | null |
"""
Escreva um programa que pergunte a quantidade de Km percorridos por um carro
alugado e a quantidade de dias pelos quais ele foi alugado.
Calcule o preço a pagar, sabendo que o carro custa R$60 por dia
e R$0,15 por Km rodado
"""
print('**' * 19)
distancia = int(input('Qual a distância percorrida: '))
dias = int(input('Quantos dias ficou alugado: '))
preço = (60 * dias) + (0.15 * distancia)
print(f'O preço total a pagar é de R${preço:.2f}')
print('**' * 19)
| 35.692308
| 76
| 0.698276
|
4a1422e86da930a9a635871b24a5a16f98140c89
| 2,873
|
py
|
Python
|
cfai/cf/plainCF.py
|
wangyongjie-ntu/Baselines_CF
|
9b68011e21b26d91c87ac6c9a384cfa0b615f50f
|
[
"MIT"
] | 12
|
2021-04-01T08:22:51.000Z
|
2022-01-30T18:26:14.000Z
|
cfai/cf/plainCF.py
|
wangyongjie-ntu/Baselines_CF
|
9b68011e21b26d91c87ac6c9a384cfa0b615f50f
|
[
"MIT"
] | null | null | null |
cfai/cf/plainCF.py
|
wangyongjie-ntu/Baselines_CF
|
9b68011e21b26d91c87ac6c9a384cfa0b615f50f
|
[
"MIT"
] | 4
|
2021-04-21T13:53:55.000Z
|
2021-11-01T12:15:02.000Z
|
#Filename: plainCF.py
#Author: Wang Yongjie
#Email: yongjie.wang@ntu.edu.sg
#Date: Min 13 Des 2020 09:15:05 WIB
import torch
import numpy as np
import torch.nn.functional as F
import copy
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from cf.baseCF import ExplainerBase
class PlainCF(ExplainerBase):
    """Plain gradient-descent counterfactual explainer.

    Optimizes a randomly initialized candidate so the model's score reaches
    ``target`` while staying close (squared L2) to the query instance.
    Features not listed in ``features_to_vary`` are frozen at the query's
    values via a 0/1 mask.
    """

    def __init__(self, data_interface, model_interface):
        super().__init__(data_interface, model_interface)

    def generate_counterfactuals(self, query_instance, features_to_vary, target = 0.7, feature_weights = None, _lambda = 10,
            optimizer = "adam", lr = 0.01, max_iter = 100):
        """Search for a counterfactual of ``query_instance``.

        Parameters
        ----------
        query_instance : raw instance accepted by data_interface.prepare_query.
        features_to_vary : feature names the optimizer may change.
        target : float, desired (minimum) model score for the counterfactual.
        feature_weights : optional per-feature weights, defaults to all ones.
            NOTE(review): computed but not applied in the loss — confirm
            whether it should weight the distance term.
        _lambda : float, weight of the validity (hinge) term of the loss.
        optimizer : "adam" selects Adam; any other value selects RMSprop.
        lr : float, optimizer learning rate.
        max_iter : int, number of gradient steps.

        Returns
        -------
        torch.FloatTensor : the optimized, detached counterfactual.
        """
        start_time = time.time()
        query_instance = self.data_interface.prepare_query(query_instance, normalized = True)
        query_instance = torch.FloatTensor(query_instance)
        mask = self.data_interface.get_mask_of_features_to_vary(features_to_vary)
        mask = torch.LongTensor(mask)
        self._lambda = _lambda

        # BUG FIX: compare against None by identity. The old else-branch also
        # discarded a user-supplied weight vector and sized the replacement
        # with shape[0] instead of the feature dimension shape[1].
        if feature_weights is None:
            feature_weights = torch.ones(query_instance.shape[1])
        else:
            feature_weights = torch.FloatTensor(feature_weights)

        # Draw the starting point from a distribution matching the scaler:
        # uniform [0, 1) for MinMaxScaler, standard normal for StandardScaler.
        if isinstance(self.data_interface.scaler, MinMaxScaler):
            cf_initialize = torch.rand(query_instance.shape)
        elif isinstance(self.data_interface.scaler, StandardScaler):
            cf_initialize = torch.randn(query_instance.shape)
        else:
            cf_initialize = torch.rand(query_instance.shape)

        cf_initialize = torch.FloatTensor(cf_initialize)
        # Freeze immutable features at the query's values.
        cf_initialize = mask * cf_initialize + (1 - mask) * query_instance

        if optimizer == "adam":
            optim = torch.optim.Adam([cf_initialize], lr)
        else:
            optim = torch.optim.RMSprop([cf_initialize], lr)

        for i in range(max_iter):
            cf_initialize.requires_grad = True
            optim.zero_grad()
            loss = self.compute_loss(cf_initialize, query_instance, target)
            loss.backward()
            # Zero out gradients of frozen features so they never move.
            cf_initialize.grad = cf_initialize.grad * mask
            optim.step()
            if isinstance(self.data_interface.scaler, MinMaxScaler):
                # Clamp back into the valid normalized range.
                cf_initialize = torch.where(cf_initialize > 1, torch.ones_like(cf_initialize), cf_initialize)
                cf_initialize = torch.where(cf_initialize < 0, torch.zeros_like(cf_initialize), cf_initialize)
            cf_initialize.detach_()

        # BUG FIX: report elapsed wall time, not a second absolute timestamp.
        end_time = time.time()
        running_time = end_time - start_time

        return cf_initialize

    def compute_loss(self, cf_initialize, query_instance, target):
        """Hinge validity loss (scaled by _lambda) plus squared L2 proximity."""
        loss1 = F.relu(target - self.model_interface.predict_tensor(cf_initialize)[1])
        loss2 = torch.sum((cf_initialize - query_instance)**2)
        return self._lambda * loss1 + loss2
| 36.833333
| 124
| 0.670031
|
4a1423b6c0529233844636fa72d286ea18898e55
| 1,252
|
py
|
Python
|
tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalHaveTest.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:08.000Z
|
2021-02-04T12:41:08.000Z
|
tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalHaveTest.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 3
|
2017-07-08T16:28:52.000Z
|
2020-04-23T18:06:24.000Z
|
tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalHaveTest.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:10.000Z
|
2021-02-04T12:41:10.000Z
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 03.08.2017 12:28
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.old_api import Grammar
# Minimal placeholder type used as a terminal symbol in the tests below.
class TempClass:
    pass
class TerminalHaveTest(TestCase):
    """Grammar.have_term queries with classes, scalars, lists and tuples."""

    def test_haveTermEmpty(self):
        # An empty grammar contains no terminal of any kind.
        grammar = Grammar()
        for candidate in (TempClass, 1, 'asdf'):
            self.assertFalse(grammar.have_term(candidate))

    def test_haveTermClass(self):
        grammar = Grammar()
        grammar.add_term(TempClass)
        self.assertTrue(grammar.have_term(TempClass))

    def test_haveTermArray(self):
        grammar = Grammar()
        grammar.add_term([TempClass, 0, 'asdf'])
        # A list query succeeds only if every element is registered.
        self.assertTrue(grammar.have_term([0, 'asdf']))

    def test_dontHaveTermArray(self):
        grammar = Grammar()
        grammar.add_term([TempClass, 0, 'asdf'])
        self.assertFalse(grammar.have_term([TempClass, 'a']))

    def test_haveTermTuple(self):
        grammar = Grammar()
        grammar.add_term([TempClass, 0, 'asdf'])
        # Tuples behave like lists for membership queries.
        self.assertTrue(grammar.have_term((0, 'asdf')))

    def test_dontHaveTermTuple(self):
        grammar = Grammar()
        grammar.add_term([TempClass, 0, 'asdf'])
        self.assertFalse(grammar.have_term((TempClass, 'a')))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
| 24.076923
| 56
| 0.632588
|
4a1424db31a7267c4aef39b9f05bce5a6210e58a
| 282
|
py
|
Python
|
modelproject/modelproject.py
|
NumEconCopenhagen/projects-2020-group-xxv
|
b41f503ee389c8cb4a887807f81caa3bfe6a3aab
|
[
"MIT"
] | null | null | null |
modelproject/modelproject.py
|
NumEconCopenhagen/projects-2020-group-xxv
|
b41f503ee389c8cb4a887807f81caa3bfe6a3aab
|
[
"MIT"
] | 12
|
2020-04-10T16:03:44.000Z
|
2020-05-11T21:32:34.000Z
|
modelproject/modelproject.py
|
NumEconCopenhagen/projects-2020-group-xxv
|
b41f503ee389c8cb4a887807f81caa3bfe6a3aab
|
[
"MIT"
] | null | null | null |
def Y_t(D,K,A,L,E,a,b,e):
    """Output: Cobb-Douglas style product D * K^a * (A*L)^b * E^e."""
    capital_term = K ** a
    labor_term = (A * L) ** b
    energy_term = E ** e
    return D * capital_term * labor_term * energy_term
def D_t(R,R_0,phi):
    """Damage/productivity factor: remaining-resource share raised to phi."""
    resource_share = R / R_0
    return resource_share ** phi
def A_t1(A,g):
    """Technology next period under constant growth rate g."""
    growth_factor = 1 + g
    return A * growth_factor
def L_t1(L,n):
    """Labor force next period under population growth rate n."""
    growth_factor = 1 + n
    return L * growth_factor
def K_t1(s,Y,d,K):
    """Capital accumulation: savings s*Y plus undepreciated capital (1-d)*K."""
    investment = s * Y
    surviving_capital = (1 - d) * K
    return investment + surviving_capital
def R_t1(R,E):
    """Resource stock next period after extracting E this period."""
    remaining = R - E
    return remaining
def E_t(sE,R):
    """Extraction this period: a constant share sE of the current stock R."""
    return R * sE
| 20.142857
| 37
| 0.521277
|
4a14259b696687312d53d32e1e522f2e078cf511
| 4,514
|
py
|
Python
|
nova/servicegroup/drivers/db.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | 5
|
2017-06-23T07:37:39.000Z
|
2020-10-21T07:07:50.000Z
|
nova/servicegroup/drivers/db.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | null | null | null |
nova/servicegroup/drivers/db.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | 4
|
2017-06-23T07:37:43.000Z
|
2020-12-28T09:57:22.000Z
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_utils import timeutils
import six
from nova import conductor
from nova import context
from nova.i18n import _, _LE
from nova.openstack.common import log as logging
from nova.servicegroup import api
from nova.servicegroup.drivers import base
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class DbDriver(base.Driver):
def __init__(self, *args, **kwargs):
self.db_allowed = kwargs.get('db_allowed', True)
self.conductor_api = conductor.API(use_local=self.db_allowed)
self.service_down_time = CONF.service_down_time
def join(self, member_id, group_id, service=None):
"""Join the given service with its group."""
LOG.debug('DB_Driver: join new ServiceGroup member %(member_id)s to '
'the %(group_id)s group, service = %(service)s',
{'member_id': member_id, 'group_id': group_id,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for DB based'
' ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
api.INITIAL_REPORTING_DELAY, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service_ref came in over rpc via
# conductor, then the timestamp will be a string and needs to be
# converted back to a datetime.
last_heartbeat = timeutils.parse_strtime(last_heartbeat)
else:
# Objects have proper UTC timezones, but the timeutils comparison
# below does not (and will fail)
last_heartbeat = last_heartbeat.replace(tzinfo=None)
# Timestamps in DB are UTC.
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
is_up = abs(elapsed) <= self.service_down_time
if not is_up:
LOG.debug('Seems service is down. Last heartbeat was %(lhb)s. '
'Elapsed time is %(el)s',
{'lhb': str(last_heartbeat), 'el': str(elapsed)})
return is_up
def get_all(self, group_id):
"""Returns ALL members of the given group
"""
LOG.debug('DB_Driver: get_all members of the %s group', group_id)
rs = []
ctxt = context.get_admin_context()
services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
for service in services:
if self.is_up(service):
rs.append(service['host'])
return rs
def _report_state(self, service):
"""Update the state of this service in the datastore."""
ctxt = context.get_admin_context()
state_catalog = {}
try:
report_count = service.service_ref['report_count'] + 1
state_catalog['report_count'] = report_count
service.service_ref = self.conductor_api.service_update(ctxt,
service.service_ref, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.error(_LE('Recovered model server connection!'))
# TODO(vish): this should probably only catch connection errors
except Exception:
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.exception(_LE('model server went away'))
| 40.303571
| 79
| 0.647319
|
4a1425e96fba53c00e7c3584594eba581e796cf0
| 739
|
py
|
Python
|
day02.py
|
jjhelmus/adventofcode
|
d421c3ea8c85614d91aa6ca2e4af68bae08cbf98
|
[
"MIT"
] | 5
|
2015-12-06T16:08:33.000Z
|
2022-01-19T11:14:55.000Z
|
day02.py
|
jjhelmus/adventofcode
|
d421c3ea8c85614d91aa6ca2e4af68bae08cbf98
|
[
"MIT"
] | null | null | null |
day02.py
|
jjhelmus/adventofcode
|
d421c3ea8c85614d91aa6ca2e4af68bae08cbf98
|
[
"MIT"
] | 11
|
2016-01-26T18:53:38.000Z
|
2022-01-26T02:35:57.000Z
|
from __future__ import print_function
verbose = False
f = open('inputs/input02.txt')
total_paper = 0
total_ribbon = 0
for line in f:
if verbose:
print("line:", line)
l, w, d = line.split('x')
l, w, d = int(l), int(w), int(d)
if verbose:
print("Dimensions:", l, w, d)
# paper
s1 = l * w
s2 = l * d
s3 = w * d
extra = min(s1, s2, s3)
paper_needed = 2*s1 + 2*s2 + 2*s3 + extra
if verbose:
print("Paper needed for present:", paper_needed)
total_paper += paper_needed
# ribbon
ribbon = min(l+l+w+w, l+l+d+d, w+w+d+d)
bow = l * w * d
total_ribbon += (ribbon + bow)
print("Total paper needed:", total_paper)
print("Total ribbin needed:", total_ribbon)
| 21.735294
| 56
| 0.576455
|
4a1425f4830c5273550555b97144c24f57c8fc2a
| 6,027
|
py
|
Python
|
examples/classify_students.py
|
dssg/diogenes
|
52eda2a7adb22044e82779d05f91c927c1b95f3f
|
[
"MIT"
] | 21
|
2015-09-28T15:04:53.000Z
|
2017-11-19T22:21:31.000Z
|
examples/classify_students.py
|
dssg/diogenes
|
52eda2a7adb22044e82779d05f91c927c1b95f3f
|
[
"MIT"
] | 28
|
2015-09-27T09:51:12.000Z
|
2021-11-08T17:26:39.000Z
|
examples/classify_students.py
|
dssg/diogenes
|
52eda2a7adb22044e82779d05f91c927c1b95f3f
|
[
"MIT"
] | 10
|
2015-11-21T19:50:14.000Z
|
2018-05-21T16:16:25.000Z
|
import sys
import os
import cPickle
import re
import numpy as np
from diogenes.grid_search.experiment import Experiment
from diogenes.grid_search.subset import BaseSubsetIter
from diogenes.grid_search.partition_iterator import SlidingWindowValue
from diogenes.modify import replace_missing_vals
from diogenes.utils import remove_cols
from sklearn.ensemble import RandomForestClassifier
#NEXT REPLACE MISSING VALS
class SubsetSchool(BaseSubsetIter):
    """Subset iterator that, for each configured maximum grade level, yields
    only the feature columns that do not leak information from higher grades.
    """

    def __init__(self, y, col_names, max_grades):
        super(SubsetSchool, self).__init__(y, col_names)
        self.__max_grades = max_grades
        self.__grades = range(9, 13)

    def __iter__(self):
        # cribbed from Reid
        for max_grade in self.__max_grades:
            # Build a negative-lookahead regex that excludes any feature
            # whose name starts with grade_<g> for a grade above max_grade.
            higher_grade = False
            higher_grade_regex = '^(?!'
            for grade2 in self.__grades:
                if grade2 > max_grade:
                    higher_grade = True
                    higher_grade_regex += r'grade_' + str(grade2) + '|'
            if higher_grade:
                higher_grade_regex = higher_grade_regex[:-1]  # remove last '|'
            higher_grade_regex = higher_grade_regex + ').*'
            regex = re.compile(higher_grade_regex)
            X_cols = filter(lambda i: regex.search(i), self._col_names)
            yield (np.arange(self._y.shape[0]), X_cols, {'max_grade' : max_grade})

    def __repr__(self):
        # BUG FIX: referenced an undefined global ``grades`` (NameError);
        # report the configured max grades instead.
        return 'SubsetSchool({})'.format(self.__max_grades)
# --- Train-and-report driver (Python 2: print statements, xrange, cPickle). ---
# Loads a pickled record array of student features, imputes missing values,
# runs one sliding-window experiment per maximum grade level, and merges the
# per-grade CSV reports into report.csv.
DATA_PATH = '/home/zar1/hs-scratch/'  # hard-coded scratch location
fin = open(os.path.join(DATA_PATH, 'data_rec_array.pkl'))
print 'loading data'
M = cPickle.load(fin)
fin.close()
print 'data loaded'
# 'label' is the target; drop identifier columns from the feature matrix.
y = M['label']
M = remove_cols(M, ['label', 'student_id', 'index'])
print 'set up data'
M = replace_missing_vals(M, 'constant', np.nan)
print 'imputed'
min_year = min(M['cohort'])
clfs = [{'clf': RandomForestClassifier, 'random_state': [0]}]
csvs = []
train_start = min_year
train_window_size = 2
init_train_window_end = train_start + train_window_size - 1
for max_grade in xrange(9, 12):
    print 'making experiment'
    print max_grade
    # Lower max grades leave more lead time before grade 12: the test window
    # starts (12 - max_grade) cohorts after the end of the training window.
    test_start = init_train_window_end + (12 - max_grade)
    subsets = [{'subset': SubsetSchool, 'max_grades': [[max_grade]]}]
    cvs = [{'cv': SlidingWindowValue, 'train_start': [train_start],
            'train_window_size': [1], 'test_start': [test_start],
            'test_window_size': [1], 'inc_value': [1],
            'guide_col_name': ['cohort']}]
    exp = Experiment(M, y, clfs=clfs, subsets=subsets, cvs=cvs)
    print 'running'
    exp.run()
    csv_name = '_{}.csv'.format(max_grade)
    print 'making report'
    exp.make_csv(csv_name)
    csvs.append(csv_name)
# Concatenate all per-grade CSVs under a single header row.
with open(csvs[0]) as fin:
    header = fin.readline()
with open('report.csv', 'w') as fout:
    fout.write(header)
    for in_csv in csvs:
        with open(in_csv) as fin:
            fin.readline()
            # NOTE(review): the file duplicates itself from here on — the
            # trailing "import sys" is a bad-concatenation artifact fused onto
            # this line; the statement below is syntactically broken as committed.
            fout.write(fin.read())import sys
import os
import cPickle
import re
import numpy as np
from diogenes.grid_search.experiment import Experiment
from diogenes.grid_search.subset import BaseSubsetIter
from diogenes.grid_search.partition_iterator import SlidingWindowValue
from diogenes.modify import replace_missing_vals
from diogenes.utils import remove_cols
from sklearn.ensemble import RandomForestClassifier
#NEXT REPLACE MISSING VALS
class SubsetSchool(BaseSubsetIter):
    """Subset iterator that, for each configured maximum grade level, yields
    only the feature columns that do not leak information from higher grades.
    (Duplicate copy: this file was concatenated with itself.)
    """

    def __init__(self, y, col_names, max_grades):
        super(SubsetSchool, self).__init__(y, col_names)
        self.__max_grades = max_grades
        self.__grades = range(9, 13)

    def __iter__(self):
        # cribbed from Reid
        for max_grade in self.__max_grades:
            # Build a negative-lookahead regex that excludes any feature
            # whose name starts with grade_<g> for a grade above max_grade.
            higher_grade = False
            higher_grade_regex = '^(?!'
            for grade2 in self.__grades:
                if grade2 > max_grade:
                    higher_grade = True
                    higher_grade_regex += r'grade_' + str(grade2) + '|'
            if higher_grade:
                higher_grade_regex = higher_grade_regex[:-1]  # remove last '|'
            higher_grade_regex = higher_grade_regex + ').*'
            regex = re.compile(higher_grade_regex)
            X_cols = filter(lambda i: regex.search(i), self._col_names)
            yield (np.arange(self._y.shape[0]), X_cols, {'max_grade' : max_grade})

    def __repr__(self):
        # BUG FIX: referenced an undefined global ``grades`` (NameError);
        # report the configured max grades instead.
        return 'SubsetSchool({})'.format(self.__max_grades)
# --- Duplicate copy of the train-and-report driver (file concatenated with
# itself; Python 2 syntax). Same flow: load pickled features, impute, run one
# experiment per max grade, merge the per-grade CSVs into report.csv. ---
DATA_PATH = '/home/zar1/hs-scratch/'  # hard-coded scratch location
fin = open(os.path.join(DATA_PATH, 'data_rec_array.pkl'))
print 'loading data'
M = cPickle.load(fin)
fin.close()
print 'data loaded'
# 'label' is the target; drop identifier columns from the feature matrix.
y = M['label']
M = remove_cols(M, ['label', 'student_id', 'index'])
print 'set up data'
M = replace_missing_vals(M, 'constant', np.nan)
print 'imputed'
min_year = min(M['cohort'])
clfs = [{'clf': RandomForestClassifier, 'random_state': [0]}]
csvs = []
train_start = min_year
train_window_size = 2
init_train_window_end = train_start + train_window_size - 1
for max_grade in xrange(9, 12):
    print 'making experiment'
    print max_grade
    # Test window starts (12 - max_grade) cohorts after the training window.
    test_start = init_train_window_end + (12 - max_grade)
    subsets = [{'subset': SubsetSchool, 'max_grades': [[max_grade]]}]
    cvs = [{'cv': SlidingWindowValue, 'train_start': [train_start],
            'train_window_size': [1], 'test_start': [test_start],
            'test_window_size': [1], 'inc_value': [1],
            'guide_col_name': ['cohort']}]
    exp = Experiment(M, y, clfs=clfs, subsets=subsets, cvs=cvs)
    print 'running'
    exp.run()
    csv_name = '_{}.csv'.format(max_grade)
    print 'making report'
    exp.make_csv(csv_name)
    csvs.append(csv_name)
# Concatenate all per-grade CSVs under a single header row.
with open(csvs[0]) as fin:
    header = fin.readline()
with open('report.csv', 'w') as fout:
    fout.write(header)
    for in_csv in csvs:
        with open(in_csv) as fin:
            fin.readline()
            fout.write(fin.read())
| 34.244318
| 86
| 0.652066
|
4a1425fb8316affd73250c6314440e0fc6f365d7
| 730
|
py
|
Python
|
links/migrations/0002_auto_20160726_0326.py
|
rmad17/crawler
|
d7f28eac23e0fe61b81c9178302847602edba59d
|
[
"MIT"
] | null | null | null |
links/migrations/0002_auto_20160726_0326.py
|
rmad17/crawler
|
d7f28eac23e0fe61b81c9178302847602edba59d
|
[
"MIT"
] | null | null | null |
links/migrations/0002_auto_20160726_0326.py
|
rmad17/crawler
|
d7f28eac23e0fe61b81c9178302847602edba59d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-26 03:26
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('links', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pagedata',
name='page_links',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, size=None),
),
migrations.AlterField(
model_name='pagedata',
name='page_url',
field=models.CharField(max_length=200, unique=True),
),
]
| 27.037037
| 128
| 0.631507
|
4a1426d46d36e1e1da0606199d946d9a0013d866
| 392
|
py
|
Python
|
sh-ip-int.py
|
rogerperkin/nornir-course
|
7d78adc2fae41da6d695be2fcb71a7ba662ecf4c
|
[
"MIT"
] | 1
|
2020-10-29T17:21:49.000Z
|
2020-10-29T17:21:49.000Z
|
sh-ip-int.py
|
rogerperkin/nornir-course
|
7d78adc2fae41da6d695be2fcb71a7ba662ecf4c
|
[
"MIT"
] | null | null | null |
sh-ip-int.py
|
rogerperkin/nornir-course
|
7d78adc2fae41da6d695be2fcb71a7ba662ecf4c
|
[
"MIT"
] | null | null | null |
from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
from nornir_netmiko import netmiko_send_command
from nornir.core.filter import F
# Initialize the Nornir inventory from the course config file.
nr = InitNornir(config_file="/home/roger/nornir-course/config.yml")

# Restrict the run to hosts belonging to the CSR_Routers group.
csr_routers = nr.filter(F(groups__contains="CSR_Routers"))

# Collect "sh ip int brief" from each matched device and pretty-print it.
output = csr_routers.run(netmiko_send_command, command_string="sh ip int brief")
print_result(output)
| 28
| 75
| 0.818878
|
4a142726b502549711140a3d26c1b2596757d1cb
| 839
|
py
|
Python
|
neoask/bps/neovis/view.py
|
psiace-archive/neoask
|
4a47b8ca65813535b4399a879dca845729c1cb6e
|
[
"MIT"
] | 2
|
2021-05-19T15:40:42.000Z
|
2021-05-19T15:43:43.000Z
|
neoask/bps/neovis/view.py
|
PsiACE/neoask
|
4a47b8ca65813535b4399a879dca845729c1cb6e
|
[
"MIT"
] | null | null | null |
neoask/bps/neovis/view.py
|
PsiACE/neoask
|
4a47b8ca65813535b4399a879dca845729c1cb6e
|
[
"MIT"
] | null | null | null |
import base64
import functools
import socket
from flask import (
Blueprint,
flash,
g,
redirect,
render_template,
request,
session,
url_for,
)
from werkzeug.security import check_password_hash, generate_password_hash
from neoask.bps.auth.view import login_required
from neoask.core.settings import NEO_HOST, NEO_PASSWORD, NEO_USERNAME
from neoask.db.sqlite import get_sqlite
bp = Blueprint("neovis", __name__)
@bp.route("/")
def index():
try:
return render_template("neovis/index.html")
except:
return render_template("error.html")
@bp.route("/query")
@login_required
def form():
try:
return render_template(
"neovis/query.html", host=NEO_HOST, user=NEO_USERNAME, password=NEO_PASSWORD
)
except:
return render_template("error.html")
| 20.463415
| 88
| 0.698451
|
4a14277c767255db0f3f7479cba6648ae338f8ed
| 3,604
|
py
|
Python
|
groupdocs_conversion_cloud/models/igs_load_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | 5
|
2019-11-21T04:58:45.000Z
|
2021-02-05T05:22:37.000Z
|
groupdocs_conversion_cloud/models/igs_load_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | null | null | null |
groupdocs_conversion_cloud/models/igs_load_options.py
|
groupdocs-conversion-cloud/groupdocs-conversion-cloud-python
|
841d06ad3205e10e8f2726517779ac2d7c33a02a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="IgsLoadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import CadLoadOptions
class IgsLoadOptions(CadLoadOptions):
    """Load options for IGS documents.

    Declares no properties of its own; it exists so the conversion API can
    distinguish IGS inputs while reusing every CAD load option. Attribute
    maps start empty and are merged with the base class maps in __init__.
    """

    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, **kwargs):  # noqa: E501
        """Initializes new instance of IgsLoadOptions"""  # noqa: E501
        base = super(IgsLoadOptions, self)
        base.__init__(**kwargs)
        self.swagger_types.update(base.swagger_types)
        self.attribute_map.update(base.attribute_map)

    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when both are IgsLoadOptions with
        matching attribute dicts."""
        return isinstance(other, IgsLoadOptions) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 34.653846
| 85
| 0.596004
|
4a142832115c4983bf13b0d285e30089a58aa66c
| 107,702
|
py
|
Python
|
tests/unit/gapic/dialogflowcx_v3/test_versions.py
|
LaudateCorpus1/python-dialogflow-cx
|
cf9579171290ecf5afeeb6a38a3504857808a4ef
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/dialogflowcx_v3/test_versions.py
|
LaudateCorpus1/python-dialogflow-cx
|
cf9579171290ecf5afeeb6a38a3504857808a4ef
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/gapic/dialogflowcx_v3/test_versions.py
|
LaudateCorpus1/python-dialogflow-cx
|
cf9579171290ecf5afeeb6a38a3504857808a4ef
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3.services.versions import VersionsAsyncClient
from google.cloud.dialogflowcx_v3.services.versions import VersionsClient
from google.cloud.dialogflowcx_v3.services.versions import pagers
from google.cloud.dialogflowcx_v3.services.versions import transports
from google.cloud.dialogflowcx_v3.types import flow
from google.cloud.dialogflowcx_v3.types import version
from google.cloud.dialogflowcx_v3.types import version as gcdc_version
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy client-certificate source used by the mTLS tests."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when *client* defaults to localhost."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS
    variants, passes through hosts that are already mTLS or non-Google, and
    returns None for None."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert VersionsClient._get_default_mtls_endpoint(None) is None
    assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert (
        VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_info(client_class):
    """from_service_account_info builds a client from an in-memory info dict
    and wires the resulting credentials into the transport."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.VersionsGrpcTransport, "grpc"),
        (transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_versions_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports call with_always_use_jwt_access(True) on the credentials when
    the flag is set, and leave the credentials untouched when it is False."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_file(client_class):
    """from_service_account_file and its _json alias build a client from a
    credentials file path."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and when
    requested by name."""
    transport = VersionsClient.get_transport_class()
    available_transports = [
        transports.VersionsGrpcTransport,
    ]
    assert transport in available_transports
    transport = VersionsClient.get_transport_class("grpc")
    assert transport == transports.VersionsGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (VersionsClient, transports.VersionsGrpcTransport, "grpc"),
        (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
    VersionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
    """client_options handling: explicit transport instances, api_endpoint
    overrides, the GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables, and quota_project_id forwarding."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
        (
            VersionsAsyncClient,
            transports.VersionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
        (
            VersionsAsyncClient,
            transports.VersionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
    VersionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the mTLS
    endpoint exactly when a client certificate is available (explicitly or via
    ADC) and GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
    VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
    VersionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source honors the client-certificate and
    mTLS-endpoint environment variables and any explicit client options."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (VersionsClient, transports.VersionsGrpcTransport, "grpc"),
        (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_versions_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (VersionsClient, transports.VersionsGrpcTransport, "grpc"),
        (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_versions_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file set in client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_versions_client_client_options_from_dict():
    """client_options may be passed as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.versions.transports.VersionsGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("request_type", [version.ListVersionsRequest, dict,])
def test_list_versions(request_type, transport: str = "grpc"):
    """list_versions sends a ListVersionsRequest and returns a pager wrapping
    the response."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.ListVersionsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.ListVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListVersionsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_versions_empty_call():
    """Calling list_versions() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        client.list_versions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
    transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
    """Async list_versions sends the request and returns an async pager."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.ListVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListVersionsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
    """Same as test_list_versions_async but passing the request as a dict."""
    await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
    """The request's `parent` field is propagated as an x-goog-request-params
    routing header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.ListVersionsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        call.return_value = version.ListVersionsResponse()
        client.list_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
    """Async variant: the `parent` field is sent as a routing header."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.ListVersionsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse()
        )
        await client.list_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_versions_flattened():
    """The flattened `parent=` kwarg populates the request object."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.ListVersionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_versions(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_versions_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_versions(
            version.ListVersionsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
    """Async: the flattened `parent=` kwarg populates the request object."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Fix: removed a dead `call.return_value = version.ListVersionsResponse()`
        # assignment that was immediately overwritten by the line below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_versions(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
    """Async: passing both a request object and flattened fields raises."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_versions(
            version.ListVersionsRequest(), parent="parent_value",
        )
def test_list_versions_pager(transport_name: str = "grpc"):
    """The list_versions pager walks every page and yields each Version."""
    client = VersionsClient(
        # Fix: instantiate the credentials — the generated code passed the
        # AnonymousCredentials class itself; every other test in this file
        # constructs AnonymousCredentials().
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(), version.Version(),],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(versions=[], next_page_token="def",),
            version.ListVersionsResponse(
                versions=[version.Version(),], next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(),],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_versions(request={})
        assert pager._metadata == metadata
        # Exhausting the pager yields all six versions across the four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, version.Version) for i in results)
def test_list_versions_pages(transport_name: str = "grpc"):
    """Iterating pager.pages exposes each raw page and its next_page_token."""
    client = VersionsClient(
        # Fix: instantiate the credentials (was passing the class itself).
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(), version.Version(),],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(versions=[], next_page_token="def",),
            version.ListVersionsResponse(
                versions=[version.Version(),], next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_versions(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
    """Async pager yields every Version across pages via `async for`."""
    client = VersionsAsyncClient(
        # Fix: instantiate the credentials (was passing the class itself).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(), version.Version(),],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(versions=[], next_page_token="def",),
            version.ListVersionsResponse(
                versions=[version.Version(),], next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_versions(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
    """Async: iterating `.pages` exposes raw pages and next_page_tokens."""
    client = VersionsAsyncClient(
        # Fix: instantiate the credentials (was passing the class itself).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(), version.Version(),],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(versions=[], next_page_token="def",),
            version.ListVersionsResponse(
                versions=[version.Version(),], next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[version.Version(), version.Version(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_versions(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [version.GetVersionRequest, dict,])
def test_get_version(request_type, transport: str = "grpc"):
    """get_version sends a GetVersionRequest and returns the Version proto."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.Version(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            state=version.Version.State.RUNNING,
        )
        response = client.get_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.GetVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, version.Version)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == version.Version.State.RUNNING
def test_get_version_empty_call():
    """Calling get_version() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        client.get_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
    transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
    """Async get_version sends the request and returns the Version proto."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.Version(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                state=version.Version.State.RUNNING,
            )
        )
        response = await client.get_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.GetVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, version.Version)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == version.Version.State.RUNNING
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
    """Re-run the async get_version test with a plain dict request."""
    await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
    """Verify get_version attaches the x-goog-request-params routing header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.GetVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        call.return_value = version.Version()
        client.get_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
    """Async variant: verify get_version attaches the routing header."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.GetVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
        await client.get_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_version_flattened():
    """Verify the flattened name= keyword populates request.name."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.Version()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_version(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_version_flattened_error():
    """Verify mixing a request object with flattened fields raises ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_version(
            version.GetVersionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_version_flattened_async():
    """Async variant: verify the flattened name= keyword populates request.name.

    Fix: dropped the dead ``call.return_value = version.Version()`` assignment
    that was immediately overwritten by the awaitable wrapper below.
    """
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_version(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_version(
            version.GetVersionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [gcdc_version.CreateVersionRequest, dict,])
def test_create_version(request_type, transport: str = "grpc"):
    """Verify create_version sends a CreateVersionRequest and returns an LRO future."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.CreateVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_version_empty_call():
    """Verify create_version() with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        client.create_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
    transport: str = "grpc_asyncio", request_type=gcdc_version.CreateVersionRequest
):
    """Async variant: verify create_version awaits the stub and returns an LRO future."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.CreateVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
    """Re-run the async create_version test with a plain dict request."""
    await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
    """Verify create_version attaches the x-goog-request-params routing header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_version.CreateVersionRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
    """Async variant: verify create_version attaches the routing header."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_version.CreateVersionRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_version_flattened():
    """Verify flattened parent= and version= keywords populate the request."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_version(
            parent="parent_value", version=gcdc_version.Version(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].version
        mock_val = gcdc_version.Version(name="name_value")
        assert arg == mock_val
def test_create_version_flattened_error():
    """Verify mixing a request object with flattened fields raises ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_version(
            gcdc_version.CreateVersionRequest(),
            parent="parent_value",
            version=gcdc_version.Version(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_version_flattened_async():
    """Async variant: verify flattened parent=/version= populate the request.

    Fix: dropped the dead ``call.return_value = operations_pb2.Operation(...)``
    assignment that was immediately overwritten by the awaitable wrapper below.
    """
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_version(
            parent="parent_value", version=gcdc_version.Version(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].version
        mock_val = gcdc_version.Version(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_version(
            gcdc_version.CreateVersionRequest(),
            parent="parent_value",
            version=gcdc_version.Version(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [gcdc_version.UpdateVersionRequest, dict,])
def test_update_version(request_type, transport: str = "grpc"):
    """Verify update_version sends an UpdateVersionRequest and unpacks the Version."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_version.Version(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            state=gcdc_version.Version.State.RUNNING,
        )
        response = client.update_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.UpdateVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_version.Version)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == gcdc_version.Version.State.RUNNING
def test_update_version_empty_call():
    """Verify update_version() with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        client.update_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
    transport: str = "grpc_asyncio", request_type=gcdc_version.UpdateVersionRequest
):
    """Async variant: verify update_version awaits the stub and unpacks the Version."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_version.Version(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                state=gcdc_version.Version.State.RUNNING,
            )
        )
        response = await client.update_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_version.UpdateVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_version.Version)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == gcdc_version.Version.State.RUNNING
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
    """Re-run the async update_version test with a plain dict request."""
    await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
    """Verify update_version routes on the nested version.name field header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_version.UpdateVersionRequest()
    request.version.name = "version.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        call.return_value = gcdc_version.Version()
        client.update_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
    """Async variant: verify update_version routes on version.name."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_version.UpdateVersionRequest()
    request.version.name = "version.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_version.Version()
        )
        await client.update_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
        "metadata"
    ]
def test_update_version_flattened():
    """Verify flattened version= and update_mask= keywords populate the request."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_version.Version()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_version(
            version=gcdc_version.Version(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].version
        mock_val = gcdc_version.Version(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_version_flattened_error():
    """Verify mixing a request object with flattened fields raises ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_version(
            gcdc_version.UpdateVersionRequest(),
            version=gcdc_version.Version(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_version_flattened_async():
    """Async variant: verify flattened version=/update_mask= populate the request.

    Fix: dropped the dead ``call.return_value = gcdc_version.Version()``
    assignment that was immediately overwritten by the awaitable wrapper below.
    """
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_version.Version()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_version(
            version=gcdc_version.Version(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].version
        mock_val = gcdc_version.Version(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_version(
            gcdc_version.UpdateVersionRequest(),
            version=gcdc_version.Version(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [version.DeleteVersionRequest, dict,])
def test_delete_version(request_type, transport: str = "grpc"):
    """Verify delete_version sends a DeleteVersionRequest and returns None."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.DeleteVersionRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_version_empty_call():
    """Verify delete_version() with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        client.delete_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
    transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
    """Async variant: verify delete_version awaits the stub and returns None."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.DeleteVersionRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
    """Re-run the async delete_version test with a plain dict request."""
    await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
    """Verify delete_version attaches the x-goog-request-params routing header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.DeleteVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        call.return_value = None
        client.delete_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
    """Async variant: verify delete_version attaches the routing header."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.DeleteVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_version_flattened():
    """Verify the flattened name= keyword populates request.name."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_version(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_version_flattened_error():
    """Verify mixing a request object with flattened fields raises ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_version(
            version.DeleteVersionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
    """Async variant: verify the flattened name= keyword populates request.name.

    Fix: dropped the dead ``call.return_value = None`` assignment that was
    immediately overwritten by the awaitable wrapper below.
    """
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_version(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_version(
            version.DeleteVersionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [version.LoadVersionRequest, dict,])
def test_load_version(request_type, transport: str = "grpc"):
    """Verify load_version sends a LoadVersionRequest and returns an LRO future."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.load_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.load_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.LoadVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_load_version_empty_call():
    """Verify load_version() with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.load_version), "__call__") as call:
        client.load_version()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.LoadVersionRequest()
@pytest.mark.asyncio
async def test_load_version_async(
    transport: str = "grpc_asyncio", request_type=version.LoadVersionRequest
):
    """Async variant: verify load_version awaits the stub and returns an LRO future."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.load_version), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.load_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.LoadVersionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_load_version_async_from_dict():
    """Re-run the async load_version test with a plain dict request."""
    await test_load_version_async(request_type=dict)
def test_load_version_field_headers():
    """Verify load_version attaches the x-goog-request-params routing header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.LoadVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.load_version), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.load_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_load_version_field_headers_async():
    """Async variant: verify load_version attaches the routing header."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.LoadVersionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.load_version), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.load_version(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_load_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.load_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_load_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.load_version(
version.LoadVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_load_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.load_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_load_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.load_version(
version.LoadVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [version.CompareVersionsRequest, dict,])
def test_compare_versions(request_type, transport: str = "grpc"):
    """compare_versions sends the expected request and unpacks the response fields."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.CompareVersionsResponse(
            base_version_content_json="base_version_content_json_value",
            target_version_content_json="target_version_content_json_value",
        )
        response = client.compare_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.CompareVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, version.CompareVersionsResponse)
    assert response.base_version_content_json == "base_version_content_json_value"
    assert response.target_version_content_json == "target_version_content_json_value"
def test_compare_versions_empty_call():
    """Calling compare_versions with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        client.compare_versions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.CompareVersionsRequest()
@pytest.mark.asyncio
async def test_compare_versions_async(
    transport: str = "grpc_asyncio", request_type=version.CompareVersionsRequest
):
    """Async variant: compare_versions awaits the fake call and unpacks the response."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.CompareVersionsResponse(
                base_version_content_json="base_version_content_json_value",
                target_version_content_json="target_version_content_json_value",
            )
        )
        response = await client.compare_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == version.CompareVersionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, version.CompareVersionsResponse)
    assert response.base_version_content_json == "base_version_content_json_value"
    assert response.target_version_content_json == "target_version_content_json_value"
@pytest.mark.asyncio
async def test_compare_versions_async_from_dict():
    """Re-run the async compare_versions test with a plain dict request."""
    await test_compare_versions_async(request_type=dict)
def test_compare_versions_field_headers():
    """compare_versions must emit base_version as an x-goog-request-params header."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.CompareVersionsRequest()
    request.base_version = "base_version/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        call.return_value = version.CompareVersionsResponse()
        client.compare_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "base_version=base_version/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_compare_versions_field_headers_async():
    """Async variant: the base_version routing header must also be sent."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = version.CompareVersionsRequest()
    request.base_version = "base_version/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.CompareVersionsResponse()
        )
        await client.compare_versions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "base_version=base_version/value",) in kw[
        "metadata"
    ]
def test_compare_versions_flattened():
    """Flattened base_version keyword must be folded into the request message."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.CompareVersionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.compare_versions(base_version="base_version_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].base_version
        mock_val = "base_version_value"
        assert arg == mock_val
def test_compare_versions_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.compare_versions(
            version.CompareVersionsRequest(), base_version="base_version_value",
        )
@pytest.mark.asyncio
async def test_compare_versions_flattened_async():
    """Async variant of the flattened-field test for compare_versions."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.CompareVersionsResponse()
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.CompareVersionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.compare_versions(base_version="base_version_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].base_version
        mock_val = "base_version_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_compare_versions_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.compare_versions(
            version.CompareVersionsRequest(), base_version="base_version_value",
        )
def test_credentials_transport_error():
    """Mutually exclusive client options with a transport instance raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.VersionsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VersionsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.VersionsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VersionsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.VersionsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = VersionsClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = VersionsClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.VersionsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VersionsClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client constructed around an explicit transport must adopt it as-is."""
    custom_transport = transports.VersionsGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = VersionsClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a truthy channel."""
    for transport_cls in (
        transports.VersionsGrpcTransport,
        transports.VersionsGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """Without an explicit transport the client defaults to gRPC."""
    # A client should use the gRPC transport by default.
    client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.VersionsGrpcTransport,)
def test_versions_base_transport_error():
    """Credentials object plus credentials_file must raise DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.VersionsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_versions_base_transport():
    """Every RPC on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.versions.transports.VersionsTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.VersionsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_versions",
        "get_version",
        "create_version",
        "update_version",
        "delete_version",
        "load_version",
        "compare_versions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_versions_base_transport_with_credentials_file():
    """A credentials_file is loaded through ADC with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.dialogflowcx_v3.services.versions.transports.VersionsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VersionsTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_versions_base_transport_with_adc():
    """With neither credentials nor a file, ADC is consulted exactly once."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.dialogflowcx_v3.services.versions.transports.VersionsTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VersionsTransport()
        adc.assert_called_once()
def test_versions_auth_adc():
    """Client construction with no credentials calls ADC with the default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        VersionsClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_versions_transport_auth_adc(transport_class):
    """Concrete transports pass user scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.VersionsGrpcTransport, grpc_helpers),
        (transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation must receive the host, creds, scopes and gRPC size options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS: explicit ssl_channel_credentials wins; otherwise the cert source is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_versions_host_no_port():
    """When the configured endpoint has no port, the default :443 is appended."""
    options = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_host_with_port():
    """An explicit port in the configured endpoint is preserved verbatim."""
    options = client_options.ClientOptions(
        api_endpoint="dialogflow.googleapis.com:8000"
    )
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_versions_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VersionsGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` (not `== None`): identity check per PEP 8; no SSL credentials
    # should be synthesized when the caller already supplied a channel.
    assert transport._ssl_channel_credentials is None
def test_versions_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VersionsGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint + client_cert_source still builds an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must emit a DeprecationWarning while
            # still producing a working channel via ADC credentials.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # client_cert_source_callback returns (b"cert bytes", b"key bytes")
            # per the fixture defined earlier in this file.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS path without a cert source: SslCredentials supplies channel creds."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_versions_grpc_lro_client():
    """The sync transport lazily creates and caches a single OperationsClient."""
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_versions_grpc_lro_async_client():
    """The asyncio transport lazily creates and caches a single OperationsAsyncClient."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_version_path():
    """version_path assembles the fully-qualified version resource name."""
    project = "squid"
    location = "clam"
    agent = "whelk"
    flow = "octopus"
    version = "oyster"
    expected = (
        f"projects/{project}/locations/{location}/agents/{agent}"
        f"/flows/{flow}/versions/{version}"
    )
    actual = VersionsClient.version_path(project, location, agent, flow, version)
    assert expected == actual
def test_parse_version_path():
    """parse_version_path inverts version_path back into its components."""
    expected = {
        "project": "nudibranch",
        "location": "cuttlefish",
        "agent": "mussel",
        "flow": "winkle",
        "version": "nautilus",
    }
    path = VersionsClient.version_path(**expected)
    # Round-trip: parsing the constructed path must recover the inputs.
    assert VersionsClient.parse_version_path(path) == expected
def test_common_billing_account_path():
    """common_billing_account_path builds billingAccounts/{id}."""
    billing_account = "scallop"
    expected = f"billingAccounts/{billing_account}"
    assert VersionsClient.common_billing_account_path(billing_account) == expected
def test_parse_common_billing_account_path():
    """Round-trip check for the billing-account path helper."""
    expected = {"billing_account": "abalone"}
    path = VersionsClient.common_billing_account_path(**expected)
    assert VersionsClient.parse_common_billing_account_path(path) == expected
def test_common_folder_path():
    """common_folder_path builds folders/{folder}."""
    folder = "squid"
    assert VersionsClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """Round-trip check for the folder path helper."""
    expected = {"folder": "clam"}
    path = VersionsClient.common_folder_path(**expected)
    assert VersionsClient.parse_common_folder_path(path) == expected
def test_common_organization_path():
    """common_organization_path builds organizations/{organization}."""
    organization = "whelk"
    assert (
        VersionsClient.common_organization_path(organization)
        == f"organizations/{organization}"
    )
def test_parse_common_organization_path():
    """Round-trip check for the organization path helper."""
    expected = {"organization": "octopus"}
    path = VersionsClient.common_organization_path(**expected)
    assert VersionsClient.parse_common_organization_path(path) == expected
def test_common_project_path():
    """common_project_path builds projects/{project}."""
    project = "oyster"
    assert VersionsClient.common_project_path(project) == f"projects/{project}"
def test_parse_common_project_path():
    """Round-trip check for the project path helper."""
    expected = {"project": "nudibranch"}
    path = VersionsClient.common_project_path(**expected)
    assert VersionsClient.parse_common_project_path(path) == expected
def test_common_location_path():
    """common_location_path builds projects/{project}/locations/{location}."""
    project = "cuttlefish"
    location = "mussel"
    expected = f"projects/{project}/locations/{location}"
    assert VersionsClient.common_location_path(project, location) == expected
def test_parse_common_location_path():
    """Round-trip check for the location path helper."""
    expected = {"project": "winkle", "location": "nautilus"}
    path = VersionsClient.common_location_path(**expected)
    assert VersionsClient.parse_common_location_path(path) == expected
def test_client_with_default_client_info():
    """Custom client_info must be forwarded to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.VersionsTransport, "_prep_wrapped_messages"
    ) as prep:
        client = VersionsClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.VersionsTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = VersionsClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async context manager must close the gRPC channel exactly once."""
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync context manager closes each transport's channel once."""
    # NOTE(review): this local dict shadows the module-level `transports` import
    # for the rest of the function; keep that in mind when extending this test.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = VersionsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client itself as a context manager closes its transport on exit."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = VersionsClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (VersionsClient, transports.VersionsGrpcTransport),
        (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| 39.10748
| 111
| 0.690247
|
4a1428bcf6287d7e0e6c31e4a5c96e55bc92b0f4
| 3,332
|
py
|
Python
|
movie_ns_project/settings.py
|
KenCz94/movie-ns-api
|
b5cd0ff5974ea9ea4f212ee30bc7716bb2bbc440
|
[
"MIT"
] | null | null | null |
movie_ns_project/settings.py
|
KenCz94/movie-ns-api
|
b5cd0ff5974ea9ea4f212ee30bc7716bb2bbc440
|
[
"MIT"
] | null | null | null |
movie_ns_project/settings.py
|
KenCz94/movie-ns-api
|
b5cd0ff5974ea9ea4f212ee30bc7716bb2bbc440
|
[
"MIT"
] | null | null | null |
"""Django settings for the movie_ns_project API (development configuration)."""
from pathlib import Path

import pymysql

# Let PyMySQL masquerade as MySQLdb so Django's 'django.db.backends.mysql'
# engine works without the native mysqlclient package.
pymysql.install_as_MySQLdb()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before any production deployment.
SECRET_KEY = 'django-insecure-q@ocjp1k!72w8g7%8g440p@7$sb@qgdgoy-04q0a3ouqzm9_29'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header; restrict to real hostnames in prod.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_filters',
    'rest_framework.authtoken',
    'rest_framework',
    'corsheaders',
    'movie_ns_api',
]

# CorsMiddleware must run before any middleware that can generate responses
# (notably CommonMiddleware); it previously sat last in this list, so CORS
# headers could be missing from some responses.  See django-cors-headers docs.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'movie_ns_project.urls'

# NOTE(review): allowing every origin is acceptable for development only.
CORS_ALLOW_ALL_ORIGINS = True

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'movie_ns_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'movie-ns-db',
'USER': 'admin',
'PASSWORD': '$ns-movies-22',
'HOST': 'ns-movies-db.cd1g6afv4am3.us-east-1.rds.amazonaws.com',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.23622
| 91
| 0.689676
|
4a142a3512ffb5d880da06ca91f3f422b124aa1a
| 30,952
|
py
|
Python
|
jax/interpreters/pxla.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/interpreters/pxla.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/interpreters/pxla.py
|
tomhennigan/jax
|
fb6c9f64e49880e3c3d0ff9a2ef7345fc9bbe717
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple, defaultdict
from contextlib import contextmanager
import itertools as it
import operator as op
import threading
from absl import logging
import numpy as onp
import six
from six.moves import reduce
from .. import core
from .. import linear_util as lu
from ..abstract_arrays import (ConcreteArray, ShapedArray, array_types,
raise_to_shaped)
from ..util import partial, unzip2, concatenate, prod, safe_map
from ..lib import xla_bridge as xb
from .xla import aval_to_xla_shape, xla_destructure
from .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal
from .batching import broadcast, not_mapped
from . import batching
from . import partial_eval as pe
from . import xla
from . import ad
_map = safe_map
### util
def identity(x): return x
def shard_args(backend, devices, assignments, axis_size, tuple_args, args):
  """Shard each argument data array along its leading axis.
  Args:
    backend: the platform to be used
    devices: list of Devices mapping replica index to a physical device.
    assignments: list of integers with the same length as `devices` mapping
      replica index to an index along the leading axis (i.e. a shard).
    axis_size: int, size of the leading axis to be sharded.
    tuple_args: bool, if True the per-replica buffers are wrapped into a
      single XLA tuple (used for long argument lists, e.g. on TPU).
    args: a sequence of JaxTypes representing arguments to be sharded along
      their leading axes and placed on `devices`.
  Returns:
    A list of device buffers with the same length as `devices` indexed by
    replica number, so that the nth element is the argument to be passed to the
    nth replica.
  """
  nargs, nrep = len(args), len(devices)
  # buffers[replica][arg_index] -> device buffer for that replica/argument.
  buffers = [[None] * nargs for _ in range(nrep)]
  for a, arg in enumerate(args):
    # The shard_arg_handlers allow an extensible set of types to be sharded, but
    # inline handling for ShardedDeviceArray as a special case for performance
    if type(arg) is ShardedDeviceArray:
      if nrep == len(arg.device_buffers):
        # The argument is already prepared for the right number of replicas, so
        # we just ensure that buf[r] is on devices[r] for each replica index r
        # TODO(mattjj): compared to the other case, this logic has less looping
        # but could incur more device-to-device data movement
        for r, buf in enumerate(arg.device_buffers):
          buffers[r][a] = buf if buf.device() == devices[r] else buf.copy_to_device(devices[r])
      else:
        # The argument is prepared for a different number of replicas, so for
        # each of our replica indices we check if there's already a buffer with
        # the correct logical assignment on the correct device, and if not just
        # copy one of them
        prev_assignments = assign_shards_to_replicas(len(arg.device_buffers), axis_size)
        # candidates[shard_index] -> buffers already holding that shard's data.
        candidates = defaultdict(list)
        for r, buf in enumerate(arg.device_buffers):
          candidates[prev_assignments[r]].append(buf)
        for r in range(nrep):
          # for-else: if no candidate is already on devices[r], copy one over.
          for buf in candidates[assignments[r]]:
            if buf.device() == devices[r]:
              buffers[r][a] = buf
              break
          else:
            buffers[r][a] = buf.copy_to_device(devices[r])
    else:
      # Generic path: dispatch on the argument's type.
      bufs = shard_arg_handlers[type(arg)](arg, devices, assignments)
      for r, buf in enumerate(bufs):
        buffers[r][a] = buf
  if tuple_args:
    # Collapse each replica's argument list into one tuple buffer.
    buffers = [[xla.make_tuple(bufs, devices[r], backend)]
               for r, bufs in enumerate(buffers)]
  return buffers
# Registry mapping Python argument types to sharding functions; populated below.
shard_arg_handlers = {}
shard_arg_handlers[core.Unit] = \
    lambda x, devices, _: [xla.device_put(core.unit, d) for d in devices]
def _shard_array(x, devices, assignments):
  """Place shard x[assignments[r]] on devices[r] for each replica r."""
  nrep = len(devices)
  return (xla.device_put(x[assignments[r]], devices[r]) for r in range(nrep))
# Plain ndarray-like types all shard by indexing the leading axis.
for _t in array_types:
  shard_arg_handlers[_t] = _shard_array
def _shard_device_array(x, devices, assignments):
  """Like _shard_array but unstacks a DeviceArray once to avoid repeated slicing."""
  nrep = len(devices)
  xs = x._unstack()
  return (xla.device_put(xs[assignments[r]], devices[r])
          for r in range(nrep))
shard_arg_handlers[xla.DeviceArray] = _shard_device_array
def shard_aval(size, aval):
  """Compute the per-shard abstract value for `aval` mapped over an axis of `size`."""
  try:
    return shard_aval_handlers[type(aval)](size, aval)
  except KeyError:
    raise TypeError("No shard_aval handler for type: {}".format(type(aval)))
# Registry mapping abstract-value types to shard_aval implementations.
shard_aval_handlers = {}
shard_aval_handlers[core.AbstractUnit] = lambda size, x: x
def _shard_abstract_array(size, x):
  """Drop the leading axis of a ShapedArray, checking it matches the mapped size."""
  if x.shape[0] != size:
    raise ValueError("Axis size {} does not match leading dimension of "
                     "shape {}".format(size, x.shape))
  return ShapedArray(x.shape[1:], x.dtype)
shard_aval_handlers[ShapedArray] = _shard_abstract_array
def aval_to_result_handler(size, nrep, aval):
  """Return a function turning per-replica output buffers into a result value."""
  try:
    return pxla_result_handlers[type(aval)](size, nrep, aval)
  except KeyError:
    raise TypeError("No pxla_result_handler for type: {}".format(type(aval)))
# Registry mapping abstract-value types to result-handler factories.
pxla_result_handlers = {}
pxla_result_handlers[core.AbstractUnit] = lambda *_: lambda _: core.unit
def array_result_handler(size, nrep, aval):
  """Build a handler that wraps output buffers in a ShardedDeviceArray whose
  full shape restores the mapped leading axis."""
  full_aval = ShapedArray((size,) + aval.shape, aval.dtype)
  return partial(ShardedDeviceArray, full_aval)
pxla_result_handlers[ShapedArray] = array_result_handler
pxla_result_handlers[ConcreteArray] = array_result_handler
def assign_shards_to_replicas(nrep, size):
  """Produce a mapping from replica id to shard index.
  Args:
    nrep: int, number of replicas (a computation-dependent value).
    size: int, size of the data array axis being sharded.
  Returns:
    A tuple of integers of length nrep in which the elements take on values from
    0 to size-1. Replica n is assigned shard data_array[assignments[n]].
  """
  per_shard, leftover = divmod(nrep, size)
  assert not leftover
  # Each shard index is repeated once per replica in its group, in order:
  # e.g. nrep=4, size=2 -> (0, 0, 1, 1).
  return tuple(onp.repeat(onp.arange(size), per_shard))
### applying parallel primitives in op-by-op Python dispatch
# There are at least two cases where we might want to evaluate a parallel
# primitive dispatched from Python, rather than being staged out:
# 1. axis_size = psum(1, 'axis_name'),
# 2. to enable an implicit outermost pmap-like context for multi-host
# multi-controller SPMD programs.
# In each case, we can't rely on any data dependence on a pmap trace; instead we
# need some dynamic context, basically modeling the axis name environment stack.
# To handle the former case, we don't need to communicate at all; we instead
# have a table of parallel_pure_rules. To handle the latter case, we'll have a
# globally-scoped root environment frame and compile and execute a single-op
# XLA collective.
class DynamicAxisEnvFrame(object):
  """One frame of the dynamic axis-name environment stack.

  Holds the hardware-mapped ('hard') axis info at construction; the
  software-mapped ('soft') fields are filled in later by the split-axis
  machinery.
  """
  __slots__ = ["name", "pmap_trace", "hard_size", "soft_trace", "soft_size"]
  def __init__(self, name, pmap_trace, hard_size):
    self.name, self.pmap_trace, self.hard_size = name, pmap_trace, hard_size
    # Not soft-mapped until a chunk is attached.
    self.soft_trace = self.soft_size = None
class DynamicAxisEnv(list):
  """A stack of DynamicAxisEnvFrame objects, indexable by axis name.

  Lookup searches innermost (most recently pushed) frames first.
  """
  def __contains__(self, axis_name):
    return any(frame.name == axis_name for frame in self)
  def __getitem__(self, axis_name):
    # Innermost-first search; raise like an unbound variable if absent.
    for frame in reversed(self):
      if frame.name == axis_name:
        return frame
    raise NameError("unbound axis name: {}".format(axis_name))
  @property
  def sizes(self):
    """Tuple of hard sizes, outermost first."""
    return tuple(frame.hard_size for frame in self)
  @property
  def nreps(self):
    """Total replica count: the product of all hard sizes."""
    return prod(frame.hard_size for frame in self)
class _ThreadLocalState(threading.local):
  # Each thread gets its own dynamic axis-name environment stack.
  def __init__(self):
    self.dynamic_axis_env = DynamicAxisEnv()
_thread_local_state = _ThreadLocalState()
@contextmanager
def extend_dynamic_axis_env(axis_name, pmap_trace, hard_size):
  """Push a frame for `axis_name` onto this thread's axis env for the duration
  of the context; always popped, even if the body raises."""
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  dynamic_axis_env.append(DynamicAxisEnvFrame(axis_name, pmap_trace, hard_size))
  try:
    yield
  finally:
    dynamic_axis_env.pop()
def unmapped_device_count(backend=None):
  """Return how many devices remain after dividing out all currently-mapped
  axis sizes; asserts the device count divides evenly."""
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  mapped = prod(frame.hard_size for frame in dynamic_axis_env)
  unmapped, ragged = divmod(xb.device_count(backend), mapped)
  assert not ragged and unmapped > 0
  return unmapped
def apply_parallel_primitive(prim, *args, **params):
  """Evaluate a collective primitive op-by-op (outside any pmap trace)."""
  # This is the op-by-op version of applying a collective primitive, like a psum
  # that doesn't have a data dependence on the argument of a pmap function. In
  # particular, this code gets hit when we write `axis_size = psum(1, 'i')`. We
  # look up information in the dynamic axis env.
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  axis_name = params.pop('axis_name')
  # Logical axis size is the hard (device) size times any soft (software) chunk.
  logical_size = lambda frame: frame.hard_size * (frame.soft_size or 1)
  if isinstance(axis_name, (list, tuple)):
    shape = tuple(logical_size(dynamic_axis_env[name]) for name in axis_name)
  else:
    shape = (logical_size(dynamic_axis_env[axis_name]),)
  return parallel_pure_rules[prim](*args, shape=shape, **params)
# Registry of pure (no-communication) rules for collective primitives.
parallel_pure_rules = {}
def axis_index(axis_name):
  """Return the index of the current replica along the named mapped axis."""
  dynamic_axis_env = _thread_local_state.dynamic_axis_env
  frame = dynamic_axis_env[axis_name]
  # Sizes of all axes up to and including the named one (outermost first).
  sizes = dynamic_axis_env.sizes[:dynamic_axis_env.index(frame)+1]
  nreps = dynamic_axis_env.nreps
  # Bind through the pmap trace (and soft trace, if present) so the primitive
  # is staged into the traced computation rather than evaluated eagerly.
  dummy_arg = frame.pmap_trace.pure(core.unit)
  if frame.soft_trace:
    dummy_arg = frame.soft_trace.pure(dummy_arg)
  return axis_index_p.bind(dummy_arg, nreps=nreps, sizes=sizes,
                           soft_size=frame.soft_size, axis_name=axis_name)
def _axis_index_partial_eval(trace, _, **params):
  # This partial_eval rule adds the axis_index primitive into the jaxpr formed
  # during pmap lowering. It is like the standard JaxprTrace.process_primitive
  # rule except that we don't attempt to lower out of the trace.
  out_aval = ShapedArray((), onp.int32)
  out_tracer = pe.JaxprTracer(trace, pe.PartialVal((out_aval, core.unit)), None)
  eqn = pe.new_eqn_recipe([], [out_tracer], axis_index_p, (), params)
  out_tracer.recipe = eqn
  return out_tracer
def _axis_index_translation_rule(c, nreps, sizes, soft_size, axis_name):
  """Lower axis_index to (ReplicaId // div) % mod, recovering this axis's
  position from the flat replica id."""
  div = c.Constant(onp.array(nreps // prod(sizes), dtype=onp.uint32))
  mod = c.Constant(onp.array(sizes[-1], dtype=onp.uint32))
  unsigned_index = c.Rem(c.Div(c.ReplicaId(), div), mod)
  return c.ConvertElementType(unsigned_index, xb.dtype_to_etype(onp.int32))
axis_index_p = core.Primitive('axis_index')
xla.translations[axis_index_p] = _axis_index_translation_rule
pe.custom_partial_eval_rules[axis_index_p] = _axis_index_partial_eval
### lazy device-memory persistence and result handling
class ShardedDeviceValue(xla.DeviceValue):
  """Base class for values whose data lives in multiple device buffers."""
  def _check_if_deleted(self):
    # device_buffers is set to None by delete(); guard against use-after-free.
    if self.device_buffers is None:
      raise ValueError("ShardedDeviceValue has been deleted.")
  def block_until_ready(self):
    """Block until every underlying buffer's computation has finished."""
    self._check_if_deleted()
    for buf in self.device_buffers:
      buf.block_host_until_ready()
    return self
class ShardedDeviceArray(ShardedDeviceValue, xla.DeviceArray):
  """A ShardedDeviceArray is an ndarray sharded across devices.
  The purpose of a ShardedDeviceArray is to reduce the number of transfers when
  executing replicated computations, by allowing results to persist on the
  devices that produced them. That way dispatching a similarly replicated
  computation that consumes the same sharded memory layout does not incur any
  transfers.
  A ShardedDeviceArray represents one logical ndarray value, and simulates the
  behavior of an ndarray so that it can be treated by user code as an ndarray;
  that is, it is only an optimization to reduce transfers.
  The number of device buffers underlying a ShardedDeviceArray instance is equal
  to the number of replicas of the computation that produced it. Each buffer
  represents a shard of the original array, meaning a slice along its leading
  axis. These component buffers reside on distinct devices, but need not
  represent distinct logical shards. The correspondence can be computed with
  the assign_shards_to_replicas function.
  """
  __slots__ = ["device_buffers", "axis_size"]
  # How shards are recombined into the logical array (stack along a new axis).
  _collect = staticmethod(onp.stack)
  def __init__(self, aval, device_buffers):
    self.aval = aval
    self.device_buffers = device_buffers
    self.axis_size = aval.shape[0]
    self._npy_value = None  # lazily-materialized host copy
    if not core.skip_checks:
      assert type(aval) is ShapedArray
  def _ids(self):
    """Indices of one representative buffer per distinct logical shard."""
    num_bufs = len(self.device_buffers)
    assignments = assign_shards_to_replicas(num_bufs, self.axis_size)
    # onp.unique returns the first buffer index holding each shard.
    _, ids = onp.unique(assignments, return_index=True)
    return ids
  def copy_to_host_async(self):
    # Start the device-to-host copies without blocking.
    if self._npy_value is None:
      for buf in self.device_buffers:
        buf.copy_to_host_async()
  def delete(self):
    """Free all device buffers and drop any cached host value."""
    for buf in self.device_buffers:
      buf.delete()
    self.device_buffers = None
    self._npy_value = None
  @property
  def _value(self):
    # Materialize (and cache) the full logical ndarray on the host.
    if self._npy_value is None:
      ids = self._ids()
      self.copy_to_host_async()
      self._npy_value = self._collect([self.device_buffers[i].to_py() for i in ids])
    return self._npy_value
  def __getitem__(self, idx):
    # Fast path: an integer index selects exactly one shard, so serve it
    # directly from its device buffer without materializing the whole array.
    if self._npy_value is None and type(idx) is int:
      ids = self._ids()
      device_buffer = self.device_buffers[ids[idx]]
      aval = ShapedArray(self.aval.shape[1:], self.aval.dtype)
      handler = xla.aval_to_result_handler(aval)
      return handler(device_buffer)
    else:
      return super(ShardedDeviceArray, self).__getitem__(idx)
# This handler code is effectively dead because we in-lined it in shard_args for
# performance reasons.
def _shard_sharded_device_array(x, devices, assignments):
  """Re-shard a ShardedDeviceArray onto `devices` (superseded by the inline
  fast path in shard_args)."""
  n = len(devices)
  if n == len(x.device_buffers):
    # Replica counts match: reuse each buffer, copying only if misplaced.
    return (b if b.device() == devices[r] else b.copy_to_device(devices[r])
            for r, b in enumerate(x.device_buffers))
  else:
    # Replica counts differ: fall back to indexing the logical array.
    return (xla.device_put(x[assignments[r]], devices[r]) for r in range(n))
shard_arg_handlers[ShardedDeviceArray] = _shard_sharded_device_array
# Register ShardedDeviceArray with the core/xla machinery so it is accepted
# anywhere an ordinary array value is.
core.pytype_aval_mappings[ShardedDeviceArray] = ConcreteArray
xla.device_put_handlers[ShardedDeviceArray] = xla._device_put_array
xla.pytype_aval_mappings[ShardedDeviceArray] = lambda x: x.aval
xla.canonicalize_dtype_handlers[ShardedDeviceArray] = identity
xb.register_constant_handler(ShardedDeviceArray, xla._device_array_constant_handler)
class ChunkedDeviceArray(ShardedDeviceArray):
  """A ShardedDeviceArray whose shards are contiguous chunks (not unit slices),
  so the host value is rebuilt by concatenation rather than stacking."""
  __slots__ = []
  _collect = staticmethod(onp.concatenate)
  def __init__(self, axis_size, aval, device_buffers):
    super(ChunkedDeviceArray, self).__init__(aval, device_buffers)
    # Override: the logical axis size is given explicitly, not aval.shape[0].
    self.axis_size = axis_size
  def __getitem__(self, idx):
    # Chunks don't map 1:1 to indices, so skip the parent's single-shard
    # fast path and go through the materialized host value.
    return xla.DeviceArray.__getitem__(self, idx)
shard_arg_handlers[ChunkedDeviceArray] = _shard_array
core.pytype_aval_mappings[ChunkedDeviceArray] = ConcreteArray
xla.device_put_handlers[ChunkedDeviceArray] = xla._device_put_array
xla.pytype_aval_mappings[ChunkedDeviceArray] = lambda x: x.aval
xla.canonicalize_dtype_handlers[ChunkedDeviceArray] = identity
xb.register_constant_handler(ChunkedDeviceArray,
                             xla._device_array_constant_handler)
### the xla_pmap primitive and its rules are comparable to xla_call in xla.py
def xla_pmap_impl(fun, *args, **params):
  """Impl rule for xla_pmap: compile (with caching) and execute."""
  axis_name = params.pop('axis_name')
  axis_size = params.pop('axis_size')
  devices = params.pop('devices')
  backend = params.pop('backend', None)
  assert not params  # no unexpected parameters
  abstract_args = map(xla.abstractify, args)
  # parallel_callable is cached on (fun, params, avals), so repeated calls
  # with the same shapes reuse the compiled executable.
  compiled_fun = parallel_callable(fun, backend, axis_name, axis_size, devices,
                                   *abstract_args)
  return compiled_fun(*args)
@lu.cache
def parallel_callable(fun, backend, axis_name, axis_size, devices, *avals):
  """Trace, compile, and wrap `fun` for replicated (pmap) execution.

  Returns a callable taking the concrete arguments and returning the sharded
  results. Cached via lu.cache on the function and all static arguments.
  """
  if devices is not None and len(devices) == 0:
    raise ValueError("'devices' argument to pmap must be non-empty, or None.")
  # Determine the global (cross-host) axis size.
  if devices:
    global_axis_size = len(devices)
  elif xb.host_count() > 1:
    # TODO(skye): relax this constraint or provide functionality for
    # automatically passing appropriate `devices`.
    if axis_size != xb.local_device_count():
      raise ValueError(
          "On multi-host platforms, the input to pmapped functions must have "
          "leading axis size equal to the number of local devices if no "
          "`devices` argument is specified. Got axis_size=%d, "
          "num_local_devices=%d" % (axis_size, xb.local_device_count()))
    global_axis_size = xb.device_count()
  else:
    global_axis_size = axis_size
  # Wrap fun so the axis name is bound in the dynamic axis env while tracing.
  @lu.wrap_init
  def dynamic_fun(dummy, *args):
    with extend_dynamic_axis_env(axis_name, dummy.trace, global_axis_size):
      return fun.call_wrapped(*args)
  # Trace to a jaxpr on per-shard abstract values (leading axis removed).
  avals = tuple(map(partial(shard_aval, axis_size), avals))
  pvals = [PartialVal((aval, core.unit)) for aval in avals]
  pval = PartialVal([core.abstract_unit, core.unit])  # dummy value for axis env
  with core.new_master(JaxprTrace, True) as master:
    jaxpr, (out_pvals, consts, env) = \
        trace_to_subjaxpr(dynamic_fun, master, False).call_wrapped([pval] + pvals)
    jaxpr.invars = jaxpr.invars[1:]  # ignore dummy
    assert not env
    del master
  out_pvs, out_consts = unzip2(out_pvals)
  if all(pv is None for pv in out_pvs):
    # When the output doesn't depend on the input we don't need to compile an
    # XLA computation at all; we handle this as a special case so we can stage
    # out multi-replica XLA computations regardless of the hardware available.
    # The 'None' values here are just dummies we know will be ignored.
    handlers = [_pval_to_result_handler(axis_size, None, pval) for pval in out_pvals]
    results = [handler(None) for handler in handlers]
    return lambda *_: results
  # Replica counts: nested pmaps multiply the per-call replica requirement.
  jaxpr_replicas = xla.jaxpr_replicas(jaxpr)
  num_local_replicas = axis_size * jaxpr_replicas
  num_global_replicas = global_axis_size * jaxpr_replicas
  axis_env = xla.AxisEnv(num_global_replicas, [axis_name], [global_axis_size], devices)
  tuple_args = len(avals) > 100  # pass long arg lists as tuple for TPU
  # Build the XLA computation.
  c = xb.make_computation_builder("pmap_{}".format(fun.__name__))
  xla_consts = _map(c.Constant, consts)
  xla_args = xla._xla_callable_args(c, avals, tuple_args)
  out_nodes = xla.jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, (), *xla_args)
  built = c.Build(c.Tuple(*out_nodes))
  # Work out the device assignment and validate replica counts.
  if devices is None:
    if num_global_replicas > xb.device_count(backend):
      msg = ("compiling computation that requires {} replicas, but only {} XLA "
             "devices are available")
      raise ValueError(msg.format(num_global_replicas, xb.device_count(backend)))
    device_assignment = None
  else:
    assert any(d.host_id == xb.host_id() for d in devices)
    local_devices = [d for d in devices if d.host_id == xb.host_id()]
    assert len(local_devices) > 0
    if num_local_replicas != len(local_devices):
      local_devices_str = ", ".join(map(str, local_devices))
      raise ValueError(
          "Leading axis size of input to pmapped function must equal the "
          "number of local devices passed to pmap. Got axis_size=%d, "
          "num_local_devices=%d.\n(Local devices passed to pmap: %s)"
          % (axis_size, len(local_devices), local_devices_str))
    if num_global_replicas != len(devices):
      raise ValueError("compiling computation that requires %s replicas, "
                       "but %s devices were specified"
                       % (num_global_replicas, len(devices)))
    device_assignment = tuple(d.id for d in devices)
  compiled = built.Compile(
      compile_options=xb.get_compile_options(num_global_replicas, device_assignment),
      backend=xb.get_backend(backend))
  # Pre-bind the input-sharding and output-collecting logic.
  handle_args = partial(shard_args, backend, compiled.local_devices(),
                        assign_shards_to_replicas(num_local_replicas, axis_size),
                        axis_size, tuple_args)
  handle_outs = _pvals_to_results_handler(axis_size, num_local_replicas, out_pvals)
  return partial(execute_replicated, compiled, backend, num_local_replicas, handle_args, handle_outs)
# Sentinel marking output slots not yet filled in by any replica.
class ResultToPopulate(object): pass
result_to_populate = ResultToPopulate()
def _pvals_to_results_handler(size, nrep, out_pvals):
  """Build a function that regroups per-replica output tuples into one
  result value per output, using the per-output handlers."""
  nouts = len(out_pvals)
  handlers = [_pval_to_result_handler(size, nrep, pval) for pval in out_pvals]
  def handler(out_bufs):
    # Transpose from [replica][output] buffers to [output][replica].
    buffers = [[result_to_populate] * nrep for _ in range(nouts)]
    for r, tuple_buf in enumerate(out_bufs):
      for i, buf in enumerate(tuple_buf.destructure()):
        buffers[i][r] = buf
    assert not any(buf is result_to_populate for bufs in buffers
                   for buf in bufs)
    return [h(bufs) for h, bufs in zip(handlers, buffers)]
  return handler
def _pval_to_result_handler(size, nrep, pval):
  """Result handler for one output: a known (input-independent) partial value
  is broadcast to the mapped size; otherwise defer to the aval handler."""
  pv, const = pval
  if pv is None:
    # Output is a compile-time constant; replicate it along the mapped axis.
    bcast_const = core.unit if const is core.unit else broadcast(const, size, 0)
    return lambda _: bcast_const
  else:
    return aval_to_result_handler(size, nrep, pv)
def execute_replicated(compiled, backend, nrep, in_handler, out_handler, *args):
  """Shard args, run the compiled executable on every replica, collect results."""
  if nrep > xb.device_count(backend):
    msg = ("executing pmap computation that requires {} replicas, but only {} "
           "XLA devices are available")
    raise ValueError(msg.format(nrep, xb.device_count(backend)))
  input_bufs = in_handler(args)
  out_bufs = compiled.ExecutePerReplica(list(input_bufs))
  return out_handler(out_bufs)
# The xla_pmap primitive: a map-like call primitive dispatched to XLA.
xla_pmap_p = core.Primitive('xla_pmap')
xla_pmap_p.multiple_results = True
xla_pmap = partial(core.call_bind, xla_pmap_p)
xla_pmap_p.def_custom_bind(xla_pmap)
xla_pmap_p.def_impl(xla_pmap_impl)
def _pmap_translation_rule(c, jaxpr, axis_env, const_nodes, freevar_nodes,
                           in_nodes, axis_name, axis_size, devices, backend=None):
  """Translation rule for a nested xla_pmap: shard inputs, inline the body,
  then unshard the outputs."""
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  if axis_env.devices is not None or (axis_env.names and devices is not None):
    raise ValueError("Nested pmaps with explicit devices argument.")
  new_env = xla.extend_axis_env(axis_env, axis_name, axis_size)
  in_nodes_sharded = list(map(partial(_xla_shard, c, new_env), in_nodes))
  sharded_outs = xla.jaxpr_subcomp(c, jaxpr, backend, new_env, const_nodes,
                                   freevar_nodes, *in_nodes_sharded)
  outs = [_xla_unshard(c, new_env, shard) for shard in sharded_outs]
  return c.Tuple(*outs)
xla.call_translations[xla_pmap_p] = _pmap_translation_rule
ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)
pe.map_primitives.add(xla_pmap_p)
def _xla_shard(c, axis_env, x):
  """Emit HLO selecting this replica's slice of x along the leading axis."""
  xla_shape = c.GetShape(x)
  if xla_shape.is_tuple():
    # Only the empty tuple (unit) is expected here; pass it through unsharded.
    assert not xla_shape.tuple_shapes()
    return x
  else:
    dims = list(xla_shape.dimensions())
    zero = c.Constant(onp.zeros((), dtype=onp.uint32))
    # Slice [replica_axis_index, 0, 0, ...] of extent [1, d1, d2, ...],
    # then drop the leading unit dimension.
    idxs = [_unravel_index(c, axis_env)] + [zero] * (len(dims) - 1)
    return c.Reshape(c.DynamicSlice(x, idxs, [1] + dims[1:]), None, dims[1:])
# TODO(b/110096942): more efficient gather
def _xla_unshard(c, axis_env, x):
  """Emit HLO gathering per-replica shards back into a full array: each
  replica writes its shard into a zero-padded buffer, then a cross-replica
  sum combines them (each position is non-zero on exactly one replica)."""
  xla_shape = c.GetShape(x)
  if xla_shape.is_tuple():
    assert not xla_shape.tuple_shapes()
    return x
  else:
    dims = list(xla_shape.dimensions())
    padded = c.Broadcast(c.Constant(onp.array(0, xla_shape.numpy_dtype())),
                         [axis_env.sizes[-1]] + dims)
    zero = c.Constant(onp.zeros((), dtype=onp.uint32))
    idxs = [_unravel_index(c, axis_env)] + [zero] * len(dims)
    padded = c.DynamicUpdateSlice(padded, c.Reshape(x, None, [1] + dims), idxs)
    return c.CrossReplicaSum(padded, xla.axis_groups(axis_env, axis_env.names[-1]))
def _unravel_index(c, axis_env):
  """Emit HLO computing this replica's index along the innermost mapped axis
  from the flat ReplicaId: (id // div) % mod."""
  div = c.Constant(onp.array(axis_env.nreps // prod(axis_env.sizes), onp.uint32))
  mod = c.Constant(onp.array(axis_env.sizes[-1], onp.uint32))
  return c.Rem(c.Div(c.ReplicaId(), div), mod)
### soft_pmap axis split transformation
# To allow pmap to map over logical axes larger than the number of XLA devices
# available, we use a transformation that effectively simulates having more
# devices in software. The strategy is to split the mapped axis into two axes,
# one to be hardware-mapped and the other to be software-mapped. Thus the
# transformation rewrites the function to be mapped so that it accepts a new
# leading axis (the software-mapped axis), and so that collectives in the
# original function correspond to both device-local operations and collective
# communication operations across hardware devices that implement the original
# logical semantics.
@lu.transformation
def split_axis(axis_name, chunk_size, *args):
  """Transformation adding a software-mapped leading axis of `chunk_size`
  to the function's mapped axis `axis_name` (see comment block above)."""
  with core.new_master(SplitAxisTrace) as master:
    trace = SplitAxisTrace(master, core.cur_sublevel())
    # All inputs start out mapped along the named axis.
    in_tracers = list(map(partial(SplitAxisTracer, trace, axis_name), args))
    with add_chunk_to_axis_env(axis_name, trace, chunk_size):
      outs = yield in_tracers, {}
    out_tracers = list(map(trace.full_raise, outs))
    out_vals, out_names = unzip2((t.val, t.axis_name) for t in out_tracers)
    del master, out_tracers
  # Outputs that ended up unmapped are broadcast to the chunked leading axis.
  out_vals = [broadcast(x, chunk_size, 0) if d is not_mapped else x
              for x, d in zip(out_vals, out_names)]
  yield out_vals
@lu.transformation_with_aux
def split_axis_subtrace(master, names, *vals):
  """Resume a split-axis trace inside a call/map primitive; auxiliary output
  is the per-output axis names (not_mapped for unmapped outputs)."""
  trace = SplitAxisTrace(master, core.cur_sublevel())
  outs = yield list(map(partial(SplitAxisTracer, trace), names, vals)), {}
  out_tracers = list(map(trace.full_raise, outs))
  out_vals, out_names = unzip2((t.val, t.axis_name) for t in out_tracers)
  yield out_vals, out_names
@contextmanager
def add_chunk_to_axis_env(axis_name, soft_trace, soft_size):
  """Temporarily attach a soft (software-mapped) chunk to the frame for
  `axis_name` in this thread's dynamic axis env.

  Args:
    axis_name: the mapped axis name to annotate.
    soft_trace: the SplitAxisTrace implementing the software-mapped axis.
    soft_size: int, the chunk size of the software-mapped axis.
  """
  frame = _thread_local_state.dynamic_axis_env[axis_name]
  frame.soft_trace = soft_trace
  frame.soft_size = soft_size
  try:
    yield
  finally:
    # Fixed: previously the reset ran unconditionally after a bare yield, so an
    # exception in the traced body left the soft trace/size set on the frame
    # (leaking the trace via the thread-local env). Mirror the try/finally used
    # by extend_dynamic_axis_env.
    frame.soft_trace = None
    frame.soft_size = None
class SplitAxisTracer(core.Tracer):
  """Tracer carrying a value plus the axis name it is mapped along
  (or `not_mapped` if the value has no software-mapped leading axis)."""
  def __init__(self, trace, axis_name, val):
    self.trace = trace
    self.axis_name = axis_name
    self.val = val
  @property
  def aval(self):
    aval = raise_to_shaped(core.get_aval(self.val))
    if self.axis_name is not_mapped:
      return aval
    else:
      # Mapped values expose the per-chunk shape (leading axis dropped).
      return ShapedArray(aval.shape[1:], aval.dtype)
  def full_lower(self):
    # Unmapped tracers can be lowered out of this trace entirely.
    if self.axis_name is not_mapped:
      return core.full_lower(self.val)
    else:
      return self
class SplitAxisTrace(core.Trace):
  """Trace implementing the software-mapped axis of split_axis: batching-like
  for ordinary primitives, special rules for pmap collectives."""
  def pure(self, val):
    return SplitAxisTracer(self, not_mapped, val)
  def lift(self, val):
    return SplitAxisTracer(self, not_mapped, val)
  def sublift(self, val):
    return SplitAxisTracer(self, val.axis_name, val.val)
  def process_primitive(self, primitive, tracers, params):
    vals_in, names_in = unzip2((t.val, t.axis_name) for t in tracers)
    if primitive is axis_index_p:
      # Combine the hard (device) index with the soft per-chunk offsets to
      # produce the full logical axis indices for this chunk.
      dummy, = vals_in
      hard_idx = primitive.bind(dummy, **params)
      val_out = hard_idx * params['soft_size'] + onp.arange(params['soft_size'])
      return SplitAxisTracer(self, params['axis_name'], val_out)
    elif all(axis_name is not_mapped for axis_name in names_in):
      # Nothing mapped: evaluate outside this trace.
      return primitive.bind(*vals_in, **params)
    else:
      # All mapped inputs must share a single axis name.
      name, = set(n for n in names_in if n is not not_mapped)
      if primitive in xla.parallel_translations:
        # if it's a pmap collective primitive, do something special
        if name == params['axis_name']:
          # if the name matches this tracer's name, apply the split_axis rule
          try:
            rule = split_axis_rules[primitive]
          except KeyError:
            msg = "split_axis for {} not implemented. Open a feature request!"
            raise NotImplementedError(msg.format(primitive))
          which_mapped = [n is not not_mapped for n in names_in]
          val_out, is_mapped = rule(vals_in, which_mapped, **params)
          name_out = name if is_mapped else not_mapped
          return SplitAxisTracer(self, name_out, val_out)
        else:
          # if not, bind the primitive without any processing
          val_out = primitive.bind(*vals_in, **params)
          return SplitAxisTracer(self, name, val_out)
      else:
        # if it's not a pmap collective primitive, act just like batching
        rule = batching.get_primitive_batcher(primitive)
        axes_in = [n if n is not_mapped else 0 for n in names_in]
        val_out, axis_out = rule(vals_in, axes_in, **params)
        def new_tracer(x, a):
          if a is not_mapped:
            return SplitAxisTracer(self, not_mapped, x)
          else:
            # Normalize the mapped axis back to the leading position.
            return SplitAxisTracer(self, name, batching.moveaxis(x, a, 0))
        if primitive.multiple_results:
          return [new_tracer(x, a) for x, a in zip(val_out, axis_out)]
        else:
          return new_tracer(val_out, axis_out)
  def process_call(self, call_primitive, f, tracers, params):
    assert call_primitive.multiple_results
    if call_primitive in pe.map_primitives:
      return self.process_map(call_primitive, f, tracers, params)
    else:
      vals, names = unzip2((t.val, t.axis_name) for t in tracers)
      if all(name is not_mapped for name in names):
        return call_primitive.bind(f, *vals, **params)
      else:
        # Resume this trace inside the called function.
        f, names_out = split_axis_subtrace(f, self.master, names)
        vals_out = call_primitive.bind(f, *vals, **params)
        return [SplitAxisTracer(self, a, x) for a, x in zip(names_out(), vals_out)]
  def process_map(self, map_primitive, f, tracers, params):
    vals, names = unzip2((t.val, t.axis_name) for t in tracers)
    if all(name is not_mapped for name in names):
      return map_primitive.bind(f, *vals, **params)
    else:
      # because the map primitive maps over leading axes, we need to transpose
      # the software-mapped axis on any mapped arguments to be the second axis;
      # then we call the map primitive and resume the trace under the call
      vals_trans = [batching.moveaxis(x, 0, 1) if d is not not_mapped else x
                    for x, d in zip(vals, names)]
      f, names_out = split_axis_subtrace(f, self.master, names)
      vals_out_trans = map_primitive.bind(f, *vals_trans, **params)
      vals_out = [batching.moveaxis(x, 1, 0) if d is not not_mapped else x
                  for x, d in zip(vals_out_trans, names_out())]
      return [SplitAxisTracer(self, a, x) for a, x in zip(names_out(), vals_out)]
  def post_process_call(self, call_primitive, out_tracer, params):
    val, name = out_tracer.val, out_tracer.axis_name
    master = self.master
    def todo(x):
      # Re-wrap the value in a fresh trace when it escapes the call.
      trace = SplitAxisTrace(master, core.cur_sublevel())
      return SplitAxisTracer(trace, name, x)
    return val, todo
# Registry of split-axis rules for collective primitives (filled elsewhere).
split_axis_rules = {}
| 41.104914
| 101
| 0.721117
|
4a142bea80acebce2114d51813b42ac41121e091
| 1,087
|
py
|
Python
|
logging_0330.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
logging_0330.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
logging_0330.py
|
Jianyang-Hu/numpypractice
|
f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @version : Python3.6
# @Time    : 2017/3/30 10:12
# @Author  : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File    : logging_0330.py
# @Software: PyCharm
"""
The logging module consists of four main components:
  Loggers:    the interface that application code uses directly
  Handlers:   route the records produced by loggers to their destinations
  Filters:    decide which log records get output
  Formatters: control the output format

Which tool to use for which task:
  Reporting normal script/program usage in the console  -> print
  Reporting an event (e.g. a state change)              -> logging.info() / logging.debug()
  A specific warning-level event occurred               -> logging.warning()
  A specific error occurred                             -> raise
  A specific error occurred but the program must not
  exit because of it (e.g. a daemon process)            -> logging.error(), logging.exception(), logging.critical()
"""
import logging
# logging.basicConfig(): create a handler with the given Formatter, apply the
# basic configuration, and attach it to the root logger.
logging.basicConfig(filename='log.log',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(module)s : %(message)s',
                    # Fixed: datefmt was '%Y - %a - %d %H:%H:%S %p', which
                    # printed the hour twice; '%M' is the minutes directive.
                    datefmt='%Y - %a - %d %H:%M:%S %p',
                    level=logging.INFO
                    )
logging.critical('c')
logging.fatal('f')
logging.error('e')
logging.warning('w')
logging.debug('d')  # not written: DEBUG is below the configured INFO level
logging.log(logging.INFO, '333')
| 27.175
| 97
| 0.638454
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.