# Dataset-export residue (table header), not Python code:
# code stringlengths 281 23.7M
# |---|
class ifcfg():
    """Parser/rewriter for shell-style KEY=VALUE files (ifcfg format).

    Values may be double-quoted in the file; quotes are stripped on read.
    ``write()`` rewrites the file in place through a temp file, preserving
    comments and blank lines, and keeps a ``<name>.bak`` backup.  Relies on
    module-level ``log``, ``os``, ``io``, ``shutil`` and ``tempfile``.
    """
    def __init__(self, filename):
        self._config = {}    # key -> current value
        self._deleted = []   # keys to drop on the next write()
        self.filename = filename
        self.clear()
    def clear(self):
        # Forget all parsed values and pending deletions.
        self._config = {}
        self._deleted = []
    def cleanup(self):
        # NOTE(review): unlike clear(), this leaves _deleted untouched --
        # confirm the asymmetry is intentional.
        self._config.clear()
    def get(self, key):
        # Returns None for unknown keys.
        return self._config.get(key.strip())
    def set(self, key, value):
        _key = key.strip()
        self._config[_key] = value.strip()
        # Setting a key cancels any pending deletion of it.
        if (_key in self._deleted):
            self._deleted.remove(_key)
    def __str__(self):
        # One KEY=value per line, insertion order, no trailing newline.
        s = ''
        for (key, value) in self._config.items():
            if s:
                s += '\n'
            s += ('%s=%s' % (key, value))
        return s
    def read(self):
        """Load the file, replacing previously parsed state.

        Skips comments (# or ;), malformed lines and empty values; on a
        duplicate key the first definition wins and a warning is logged.
        Re-raises whatever open() raised when the file cannot be opened.
        """
        self.clear()
        try:
            f = open(self.filename, 'r')
        except Exception as msg:
            log.error("Failed to load '%s': %s", self.filename, msg)
            raise
        for line in f:
            if (not line):
                break
            line = line.strip()
            if ((len(line) < 1) or (line[0] in ['#', ';'])):
                continue
            pair = [x.strip() for x in line.split('=', 1)]
            if (len(pair) != 2):
                continue
            # Strip one pair of surrounding double quotes, if present.
            if ((len(pair[1]) >= 2) and pair[1].startswith('"') and pair[1].endswith('"')):
                pair[1] = pair[1][1:(- 1)]
            if (pair[1] == ''):
                continue
            elif (self._config.get(pair[0]) is not None):
                log.warning("%s: Duplicate option definition: '%s'", self.filename, line.strip())
                continue
            self._config[pair[0]] = pair[1]
        f.close()
    def write(self):
        """Rewrite the config file with the current in-memory state.

        Existing lines are copied through; known keys are updated in place,
        keys in ``_deleted`` (and duplicate occurrences) are dropped, and
        keys never seen in the file are appended.  The rewrite goes through
        a temp file in the same directory; nothing is touched when no
        change was needed.
        """
        if (len(self._config) < 1):
            return
        done = []  # keys already emitted (first occurrence wins)
        try:
            temp_file = tempfile.NamedTemporaryFile(mode='wt', prefix=('%s.' % os.path.basename(self.filename)), dir=os.path.dirname(self.filename), delete=False)
        except Exception as msg:
            log.error(('Failed to open temporary file: %s' % msg))
            raise
        modified = False  # did the rewrite change anything?
        empty = False     # was the last emitted line blank? (collapses blank runs)
        try:
            f = io.open(self.filename, mode='rt', encoding='UTF-8')
        except Exception as msg:
            if os.path.exists(self.filename):
                log.error(("Failed to open '%s': %s" % (self.filename, msg)))
                raise
            else:
                # No existing file: all keys are appended below.
                f = None
        else:
            for line in f:
                if (not line):
                    break
                line = line.strip('\n')
                if (len(line) < 1):
                    # Collapse consecutive blank lines into one.
                    if (not empty):
                        temp_file.write('\n')
                    empty = True
                elif (line[0] == '#'):
                    # Comments pass through unchanged.
                    empty = False
                    temp_file.write(line)
                    temp_file.write('\n')
                else:
                    p = line.split('=', 1)
                    if (len(p) != 2):
                        # Not KEY=VALUE; copy verbatim.
                        empty = False
                        temp_file.write((line + '\n'))
                        continue
                    key = p[0].strip()
                    value = p[1].strip()
                    if ((len(value) >= 2) and value.startswith('"') and value.endswith('"')):
                        value = value[1:(- 1)]
                    if (key not in done):
                        if ((key in self._config) and (self._config[key] != value)):
                            # Value changed in memory: emit the new one.
                            empty = False
                            temp_file.write(('%s=%s\n' % (key, self._config[key])))
                            modified = True
                        elif (key in self._deleted):
                            # Deleted key: drop the line.
                            modified = True
                        else:
                            empty = False
                            temp_file.write((line + '\n'))
                        done.append(key)
                    else:
                        # Duplicate occurrence of an already-emitted key: drop.
                        modified = True
        if (len(self._config) > 0):
            # Append keys never present in the original file.
            for (key, value) in self._config.items():
                if (key in done):
                    continue
                if (not empty):
                    # NOTE(review): sets the flag but writes no blank
                    # separator line -- possibly a missing
                    # temp_file.write('\n'); confirm intent.
                    empty = True
                temp_file.write(('%s=%s\n' % (key, value)))
                modified = True
        if f:
            f.close()
        temp_file.close()
        if (not modified):
            # Nothing changed: discard the temp file, keep the original.
            os.remove(temp_file.name)
            return
        if os.path.exists(self.filename):
            try:
                shutil.copy2(self.filename, ('%s.bak' % self.filename))
            except Exception as msg:
                os.remove(temp_file.name)
                raise IOError(("Backup of '%s' failed: %s" % (self.filename, msg)))
        try:
            shutil.move(temp_file.name, self.filename)
        except Exception as msg:
            os.remove(temp_file.name)
            raise IOError(("Failed to create '%s': %s" % (self.filename, msg)))
        else:
            # 384 == 0o600: owner read/write only.
            os.chmod(self.filename, 384)
class ChoiceTypeLikeFilter(FilterLike):
    """Substring filter over a choice-typed column.

    The user's query is matched case-insensitively against the
    human-readable choice labels; rows whose stored value is one of the
    matching choices pass the filter.
    """

    def __init__(self, column, name, options=None, **kwargs):
        super(ChoiceTypeLikeFilter, self).__init__(column, name, options, **kwargs)

    def apply(self, query, user_query, alias=None):
        column = self.get_column(alias)
        matched_values = []
        if user_query:
            needle = user_query.lower()
            choices = column.type.choices
            if isinstance(choices, enum.EnumMeta):
                # Enum-backed choices: match on member names, keep values.
                matched_values = [member.value for member in choices if (needle in member.name.lower())]
            else:
                # (stored, label) pairs: match on labels, keep stored values.
                matched_values = [stored for (stored, label) in choices if (needle in label.lower())]
        if matched_values:
            return query.filter(column.in_(matched_values))
        return query
def test_gauss_tet3():
    """Integrate the cubic polynomial ``f3`` over a tetrahedron at
    quadrature orders 1-4 and check orders 3 and 4 agree (a rule of
    order >= 3 must integrate a cubic exactly)."""
    print('3rd Order Polynomial')
    print('Tetrahedron')
    integrals = []
    for order in (1, 2, 3, 4):
        gaussTetrahedron.setOrder(order)
        value = dot(f3(gaussTetrahedron.points), gaussTetrahedron.weights)
        print(value)
        integrals.append(value)
    npt.assert_almost_equal(integrals[2], integrals[3])
class HiddenMarkovModel():
    """Gaussian-emission hidden Markov model declared as random variables.

    NOTE(review): the bare ``_variable`` lines below look like decorators
    (``@...variable``) whose ``@`` prefix was lost in extraction -- as
    written they are no-op expression statements that will raise NameError
    at class-creation time unless ``_variable`` exists at module level.
    Confirm against the original source.
    """
    def __init__(self, N: int, K: int, concentration: float, mu_loc: float, mu_scale: float, sigma_shape: float, sigma_rate: float) -> None:
        self.N = N  # sequence length
        self.K = K  # number of hidden states
        self.concentration = concentration  # Dirichlet concentration for Theta
        self.mu_loc = mu_loc  # prior mean of emission means
        self.mu_scale = mu_scale  # prior scale of emission means
        self.sigma_shape = sigma_shape  # Gamma shape for emission scales
        self.sigma_rate = sigma_rate  # Gamma rate for emission scales
    _variable
    def Theta(self, k):
        # Transition distribution out of state k: symmetric Dirichlet over K states.
        return dist.Dirichlet(((torch.ones(self.K) * self.concentration) / self.K))
    _variable
    def Mu(self, k):
        # Prior over the emission mean of state k.
        return dist.Normal(self.mu_loc, self.mu_scale)
    _variable
    def Sigma(self, k):
        # Prior over the emission scale of state k.
        return dist.Gamma(self.sigma_shape, self.sigma_rate)
    _variable
    def X(self, n: int):
        # Hidden-state chain: starts deterministically in state 0, then
        # transitions according to Theta of the previous state.
        if (n == 0):
            return dist.Categorical(torch.tensor(([1.0] + ([0.0] * (self.K - 1)))))
        else:
            return dist.Categorical(self.Theta(self.X((n - 1)).item()))
    _variable
    def Y(self, n: int):
        # Observation at step n: Normal with the current state's mean/scale.
        return dist.Normal(self.Mu(self.X(n).item()), self.Sigma(self.X(n).item()))
def prepare_file_offset_table(data_file_path):
    """Build the file-offset table for ``data_file_path`` when missing/stale.

    Returns the total line count of the data file when a table was
    (re)built, or ``None`` when the existing table is still valid.
    """
    file_offset_table = FileOffsetTable.create_for_data_file(data_file_path)
    if file_offset_table.is_valid():
        return None
    console.info(('Preparing file offset table for [%s] ... ' % data_file_path), end='', flush=True)
    line_number = 0
    with file_offset_table:
        with open(data_file_path, encoding='utf-8') as data_file:
            while True:
                line = data_file.readline()
                if (len(line) == 0):
                    break
                line_number += 1
                # Checkpoint every 50k lines so later seeks can start nearby.
                if ((line_number % 50000) == 0):
                    file_offset_table.add_offset(line_number, data_file.tell())
    console.println('[OK]')
    return line_number
class AbstractPopStatsByEthosDataContainer(AbstractPopStatsDataContainer, abc.ABC):
    """Pop-stats container keyed by ethos: iterates a country's
    ``pop_stats_ethos`` records and keys each record by its ``ethos``."""
    def _iterate_popstats(self, cd: datamodel.CountryData) -> Iterable[datamodel.PopStatsByEthos]:
        # All by-ethos pop statistics recorded for this country snapshot.
        return iter(cd.pop_stats_ethos)
    def _get_key_from_popstats(self, ps: PopStatsType) -> str:
        assert isinstance(ps, datamodel.PopStatsByEthos)
        return ps.ethos
class SqliteInterface(SqlInterfaceCursor, AbsSqliteInterface):
    """Cursor-based SQL interface backed by sqlite3 (in-memory by default)."""
    target = sqlite

    def __init__(self, filename=None, print_sql=False):
        self._filename = filename
        super().__init__(print_sql)

    def _create_connection(self):
        # Imported lazily so sqlite3 is only required when this backend is used.
        import sqlite3
        try:
            connection = sqlite3.connect(self._filename or ':memory:')
        except sqlite3.OperationalError as exc:
            raise ConnectError(*exc.args) from exc

        def sqlite_throw(x):
            raise Exception(x)

        # SQLite lacks these SQL functions/aggregates natively; supply them
        # from Python.
        connection.create_function('power', 2, operator.pow)
        connection.create_function('_pql_throw', 1, sqlite_throw)
        connection.create_aggregate('_pql_product', 1, _SqliteProduct)
        connection.create_aggregate('stddev', 1, _SqliteStddev)
        return connection

    def quote_name(self, name):
        # SQLite accepts bracket-quoted identifiers (MS-Access style).
        return '[%s]' % (name,)
class PluginLoader():
    """Applies selected plugins' post-prompt hooks to a prompt generator."""

    def load_plugins(self, generator: PluginPromptGenerator, my_plugins: List[str]) -> PluginPromptGenerator:
        """Run ``post_prompt`` of every selected plugin that supports it
        over ``generator`` and return the resulting generator."""
        logger.info(f'load_select_plugin:{my_plugins}')
        selected = (p for p in self.plugins if (p._name in my_plugins))
        for plugin in selected:
            if plugin.can_handle_post_prompt():
                generator = plugin.post_prompt(generator)
        return generator
class TestASGCriterion(unittest.TestCase):
    """Tests for the ASG (Auto SeGmentation) loss: forward/backward values,
    Viterbi decoding, and full gradients via gradcheck."""
    def setUp(self):
        # Prefer GPU when one is available so both device paths are exercised.
        self.device = torch.device('cpu')
        if (torch.cuda.device_count() > 0):
            self.device = torch.device('cuda')
    def test_fwd_bwd(self):
        """Forward loss and emission/transition gradients match precomputed
        reference values (tolerance 1e-3/1e-4)."""
        T = 5  # time steps
        N = 6  # alphabet size
        B = 3  # batch size
        labels = [[2, 1, 5, 1, 3], [4, 3, 5], [3, 2, 2, 1]]
        # Fixed (B, T, N) emission scores; gradients flow back into them.
        emissions = torch.tensor([[[(- 0.434), (- 0.0254), 0.3667, 0.418, (- 0.3805), (- 0.1707)], [0.106, 0.3631, (- 0.1122), (- 0.3825), (- 0.0031), (- 0.3801)], [0.0443, (- 0.3795), 0.3194, (- 0.313), 0.0094, 0.156], [0.1252, 0.2877, 0.1997, (- 0.4554), 0.2774, (- 0.2526)], [(- 0.4001), (- 0.2402), 0.1295, 0.0172, 0.1805, (- 0.3299)]], [[0.3298, (- 0.2259), (- 0.0959), 0.4909, 0.2996, (- 0.2543)], [(- 0.2863), 0.3239, (- 0.3988), 0.0732, (- 0.2107), (- 0.4739)], [(- 0.0906), 0.048, (- 0.1301), 0.3975, (- 0.3317), (- 0.1967)], [0.4372, (- 0.2006), 0.0094, 0.3281, 0.1873, (- 0.2945)], [0.2399, 0.032, (- 0.3768), (- 0.2849), (- 0.2248), 0.3186]], [[0.0225, (- 0.3867), (- 0.1929), (- 0.2904), (- 0.4958), (- 0.2533)], [0.4001, (- 0.1517), (- 0.2799), (- 0.2915), 0.4198, 0.4506], [0.1446, (- 0.4753), (- 0.0711), 0.2876, (- 0.1851), (- 0.1066)], [0.2081, (- 0.119), (- 0.3902), (- 0.1668), 0.1911, (- 0.2848)], [(- 0.3846), 0.1175, 0.1052, 0.2172, (- 0.0362), 0.3055]]], device=self.device, requires_grad=True)
        # (N+1, N) transition scores, initialised to zero.
        transitions = torch.zeros(((N + 1), N), device=self.device, requires_grad=True)
        fwd = asg.ASGLoss(emissions, transitions, labels)
        self.assertAlmostEqual(fwd.item(), 7.47995, places=4)
        fwd.backward()
        # Reference gradients; divided by B because the loss averages over
        # the batch.
        expected_grad = torch.tensor([[[0.106, 0.1595, (- 0.7639), 0.2485, 0.1118, 0.138], [0.1915, (- 0.7524), 0.1539, 0.1175, 0.1717, 0.1178], [0.1738, 0.1137, 0.2288, 0.1216, 0.1678, (- 0.8057)], [0.1766, (- 0.7923), 0.1902, 0.0988, 0.2056, 0.121], [0.1212, 0.1422, 0.2059, (- 0.816), 0.2166, 0.13]], [[0.2029, 0.1164, 0.1325, 0.2383, (- 0.8032), 0.1131], [0.1414, 0.2602, 0.1263, (- 0.3441), (- 0.3009), 0.1172], [0.1557, 0.1788, 0.1496, (- 0.5498), 0.014, 0.0516], [0.2306, 0.1219, 0.1503, (- 0.4244), 0.1796, (- 0.2579)], [0.2149, 0.1745, 0.116, 0.1271, 0.135, (- 0.7675)]], [[0.2195, 0.1458, 0.177, (- 0.8395), 0.1307, 0.1666], [0.2148, 0.1237, (- 0.6613), (- 0.1223), 0.2191, 0.2259], [0.2002, 0.1077, (- 0.8386), 0.231, 0.144, 0.1557], [0.2197, (- 0.1466), (- 0.5742), 0.151, 0.216, 0.1342], [0.105, (- 0.8265), 0.1714, 0.1917, 0.1488, 0.2094]]], device=self.device)
        expected_grad = (expected_grad / B)
        self.assertTrue(emissions.grad.allclose(expected_grad, rtol=0.001))
        expected_trans_grad = (torch.tensor([[0.399, 0.3396, 0.3486, 0.3922, 0.3504, 0.3155], [0.3666, 0.0116, (- 1.6678), 0.3737, 0.3361, (- 0.7152)], [0.3468, 0.3163, (- 1.1583), (- 0.6803), 0.3216, 0.2722], [0.3694, (- 0.6688), 0.3047, (- 0.8531), (- 0.6571), 0.287], [0.3866, 0.3321, 0.3447, 0.3664, (- 0.2163), 0.3039], [0.364, (- 0.6943), 0.2988, (- 0.6722), 0.3215, (- 0.186)]], device=self.device).view(N, N) / B)
        # Row 0 of transitions (start scores) is excluded from the reference.
        self.assertTrue(transitions.grad[1:].allclose(expected_trans_grad, rtol=0.001))
    def test_viterbi(self):
        """Viterbi decoding recovers the expected best path."""
        T = 4
        N = 3
        input_list = [0, 0, 0, 7, 0, 5, 4, 3, 0, 5, 8, 5, 0, 5, 4, 3]
        trans_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 2, 0, 0]
        expected_path = [2, 1, 0]
        num_replabels = 1
        use_garbage = False
        asg_criterion = asg.ASG(N, num_replabels, use_garbage)
        # Overwrite the criterion's (learned) transitions with fixed scores.
        for param in asg_criterion.parameters():
            param.data = torch.tensor(trans_list, device=self.device, dtype=torch.float32).view(((N + num_replabels) + 1), (N + num_replabels))
        inputs = torch.tensor(input_list, device=self.device, dtype=torch.float32).view(1, T, (N + num_replabels))
        path = asg_criterion.viterbi(inputs)[0].tolist()
        self.assertTrue((path == expected_path))
    # NOTE(review): the bare string below looks like a stripped decorator
    # (probably @unittest.skip("Enable when gtn supports retain grad
    # graph.")); as written it is a no-op and test_jacobian WILL run --
    # confirm against the original source.
    ('Enable when gtn supports retain grad graph.')
    def test_jacobian(self):
        """Numerical gradcheck of the loss (both 'none' and 'mean' reductions)."""
        T = 20
        N = 15
        B = 5
        tgt = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 3], [0, 2, 3], [0, 2, 4, 6, 8], [0, 1, 0, 1]]
        def fn(inputs, transition):
            return asg.ASGLoss(inputs, transition, tgt)
        def fn_mean(inputs, transition):
            return asg.ASGLoss(inputs, transition, tgt, 'mean')
        inputs = torch.randn(B, T, N, dtype=torch.float, requires_grad=True)
        transitions = torch.randn((N + 1), N, dtype=torch.float, requires_grad=True)
        self.assertTrue(gradcheck(fn, (inputs, transitions), eps=0.01, rtol=0.001, atol=0.01))
        self.assertTrue(gradcheck(fn_mean, (inputs, transitions), eps=0.01, rtol=0.001, atol=0.01))
def compile_controlnet(pt_mod, batch_size=2, height=512, width=512, clip_chunks=1, dim=320, hidden_dim=768, use_fp16_acc=False, convert_conv_to_gemm=False, model_name='ControlNetModel', constants=False):
    """Compile a PyTorch ControlNet into an AITemplate model saved under ./tmp.

    Maps the PyTorch weights onto the AIT module, declares the four runtime
    inputs (latents, timesteps, text embeddings, conditioning image) and
    invokes the AIT compiler.  When ``constants`` is true the mapped weights
    are baked into the compiled artifact; otherwise they must be supplied at
    runtime.
    """
    # Batch is doubled -- presumably for the cond/uncond pair of
    # classifier-free guidance; confirm against the caller.
    batch_size = (batch_size * 2)
    ait_mod = ait_ControlNetModel()
    ait_mod.name_parameter_tensor()
    # Inference only: weights are read, not trained.
    pt_mod = pt_mod.eval()
    params_ait = map_controlnet_params(pt_mod, dim)
    # Text-embedding input gets a dynamic batch (1..8) and a dynamic sequence
    # length in multiples of the 77-token CLIP context.
    clip_batch_size = IntVar(values=(1, 8), name='batch_size')
    clip_chunks = (77, (77 * clip_chunks))
    embedding_size = IntVar(values=list(clip_chunks), name='embedding_size')
    # Latents: NHWC, 1/8 spatial resolution, 4 channels.
    latent_model_input_ait = Tensor([batch_size, (height // 8), (width // 8), 4], name='input0', is_input=True)
    timesteps_ait = Tensor([batch_size], name='input1', is_input=True)
    text_embeddings_pt_ait = Tensor([clip_batch_size, embedding_size, hidden_dim], name='input2', is_input=True)
    # Conditioning image: full-resolution NHWC RGB.
    controlnet_condition_ait = Tensor([batch_size, height, width, 3], name='input3', is_input=True)
    Y = ait_mod(latent_model_input_ait, timesteps_ait, text_embeddings_pt_ait, controlnet_condition_ait)
    mark_output(Y)
    target = detect_target(use_fp16_acc=use_fp16_acc, convert_conv_to_gemm=convert_conv_to_gemm)
    compile_model(Y, target, './tmp', model_name, constants=(params_ait if constants else None))
class SimpleEditor(Editor):
    """TraitsUI date editor backed by a QDateEdit widget."""
    def init(self, parent):
        """Create and configure the QDateEdit control."""
        self.control = QtGui.QDateEdit()
        # Optional custom display format supplied by the factory.
        if hasattr(self.factory, 'qt_date_format'):
            self.control.setDisplayFormat(self.factory.qt_date_format)
        if (not self.factory.allow_future):
            self.control.setMaximumDate(QtCore.QDate.currentDate())
        # Dynamic bounds: resolve extended trait names to current values
        # before the static bounds below are applied.
        if getattr(self.factory, 'maximum_date_name', None):
            (obj, extended_name, func) = self.parse_extended_name(self.factory.maximum_date_name)
            self.factory.maximum_date = func()
        if getattr(self.factory, 'minimum_date_name', None):
            (obj, extended_name, func) = self.parse_extended_name(self.factory.minimum_date_name)
            self.factory.minimum_date = func()
        if getattr(self.factory, 'minimum_date', None):
            min_date = QtCore.QDate(self.factory.minimum_date.year, self.factory.minimum_date.month, self.factory.minimum_date.day)
            self.control.setMinimumDate(min_date)
        if getattr(self.factory, 'maximum_date', None):
            max_date = QtCore.QDate(self.factory.maximum_date.year, self.factory.maximum_date.month, self.factory.maximum_date.day)
            self.control.setMaximumDate(max_date)
        self.control.dateChanged.connect(self.update_object)
    def dispose(self):
        """Disconnect the Qt signal before base-class teardown."""
        if (self.control is not None):
            self.control.dateChanged.disconnect(self.update_object)
        super().dispose()
    def update_editor(self):
        """Push the trait value into the widget (skipped when value is falsy)."""
        value = self.value
        if value:
            q_date = QtCore.QDate(value.year, value.month, value.day)
            self.control.setDate(q_date)
    def update_object(self, q_date):
        """Write the widget's QDate back to the trait as a datetime.date."""
        self.value = datetime.date(q_date.year(), q_date.month(), q_date.day())
def get_dcs(request, sr=(), pr=(), order_by=('name',), annotate=None, extra=None):
    """Return the datacenters visible to ``request.user`` as a queryset.

    Staff see every non-deleted DC; other users see public DCs, their own
    private DCs, and DCs shared through one of their roles.  ``sr``/``pr``
    feed select_related/prefetch_related; ``annotate``/``extra`` are passed
    through to the queryset.
    """
    qs = Dc.objects.distinct()
    if sr:
        qs = qs.select_related(*sr)
    if pr:
        qs = qs.prefetch_related(*pr)
    if request.user.is_staff:
        qs = qs.exclude(access=Dc.DELETED).order_by(*order_by)
    else:
        visible = (Q(access=Dc.PUBLIC) | (Q(owner=request.user.pk) & Q(access=Dc.PRIVATE)) | Q(roles__in=request.user.roles.all()))
        qs = qs.filter(visible).order_by(*order_by)
    if annotate:
        qs = qs.annotate(**annotate)
    if extra:
        qs = qs.extra(**extra)
    return qs
def test_working_hours_argument_value_is_dictionary_of_other_formatted_data():
    """A malformed working_hours dict must raise a descriptive TypeError."""
    with pytest.raises(TypeError) as exc_info:
        WorkingHours(working_hours={'not': 'properly valued'})
    expected = 'WorkingHours.working_hours should be a dictionary with keys "mon, tue, wed, thu, fri, sat, sun" and the values should a list of lists of two integers like [[540, 720], [800, 1080]], not str'
    assert (str(exc_info.value) == expected)
_unknown_type()
# NOTE(review): the bare call above and the undecorated ``parse_value(cls,
# ...)`` below look like stripped decorators (e.g. a register-unknown-type
# class decorator and ``@classmethod``) -- confirm against the original
# source; as written the call's result is discarded and parse_value is a
# plain method.
class OptionDataUnknown(Option):
    """Fallback option type whose raw payload is kept untouched in ``buf``."""
    def __init__(self, buf, option_class=None, type_=None, length=0):
        super(OptionDataUnknown, self).__init__(option_class=option_class, type_=type_, length=length)
        self.buf = buf
    def parse_value(cls, buf):
        # Keep the unparsed bytes so they round-trip unchanged.
        return {'buf': buf}
    def serialize_value(self):
        return self.buf
def extractLagoontranslationsWordpressCom(item):
    """Map a feed item to a release message.

    Returns None for previews or titles without volume/chapter info,
    a release message when a known tag matches, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    tagmap = [
        ('S Rank Girl', 'My Daughter who Left for the Capital to Become an Adventurer Reached S Rank', 'translated'),
        ('My Body Is Completely Invincible', 'It Seems like My Body Is Completely Invincible', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def get_or_create_fides_user_device_id_provided_identity(db: Session, identity_data: Optional[Identity]) -> ProvidedIdentity:
    """Look up the ProvidedIdentity for the request's fides_user_device_id,
    creating one if none exists.

    Raises HTTP 422 when the device id is missing from the identity data.
    """
    if (not identity_data) or (not identity_data.fides_user_device_id):
        raise HTTPException(HTTP_422_UNPROCESSABLE_ENTITY, detail='Fides user device id not found in identity data')
    device_id = identity_data.fides_user_device_id
    identity = get_fides_user_device_id_provided_identity(db, device_id)
    if identity:
        return identity
    # No existing record: persist a new identity, storing both a hashed and
    # an encrypted copy of the device id.
    return ProvidedIdentity.create(db, data={'privacy_request_id': None, 'field_name': ProvidedIdentityType.fides_user_device_id.value, 'hashed_value': ProvidedIdentity.hash_value(device_id), 'encrypted_value': {'value': device_id}})
class JSON():
    """Registry-based JSON (de)serialization for classes exposing
    ``to_json``/``from_json``.

    NOTE(review): ``register``, ``dumps`` and ``loads`` all take ``cls`` but
    carry no ``@classmethod`` decorator -- the decorators were likely
    stripped in extraction; confirm against the original source.
    """
    # Maps class name -> class for round-tripping.
    classes: Dict[(Any, Any)] = {}
    def register(cls, *classes):
        # Make the given classes encodable/decodable by name.
        for klass in classes:
            cls.classes[klass.__name__] = klass
    def dumps(cls, obj, **kwargs):
        # Registered objects are encoded as {'_sv': (class_name, payload)}.
        def encode_obj(obj):
            class_name = obj.__class__.__name__
            if (class_name not in cls.classes):
                raise TypeError(f'object of type {class_name} is not JSON serializable')
            return {'_sv': (class_name, obj.to_json())}
        kwargs['default'] = encode_obj
        return json.dumps(obj, **kwargs)
    def loads(cls, s, **kwargs):
        # Rebuild registered objects from the '_sv' marker.
        def decode_obj(obj):
            if ('_sv' in obj):
                (class_name, ser) = obj['_sv']
                obj = cls.classes[class_name].from_json(ser)
            return obj
        kwargs['object_hook'] = decode_obj
        return json.loads(s, **kwargs)
def banner():
    """Return the multi-line ASCII-art startup banner.

    Fixed from the original, where a local variable named ``banner``
    shadowed the function itself; the literal is now returned directly.
    """
    return "\n _____ _____ _ _ _____\n / ___/ ___| | | / ___|\n \\ `--.\\ `--.| |_| \\ `--. ___ __ _ _ __\n `--. \\`--. | _ |`--. \\/ __/ _` | '_ \\\n /\\__/ /\\__/ | | | /\\__/ | (_| (_| | | | |\n \\____/\\____/\\_| |_\\____/ \\___\\__,_|_| |_|\n evict\n "
def edit_default_save_file_path(_: Any) -> None:
    """Ask the user for a new default save directory and persist it
    (with 'SAVE_DATA' appended) to the config."""
    locale_manager = locale_handler.LocalManager.from_config()
    current_path = get_config_value('DEFAULT_SAVE_FILE_PATH')
    prompt = locale_manager.search_key('select_default_save_path')
    chosen_dir = helper.select_dir(prompt, current_path)
    set_config_setting('DEFAULT_SAVE_FILE_PATH', os.path.join(chosen_dir, 'SAVE_DATA'))
class OptionPlotoptionsArearangeSonificationContexttracksMappingTremolo(Options):
    """Tremolo mapping options (depth/speed) for arearange sonification
    context tracks.

    NOTE(review): these accessors may originally have carried ``@property``
    decorators stripped in extraction -- confirm against the original source.
    """
    def depth(self) -> 'OptionPlotoptionsArearangeSonificationContexttracksMappingTremoloDepth':
        # Sub-configuration controlling tremolo depth.
        return self._config_sub_data('depth', OptionPlotoptionsArearangeSonificationContexttracksMappingTremoloDepth)
    def speed(self) -> 'OptionPlotoptionsArearangeSonificationContexttracksMappingTremoloSpeed':
        # Sub-configuration controlling tremolo speed.
        return self._config_sub_data('speed', OptionPlotoptionsArearangeSonificationContexttracksMappingTremoloSpeed)
class OptionPlotoptionsAreaSonificationDefaultinstrumentoptionsMappingLowpassResonance(Options):
    """Lowpass-resonance mapping options for area sonification.

    NOTE(review): each name is defined twice in getter/setter shape; the
    ``@property`` / ``@<name>.setter`` decorators were almost certainly
    stripped in extraction -- as written, each later def simply shadows the
    earlier one.  Confirm against the original source.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class TestCredentialsOnline():
    """Live tests of the Credentials auth flows against the real API.

    Requires the ``app_env`` (client id/secret) and ``user_refresh``
    fixtures; each synchronous test has an asyncio twin.
    """
    def test_request_client_token(self, app_env):
        c = Credentials(app_env[0], app_env[1])
        token = c.request_client_token()
        # Client-credentials tokens are not refreshable and carry no scopes.
        assert (token.refresh_token is None)
        assert (str(token.scope) == '')
        c.close()
    # FIX: the original bare ``.asyncio`` lines here were syntax errors --
    # stripped decorators restored as ``@pytest.mark.asyncio`` so the
    # coroutine tests are actually awaited by the test runner.
    @pytest.mark.asyncio
    async def test_async_request_client_token(self, app_env):
        c = Credentials(app_env[0], app_env[1], asynchronous=True)
        token = (await c.request_client_token())
        assert (token.refresh_token is None)
        assert (str(token.scope) == '')
        (await c.close())
    def test_refresh_user_token(self, app_env, user_refresh):
        c = Credentials(app_env[0], app_env[1])
        token = c.refresh_user_token(user_refresh)
        # User tokens are refreshable and scoped.
        assert (token.refresh_token is not None)
        assert (len(token.scope) > 0)
        c.close()
    @pytest.mark.asyncio
    async def test_async_refresh_user_token(self, app_env, user_refresh):
        c = Credentials(app_env[0], app_env[1], asynchronous=True)
        token = (await c.refresh_user_token(user_refresh))
        assert (token.refresh_token is not None)
        assert (len(token.scope) > 0)
        (await c.close())
    def test_bad_arguments_raises_error(self):
        c = Credentials('id', 'secret')
        with pytest.raises(HTTPError):
            c.request_client_token()
        c.close()
class DatasetDownload(APIView):
    """Download endpoint: serves a dataset the user owns or has purchased."""
    permission_classes = [IsAuthenticated]

    def get_object(self, pk):
        """Return dataset ``pk`` if owned or purchased by the requester;
        raise Http404 otherwise."""
        user = self.request.user
        try:
            matches = Dataset.objects.filter(pk=pk, user=user)
            if not matches:
                # Not the owner -- fall back to purchased datasets.
                matches = Dataset.objects.filter(pk=pk, purchased__id__exact=user.id)
            if matches:
                return matches[0]
            raise Dataset.DoesNotExist
        except Dataset.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Export the dataset to a temporary CSV and return it serialized."""
        dataset = self.get_object(pk)
        file_name = dataset.name + '.csv'
        dataset.download('media/tmp/' + file_name)
        dataset.file = 'tmp/' + file_name
        serializer = DatasetDownloadSerializer(dataset)
        return Response(serializer.data)
def load_overrides(path, custom=False):
    """Import ``overrides.py`` and apply each provider override.

    When ``custom`` is true, ``path`` is appended to ``sys.path`` first so a
    user-supplied overrides module can be found.  Failures are logged (with
    traceback) but never raised.
    """
    try:
        if custom:
            sys.path.append(path)
        from overrides import overrides
        log.debug('Imported overrides: %s', repr(overrides))
        for provider in overrides:
            update_definitions(provider, overrides[provider])
        log.info('Successfully loaded overrides from %s', os.path.join(path, 'overrides.py'))
    except Exception as e:
        import traceback
        log.error('Failed importing %soverrides: %s', ('custom ' if custom else ''), repr(e))
        # BUG FIX: map() is lazy in Python 3, so the original
        # ``map(log.error, ...)`` never actually logged the traceback.
        for tb_line in traceback.format_exc().split('\n'):
            log.error(tb_line)
class Downloader(QtCore.QObject):
    """Runs URL downloads on a Qt thread pool, reporting results via the
    signals exposed on ``self.com``."""
    def __init__(self, parent: Optional[QtCore.QObject]=None) -> None:
        super().__init__(parent=parent)
        self.com = Communicate()  # exposes on_download_finished / on_download_failed
        self.threadpool = QtCore.QThreadPool()
        self.worker: Optional[Worker] = None  # most recently submitted worker
    def get(self, url: str, timeout: float=30) -> None:
        """Start an asynchronous download of ``url``; completion/failure is
        reported through ``self.com``'s signals."""
        logger.debug('Download %s', url)
        self.worker = Worker(url=url, timeout=timeout)
        # Forward the worker's signals to this object's public signals.
        self.worker.com.on_download_finished.connect(self.com.on_download_finished)
        self.worker.com.on_download_failed.connect(self.com.on_download_failed)
        self.threadpool.start(self.worker)
class EndpointFactory(ModelFactory):
    """factory_boy factory producing Endpoint rows with sensible defaults."""
    class Meta():
        model = Endpoint
    # Random unique endpoint name per created instance.
    name = factory.LazyFunction((lambda : str(uuid.uuid4())))
    monitor_level = 1
    # Added one day ago so time-window queries include the row.
    time_added = factory.LazyFunction((lambda : (datetime.utcnow() - timedelta(days=1))))
    version_added = '1.0'
    # Evaluated at creation time (naive UTC, matching time_added).
    last_requested = factory.LazyFunction(datetime.utcnow)
def example():
    """Build the demo layout: an icon-row button above a compound
    two-line text button."""
    icon_row = ft.Row(
        [
            ft.Icon(name=ft.icons.FAVORITE, color='pink'),
            ft.Icon(name=ft.icons.AUDIOTRACK, color='green'),
            ft.Icon(name=ft.icons.BEACH_ACCESS, color='blue'),
        ],
        alignment=ft.MainAxisAlignment.SPACE_AROUND,
    )
    icons_button = ft.TextButton(width=150, content=icon_row)
    compound_body = ft.Column(
        [
            ft.Text(value='Compound button', size=20),
            ft.Text(value='This is secondary text'),
        ],
        alignment=ft.MainAxisAlignment.CENTER,
        spacing=5,
    )
    compound_button = ft.TextButton(content=ft.Container(content=compound_body, padding=ft.padding.all(10)))
    return ft.Column([icons_button, compound_button])
class PexpectWrapper(PopenSpawn):
    """pexpect PopenSpawn with helpers for ctrl-C, timed shutdown and
    multi-pattern expectation.

    NOTE(review): ``returncode`` takes only ``self`` -- it may have been a
    ``@property`` whose decorator was lost in extraction; confirm against
    the original source.
    """
    def __init__(self, *args, **kwargs):
        # New session (POSIX only) so signals reach the whole process group.
        if (platform.system() != 'Windows'):
            kwargs['preexec_fn'] = os.setsid
        super().__init__(*args, **kwargs)
    def control_c(self) -> None:
        # Short delay gives the child time to install its signal handlers.
        time.sleep(0.1)
        send_control_c(self.proc, True)
    def returncode(self) -> Optional[Union[(int, str)]]:
        # None while the child is still running.
        return self.proc.poll()
    def wait_to_complete(self, timeout: float=5) -> None:
        """Poll until the child exits; force-kill after ``timeout`` seconds
        and record 'Terminated!' as the exit status."""
        if (self.proc.poll() is not None):
            return
        start_time = time.time()
        while (((start_time + timeout) > time.time()) and (self.proc.poll() is None)):
            time.sleep(0.001)
        if (self.proc.poll() is None):
            self.terminate(force=True)
            self.wait()
            self.exitstatus = 'Terminated!'
    def expect_all(self, pattern_list: List[str], timeout: float=10, strict: bool=True) -> None:
        """Wait until every pattern is seen, in any order, within ``timeout``.

        ``strict`` selects exact-string matching over regex matching.
        Raises pexpect ``TIMEOUT`` when time runs out.
        """
        pattern_list = list(pattern_list)  # private copy; matched patterns are removed
        start_time = time.time()
        while pattern_list:
            time_spent = (time.time() - start_time)
            if (time_spent > timeout):
                raise TIMEOUT(timeout)
            if strict:
                idx = self.expect_exact(pattern_list, (timeout - time_spent))
            else:
                idx = self.expect(pattern_list, (timeout - time_spent))
            pattern_list.pop(idx)
    def wait_eof(self, timeout: float=10) -> None:
        # The child closing its output stream is treated as completion.
        self.expect(EOF, timeout=timeout)
    def terminate(self, *args, **kwargs) -> None:
        # Straight to SIGKILL; the graceful path is wait_to_complete().
        if (self.proc.poll() is None):
            self.kill(signal.SIGKILL)
def test_mia_analyses_raises_exception_at_init_if_partitions_is_none_and_value_gt_than_255():
    """update() must reject data values above 255 when no partitions were set."""
    distinguisher = scared.MIADistinguisher()
    traces = np.random.randint(0, 255, (500, 200), dtype='int16')
    data = np.random.randint(0, 3000, (500, 16), dtype='uint16')
    with pytest.raises(ValueError):
        distinguisher.update(traces=traces, data=data)
def source_metrics(user_statuses):
    """Summarise which Twitter clients produced ``user_statuses``.

    Returns a 10-tuple: the number of distinct (lower-cased) source strings,
    followed by the fraction of statuses posted from each official client
    (advertisers, android, blackberry, ipad, iphone, mac, websites, windows)
    and finally the fraction from non-standard clients (sources matching no
    official client and not mentioning 'twitter.com').  An empty input
    yields all zeros.
    """
    total = len(user_statuses)
    if total == 0:
        return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    source_counts = Counter(status['source'].lower() for status in user_statuses)
    # Official-client substrings, checked in this order; first match wins.
    client_rules = (
        ('twitter for advertisers', 'adv'),
        ('twitter for android', 'android'),
        ('twitter for blackberry', 'blackberry'),
        ('twitter for ipad', 'ipad'),
        ('twitter for iphone', 'iphone'),
        ('twitter for mac', 'mac'),
        ('twitter for websites', 'websites'),
        ('twitter for windows', 'windows'),
    )
    tallies = {key: 0 for (_, key) in client_rules}
    tallies['non_std'] = 0
    for src, count in source_counts.items():
        for needle, key in client_rules:
            if needle in src:
                tallies[key] += count
                break
        else:
            # Not an official client; only count it as non-standard when it
            # does not reference twitter.com at all.
            if 'twitter.com' not in src:
                tallies['non_std'] += count
    return (len(source_counts), (tallies['adv'] / total), (tallies['android'] / total), (tallies['blackberry'] / total), (tallies['ipad'] / total), (tallies['iphone'] / total), (tallies['mac'] / total), (tallies['websites'] / total), (tallies['windows'] / total), (tallies['non_std'] / total))
def init_db(sqlite_conn, data_factory, dates):
    """Create the schema, load the date dimension, and register every
    practice active over ``dates`` with a stable integer offset."""
    sqlite_conn.executescript(SCHEMA_SQL)
    import_dates(sqlite_conn, dates)
    active_codes = _get_active_practice_codes(data_factory, dates)
    rows = enumerate(active_codes)
    sqlite_conn.executemany('INSERT INTO practice (offset, code) VALUES (?, ?)', rows)
class TaskTreeView(QtWidgets.QTreeView):
def __init__(self, parent=None, tasks=None, allow_multi_selection=False, allow_drag=False, allow_editing=False, context_menu_handler_class=None, horizontal_labels=None, show_asset_and_shot_children=True, show_takes=False, show_dependency_info=False):
super(TaskTreeView, self).__init__(parent=parent)
self._tasks = []
self.horizontal_labels = horizontal_labels
self.show_dependency_info = show_dependency_info
self.show_asset_and_shot_children = show_asset_and_shot_children
self.show_takes = show_takes
if (context_menu_handler_class is None):
self.context_menu_handler = TaskDataContextMenuHandler(parent=self)
else:
self.context_menu_handler = context_menu_handler_class(parent=self)
self.is_updating = False
self.setUniformRowHeights(True)
self.header().setCascadingSectionResizes(True)
self.allow_multi_selection = allow_multi_selection
self.allow_editing = allow_editing
self._allow_drag = False
self.allow_drag = allow_drag
self.setup_signals()
if (tasks is None):
tasks = []
self.tasks = tasks
def allow_drag(self):
return self._allow_drag
_drag.setter
def allow_drag(self, allow_drag):
self._allow_drag = allow_drag
if allow_drag:
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
else:
pass
def allow_multi_selection(self):
return self._allow_multi_selection
_multi_selection.setter
def allow_multi_selection(self, allow_multi_selection):
self._allow_multi_selection = allow_multi_selection
if self._allow_multi_selection:
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
else:
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
def setup_signals(self):
self.expanded.connect(self.expand_all_selected)
self.collapsed.connect(self.collapse_all_selected)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_context_menu)
self.doubleClicked.connect(self.double_clicked_on_entity)
def replace_with_other(self, layout, index, tree_view=None):
if tree_view:
tree_view.deleteLater()
layout.insertWidget(index, self)
return self
def auto_fit_column(self):
self.resizeColumnToContents(0)
def tasks(self):
return self._tasks
def tasks(self, tasks):
self._tasks = tasks
self.fill_ui(self.horizontal_labels)
def fill_ui(self, horizontal_labels=None):
logger.debug('start filling tasks_treeView')
logger.debug('creating a new model')
if (self.model() is not None):
self.model().deleteLater()
if (horizontal_labels is not None):
self.horizontal_labels = horizontal_labels
task_tree_model = TaskTreeModel(parent=self, show_dependency_info=self.show_dependency_info, show_asset_and_shot_children=self.show_asset_and_shot_children, show_takes=self.show_takes, horizontal_labels=self.horizontal_labels, allow_editing=self.allow_editing)
task_tree_model.populateTree(self.tasks)
self.setModel(task_tree_model)
self.is_updating = False
self.auto_fit_column()
logger.debug('finished filling tasks_treeView')
def show_context_menu(self, position):
if self.context_menu_handler:
self.context_menu_handler.show_context_menu(position)
def double_clicked_on_entity(self, index):
model = self.model()
item = model.itemFromIndex(index)
if (not item):
return
if item.hasChildren():
return
logger.debug(('item : %s' % item))
task_id = None
entity = None
try:
if item.task:
task_id = item.task.id
except AttributeError:
return
if (item.task.entity_type == 'Task'):
if task_id:
entity = SimpleEntity.query.get(task_id)
from anima.ui.dialogs import task_dialog
task_main_dialog = task_dialog.MainDialog(parent=self, tasks=[entity])
task_main_dialog.exec_()
result = task_main_dialog.result()
task_main_dialog.deleteLater()
try:
accepted = QtWidgets.QDialog.DialogCode.Accepted
except AttributeError:
accepted = QtWidgets.QDialog.Accepted
if (result == accepted):
if item.parent:
item.parent.reload()
else:
self.fill_ui()
self.find_and_select_entity_item(entity)
def find_and_select_entity_item(self, tasks, tree_view=None):
if (not tasks):
return
selection_model = self.selectionModel()
selection_model.clearSelection()
selection_flag = QtCore.QItemSelectionModel.Select
if (not tree_view):
tree_view = self
if (not isinstance(tasks, list)):
tasks = [tasks]
items = []
for task in tasks:
item = self.load_task_item_hierarchy(task, tree_view)
if item:
selection_model.select(item.index(), selection_flag)
items.append(item)
if items:
self.scrollTo(items[0].index())
return items
def load_task_item_hierarchy(self, task, tree_view):
if (not task):
return
self.is_updating = True
item = self.find_entity_item(task)
if (not item):
if isinstance(task, Task):
item = self.find_entity_item(task.project, tree_view)
else:
item = self.find_entity_item(task, tree_view)
logger.debug(('item for project: %s' % item))
if item:
tree_view.setExpanded(item.index(), True)
if (isinstance(task, Task) and task.parents):
for parent in task.parents:
item = self.find_entity_item(parent, tree_view)
if item:
tree_view.setExpanded(item.index(), True)
item = self.find_entity_item(task, tree_view)
if (not item):
logger.debug('can not find item')
self.is_updating = False
return item
def find_entity_item(self, entity, tree_view=None):
if (not entity):
return None
if (tree_view is None):
tree_view = self
indexes = self.get_item_indices_containing_text(entity.name, tree_view)
model = tree_view.model()
logger.debug(('items matching name : %s' % indexes))
for index in indexes:
item = model.itemFromIndex(index)
if item:
if (item.task.id == entity.id):
return item
return None
def get_item_indices_containing_text(cls, text, tree_view):
model = tree_view.model()
logger.debug(('searching for text : %s' % text))
return model.match(model.index(0, 0), 0, text, (- 1), QtCore.Qt.MatchRecursive)
def get_selected_items(self):
from anima.ui.models.task import TaskItem
selection_model = self.selectionModel()
logger.debug(('selection_model: %s' % selection_model))
indexes = selection_model.selectedIndexes()
logger.debug(('selected indexes : %s' % indexes))
task_items = []
if indexes:
item_model = self.model()
logger.debug(('indexes: %s' % indexes))
for index in indexes:
current_item = item_model.itemFromIndex(index)
if (current_item and isinstance(current_item, TaskItem)):
task_items.append(current_item)
logger.debug(('task_items: %s' % task_items))
return task_items
def get_selected_task_ids(self):
    """Return the database ids of the currently selected tasks."""
    selected_tasks = self.get_selected_tasks()
    return [selected.id for selected in selected_tasks]
def get_selected_tasks(self):
    """Query and return the Task rows for the selected items."""
    selected_ids = [item.task.id for item in self.get_selected_items()]
    return Task.query.filter(Task.id.in_(selected_ids)).all()
def expand_all_selected(self, index):
    """Expand every selected item, then refit the columns."""
    for selected in self.get_selected_items():
        self.setExpanded(selected.index(), True)
    self.auto_fit_column()
def collapse_all_selected(self, index):
    """Collapse every selected item, then refit the columns."""
    for selected in self.get_selected_items():
        self.setExpanded(selected.index(), False)
    self.auto_fit_column()
def filter_firewall_internet_service_custom_group_data(json):
    """Keep only the supported, non-None options from ``json``.

    Runs ``remove_invalid_fields`` first, then whitelists the keys this
    endpoint accepts.
    """
    option_list = ['comment', 'member', 'name']
    json = remove_invalid_fields(json)
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
class MediaFileTranslation(Translation(MediaFile)):
    """Per-language caption and description for a MediaFile."""
    caption = models.CharField(_('caption'), max_length=1000)
    description = models.TextField(_('description'), blank=True)
    class Meta():
        verbose_name = _('media file translation')
        verbose_name_plural = _('media file translations')
        # One translation per language for each parent media file.
        unique_together = ('parent', 'language_code')
        app_label = 'medialibrary'
    def __str__(self):
        # NOTE(review): with a blank caption this renders as '' — confirm
        # that is acceptable for admin listings.
        return self.caption
class OptionPlotoptionsArcdiagramSonificationContexttracksMappingFrequency(Options):
    """Generated Highcharts option wrapper: frequency mapping for
    arc-diagram sonification context tracks.

    NOTE(review): every option below appears as a same-named getter/setter
    pair; the @property / @<name>.setter decorators look stripped by
    extraction (as written, the second def shadows the first). Confirm
    against the generator's original output.
    """
    def mapFunction(self):
        # Getter: current value, None when unset.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store as a plain (non-JS) value.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class Surprise(Skill):
    """Active skill: hand over exactly one real card to another character."""
    associated_action = SurpriseAction
    skill_category = ['character', 'active']
    target = t_OtherOne()
    no_reveal = True
    no_drop = True
    usage = 'handover'
    def check(self):
        """Valid only for a single non-virtual card held in hand or shown cards."""
        cards = self.associated_cards
        if len(cards) != 1:
            return False
        card = cards[0]
        if card.is_card(VirtualCard):
            # Virtual (composed) cards cannot be handed over.
            return False
        return card.resides_in.type in ('cards', 'showncards')
class LocalTaskExecutor(TaskExecutorBase):
    """Runs task executions via a pool of local Worker processes.

    A managed queue fans (key, command) tuples out to ``parallelism``
    workers; a pid registry records submitted executions so duplicates
    are skipped and running processes can be stopped by pid.
    """
    def __init__(self, parallelism: int=LOCAL_TASK_EXECUTOR_PARALLELISM, registry_path: str=PID_REGISTRY_PATH):
        # manager and task_queue are created lazily in initialize().
        self.manager: Optional[SyncManager] = None
        self.task_queue: Optional['Queue[TaskExecutionCommandType]'] = None
        self.parallelism = parallelism
        self.workers = []
        self.registry_path = registry_path
        super().__init__()
    def initialize(self):
        """Create the shared queue and start the worker processes.

        :raises AIFlowException: if parallelism is not a positive integer.
        """
        if (self.parallelism <= 0):
            raise AIFlowException('Parallelism of LocalTaskExecutor should be a positive integer.')
        self.manager = Manager()
        self.task_queue = self.manager.Queue(maxsize=MAX_QUEUE_SIZE)
        if (not os.path.isdir(LOCAL_REGISTRY_PATH)):
            os.makedirs(LOCAL_REGISTRY_PATH)
        self.workers = [Worker(self.task_queue, self.registry_path) for _ in range(self.parallelism)]
        for worker in self.workers:
            worker.start()
    def destroy(self):
        """Shut down workers and the manager.

        One (None, None) sentinel is queued per worker to signal exit;
        task_queue.join() blocks until all queued commands are processed
        (assumes workers call task_done — confirm in Worker).
        """
        for _ in self.workers:
            self.task_queue.put((None, None))
        if (self.task_queue is not None):
            self.task_queue.join()
        if (self.manager is not None):
            self.manager.shutdown()
    def start_task_execution(self, key: TaskExecutionKey):
        """Queue a command for ``key`` unless it was already submitted."""
        registry = LocalRegistry(self.registry_path)
        pid = registry.get(str(key))
        if (pid is not None):
            # A registry entry means this execution was submitted before.
            logger.warning(f'TaskExecution: {key} has been submitted in the past, skipping...')
            return
        command = self.generate_command(key)
        self.task_queue.put((key, command))
    def stop_task_execution(self, key: TaskExecutionKey):
        """Stop the process recorded in the registry for ``key``, if any."""
        try:
            registry = LocalRegistry(self.registry_path)
            pid = registry.get(str(key))
            if pid:
                stop_process(int(pid))
        except ValueError:
            # int(pid) failed: the registry held a non-numeric value.
            # NOTE(review): if registry.get itself could raise ValueError,
            # `pid` would be unbound here — confirm int() is the only source.
            logger.exception('Failed to convert pid with value {}'.format(pid))
def link(scatters: Union[(List[Scatter], List[Tuple[(Scatter, str)]])], match_by: Union[(str, List[str])]='index', rows: Optional[int]=1, row_height: int=320, cols: Optional[int]=None):
    """Compose scatters into a grid with view, selection, and hover synced.

    Thin convenience wrapper over ``compose`` that turns on all three
    synchronization channels.
    """
    sync_options = dict(sync_view=True, sync_selection=True, sync_hover=True)
    return compose(
        scatters,
        match_by=match_by,
        rows=rows,
        row_height=row_height,
        cols=cols,
        **sync_options,
    )
class OverworldTiles():
    """Registry of Tile prototypes for the overworld wilderness map.

    NOTE(review): several ``symbol`` strings (and the ``alt_symbols`` for
    Road) are empty — they look like non-ASCII map glyphs lost in
    extraction; confirm against the original file.
    """
    # Lazily-built cache of {attribute_name: Tile} (see _get_cached_dict).
    _cached_dict = None
    Mountains = Tile(name='Mountains', symbol='', room_prototype='wilderness_mountains')
    Water = Tile(name='Water', symbol='', room_prototype='wilderness_water')
    Swamp = Tile(name='Swamp', symbol='', room_prototype='wilderness_swamp')
    Desert = Tile(name='Desert', symbol='', room_prototype='wilderness_desert')
    Trees = Tile(name='Trees', symbol='', room_prototype='wilderness_trees')
    Bridge = Tile(name='Bridge', symbol='', room_prototype='wilderness_bridge')
    Plains = Tile(name='Plains', symbol='.', room_prototype='wilderness_plains')
    Canyon = Tile(name='Canyon', symbol='U', room_prototype='wilderness_canyon')
    Road = Tile(name='Road', symbol='', room_prototype='wilderness_road', alt_symbols=('', '', '', '', '', '', '', ''))
    City = Tile(name='City', symbol='[]', room_prototype='wilderness_city')
    Mystery = Tile(name='Mystery', symbol='(?)', room_prototype='wilderness_mystery')
    # NOTE(review): the three methods below take ``cls`` — the @classmethod
    # decorators appear to have been stripped; confirm.
    def _get_cached_dict(cls):
        """Build (once) and return the {name: Tile} mapping from class attrs."""
        if (not cls._cached_dict):
            new_dict = {key: value for (key, value) in cls.__dict__.items() if isinstance(value, Tile)}
            cls._cached_dict = new_dict
        return cls._cached_dict
    def items(cls):
        """dict-like items() over the registered tiles."""
        return cls._get_cached_dict().items()
    def values(cls):
        """dict-like values() over the registered tiles."""
        return cls._get_cached_dict().values()
class CreateInitialRevisionsDbTest(TestModelMixin, TestBase):
    """createinitialrevisions with --using only writes to the named database."""
    databases = {'default', 'mysql', 'postgres'}
    def testCreateInitialRevisionsDb(self):
        obj = TestModel.objects.create()
        self.callCommand('createinitialrevisions', using='postgres')
        # No revision on the default database (assertNoRevision presumably
        # checks 'default' — confirm in TestBase)...
        self.assertNoRevision()
        # ...but exactly one on the targeted database.
        self.assertSingleRevision((obj,), comment='Initial version.', using='postgres')
    def testCreateInitialRevisionsDbMySql(self):
        obj = TestModel.objects.create()
        self.callCommand('createinitialrevisions', using='mysql')
        self.assertNoRevision()
        self.assertSingleRevision((obj,), comment='Initial version.', using='mysql')
class CmdUnban(COMMAND_DEFAULT_CLASS):
    """
    remove a ban

    Usage:
      unban <banid>

    Clear a previously set ban, identified by the id shown in the
    ban listing (run `unban` with no argument to see it).
    """
    key = 'unban'
    locks = 'cmd:perm(unban) or perm(Developer)'
    help_category = 'Admin'
    def func(self):
        """Implement unbanning.

        This is a generator-style Evennia command: the ``yield`` pauses
        execution and waits for the caller's confirmation input.
        """
        banlist = ServerConfig.objects.conf('server_bans')
        if not self.args:
            # No id supplied - just list the current bans.
            self.msg(list_bans(self, banlist))
            return
        try:
            num = int(self.args)
        except Exception:
            self.msg('You must supply a valid ban id to clear.')
            return
        if not banlist:
            self.msg('There are no bans to clear.')
        elif not (0 < num < (len(banlist) + 1)):
            # Ban ids are 1-based indices into the ban list.
            self.msg(f'Ban id |w{self.args}|n was not found.')
        else:
            ban = banlist[(num - 1)]
            # First two entries are the banned name and IP; join directly
            # (the previous identity comprehension was redundant).
            value = ' '.join(ban[:2]).strip()
            ret = (yield f"Are you sure you want to unban {num}: '|w{value}|n' [Y]/N?")
            if str(ret).lower() in ('n', 'no'):
                self.msg('Aborted.')
                return
            del banlist[(num - 1)]
            ServerConfig.objects.conf('server_bans', banlist)
            self.msg(f"Cleared ban {num}: '{value}'")
            # value is already stripped above; log it as-is.
            logger.log_sec(f'Unbanned: {value} (Caller: {self.caller}, IP: {self.session.address}).')
class TaskBase():
    """Base class for background tasks: DB session access, e-mail helpers,
    and a shared Jinja environment for rendering e-mail templates."""
    __name__: ClassVar[str]
    def __init__(self, get_main_session: Callable[(..., contextlib.AbstractAsyncContextManager[AsyncSession])]=get_single_main_async_session, get_workspace_session: Callable[(..., contextlib.AbstractAsyncContextManager[AsyncSession])]=get_workspace_session_task, email_provider: EmailProvider=email_provider, send_task: SendTask=send_task) -> None:
        self.get_main_session = get_main_session
        self.get_workspace_session = get_workspace_session
        self.email_provider = email_provider
        self.send_task = send_task
        # Autoescaped template environment with the i18n extension enabled.
        self.jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(EMAIL_TEMPLATES_DIRECTORY), autoescape=True)
        self.jinja_env.add_extension('jinja2.ext.i18n')
    def __call__(self, *args, **kwargs):
        # Synchronous entry point: runs the async `run` coroutine in a
        # fresh event loop (asyncio.Runner requires Python 3.11+).
        with asyncio.Runner() as runner:
            # NOTE(review): instantiated only for its side effects — the
            # instance is discarded; confirm what BabelMiddleware sets up.
            BabelMiddleware(app=None, **get_babel_middleware_kwargs())
            logger.info('Start task', task=self.__name__)
            result = runner.run(self.run(*args, **kwargs))
            logger.info('Done task', task=self.__name__)
            return result
    async def _get_workspace(self, workspace_id: UUID4) -> Workspace:
        """Load a workspace by id from the main DB or raise TaskError."""
        async with self.get_main_session() as session:
            repository = WorkspaceRepository(session)
            workspace = (await repository.get_by_id(workspace_id))
            if (workspace is None):
                raise TaskError()
            return workspace
    async def _get_user(self, user_id: UUID4, workspace: Workspace) -> User:
        """Load a user by id from the workspace DB or raise TaskError."""
        async with self.get_workspace_session(workspace) as session:
            repository = UserRepository(session)
            user = (await repository.get_by_id(user_id))
            if (user is None):
                raise TaskError()
            return user
    async def _get_tenant(self, tenant_id: UUID4, workspace: Workspace) -> Tenant:
        """Load a tenant (email domain eagerly loaded) or raise TaskError."""
        async with self.get_workspace_session(workspace) as session:
            repository = TenantRepository(session)
            tenant = (await repository.get_by_id(tenant_id, (selectinload(Tenant.email_domain),)))
            if (tenant is None):
                raise TaskError()
            return tenant
    async def _get_email_template_renderer(self, workspace: Workspace) -> AsyncGenerator[(EmailTemplateRenderer, None)]:
        # Async generator yielding a renderer bound to a live session;
        # presumably consumed via an async-context-manager wrapper — confirm.
        async with self.get_workspace_session(workspace) as session:
            repository = EmailTemplateRepository(session)
            (yield EmailTemplateRenderer(repository))
    async def _get_email_subject_renderer(self, workspace: Workspace) -> AsyncGenerator[(EmailSubjectRenderer, None)]:
        # Same pattern as above, for subject-line rendering.
        async with self.get_workspace_session(workspace) as session:
            repository = EmailTemplateRepository(session)
            (yield EmailSubjectRenderer(repository))
class Normalizer():
    """Linear mapping between ``in_domain`` and ``out_domain``.

    ``normalize`` maps a value from [in_min, in_max] onto
    [out_min, out_max]; ``de_normalize`` is its exact inverse.
    """
    def __init__(self, in_domain, out_domain=(0, 1)):
        """
        :param in_domain: (min, max) of the input range.
        :param out_domain: (min, max) of the output range.
        :raises ValueError: if in_domain has zero width (slope undefined;
            previously this surfaced as an opaque ZeroDivisionError).
        """
        (self.in_min, self.in_max) = (in_domain[0], in_domain[1])
        (self.out_min, self.out_max) = (out_domain[0], out_domain[1])
        if self.in_max == self.in_min:
            raise ValueError('ERR: in_domain must have non-zero width: [%.2f, %.2f]' % (self.in_min, self.in_max))
        self.slope = ((self.out_max - self.out_min) / (self.in_max - self.in_min))
    def normalize(self, v):
        """Map ``v`` from the input domain to the output domain.

        :raises ValueError: if ``v`` lies outside the input domain.
            (ValueError subclasses Exception, so callers catching the
            old generic Exception keep working.)
        """
        if ((v > self.in_max) or (v < self.in_min)):
            raise ValueError(('ERR: input out of range! input = %.2f, range = [%.2f, %.2f]' % (v, self.in_min, self.in_max)))
        return (((self.slope * v) + self.out_min) - (self.slope * self.in_min))
    def de_normalize(self, v):
        """Inverse of :meth:`normalize`: map ``v`` back to the input domain.

        No range check is performed here, mirroring the original behavior.
        """
        return (((v - self.out_min) + (self.slope * self.in_min)) / self.slope)
class OptionSeriesHistogramSonificationDefaultinstrumentoptionsMappingHighpassFrequency(Options):
    """Generated Highcharts option wrapper: highpass-filter frequency
    mapping for histogram-series sonification default instruments.

    NOTE(review): every option below appears as a same-named getter/setter
    pair; the @property / @<name>.setter decorators look stripped by
    extraction (as written, the second def shadows the first). Confirm
    against the generator's original output.
    """
    def mapFunction(self):
        # Getter: current value, None when unset.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store as a plain (non-JS) value.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Upper bound of the mapped range.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Lower bound of the mapped range.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the leading '.filterwarnings(...)' looks like a stripped
# '@pytest.mark.filterwarnings' decorator — confirm against the original.
.filterwarnings('ignore:Default values*')
def test_default_surface_statistics(default_surface):
    """Percentile statistics over 10 identical shifted copies of a surface.

    All copies hold the same values (base + 8.76543), so every percentile
    equals the mean; p10 is expected near 16 (implies the default
    surface's mean is ~7.23 — TODO confirm fixture values).
    """
    small = xtgeo.RegularSurface(**default_surface)
    so2 = xtgeo.Surfaces()
    for _ in range(10):
        tmp = small.copy()
        tmp.values += 8.76543
        so2.append([tmp])
    res2 = so2.statistics(percentiles=[10, 50])
    assert (res2['p10'].values.mean() == pytest.approx(16., 0.001))
def downgrade():
    """Alembic downgrade: remove the TCF purpose/vendor/system consent and
    legitimate-interest columns, their indexes, and the related uniqueness
    constraints from the served-notice and privacy-preference tables
    (reverses the paired upgrade). Indexes/constraints are dropped before
    the columns they cover.
    """
    # --- servednoticehistory: indexes, then columns -----------------------
    op.drop_index(op.f('ix_servednoticehistory_vendor_legitimate_interests'), table_name='servednoticehistory')
    op.drop_index(op.f('ix_servednoticehistory_vendor_consent'), table_name='servednoticehistory')
    op.drop_index(op.f('ix_servednoticehistory_system_legitimate_interests'), table_name='servednoticehistory')
    op.drop_index(op.f('ix_servednoticehistory_system_consent'), table_name='servednoticehistory')
    op.drop_index(op.f('ix_servednoticehistory_purpose_legitimate_interests'), table_name='servednoticehistory')
    op.drop_index(op.f('ix_servednoticehistory_purpose_consent'), table_name='servednoticehistory')
    op.drop_column('servednoticehistory', 'system_legitimate_interests')
    op.drop_column('servednoticehistory', 'system_consent')
    op.drop_column('servednoticehistory', 'vendor_legitimate_interests')
    op.drop_column('servednoticehistory', 'vendor_consent')
    op.drop_column('servednoticehistory', 'purpose_legitimate_interests')
    op.drop_column('servednoticehistory', 'purpose_consent')
    # --- privacypreferencehistory: indexes, then columns ------------------
    op.drop_index(op.f('ix_privacypreferencehistory_vendor_legitimate_interests'), table_name='privacypreferencehistory')
    op.drop_index(op.f('ix_privacypreferencehistory_vendor_consent'), table_name='privacypreferencehistory')
    op.drop_index(op.f('ix_privacypreferencehistory_system_legitimate_interests'), table_name='privacypreferencehistory')
    op.drop_index(op.f('ix_privacypreferencehistory_system_consent'), table_name='privacypreferencehistory')
    op.drop_index(op.f('ix_privacypreferencehistory_purpose_legitimate_interests'), table_name='privacypreferencehistory')
    op.drop_index(op.f('ix_privacypreferencehistory_purpose_consent'), table_name='privacypreferencehistory')
    op.drop_column('privacypreferencehistory', 'system_legitimate_interests')
    op.drop_column('privacypreferencehistory', 'system_consent')
    op.drop_column('privacypreferencehistory', 'vendor_legitimate_interests')
    op.drop_column('privacypreferencehistory', 'vendor_consent')
    op.drop_column('privacypreferencehistory', 'purpose_legitimate_interests')
    op.drop_column('privacypreferencehistory', 'purpose_consent')
    # --- lastservednotice: unique constraints, indexes, then columns ------
    op.drop_constraint('last_served_identity_vendor_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_identity_vendor_leg_interests', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_identity_system_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_identity_system_leg_interests', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_identity_purpose_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_identity_purpose_legitimate_interests', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_vendor_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_vendor_leg_interests', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_system_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_system_leg_interests', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_purpose_consent', 'lastservednotice', type_='unique')
    op.drop_constraint('last_served_fides_user_device_identity_purpose_leg_interests', 'lastservednotice', type_='unique')
    op.drop_index(op.f('ix_lastservednotice_vendor_legitimate_interests'), table_name='lastservednotice')
    op.drop_index(op.f('ix_lastservednotice_vendor_consent'), table_name='lastservednotice')
    op.drop_index(op.f('ix_lastservednotice_system_legitimate_interests'), table_name='lastservednotice')
    op.drop_index(op.f('ix_lastservednotice_system_consent'), table_name='lastservednotice')
    op.drop_index(op.f('ix_lastservednotice_purpose_legitimate_interests'), table_name='lastservednotice')
    op.drop_index(op.f('ix_lastservednotice_purpose_consent'), table_name='lastservednotice')
    op.drop_column('lastservednotice', 'system_legitimate_interests')
    op.drop_column('lastservednotice', 'system_consent')
    op.drop_column('lastservednotice', 'vendor_legitimate_interests')
    op.drop_column('lastservednotice', 'vendor_consent')
    op.drop_column('lastservednotice', 'purpose_legitimate_interests')
    op.drop_column('lastservednotice', 'purpose_consent')
    # --- currentprivacypreference: indexes, constraints, then columns -----
    op.drop_index(op.f('ix_currentprivacypreference_vendor_legitimate_interests'), table_name='currentprivacypreference')
    op.drop_index(op.f('ix_currentprivacypreference_vendor_consent'), table_name='currentprivacypreference')
    op.drop_index(op.f('ix_currentprivacypreference_system_legitimate_interests'), table_name='currentprivacypreference')
    op.drop_index(op.f('ix_currentprivacypreference_system_consent'), table_name='currentprivacypreference')
    op.drop_index(op.f('ix_currentprivacypreference_purpose_legitimate_interests'), table_name='currentprivacypreference')
    op.drop_index(op.f('ix_currentprivacypreference_purpose_consent'), table_name='currentprivacypreference')
    op.drop_constraint('identity_vendor_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('identity_vendor_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_constraint('identity_system_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('identity_system_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_constraint('identity_purpose_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('identity_purpose_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_vendor_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_vendor_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_system_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_system_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_purpose_consent', 'currentprivacypreference', type_='unique')
    op.drop_constraint('fides_user_device_identity_purpose_leg_interests', 'currentprivacypreference', type_='unique')
    op.drop_column('currentprivacypreference', 'system_legitimate_interests')
    op.drop_column('currentprivacypreference', 'system_consent')
    op.drop_column('currentprivacypreference', 'vendor_legitimate_interests')
    op.drop_column('currentprivacypreference', 'vendor_consent')
    op.drop_column('currentprivacypreference', 'purpose_legitimate_interests')
    op.drop_column('currentprivacypreference', 'purpose_consent')
class TestAsyncExecutor():
class NoopContextManager():
def __init__(self, mock):
self.mock = mock
async def __aenter__(self):
return self
async def __call__(self, *args):
return (await self.mock(*args))
async def __aexit__(self, exc_type, exc_val, exc_tb):
return False
def __str__(self):
return str(self.mock)
class StaticRequestTiming():
def __init__(self, task_start):
self.task_start = task_start
self.current_request_start = self.task_start
def __enter__(self):
self.current_request_start += 5
return self
def request_start(self):
return self.current_request_start
def request_end(self):
return (self.current_request_start + 0.05)
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RunnerWithProgress():
def __init__(self, iterations=5):
self.iterations_left = iterations
self.iterations = iterations
def completed(self):
return (self.iterations_left <= 0)
def percent_completed(self):
return ((self.iterations - self.iterations_left) / self.iterations)
async def __call__(self, es, params):
self.iterations_left -= 1
class RunnerOverridingThroughput():
async def __call__(self, es, params):
return {'weight': 1, 'unit': 'ops', 'throughput': 1.23}
def context_managed(mock):
return TestAsyncExecutor.NoopContextManager(mock)
def setup_method(self, method):
runner.register_default_runners()
self.runner_with_progress = self.RunnerWithProgress()
self.runner_overriding_throughput = self.RunnerOverridingThroughput()
runner.register_runner('unit-test-recovery', self.runner_with_progress, async_runner=True)
runner.register_runner('override-throughput', self.runner_overriding_throughput, async_runner=True)
('elasticsearch.Elasticsearch')
.asyncio
async def test_execute_schedule_in_throughput_mode(self, es):
task_start = time.perf_counter()
es.new_request_context.return_value = self.StaticRequestTiming(task_start=task_start)
es.bulk = mock.AsyncMock(return_value=io.BytesIO(b'{"errors": false, "took": 8}'))
params.register_param_source_for_name('driver-test-param-source', DriverTestParamSource)
test_track = track.Track(name='unittest', description='unittest track', indices=None, challenges=None)
task = track.Task('time-based', track.Operation('time-based', track.OperationType.Bulk.to_hyphenated_string(), params={'body': ['action_metadata_line', 'index_line'], 'action-metadata-present': True, 'bulk-size': 1, 'unit': 'docs', 'size': 1}, param_source='driver-test-param-source'), warmup_time_period=0, clients=4)
param_source = track.operation_parameters(test_track, task)
task_allocation = driver.TaskAllocation(task=task, client_index_in_task=0, global_client_index=0, total_clients=task.clients)
schedule = driver.schedule_for(task_allocation, param_source)
sampler = driver.Sampler(start_timestamp=task_start)
cancel = threading.Event()
complete = threading.Event()
execute_schedule = driver.AsyncExecutor(client_id=2, task=task, schedule=schedule, es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
(await execute_schedule())
samples = sampler.samples
assert (len(samples) > 0)
assert (not complete.is_set()), 'Executor should not auto-complete a normal task'
previous_absolute_time = (- 1.0)
previous_relative_time = (- 1.0)
for sample in samples:
assert (sample.client_id == 2)
assert (sample.task == task)
assert (previous_absolute_time < sample.absolute_time)
previous_absolute_time = sample.absolute_time
assert (previous_relative_time < sample.relative_time)
previous_relative_time = sample.relative_time
assert (metrics.SampleType.Normal == sample.sample_type)
assert (sample.latency == sample.service_time)
assert (sample.total_ops == 1)
assert (sample.total_ops_unit == 'docs')
('elasticsearch.Elasticsearch')
.asyncio
async def test_execute_schedule_with_progress_determined_by_runner(self, es):
task_start = time.perf_counter()
es.new_request_context.return_value = self.StaticRequestTiming(task_start=task_start)
params.register_param_source_for_name('driver-test-param-source', DriverTestParamSource)
test_track = track.Track(name='unittest', description='unittest track', indices=None, challenges=None)
task = track.Task('time-based', track.Operation('time-based', operation_type='unit-test-recovery', params={'indices-to-restore': '*', 'size': None}, param_source='driver-test-param-source'), warmup_time_period=0, clients=4)
param_source = track.operation_parameters(test_track, task)
task_allocation = driver.TaskAllocation(task=task, client_index_in_task=0, global_client_index=0, total_clients=task.clients)
schedule = driver.schedule_for(task_allocation, param_source)
sampler = driver.Sampler(start_timestamp=task_start)
cancel = threading.Event()
complete = threading.Event()
execute_schedule = driver.AsyncExecutor(client_id=2, task=task, schedule=schedule, es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
(await execute_schedule())
samples = sampler.samples
assert (len(samples) == 5)
assert (self.runner_with_progress.completed is True)
assert (self.runner_with_progress.percent_completed == 1.0)
assert (not complete.is_set()), 'Executor should not auto-complete a normal task'
previous_absolute_time = (- 1.0)
previous_relative_time = (- 1.0)
for sample in samples:
assert (sample.client_id == 2)
assert (sample.task == task)
assert (previous_absolute_time < sample.absolute_time)
previous_absolute_time = sample.absolute_time
assert (previous_relative_time < sample.relative_time)
previous_relative_time = sample.relative_time
assert (metrics.SampleType.Normal == sample.sample_type)
assert (sample.throughput is None)
assert (sample.latency == sample.service_time)
assert (sample.total_ops == 1)
assert (sample.total_ops_unit == 'ops')
('elasticsearch.Elasticsearch')
.asyncio
async def test_execute_schedule_runner_overrides_times(self, es):
task_start = time.perf_counter()
es.new_request_context.return_value = self.StaticRequestTiming(task_start=task_start)
params.register_param_source_for_name('driver-test-param-source', DriverTestParamSource)
test_track = track.Track(name='unittest', description='unittest track', indices=None, challenges=None)
task = track.Task('override-throughput', track.Operation('override-throughput', operation_type='override-throughput', params={'size': 1}, param_source='driver-test-param-source'), warmup_iterations=0, iterations=1, clients=1)
param_source = track.operation_parameters(test_track, task)
task_allocation = driver.TaskAllocation(task=task, client_index_in_task=0, global_client_index=0, total_clients=task.clients)
schedule = driver.schedule_for(task_allocation, param_source)
sampler = driver.Sampler(start_timestamp=task_start)
cancel = threading.Event()
complete = threading.Event()
execute_schedule = driver.AsyncExecutor(client_id=0, task=task, schedule=schedule, es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
(await execute_schedule())
samples = sampler.samples
assert (not complete.is_set()), 'Executor should not auto-complete a normal task'
assert (len(samples) == 1)
sample = samples[0]
assert (sample.client_id == 0)
assert (sample.task == task)
assert (metrics.SampleType.Normal == sample.sample_type)
assert (sample.latency == sample.service_time)
assert (sample.total_ops == 1)
assert (sample.total_ops_unit == 'ops')
assert (sample.throughput == 1.23)
assert (sample.service_time is not None)
assert (sample.time_period is not None)
('elasticsearch.Elasticsearch')
.asyncio
async def test_execute_schedule_throughput_throttled(self, es):
async def perform_request(*args, **kwargs):
return None
es.options.return_value = es
es.init_request_context.return_value = {'request_start': 0, 'request_end': 10}
es.perform_request.side_effect = perform_request
params.register_param_source_for_name('driver-test-param-source', DriverTestParamSource)
test_track = track.Track(name='unittest', description='unittest track', indices=None, challenges=None)
for (target_throughput, bounds) in {10: [2, 4], 100: [24, 26], 1000: [235, 255]}.items():
task = track.Task('time-based', track.Operation('time-based', track.OperationType.Search.to_hyphenated_string(), params={'index': '_all', 'type': None, 'body': {'query': {'match_all': {}}}, 'request-params': {}, 'cache': False, 'response-compression-enabled': True}, param_source='driver-test-param-source'), warmup_time_period=0.5, time_period=0.5, clients=4, params={'target-throughput': target_throughput, 'clients': 4}, completes_parent=True)
sampler = driver.Sampler(start_timestamp=0)
cancel = threading.Event()
complete = threading.Event()
param_source = track.operation_parameters(test_track, task)
task_allocation = driver.TaskAllocation(task=task, client_index_in_task=0, global_client_index=0, total_clients=task.clients)
schedule = driver.schedule_for(task_allocation, param_source)
execute_schedule = driver.AsyncExecutor(client_id=0, task=task, schedule=schedule, es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
(await execute_schedule())
samples = sampler.samples
sample_size = len(samples)
lower_bound = bounds[0]
upper_bound = bounds[1]
assert (lower_bound <= sample_size <= upper_bound)
assert complete.is_set(), 'Executor should auto-complete a task that terminates its parent'
('elasticsearch.Elasticsearch')
.asyncio
async def test_cancel_execute_schedule(self, es):
es.init_request_context.return_value = {'request_start': 0, 'request_end': 10}
es.bulk = mock.AsyncMock(return_value=io.BytesIO(b'{"errors": false, "took": 8}'))
params.register_param_source_for_name('driver-test-param-source', DriverTestParamSource)
test_track = track.Track(name='unittest', description='unittest track', indices=None, challenges=None)
for target_throughput in [10, 100, 1000]:
task = track.Task('time-based', track.Operation('time-based', track.OperationType.Bulk.to_hyphenated_string(), params={'body': ['action_metadata_line', 'index_line'], 'action-metadata-present': True, 'bulk-size': 1}, param_source='driver-test-param-source'), warmup_time_period=0.5, time_period=0.5, clients=4, params={'target-throughput': target_throughput, 'clients': 4})
param_source = track.operation_parameters(test_track, task)
task_allocation = driver.TaskAllocation(task=task, client_index_in_task=0, global_client_index=0, total_clients=task.clients)
schedule = driver.schedule_for(task_allocation, param_source)
sampler = driver.Sampler(start_timestamp=0)
cancel = threading.Event()
complete = threading.Event()
execute_schedule = driver.AsyncExecutor(client_id=0, task=task, schedule=schedule, es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
cancel.set()
(await execute_schedule())
samples = sampler.samples
sample_size = len(samples)
assert (sample_size == 0)
('elasticsearch.Elasticsearch')
.asyncio
async def test_execute_schedule_aborts_on_error(self, es):
class ExpectedUnitTestException(Exception):
def __str__(self):
return 'expected unit test exception'
def run(*args, **kwargs):
raise ExpectedUnitTestException()
class ScheduleHandle():
def __init__(self):
self.ramp_up_wait_time = 0
def before_request(self, now):
pass
def after_request(self, now, weight, unit, meta_data):
pass
def start(self):
pass
async def __call__(self):
invocations = [(0, metrics.SampleType.Warmup, 0, TestAsyncExecutor.context_managed(run), None)]
for invocation in invocations:
(yield invocation)
task = track.Task('no-op', track.Operation('no-op', track.OperationType.Bulk.to_hyphenated_string(), params={}, param_source='driver-test-param-source'), warmup_time_period=0.5, time_period=0.5, clients=4, params={'clients': 4})
sampler = driver.Sampler(start_timestamp=0)
cancel = threading.Event()
complete = threading.Event()
execute_schedule = driver.AsyncExecutor(client_id=2, task=task, schedule=ScheduleHandle(), es={'default': es}, sampler=sampler, cancel=cancel, complete=complete, on_error='continue')
with pytest.raises(exceptions.RallyError, match='Cannot run task \\[no-op\\]: expected unit test exception'):
(await execute_schedule())
assert (es.call_count == 0)
.asyncio
async def test_execute_single_no_return_value(self):
es = None
params = None
runner = mock.AsyncMock()
(ops, unit, request_meta_data) = (await driver.execute_single(self.context_managed(runner), es, params, on_error='continue'))
assert (ops == 1)
assert (unit == 'ops')
assert (request_meta_data == {'success': True})
.asyncio
async def test_execute_single_tuple(self):
es = None
params = None
runner = mock.AsyncMock(return_value=(500, 'MB'))
(ops, unit, request_meta_data) = (await driver.execute_single(self.context_managed(runner), es, params, on_error='continue'))
assert (ops == 500)
assert (unit == 'MB')
assert (request_meta_data == {'success': True})
.asyncio
async def test_execute_single_dict(self):
es = None
params = None
runner = mock.AsyncMock(return_value={'weight': 50, 'unit': 'docs', 'some-custom-meta-data': 'valid', ' 200})
(ops, unit, request_meta_data) = (await driver.execute_single(self.context_managed(runner), es, params, on_error='continue'))
assert (ops == 50)
assert (unit == 'docs')
assert (request_meta_data == {'some-custom-meta-data': 'valid', ' 200, 'success': True})
.parametrize('on_error', ['abort', 'continue'])
.asyncio
async def test_execute_single_with_connection_error_always_aborts(self, on_error):
es = None
params = None
runner = mock.AsyncMock(side_effect=elasticsearch.ConnectionError(message='Connection error'))
with pytest.raises(exceptions.RallyAssertionError) as exc:
(await driver.execute_single(self.context_managed(runner), es, params, on_error=on_error))
assert (exc.value.args[0] == 'Request returned an error. Error type: transport, Description: Connection error')
.asyncio
async def test_execute_single_with_
es = None
params = None
error_meta = elastic_transport.ApiResponseMeta(status=404, headers={}, duration=0.0, node=None)
runner = mock.AsyncMock(side_effect=elasticsearch.NotFoundError(message='not found', meta=error_meta, body='the requested document could not be found'))
with pytest.raises(exceptions.RallyAssertionError) as exc:
(await driver.execute_single(self.context_managed(runner), es, params, on_error='abort'))
assert (exc.value.args[0] == 'Request returned an error. Error type: api, Description: not found (the requested document could not be found), HTTP Status: 404')
.asyncio
async def test_execute_single_with_
es = None
params = None
empty_body = io.BytesIO(b'')
str_literal_empty_body = str(empty_body)
error_meta = elastic_transport.ApiResponseMeta(status=413, headers={}, duration=0.0, node=None)
runner = mock.AsyncMock(side_effect=elasticsearch.ApiError(message=str_literal_empty_body, meta=error_meta, body=empty_body))
with pytest.raises(exceptions.RallyAssertionError) as exc:
(await driver.execute_single(self.context_managed(runner), es, params, on_error='abort'))
assert (exc.value.args[0] == 'Request returned an error. Error type: api, Description: None, HTTP Status: 413')
@pytest.mark.asyncio
async def test_execute_single_with_http_499_body_aborts(self):
    # A non-standard status code with a readable body must surface the body
    # contents as the error description.
    # NOTE(review): the function name was truncated in the source; it was
    # reconstructed from the scenario -- confirm against VCS history.
    es = None
    params = None
    body = io.BytesIO(b'Huge error')
    str_literal = str(body)
    error_meta = elastic_transport.ApiResponseMeta(status=499, headers={}, duration=0.0, node=None)
    runner = mock.AsyncMock(side_effect=elasticsearch.ApiError(message=str_literal, meta=error_meta, body=body))
    with pytest.raises(exceptions.RallyAssertionError) as exc:
        await driver.execute_single(self.context_managed(runner), es, params, on_error='abort')
    assert exc.value.args[0] == 'Request returned an error. Error type: api, Description: Huge error, HTTP Status: 499'
@pytest.mark.asyncio
async def test_execute_single_with_http_404_continues(self):
    # With on_error='continue' an API error is reported in the request
    # meta-data instead of raising.
    # NOTE(review): the function name and the 'http-status' dict key were
    # truncated/corrupted in the source and were reconstructed -- confirm
    # against VCS history.
    es = None
    params = None
    error_meta = elastic_transport.ApiResponseMeta(status=404, headers={}, duration=0.0, node=None)
    runner = mock.AsyncMock(side_effect=elasticsearch.NotFoundError(message='not found', meta=error_meta, body='the requested document could not be found'))
    ops, unit, request_meta_data = await driver.execute_single(self.context_managed(runner), es, params, on_error='continue')
    assert ops == 0
    assert unit == 'ops'
    assert request_meta_data == {'http-status': 404, 'error-type': 'api', 'error-description': 'not found (the requested document could not be found)', 'success': False}
@pytest.mark.asyncio
async def test_execute_single_with_http_413_empty_body_continues(self):
    # An API error with an empty message/body still produces meta-data with
    # an empty error description when on_error='continue'.
    # NOTE(review): the function name and the 'http-status' dict key were
    # truncated/corrupted in the source and were reconstructed -- confirm
    # against VCS history.
    es = None
    params = None
    error_meta = elastic_transport.ApiResponseMeta(status=413, headers={}, duration=0.0, node=None)
    runner = mock.AsyncMock(side_effect=elasticsearch.NotFoundError(message='', meta=error_meta, body=''))
    ops, unit, request_meta_data = await driver.execute_single(self.context_managed(runner), es, params, on_error='continue')
    assert ops == 0
    assert unit == 'ops'
    assert request_meta_data == {'http-status': 413, 'error-type': 'api', 'error-description': '', 'success': False}
@pytest.mark.asyncio
async def test_execute_single_with_key_error(self):
    # A KeyError raised by the runner (missing mandatory parameter) must be
    # converted into a SystemSetupError listing the provided parameters.
    # NOTE(review): the "@pytest.mark.asyncio" decorator was mangled to
    # ".asyncio" in the source and has been restored.
    class FailingRunner:
        async def __call__(self, *args):
            raise KeyError('bulk-size missing')

        def __str__(self):
            return 'failing_mock_runner'

    es = None
    params = collections.OrderedDict()
    params['bulk'] = 5000
    params['mode'] = 'append'
    runner = FailingRunner()
    with pytest.raises(exceptions.SystemSetupError) as exc:
        await driver.execute_single(self.context_managed(runner), es, params, on_error='continue')
    assert exc.value.args[0] == "Cannot execute [failing_mock_runner]. Provided parameters are: ['bulk', 'mode']. Error: ['bulk-size missing']."
def subviewsOfView(view):
    """Breadth-first generator over (view, depth) pairs, starting with
    *view* itself at depth 0. View handles are lldb expression results
    obtained through the fb helper."""
    queue = [(view, 0)]
    yield queue[0]
    while queue:
        current, depth = queue.pop(0)
        children = fb.evaluateExpression('(id)[%s subviews]' % current)
        child_count = int(fb.evaluateExpression('(int)[(id)%s count]' % children))
        for index in range(child_count):
            child = fb.evaluateExpression('(id)[%s objectAtIndex:%i]' % (children, index))
            entry = (child, depth + 1)
            queue.append(entry)
            yield entry
def extractFlyingsaucertranslatesBlogspotCom(item):
    """Parse a release-feed item from flyingsaucertranslates.blogspot.com.

    Returns None when no chapter/volume was found or the item is a preview,
    a release message when a known tag or title component matches, and
    False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    titlemap = [('Secret Service Mysterious Doctor', 'Poisoning the World: The Secret Service Mysterious Doctor is a Young Beastly Wife', 'translated'), ('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'), ('Master of Dungeon', 'Master of Dungeon', 'oel')]
    lowered_title = item['title'].lower()
    for titlecomponent, name, tl_type in titlemap:
        if titlecomponent.lower() in lowered_title:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
@ait_converter(torch.ops.aten._adaptive_avg_pool2d.default)
def aten_ops_adaptive_avg_pool2d(target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Argument], name: str) -> ConverterOutput:
    """Lower aten._adaptive_avg_pool2d to a fixed-parameter AIT avg_pool2d.

    Only square inputs (H == W) with an even channel count are supported;
    the adaptive pool is rewritten into an equivalent kernel/stride pool.

    NOTE(review): the registration decorator was mangled to a bare
    `_converter(...)` call in the source; restored as `@ait_converter(...)`
    -- confirm against VCS history.
    """
    input_val = args[0]
    if not isinstance(input_val, AITTensor):
        raise RuntimeError(f'Non-tensor inputs for {name}: {input_val}')
    output_size = identical_elem_tuple_to_int(args[1])
    # AIT tensors are channels-last here: shape == (N, H, W, C).
    shape = [var._attrs['values'][0] for var in input_val._attrs['shape']]
    HI, WI, CI = shape[1], shape[2], shape[3]
    if CI % 2 != 0:
        raise RuntimeError(f'AIT avg_pool2d expects input channel dim to align w/ a multiple of 2 but got {CI}')
    if HI != WI:
        raise RuntimeError(f'adaptive_avg_pool2d currently only supports square input H/W but got H: {shape[1]} and W: {shape[2]}')
    # Standard adaptive-pool decomposition: stride = floor(H / out),
    # kernel covers the remainder so the last window ends at the edge.
    stride = HI // output_size
    kernel_size = HI - (output_size - 1) * stride
    return avg_pool2d(kernel_size=kernel_size, stride=stride, pad=0)(input_val)
class TestMaps:
    """Tests for the spock `__maps__` post-init hook.

    NOTE(review): the `@spock` decorators on the inner config classes were
    stripped by the source mangling and have been restored here --
    SpockBuilder only accepts spock-decorated classes; confirm against VCS.
    """

    def test_return_raise(self, monkeypatch, tmp_path):
        # A __maps__ implementation that returns None must fail instantiation.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                @spock
                class FailReturnConfig:
                    val_1: float = 0.5

                    def __maps__(self):
                        print(self.val_1)

                config = SpockBuilder(FailReturnConfig, desc='Test Builder')
                config.generate()

    def test_map_return(self, monkeypatch, tmp_path):
        # A __maps__ implementation that returns an object exposes it as _maps.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])

            @spock
            class ReturnConfig:
                val_1: float = 0.5

                def __maps__(self):
                    return DummyClass(value=self.val_1)

            config = SpockBuilder(ReturnConfig, desc='Test Builder')
            configs = config.generate()
            assert configs.ReturnConfig._maps.value == configs.ReturnConfig.val_1
@pytest.fixture(scope='function')
def shopify_erasure_data(shopify_connection_config, shopify_erasure_identity_email, shopify_secrets) -> Generator:
    """Seed a Shopify customer, order, blog article and comment for erasure
    tests, yield them, then delete the order and article.

    NOTE(review): the `@pytest.fixture` decorator, the base_url f-string and
    the order's product_id literal were corrupted in the source and have
    been reconstructed -- confirm all three against VCS history.
    """
    base_url = f"https://{shopify_secrets['domain']}"
    faker = Faker()
    firstName = faker.first_name()
    lastName = faker.last_name()
    body = {'customer': {'first_name': firstName, 'last_name': lastName, 'email': shopify_erasure_identity_email, 'verified_email': True, 'addresses': [{'address1': '123 Test', 'city': 'Toronto', 'province': 'ON', 'zip': '66777', 'last_name': lastName, 'first_name': firstName, 'country': 'Canada'}]}}
    headers = {'X-Shopify-Access-Token': f"{shopify_secrets['access_token']}"}
    customers_response = requests.post(url=f'{base_url}/admin/api/2022-07/customers.json', json=body, headers=headers)
    customer = customers_response.json()
    assert customers_response.ok
    # Shopify indexes new customers asynchronously; give it time before polling.
    sleep(30)
    error_message = f'customer with email {shopify_erasure_identity_email} could not be added to Shopify'
    poll_for_existence(customer_exists, (shopify_erasure_identity_email, shopify_secrets), error_message=error_message)
    # TODO(review): the product id literal was lost in the source; it is now
    # read from the connector secrets -- verify the key exists there.
    body = {'order': {'email': shopify_erasure_identity_email, 'fulfillment_status': 'fulfilled', 'send_receipt': True, 'financial_status': 'paid', 'send_fulfillment_receipt': True, 'line_items': [{'product_id': shopify_secrets['product_id'], 'name': 'Short leeve t-shirt', 'title': 'Short sleeve t-shirt', 'price': 10, 'quantity': 1}]}}
    orders_response = requests.post(url=f'{base_url}/admin/api/2022-07/orders.json', json=body, headers=headers)
    assert orders_response.ok
    order = orders_response.json()
    order_id = order['order']['id']
    blogs_response = requests.get(url=f'{base_url}/admin/api/2022-07/blogs.json', headers=headers)
    assert blogs_response.ok
    blog = blogs_response.json()['blogs'][1]
    blog_id = blog['id']
    body = {'article': {'title': 'Test Article', 'author': firstName}}
    articles_response = requests.post(url=f'{base_url}/admin/api/2022-07/blogs/{blog_id}/articles.json', json=body, headers=headers)
    assert articles_response.ok
    article = articles_response.json()
    article_id = article['article']['id']
    body = {'comment': {'body': 'I like comments\nAnd I like posting them *RESTfully*.', 'author': firstName, 'email': shopify_erasure_identity_email, 'ip': faker.ipv4_private(), 'blog_id': blog_id, 'article_id': article_id}}
    comments_response = requests.post(url=f'{base_url}/admin/api/2022-07/comments.json', json=body, headers=headers)
    assert comments_response.ok
    comment = comments_response.json()
    yield (customer, order, blog, article, comment)
    # Teardown: remove the order and article created above.
    order_delete_response = requests.delete(url=f'{base_url}/admin/api/2022-07/orders/{order_id}.json', headers=headers)
    assert order_delete_response.ok
    article_delete_response = requests.delete(url=f'{base_url}/admin/api/2022-07/articles/{article_id}.json', headers=headers)
    assert article_delete_response.ok
class Enum(RawType):
    """Enum type backed by an integer base type.

    Maps symbolic names to values (and back via ``reverse``); all stream
    reads/writes are delegated to the underlying type, with results wrapped
    in EnumInstance objects.
    """

    def __init__(self, cstruct, name, type_, values):
        self.type = type_
        self.values = values
        # Inverse lookup table: value -> symbolic name.
        self.reverse = {value: key for key, value in values.items()}
        super().__init__(cstruct, name, len(self.type))

    def __call__(self, value):
        # Plain integers become EnumInstance wrappers; anything else
        # (e.g. a byte buffer) takes the regular RawType parsing path.
        if isinstance(value, int):
            return EnumInstance(self, value)
        return super(Enum, self).__call__(value)

    def __getitem__(self, attr):
        return self(self.values[attr])

    def __getattr__(self, attr):
        try:
            return self(self.values[attr])
        except KeyError:
            raise AttributeError(attr)

    def __contains__(self, attr):
        return attr in self.values

    def _read(self, stream):
        return self(self.type._read(stream))

    def _read_array(self, stream, count):
        return [self(value) for value in self.type._read_array(stream, count)]

    def _read_0(self, stream):
        return [self(value) for value in self.type._read_0(stream)]

    def _write(self, stream, data):
        if isinstance(data, EnumInstance):
            data = data.value
        return self.type._write(stream, data)

    def _write_array(self, stream, data):
        raw = [entry.value if isinstance(entry, EnumInstance) else entry for entry in data]
        return self.type._write_array(stream, raw)

    def _write_0(self, stream, data):
        raw = [entry.value if isinstance(entry, EnumInstance) else entry for entry in data]
        return self.type._write_0(stream, raw)

    def default(self):
        return self(0)

    def default_array(self, count):
        return [self.default() for _ in range(count)]
class LinearScalingStrategy(TimeGasStrategy):
    """Time-based gas strategy that multiplies the price by a fixed factor
    on every tick, capped at ``max_gas_price``."""

    def __init__(self, initial_gas_price: Wei, max_gas_price: Wei, increment: float = 1.125, time_duration: int = 30):
        super().__init__(time_duration)
        self.initial_gas_price = Wei(initial_gas_price)
        self.max_gas_price = Wei(max_gas_price)
        self.increment = increment

    def get_gas_price(self) -> Generator[Wei, None, None]:
        # Start at the initial price and escalate geometrically up to the cap.
        price = self.initial_gas_price
        yield price
        while True:
            price = min(Wei(price * self.increment), self.max_gas_price)
            yield price
class EchoAction(UserAction):
    # Swaps a detached card for an Echo shadow copy: any pending actions on
    # the stack that reference the original card are re-pointed at the
    # shadow, then the original is migrated into the target's card area.

    def __init__(self, source, target, card):
        (self.source, self.target, self.card) = (source, target, card)

    def apply_action(self):
        # NOTE(review): `src` is bound to self.target, not self.source --
        # self.source is stored but never used here. This may be intentional
        # (the shadow being owned by the target) but looks suspicious;
        # confirm against the game rules before changing.
        (src, tgt, c) = (self.target, self.target, self.card)
        g = self.game
        # The card must already be detached from any card list.
        assert c.detached
        shadow = Echo(src)
        # Re-point pending actions that still reference the original card.
        for a in g.action_stack:
            if (isinstance(a, LaunchCard) and (a.card is c)):
                a.card = shadow
            elif (getattr(a, 'associated_card', None) is c):
                a.associated_card = shadow
        migrate_cards([c], tgt.cards, unwrap=True)
        return True
class OptionSeriesTreemapSonificationContexttracksMappingTremolo(Options):
    """Accessors for the sonification tremolo mapping sub-options.

    NOTE(review): the `@property` decorators were stripped by the source
    mangling and have been restored (these accessors take no arguments and
    return sub-configuration objects) -- confirm against VCS.
    """

    @property
    def depth(self) -> 'OptionSeriesTreemapSonificationContexttracksMappingTremoloDepth':
        """Sub-configuration controlling the tremolo depth mapping."""
        return self._config_sub_data('depth', OptionSeriesTreemapSonificationContexttracksMappingTremoloDepth)

    @property
    def speed(self) -> 'OptionSeriesTreemapSonificationContexttracksMappingTremoloSpeed':
        """Sub-configuration controlling the tremolo speed mapping."""
        return self._config_sub_data('speed', OptionSeriesTreemapSonificationContexttracksMappingTremoloSpeed)
class MonitorSubscribe(object):
    """Swagger-generated model describing a monitor subscription.

    NOTE(review): the property/setter decorators were mangled in the source
    (e.g. `_notify.setter` instead of `@batch_notify.setter`, and the
    `@property` lines were dropped entirely); they have been restored
    following the standard swagger-codegen pattern -- confirm against VCS.
    """
    # Attribute name -> swagger model type.
    swagger_types = {'batch_notify': 'MonitorSubscribeBatchNotify', 'callback': 'MonitorWebhook', 'refresh_event': 'float', 'retry_policy': 'MonitorSubscribeRetryPolicy'}
    # Python attribute -> JSON key.
    attribute_map = {'batch_notify': 'batchNotify', 'callback': 'callback', 'refresh_event': 'refreshEvent', 'retry_policy': 'retryPolicy'}

    def __init__(self, batch_notify=None, callback=None, refresh_event=None, retry_policy=None):
        self._batch_notify = None
        self._callback = None
        self._refresh_event = None
        self._retry_policy = None
        self.discriminator = None
        if batch_notify is not None:
            self.batch_notify = batch_notify
        # callback is required: the setter rejects None.
        self.callback = callback
        if refresh_event is not None:
            self.refresh_event = refresh_event
        if retry_policy is not None:
            self.retry_policy = retry_policy

    @property
    def batch_notify(self):
        """Batch notification settings, or None."""
        return self._batch_notify

    @batch_notify.setter
    def batch_notify(self, batch_notify):
        self._batch_notify = batch_notify

    @property
    def callback(self):
        """The webhook to notify; required."""
        return self._callback

    @callback.setter
    def callback(self, callback):
        if callback is None:
            raise ValueError('Invalid value for `callback`, must not be `None`')
        self._callback = callback

    @property
    def refresh_event(self):
        """Refresh interval in seconds; must be >= 60 when set."""
        return self._refresh_event

    @refresh_event.setter
    def refresh_event(self, refresh_event):
        if refresh_event is not None and refresh_event < 60:
            raise ValueError('Invalid value for `refresh_event`, must be a value greater than or equal to `60`')
        self._refresh_event = refresh_event

    @property
    def retry_policy(self):
        """Retry policy for failed notifications, or None."""
        return self._retry_policy

    @retry_policy.setter
    def retry_policy(self, retry_policy):
        self._retry_policy = retry_policy

    def to_dict(self):
        """Return the model's attributes as a plain dict (recursing into
        nested models and containers)."""
        result = {}
        # dict.items() replaces the generated six.iteritems call (py3 idiom).
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
            else:
                result[attr] = value
        if issubclass(MonitorSubscribe, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, MonitorSubscribe):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
def process_value(setting_info, value, selected_preset=None):
    """Encode a 'multidpi_range' setting value into command bytes.

    :param dict setting_info: handler configuration; must contain
        'first_preset', 'max_preset_count', 'dpi_length_byte' and
        'count_mode' ('number' or 'flag').
    :param value: the DPI presets as a number, a list/tuple of numbers, or
        a comma-separated string.
    :param selected_preset: index of the active preset, or None for the
        device's first preset.
    :raises ValueError: on missing/invalid handler parameters or presets.
    :returns: the merged byte sequence (count, selected preset, values).
    """
    # Fix: validate the handler configuration *before* using any of it.
    # Previously 'first_preset' was read before it was checked, so a broken
    # configuration raised KeyError instead of the intended ValueError.
    if 'first_preset' not in setting_info:
        raise ValueError("Missing 'first_preset' parameter for 'multidpi_range' handler")
    if 'dpi_length_byte' not in setting_info:
        raise ValueError("Missing 'dpi_length_byte' parameter for 'multidpi_range' handler")
    if 'count_mode' not in setting_info:
        raise ValueError("Missing 'count_mode' parameter for 'multidpi_range' handler")
    if setting_info['count_mode'] not in ('number', 'flag'):
        raise ValueError("Invalid 'count_mode' parameter '%s'" % setting_info['count_mode'])
    # Normalize the input into a list of integer DPI values.
    if isinstance(value, (int, float)):
        dpis = [int(value)]
    elif isinstance(value, (list, tuple)):
        dpis = [int(dpi) for dpi in value]
    else:
        dpis = [int(dpi) for dpi in value.replace(' ', '').split(',')]
    # Shift the zero-based selection to the device's preset numbering.
    if selected_preset is None:
        selected_preset = setting_info['first_preset']
    else:
        selected_preset += setting_info['first_preset']
    if len(dpis) == 0:
        raise ValueError('you must provide at least one preset')
    if len(dpis) > setting_info['max_preset_count']:
        raise ValueError('you provided %i preset but the device accepts a maximum of %i presets' % (len(dpis), setting_info['max_preset_count']))
    if not (setting_info['first_preset'] <= selected_preset < (len(dpis) + setting_info['first_preset'])):
        raise ValueError('the selected preset is out of range')
    dpi_length = setting_info['dpi_length_byte']
    count_mode = setting_info['count_mode']
    # Encode each preset as a little-endian integer of dpi_length bytes.
    output_values = []
    for dpi in dpis:
        encoded = process_range(setting_info, dpi)
        encoded = uint_to_little_endian_bytearray(encoded, dpi_length)
        output_values = merge_bytes(output_values, encoded)
    dpi_count = len(dpis)
    if count_mode == 'flag':
        # One bit per configured preset, e.g. 3 presets -> 0b00000111.
        dpi_count = 255 >> (8 - dpi_count)
    return merge_bytes(dpi_count, selected_preset, output_values)
class TestLang(util.TestCase):
    """Tests for the CSS `:lang()` pseudo-class."""

    def test_lang(self):
        """`:lang(de)` matches every de-* subtag variant, whether the lang
        attribute is inherited from an ancestor or set directly."""
        markup = '\n <div lang="de-DE">\n <p id="1"></p>\n </div>\n <div lang="de-DE-1996">\n <p id="2"></p>\n </div>\n <div lang="de-Latn-DE">\n <p id="3"></p>\n </div>\n <div lang="de-Latf-DE">\n <p id="4"></p>\n </div>\n <div lang="de-Latn-DE-1996">\n <p id="5"></p>\n </div>\n <p id="6" lang="de-DE"></p>\n '
        expected = ['1', '2', '3', '4', '5', '6']
        self.assert_selector(markup, 'p:lang(de)', expected, flags=util.HTML)

    def test_iframe(self):
        """Language does not leak across an iframe's document boundary."""
        markup = '\n <html>\n <body>\n <div lang="de-DE">\n <p id="1"></p>\n <iframe>\n <html>\n <body>\n <p id="2"></p>\n <p id="3" lang="en-US"></p>\n </body>\n </html>\n </iframe>\n </div>\n </body>\n </html>\n '
        for selector, expected in (('p:lang(en)', ['3']), ('p:lang(de)', ['1'])):
            self.assert_selector(markup, selector, expected, flags=util.PYHTML)
def test_array_type_spark_to_foundry(spark_session):
    """An ARRAY<STRING> column converts into the expected foundry schema."""
    spark_schema = StructType([StructField('purple_alias', ArrayType(StringType()), True, {})])
    expected = {
        'fieldSchemaList': [{
            'type': 'ARRAY',
            'name': 'purple_alias',
            'nullable': True,
            'customMetadata': {},
            'arraySubtype': {'type': 'STRING', 'nullable': True, 'customMetadata': {}},
        }],
        'dataFrameReaderClass': 'com.palantir.foundry.spark.input.ParquetDataFrameReader',
        'customMetadata': {'format': 'parquet'},
    }
    assert spark_schema_to_foundry_schema(spark_schema) == expected
class Unparser(_unparse.Unparser):
    """AST unparser with overrides for interactive/expression roots and
    class definitions."""
    # Rendering table for boolean operators, used by the base class.
    boolops = {'And': 'and', 'Or': 'or'}

    def _Interactive(self, tree):
        for stmt in tree.body:
            self.dispatch(stmt)

    def _Expression(self, tree):
        self.dispatch(tree.body)

    def _ClassDef(self, tree):
        self.write('\n')
        for deco in tree.decorator_list:
            # NOTE(review): decorators are emitted without a leading '@';
            # this may be intentional for this unparser -- confirm.
            self.fill('')
            self.dispatch(deco)
        self.fill('class ' + tree.name)
        if tree.bases:
            self.write('(')
            # Fix: separate bases with ', ' *between* entries instead of
            # writing a trailing ', ' after every base (previously produced
            # output like "class A(B, C, ):").
            for index, base in enumerate(tree.bases):
                if index:
                    self.write(', ')
                self.dispatch(base)
            self.write(')')
        self.enter()
        self.dispatch(tree.body)
        self.leave()
class FixedColumns(Options):
    """Options for the DataTables FixedColumns extension.

    NOTE(review): the `@property`/`@<name>.setter` decorators were stripped
    by the source mangling (duplicate method names; `activate` assigns
    through the setter) and have been restored -- confirm against VCS.
    """

    def activate(self):
        """Enable the extension with its default of two fixed left columns."""
        self.leftColumns = 2
        return self

    @property
    def heightMatch(self):
        """Row height matching mode."""
        return self._config_get()

    @heightMatch.setter
    def heightMatch(self, val):
        self._config(val)

    @property
    def leftColumns(self):
        """Number of columns fixed on the left side."""
        return self._config_get()

    @leftColumns.setter
    def leftColumns(self, val):
        self._config(val)

    @property
    def rightColumns(self):
        """Number of columns fixed on the right side."""
        return self._config_get()

    @rightColumns.setter
    def rightColumns(self, val):
        self._config(val)
class MaxPool3d(Module):
    """3D max pooling lowered onto AIT's 2D max_pool2d.

    Only pooling that is a no-op along the temporal dimension is supported
    (kT == 1, sT == 1, padT == 0): the batch and depth axes are folded
    together, pooled spatially, then unfolded again.
    """

    def __init__(self, kernel_size, stride=None, padding=0):
        super().__init__()
        self.kernel_size = kernel_size
        # Stride defaults to the kernel size when not given.
        self.stride = (stride if (stride is not None) else kernel_size)
        self.padding = padding

    def forward(self, *args):
        # Exactly one input tensor is expected.
        assert (len(args) == 1)
        input_val = args[0]
        if (isinstance(self.kernel_size, tuple) and isinstance(self.stride, tuple) and isinstance(self.padding, tuple)):
            kernel_size_tuple = self.kernel_size
            stride_tuple = self.stride
            padding_tuple = self.padding
            # Temporal components must be no-ops so pooling is purely spatial.
            assert (kernel_size_tuple[0] == 1), 'max_pool3d only supports kT == 1 currently'
            assert (stride_tuple[0] == 1), 'max_pool3d only supports sT == 1 currently'
            assert (padding_tuple[0] == 0), 'max_pool3d only supports T_padding == 0 currently'
            # Collapse the remaining (identical) spatial components to ints.
            kernel_size = identical_elem_tuple_to_int(kernel_size_tuple[1:])
            stride = identical_elem_tuple_to_int(stride_tuple[1:])
            padding = identical_elem_tuple_to_int(padding_tuple[1:])
        elif (isinstance(self.kernel_size, int) and isinstance(self.stride, int) and isinstance(self.padding, int)):
            kernel_size = self.kernel_size
            stride = self.stride
            padding = self.padding
        else:
            raise RuntimeError('Only int or tuple types are supported')
        # Input layout is channels-last 3D: (N, D, H, W, C).
        (N, D, H, W, C) = input_val.shape()
        reshape_op_0 = reshape()
        # Fold N and D together so a 2D pool can run per frame.
        shape_0 = ((- 1), H, W, C)
        input_val = reshape_op_0(input_val, shape_0)
        output = max_pool2d(kernel_size=kernel_size, stride=stride, pad=padding)(input_val)
        (_, H_o, W_o, _) = output.shape()
        reshape_op_1 = reshape()
        # Unfold back to (N, D, H_o, W_o, C).
        shape_1 = (N, D, H_o, W_o, C)
        output = reshape_op_1(output, shape_1)
        return output
def save_data(data):
    """Bulk-create Live rows from raw episode dicts, creating a guest User
    for episodes that have a guest. Like/unlike counts are randomized."""
    lives = []
    for item in data:
        number = item.get('number')
        title = item.get('title')
        guest = item.get('guest')
        like = randint(80, 2000)
        unlike = random() * like
        if guest:
            name_parts = guest.split()
            guest_user = User.objects.create_user(username=slugify(guest), first_name=name_parts[0], last_name=' '.join(name_parts[1:]).strip())
            live = Live(number=number, title=title, like=like, unlike=unlike, guest=guest_user)
        else:
            live = Live(number=number, title=title, like=like, unlike=unlike)
        lives.append(live)
    Live.objects.bulk_create(lives)
class EventVRRPConfigChangeRequest(event.EventRequestBase):
    # Request event asking a VRRP instance to change its runtime
    # configuration. Any parameter left as None means "do not change".

    def __init__(self, instance_name, priority=None, advertisement_interval=None, preempt_mode=None, preempt_delay=None, accept_mode=None):
        super(EventVRRPConfigChangeRequest, self).__init__()
        # Name of the VRRP instance this request targets.
        self.instance_name = instance_name
        self.priority = priority
        self.advertisement_interval = advertisement_interval
        self.preempt_mode = preempt_mode
        self.preempt_delay = preempt_delay
        self.accept_mode = accept_mode
class Once():
def __init__(self):
self._lock = asyncio.Lock()
self._done = False
async def do(self, func, *args, **kwargs):
if (not self._done):
async with self._lock:
if (not self._done):
(await func(*args, **kwargs))
self._done = True |
def test_search_path_expansion(workdir):
    """`register --recursive` discovers repos reachable through symlinks."""
    paths = set_up(workdir)
    os.symlink(paths['repo2'], paths['link1'])
    with push_dir('projectA'):
        result = mu_repo.main(config_file='.bar_file', args=['register', '--recursive'])
        assert result.succeeded
        _compare_repos(result.config.repos, ['sectionX/repo1', '../projectB/sectionY/repo2'])
def delV3User(snmpEngine, userName, securityEngineId=None):
    # Remove a USM user (and its key material) from the SNMP engine's LCD:
    # destroy the usmUserEntry and pysnmpUsmSecretEntry rows for this
    # userName/securityEngineId, then walk the USM user table and recursively
    # delete rows that were cloned from the deleted user, restarting the
    # walk from the top after each recursive deletion.
    (securityEngineId, usmUserEntry, tblIdx1, pysnmpUsmSecretEntry, tblIdx2) = __cookV3UserInfo(snmpEngine, userName, securityEngineId)
    # Destroy the main USM user row (column 13 is the RowStatus column).
    snmpEngine.msgAndPduDsp.mibInstrumController.writeMibObjects((((usmUserEntry.name + (13,)) + tblIdx1), 'destroy'), snmpEngine=snmpEngine)
    # Destroy the associated secrets row (column 4 is its status column).
    snmpEngine.msgAndPduDsp.mibInstrumController.writeMibObjects((((pysnmpUsmSecretEntry.name + (4,)) + tblIdx2), 'destroy'), snmpEngine=snmpEngine)
    ((debug.logger & debug.FLAG_SM) and debug.logger(('delV3User: deleted table entries by index userName "%s" securityEngineId "%s"' % (userName, securityEngineId.prettyPrint()))))

    def _cbFun(varBinds, **context):
        # Walk callback: decides whether to stop, keep walking, or delete a
        # dependent row and restart the walk.
        (name, val) = varBinds[0]
        if exval.endOfMib.isSameTypeWith(val):
            # Table exhausted -- stop the walk.
            context['user']['varBinds'] = ()
        elif (not (exval.noSuchInstance.isSameTypeWith(val) or exval.noSuchObject.isSameTypeWith(val))):
            # Regular row -- continue walking from this position.
            context['user']['varBinds'] = varBinds
        elif (varBinds[0][0][:len(initialVarBinds[0][0])] != initialVarBinds[0][0]):
            # Walked past the usmUserEntry subtree -- stop.
            context['user']['varBinds'] = ()
        else:
            # Presumably a row left dangling by the deletion (a clone of the
            # deleted user): delete it too and restart from the top.
            delV3User(snmpEngine, varBinds[1][1], varBinds[0][1])
            context['user']['varBinds'] = initialVarBinds
    # Walk columns 1, 2 and 4 of usmUserEntry.
    varBinds = initialVarBinds = (((usmUserEntry.name + (1,)), None), ((usmUserEntry.name + (2,)), None), ((usmUserEntry.name + (4,)), None))
    user = {'varBinds': varBinds}
    while user['varBinds']:
        snmpEngine.msgAndPduDsp.mibInstrumController.readNextMibObjects(*user['varBinds'], snmpEngine=snmpEngine, user=user, cbFun=_cbFun)
@pytest.fixture(scope='function')
def emerse_system(db: Session) -> System:
    """Create the Emerse vendor System with consent-based and
    legitimate-interest-based privacy declarations.

    NOTE(review): the `@pytest.fixture` decorator was mangled to a bare
    `(scope='function')` in the source and has been restored.
    """
    system = System.create(db=db, data={'fides_key': f'emerse{uuid.uuid4()}', 'vendor_id': 'gvl.8', 'name': 'Emerse', 'description': 'Emerse Sverige AB is a provider of programmatic advertising solutions, offering advertisers and publishers tools to manage and optimize their digital ad campaigns.', 'organization_fides_key': 'default_organization', 'system_type': 'Service'})
    # Data uses processed under the Consent legal basis.
    for data_use in ['functional.storage', 'marketing.advertising.profiling', 'marketing.advertising.third_party.targeted', 'marketing.advertising.first_party.targeted']:
        PrivacyDeclaration.create(db=db, data={'system_id': system.id, 'data_use': data_use, 'legal_basis_for_processing': 'Consent', 'features': ['Match and combine data from other data sources', 'Link different devices']})
    # Data uses processed under the Legitimate interests legal basis.
    for data_use in ['marketing.advertising.negative_targeting', 'marketing.advertising.first_party.contextual', 'marketing.advertising.frequency_capping', 'analytics.reporting.ad_performance', 'analytics.reporting.content_performance', 'analytics.reporting.campaign_insights', 'essential.fraud_detection', 'essential.service.security', 'marketing.advertising.serving']:
        PrivacyDeclaration.create(db=db, data={'system_id': system.id, 'data_use': data_use, 'legal_basis_for_processing': 'Legitimate interests', 'features': ['Match and combine data from other data sources', 'Link different devices']})
    db.refresh(system)
    return system
def test_plugin_versions_in_cli_help(monkeypatch, capsys):
    """`--help` exits with code 0 and lists the installed plugins."""
    monkeypatch.setitem(PARSER_EXTENSIONS, 'table', ExampleTablePlugin)
    with pytest.raises(SystemExit) as exc_info:
        run(['--help'])
    assert exc_info.value.code == 0
    out = capsys.readouterr().out
    for expected in ('Installed plugins:', 'tests: unknown'):
        assert expected in out
@pytest.fixture(scope='function')
def stravawidget(monkeypatch):
    """Provide the StravaWidget class with the strava sync layer fully
    stubbed out (fake client, fixed datetime, no caching).

    NOTE(review): the `@pytest.fixture` decorator was mangled to a bare
    `(scope='function')` in the source and has been restored.
    """
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.get_client', fake_client)
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.APP_ID', True)
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.SECRET', True)
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.check_last_update', (lambda _: True))
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.datetime.datetime', MockDatetime)
    monkeypatch.setattr('qtile_extras.resources.stravadata.sync.cache_data', (lambda _: None))
    yield StravaWidget
class OptionPlotoptionsScatterSonificationDefaultspeechoptionsMappingTime(Options):
    """Accessors for the sonification time-mapping options.

    NOTE(review): the `@property`/`@<name>.setter` decorators were stripped
    by the source mangling (each name was defined twice, getter then
    setter); they have been restored -- confirm against VCS.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def apps_urlconf(*, apps=None):
    """Return the URLconf for the currently active apps.

    When no apps are passed, the active application list is taken from the
    per-thread cache (filling it from the database on a miss). Falls back
    to ROOT_URLCONF when there are no active apps.
    """
    active = apps
    if active is None:
        active = getattr(_apps_urlconf_cache, 'cache', None)
        if active is None:
            active = _APPS_MODEL._default_manager.active().applications()
            _apps_urlconf_cache.cache = active
    return _build_apps_urlconf(active) if active else settings.ROOT_URLCONF
def test_hexary_trie_missing_node():
    # Every trie operation (get/set/delete/contains) must raise
    # MissingTrieNode -- naming the key, the root hash and the missing node
    # hash -- once a referenced node is deleted from the backing db, while
    # subtrees that do not depend on the missing node stay readable.
    db = {}
    trie = HexaryTrie(db, prune=True)
    key1 = to_bytes(291)
    # The long value forces the node to be stored by hash, per the trie spec.
    trie.set(key1, b'use a value long enough that it must be hashed according to trie spec')
    key2 = to_bytes(4660)
    trie.set(key2, b'val2')
    trie_root_hash = trie.root_hash
    root_node = trie.root_node.raw
    # Simulate corruption: drop the root's first child from the db.
    first_child_hash = root_node[0]
    del db[first_child_hash]
    # Reading a key under the missing child must fail with full context.
    with pytest.raises(MissingTrieNode) as exc_info:
        trie.get(key1)
    message = str(exc_info.value)
    assert (encode_hex(key1) in message)
    assert (encode_hex(trie_root_hash) in message)
    assert (encode_hex(first_child_hash) in message)
    # Writing a key that shares a prefix with key1 must fail the same way.
    key1_shared_prefix = to_bytes(564)
    with pytest.raises(MissingTrieNode) as set_exc_info:
        trie.set(key1_shared_prefix, b'val2')
    set_exc_message = str(set_exc_info.value)
    assert (encode_hex(key1_shared_prefix) in set_exc_message)
    assert (encode_hex(trie_root_hash) in set_exc_message)
    assert (encode_hex(first_child_hash) in set_exc_message)
    # Deleting key1 must fail as well.
    with pytest.raises(MissingTrieNode) as delete_exc_info:
        trie.delete(key1)
    delete_exc_message = str(delete_exc_info.value)
    assert (encode_hex(key1) in delete_exc_message)
    assert (encode_hex(trie_root_hash) in delete_exc_message)
    assert (encode_hex(first_child_hash) in delete_exc_message)
    # Membership tests that traverse the missing node must also fail.
    key1_shared_prefix2 = to_bytes(837)
    with pytest.raises(MissingTrieNode) as existance_exc_info:
        assert (key1_shared_prefix2 in trie)
    existance_exc_message = str(existance_exc_info.value)
    assert (encode_hex(key1_shared_prefix2) in existance_exc_message)
    assert (encode_hex(trie_root_hash) in existance_exc_message)
    assert (encode_hex(first_child_hash) in existance_exc_message)
    # The subtree not touching the missing node is still readable.
    assert (trie.get(key2) == b'val2')
class TreeNodeModelAdmin(admin.ModelAdmin):
    """ModelAdmin for TreeNode models: renders the tree column in the
    changelist using one of three display modes (accordion, breadcrumbs
    or indentation)."""

    # Supported changelist display modes.
    TREENODE_DISPLAY_MODE_ACCORDION = 'accordion'
    TREENODE_DISPLAY_MODE_BREADCRUMBS = 'breadcrumbs'
    TREENODE_DISPLAY_MODE_INDENTATION = 'indentation'
    # Default mode; subclasses may override.
    treenode_display_mode = TREENODE_DISPLAY_MODE_INDENTATION
    form = TreeNodeForm
    # Large page size so whole trees tend to render on a single page.
    list_per_page = 1000
    ordering = ('tn_order',)

    def get_list_display(self, request):
        """Substitute the first list_display column with the tree-aware
        renderer; other columns are preserved."""
        base_list_display = super().get_list_display(request)
        base_list_display = list(base_list_display)

        def treenode_field_display(obj):
            return self._get_treenode_field_display(request, obj)
        treenode_field_display.short_description = self.model._meta.verbose_name
        if ((len(base_list_display) == 1) and (base_list_display[0] == '__str__')):
            # Default admin display: replace it entirely.
            return (treenode_field_display,)
        else:
            treenode_display_field = self.model.treenode_display_field
            if ((len(base_list_display) >= 1) and (base_list_display[0] == treenode_display_field)):
                # First column is the tree display field: swap it out.
                base_list_display.pop(0)
                return ((treenode_field_display,) + tuple(base_list_display))
            return base_list_display

    def get_queryset(self, request):
        # Avoid an extra query per row when rendering parents.
        qs = super().get_queryset(request)
        qs = qs.select_related('tn_parent')
        return qs

    def _use_treenode_display_mode(self, request, obj):
        # Tree rendering only makes sense on an (essentially) unfiltered
        # changelist; fall back to breadcrumbs when filters/search are active.
        querystring = (request.GET.urlencode() or '')
        return (len(querystring) <= 2)

    def _get_treenode_display_mode(self, request, obj):
        return self.treenode_display_mode

    def _get_treenode_field_default_display(self, obj):
        return self._get_treenode_field_display_with_breadcrumbs(obj)

    def _get_treenode_field_display(self, request, obj):
        """Dispatch to the renderer for the configured display mode."""
        if (not self._use_treenode_display_mode(request, obj)):
            return self._get_treenode_field_default_display(obj)
        display_mode = self._get_treenode_display_mode(request, obj)
        if (display_mode == TreeNodeModelAdmin.TREENODE_DISPLAY_MODE_ACCORDION):
            return self._get_treenode_field_display_with_accordion(obj)
        elif (display_mode == TreeNodeModelAdmin.TREENODE_DISPLAY_MODE_BREADCRUMBS):
            return self._get_treenode_field_display_with_breadcrumbs(obj)
        elif (display_mode == TreeNodeModelAdmin.TREENODE_DISPLAY_MODE_INDENTATION):
            return self._get_treenode_field_display_with_indentation(obj)
        else:
            return self._get_treenode_field_default_display(obj)

    def _get_treenode_field_display_with_accordion(self, obj):
        # Emit data attributes consumed by treenode.js to drive the
        # expandable accordion UI.
        tn_namespace = f'{obj.__module__}.{obj.__class__.__name__}'
        tn_namespace_key = tn_namespace.lower().replace('.', '_')
        obj_parent_id = (obj.tn_parent_id if obj.tn_parent_id else '')
        obj_display = obj.get_display(indent=False)
        return mark_safe(f'<span class="treenode" data-treenode-type="{tn_namespace_key}" data-treenode-pk="{obj.pk}" data-treenode-accordion="1" data-treenode-depth="{obj.tn_depth}" data-treenode-level="{obj.tn_level}" data-treenode-parent="{obj_parent_id}">{obj_display}</span>')

    def _get_treenode_field_display_with_breadcrumbs(self, obj):
        # Prefix the node with each of its ancestors.
        obj_display = ''
        for obj_ancestor in obj.get_ancestors():
            obj_ancestor_display = obj_ancestor.get_display(indent=False)
            obj_display += f'<span class="treenode-breadcrumbs">{obj_ancestor_display}</span>'
        obj_display += obj.get_display(indent=False)
        return mark_safe(f'<span class="treenode">{obj_display}</span>')

    def _get_treenode_field_display_with_indentation(self, obj):
        # One dash marker per ancestor to indicate depth.
        obj_display = ('<span class="treenode-indentation">—</span>' * obj.ancestors_count)
        obj_display += obj.get_display(indent=False)
        return mark_safe(f'<span class="treenode">{obj_display}</span>')

    class Media():
        # Static assets powering the accordion behaviour and tree styling.
        css = {'all': ('treenode/css/treenode.css',)}
        js = ['treenode/js/treenode.js']
class RetentionFilterTestCase(TestCase):
REF_TIME = datetime.datetime(2019, 5, 9, 0, 0, 0, 0, tzinfo=None)
versions: Set[Version]
def _make_version(uid: str, date: datetime.datetime) -> Version:
version = MagicMock(spec=('uid', 'date', '__repr__'))
version.uid = VersionUid(uid)
version.date = date
version.__repr__ = Mock()
version.__repr__.return_value = '{} - {}'.format(version.uid, version.date.isoformat(timespec='seconds'))
return version
def setUpClass(cls) -> None:
timestamps = ((cls.REF_TIME - dateutil.relativedelta.relativedelta(minutes=(n * 15))) for n in range(0, (8640 + 1)))
cls.versions = set([cls._make_version(f'v{c}', t) for (c, t) in zip(count(start=1), timestamps)])
([('latest3', 3, (15 * 60)), ('latest10', 10, (15 * 60)), ('hours12', 13, (60 * 60)), ('hours25', 26, (60 * 60)), ('days15', 16, ((24 * 60) * 60)), ('days31', 32, ((24 * 60) * 60)), ('weeks4', 5, (((7 * 24) * 60) * 60)), ('months1', 2, (((31 * 24) * 60) * 60)), ('months2', 3, (((31 * 24) * 60) * 60)), ('years1', 1, ((((366 * 31) * 24) * 60) * 60))])
def test_single(self, spec: str, expected_length: int, base_unit: float) -> None:
filter = RetentionFilter(rules_spec=spec, reference_time=self.REF_TIME, tz=datetime.timezone.utc)
dismissed_versions = set(filter.filter(self.versions))
remaining_versions = (self.versions - dismissed_versions)
self.assertEqual(expected_length, len(remaining_versions))
for version in remaining_versions:
cutoff = (base_unit * expected_length)
self.assertLessEqual((self.REF_TIME - version.date).total_seconds(), cutoff)
sorted_versions = sorted(remaining_versions, key=(lambda version: version.date))
for (older, younger) in pairwise(sorted_versions):
self.assertLessEqual((younger.date - older.date).total_seconds(), base_unit)
dismissed_versions_2 = filter.filter(remaining_versions)
self.assertEqual(0, len(dismissed_versions_2))
([('latest3,months2', 6, ((((3 * 30) * 24) * 60) * 60)), ('latest3,hours24,days3', 29, (((4 * 24) * 60) * 60)), ('latest3,hours48,days3', 52, (((4 * 24) * 60) * 60)), ('latest3,hours48,days3,months2', 55, ((((3 * 30) * 24) * 60) * 60)), ('latest3,hours48,days30,months2', 81, ((((3 * 30) * 24) * 60) * 60))])
def test_multiple(self, spec: str, expected_length: int, cutoff: float) -> None:
    """Combined retention rules keep the expected count, all inside the cutoff window."""
    retention = RetentionFilter(rules_spec=spec, reference_time=self.REF_TIME, tz=datetime.timezone.utc)
    dismissed = set(retention.filter(self.versions))
    kept = self.versions - dismissed
    self.assertEqual(expected_length, len(kept))
    for version in kept:
        self.assertLessEqual((self.REF_TIME - version.date).total_seconds(), cutoff)
    # Filtering is idempotent: a second pass dismisses nothing.
    self.assertEqual(0, len(retention.filter(kept)))
def test_moving_single(self) -> None:
    """Advancing the reference time by one hour expires exactly one version per step."""
    retention = RetentionFilter(rules_spec='hours30', reference_time=self.REF_TIME, tz=datetime.timezone.utc)
    kept = self.versions - set(retention.filter(self.versions))
    self.assertEqual(31, len(kept))
    for hour in range(1, 31):
        now = self.REF_TIME + dateutil.relativedelta.relativedelta(hours=hour)
        retention = RetentionFilter(rules_spec='hours30', reference_time=now, tz=datetime.timezone.utc)
        dismissed = set(retention.filter(kept))
        self.assertEqual(len(dismissed), 1)
        # The one expired version must be older than the 31-hour boundary.
        expired = next(iter(dismissed))
        self.assertGreaterEqual((now - expired.date).total_seconds(), 31 * 60 * 60)
        kept = kept - dismissed
    self.assertEqual(1, len(kept))
([('hours30', ['hours'], [range(0, 31)], [1]), ('latest10', ['latest'], [range(0, 1)], [10]), ('latest8,hours48,days7,weeks4,months2,years1', ['latest', 'hours', 'days', 'weeks', 'months', 'years'], [range(0, 1), range(2, 49), range(2, 8), range(1, 5), range(1, 3), range(0, 1)], [8, 1, 1, 1, 1, 1])])
def test_classification(self, spec: str, expected_categories: Sequence[str], expected_timecounts: Sequence[Sequence[int]], expected_in_each: Sequence[int]) -> None:
    """Step the reference time forward hour by hour (4464 hours, ~186 days) and
    check the filter's internal per-category bucketing stays consistent while
    new versions keep arriving every 15 minutes.
    """
    remaining_versions = set(self.versions)
    # Kept only so the previous state can be printed when an assertion fails.
    previous_versions_by_category_remaining = None
    for hour in range(0, 4464):
        current_time = (self.REF_TIME + dateutil.relativedelta.relativedelta(hours=hour))
        filter = RetentionFilter(rules_spec=spec, reference_time=current_time, tz=datetime.timezone.utc)
        dismissed_versions: Union[(Sequence, Set)]
        # The private _filter also exposes the internal
        # category -> timecount -> [versions] mapping, which this test inspects.
        (dismissed_versions, versions_by_category_remaining) = filter._filter(remaining_versions)
        dismissed_versions = set(dismissed_versions)
        remaining_versions = (remaining_versions - dismissed_versions)
        try:
            self.assertSetEqual(set(versions_by_category_remaining.keys()), set(expected_categories))
            for (i, category) in enumerate(expected_categories):
                actual_timecounts_set = set(versions_by_category_remaining[category].keys())
                expected_timecounts_set = set(expected_timecounts[i])
                # Presumably the oldest bucket can roll off depending on how the
                # moving reference time aligns; retry without the first
                # expected timecount in that case — TODO confirm.
                if (len(actual_timecounts_set) != len(expected_timecounts_set)):
                    expected_timecounts_set = set(list(expected_timecounts[i])[1:])
                self.assertSetEqual(actual_timecounts_set, expected_timecounts_set, category)
                for timecount in versions_by_category_remaining[category].keys():
                    # Each bucket holds exactly the expected number of versions,
                    # every one of them kept (not dismissed).
                    self.assertEqual(len(versions_by_category_remaining[category][timecount]), expected_in_each[i])
                    self.assertTrue((versions_by_category_remaining[category][timecount][0] in remaining_versions))
                    self.assertTrue((versions_by_category_remaining[category][timecount][0] not in dismissed_versions))
        except AssertionError:
            # Dump state before re-raising so the failing hour can be inspected.
            print('Time at failed assertion: {}.'.format(current_time.isoformat(timespec='seconds')))
            if previous_versions_by_category_remaining:
                print(previous_versions_by_category_remaining)
            print(versions_by_category_remaining)
            raise
        # Simulate four new backups arriving during the next hour.
        for minute in (15, 30, 45, 60):
            remaining_versions.add(self._make_version('v{}'.format(((1000000 + (hour * 60)) + minute)), (current_time + dateutil.relativedelta.relativedelta(minutes=minute))))
        previous_versions_by_category_remaining = versions_by_category_remaining
def extractMidnightmoontranslationWordpressCom(item):
    """Build a release message for a post, or None (no chapter/preview) /
    False (no matching tag)."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    tagmap = [
        ("the silly wizard's courtship method", "the silly wizard's courtship method", 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    # First tag that appears on the item wins.
    match = next(((name, tl_type) for tagname, name, tl_type in tagmap
                  if tagname in item['tags']), None)
    if match is None:
        return False
    name, tl_type = match
    return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                       postfix=postfix, tl_type=tl_type)
# NOTE(review): the next two lines look like truncated pytest decorators
# (presumably @pytest.mark.django_db / @pytest.mark.skip); as written they are
# syntactically invalid fragments.  Confirm against the original source.
.django_db(transaction=True)
.skip(reason='Test based on pre-databricks loader code. Remove when fully cut over.')
def test_delete_fabs_success(monkeypatch):
    """Exercise the FABS nightly loader's delete path: transactions named by the
    (mocked) delete feed are removed, awards without remaining transactions are
    deleted, and latest_transaction pointers are repaired.
    """
    # Award 1: its only transaction (301) will be deleted -> award disappears.
    baker.make(AwardSearch, award_id=1, generated_unique_award_id='TEST_AWARD_1')
    baker.make(TransactionSearch, transaction_id=1, award_id=1, generated_unique_award_id='TEST_AWARD_1', published_fabs_id=301)
    # Award 2: transaction 302 deleted, 303/304 survive.
    baker.make(AwardSearch, award_id=2, generated_unique_award_id='TEST_AWARD_2')
    baker.make(TransactionSearch, transaction_id=2, award_id=2, generated_unique_award_id='2019-01-01', published_fabs_id=302)
    baker.make(TransactionSearch, transaction_id=3, award_id=2, generated_unique_award_id='2019-01-02', published_fabs_id=303)
    baker.make(AwardSearch, award_id=3, generated_unique_award_id='TEST_AWARD_3')
    baker.make(TransactionSearch, transaction_id=4, award_id=3, generated_unique_award_id='TEST_AWARD_3', published_fabs_id=304)
    # Award 4: transaction 305 deleted, but source record 306 below re-loads a
    # new transaction for the same award key.
    baker.make(AwardSearch, award_id=4, generated_unique_award_id='TEST_AWARD_4')
    baker.make(TransactionSearch, transaction_id=5, award_id=4, generated_unique_award_id='TEST_AWARD_4', published_fabs_id=305)
    # Active broker-side record that the loader should pick up as new (306).
    baker.make(SourceAssistanceTransaction, published_fabs_id=306, afa_generated_unique='TEST_TRANSACTION_6', unique_award_key='TEST_AWARD_4', created_at='2022-02-18 18:27:50', updated_at='2022-02-18 18:27:50', action_date='2022-02-18 18:27:50', modified_at=datetime.datetime.utcnow(), is_active=True)
    # Loader bookkeeping rows (external data type + last load watermark).
    baker.make(ExternalDataType, external_data_type_id=2, name='fabs')
    baker.make(ExternalDataLoadDate, external_data_type_id=2, last_load_date='2022-02-01 18:27:50')
    update_awards()
    # Mock the delete feed and the afa-key -> published_fabs_id resolution.
    monkeypatch.setattr('usaspending_api.broker.management.commands.fabs_nightly_loader.retrieve_deleted_fabs_transactions', (lambda start_datetime, end_datetime=None: {'2022-02-18': ['TEST_TRANSACTION_1', 'TEST_TRANSACTION_2', 'TEST_TRANSACTION_5']}))
    monkeypatch.setattr('usaspending_api.broker.management.commands.fabs_nightly_loader.get_delete_pks_for_afa_keys', (lambda afa_ids_to_delete: [301, 302, 305]))
    call_command('fabs_nightly_loader')
    # Award 1 lost its only transaction and must be gone.
    awards_left = Award.objects.all()
    award_ids_left = set([award.id for award in awards_left])
    expected_awards_ids_left = [2, 3, 4]
    assert (sorted(award_ids_left) == expected_awards_ids_left)
    # latest_transaction must point at a surviving (or newly loaded) transaction.
    latest_transaction_ids = set([award.latest_transaction_id for award in awards_left])
    new_award_transaction_id = TransactionNormalized.objects.filter(award_id=4).values_list('id', flat=True).first()
    expected_latest_transaction_ids = sorted([3, 4, new_award_transaction_id])
    assert (sorted(latest_transaction_ids) == expected_latest_transaction_ids)
    transactions_left = TransactionNormalized.objects.all()
    transaction_norm_ids_left = set([transaction.id for transaction in transactions_left])
    expected_transaction_norm_ids_left = sorted([3, 4, new_award_transaction_id])
    assert (sorted(transaction_norm_ids_left) == expected_transaction_norm_ids_left)
    # FABS rows: 302/305/301 deleted, 303/304 kept, 306 newly loaded.
    transaction_fabs_left = TransactionFABS.objects.all()
    transaction_fabs_ids_left = set([transaction_fabs.published_fabs_id for transaction_fabs in transaction_fabs_left])
    expected_transaction_fabs_left = [303, 304, 306]
    assert (sorted(transaction_fabs_ids_left) == expected_transaction_fabs_left)
def sha3(computation: ComputationAPI) -> None:
    """EVM SHA3 (KECCAK256) opcode: hash a memory slice and push the digest."""
    start_position, size = computation.stack_pop_ints(2)
    computation.extend_memory(start_position, size)
    data = computation.memory_read_bytes(start_position, size)
    # Gas is charged per 32-byte word of hashed input.
    word_gas = constants.GAS_SHA3WORD * (ceil32(len(data)) // 32)
    computation.consume_gas(word_gas, reason='SHA3: word gas cost')
    computation.stack_push_bytes(keccak(data))
class Diagnostics(BaseDiagnostics):
    """Default diagnostics bundle for MonteCarloSamples: registers the common
    summary statistics and plots via the BaseDiagnostics factory helpers."""

    def __init__(self, samples: MonteCarloSamples):
        super().__init__(samples)
        # Summary statistics; display_names are the column headers shown to users.
        self.mean = self.summaryfn(common_stats.mean, display_names=['avg'])
        self.std = self.summaryfn(common_stats.std, display_names=['std'])
        self.confidence_interval = self.summaryfn(common_stats.confidence_interval, display_names=['2.5%', '50%', '97.5%'])
        # Convergence / sampling-efficiency diagnostics.
        self.split_r_hat = self.summaryfn(common_stats.split_r_hat, display_names=['r_hat'])
        self.effective_sample_size = self.summaryfn(common_stats.effective_sample_size, display_names=['n_eff'])
        # Plot factories.
        self.trace = self.plotfn(common_plots.trace_plot, display_name='trace')
        self.autocorr = self.plotfn(common_plots.autocorr, display_name='autocorr')
# NOTE(review): the next four lines look like stripped click decorators for
# render() below (presumably @click.command(), @click.argument('search_root'),
# @click.option('--project', ...), @_log.simple_verbosity_option(logger)).
# As written they are no-op / invalid fragments — confirm against the original.
()
('search_root')
('--project', help='only render project matches the given name')
_log.simple_verbosity_option(logger)
def render(search_root, project):
    """Render every matrix directory found under *search_root*.

    When *project* is given, only matrix directories whose basename equals
    it are rendered.
    """
    template_paths = []
    matrix_dirs = []
    # One walk collects both the jinja templates and the matrix.yml locations.
    for cur_dir, _dirs, files in os.walk(search_root):
        for filename in files:
            if filename == 'matrix.yml':
                logger.info('Found matrix in %s', cur_dir)
                matrix_dirs.append(cur_dir)
            elif filename.endswith('.jinja'):
                template_paths.append(os.path.join(cur_dir, filename))
    jinja2_env = jinja2.Environment(loader=FilesLoader(template_paths))
    for matrix_dir in matrix_dirs:
        if project and os.path.basename(matrix_dir) != project:
            continue
        render_matrix(jinja2_env, matrix_dir)
class OptionSeriesSunburstSonificationContexttracksMappingFrequency(Options):
    """Generated Highcharts option wrapper for
    `series.sunburst.sonification.contextTracks.mapping.frequency`.

    NOTE(review): every name below is defined twice (a getter followed by a
    setter).  The original generated source presumably decorated these with
    @property / @<name>.setter, which appears to have been stripped here —
    as written, each second definition shadows the first.  Confirm against
    the original file.
    """

    def mapFunction(self):
        # Getter: no default (None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class ModelRunSchema(ExtendedBaseModel):
    """Schema describing a single dbt model run result."""
    unique_id: str
    invocation_id: str
    name: str
    # Aliased so payloads can use the reserved-looking key "schema".
    schema_name: Optional[str] = Field(alias='schema', default=None)
    status: str
    execution_time: float
    compiled_code: Optional[str] = None
    full_refresh: Optional[bool] = None
    materialization: Optional[str] = None
    generated_at: str
    # NOTE(review): the bare tuple below looks like a stripped pydantic
    # @validator('generated_at', pre=True) decorator for the method that
    # follows — confirm against the original source.
    ('generated_at', pre=True)
    def format_generated_at(cls, generated_at):
        # Normalize partial ISO timestamps into a full ISO-8601 string.
        return convert_partial_iso_format_to_full_iso_format(generated_at)
def search_nswlib_jaccard_topk(index_data, query_data, index_params, k):
    """Top-k Jaccard search using an nmslib HNSW index.

    Builds (and caches) the index keyed by the serialized index_params, then
    runs every query, re-ranking the approximate neighbours by exact Jaccard.
    Returns (indexing_stats, results, per_query_times).
    """
    import nmslib
    index_sets, index_keys, _, index_cache = index_data
    query_sets, query_keys, _ = query_data
    cache_key = json.dumps(index_params)
    if cache_key not in index_cache:
        print('Building HNSW Index.')
        start = time.perf_counter()
        index = nmslib.init(method='hnsw', space='jaccard_sparse',
                            data_type=nmslib.DataType.OBJECT_AS_STRING)
        # Sets are serialized as space-joined token strings for the
        # jaccard_sparse space.
        index.addDataPointBatch([' '.join(str(v) for v in s) for s in index_sets],
                                range(len(index_keys)))
        index.createIndex(index_params)
        indexing_time = time.perf_counter() - start
        print('Indexing time: {:.3f}.'.format(indexing_time))
        index_cache[cache_key] = (index, {'indexing_time': indexing_time})
    index, indexing = index_cache[cache_key]
    print('Querying.')
    times = []
    results = []
    index.setQueryTimeParams({'efSearch': index_params['efConstruction']})
    progress = tqdm.tqdm(zip(query_sets, query_keys), total=len(query_keys),
                         desc='Querying', unit=' query')
    for query_set, query_key in progress:
        start = time.perf_counter()
        neighbor_ids, _ = index.knnQuery(' '.join(str(v) for v in query_set), k)
        # Re-rank approximate neighbours by exact Jaccard, best first.
        scored = [[index_keys[i], compute_jaccard(query_set, index_sets[i])]
                  for i in neighbor_ids]
        scored.sort(key=lambda pair: pair[1], reverse=True)
        duration = time.perf_counter() - start
        times.append(duration)
        results.append((query_key, scored))
    return (indexing, results, times)
class OptionSeriesBubbleSonificationDefaultinstrumentoptions(Options):
    """Generated Highcharts option wrapper for
    `series.bubble.sonification.defaultInstrumentOptions`.

    NOTE(review): the paired same-named defs below (getter then setter) were
    presumably decorated with @property / @<name>.setter in the original
    generated source; as written, each second definition shadows the first.
    Confirm against the original file.
    """

    def activeWhen(self) -> 'OptionSeriesBubbleSonificationDefaultinstrumentoptionsActivewhen':
        # Nested sub-option object.
        return self._config_sub_data('activeWhen', OptionSeriesBubbleSonificationDefaultinstrumentoptionsActivewhen)

    def instrument(self):
        # Default instrument is 'piano'.
        return self._config_get('piano')

    def instrument(self, text: str):
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionSeriesBubbleSonificationDefaultinstrumentoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesBubbleSonificationDefaultinstrumentoptionsMapping)

    def midiName(self):
        return self._config_get(None)

    def midiName(self, text: str):
        self._config(text, js_type=False)

    def pointGrouping(self) -> 'OptionSeriesBubbleSonificationDefaultinstrumentoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesBubbleSonificationDefaultinstrumentoptionsPointgrouping)

    def roundToMusicalNotes(self):
        # Defaults to True.
        return self._config_get(True)

    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    def showPlayMarker(self):
        return self._config_get(True)

    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    def type(self):
        return self._config_get('instrument')

    def type(self, text: str):
        self._config(text, js_type=False)
def test_details_view():
    """Details view behaviour: hidden when can_view_details is off, shown when
    on, and restricted to column_details_list when one is configured."""
    (app, db, admin) = setup()
    (Model1, Model2) = create_models(db)
    # Three views: no details, full details, and details limited to one column.
    view_no_details = CustomModelView(Model1)
    admin.add_view(view_no_details)
    view_w_details = CustomModelView(Model2, can_view_details=True)
    admin.add_view(view_w_details)
    string_field_view = CustomModelView(Model2, can_view_details=True, column_details_list=['string_field'], endpoint='sf_view')
    admin.add_view(string_field_view)
    fill_db(Model1, Model2)
    client = app.test_client()
    m1_id = Model1.objects.first().id
    m2_id = Model2.objects.first().id
    # List pages: the details link only appears when details are enabled.
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    assert ('/admin/model1/details/' not in data)
    rv = client.get('/admin/model2/')
    data = rv.data.decode('utf-8')
    assert ('/admin/model2/details/' in data)
    # Details disabled -> redirect (302) away from the details URL.
    url = ('/admin/model1/details/?url=%2Fadmin%2Fmodel1%2F&id=' + str(m1_id))
    rv = client.get(url)
    assert (rv.status_code == 302)
    # Details enabled -> all fields rendered.
    url = ('/admin/model2/details/?url=%2Fadmin%2Fmodel2%2F&id=' + str(m2_id))
    rv = client.get(url)
    data = rv.data.decode('utf-8')
    assert ('String Field' in data)
    assert ('string_field_val_1' in data)
    assert ('Int Field' in data)
    # column_details_list restricts the rendered fields to string_field only.
    url = ('/admin/sf_view/details/?url=%2Fadmin%2Fsf_view%2F&id=' + str(m2_id))
    rv = client.get(url)
    data = rv.data.decode('utf-8')
    assert ('String Field' in data)
    assert ('string_field_val_1' in data)
    assert ('Int Field' not in data)
def _dropout_array(model: Model[(InT, InT)], X: ArrayXd, is_train: bool) -> Tuple[(ArrayXd, Callable)]:
    """Apply the model's dropout mask to X, returning (masked_X, backprop)."""
    mask = model.ops.get_dropout_mask(X.shape, model.attrs['dropout_rate'])

    def backprop(dY: ArrayXd) -> ArrayXd:
        # Gradient flows only through the units the mask kept.
        return dY * mask

    return cast(ArrayXd, X * mask), backprop
def test_chain_adapter_insertion(chain: Chain):
    """inject() splices the adapter between parent and adaptee; eject() undoes it."""
    parent = chain
    adaptee = parent.Chain
    adapter = DummyChainAdapter(adaptee)
    # Before injection the adaptee hangs directly off the parent.
    assert (adaptee.parent == parent)
    adapter.inject()
    # After injection: parent -> adapter -> adaptee, and iterating the parent
    # now yields the adapter instead of the adaptee.
    assert (adapter.parent == parent)
    assert (adaptee.parent == adapter)
    assert (adapter in iter(parent))
    assert (adaptee not in iter(parent))
    adapter.eject()
    # eject() restores the original wiring and detaches the adapter.
    assert (adapter.parent is None)
    assert (adaptee.parent == parent)
    assert (adapter not in iter(parent))
    assert (adaptee in iter(parent))
class MPCGameService():
    """Resolves an MPC game's OneDocker package and builds its command-line
    arguments from the game's registered argument spec."""

    def __init__(self, mpc_game_repository: MPCGameRepository) -> None:
        self.logger: logging.Logger = logging.getLogger(__name__)
        self.mpc_game_repository: MPCGameRepository = mpc_game_repository

    def build_onedocker_args(self, game_name: str, mpc_party: MPCParty, server_ip: Optional[str]=None, port: Optional[int]=None, **kwargs: object) -> Tuple[(str, str)]:
        """Return (onedocker_package_name, cmd_args_string) for the game/party."""
        mpc_game_config = self.mpc_game_repository.get_game(game_name)
        return (mpc_game_config.onedocker_package_name, self._build_cmd(mpc_game_config=mpc_game_config, mpc_party=mpc_party, server_ip=server_ip, port=port, **kwargs))

    def _build_cmd(self, mpc_game_config: MPCGameConfig, mpc_party: MPCParty, server_ip: Optional[str]=None, port: Optional[int]=None, **kwargs: object) -> str:
        """Serialize the prepared argument dict into a command-line string."""
        args = self._prepare_args(mpc_game_config=mpc_game_config, mpc_party=mpc_party, server_ip=server_ip, port=port, **kwargs)
        return build_cmd_args(**args)

    def _prepare_args(self, mpc_game_config: MPCGameConfig, mpc_party: MPCParty, server_ip: Optional[str]=None, port: Optional[int]=None, **kwargs: object) -> Dict[(str, Any)]:
        """Validate kwargs against the game's argument spec and assemble the
        final argument mapping.

        Raises ValueError for a client without server_ip, unexpected kwargs,
        or a missing required argument.
        """
        all_arguments: Dict[(str, Any)] = {}
        # Party encoding: 1 = server, 2 = client.
        all_arguments['party'] = (1 if (mpc_party == MPCParty.SERVER) else 2)
        if (mpc_party == MPCParty.CLIENT):
            if (server_ip is None):
                raise ValueError('Client must provide a server ip address.')
            all_arguments['server_ip'] = server_ip
        if (port is not None):
            all_arguments['port'] = port
        # Reject kwargs the game spec does not declare.
        argument_names = {argument.name for argument in mpc_game_config.arguments}
        extra_kwargs = [argument for argument in kwargs if (argument not in argument_names)]
        if extra_kwargs:
            raise ValueError(f'Unexpected kwargs: {extra_kwargs}. Expected only {argument_names}')
        for argument in mpc_game_config.arguments:
            key = argument.name
            value = kwargs.get(key)
            if ((value is None) and argument.required):
                # game_name is special-cased: it defaults to the config's name.
                if (key == 'game_name'):
                    all_arguments[key] = mpc_game_config.game_name
                else:
                    raise ValueError(f'Missing required argument {key}!')
            if (value is not None):
                all_arguments[key] = value
        return all_arguments
class FaucetDisconnectTest(FaucetUntaggedTest):
    """Verify FAUCET logs 'unknown datapath' for a wrong DPID and then
    recovers once the correct DPID is restored."""

    def update_config(self, dpid):
        """Rewrite the running config with *dpid* and warm-reload FAUCET."""
        conf = self._get_faucet_conf()
        conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
        self.reload_conf(conf, self.faucet_config_path, restart=True,
                         cold_start=False, change_expected=False)

    def test_untagged(self):
        # 64-bit all-ones mask keeps the derived DPID in range.
        mask = int(16 * 'f', 16)
        # Derive a DPID guaranteed not to match the switch by incrementing
        # ours (wrapped to 64 bits).  BUG FIX: the original line read
        # `(int(self.dpid) + ) & mask`, a syntax error with the increment
        # operand missing; restored as `+ 1`.
        bad_dpid = (int(self.dpid) + 1) & mask
        self.update_config(dpid=bad_dpid)
        # FAUCET should repeatedly reject the unknown datapath.
        self.wait_until_matching_lines_from_faucet_log_files(
            '.*ERROR.*unknown datapath', timeout=60, count=4)
        # Restore the real DPID and confirm normal forwarding resumes.
        self.update_config(dpid=self.dpid)
        super().test_untagged()
def add_profiler(file_pairs, workdir, op_type, output_name, code):
    """Write profiler CUDA source file(s) for one op and queue a compile job.

    Parameters
    ----------
    file_pairs : list
        Output list; one ``(source_path_or_paths, object_path)`` tuple is
        appended unless the object file already exists.
    workdir : str
        Root working directory; sources land in ``<workdir>/profiler/<op_type>``.
    op_type : str
        Operator type, used as the subdirectory name.
    output_name : str
        Basename of the object file to be built.
    code : str or dict
        A single CUDA source string (written to ``<output_name>.cu``) or a
        mapping of source basenames to source strings for multi-file ops.
    """
    prefix = os.path.join(workdir, 'profiler', op_type)
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` when several builds run concurrently.
    os.makedirs(prefix, exist_ok=True)
    obj_path = os.path.join(prefix, output_name)
    if os.path.exists(obj_path):
        # Object already built previously; nothing to schedule.
        return
    if isinstance(code, dict):
        # Multi-file op: write every source and pair the full list with the object.
        src_paths = []
        for (src_name, src_code) in code.items():
            src_path = os.path.join(prefix, (src_name + '.cu'))
            with open(src_path, 'w') as f:
                f.write(src_code)
            src_paths.append(src_path)
        file_pairs.append((src_paths, obj_path))
    else:
        src_path = os.path.join(prefix, (output_name + '.cu'))
        with open(src_path, 'w') as f:
            f.write(code)
        file_pairs.append((src_path, obj_path))
class OptionSeriesArcdiagramSonificationTracksMappingPlaydelay(Options):
    """Generated Highcharts option wrapper for
    `series.arcdiagram.sonification.tracks.mapping.playDelay`.

    NOTE(review): each name is defined twice (getter then setter); the original
    generated source presumably used @property / @<name>.setter decorators,
    which appear to have been stripped — as written the second definition
    shadows the first.  Confirm against the original file.
    """

    def mapFunction(self):
        # Getter: no default (None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def cli_get_output_printer(output_format, fields, list_command=False):
    """Select the printer implementation for the requested output format."""
    if output_format == 'json':
        # List commands use the list-aware JSON printer.
        return JsonPrinterListCommand(fields) if list_command else JsonPrinter(fields)
    if output_format == 'text':
        return ColumnTextPrinter(fields)
    # Any other format falls back to the row-oriented text printer.
    return RowTextPrinter(fields)
def crop(self, context, distort=False, selection=None):
    """Scale and translate the selected UVs so their bounding box fills the
    0..1 UV space (minus padding), optionally allowing non-uniform scaling.

    NOTE(review): behaviour depends heavily on Blender mode/selection state;
    statement order here (mode switches, pivot/cursor save & restore) is
    significant — do not reorder.
    """
    # Remember the UV select mode so it can be restored at the end.
    selection_mode = bpy.context.scene.tool_settings.uv_select_mode
    selected_obs = [ob for ob in bpy.context.selected_objects if (ob.type == 'MESH')]
    bpy.ops.uv.select_split()
    if (len(selected_obs) <= 1):
        # Single-object path: compute the bounding box from the active mesh.
        bm = bmesh.from_edit_mesh(bpy.context.active_object.data)
        uv_layers = bm.loops.layers.uv.verify()
        if (selection is None):
            selection = utilities_uv.get_selected_uv_faces(bm, uv_layers)
        if (not selection):
            return {'CANCELLED'}
        boundsAll = utilities_uv.get_BBOX(selection, bm, uv_layers)
    elif (len(selected_obs) > 1):
        # Multi-object path: gather per-object bounding boxes and merge them.
        unique_selected_obs = [ob for ob in bpy.context.objects_in_mode_unique_data if (ob.type == 'MESH')]
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
        bpy.ops.object.select_all(action='DESELECT')
        bpy.context.view_layer.objects.active = unique_selected_obs[0]
        for o in unique_selected_obs:
            o.select_set(True)
        bpy.ops.object.mode_set(mode='EDIT', toggle=False)
        all_ob_bounds = utilities_uv.multi_object_loop(utilities_uv.getSelectionBBox, need_results=True)
        if (not any(all_ob_bounds)):
            return {'CANCELLED'}
        boundsAll = utilities_uv.get_BBOX_multi(all_ob_bounds)
    # Save pivot/cursor, then transform around the 2D cursor at the origin.
    prepivot = bpy.context.space_data.pivot_point
    precursor = bpy.context.space_data.cursor_location.copy()
    bpy.context.space_data.pivot_point = 'CURSOR'
    bpy.context.space_data.cursor_location = (0.0, 0.0)
    # Scale so the bounds fill 1.0 minus the configured padding.
    padding = utilities_ui.get_padding()
    scale_u = ((1.0 - padding) / boundsAll['width'])
    scale_v = ((1.0 - padding) / boundsAll['height'])
    if (not distort):
        # Uniform scaling: keep aspect ratio by using the smaller factor.
        scale_u = scale_v = min(scale_u, scale_v)
    bpy.ops.transform.resize(value=(scale_u, scale_v, 1), constraint_axis=(False, False, False), mirror=False, use_proportional_edit=False)
    # Move the scaled bounds so the min corner sits at the padding offset.
    delta_position = Vector((((padding / 2) - (scale_u * boundsAll['min'].x)), ((padding / 2) - (scale_v * boundsAll['min'].y)), 0))
    # UDIM tiles other than 1001 need an extra tile-grid offset.
    (udim_tile, column, row) = utilities_uv.get_UDIM_tile_coords(bpy.context.active_object)
    if (udim_tile != 1001):
        delta_position += Vector((column, row, 0))
    bpy.ops.transform.translate(value=delta_position, mirror=False, use_proportional_edit=False)
    if (len(selected_obs) > 1):
        # Restore the original multi-object selection.
        bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
        for o in selected_obs:
            o.select_set(True)
        bpy.ops.object.mode_set(mode='EDIT', toggle=False)
    # Restore pivot, cursor and UV select mode.
    bpy.context.space_data.pivot_point = prepivot
    bpy.context.space_data.cursor_location = precursor
    bpy.ops.uv.select_mode(type='VERTEX')
    bpy.context.scene.tool_settings.uv_select_mode = selection_mode
def get_datasets_and_modules(tree: Union[(eql.ast.BaseNode, kql.ast.BaseNode)]) -> tuple:
    """Walk an EQL or KQL AST and collect the event.module / event.dataset
    values the query constrains on.  Returns (datasets, modules) as sets."""
    modules = set()
    datasets = set()
    for node in tree:
        # EQL equality: event.module == "x" / event.dataset == "x".
        if (isinstance(node, eql.ast.Comparison) and (node.comparator == node.EQ) and isinstance(node.right, eql.ast.String)):
            if (node.left == eql.ast.Field('event', ['module'])):
                modules.add(node.right.render())
            elif (node.left == eql.ast.Field('event', ['dataset'])):
                datasets.add(node.right.render())
        # EQL "in" sets: event.module in ("x", "y").
        elif isinstance(node, eql.ast.InSet):
            # NOTE(review): `add(node.get_literals())` inserts the literals
            # collection as a single element, while the KQL branches below use
            # `update`.  If get_literals() returns a list this would raise
            # TypeError (unhashable) — presumably `update` was intended;
            # confirm against eql.ast.InSet.get_literals before changing.
            if (node.expression == eql.ast.Field('event', ['module'])):
                modules.add(node.get_literals())
            elif (node.expression == eql.ast.Field('event', ['dataset'])):
                datasets.add(node.get_literals())
        # KQL field comparisons: event.module : "x" (possibly multiple values).
        elif (isinstance(node, kql.ast.FieldComparison) and (node.field == kql.ast.Field('event.module'))):
            modules.update((child.value for child in node.value if isinstance(child, kql.ast.String)))
        elif (isinstance(node, kql.ast.FieldComparison) and (node.field == kql.ast.Field('event.dataset'))):
            datasets.update((child.value for child in node.value if isinstance(child, kql.ast.String)))
    return (datasets, modules)
class CdsRetriever(FileSource):
    """climetlab source that retrieves a CDS (Climate Data Store) dataset,
    splitting the request if asked and downloading in parallel threads."""
    sphinxdoc = '\n    CdsRetriever\n    '

    def __init__(self, dataset, *args, **kwargs):
        super().__init__()
        assert isinstance(dataset, str)
        # Allow the request to be passed either as kwargs or as a single dict.
        if len(args):
            assert (len(args) == 1)
            assert isinstance(args[0], dict)
            assert (not kwargs)
            kwargs = args[0]
        requests = self.requests(**kwargs)
        # Instantiated for its side effects (presumably credential/connection
        # setup) — the return value is unused.  TODO confirm.
        client()
        nthreads = min(self.settings('number-of-download-threads'), len(requests))
        if (nthreads < 2):
            # Sequential download.
            self.path = [self._retrieve(dataset, r) for r in requests]
        else:
            # Parallel download with a progress bar over completed requests.
            with SoftThreadPool(nthreads=nthreads) as pool:
                futures = [pool.submit(self._retrieve, dataset, r) for r in requests]
                iterator = (f.result() for f in futures)
                self.path = list(tqdm(iterator, leave=True, total=len(requests)))

    def _retrieve(self, dataset, request):
        """Download one request into the cache, keyed by (dataset, request)."""
        def retrieve(target, args):
            client().retrieve(args[0], args[1], target)
        return self.cache_file(retrieve, (dataset, request), extension=EXTENSIONS.get(request.get('format'), '.cache'))

    # NOTE(review): the two bare tuples below look like stripped normalizer
    # decorators for requests() (e.g. @normalize('date', ...)); as written
    # they are no-op expressions.  Confirm against the original source.
    ('date', 'date-list(%Y-%m-%d)')
    ('area', 'bounding-box(list)')
    def requests(self, **kwargs):
        """Expand kwargs into one or more CDS request dicts.

        Fills in all months/days when only 'year' is given, and splits the
        request into one per value of the 'split_on' key when requested.
        """
        if ('year' in kwargs):
            if ('month' not in kwargs):
                kwargs['month'] = [f'{(i + 1):02}' for i in range(0, 12)]
            if ('day' not in kwargs):
                kwargs['day'] = [f'{(i + 1):02}' for i in range(0, 31)]
        split_on = kwargs.pop('split_on', None)
        if ((split_on is None) or (not isinstance(kwargs.get(split_on), (list, tuple)))):
            return [kwargs]
        # One request per value of the split key.
        result = []
        for v in kwargs[split_on]:
            r = dict(**kwargs)
            r[split_on] = v
            result.append(r)
        return result

    def to_pandas(self, **kwargs):
        """Read the downloaded data as pandas, with CDS-appropriate defaults
        (zipped CSV with '#' comments and parsed report timestamps)."""
        pandas_read_csv_kwargs = dict(comment='#', parse_dates=['report_timestamp'], skip_blank_lines=True, compression='zip')
        pandas_read_csv_kwargs.update(kwargs.get('pandas_read_csv_kwargs', {}))
        odc_read_odb_kwargs = dict()
        odc_read_odb_kwargs.update(kwargs.get('odc_read_odb_kwargs', {}))
        return super().to_pandas(pandas_read_csv_kwargs=pandas_read_csv_kwargs, odc_read_odb_kwargs=odc_read_odb_kwargs, **kwargs)
def test_traverse_args():
    """A List provider's traversal yields exactly its argument providers."""
    first = providers.Object('bar')
    second = providers.Object('baz')
    list_provider = providers.List('foo', first, second)
    traversed = list(list_provider.traverse())
    assert len(traversed) == 2
    assert first in traversed
    assert second in traversed
class guessCardManager():
    """Persistent config manager for the card-guessing game.

    Tracks per-session cooldowns, last-send timestamps and a ban list,
    backed by a JSON file on disk (self.cfg mirrors the file content).
    """

    def __init__(self) -> None:
        # Resolve the config path from the nonebot driver config; fall back
        # to the default location when that is unavailable.  (Narrowed from
        # the original bare `except:` so KeyboardInterrupt/SystemExit pass.)
        try:
            self.path = str(Path(nonebot.get_driver().config.ocg_bot_guess_cfg_path, 'ocg_bot_guess_cfg.json'))
        except Exception:
            self.path = 'data/ocg_bot/ocg_bot_guess_cfg.json'
        # Default cooldown in seconds, clamped to be non-negative.
        self.guess_cd = 20
        self.guess_cd = (self.guess_cd if (self.guess_cd > 0) else 0)
        self.ReadCfg()

    def ReadCfg(self) -> dict:
        """Load the JSON config from disk; on any failure reset to empty
        config, persist it, and return {}."""
        try:
            with open(self.path, 'r', encoding='utf-8') as f:
                self.cfg = json.loads(f.read())
            return self.cfg
        except Exception as e:
            logger.warning(f'''setu_perm_cfg.json ,
{e}''')
            self.cfg = {}
            self.WriteCfg()
            return {}

    def WriteCfg(self):
        """Persist self.cfg to disk, creating the config directory if needed.

        BUG FIX: the original used ``self.path[:(- 18)]`` to derive the
        directory, but the filename is 22 characters long, so the slice left
        a bogus ``.../ocg_`` path and makedirs created a junk directory.
        Use the real parent directory instead (same mode, 0o777 == 511).
        """
        os.makedirs(os.path.dirname(self.path), mode=511, exist_ok=True)
        with open(self.path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.cfg))

    def ReadLastSend(self, sessionId):
        """Return the last-send timestamp for a session, or 0 if unknown."""
        try:
            return self.cfg['last'][sessionId]
        except KeyError:
            return 0

    def ReadCd(self, group_sessionId):
        """Return the cooldown configured for a group session, or the default."""
        try:
            return self.cfg[group_sessionId]['cd']
        except KeyError:
            return self.guess_cd

    def ReadBanList(self, sessionId):
        """Return True when the session is on the ban list."""
        try:
            return (sessionId in self.cfg['ban'])
        except KeyError:
            return False

    def CheckPermission(self, sessionId: str, groupSession: str, userType: str='group'):
        """Raise PermissionError when the group is banned or the cooldown
        has not elapsed; return None when the action is allowed."""
        if self.ReadBanList(groupSession):
            raise PermissionError(f'!')
        # Private chats (no group) use the per-user cooldown; groups use the
        # group cooldown against the user's last-send time.
        if (groupSession is None):
            timeLeft = ((self.ReadCd(sessionId) + self.ReadLastSend(sessionId)) - time.time())
        else:
            timeLeft = ((self.ReadCd(groupSession) + self.ReadLastSend(sessionId)) - time.time())
        if (timeLeft > 0):
            # Break the remaining time into h/m/s for the user-facing message.
            (hours, minutes, seconds) = (0, 0, 0)
            if (timeLeft >= 60):
                (minutes, seconds) = divmod(timeLeft, 60)
                (hours, minutes) = divmod(minutes, 60)
            else:
                seconds = timeLeft
            cd_msg = f"{((str(round(hours)) + '') if hours else '')}{((str(round(minutes)) + '') if minutes else '')}{((str(round(seconds, 3)) + '') if seconds else '')}"
            raise PermissionError(f'{random.choice(guess_sendwitchoutcd)} CD{cd_msg}!')

    def UpdateLastSend(self, sessionId):
        """Record 'now' as the session's last-send time (in memory only)."""
        try:
            self.cfg['last'][sessionId] = time.time()
        except KeyError:
            self.cfg['last'] = {sessionId: time.time()}

    def UpdateCd(self, sessionId: str, cdTime: int):
        """Set a session's cooldown (clamped to >= 0) and persist; returns a
        human-readable change summary."""
        cdTime = (cdTime if (cdTime > 0) else 0)
        try:
            cdTime_old = self.cfg[sessionId]['cd']
        except KeyError:
            cdTime_old = ''
        if (sessionId not in self.cfg.keys()):
            self.cfg[sessionId] = {}
            self.WriteCfg()
        self.cfg[sessionId]['cd'] = cdTime
        self.WriteCfg()
        return f'cd {cdTime_old} -> {cdTime}'

    def UpdateBanList(self, sessionId: str, add_mode: bool):
        """Add to (add_mode=True) or remove from the ban list; persists the
        change and returns a status message."""
        if add_mode:
            try:
                if (sessionId in self.cfg['ban']):
                    return f''
            except KeyError:
                # First ban ever: create the list.
                self.cfg['ban'] = []
            self.cfg['ban'].append(sessionId)
            self.WriteCfg()
            return f''
        else:
            # NOTE(review): a missing 'ban' key here raises KeyError (only
            # ValueError is caught) — same as the original; confirm intended.
            try:
                self.cfg['ban'].remove(sessionId)
                self.WriteCfg()
                return f''
            except ValueError:
                return f''
def initialize_variation_modules(variation_modules, mode):
    """Validate and init-call every loaded variation module.

    Returns 1 when a module is missing a required handler; otherwise None.
    Failures raised by a module's init are logged but do not abort the loop.
    """
    log.info('Initializing variation modules...')
    required_handlers = ('init', 'variate', 'shutdown')
    for name, modules_contexts in variation_modules.items():
        body = modules_contexts[0]
        # Abort immediately on the first module missing a handler.
        missing = [h for h in required_handlers if h not in body]
        if missing:
            log.error('missing "%s" handler at variation module "%s"' % (missing[0], name))
            return 1
        try:
            body['init'](options=body['args'], mode=mode)
        except Exception as exc:
            log.error('Variation module "%s" from "%s" load FAILED: %s' % (body['alias'], body['path'], exc))
        else:
            log.info('Variation module "%s" from "%s" loaded OK' % (body['alias'], body['path']))
class Config():
    """Static rmtoo project configuration: input locations, topic sets and
    the list of output artefacts to generate."""
    # Stakeholder groups referenced by requirement priorities.
    stakeholders = ['development', 'management', 'users', 'customers']
    inventors = ['flonatel']
    # Requirements input: source directory, commit-interval bounds, default
    # language and the accepted dependency notation(s).
    reqs_spec = {'directory': 'doc/requirements', 'commit_interval': ['FILES', 'FILES'], 'default_language': 'en_GB', 'dependency_notation': set(['Solved by'])}
    # Topic set name -> [topic directory, master document].
    topic_specs = {'ts_common': ['doc/topics', 'ReqsDocument']}
    analytics_specs = {'stop_on_errors': False, 'topics': 'ts_common'}
    # Where to look for constraint definitions (local checkout, then system).
    constraints_specs = {'search_dirs': ['rmtoo/collection/constraints', '/usr/share/pyshared/rmtoo/collection/constraints']}
    # Output artefacts: [writer name, [topic set, output path, extra args...]].
    # NOTE(review): 'artifacts//reqtopics.tex' contains a double slash —
    # harmless on POSIX but presumably a typo; confirm before changing.
    output_specs = [['prios', ['ts_common', 'artifacts/reqsprios.tex', {'start_date': '2011-04-25'}]], ['graph', ['ts_common', 'artifacts/req-graph1.dot']], ['graph2', ['ts_common', 'artifacts/req-graph2.dot']], ['stats_reqs_cnt', ['ts_common', 'artifacts/stats_reqs_cnt.csv']], ['latex2', ['ts_common', 'artifacts//reqtopics.tex']], ['html', ['ts_common', 'artifacts/html', 'doc/html/header.html', 'doc/html/footer.html']], ['version1', ['ts_common', 'artifacts/reqs-version.txt']], ['oopricing1', ['ts_common', 'artifacts/reqspricing']], ['tlp1', ['ts_common', 'artifacts/reqdeps1.tlp']], ['stats_burndown1', ['ts_common', 'artifacts/stats_burndown.csv', '2011-04-25']]]
    # Reduced artefact list (presumably used by a secondary run) — TODO confirm.
    output_specs2 = [['tlp1', ['ts_common', 'artifacts/reqdeps1.tlp']]]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.