code stringlengths 281 23.7M |
|---|
def butterworth(data, frequency, cutoff, filter_type=FilterType.LOW_PASS, order=3, axis=(- 1), precision='float32'):
    """Apply a Butterworth IIR filter to *data* along *axis*.

    Arguments are validated/normalized by the module helper before the
    filter coefficients are designed and applied.
    """
    checked = _butterworth_args_check(data, frequency, cutoff, filter_type, order, axis, precision)
    data, frequency, cutoff, filter_type, order, axis, precision = checked
    b_coef, a_coef = _signal_butter_wrapper(order, frequency, cutoff, filter_type)
    if b_coef.dtype != _np.dtype(precision):
        # Cast both coefficient arrays so filtering runs at the requested precision.
        b_coef = b_coef.astype(precision)
        a_coef = a_coef.astype(precision)
    return _signal.lfilter(b_coef, a_coef, data, axis)
class ErrorFrame(Frame):
    """Frame carrying an error report: a numeric code, a fixed-size tracing
    blob and a human-readable message."""

    TYPE = 255  # wire identifier for this frame type

    message: str
    tracing: bytes
    code: int

    def __init__(self):
        super().__init__()
        self.code = 0
        self.tracing = bytes(25)  # tracing field is always exactly 25 bytes
        self.message = ''

    def read_payload(self, fp: IOWrapper, size: int):
        """Deserialize the payload from *fp*; *size* must equal the bytes consumed."""
        offset = 0
        self.code = fp.read_byte('error.code')
        offset += 1
        self.tracing = fp.read_bytes(25, 'error.tracing')
        offset += 25
        message_len = fp.read_short('error.message_len')
        offset += 2
        self.message = fp.read_string(message_len, 'error.message')
        offset += message_len
        assert (offset == size)

    def get_payload_size(self):
        """Return serialized payload size: code(1) + tracing(25) + message_len(2) + message bytes."""
        # BUG FIX: the previous expression referenced an undefined name `_`
        # (garbled per-field size annotations); compute the size directly.
        return 1 + 25 + 2 + len(self.message)

    def write_payload(self, fp: IOWrapper):
        """Serialize the payload to *fp* (exact mirror of read_payload)."""
        offset = 0
        fp.write_byte(self.code)
        offset += 1
        fp.write_bytes(self.tracing)
        offset += 25
        fp.write_short(len(self.message))
        offset += 2
        fp.write_string(self.message)
        offset += len(self.message)
class TestPseudoLabeler(unittest.TestCase):
    """Unit tests for the pseudo-labeler implementations."""

    def test_noop(self):
        # The no-op labeler must hand back its input unchanged.
        labeler = NoopPseudoLabeler()
        sample = np.random.randn(1)
        result = labeler.label(sample)
        torch.testing.assert_close(sample, result)

    def test_relabeltargetinbatch(self):
        # A teacher that halves its input must yield target == input / 2.
        teacher = DivideInputDictBy2()
        teacher.eval()
        teacher.device = torch.device('cpu')
        relabeler = RelabelTargetInBatch(teacher=teacher)
        batch = _get_input_data(n=2, use_input_target=True)
        expected = [{'input': item['input'], 'target': item['input'] / 2.0} for item in batch]
        produced = relabeler.label(batch)
        torch.testing.assert_close(produced, expected)
def fetch_production(zone_key: ZoneKey, session: (Session | None)=None, target_datetime: (datetime | None)=None, logger: Logger=getLogger(__name__)) -> list[dict[(str, Any)]]:
    """Fetch the live production breakdown for *zone_key*.

    Raises NotImplementedError for historical queries, which this parser
    does not support.
    """
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    raw = get_data(session)
    dt, breakdown = production_processor(raw, zone_key)
    result = ProductionBreakdownList(logger)
    result.append(zoneKey=zone_key, datetime=dt, source=SOURCE, production=breakdown)
    return result.to_list()
class GroupResourcePermissionMixin(BaseModel):
    """Mixin mapping a permission name to a (group, resource) pair."""

    # NOTE(review): trailing space inside the constraint name looks
    # accidental — confirm against the live schema before renaming it.
    __table_args__ = (sa.PrimaryKeyConstraint('group_id', 'resource_id', 'perm_name', name='pk_users_resources_permissions '), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'})

    # BUG FIX: the bare `_attr` / ('perm_name') lines were stripped
    # decorators; restored as SQLAlchemy `declared_attr` / `validates`
    # (assumed imported at module level — confirm).
    @declared_attr
    def __tablename__(self):
        return 'groups_resources_permissions'

    @declared_attr
    def group_id(self):
        return sa.Column(sa.Integer, sa.ForeignKey('groups.id', onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)

    @declared_attr
    def resource_id(self):
        return sa.Column(sa.Integer(), sa.ForeignKey('resources.resource_id', onupdate='CASCADE', ondelete='CASCADE'), primary_key=True, autoincrement=False)

    @declared_attr
    def perm_name(self):
        return sa.Column(sa.Unicode(50), primary_key=True)

    @validates('perm_name')
    def validate_perm_name(self, key, value):
        """Reject any permission name that is not fully lowercase."""
        if value != value.lower():
            raise AssertionError('perm_name needs to be lowercase')
        return value

    def __repr__(self):
        return '<GroupResourcePermission: g:%s, %s, r:%s>' % (self.group_id, self.perm_name, self.resource_id)
def get_sample(item_array, n_iter=None, sample_size=2):
    """Return a window of *sample_size* items from *item_array*.

    The array is shuffled in place (with a fixed seed, so reproducibly)
    whenever the window would run off the end or sits near the start of
    the cycle.

    :param item_array: numpy array (or sequence) to sample from; may be
        mutated by the in-place shuffle.
    :param n_iter: iteration counter selecting the window; None means 0.
    :param sample_size: number of items per window.
    """
    np.random.seed(42)  # fixed seed keeps shuffles reproducible across calls
    if n_iter is None:
        # BUG FIX: the old code raised TypeError (None * int) when n_iter
        # was left at its default; treat "no iteration" as iteration 0.
        n_iter = 0
    n = len(item_array)
    if n == 0:
        # Guard: `% n` below would raise ZeroDivisionError on empty input.
        return item_array[:0]
    start_idx = (n_iter * sample_size) % n
    # Reshuffle when the window would overrun the array, or early in a cycle.
    if (start_idx + sample_size) >= n or start_idx <= sample_size:
        np.random.shuffle(item_array)
    return item_array[start_idx:start_idx + sample_size]
def test_workflow_node_sw():
    """Round-trip a sub-workflow WorkflowNode through its IDL representation."""
    node = _workflow.WorkflowNode(sub_workflow_ref=_generic_id)
    assert node.sub_workflow_ref == _generic_id
    assert node.reference == _generic_id
    # Serialization to flyte IDL and back must preserve equality and refs.
    restored = _workflow.WorkflowNode.from_flyte_idl(node.to_flyte_idl())
    assert node == restored
    assert restored.reference == _generic_id
    assert restored.sub_workflow_ref == _generic_id
class IntersectionType(abcdtype):
    """AST-style node holding the member types of an intersection type."""

    _fields = ('types',)
    _attributes = ('lineno', 'col_offset')

    def __init__(self, types=(), lineno=0, col_offset=0, **ARGS):
        # FIX: default changed from the mutable `[]` to `()`; behavior is
        # identical because the value is always copied via list() below.
        abcdtype.__init__(self, **ARGS)
        self.types = list(types)
        self.lineno = int(lineno)
        self.col_offset = int(col_offset)
_validator
def validate_groups(request, **kwargs):
    """Resolve the 'groups' names in request.validated into Group rows.

    Unknown names are reported as a querystring error; on success the
    validated name list is replaced with the ORM objects.
    """
    names = request.validated.get('groups')
    if names is None:
        return
    db = request.db
    unknown = []
    resolved = []
    for name in names:
        row = db.query(Group).filter(Group.name == name).first()
        if row:
            resolved.append(row)
        else:
            unknown.append(name)
    if unknown:
        request.errors.add('querystring', 'groups', 'Invalid groups specified: {}'.format(', '.join(unknown)))
    else:
        request.validated['groups'] = resolved
class OptionSeriesVariablepieSonificationContexttracksMappingTime(Options):
    """Mapping options for the sonification context-track `time` setting.

    BUG FIX: every option here was defined twice (getter then setter) with
    no decorators, so the second `def` silently replaced the first and the
    getters were unreachable.  Restored the evidently stripped
    `@property` / `@<name>.setter` pairs.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def pot_job(hash_file=None, session=None, hash_mode=None, attack_mode=None, rules=None, pot_path=None, username=False):
    """Run a hashcat wordlist attack using words recovered from the potfile.

    Returns True when the wordlist was exhausted, False on timeout, or the
    raw error value returned by runner() when session setup failed.
    Raises ValueError (with hashcat's event log) if the session aborts.

    NOTE(review): `attack_mode` and `rules` are accepted but never used —
    the runner call hard-codes attack_mode=0; confirm whether that is
    intentional.
    """
    # `log_dir` is a module-level path (defined outside this chunk).
    pot_wordlist = valid.val_filepath(path_string=log_dir, file_string='pot_wordlist.txt')
    if (not pot_check(pot_wordlist)):
        # No usable pot wordlist yet: build one from the potfile.
        potter(pot_path, pot_wordlist)
    outfile = valid.val_filepath(path_string=log_dir, file_string='{}.cracked'.format(session))
    hcat = runner(hash_file=hash_file, session=session, wordlist=str(pot_wordlist), outfile=str(outfile), attack_mode=0, hash_mode=hash_mode, username=username, pot_path=pot_path, show=False, brain=False)
    hcat.event_connect(callback=finished_callback, signal='EVENT_CRACKER_FINISHED')
    hcat.event_connect(callback=any_callback, signal='ANY')
    counter = 0
    # Poll the hashcat session state once a second, for at most ~600 seconds.
    while (counter < 600):
        if ((hcat is None) or isinstance(hcat, str)):
            # runner() returned an error marker instead of a session object.
            return hcat
        hc_state = hcat.status_get_status_string()
        logger.debug('POT WORDLIST loop')
        if (hc_state == 'Exhausted'):
            break
        elif (hc_state == 'Aborted'):
            event_log = hcat.hashcat_status_get_log()
            raise ValueError(event_log)
        time.sleep(1)
        counter += 1
    else:
        # while/else: loop ran to 600 without breaking -> timed out.
        return False
    logger.debug('POT WORDLIST loop complete, quitting hashcat')
    hcat.hashcat_session_quit()
    hcat.reset()
    return True
def main(argv):
    """Tokenize, classify and NER-tag products listed in a CSV file.

    :param argv: argument vector ``[prog, model_dir, data_file]``.

    Writes ``<data_file base>.processed.csv`` with added 'category' and
    'brand' columns.
    """
    if len(argv) < 3:
        usage()  # NOTE(review): presumably exits — confirm usage() terminates
    # BUG FIX: the function accepted `argv` but read sys.argv; use the
    # parameter so callers can pass a custom argument vector.
    model_dir = argv[1]
    data_file = argv[2]
    tokenizer = WordTokenizer()
    tokenizer.load(os.path.join(model_dir, 'tokenizer'))
    classifier = ProductClassifier()
    classifier.load(os.path.join(model_dir, 'classifier'))
    ner = ProductNER()
    ner.load(os.path.join(model_dir, 'ner'))
    # Output path: replace the input extension with '.processed.csv'.
    out_name = '.'.join(data_file.split('.')[:(- 1)] + ['processed', 'csv'])
    with open(data_file, 'r', encoding='iso-8859-1') as f:
        reader = csv.DictReader(f)
        with open(out_name, 'w', encoding='utf-8') as outfile:
            writer = csv.DictWriter(outfile, fieldnames=(reader.fieldnames + ['category', 'brand']))
            writer.writeheader()
            for row in reader:
                processed_row = process(row, tokenizer, classifier, ner)
                print(processed_row)
                writer.writerow(processed_row)
def find_files_mentioning_zone(text):
    """Walk ROOT_PATH and print source files whose content matches *text* (regex)."""
    skip_dirs = ['mobileapp/ios', 'mobileapp/android', 'node_modules', 'dist', 'archived']
    extensions = ('.py', '.js', '.jsx', '.ts', '.tsx', '.yaml', '.json', '.md', '.html')
    matches = []
    for root, dirs, files in os.walk(ROOT_PATH):
        # Prune vendored / generated trees by path substring.
        if any(skip in root for skip in skip_dirs):
            continue
        for filename in files:
            if not filename.endswith(extensions):
                continue
            full_path = os.path.join(root, filename)
            with open(full_path) as handle:
                if re.search(text, handle.read()):
                    matches.append(os.path.relpath(full_path, ROOT_PATH))
    if not matches:
        print(f' Found no additional files mentioning "{text}" in contrib repository.')
        return
    print(f' Found {len(matches)} files mentioning {text}, please manually clean these files:')
    for match in matches:
        print(f' - {match}')
def extractCleverneckohomeWpcomstagingCom(item):
    """Map a release-feed item to a release message.

    Returns None for previews / items with no chapter or volume number,
    False when no known tag matched.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with neither chapter nor volume, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    for tagname, name, tl_type in [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def _format_instances(instances, namespace=None):
    """Normalize estimator instances into sorted (name, instance) tuples.

    Accepts either a flat list of instances / (name, instance) iterables,
    or a dict mapping preprocessing-case names to such lists.  Names are
    lower-cased and hyphenated, duplicates get a numeric suffix, and when
    a dict came in the result is re-nested per case.
    """
    nested = isinstance(instances, dict)
    if nested:
        # Flatten the per-case dict, remembering which case each instance
        # came from so the output can be re-nested at the end.
        instances_dict = instances
        vacuous = list()   # cases whose instance list was empty
        case_map = dict()  # instance -> normalized case name
        instances = list()
        for (case, instance_list) in sorted(instances_dict.items()):
            case = '-'.join(case.lower().split())
            if (not instance_list):
                vacuous.append((case, instance_list))
                continue
            for val in instance_list:
                instances.append(val)
                if isinstance(val, (list, tuple, set)):
                    # NOTE(review): the instance is taken as val[1] here but
                    # as val[-1] in the naming loop below — confirm these
                    # agree for iterables longer than two elements.
                    val = val[1]
                case_map[val] = case
    named_instances = list()
    for val in instances:
        if (not isinstance(val, (list, tuple, set))):
            instance = val
        else:
            instance = val[(- 1)]
        assert_valid_estimator(instance)
        try:
            if (instance is val):
                # Bare instance: derive the name from its class.
                tup = [instance.__class__.__name__.lower(), instance]
            else:
                # (name, instance) pair: normalize the given name.
                tup = ['-'.join(val[0].split()).lower(), val[(- 1)]]
            named_instances.append(tup)
        except Exception as e:
            msg = 'Could not format instance %s. Check that passed instance iterables follow correct syntax:\n- if multiple preprocessing cases, pass a dictionary with instance lists as values and case name as key.\n- else, pass list of (named) instances.\nSee documentation for further information.\nError details: %r'
            raise LayerSpecificationError((msg % (instance, e)))
    names = [tup[0] for tup in named_instances]
    if namespace:
        # Names already taken elsewhere also count toward duplication.
        names.extend(namespace)
    duplicates = Counter(names)
    duplicates = {key: val for (key, val) in duplicates.items() if (val > 1)}
    out = list()
    name_count = {key: 1 for key in duplicates}
    for (name, instance) in named_instances:
        if (name in duplicates):
            # Disambiguate duplicated names with a 1-based numeric suffix.
            current_name_count = name_count[name]
            name_count[name] += 1
            name += ('-%d' % current_name_count)
        out.append((name, instance))
    out = sorted(out)
    if nested:
        # Rebuild the per-case dict, restoring empty cases untouched.
        nested_out = dict()
        for (name, instance) in out:
            case = case_map[instance]
            if (case not in nested_out):
                nested_out[case] = list()
            nested_out[case].append((name, instance))
        for (k, v) in vacuous:
            nested_out[k] = v
        out = nested_out
    return out
def test_rsprfo_hcn_ts_xtb():
    """RS-P-RFO TS search on the HCN isomerization with XTB converges in 7 cycles."""
    geom = geom_from_library('hcn_iso_ts.xyz', coord_type='redund')
    geom.set_calculator(XTB())
    optimizer = RSPRFOptimizer(geom, thresh='gau_tight', max_micro_cycles=1)
    optimizer.run()
    assert optimizer.is_converged
    assert optimizer.cur_cycle == 7
class SessionTester(Session):
    """Session stub recording names of JS/CSS assets DEFINEd via send_command."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.assets_js = []
        self.assets_css = []

    def send_command(self, *command):
        # Only DEFINE commands are recorded; everything else is ignored.
        if command[0] != 'DEFINE':
            return
        if 'JS' in command[1]:
            _, _, name, _ = command
            self.assets_js.append(name)
        elif 'CSS' in command[1]:
            _, _, name, _ = command
            self.assets_css.append(name)
# BUG FIX: the bare `.parametrize(...)` below had lost its `@pytest.mark.`
# prefix (stripped decorator); restored — assumes pytest is imported at the
# top of this module (confirm).
@pytest.mark.parametrize('exclude_attrs, expected', [([], [{'id': '10', 'bu': 'a', 'env': 'prd'}]), (['bu'], [{'id': '10', 'env': 'prd'}]), (['bu', 'env'], [{'id': '10'}]), (['bu', 'env', 'not_there'], [{'id': '10'}])])
def test_load_with_exclude_attrs(exclude_attrs, one_acct_list, expected):
    """Excluded attributes (including unknown ones) are dropped from loaded accounts."""
    mal = acctload.MetaAccountLoader(one_acct_list, exclude_attrs=exclude_attrs)
    assert (mal.accts == expected), 'dicts are not equal'
    assert (set(mal.attributes()) == set(expected[0].keys()))
def lru_cache(func):
    """Memoize *func* keyed on immutable args and read-only ndarray buffers.

    Cached entries are dropped automatically (via weakref callbacks) when a
    backing array is garbage collected.  Calls with any writable array
    bypass the cache entirely, since the result could change under us.

    Raises ValueError if no argument is a numpy array.
    """
    cache = {}

    # BUG FIX: the bare `(func)` line was a stripped decorator; restored
    # `functools.wraps` so the wrapper keeps func's name and docstring.
    @functools.wraps(func)
    def wrapped(*args):
        key = []
        bases = []
        for arg in args:
            if isinstance(arg, numpy.ndarray):
                for base in _array_bases(arg):
                    if base.flags.writeable:
                        # Mutable input: the result may change, don't cache.
                        return func(*args)
                    bases.append(base if base.base is None else base.base)
                # Identify the array by its buffer address, layout and dtype.
                key.append(tuple(map(arg.__array_interface__.__getitem__, ['data', 'strides', 'shape', 'typestr'])))
            else:
                key.append((type(arg), arg))
        if not bases:
            raise ValueError('arguments must include at least one array')
        key = tuple(key)
        try:
            (v, refs_) = cache[key]
        except KeyError:
            v = func(*args)
            assert _isimmutable(v)
            popkey = functools.partial(cache.pop, key)
            # Weakrefs evict the entry as soon as a backing array dies.
            cache[key] = (v, [weakref.ref(base, popkey) for base in bases])
        return v

    wrapped.cache = cache
    return wrapped
# NOTE(review): the bare ('/controllers') below looks like a web-route
# decorator whose '@route'-style prefix was lost during extraction —
# confirm against the framework's handler-registration API.
('/controllers')
def handle_controllers(self):
    """Render and process the controllers configuration web page.

    Handles listing, creation, editing and deletion of controller
    instances, accumulating the HTML response in the module-global
    TXBuffer and returning it.
    """
    global TXBuffer, navMenuIndex
    TXBuffer = ''
    navMenuIndex = 2
    # Redirect until initial WiFi setup / login are completed.
    if rpieGlobals.wifiSetup:
        return self.redirect('/setup')
    if (not isLoggedIn(self.get, self.cookie)):
        return self.redirect('/login')
    sendHeadandTail('TmplStd', _HEAD)
    # Request parameters come from the query string or the POST body.
    if (self.type == 'GET'):
        responsearr = self.get
    else:
        responsearr = self.post
    edit = arg('edit', responsearr)
    controllerindex = arg('index', responsearr)
    controllerNotSet = ((controllerindex == 0) or (controllerindex == ''))
    if (controllerindex != ''):
        # The UI is 1-based; internal storage is 0-based.
        controllerindex = (int(controllerindex) - 1)
    controllerip = arg('controllerip', responsearr)
    controllerport = arg('controllerport', responsearr)
    protocol = arg('protocol', responsearr)
    if (protocol != ''):
        protocol = int(protocol)
    else:
        protocol = 0
    controlleruser = arg('controlleruser', responsearr)
    controllerpassword = arg('controllerpassword', responsearr)
    enabled = (arg('controllerenabled', responsearr) == 'on')
    # Delete request: explicit 'del' button, or protocol reset while a
    # specific controller index is selected outside edit mode.
    if (((protocol == 0) and (edit == '') and (controllerindex != '')) or (arg('del', responsearr) != '')):
        try:
            Settings.Controllers[controllerindex].controller_exit()
        except:
            pass
        Settings.Controllers[controllerindex] = False
        controllerNotSet = True
        Settings.savecontrollers()
    if (controllerNotSet == False):
        if (protocol > 0):
            # Save submitted settings onto the selected controller.
            try:
                if Settings.Controllers[controllerindex]:
                    Settings.Controllers[controllerindex].controllerip = controllerip
                    Settings.Controllers[controllerindex].controllerport = controllerport
                    Settings.Controllers[controllerindex].controlleruser = controlleruser
                    if ('**' not in controllerpassword):
                        # A masked password ('**...') means "leave unchanged".
                        Settings.Controllers[controllerindex].controllerpassword = controllerpassword
                    Settings.Controllers[controllerindex].enabled = enabled
                    Settings.Controllers[controllerindex].webform_save(responsearr)
                    Settings.savecontrollers()
            except:
                pass
        else:
            # No protocol submitted: fall back to the stored controller id.
            try:
                if Settings.Controllers[controllerindex]:
                    protocol = Settings.Controllers[controllerindex].controllerid
            except:
                pass
    TXBuffer += "<form name='frmselect' method='post'>"
    if controllerNotSet:
        # Overview table listing every controller slot.
        TXBuffer += "<table class='multirow' border=1px frame='box' rules='all'><TR><TH style='width:70px;'>"
        TXBuffer += "<TH style='width:50px;'>Nr<TH style='width:100px;'>Enabled<TH>Protocol<TH>Host<TH>Port"
        for x in range(rpieGlobals.CONTROLLER_MAX):
            TXBuffer += '<tr><td><a class=\'button link\' href="controllers?index='
            TXBuffer += str((x + 1))
            TXBuffer += '&edit=1">Edit</a><td>'
            TXBuffer += getControllerSymbol(x)
            TXBuffer += '</td><td>'
            try:
                if Settings.Controllers[x]:
                    addEnabled(Settings.Controllers[x].enabled)
                    TXBuffer += '</td><td>'
                    TXBuffer += str(Settings.Controllers[x].getcontrollername())
                    TXBuffer += '</td><td>'
                    TXBuffer += str(Settings.Controllers[x].controllerip)
                    TXBuffer += '</td><td>'
                    TXBuffer += str(Settings.Controllers[x].controllerport)
                else:
                    TXBuffer += '<td><td><td>'
            except:
                TXBuffer += '<td><td><td>'
        TXBuffer += '</table></form>'
    else:
        # Edit form for a single controller.
        TXBuffer += "<table class='normal'><TR><TH style='width:150px;' align='left'>Controller Settings<TH>"
        TXBuffer += '<tr><td>Protocol:<td>'
        addSelector_Head('protocol', True)
        for x in range(len(rpieGlobals.controllerselector)):
            addSelector_Item(rpieGlobals.controllerselector[x][2], int(rpieGlobals.controllerselector[x][1]), (str(protocol) == str(rpieGlobals.controllerselector[x][1])), False, '')
        addSelector_Foot()
        if (int(protocol) > 0):
            # Instantiate the plugin module when the protocol changed.
            createnewcontroller = True
            try:
                if (Settings.Controllers[controllerindex].getcontrollerid() == int(protocol)):
                    createnewcontroller = False
            except:
                pass
            exceptstr = ''
            if createnewcontroller:
                for y in range(len(rpieGlobals.controllerselector)):
                    if (int(rpieGlobals.controllerselector[y][1]) == int(protocol)):
                        # Grow the controller list up to the requested slot.
                        if (len(Settings.Controllers) <= controllerindex):
                            while (len(Settings.Controllers) <= controllerindex):
                                Settings.Controllers.append(False)
                        try:
                            m = __import__(rpieGlobals.controllerselector[y][0])
                        except Exception as e:
                            Settings.Controllers[controllerindex] = False
                            exceptstr += str(e)
                            m = False
                        if m:
                            try:
                                Settings.Controllers[controllerindex] = m.Controller(controllerindex)
                            except Exception as e:
                                Settings.Controllers.append(m.Controller(controllerindex))
                                exceptstr += str(e)
                        break
            if (Settings.Controllers[controllerindex] == False):
                # Plugin import failed: bail out with an error page.
                errormsg = ("Importing failed, please double <a href='plugins'>check dependencies</a>! " + str(exceptstr))
                TXBuffer += (errormsg + '</td></tr></table>')
                sendHeadandTail('TmplStd', _TAIL)
                return TXBuffer
            else:
                # Initialize the controller and wire task send callbacks.
                try:
                    Settings.Controllers[controllerindex].controller_init()
                    if Settings.Controllers[controllerindex]:
                        if Settings.Controllers[controllerindex].enabled:
                            Settings.Controllers[controllerindex].setonmsgcallback(Settings.callback_from_controllers)
                            for x in range(0, len(Settings.Tasks)):
                                if (Settings.Tasks[x] and (type(Settings.Tasks[x]) is not bool)):
                                    if Settings.Tasks[x].enabled:
                                        if Settings.Tasks[x].senddataenabled[controllerindex]:
                                            if Settings.Controllers[controllerindex]:
                                                if Settings.Controllers[controllerindex].enabled:
                                                    Settings.Tasks[x].controllercb[controllerindex] = Settings.Controllers[controllerindex].senddata
                except:
                    pass
        if (controllerindex != ''):
            TXBuffer += (("<input type='hidden' name='index' value='" + str((controllerindex + 1))) + "'>")
        if (int(protocol) > 0):
            # Standard connection fields plus the plugin's own form section.
            addFormCheckBox('Enabled', 'controllerenabled', Settings.Controllers[controllerindex].enabled)
            addFormTextBox('Controller Host Address', 'controllerip', Settings.Controllers[controllerindex].controllerip, 96)
            addFormNumericBox('Controller Port', 'controllerport', Settings.Controllers[controllerindex].controllerport, 1, 65535)
            if Settings.Controllers[controllerindex].usesAccount:
                addFormTextBox('Controller User', 'controlleruser', Settings.Controllers[controllerindex].controlleruser, 96)
            if Settings.Controllers[controllerindex].usesPassword:
                addFormPasswordBox('Controller Password', 'controllerpassword', Settings.Controllers[controllerindex].controllerpassword, 96)
            try:
                Settings.Controllers[controllerindex].webform_load()
            except Exception as e:
                misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, ((('Controller' + str(controllerindex)) + ' ') + str(e)))
            addFormSeparator(2)
        TXBuffer += '<tr><td><td>'
        TXBuffer += '<a class=\'button link\' href="controllers">Close</a>'
        addSubmitButton()
        if (controllerindex != ''):
            addSubmitButton('Delete', 'del')
        TXBuffer += '</table></form>'
    sendHeadandTail('TmplStd', _TAIL)
    return TXBuffer
class TestAsyncProfiler():
    """Tests for driver.AsyncProfiler."""

    # BUG FIX: the bare `.asyncio` line was a stripped `@pytest.mark.asyncio`
    # decorator (assumes pytest is imported at module level — confirm).
    @pytest.mark.asyncio
    async def test_profiler_is_a_transparent_wrapper(self):
        f_called = False

        async def f(x):
            nonlocal f_called
            f_called = True
            return (x * 2)

        profiler = driver.AsyncProfiler(f)
        # The profiler must forward the call and return f's result unchanged.
        assert ((await profiler(1)) == 2)
        assert f_called
def test_map_pod_task_serialization():
    """A pod task wrapped in map_task serializes with the map-execute entrypoint args."""
    pod = Pod(pod_spec=V1PodSpec(restart_policy='OnFailure', containers=[V1Container(name='primary')]), primary_container_name='primary')

    # BUG FIX: the bare call below was a stripped `@task(...)` decorator;
    # restored so simple_pod_task is an actual flytekit task (assumes
    # `task` is imported at module level — confirm).
    @task(task_config=pod, environment={'FOO': 'bar'})
    def simple_pod_task(i: int):
        pass

    mapped_task = map_task(simple_pod_task, metadata=TaskMetadata(retries=1))
    default_img = Image(name='default', fqn='test', tag='tag')
    serialization_settings = SerializationSettings(project='project', domain='domain', version='version', env={'FOO': 'baz'}, image_config=ImageConfig(default_image=default_img, images=[default_img]))
    pod_spec = mapped_task.get_k8s_pod(serialization_settings).pod_spec
    assert (len(pod_spec['containers']) == 1)
    assert (pod_spec['containers'][0]['args'] == ['pyflyte-map-execute', '--inputs', '{{.input}}', '--output-prefix', '{{.outputPrefix}}', '--raw-output-data-prefix', '{{.rawOutputDataPrefix}}', '--checkpoint-path', '{{.checkpointOutputPrefix}}', '--prev-checkpoint', '{{.prevCheckpointPrefix}}', '--resolver', 'MapTaskResolver', '--', 'vars', '', 'resolver', 'flytekit.core.python_auto_container.default_task_resolver', 'task-module', 'tests.test_pod', 'task-name', 'simple_pod_task'])
    assert ({'primary_container_name': 'primary'} == mapped_task.get_config(serialization_settings))
def build_log_name(name: (str | None), addresses: list[str], connected_address: (str | None)) -> str:
    """Compose a display name from a host name and its known addresses.

    Prefers the address we are actually connected to; derives a name from
    a local or name-bearing address when none was given.
    """
    preferred = connected_address
    for addr in addresses:
        # Take the first label of an address as the name when the address
        # is local (and no name is known) or carries the host name itself.
        derive = ((not name) and address_is_local(addr)) or host_is_name_part(addr)
        if derive:
            name = addr.partition('.')[0]
        elif not preferred:
            preferred = addr
    if not preferred:
        return name or addresses[0]
    if name and name != preferred and not preferred.startswith(f'{name}.'):
        return f'{name} {preferred}'
    return preferred
class OptionPlotoptionsGaugeSonificationDefaultspeechoptionsMappingVolume(Options):
    """Mapping options for the default speech-options `volume` setting.

    BUG FIX: every option here was defined twice (getter then setter) with
    no decorators, so the second `def` silently replaced the first and the
    getters were unreachable.  Restored the evidently stripped
    `@property` / `@<name>.setter` pairs.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class Space(Plugin, metaclass=SpaceMeta):
    """Base class for color spaces."""

    BASE = ''           # name of the space this one converts through
    NAME = ''
    SERIALIZE = ()      # serialization identifiers; NAME is used when empty
    CHANNELS = ()
    CHANNEL_ALIASES = {}
    COLOR_FORMAT = True
    GAMUT_CHECK = None
    CLIP_SPACE = None
    EXTENDED_RANGE = False
    WHITE = (0.0, 0.0)  # white point chromaticity coordinates
    DYNAMIC_RANGE = 'sdr'

    def __init__(self, **kwargs: Any) -> None:
        # Alpha is always appended as the final channel.
        self.channels = (self.CHANNELS + (alpha_channel,))
        self._chan_index = {c: e for (e, c) in enumerate(self.channels)}
        self._color_ids = ((self.NAME,) if (not self.SERIALIZE) else self.SERIALIZE)
        # Every channel except alpha may be expressed as a percentage.
        self._percents = (([True] * (len(self.channels) - 1)) + [False])

    def get_channel_index(self, name: str) -> int:
        """Resolve a channel name (or numeric string) to its index."""
        idx = self._chan_index.get(self.CHANNEL_ALIASES.get(name, name))
        return (int(name) if (idx is None) else idx)

    def resolve_channel(self, index: int, coords: Vector) -> float:
        """Return the channel value, substituting the channel's NaN default."""
        value = coords[index]
        return (self.channels[index].nans if math.isnan(value) else value)

    def _serialize(self) -> tuple[(str, ...)]:
        return self._color_ids

    def normalize(self, coords: Vector) -> Vector:
        return coords

    def is_achromatic(self, coords: Vector) -> (bool | None):
        """Return achromatic status, or None when undetermined."""
        return None

    # BUG FIX: restored the evidently stripped `@classmethod` decorator —
    # the method takes `cls` and reads a class attribute.
    @classmethod
    def white(cls) -> VectorLike:
        return cls.WHITE

    # BUG FIX: these two conversion hooks had empty bodies (a syntax
    # error, presumably lost during extraction); made them explicit
    # abstract-style stubs — confirm against the original source.
    def to_base(self, coords: Vector) -> Vector:
        """Convert *coords* to the BASE space.  Subclasses must override."""
        raise NotImplementedError

    def from_base(self, coords: Vector) -> Vector:
        """Convert *coords* from the BASE space.  Subclasses must override."""
        raise NotImplementedError

    def to_string(self, parent: Color, *, alpha: (bool | None)=None, precision: (int | None)=None, fit: (bool | str)=True, none: bool=False, percent: (bool | Sequence[bool])=False, **kwargs: Any) -> str:
        """Serialize *parent* as a CSS color string."""
        return serialize.serialize_css(parent, color=True, alpha=alpha, precision=precision, fit=fit, none=none, percent=percent)

    def match(self, string: str, start: int=0, fullmatch: bool=True) -> (tuple[(tuple[(Vector, float)], int)] | None):
        """Match a color string; the base implementation matches nothing."""
        return None
def generate_differing_bits(basename1, basename2):
    """Yield (parameters, line) pairs for lines differing between two files.

    Lines removed relative to *basename1* are attributed to basename1's
    parameter string, added lines to basename2's.
    """
    with open(basename1, 'r') as path1, open(basename2, 'r') as path2:
        diff = difflib.unified_diff(path1.read().splitlines(), path2.read().splitlines(), fromfile='path1', tofile='path2')
    for line in diff:
        # Skip the file-header lines of the unified diff.
        if line.startswith('---') or line.startswith('+++'):
            continue
        # BUG FIX: this test was `startswith('')`, which is True for every
        # string and made the yields below unreachable; unchanged context
        # lines in a unified diff start with a single space.
        if line.startswith(' '):
            continue
        if line.startswith('-'):
            # BUG FIX: strip('-') also removed legitimate '-' characters
            # from the content; drop only the one-character diff marker.
            yield (extract_parameters_string(basename1), line[1:])
        elif line.startswith('+'):
            yield (extract_parameters_string(basename2), line[1:])
class TestGetFastqGzFiles(unittest.TestCase):
    """Tests that GetFastqGzFiles keeps only gzip-compressed fastq/fq files."""

    def _check(self, file_list, expected):
        # Shared assertion body: the selection must match `expected` pairwise.
        found = GetFastqGzFiles('test', file_list=file_list)
        self.assertEqual(len(expected), len(found))
        for want, got in zip(expected, found):
            self.assertEqual(want, got)

    def test_get_fastq_gz_files(self):
        self._check(
            ['sample1.fastq.gz', 'sample2.fastq.gz', 'sample3.fastq', 'sample4.fastq', 'out.log', 'README'],
            [('sample1.fastq.gz',), ('sample2.fastq.gz',)])

    def test_get_fastq_gz_files_paired_end(self):
        self._check(
            ['sample1_R1.fastq.gz', 'sample1_R2.fastq.gz', 'sample2_R1.fastq.gz', 'sample2_R2.fastq.gz', 'sample3.fastq', 'sample4.fastq', 'out.log', 'README'],
            [('sample1_R1.fastq.gz',), ('sample1_R2.fastq.gz',), ('sample2_R1.fastq.gz',), ('sample2_R2.fastq.gz',)])

    def test_get_fastq_gz_files_fq_extension(self):
        self._check(
            ['sample1_R1.fq.gz', 'sample1_R2.fq.gz', 'sample2_R1.fq.gz', 'sample2_R2.fq.gz', 'sample3.fq', 'sample4.fq', 'out.log', 'README'],
            [('sample1_R1.fq.gz',), ('sample1_R2.fq.gz',), ('sample2_R1.fq.gz',), ('sample2_R2.fq.gz',)])
class TestIntrospectionAction(unittest.TestCase):
    """Tests for IntrospectionAction: single-action, whole-server and
    switched-action introspection against several fake servers."""

    maxDiff = 65000  # expected response bodies are large dicts

    def test_null_action_name(self):
        # A null action_name must be rejected as an invalid field.
        action = IntrospectionAction(FakeServerOne())
        with self.assertRaises(ActionError) as error_context:
            action(EnrichedActionRequest(action='introspect', body={'action_name': None}))
        self.assertEqual(1, len(error_context.exception.errors))
        self.assertEqual(ERROR_CODE_INVALID, error_context.exception.errors[0].code)
        self.assertEqual('action_name', error_context.exception.errors[0].field)

    def test_invalid_action_name(self):
        # An unknown action name must be rejected the same way.
        action = IntrospectionAction(FakeServerOne())
        with self.assertRaises(ActionError) as error_context:
            action(EnrichedActionRequest(action='introspect', body={'action_name': 'not_a_defined_action'}))
        self.assertEqual(1, len(error_context.exception.errors))
        self.assertEqual(ERROR_CODE_INVALID, error_context.exception.errors[0].code)
        self.assertEqual('action_name', error_context.exception.errors[0].field)

    def test_single_action_simple(self):
        # An action without schemas introspects with None schemas.
        action = IntrospectionAction(FakeServerOne())
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'one'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['one'], 'actions': {'one': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}}}, response.body)

    def test_single_action_complex(self):
        # An action with request/response schemas exposes their introspection.
        action = IntrospectionAction(FakeServerTwo())
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'two'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['two'], 'actions': {'two': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}}}, response.body)

    def test_single_action_introspect_default(self):
        # The built-in 'introspect' action can introspect itself.
        action = IntrospectionAction(FakeServerOne())
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'introspect'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['introspect'], 'actions': {'introspect': {'documentation': IntrospectionAction.description, 'request_schema': IntrospectionAction.request_schema.introspect(), 'response_schema': IntrospectionAction.response_schema.introspect()}}}, response.body)

    def test_single_action_status_default(self):
        # The built-in 'status' action is introspectable as well.
        action = IntrospectionAction(FakeServerTwo())
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'status'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['status'], 'actions': {'status': {'documentation': BaseStatusAction.description, 'request_schema': BaseStatusAction.request_schema.introspect(), 'response_schema': BaseStatusAction.response_schema.introspect()}}}, response.body)

    def test_whole_server_simple(self):
        # An empty body introspects the entire server, built-ins included.
        action = IntrospectionAction(FakeServerOne())
        response = action(EnrichedActionRequest(action='introspect', body={}))
        self.assertEqual([], response.errors)
        self.assertEqual({'documentation': 'This is the documentation we should get', 'action_names': ['introspect', 'one', 'status'], 'actions': {'introspect': {'documentation': IntrospectionAction.description, 'request_schema': IntrospectionAction.request_schema.introspect(), 'response_schema': IntrospectionAction.response_schema.introspect()}, 'status': {'documentation': BaseStatusAction.description, 'request_schema': BaseStatusAction.request_schema.introspect(), 'response_schema': BaseStatusAction.response_schema.introspect()}, 'one': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}}}, response.body)

    def test_whole_server_complex(self):
        # Whole-server introspection on a server with a schema'd action.
        action = IntrospectionAction(FakeServerTwo())
        response = action(EnrichedActionRequest(action='introspect', body={}))
        self.assertEqual([], response.errors)
        self.assertEqual({'documentation': 'Instead, we should get this documentation', 'action_names': ['introspect', 'one', 'status', 'two'], 'actions': {'introspect': {'documentation': IntrospectionAction.description, 'request_schema': IntrospectionAction.request_schema.introspect(), 'response_schema': IntrospectionAction.response_schema.introspect()}, 'status': {'documentation': BaseStatusAction.description, 'request_schema': BaseStatusAction.request_schema.introspect(), 'response_schema': BaseStatusAction.response_schema.introspect()}, 'one': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}, 'two': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}}}, response.body)

    def test_single_action_switched(self):
        # Switched actions can be introspected per-switch or expanded when
        # no switch suffix is given.
        action = IntrospectionAction(FakeServerThree())
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'my_switched_action[DEFAULT]'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['my_switched_action[DEFAULT]'], 'actions': {'my_switched_action[DEFAULT]': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}}}, response.body)
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'my_switched_action[switch:3]'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['my_switched_action[switch:3]'], 'actions': {'my_switched_action[switch:3]': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}}}, response.body)
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'my_switched_action[switch:5]'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['my_switched_action[switch:5]'], 'actions': {'my_switched_action[switch:5]': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}}}, response.body)
        response = action(EnrichedActionRequest(action='introspect', body={'action_name': 'your_switched_action'}))
        self.assertEqual([], response.errors)
        self.assertEqual({'action_names': ['your_switched_action[DEFAULT]', 'your_switched_action[switch:4]'], 'actions': {'your_switched_action[switch:4]': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}, 'your_switched_action[DEFAULT]': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}}}, response.body)

    def test_whole_server_switched(self):
        # Whole-server introspection expands every switch variant.
        action = IntrospectionAction(FakeServerThree())
        response = action(EnrichedActionRequest(action='introspect', body={}))
        self.assertEqual([], response.errors)
        self.assertEqual({'documentation': None, 'action_names': ['introspect', 'my_switched_action[DEFAULT]', 'my_switched_action[switch:5]', 'status', 'your_switched_action[DEFAULT]', 'your_switched_action[switch:4]'], 'actions': {'introspect': {'documentation': IntrospectionAction.description, 'request_schema': IntrospectionAction.request_schema.introspect(), 'response_schema': IntrospectionAction.response_schema.introspect()}, 'my_switched_action[DEFAULT]': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}, 'my_switched_action[switch:5]': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}, 'status': {'documentation': BaseStatusAction.description, 'request_schema': BaseStatusAction.request_schema.introspect(), 'response_schema': BaseStatusAction.response_schema.introspect()}, 'your_switched_action[switch:4]': {'documentation': 'The real documentation', 'request_schema': None, 'response_schema': None}, 'your_switched_action[DEFAULT]': {'documentation': 'Test action documentation', 'request_schema': FakeActionTwo.request_schema.introspect(), 'response_schema': FakeActionTwo.response_schema.introspect()}}}, response.body)
def _sortino_ratio(rets, risk_free=0.0, period=TRADING_DAYS_PER_YEAR):
    """Annualised Sortino ratio of a return series.

    The downside deviation is the standard deviation of the negative
    returns only.  Returns 0 when there is no downside deviation to
    divide by (no negative returns, or zero deviation).
    """
    avg_ret = np.mean(rets, axis=0)
    downside = rets[(rets < 0)]
    downside_dev = np.std(downside, axis=0) if len(downside) else 0
    if math.isclose(downside_dev, 0):
        return 0
    return ((avg_ret * period) - risk_free) / (downside_dev * np.sqrt(period))
def parse_model_performance_report(model_performance_report: Dict) -> Dict:
    """Flatten a single-metric RegressionQualityMetric report into scalars.

    Merges the metric's ``current`` sub-dict into its result, keeps only
    entries whose values are int/float/str/numpy scalars, and converts
    numpy scalars to builtin Python types.
    """
    metrics = model_performance_report['metrics']
    assert len(metrics) == 1
    quality_metric: Dict = metrics[0]
    assert quality_metric['metric'] == 'RegressionQualityMetric'
    result: Dict = quality_metric['result']
    # Lift the 'current' sub-metrics to the top level (in place).
    result.update(result['current'])
    scalar_result: Dict = {
        key: val
        for key, val in result.items()
        if isinstance(val, (int, float, str, np.generic))
    }
    return numpy_to_standard_types(scalar_result)
class Number(RangeValidator):
    """Validator converting input to a number, preferring int for whole values."""

    messages = dict(number=_('Please enter a number'))

    def _convert_to_python(self, value, state):
        try:
            number = float(value)
            try:
                truncated = int(number)
            except OverflowError:
                # inf/-inf cannot be truncated; fall back to the float.
                truncated = None
            # Whole-valued floats collapse to int.  NaN reaches the outer
            # handler because int(NaN) raises ValueError.
            if number == truncated:
                return truncated
            return number
        except (ValueError, TypeError):
            raise Invalid(self.message('number', state), value, state)
def filter_application_group_data(json):
    """Project an application-group payload onto its accepted option fields.

    Strips invalid fields first, then keeps only the known attributes
    whose values are not None.
    """
    option_list = ['application', 'behavior', 'category', 'comment', 'name', 'popularity', 'protocols', 'risk', 'technology', 'type', 'vendor']
    json = remove_invalid_fields(json)
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
class Test_WindowSet():
    """Tests for WindowSet: windowed table access bound to a key and event.

    ``wtable``, ``table``, ``event`` and ``app`` are presumably pytest
    fixtures supplied by the surrounding test module/conftest.

    Fix: the ``@pytest.fixture`` and ``@pytest.mark.parametrize`` decorators
    were missing (the latter appeared as a bare ``.parametrize(...)``
    statement, which is a SyntaxError); they are restored here so the
    ``wset`` fixture is injectable and ``test_operators`` is parametrized.
    """

    @pytest.fixture
    def wset(self, *, wtable, event):
        # A WindowSet over key 'k' bound to the wrapped table and event.
        return WindowSet('k', wtable.table, wtable, event)

    def test_constructor(self, *, event, table, wset, wtable):
        assert (wset.key == 'k')
        assert (wset.table is table)
        assert (wset.wrapper is wtable)
        assert (wset.event is event)

    def test_apply(self, *, wset, event):
        # NOTE(review): this Mock is created but never used — likely leftover.
        Mock(name='event2', autospec=Event)
        wset.wrapper.get_timestamp = Mock(name='wrapper.get_timestamp')
        wset.table._apply_window_op = Mock(name='_apply_window_op')
        ret = wset.apply(operator.add, 'val')
        # apply() with no explicit event uses the set's own event timestamp.
        wset.wrapper.get_timestamp.assert_called_once_with(wset.event)
        wset.table._apply_window_op.assert_called_once_with(operator.add, 'k', 'val', wset.wrapper.get_timestamp())
        assert (ret is wset)

    def mock_get_timestamp(self, wset):
        # Helper: replace the wrapper's get_timestamp with a Mock and return it.
        m = wset.wrapper.get_timestamp = Mock(name='wrapper.get_timestamp')
        return m

    def test_apply__custom_event(self, *, wset, event):
        event2 = Mock(name='event2', autospec=Event)
        wset.table._apply_window_op = Mock(name='_apply_window_op')
        get_timestamp = self.mock_get_timestamp(wset)
        ret = wset.apply(operator.add, 'val', event2)
        # An explicitly passed event overrides the set's bound event.
        get_timestamp.assert_called_once_with(event2)
        wset.table._apply_window_op.assert_called_once_with(operator.add, 'k', 'val', get_timestamp())
        assert (ret is wset)

    def test_value(self, *, event, wset):
        get_timestamp = self.mock_get_timestamp(wset)
        wset.table._windowed_timestamp = Mock(name='_windowed_timestamp')
        assert wset.value(event)
        wset.table._windowed_timestamp.assert_called_once_with('k', get_timestamp())

    def test_now(self, *, wset):
        wset.table._windowed_now = Mock(name='_windowed_now')
        ret = wset.now()
        wset.table._windowed_now.assert_called_once_with('k')
        assert (ret is wset.table._windowed_now())

    def test_current(self, *, table, wset):
        event2 = Mock(name='event2', autospec=Event)
        table._windowed_timestamp = Mock(name='_windowed_timestamp')
        table._relative_event = Mock(name='_relative_event')
        ret = wset.current(event2)
        table._relative_event.assert_called_once_with(event2)
        table._windowed_timestamp.assert_called_once_with('k', table._relative_event())
        assert (ret is table._windowed_timestamp())

    def test_current__default_event(self, *, table, wset):
        table._windowed_timestamp = Mock(name='_windowed_timestamp')
        table._relative_event = Mock(name='_relative_event')
        ret = wset.current()
        # Without an argument, current() relativizes the set's own event.
        table._relative_event.assert_called_once_with(wset.event)
        table._windowed_timestamp.assert_called_once_with('k', table._relative_event())
        assert (ret is table._windowed_timestamp())

    def test_delta(self, *, table, wset):
        event2 = Mock(name='event2', autospec=Event)
        table._windowed_delta = Mock(name='_windowed_delta')
        ret = wset.delta(30.3, event2)
        table._windowed_delta.assert_called_once_with('k', 30.3, event2)
        assert (ret is table._windowed_delta())

    def test_delta__default_event(self, *, table, wset):
        table._windowed_delta = Mock(name='_windowed_delta')
        ret = wset.delta(30.3)
        table._windowed_delta.assert_called_once_with('k', 30.3, wset.event)
        assert (ret is table._windowed_delta())

    def test_getitem(self, *, wset):
        # Numeric subscript reads the (key, timestamp) cell of the table.
        wset.table = {(wset.key, 30.3): 101.1}
        assert (wset[30.3] == 101.1)

    def test_getitem__event(self, *, app, wset):
        # Event subscript rebinds the WindowSet to that event.
        e = Event(app, key='KK', value='VV', headers={}, message=Mock(name='message', autospec=Message))
        ret = wset[e]
        assert isinstance(ret, WindowSet)
        assert (ret.key == wset.key)
        assert (ret.table is wset.table)
        assert (ret.wrapper is wset.wrapper)
        assert (ret.event is e)

    def test_setitem(self, *, wset):
        wset.table = {}
        wset[30.3] = 'val'
        assert (wset.table[(wset.key, 30.3)] == 'val')

    def test_setitem__event(self, *, app, wset):
        # Assigning through an Event subscript is not supported.
        e = Event(app, key='KK', value='VV', headers={}, message=Mock(name='message', autospec=Message))
        with pytest.raises(NotImplementedError):
            wset[e] = 'val'

    def test_delitem(self, *, wset):
        wset.table = {(wset.key, 30.3): 'val'}
        del wset[30.3]
        assert (not wset.table)

    def test_delitem__event(self, *, app, wset):
        # Deleting through an Event subscript is not supported.
        e = Event(app, key='KK', value='VV', headers={}, message=Mock(name='message', autospec=Message))
        with pytest.raises(NotImplementedError):
            del wset[e]

    @pytest.mark.parametrize('meth,expected_op', [('__iadd__', operator.add), ('__isub__', operator.sub), ('__imul__', operator.mul), ('__itruediv__', operator.truediv), ('__ifloordiv__', operator.floordiv), ('__imod__', operator.mod), ('__ipow__', operator.pow), ('__ilshift__', operator.lshift), ('__irshift__', operator.rshift), ('__iand__', operator.and_), ('__ixor__', operator.xor), ('__ior__', operator.or_)])
    def test_operators(self, meth, expected_op, *, wset):
        # Every augmented-assignment dunder delegates to apply() with the
        # corresponding operator.* function.
        other = Mock(name='other')
        op = getattr(wset, meth)
        wset.apply = Mock(name='apply')
        result = op(other)
        wset.apply.assert_called_once_with(expected_op, other)
        assert (result is wset.apply())

    def test_repr(self, *, wset):
        assert repr(wset)
def extractIclynnfrostHomeBlog(item):
    """Parser for 'Iclynnfrost Home Blog' feed items.

    Returns None for previews or items without a chapter/volume, a release
    message for recognised tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionSeriesWaterfallSonificationDefaultspeechoptions(Options):
    # Generated Highcharts option wrapper for
    # series.waterfall.sonification.defaultSpeechOptions.
    # NOTE(review): each getter/setter pair below shares one name, so the
    # later `def` shadows the earlier one at class-creation time (only the
    # setter survives). Decorators such as @property/@name.setter were
    # presumably lost upstream — confirm against the code generator.
    def activeWhen(self) -> 'OptionSeriesWaterfallSonificationDefaultspeechoptionsActivewhen':
        return self._config_sub_data('activeWhen', OptionSeriesWaterfallSonificationDefaultspeechoptionsActivewhen)
    def language(self):
        # Default voice language.
        return self._config_get('en-US')
    def language(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionSeriesWaterfallSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesWaterfallSonificationDefaultspeechoptionsMapping)
    def pointGrouping(self) -> 'OptionSeriesWaterfallSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesWaterfallSonificationDefaultspeechoptionsPointgrouping)
    def preferredVoice(self):
        return self._config_get(None)
    def preferredVoice(self, text: str):
        self._config(text, js_type=False)
    def showPlayMarker(self):
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        # Track type; always 'speech' for this options group.
        return self._config_get('speech')
    def type(self, text: str):
        self._config(text, js_type=False)
def test_recall_by_class_test() -> None:
    """TestRecallByClass(label='b', gt=0.8) passes on this toy dataset."""
    current = pd.DataFrame({
        'target': ['a', 'a', 'a', 'b'],
        'prediction': ['a', 'a', 'b', 'b'],
    })
    mapping = ColumnMapping(pos_label='a')
    suite = TestSuite(tests=[TestRecallByClass(label='b', gt=0.8)])
    suite.run(current_data=current, reference_data=None, column_mapping=mapping)
    suite._inner_suite.raise_for_error()
    assert suite
    assert suite.show()
    assert suite.json()
class TestEmptyTabFile(unittest.TestCase):
    """Behaviour of a freshly constructed, empty TabFile."""

    def test_make_empty_tabfile(self):
        """A new TabFile starts with no lines."""
        empty = TabFile()
        self.assertEqual(len(empty), 0, 'new TabFile should have zero length')

    def test_add_data_to_new_tabfile(self):
        """Appending a list of fields adds exactly one line with those fields."""
        fields = ['chr1', 10000, 20000, '+']
        tabfile = TabFile()
        tabfile.append(data=fields)
        self.assertEqual(len(tabfile), 1, 'TabFile should now have one line')
        for idx, expected in enumerate(fields):
            self.assertEqual(tabfile[0][idx], expected)

    def test_add_tab_data_to_new_tabfile(self):
        """Appending raw tab-delimited text adds one line that round-trips."""
        raw = 'chr1\t10000\t20000\t+'
        tabfile = TabFile()
        tabfile.append(tabdata=raw)
        self.assertEqual(len(tabfile), 1, 'TabFile should now have one line')
        self.assertEqual(str(tabfile[0]), raw)
class TCPMappingTLSOriginationContextWithDotTest(AmbassadorTest):
    # End-to-end test: a TCPMapping that originates TLS to the upstream via a
    # TLSContext whose name contains a dot, with SNI overridden to
    # 'my-hilarious-name'.
    extra_ports = [6789]
    target: ServiceType
    def init(self) -> None:
        self.target = HTTP()
    def manifests(self) -> str:
        # Client cert secret + TLSContext (with dotted name) + TCPMapping on
        # port 6789 that forwards to the target's HTTPS port using that context.
        return (f'''
---
apiVersion: v1
kind: Secret
metadata:
name: {self.path.k8s}-clientcert
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts['presto.example.com'].k8s_crt}
tls.key: {TLSCerts['presto.example.com'].k8s_key}
---
apiVersion: getambassador.io/v2
kind: TLSContext
metadata:
name: {self.path.k8s}.tlsclient
spec:
ambassador_id: [ {self.ambassador_id} ]
secret: {self.path.k8s}-clientcert
sni: my-hilarious-name
---
apiVersion: getambassador.io/v2
kind: TCPMapping
metadata:
name: {self.path.k8s}
spec:
ambassador_id: [ {self.ambassador_id} ]
port: 6789
service: {self.target.path.fqdn}:443
tls: {self.path.k8s}.tlsclient
''' + super().manifests())
    def queries(self):
        # Single query against the TCPMapping's dedicated port.
        (yield Query(self.url('', port=6789)))
    def check(self):
        # Backend must be the target service, reached over TLS with the SNI
        # name configured on the TLSContext.
        assert (self.results[0].json['backend'] == self.target.path.k8s)
        assert (self.results[0].json['request']['tls']['enabled'] == True)
        assert (self.results[0].json['request']['tls']['server-name'] == 'my-hilarious-name')
class SelectableListModel(QObject):
    """Qt model wrapping a list of items, each carrying a selected flag.

    Items are considered selected by default until explicitly toggled;
    selectionChanged is emitted after every selection mutation.
    """
    modelChanged = Signal()
    selectionChanged = Signal()

    def __init__(self, items):
        QObject.__init__(self)
        self._selection = {}
        self._items = items

    def getList(self):
        return self._items

    def isValueSelected(self, value):
        # Values never touched default to selected.
        return self._selection.get(value, True)

    def selectValue(self, value):
        self._setSelectState(value, True)
        self.selectionChanged.emit()

    def unselectValue(self, value):
        self._setSelectState(value, False)
        self.selectionChanged.emit()

    def unselectAll(self):
        # One signal emission for the whole batch.
        for entry in self.getList():
            self._setSelectState(entry, False)
        self.selectionChanged.emit()

    def selectAll(self):
        for entry in self.getList():
            self._setSelectState(entry, True)
        self.selectionChanged.emit()

    def getSelectedItems(self):
        return [entry for entry in self.getList() if self.isValueSelected(entry)]

    def _setSelectState(self, key, state):
        self._selection[key] = state
def test_correctly_decodes_random_large_matrices():
    # Regression fixtures: 10x10 sparse score matrices with known maximum
    # spanning arborescences rooted at node 0; expected outputs are the
    # per-node head lists (None marks the root).
    scores = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.4744205], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.6448661], [0., 0., 0., 0.1424588, 0., 0., 0.9688441, 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.3309963], [0., 0., 0., 0., 0., 0.7707749, 0.1686939, 0., 0., 0.], [0., 0., 0.6654594, 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0.8792097, 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype='f')
    assert (chu_liu_edmonds(scores, 0) == [None, 4, 1, 2, 9, 0, 3, 8, 5, 7])
    scores2 = np.array([[0., 0., 0., 0.5188734, 0., 0., 0., 0., 0., 0.], [0.6110081, 0., 0., 0., 0., 0., 0.3338458, 0., 0., 0.], [0., 0.3509711, 0.8738929, 0., 0., 0., 0., 0.9323746, 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.262673, 0., 0.6318016, 0.0442728, 0.2669838, 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0.426987, 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0.1681499, 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.6107612, 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype='f')
    assert (chu_liu_edmonds(scores2, 0) == [None, 0, 1, 4, 6, 4, 8, 8, 0, 8])
    scores3 = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0.2769623, 0., 0., 0., 0., 0., 0.5974235, 0.6019087], [0., 0., 0.2635923, 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0.844114, 0., 0.], [0., 0., 0., 0.103729, 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0.6673682, 0.], [0., 0., 0., 0., 0., 0., 0., 0.8322976, 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0.1435426, 0., 0., 0.], [0., 0., 0., 0.989063, 0., 0., 0., 0., 0.8523681, 0.]], dtype='f')
    assert (chu_liu_edmonds(scores3, 0) == [None, 4, 8, 9, 7, 1, 0, 2, 6, 5])
    scores4 = np.array([[0., 0., 0., 0.7209569, 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0.8032823, 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0.3385515, 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0.9569687, 0., 0., 0., 0., 0.6038499, 0., 0.]], dtype='f')
    assert (chu_liu_edmonds(scores4, 0) == [None, 8, 9, 0, 7, 2, 3, 5, 3, 8])
    scores5 = np.array([[0., 0., 0., 0., 0., 0., 0., 0.9075557, 0., 0.], [0., 0., 0.6197687, 0., 0., 0., 0., 0., 0., 0.8626124], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0.8647926, 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0.3778254, 0., 0., 0., 0., 0., 0., 0.0905699], [0., 0.6759112, 0., 0., 0., 0.8814458, 0., 0., 0.9278274, 0.], [0., 0., 0., 0., 0., 0.5856659, 0., 0.8265802, 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0.99308, 0., 0., 0., 0., 0., 0., 0.]], dtype='f')
    assert (chu_liu_edmonds(scores5, 0) == [None, 5, 4, 8, 7, 2, 2, 0, 6, 1])
def _validate_init_params_and_return_if_found(module_class: Any) -> List[str]:
init_params_raw = list(inspect.signature(module_class.__init__).parameters)
module_init_params = [param for param in init_params_raw if (param not in ['self', 'args', 'kwargs'])]
if (len(module_init_params) > 1):
raise UnsupportedOperation(f'A module class may accept a single `Web3` instance as the first argument of its __init__() method. More than one argument found for {module_class.__name__}: {module_init_params}')
return module_init_params |
class EyeTribe():
    """High-level wrapper around an EyeTribe eye-tracker connection.

    Spawns three daemon threads: a heartbeater (keeps the tracker session
    alive), a sample streamer (pulls frames from the tracker into a queue)
    and a data processor (dedupes frames and logs them, when recording, to
    a tab-separated '<logfilename>.tsv' file).
    """
    def __init__(self, logfilename='default', host='localhost', port=6555):
        # Tab-separated log file; created/overwritten immediately.
        self._logfile = codecs.open('{}.tsv'.format(logfilename), 'w', 'utf-8')
        self._separator = '\t'
        self._log_header()
        # Queue through which the streamer hands frames to the processor.
        self._queue = Queue()
        self._connection = connection(host=host, port=port)
        self._tracker = tracker(self._connection)
        self._heartbeat = heartbeat(self._connection)
        # Lock serialising access to the shared tracker connection.
        self._lock = Lock()
        self._beating = True
        # Tracker reports its heartbeat interval in milliseconds.
        self._heartbeatinterval = (self._tracker.get_heartbeatinterval() / 1000.0)
        self._hbthread = Thread(target=self._heartbeater, args=[self._heartbeatinterval])
        self._hbthread.daemon = True
        self._hbthread.name = 'heartbeater'
        self._streaming = True
        self._samplefreq = self._tracker.get_framerate()
        self._intsampletime = (1.0 / self._samplefreq)
        # Tracker-vs-local clock offset (ms); None until the first streamed
        # frame establishes it.
        self._clockdiff = None
        self._newestframe = self._tracker.get_frame()
        self._ssthread = Thread(target=self._stream_samples, args=[self._queue])
        self._ssthread.daemon = True
        self._ssthread.name = 'samplestreamer'
        self._processing = True
        self._processing_paused = False
        self._logdata = False
        self._currentsample = copy.deepcopy(self._newestframe)
        self._dpthread = Thread(target=self._process_samples, args=[self._queue])
        self._dpthread.daemon = True
        self._dpthread.name = 'dataprocessor'
        self._hbthread.start()
        self._ssthread.start()
        self._dpthread.start()
        self.calibration = calibration(self._connection)
    def start_recording(self):
        """Start logging streamed samples to the log file (idempotent)."""
        if (not self._logdata):
            self._logdata = True
            self.log_message('start_recording')
    def stop_recording(self):
        """Stop logging samples and flush the log file to disk."""
        self._logfile.flush()
        os.fsync(self._logfile.fileno())
        if self._logdata:
            self.log_message('stop_recording')
            self._logdata = False
    def log_message(self, message):
        """Write a timestamped MSG line to the log file."""
        t = time.time()
        ts = '{}.{}'.format(time.strftime('%Y-%m-%d %H:%M:%S'), (round((t % 1), 3) * 1000))
        # Translate local time to tracker time when the clock offset is known.
        if (self._clockdiff != None):
            t = int(((t * 1000) + self._clockdiff))
        else:
            t = ''
        line = self._separator.join(map(str, [u'MSG', ts, t, safe_decode(message)]))
        self._logfile.write((line + u'\n'))
    def sample(self):
        """Return the newest average (x, y) gaze position, or (None, None)."""
        if (self._newestframe == None):
            return (None, None)
        else:
            return (self._newestframe['avgx'], self._newestframe['avgy'])
    def pupil_size(self):
        """Return the newest pupil size, or None when no sample is available."""
        if (self._currentsample == None):
            return None
        else:
            # NOTE(review): reads _newestframe although the guard checks
            # _currentsample — confirm this asymmetry is intended.
            return self._newestframe['psize']
    def close(self):
        """Stop recording and all worker loops, then close file and connection."""
        if self._logdata:
            self.stop_recording()
        self._beating = False
        self._streaming = False
        self._processing = False
        self._logfile.close()
        self._connection.close()
    def _pause_sample_processing(self):
        # The streamer skips pulling new frames while paused.
        self._processing_paused = True
    def _unpause_sample_processing(self):
        self._processing_paused = False
    def _heartbeater(self, heartbeatinterval):
        """Thread target: periodically ping the tracker to keep the session alive."""
        while self._beating:
            self._lock.acquire(True)
            self._heartbeat.beat()
            self._lock.release()
            time.sleep(heartbeatinterval)
    def _stream_samples(self, queue):
        """Thread target: pull frames from the tracker into the queue."""
        while self._streaming:
            if (not self._processing_paused):
                self._lock.acquire(True)
                sample = self._tracker.get_frame()
                t1 = (time.time() * 1000)
                queue.put(sample)
                self._lock.release()
                self._newestframe = copy.deepcopy(sample)
                # Offset between the tracker clock and the local clock (ms).
                self._clockdiff = (sample['time'] - t1)
            # Poll at twice the tracker frame rate to avoid missing frames.
            time.sleep((self._intsampletime / 2))
    def _process_samples(self, queue):
        """Thread target: dedupe queued frames and log them when recording."""
        while self._processing:
            self._lock.acquire(True)
            if (not queue.empty()):
                sample = queue.get()
            else:
                sample = None
            self._lock.release()
            if (sample != None):
                # Only frames with a new timestamp count as new samples.
                if (not (self._currentsample['timestamp'] == sample['timestamp'])):
                    self._currentsample = copy.deepcopy(sample)
                    if self._logdata:
                        self._log_sample(sample)
    def _log_sample(self, sample):
        """Write one frame as a tab-separated line to the log file."""
        line = self._separator.join(map(str, [sample['timestamp'], sample['time'], sample['fix'], sample['state'], sample['rawx'], sample['rawy'], sample['avgx'], sample['avgy'], sample['psize'], sample['Lrawx'], sample['Lrawy'], sample['Lavgx'], sample['Lavgy'], sample['Lpsize'], sample['Lpupilx'], sample['Lpupily'], sample['Rrawx'], sample['Rrawy'], sample['Ravgx'], sample['Ravgy'], sample['Rpsize'], sample['Rpupilx'], sample['Rpupily']]))
        self._logfile.write((line + '\n'))
    def _log_header(self):
        """Write the column-header line and force it out to disk."""
        header = self._separator.join(['timestamp', 'time', 'fix', 'state', 'rawx', 'rawy', 'avgx', 'avgy', 'psize', 'Lrawx', 'Lrawy', 'Lavgx', 'Lavgy', 'Lpsize', 'Lpupilx', 'Lpupily', 'Rrawx', 'Rrawy', 'Ravgx', 'Ravgy', 'Rpsize', 'Rpupilx', 'Rpupily'])
        self._logfile.write((header + '\n'))
        self._logfile.flush()
        os.fsync(self._logfile.fileno())
        self._firstlog = False
class WordWrapRenderer(AbstractTreeNodeRenderer):
    """Tree-node renderer that word-wraps the label over up to max_lines lines.

    A pure renderer: paints elided, wrapped text next to an optional icon,
    and reports a size hint derived from the wrapped line count.
    """
    # Padding around the cell contents (left/right/top/bottom).
    padding = HasBorder(0)
    # Preferred total width of the cell, in pixels.
    width_hint = Int(100)
    # Maximum number of wrapped lines before elision.
    max_lines = Int(5)
    # Extra pixels reserved after the icon / below the text.
    extra_space = Int(8)
    def paint(self, editor, node, column, object, paint_context):
        """Draw the wrapped, possibly elided label inside the item rect."""
        (painter, option, index) = paint_context
        text = self.get_label(node, object, column)
        if editor.factory.show_icons:
            icon_width = (option.decorationSize.width() + self.extra_space)
            # NOTE(review): icon_height is computed but unused in paint().
            icon_height = option.decorationSize.height()
        else:
            icon_width = 0
            icon_height = 0
        # Text area: item rect shifted past the icon and shrunk by padding.
        x = ((option.rect.left() + icon_width) + self.padding.left)
        y = (option.rect.top() + self.padding.top)
        width = (((option.rect.width() - icon_width) - self.padding.left) - self.padding.right)
        height = ((option.rect.height() - self.padding.top) - self.padding.bottom)
        lines = wrap_text_with_elision(text, option.font, width, height)
        old_pen = painter.pen()
        # Use the highlighted-text pen while the row is selected.
        if bool((option.state & QtGui.QStyle.StateFlag.State_Selected)):
            painter.setPen(QtGui.QPen(option.palette.highlightedText(), 0))
        try:
            rect = painter.drawText(x, y, width, height, option.displayAlignment, '\n'.join(lines))
        finally:
            # Always restore the painter's previous pen.
            painter.setPen(old_pen)
    def size(self, editor, node, column, object, size_context):
        """Return a (width, height) size hint for the wrapped label."""
        (option, index) = size_context
        font_metrics = QtGui.QFontMetrics(option.font)
        text = self.get_label(node, object, column)
        if editor.factory.show_icons:
            icon_size = option.decorationSize
            icon_width = icon_size.width()
            icon_height = icon_size.height()
        else:
            icon_width = 0
            icon_height = 0
        # Wrap within the hinted width minus icon and padding; cap the text
        # height at max_lines worth of line spacing.
        width = (((self.width_hint - icon_width) - self.padding.left) - self.padding.right)
        max_height = (self.max_lines * font_metrics.lineSpacing())
        lines = wrap_text_with_elision(text, option.font, width, max_height)
        text_height = (len(lines) * font_metrics.lineSpacing())
        height = (((max(icon_height, text_height) + self.padding.top) + self.padding.bottom) + self.extra_space)
        return (self.width_hint, height)
def extractFallengodssanctuaryWordpressCom(item):
    """Parser for 'fallengodssanctuary.wordpress.com' feed items.

    Returns None for previews or untagged chapters, a release message for
    recognised tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('Eiyuu Seirei Musume', 'Chichi wa Eiyuu, Haha wa Seirei, Musume no Watashi wa Tenseisha.', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def is_syncing(request):
    # Fixture factory (yield-style): produces a fake `get` callable whose
    # Response stub answers json()/status_code() based on request.param —
    # REPLIES[1] when the param is truthy, REPLIES[0] otherwise.
    def get(*args, **kwargs):
        class Response():
            # No explicit self: the instance binds to *args, which is ignored.
            def json(*args, **kwargs):
                if request.param:
                    return REPLIES[1]
                return REPLIES[0]
            # NOTE(review): status_code is a method here, whereas on a real
            # requests.Response it is an attribute — confirm consumers call it.
            def status_code(self):
                if (request.param == ERROR):
                    return 401
                return 200
        return Response()
    (yield get)
class ApplicationType(Type):
    """Page type that mounts an application via its urlconf."""

    _REQUIRED = {'key', 'title', 'urlconf', 'app_namespace'}

    def __init__(self, **kwargs):
        # Applications render no template and declare no regions of their own.
        kwargs.setdefault('template_name', '')
        kwargs.setdefault('regions', [])
        # The instance namespace defaults to the page type identifier.
        kwargs.setdefault('app_namespace', (lambda instance: instance.page_type))
        super().__init__(**kwargs)
def test_add_logging_handle(tmpdir):
    """A plugin-provided logging handle on the root logger captures output."""
    with tmpdir.as_cwd():
        plugin_manager = ErtPluginManager(plugins=[dummy_plugins])
        plugin_manager.add_logging_handle_to_root(logging.getLogger())
        logging.critical('I should write this to spam.log')
        with open('spam.log', encoding='utf-8') as log_file:
            contents = log_file.read()
        assert 'I should write this to spam.log' in contents
def deserialize_value(klass: Type[McapRecord], field: str, value: Any) -> Any:
    """Coerce a decoded value into the declared type of a record field.

    Unrecognised declared types pass the value through unchanged.
    """
    declared = klass.__dataclass_fields__[field].type
    if declared == str:
        return value
    if declared == int:
        return int(value)
    if declared == bytes:
        # Values arrive as an iterable of numeric byte values.
        return bytes(int(item) for item in value)
    if declared == Dict[(int, int)]:
        return {int(key): int(val) for key, val in value.items()}
    return value
class NumericUpDownControl(Widget):
    """A labelled numeric spinner: down/up buttons around a value display.

    The value is clamped to [min_value, max_value]; the display gets the
    'modified' class whenever the value differs from zero.
    """
    DEFAULT_CSS = '\n    NumericUpDownControl {\n        height: 5;\n    }\n    NumericUpDownControl Button {\n        min-width: 5;\n    }\n\n    NumericUpDownControl .value-holder {\n        min-width: 2;\n        border: thick $primary;\n        text-style: bold;\n        padding: 0 1;\n        text-align: right;\n    }\n\n    NumericUpDownControl .value-holder.modified {\n        border: thick $secondary;\n    }\n    '
    value = reactive(0)

    def __init__(self, label, watch_value=None, min_value=(- 11), max_value=11, id=''):
        # Derive a widget id from the label when none is supplied.
        if not id:
            id = label.lower().replace(' ', '-').replace(':', '')
        super().__init__(id=id)
        self.label = label
        self.min_value = min_value
        self.max_value = max_value
        self.watch_value = watch_value

    def compose(self):
        yield Label(self.label)
        with Horizontal():
            yield Button(' ', id='{}-down'.format(self.id))
            yield Label('0', id='{}-value'.format(self.id), classes='value-holder')
            yield Button(' ', id='{}-up'.format(self.id))

    def on_button_pressed(self, event):
        pressed = event.button.id
        if pressed == '{}-down'.format(self.id) and self.value > self.min_value:
            self.value -= 1
        elif pressed == '{}-up'.format(self.id) and self.value < self.max_value:
            self.value += 1
        # Refresh the display and mark it when the value is non-zero.
        label = self.query_one('#{}-value'.format(self.id), Label)
        label.update(str(self.value))
        if self.value == 0:
            label.remove_class('modified')
        else:
            label.add_class('modified')
def parse_arguments():
    """Build the CLI parser for merging Hi-C matrix bin counts per TAD."""
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='\nUses a BED file of domains or TAD boundaries to merge\nthe bin counts of a Hi-C matrix per TAD.\n\nThe output matrix contains the total counts per TAD and\nthe total contacts with all other TADs.')
    # Required inputs and output.
    arg_parser.add_argument('--matrix', '-m', help='Path to Hi-C matrix to use.', required=True)
    arg_parser.add_argument('--domains', help='Path to a bed file containing the domains.', type=argparse.FileType('r'), required=True)
    arg_parser.add_argument('--outFile', '-o', help='Name for the resulting matrix file.', required=True)
    arg_parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
    return arg_parser
class OptionSeriesAreasplinerangeMarkerStates(Options):
    # Generated Highcharts option wrapper for
    # series.areasplinerange.marker.states; each accessor lazily creates the
    # corresponding sub-options object.
    def hover(self) -> 'OptionSeriesAreasplinerangeMarkerStatesHover':
        return self._config_sub_data('hover', OptionSeriesAreasplinerangeMarkerStatesHover)
    def normal(self) -> 'OptionSeriesAreasplinerangeMarkerStatesNormal':
        return self._config_sub_data('normal', OptionSeriesAreasplinerangeMarkerStatesNormal)
    def select(self) -> 'OptionSeriesAreasplinerangeMarkerStatesSelect':
        return self._config_sub_data('select', OptionSeriesAreasplinerangeMarkerStatesSelect)
def extractTaekanWordpressCom(item):
    """Parser for 'taekan.wordpress.com' feed items.

    Returns None for previews or untagged chapters, a release message for
    recognised tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('Seiken Tsukai no World Break', 'Seiken Tsukai no World Break', 'translated'),
        ('rakudai kishi no eiyuutan', 'Rakudai Kishi no Cavalry', 'translated'),
        ('unbreakable machine-doll', 'unbreakable machine-doll', 'translated'),
        ('Ecstas Online', 'Ecstas Online', 'translated'),
        ('Hundred', 'Hundred', 'translated'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionSeriesVennSonificationTracksMappingRate(Options):
    # Generated Highcharts option wrapper for
    # series.venn.sonification.tracks.mapping.rate.
    # NOTE(review): each getter/setter pair below shares one name, so the
    # later `def` shadows the earlier one at class-creation time (only the
    # setter survives). Decorators such as @property/@name.setter were
    # presumably lost upstream — confirm against the code generator.
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def recognize_column_type_(dataset: pd.DataFrame, column_name: str, columns: DatasetColumns) -> ColumnType:
    """Classify a dataset column (target, prediction, feature, id, date, ...).

    Target/prediction columns are numerical for regression-like tasks;
    a string prediction column is also numerical when it looks like a
    probability (non-integer numeric values in [0, 1]).
    """
    column = dataset[column_name]
    utility = columns.utility_columns
    # Regression task, or a numeric non-classification column with enough
    # distinct values to be treated as continuous.
    looks_continuous = (
        columns.task == 'regression'
        or (
            pd.api.types.is_numeric_dtype(column)
            and columns.task != 'classification'
            and column.nunique() > 5
        )
    )
    if column_name == utility.target:
        return ColumnType.Numerical if looks_continuous else ColumnType.Categorical
    if isinstance(utility.prediction, str) and column_name == utility.prediction:
        looks_like_probability = (
            not pd.api.types.is_integer_dtype(column)
            and pd.api.types.is_numeric_dtype(column)
            and column.max() <= 1
            and column.min() >= 0
        )
        if looks_continuous or looks_like_probability:
            return ColumnType.Numerical
        return ColumnType.Categorical
    if column_name in columns.num_feature_names:
        return ColumnType.Numerical
    # Multi-column (per-class probability) predictions are numerical.
    if isinstance(utility.prediction, list) and column_name in utility.prediction:
        return ColumnType.Numerical
    if column_name in columns.cat_feature_names:
        return ColumnType.Categorical
    if column_name in columns.datetime_feature_names:
        return ColumnType.Datetime
    if column_name in columns.text_feature_names:
        return ColumnType.Text
    if column_name == utility.id:
        return ColumnType.Id
    if column_name == utility.date:
        return ColumnType.Date
    return ColumnType.Unknown
def _download_and_copy(configured_logger: logging.Logger, cursor: psycopg2._psycopg.cursor, s3_client: BaseClient, s3_bucket_name: str, s3_obj_key: str, target_pg_table: str, ordered_col_names: List[str], gzipped: bool, partition_prefix: str=''):
    # Generator: download one S3 object (CSV, optionally gzipped) to a temp
    # file, COPY it into target_pg_table over the given cursor, then yield
    # the number of rows copied. Errors are logged and re-raised so the
    # caller can roll back.
    start = time.time()
    configured_logger.info(f'{partition_prefix}Starting write of {s3_obj_key}')
    # Temp file is deleted automatically when the context exits.
    with tempfile.NamedTemporaryFile(delete=True) as temp_file:
        download_s3_object(s3_bucket_name, s3_obj_key, temp_file.name, s3_client=s3_client)
        try:
            if gzipped:
                # Stream-decompress straight into COPY.
                with gzip.open(temp_file.name, 'rb') as csv_file:
                    cursor.copy_expert(sql=f"COPY {target_pg_table} ({','.join(ordered_col_names)}) FROM STDIN (FORMAT CSV)", file=csv_file)
            else:
                with open(temp_file.name, 'r', encoding='utf-8') as csv_file:
                    cursor.copy_expert(sql=f"COPY {target_pg_table} ({','.join(ordered_col_names)}) FROM STDIN (FORMAT CSV)", file=csv_file)
            elapsed = (time.time() - start)
            # rowcount reflects the rows ingested by the COPY.
            rows_copied = cursor.rowcount
            configured_logger.info(f'{partition_prefix}Finished writing {rows_copied} row(s) in {elapsed:.3f}s for {s3_obj_key}')
            (yield rows_copied)
        except Exception as exc:
            configured_logger.error(f'{partition_prefix}ERROR writing {s3_obj_key}')
            configured_logger.exception(exc)
            raise exc
class AssetStore():
    def __init__(self):
        """Initialise the store: register core shared assets and bundles."""
        self._known_component_classes = set()
        self._modules = {}
        self._assets = {}
        self._associated_assets = {}
        self._data = {}
        self._used_assets = set()
        # Baseline CSS reset and the client-side module loader.
        asset_reset = Asset('reset.css', RESET)
        asset_loader = Asset('flexx-loader.js', LOADER)
        # PScript standard library as an AMD module.
        (func_names, method_names) = get_all_std_names()
        mod = create_js_module('pscript-std.js', get_full_std_lib(), [], (func_names + method_names), 'amd-flexx')
        asset_pscript = Asset('pscript-std.js', (HEADER + mod))
        # Bind the pscript std functions/methods into the event-system module.
        pre1 = ', '.join([('%s%s = _py.%s%s' % (FUNCTION_PREFIX, n, FUNCTION_PREFIX, n)) for n in JS_EVENT.meta['std_functions']])
        pre2 = ', '.join([('%s%s = _py.%s%s' % (METHOD_PREFIX, n, METHOD_PREFIX, n)) for n in JS_EVENT.meta['std_methods']])
        mod = create_js_module('flexx.event.js', ('var %s;\nvar %s;\n%s' % (pre1, pre2, JS_EVENT)), ['pscript-std.js as _py'], (['Component', 'loop', 'logger'] + _property.__all__), 'amd-flexx')
        asset_event = Asset('flexx.event.js', (HEADER + mod))
        # Wrap the vendored bsdf/bb64 scripts as flexx AMD modules.
        code = open(get_resoure_path('bsdf.js'), 'rb').read().decode().replace('\r', '')
        code = code.split('"use strict";\n', 1)[1]
        code = ('flexx.define("bsdf", [], (function () {\n"use strict";\n' + code)
        asset_bsdf = Asset('bsdf.js', code)
        code = open(get_resoure_path('bb64.js'), 'rb').read().decode().replace('\r', '')
        code = code.split('"use strict";\n', 1)[1]
        code = ('flexx.define("bb64", [], (function () {\n"use strict";\n' + code)
        asset_bb64 = Asset('bb64.js', code)
        for a in [asset_reset, asset_loader, asset_pscript]:
            self.add_shared_asset(a)
        # In test mode, skip collecting modules and building flexx-core.js.
        if getattr(self, '_test_mode', False):
            return
        self.update_modules()
        # flexx-core.js bundles the loader, codecs, pscript std, event system
        # and the client-core/component modules.
        asset_core = Bundle('flexx-core.js')
        asset_core.add_asset(asset_loader)
        asset_core.add_asset(asset_bsdf)
        asset_core.add_asset(asset_bb64)
        asset_core.add_asset(asset_pscript)
        asset_core.add_asset(asset_event)
        asset_core.add_module(self.modules['flexx.app._clientcore'])
        asset_core.add_module(self.modules['flexx.app._component2'])
        self.add_shared_asset(asset_core)
def __repr__(self):
t = '<AssetStore with %i assets, and %i data>'
return (t % (len(self._assets), len(self._data)))
def create_module_assets(self, *args, **kwargs):
raise RuntimeError('create_module_assets is deprecated and no longer necessary.')
def modules(self):
return self._modules
    def update_modules(self):
        """Collect JS modules for newly seen component classes into bundles."""
        current_module_names = set(self._modules)
        # Register each not-yet-seen component class with its JSModule.
        for cls in AppComponentMeta.CLASSES:
            if (cls not in self._known_component_classes):
                self._known_component_classes.add(cls)
                if (cls.__jsmodule__ not in self._modules):
                    # JSModule registers itself in self._modules.
                    JSModule(cls.__jsmodule__, self._modules)
                self._modules[cls.__jsmodule__].add_variable(cls.__name__)
        mcount = 0
        bcount = 0
        # For each newly added module, add it to a bundle for itself and for
        # every parent package (e.g. 'a.b.c' -> bundles a.b.c, a.b, a).
        for name in set(self._modules).difference(current_module_names):
            mod = self.modules[name]
            mcount += 1
            bundle_names = []
            bundle_names.append(name)
            while ('.' in name):
                name = name.rsplit('.', 1)[0]
                bundle_names.append(name)
            bcount += len(bundle_names)
            for name in bundle_names:
                # Each bundle exists in both a .js and a .css flavour.
                for suffix in ['.js', '.css']:
                    bundle_name = (name + suffix)
                    if (bundle_name not in self._assets):
                        self._assets[bundle_name] = Bundle(bundle_name)
                    self._assets[bundle_name].add_module(mod)
        if mcount:
            logger.info(('Asset store collected %i new modules.' % mcount))
def get_asset(self, name):
    """Return the shared Asset object for ``name`` and mark it as used.

    Parameters: name -- asset filename; must end in '.js' or '.css'.
    Raises ValueError for any other extension, KeyError if not registered.
    """
    if not name.lower().endswith(('.js', '.css')):
        raise ValueError('Asset names always end in .js or .css')
    try:
        asset = self._assets[name]
    except KeyError:
        # Bug fix: re-raise with the friendly message but suppress the
        # internal-lookup traceback chaining (``from None``) so callers
        # see a single clean error instead of a nested one.
        raise KeyError('Asset %r is not available in the store.' % name) from None
    # Track usage so _dump_assets() can export only what was actually served.
    self._used_assets.add(asset.name)
    return asset
def get_data(self, name):
    """Return the shared data (bytes) stored under ``name``, or None if absent."""
    try:
        return self._data[name]
    except KeyError:
        return None
def get_asset_names(self):
    """Return a list with the names of all registered shared assets."""
    return [asset_name for asset_name in self._assets]
def get_data_names(self):
    """Return a list with the names of all registered shared data blobs."""
    return [data_name for data_name in self._data]
def add_shared_asset(self, asset_name, source=None):
    """Register an asset shared by all sessions; return its URL path.

    ``asset_name`` may already be an Asset instance (``source`` is then
    ignored) or a name from which an Asset is constructed with ``source``.
    Raises ValueError when the name is already registered.
    """
    asset = asset_name if isinstance(asset_name, Asset) else Asset(asset_name, source)
    if asset.name in self._assets:
        raise ValueError('Asset %r already registered.' % asset.name)
    self._assets[asset.name] = asset
    # The returned path mirrors how the server exposes shared assets.
    return 'flexx/assets/shared/' + asset.name
def associate_asset(self, mod_name, asset_name, source=None):
    """Associate an asset with module ``mod_name``; return its URL path.

    If an asset with the same basename is already registered it is reused
    (passing ``source`` then is an error); otherwise a new Asset is created
    and added to the shared assets. The per-module list is kept sorted by
    the asset's ``i`` attribute so load order is deterministic.
    """
    # Basename of the (possibly path- or URL-like) asset name.
    name = asset_name.replace('\\', '/').split('/')[(- 1)]
    if (name in self._assets):
        asset = self._assets[name]
        if (source is not None):
            t = 'associate_asset() for %s got source, but asset %r already exists.'
            raise TypeError((t % (mod_name, asset_name)))
    else:
        asset = Asset(asset_name, source)
        self.add_shared_asset(asset)
    assets = self._associated_assets.setdefault(mod_name, [])
    if (asset.name not in [a.name for a in assets]):
        assets.append(asset)
        # ``Asset.i`` is presumably a creation counter — confirm.
        assets.sort(key=(lambda x: x.i))
    return ('flexx/assets/shared/' + asset.name)
def get_associated_assets(self, mod_name):
    """Return a tuple of asset names associated with module ``mod_name``."""
    return tuple(a.name for a in self._associated_assets.get(mod_name, []))
def add_shared_data(self, name, data):
    """Register shared data ``data`` (bytes) under ``name``; return its URL path.

    Checks run in a fixed order: name type, duplicate name, data type —
    so the exception raised for a given bad input is stable.
    """
    if not isinstance(name, str):
        raise TypeError('add_shared_data() name must be a str.')
    if name in self._data:
        raise ValueError('add_shared_data() got existing name %r.' % name)
    if not isinstance(data, bytes):
        raise TypeError('add_shared_data() data must be bytes.')
    self._data[name] = data
    return 'flexx/data/shared/%s' % name
def _dump_data(self):
    """Map served data paths ('flexx/data/shared/<name>') to their bytes."""
    return {('flexx/data/shared/' + fname): self.get_data(fname)
            for fname in self.get_data_names()}
def _dump_assets(self, also_remote=True):
    """Map served asset paths to encoded asset sources.

    Only assets that were actually requested (``self._used_assets``) are
    included; remote assets are skipped unless ``also_remote`` is True.
    """
    dumped = {}
    for name in self._used_assets:
        asset = self._assets[name]
        if (not also_remote) and asset.remote:
            continue
        dumped['flexx/assets/shared/' + asset.name] = asset.to_string().encode()
    return dumped
def _save_svc_state(slave_vm, jsons):
    """Update ``slave_vm.sync_status`` from a list of parsed JSON outputs.

    Scans ``jsons`` for dicts carrying a 'service_state' key; the LAST one
    found wins. Unknown state strings fall back to ``SlaveVm.OFF`` via the
    ``REPLICA_SERVICE_STATES`` mapping. Returns the mapped state, or None
    when no output mentioned a service state (slave_vm is then untouched).
    """
    service_state = None
    for out in jsons:
        if ('service_state' in out):
            # Last occurrence wins — presumably later outputs are newer.
            service_state = REPLICA_SERVICE_STATES.get(out['service_state'], SlaveVm.OFF)
    if (service_state is not None):
        slave_vm.sync_status = service_state
    return service_state
class OptionSeriesFunnelSonificationTracksMappingHighpassResonance(Options):
    """Config accessors for a Highcharts sonification highpass 'resonance' mapping.

    NOTE(review): every getter below is immediately shadowed by a same-named
    setter definition — upstream these are almost certainly @property /
    @<name>.setter pairs whose decorators were stripped during extraction.
    As written, only the setter variants are reachable; confirm before
    relying on read behavior.
    """
    def mapFunction(self):
        # Getter (shadowed; see class NOTE).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store the raw mapping function value.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class ProgressBar():
    """Thin wrapper around tqdm that is lazily initialized and can be disabled.

    ``init_progress(total)`` must be called before updates take effect;
    every other method is a safe no-op until then.
    """

    def __init__(self, description='', disabled=False):
        self.progress = None      # tqdm instance, created by init_progress()
        self.description = description
        self.disabled = disabled
        self.start = None         # wall-clock start time, set by init_progress()

    def init_progress(self, total):
        """Create the tqdm bar (only once) and start the wall-clock timer."""
        if (self.progress is None):
            self.start = time.time()
            self.progress = tqdm(total=total, unit='projects', bar_format='{desc}: {percentage:.1f}%|{bar:80}| {n_fmt}/{total_fmt}{postfix}', desc=self.description, leave=False, disable=self.disabled)

    def update_progress_length(self, added):
        """Grow the expected total by ``added`` items."""
        if (self.progress is not None):
            self.progress.total = (self.progress.total + added)
            self.progress.refresh()

    def show_progress(self, text, category='~'):
        """Advance the bar by one item and display ``text`` as a postfix."""
        if (self.progress is not None):
            self.progress.update(1)
            postfix = {category: text}
            self.progress.set_postfix(postfix)

    def finish_progress(self):
        """Close the bar and return the elapsed time as 'HH:MM:SS.ss'.

        Bug fix: previously this raised TypeError when called before
        init_progress() (``self.start`` was still None); it now reports
        zero elapsed time in that case.
        """
        if (self.progress is not None):
            self.progress.close()
        if (self.start is None):
            return '{:0>2}:{:0>2}:{:05.2f}'.format(0, 0, 0.0)
        end = time.time()
        (hours, rem) = divmod((end - self.start), 3600)
        (minutes, seconds) = divmod(rem, 60)
        return '{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds)
def run_scenarios(scenario):
    """Configure and run Dispa-SET simulations for one Baseline/TEMBA scenario.

    Mutates the module-level ``config`` dict in place (paths, dates, prices)
    and launches a build + GAMS solve for each simulated year. ``scenario``
    is one of 'Baseline_', 'Baseline_NTC_', 'TEMBA_Reference_',
    'TEMBA_1.5deg_' or 'TEMBA_2.0deg_'. Because ``config`` is shared global
    state, repeated calls keep rewriting path suffixes in place.
    """
    config['SimulationDirectory'] = ((config['SimulationDirectory'][:(- 8)] + scenario) + str('2015'))
    # Scenario-specific NTC (interconnection capacity) input file.
    if (scenario == 'Baseline_'):
        config['NTC'] = (config['NTC'][:(- 8)] + '2018.csv')
    elif ((scenario == 'Baseline_NTC_') or (scenario == 'TEMBA_Reference_') or (scenario == 'TEMBA_1.5deg_') or (scenario == 'TEMBA_2.0deg_')):
        config['NTC'] = (config['NTC'][:(- 8)] + '2025.csv')
    if ((scenario == 'Baseline_') or (scenario == 'Baseline_NTC_')):
        # Baseline: historical weather years drive RES availability/inflows.
        for year in range(1980, 1982, 1):
            start_date = [year, 1, 1, 0, 0, 0]
            stop_date = [year, 12, 31, 23, 59, 0]
            config['StartDate'] = tuple(start_date)
            config['StopDate'] = tuple(stop_date)
            config['SimulationDirectory'] = (config['SimulationDirectory'][:(- 4)] + str(year))
            config['RenewablesAF'] = ((config['RenewablesAF'][:(- 8)] + str(year)) + '.csv')
            config['ReservoirScaledInflows'] = ((config['ReservoirScaledInflows'][:(- 8)] + str(year)) + '.csv')
            SimData = ds.build_simulation(config, mts_plot=True, MTSTimeStep=24)
            r = ds.solve_GAMS(config['SimulationDirectory'], config['GAMS_folder'])
    else:
        # TEMBA projections: scenario-specific demand/outages/plant data.
        config['Demand'] = (((config['Demand'][:(- 12)] + 'TEMBA_') + str(2015)) + '.csv')
        config['Outages'] = (((config['Outages'][:(- 12)] + scenario) + str(2015)) + '.csv')
        config['PowerPlantData'] = (((config['PowerPlantData'][:(- 12)] + scenario) + str(2015)) + '.csv')
        for year in range(2035, 2046, 10):
            start_date = [year, 1, 1, 0, 0, 0]
            stop_date = [year, 12, 31, 23, 59, 0]
            config['StartDate'] = tuple(start_date)
            config['StopDate'] = tuple(stop_date)
            config['SimulationDirectory'] = (config['SimulationDirectory'][:(- 4)] + str(year))
            # Decade-appropriate NTC file for the simulated year.
            if ((year >= 2030) and (year < 2040)):
                config['NTC'] = (config['NTC'][:(- 8)] + '2030.csv')
            if ((year >= 2040) and (year < 2050)):
                config['NTC'] = (config['NTC'][:(- 8)] + '2040.csv')
            if (year >= 2050):
                config['NTC'] = (config['NTC'][:(- 8)] + '2050.csv')
            config['default']['PriceTransmission'] = 0.0
            # Fixed climate years for RES availability (2005) and inflows (2001).
            config['RenewablesAF'] = ((config['RenewablesAF'][:(- 8)] + str(2005)) + '.csv')
            config['ReservoirScaledInflows'] = ((config['ReservoirScaledInflows'][:(- 8)] + str(2001)) + '.csv')
            config['Demand'] = ((config['Demand'][:(- 8)] + str(year)) + '.csv')
            config['Outages'] = ((config['Outages'][:(- 8)] + str(year)) + '.csv')
            config['PowerPlantData'] = ((config['PowerPlantData'][:(- 8)] + str(year)) + '.csv')
            # CO2 price trajectories: polynomial fits in the calendar year,
            # clamped at zero below. NOTE(review): the ``0. * year**3`` term
            # is dead and the constants look oddly scaled — confirm against
            # the source study.
            if (scenario == 'TEMBA_2.0deg_'):
                config['default']['PriceOfCO2'] = ((((0. * (year ** 3)) - (26. * (year ** 2))) + (53203. * year)) - .7886163)
            if (scenario == 'TEMBA_1.5deg_'):
                config['default']['PriceOfCO2'] = ((((0. * (year ** 3)) - (187. * (year ** 2))) + (377765. * year)) - .818786)
            if (config['default']['PriceOfCO2'] < 0):
                config['default']['PriceOfCO2'] = 0.0
            # Water value / load-shedding cost track the CO2 price.
            config['default']['WaterValue'] = (401 + config['default']['PriceOfCO2'])
            config['default']['CostLoadShedding'] = (400 + config['default']['PriceOfCO2'])
            SimData = ds.build_simulation(config, mts_plot=True, MTSTimeStep=24)
            r = ds.solve_GAMS(config['SimulationDirectory'], config['GAMS_folder'])
def compute(chart):
    """Compute almutem score rows for an astrological chart.

    Builds one score row per hylegic point (Sun, Moon, ASC, Pars Fortuna,
    Syzygy) from essential dignities, plus rows for house placements and
    the planetary day/hour rulers, then totals everything into a 'Score'
    row. Returns a dict of rows keyed by point id / 'Houses' / 'Rulers' /
    'Score'.
    """
    almutems = {}
    # Essential-dignity scores for the five hylegic points.
    hylegic = [chart.getObject(const.SUN), chart.getObject(const.MOON), chart.getAngle(const.ASC), chart.getObject(const.PARS_FORTUNA), chart.getObject(const.SYZYGY)]
    for hyleg in hylegic:
        row = newRow()
        digInfo = essential.getInfo(hyleg.sign, hyleg.signlon)
        for dignity in DIGNITY_LIST:
            objID = digInfo[dignity]
            if objID:
                score = essential.SCORES[dignity]
                # Accumulate both a printable '+N' trail and the numeric total.
                row[objID]['string'] += ('+%s' % score)
                row[objID]['score'] += score
        almutems[hyleg.id] = row
    # House placement scores (assignment, not accumulation, per object).
    row = newRow()
    for objID in OBJECT_LIST:
        obj = chart.getObject(objID)
        house = chart.houses.getObjectHouse(obj)
        score = HOUSE_SCORES[house.id]
        row[objID]['string'] = ('+%s' % score)
        row[objID]['score'] = score
    almutems['Houses'] = row
    # Planetary day ruler (+7) and hour ruler (+6). Note: if both rulers are
    # the same object, the hour assignment overwrites the day one.
    row = newRow()
    table = planetarytime.getHourTable(chart.date, chart.pos)
    ruler = table.currRuler()
    hourRuler = table.hourRuler()
    row[ruler] = {'string': '+7', 'score': 7}
    row[hourRuler] = {'string': '+6', 'score': 6}
    almutems['Rulers'] = row
    # Grand total across all rows computed above.
    scores = newRow()
    for (_property, _list) in almutems.items():
        for (objID, values) in _list.items():
            scores[objID]['string'] += values['string']
            scores[objID]['score'] += values['score']
    almutems['Score'] = scores
    return almutems
def pytest_generate_tests(metafunc):
    """Parametrize the 'testvs' and 'incorrect_testvs' fixtures.

    'testvs' enumerates every valid combination of global size, local size,
    max-num-groups and max-work-item-sizes; invalid combinations are
    filtered out by VirtualSizesHelper.try_create returning None.
    'incorrect_testvs' carries two deliberately inconsistent configurations.
    """
    if ('testvs' in metafunc.fixturenames):
        global_sizes = [(35,), (((31 * 31) * 4),), (15, 13), (13, 35), (17, 15, 13), (5, 33, 75), (10, 10, 10, 5)]
        local_sizes = [None, (4,), (2, 4), (6, 4, 2)]
        mngs = [(26, 31), (34, 56, 25)]       # max num groups per dimension
        mwiss = [(4, 4), (5, 3), (9, 5, 3)]   # max work item sizes
        vals = []
        for (gs, ls, mng, mwis) in itertools.product(global_sizes, local_sizes, mngs, mwiss):
            testvs = VirtualSizesHelper.try_create(gs, ls, mng, mwis)
            if (testvs is None):
                # Combination not representable; skip it.
                continue
            vals.append(testvs)
        metafunc.parametrize('testvs', vals, ids=[str(x) for x in vals])
    if ('incorrect_testvs' in metafunc.fixturenames):
        vals = [VirtualSizesHelper((32, 32), (4, 4), (7, 8), (4, 4)), VirtualSizesHelper((32, 32), (5, 4), (16, 16), (4, 4))]
        metafunc.parametrize('incorrect_testvs', vals, ids=[str(x) for x in vals])
def reduce_collection(collection, set=5, reducer='mean', scoreband='score'):
    """Reduce the best ``set`` images of an Earth Engine collection to one image.

    Images are ranked per-pixel by the ``scoreband`` band; the ``set``
    highest-scoring ones are reduced with ``reducer``, which may be a known
    name ('mean', 'median', 'mode', 'interval_mean', 'first') or an
    ee.Reducer instance. Returns an ee.Image with the original band names.

    NOTE: the parameter name ``set`` shadows the builtin but is part of the
    public signature, so it is kept for backward compatibility.
    """
    reducers = {'mean': ee.Reducer.mean(), 'median': ee.Reducer.median(), 'mode': ee.Reducer.mode(), 'interval_mean': ee.Reducer.intervalMean(50, 90), 'first': ee.Reducer.first()}
    if (reducer in reducers):
        selected_reducer = reducers[reducer]
    elif isinstance(reducer, ee.Reducer):
        selected_reducer = reducer
    else:
        raise ValueError('Reducer {} not recognized'.format(reducer))
    # Unmask so every image contributes a value at every pixel.
    collection = collection.map((lambda img: img.unmask()))
    array = collection.toArray()
    bands_axis = 1
    images_axis = 0
    bands = ee.Image(collection.first()).bandNames()
    score_index = bands.indexOf(scoreband)
    score = array.arraySlice(axis=bands_axis, start=score_index, end=score_index.add(1))
    # Sort the image axis by score; keep the ``set`` highest-scoring entries.
    sorted_array = array.arraySort(score)
    longitud = sorted_array.arrayLength(0)
    lastvalues = sorted_array.arraySlice(axis=images_axis, start=longitud.subtract(set), end=longitud)
    # (Removed: an unused score re-slice of ``lastvalues`` that had no effect.)
    processed = lastvalues.arrayReduce(selected_reducer, ee.List([images_axis]))
    result_image = processed.arrayProject([bands_axis]).arrayFlatten([bands])
    return result_image
class DocumentHasBinaryMetadata(models.Model):
    """Join table linking a Document to a BinaryMetadata entry, with timestamps."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(null=False)
    document = models.ForeignKey(Document, null=False, related_name='binary_metadata', db_column='document_uuid', on_delete=models.CASCADE)
    metadata = models.ForeignKey(BinaryMetadata, null=False, related_name='documents', db_column='metadata_uuid', on_delete=models.CASCADE)
    created = DateTimeField(null=False, auto_now_add=True)
    last_modified = DateTimeField(null=False, auto_now=True)

    def set_last_modified(self, datetime_=None):
        """Set and persist ``last_modified`` (defaults to now); return the value set."""
        if (datetime_ is None):
            datetime_ = timezone.now()
        self.last_modified = datetime_
        self.save()
        return self.last_modified

    def contents_as_str(self):
        """Delegate to the related BinaryMetadata's string rendering."""
        return self.metadata.contents_as_str()

    def __str__(self):
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception (the fallback
        # is meant for e.g. missing related rows).
        try:
            return f'{self.document}->{self.metadata}'
        except Exception:
            return str(self.id)

    class Meta():
        db_table = 'DocumentHasBinaryMetadata'
        unique_together = ('document', 'metadata')
# NOTE(review): the '.django_db' below looks like the remnant of a stripped
# '@pytest.mark.django_db' decorator — confirm against the original file.
.django_db
def test_double_require(client, monkeypatch, elasticsearch_award_index, subaward_with_tas):
    """Requiring both the FA path and the TAS path of the same TAS still matches subaward 1."""
    _setup_es(client, monkeypatch, elasticsearch_award_index)
    resp = query_by_tas_subaward(client, {'require': [_fa_path(BASIC_TAS), _tas_path(BASIC_TAS)]})
    assert (resp.json()['results'] == [_subaward1()])
def test_async_w3_ens_for_empty_ens_sets_w3_object_reference(local_async_w3):
    """The default ENS instance must share (not copy) the parent AsyncWeb3 object.

    Flipping ``strict_bytes_type_checking`` on the parent must be visible
    through ``ens.w3``, proving both names reference the same object.
    """
    assert local_async_w3.strict_bytes_type_checking
    assert local_async_w3.ens.w3.strict_bytes_type_checking
    assert (local_async_w3 == local_async_w3.ens.w3)
    local_async_w3.strict_bytes_type_checking = False
    assert (not local_async_w3.strict_bytes_type_checking)
    assert (not local_async_w3.ens.w3.strict_bytes_type_checking)
def filter_log_fortiguard_override_filter_data(json):
    """Keep only the recognized fortiguard-override-filter keys with non-None values."""
    option_list = ['anomaly', 'dlp_archive', 'dns', 'filter', 'filter_type', 'forward_traffic', 'free_style', 'gtp', 'local_traffic', 'multicast_traffic', 'netscan_discovery', 'netscan_vulnerability', 'severity', 'sniffer_traffic', 'ssh', 'voip', 'ztna_traffic']
    json = remove_invalid_fields(json)
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
# NOTE(review): the line below looks like the remnant of a stripped registry
# decorator (e.g. "@registry.reg('cuda.gemm_rrr.func_decl')") — confirm.
('cuda.gemm_rrr.func_decl')
def gen_function_decl(func_attrs):
    """Render the function declaration for a row-row-row GEMM CUDA op.

    Input and weight rank are taken from the original shapes of the first
    two input accessors; split-k support is always advertised.
    """
    func_name = func_attrs['name']
    input_ndims = len(func_attrs['input_accessors'][0].original_shapes)
    weight_ndims = len(func_attrs['input_accessors'][1].original_shapes)
    return common.FUNC_DECL_TEMPLATE.render(func_name=func_name, input_ndims=input_ndims, weight_ndims=weight_ndims, support_split_k=True)
class MsrBamBlock(nn.Module):
    """Multi-scale residual block: parallel 3x3/5x5 branches, fusion, BAM attention.

    Two rounds of parallel convolutions (outputs channel-concatenated
    between rounds), a 1x1 'confusion' conv back to ``n_feats`` channels,
    BAM attention, and a residual skip from the input.
    """
    def __init__(self, conv=default_conv, n_feats=64):
        super(MsrBamBlock, self).__init__()
        kernel_size_1 = 3
        kernel_size_2 = 5
        self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)
        # Round-2 convs see 2*n_feats channels (concat of both branches).
        self.conv_3_2 = conv((n_feats * 2), (n_feats * 2), kernel_size_1)
        self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)
        self.conv_5_2 = conv((n_feats * 2), (n_feats * 2), kernel_size_2)
        # 1x1 conv fusing the 4*n_feats concatenation back to n_feats.
        self.confusion = nn.Conv2d((n_feats * 4), n_feats, 1, padding=0, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.attention = BamBlock(n_feats)
    def forward(self, x):
        # assumes x is (N, n_feats, H, W) and conv() preserves spatial
        # size — TODO confirm against default_conv.
        input_1 = x
        output_3_1 = self.relu(self.conv_3_1(input_1))
        output_5_1 = self.relu(self.conv_5_1(input_1))
        input_2 = torch.cat([output_3_1, output_5_1], 1)
        output_3_2 = self.relu(self.conv_3_2(input_2))
        output_5_2 = self.relu(self.conv_5_2(input_2))
        input_3 = torch.cat([output_3_2, output_5_2], 1)
        output = self.attention(self.confusion(input_3))
        output += x  # residual connection
        return output
# NOTE(review): the line below looks like the remnant of a stripped Flask
# route decorator (e.g. "@app.route('/report_bugs', ...)") — confirm.
('/report_bugs', methods=['GET', 'POST'])
def report_bugs():
    """Bug-report page guarded by an HMAC-signed proof-of-work challenge.

    GET: renders the user's last 100 bugs plus a fresh PoW challenge with a
    2-minute expiry, MAC'd into a cookie. POST: verifies the cookie MAC,
    the expiry, and the PoW solution, then stores the submitted bug.

    NOTE(review): ``hmac.HMAC(key, msg)`` is called without ``digestmod``,
    which raises TypeError on Python >= 3.8 — confirm the intended digest.
    The MAC key is hard-coded; presumably deliberate for this app.
    """
    if current_user.is_authenticated:
        MAC_KEY = 'kfggfgiihuerbtjgrjrABCDD'
        if (request.method == 'GET'):
            bugs = Bug.query.filter((Bug.owner_id == current_user.id)).order_by(Bug.id.desc()).limit(100).all()
            # Challenge = '<rand>_<expiry-unix-ts>', MAC'd and set as a cookie.
            timeout = (int(time.time()) + 120)
            rand_val = rand_string(5)
            real_pow = ('%s_%s' % (rand_val, str(timeout)))
            mac = hmac.HMAC(bytes(MAC_KEY, 'utf-8'), bytes(real_pow, 'utf-8')).hexdigest()
            resp = make_response(render_template('report_bugs.html', bugs=bugs, real_pow=real_pow.split('_')[0]))
            resp.set_cookie('pow', ((real_pow + ':') + mac))
            return resp
        elif (request.method == 'POST'):
            try:
                (user_pow, user_mac) = request.cookies.get('pow').split(':')
                # Recompute the MAC over the client-supplied challenge.
                real_mac = hmac.HMAC(bytes(MAC_KEY, 'utf-8'), bytes(user_pow, 'utf-8')).hexdigest()
                if (real_mac != user_mac):
                    return ('pow is incorrect', 500)
                try:
                    timeout = int(user_pow.split('_')[1])
                except Exception as e:
                    print('inside casting user_pow timeout')
                    print(e)
                    return ('pow is incorrect', 500)
                if (timeout < int(time.time())):
                    return ('pow is old!', 500)
                pow_sol = request.form.get('pow_sol', None)
                if (pow_sol is None):
                    return ('pow is incorrect', 500)
                if (check(pow_sol, user_pow.split('_')[0]) == False):
                    return ('pow is incorrect', 500)
                # Challenge passed: persist the report.
                title = request.form.get('title', None)
                body = request.form.get('body', None)
                link = request.form.get('link', None)
                bug = Bug(title=title, body=body, link=link, owner_id=current_user.id)
                db.session.add(bug)
                db.session.commit()
            except Exception as e:
                print('from POST submit_bug')
                print(e)
                return ('Error!', 500)
            return redirect('/report_bugs')
    # Unauthenticated fallthrough: render the page without data.
    return render_template('report_bugs.html', current_user=current_user)
class Auth(_common.FlyteIdlEntity):
    """Launch-plan auth: an assumable IAM role and/or a k8s service account.

    NOTE(review): ``assumable_iam_role`` / ``kubernetes_service_account``
    read like stripped @property getters, and ``from_flyte_idl`` takes
    ``cls`` with no visible @classmethod decorator — confirm the decorators
    were lost in extraction.
    """
    def __init__(self, assumable_iam_role=None, kubernetes_service_account=None):
        self._assumable_iam_role = assumable_iam_role
        self._kubernetes_service_account = kubernetes_service_account
    def assumable_iam_role(self):
        return self._assumable_iam_role
    def kubernetes_service_account(self):
        return self._kubernetes_service_account
    def to_flyte_idl(self):
        """Serialize to the _launch_plan.Auth protobuf (falsy values become None)."""
        return _launch_plan.Auth(assumable_iam_role=(self.assumable_iam_role if self.assumable_iam_role else None), kubernetes_service_account=(self.kubernetes_service_account if self.kubernetes_service_account else None))
    def from_flyte_idl(cls, pb2_object):
        """Build an Auth from its protobuf representation."""
        return cls(assumable_iam_role=pb2_object.assumable_iam_role, kubernetes_service_account=pb2_object.kubernetes_service_account)
def make_header_lines(all_bits):
    """Render (a, b) bit pairs as 6 rows of vertical column headers.

    Each pair becomes the label 'a_b', padded to 6 characters with the
    underscore displayed as '|'; row i of the output holds the i-th
    character of every label, so labels read top-to-bottom, one column
    per pair.
    """
    height = 6
    labels = [('%d_%d' % (pair[0], pair[1])).ljust(height).replace('_', '|')
              for pair in all_bits]
    return [''.join(label[row] for label in labels) for row in range(height)]
# NOTE(review): the line below looks like the remnant of a stripped registry
# decorator (e.g. "@registry.llm_models('spacy.Claude-2.v1')") — confirm.
_models('spacy.Claude-2.v1')
def anthropic_claude_2(config: Dict[(Any, Any)]=SimpleFrozenDict(), name: Literal[('claude-2', 'claude-2-100k')]='claude-2', strict: bool=Anthropic.DEFAULT_STRICT, max_tries: int=Anthropic.DEFAULT_MAX_TRIES, interval: float=Anthropic.DEFAULT_INTERVAL, max_request_time: float=Anthropic.DEFAULT_MAX_REQUEST_TIME, endpoint: Optional[str]=None) -> Callable[([Iterable[str]], Iterable[str])]:
    """Return an Anthropic Claude-2 completion callable for spacy-llm.

    Defaults to the completions endpoint when ``endpoint`` is not given;
    all retry/timeout knobs are forwarded to the Anthropic wrapper.
    """
    return Anthropic(name=name, endpoint=(endpoint or Endpoints.COMPLETIONS.value), config=config, strict=strict, max_tries=max_tries, interval=interval, max_request_time=max_request_time)
def test_adding_envs():
    """extraEnvs set on the daemonset must not leak into the deployment, and vice versa."""
    config = '\ndaemonset:\n  extraEnvs:\n  - name: LOG_LEVEL\n    value: DEBUG\n'
    r = helm_template(config)
    assert ({'name': 'LOG_LEVEL', 'value': 'DEBUG'} in r['daemonset'][name]['spec']['template']['spec']['containers'][0]['env'])
    assert ({'name': 'LOG_LEVEL', 'value': 'DEBUG'} not in r['deployment'][(name + '-metrics')]['spec']['template']['spec']['containers'][0]['env'])
    # Same check in the other direction: deployment-only env vars.
    config = '\ndeployment:\n  extraEnvs:\n  - name: LOG_LEVEL\n    value: DEBUG\n'
    r = helm_template(config)
    assert ({'name': 'LOG_LEVEL', 'value': 'DEBUG'} in r['deployment'][(name + '-metrics')]['spec']['template']['spec']['containers'][0]['env'])
    assert ({'name': 'LOG_LEVEL', 'value': 'DEBUG'} not in r['daemonset'][name]['spec']['template']['spec']['containers'][0]['env'])
def geti2cdevname(devaddr):
    """Return the known device name(s) for an I2C address.

    Looks ``int(devaddr)`` up in every entry of the module-level
    ``I2CDevices`` table; multiple matches are joined with '; '. Returns
    'Unknown' when nothing matches.
    """
    # Idiom fix: iterate the table directly instead of range(len(...)),
    # hoist the loop-invariant int() conversion, and drop the needless
    # ``global`` declaration (the table is only read, never rebound).
    address = int(devaddr)
    names = [dev['name'] for dev in I2CDevices if address in dev['addr']]
    return '; '.join(names) if names else 'Unknown'
class HTTPResponse():
    """Simple HTTP response container (status, headers, body).

    NOTE(review): the annotated class attributes plus ``__post_init__``
    strongly suggest a stripped @dataclass decorator — confirm. As written
    (without the decorator) the class-level ``HTTPHeaders()`` default would
    be shared across all instances.
    """
    status: int = 200
    headers: HTTPHeaders = HTTPHeaders()
    body: bytes = b''
    def __post_init__(self):
        # Normalize whatever was passed for ``headers`` into HTTPHeaders.
        if (self.headers is None):
            self.headers = HTTPHeaders()
        elif isinstance(self.headers, dict):
            self.headers = HTTPHeaders.from_dict(self.headers)
        elif isinstance(self.headers, (tuple, list)):
            self.headers = HTTPHeaders.from_sequence(self.headers)
    def to_flask_response(self):
        """Convert to a flask.Response (flask imported lazily to keep it optional)."""
        import flask
        return flask.Response(status=self.status, headers=self.headers.items(), response=self.body)
class LAMB(LARS):
    """LAMB optimizer implemented on top of the LARS update machinery.

    ``get_update`` computes the Adam-style direction (bias-corrected
    first/second moments plus decoupled weight decay); the layer-wise
    trust-ratio scaling is presumably applied by the LARS base class —
    confirm.
    """
    def __init__(self, params, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-08, weight_decay=0):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= beta1 < 1.0)):
            raise ValueError('Invalid beta1: {}'.format(beta1))
        if (not (0.0 <= beta2 < 1.0)):
            raise ValueError('Invalid beta2: {}'.format(beta2))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = {'lr': lr, 'beta1': beta1, 'beta2': beta2, 'eps': eps, 'weight_decay': weight_decay}
        # Deliberately skips LARS.__init__ (different hyperparameters) and
        # calls the grandparent Optimizer.__init__ directly.
        Optimizer.__init__(self, params, defaults)
    # NOTE(review): '_grad()' below looks like the remnant of a stripped
    # decorator (e.g. '@torch.no_grad()') — confirm against the original.
    _grad()
    def get_update(self, p, grad, state, group):
        """Return the update direction for parameter ``p`` given its grad/state."""
        if (len(state) == 0):
            # Lazy per-parameter state init on the first step.
            state['step'] = 0
            state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
            state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
        (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
        beta1 = group['beta1']
        beta2 = group['beta2']
        state['step'] += 1
        bias_correction1 = (1 - (beta1 ** state['step']))
        bias_correction2 = (1 - (beta2 ** state['step']))
        # Exponential moving averages of the gradient and its square.
        exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
        m_update = torch.div(exp_avg, bias_correction1)
        v_update = torch.div(exp_avg_sq, bias_correction2)
        denom = torch.add(v_update.sqrt(), group['eps'])
        m_update.div_(denom)
        if (group['weight_decay'] != 0):
            # Decoupled (AdamW-style) weight decay folded into the update.
            m_update.add_(p.data, alpha=group['weight_decay'])
        return m_update
class github_issue_0088_test_case(unittest.TestCase):
    """Regression tests for benedict issue #88: flatten() vs the keypath separator."""
    def test_flatten_without_keypath_separator(self):
        # With keypath_separator=None, any flatten separator is allowed.
        d = benedict({'a': {'b': {'c': 1}}}, keypath_separator=None)
        f = d.flatten('.')
        self.assertEqual(f, {'a.b.c': 1})
    def test_flatten_with_separator_equal_to_keypath_separator(self):
        # Flattening with the same character as the keypath separator must raise.
        d = benedict({'a': {'b': {'c': 1}}}, keypath_separator='.')
        with self.assertRaises(ValueError):
            _ = d.flatten('.')
        d = benedict({'a': {'b': {'c': 1}}}, keypath_separator='_')
        with self.assertRaises(ValueError):
            _ = d.flatten('_')
    def test_flatten_with_separator_different_from_keypath_separator(self):
        # A distinct separator flattens fine in either direction.
        d = benedict({'a': {'b': {'c': 1}}}, keypath_separator='_')
        f = d.flatten('.')
        self.assertEqual(f, {'a.b.c': 1})
        d = benedict({'a': {'b': {'c': 1}}}, keypath_separator='.')
        f = d.flatten('_')
        self.assertEqual(f, {'a_b_c': 1})
class HCI(object):
    """Base class for HCI UART packets; the leading byte selects the packet type.

    NOTE(review): ``from_data`` takes no ``self`` — upstream it is presumably
    a @staticmethod whose decorator was stripped; confirm.
    """
    HCI_CMD = 1
    ACL_DATA = 2
    SCO_DATA = 3
    HCI_EVT = 4
    HCI_UART_TYPE_STR = {HCI_CMD: 'HCI_CMD', ACL_DATA: 'ACL_DATA', SCO_DATA: 'SCO_DATA', HCI_EVT: 'HCI_EVT'}

    def from_data(data):
        """Dispatch raw packet data to the concrete class for its UART type."""
        uart_type = ord(data[0])
        return HCI_UART_TYPE_CLASS[uart_type].from_data(data[1:])

    def __init__(self, uart_type):
        self.uart_type = uart_type

    def getRaw(self):
        """Serialize just the UART type byte."""
        return p8(self.uart_type)

    def __str__(self):
        type_names = self.HCI_UART_TYPE_STR
        return type_names[self.uart_type]
class Agent():
    """Minimal agent interface: subclasses implement __call__ and update."""
    def __init__(self):
        pass
    def require_history(self):
        """Whether the agent needs the episode history passed to __call__."""
        return False
    def __call__(self, state: DictTensor, input: DictTensor, user_info: DictTensor, history: TemporalDictTensor=None):
        """Produce the agent's output; must be implemented by subclasses."""
        raise NotImplementedError
    def update(self, info):
        """Update internal state/parameters; must be implemented by subclasses."""
        raise NotImplementedError
    def close(self):
        """Release resources; default is a no-op."""
        pass
def extractStudentcntranslationsWordpressCom(item):
    """Build a release message for a studentcntranslations feed item.

    Returns None for previews or items without chapter/volume info, a
    release message for a recognized tag, or False when no tag matches.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for (tagname, name, tl_type) in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TrafficLight(Html.Html):
    """Round status-indicator ('traffic light') HTML component.

    Renders a colored disc (red/green/orange from the theme palette, or any
    explicit color) with optional label, tooltip, helper text and a
    clickable 'resolve' wrench icon.

    NOTE(review): ``dom`` reads like a stripped @property — confirm.
    """
    name = 'Light'
    tag = 'div'
    def __init__(self, page: primitives.PageModel, color, label, height, tooltip, helper, options, profile, html_code: str=None):
        super(TrafficLight, self).__init__(page, color, css_attrs={'width': height, 'height': height}, options=options, profile=profile, html_code=html_code)
        self.add_helper(helper, css={'margin-top': '-17px'})
        self.add_label(label, css={'width': 'auto', 'float': 'none', 'vertical-align': 'middle', 'height': '100%', 'margin': '0 5px', 'display': 'inline-block', 'min-width': '100px'}, html_code=self.html_code)
        # Full rounding turns the square div into a circle.
        self.css({'border-radius': '60px', 'background-color': self.val, 'display': 'inline-block', 'vertical-align': 'middle'})
        self.set_attrs(name='title', value=tooltip)
        self.set_attrs(name='data-status', value=color)
        # Theme colors the JS builder maps to the True/False/None states.
        self._jsStyles = {'red': self.page.theme.danger.base, 'green': self.page.theme.success.base, 'orange': self.page.theme.warning.base}
        self.action = None
        if (tooltip is not None):
            self.tooltip(tooltip)
    def dom(self) -> JsHtml.JsHtmlBackground:
        """Lazily built JS DOM wrapper for this component."""
        if (self._dom is None):
            self._dom = JsHtml.JsHtmlBackground(self, page=self.page)
        return self._dom
    def colors(self, green: Optional[str]=None, red: Optional[str]=None, neutral: Optional[str]=None):
        """Override the state colors; returns self for chaining."""
        if (neutral is not None):
            self._jsStyles['orange'] = neutral
        if (green is not None):
            self._jsStyles['green'] = green
        if (red is not None):
            self._jsStyles['red'] = red
        return self
    def resolve(self, js_funcs: Union[(list, str)], profile: Optional[Union[(bool, dict)]]=None):
        """Attach a wrench icon that runs ``js_funcs`` when clicked; returns self."""
        self.action = self.page.ui.icon('wrench')
        self.action.options.managed = False
        self.action.tooltip('Click to try to resolve the issue')
        self.action.style.css.font_size = 8
        self.action.style.css.margin_top = 8
        self.action.style.css.cursor = 'pointer'
        self.action.style.css.vertical_align = 'top'
        self.action.click(js_funcs, profile)
        return self
    def click(self, js_funcs: Union[(list, str)], profile: Optional[Union[(bool, dict)]]=None, source_event: Optional[str]=None, on_ready: bool=False):
        """Click handler that first toggles the disc between success/danger colors."""
        success = Colors.getHexToRgb(self.page.theme.success.base)
        self.style.css.cursor = 'pointer'
        # Prepend the color toggle so it always runs before user handlers.
        js_funcs = ([self.dom.querySelector('div').toggle('background-color', ('rgb(%s, %s, %s)' % (success[0], success[1], success[2])), self.page.theme.danger.base)] + js_funcs)
        return super(TrafficLight, self).click(js_funcs, profile, source_event, on_ready)
    # Client-side builder: maps data False/True/None (or a raw color string)
    # onto the disc's background color.
    _js__builder__ = '\nif(data === false){htmlObj.firstChild.style.backgroundColor = options.red}\nelse if (data === true){htmlObj.firstChild.style.backgroundColor = options.green}\nelse if (data === null){htmlObj.firstChild.style.backgroundColor = options.orange}\nelse {htmlObj.firstChild.style.backgroundColor = data}'
    def __str__(self):
        if (self.action is not None):
            return ('<div id="%s"><div %s></div>%s</div>%s' % (self.html_code, self.get_attrs(css_class_names=self.style.get_classes(), with_id=False), self.action.html(), self.helper))
        return ('<%s id="%s"><div %s></div></%s>%s' % (self.tag, self.html_code, self.get_attrs(css_class_names=self.style.get_classes(), with_id=False), self.tag, self.helper))
class RoBERTaTokenizer(ByteBPETokenizer, LegacyFromHFHub):
    """Byte-BPE tokenizer with RoBERTa's BOS/EOS bracketing.

    NOTE(review): ``from_files`` and ``_load_from_vocab_files`` take ``cls``
    but show no @classmethod decorator, and ``eos_piece`` reads like a
    stripped @property — confirm the decorators were lost in extraction.
    """
    # Filenames expected in a HF hub repository for this tokenizer.
    vocab_files: Dict[(str, str)] = {'vocab': 'vocab.json', 'merges': 'merges.txt'}
    def __init__(self, *, vocab: Dict[(str, int)], merges: List[Tuple[(str, str)]], special_pieces: Optional[Dict[(str, int)]]=None, bos_piece: str='<s>', eos_piece: str='</s>'):
        super().__init__(vocab=vocab, merges=merges, special_pieces=special_pieces)
        bos_id = _get_piece_id_or_fail(self.processor, bos_piece)
        eos_id = _get_piece_id_or_fail(self.processor, eos_piece)
        self._eos_piece = eos_piece
        # Strip BOS/EOS while decoding; add them while encoding.
        self.pre_decoder = RoBERTaPreDecoder(bos_id=bos_id, eos_id=eos_id)
        self.pre_encoder = AddBosEosPreEncoder(bos_piece=bos_piece, eos_piece=eos_piece)
    def from_files(cls: Type[Self], *, vocab_file: RepositoryFile, merges_file: RepositoryFile, bos_piece: str='<s>', eos_piece: str='</s>') -> Self:
        """Construct the tokenizer from vocab/merges repository files."""
        with vocab_file.open(mode='r', encoding='utf-8') as vocab:
            with merges_file.open(mode='r', encoding='utf-8') as merges:
                processor = ByteBPEProcessor.load_from_files(vocab=vocab, merges=merges)
                return cls(vocab=processor.vocab, merges=processor.merges, bos_piece=bos_piece, eos_piece=eos_piece)
    def eos_piece(self) -> Optional[str]:
        """The end-of-sequence piece string."""
        return self._eos_piece
    def _load_from_vocab_files(cls: Type[Self], *, vocab_files: Mapping[(str, RepositoryFile)], tokenizer_config: Optional[Dict[(str, Any)]]) -> Self:
        """Hub-loading hook: delegate to from_files (tokenizer_config is unused)."""
        return cls.from_files(vocab_file=vocab_files['vocab'], merges_file=vocab_files['merges'])
class LMQLContext():
    """Runtime context handed to compiled LMQL queries.

    Wraps the interpreter plus (optionally) a decoding ``state``; variable
    reads/writes go through the underlying ProgramState.
    """
    def __init__(self, interpreter, state, prompt):
        self.interpreter = interpreter
        if state:
            self.program_state: ProgramState = state.program_state
            self.state = state
            self.prompt = state.prompt
        else:
            # Fresh context: no decoding state yet, build a new program state.
            self.program_state = ProgramState(prompt)
            self.state = None
            self.prompt = prompt
        self.program_state.runtime = interpreter
    async def json(self):
        """Return the program state (used for serialization)."""
        return self.program_state
    def num_calls(self):
        """Number of non-cached model calls so far (0 when untracked)."""
        dcmodel = self.interpreter.dcmodel
        if hasattr(dcmodel, 'calls'):
            return (dcmodel.calls - dcmodel.hits)
        else:
            return 0
    async def get_var(self, name):
        """Read a program variable's current value."""
        return self.program_state.get_program_value(name)
    async def query(self, qstring, __locals, **kwargs):
        """Wrap an inline query string as an InterpreterCall."""
        return InterpreterCall(qstring, __locals, kwargs, loc=None)
    async def set_model(self, model_name):
        self.interpreter.set_model(model_name)
    async def set_decoder(self, method, **kwargs):
        self.interpreter.set_decoder(method, **kwargs)
    async def set_where_clause(self, where):
        self.interpreter.set_where_clause(where)
    async def get_context(self, *args):
        return self
    async def get_all_vars(self):
        """Return a copy of all program variable values."""
        return self.program_state.variable_program_values.copy()
    async def set_distribution(self, distribution_variable, values):
        """Record the distribution variable/values on the interpreter."""
        self.interpreter.distribution_variable = distribution_variable
        self.interpreter.distribution_values = values
    async def get_return_value(self, *args):
        """Final query result: bare value(s) for f-functions, else an LMQLResult."""
        if ('is_f_function' in self.interpreter.extra_kwargs):
            result_values = (await self.get_all_vars())
            if (len(result_values) == 1):
                # A single variable is unwrapped to its bare value.
                return list(result_values.values())[0]
            else:
                return result_values
        return LMQLResult(self.state.prompt, (await self.get_all_vars()), self.interpreter.distribution_variable, self.interpreter.distribution_values)
    async def score(self, values, **kwargs):
        """Score continuations against the prompt (optionally with another model)."""
        model = kwargs.get('model', None)
        if (model is not None):
            return (await score(self.prompt, values, **kwargs))
        return (await dc_score(self.interpreter.dcmodel, self.prompt, values, **kwargs))
class FlickerWalker():
    """Interactive view-hierarchy walker for the Chisel lldb plugin.

    Reads single-letter commands from stdin and moves a red mask between
    views: w = superview, s = first subview, a/d = siblings, p = print a
    recursive description, q = quit (and copy the view address to the
    macOS clipboard).
    """
    def __init__(self, startView):
        self.setCurrentView(startView)
    def run(self):
        """Command loop; restores lldb's async mode once the user quits."""
        self.keepRunning = True
        initialAsync = lldb.debugger.GetAsync()
        lldb.debugger.SetAsync(True)
        while self.keepRunning:
            charRead = sys.stdin.readline().rstrip('\n')
            self.inputCallback(charRead)
        else:
            # NOTE: while/else — the else runs on normal loop exit, which is
            # always the case here (the loop only ends via keepRunning=False).
            lldb.debugger.SetAsync(initialAsync)
    def inputCallback(self, input):
        """Handle one command character (``input`` shadows the builtin)."""
        oldView = self.currentView
        if (input == 'q'):
            # Copy the view address to the clipboard via pbcopy (macOS only).
            cmd = ('echo %s | tr -d "\n" | pbcopy' % oldView)
            os.system(cmd)
            print((('\nI hope ' + oldView) + ' was what you were looking for. I put it on your clipboard.'))
            viewHelpers.unmaskView(oldView)
            self.keepRunning = False
        elif (input == 'w'):
            # Walk up to the superview.
            v = superviewOfView(self.currentView)
            if (not v):
                print('There is no superview. Where are you trying to go?!')
            self.setCurrentView(v, oldView)
        elif (input == 's'):
            # Walk down into the first subview.
            v = firstSubviewOfView(self.currentView)
            if (not v):
                print('\nThe view has no subviews.\n')
            self.setCurrentView(v, oldView)
        elif (input == 'd'):
            # Previous sibling.
            v = nthSiblingOfView(self.currentView, (- 1))
            if (v == oldView):
                print('\nThere are no sibling views to this view.\n')
            self.setCurrentView(v, oldView)
        elif (input == 'a'):
            # Next sibling.
            v = nthSiblingOfView(self.currentView, 1)
            if (v == oldView):
                print('\nThere are no sibling views to this view.\n')
            self.setCurrentView(v, oldView)
        elif (input == 'p'):
            # macOS uses a different recursive-description selector.
            recursionName = 'recursiveDescription'
            isMac = runtimeHelpers.isMacintoshArch()
            if isMac:
                recursionName = '_subtreeDescription'
            print(fb.describeObject('[(id){} {}]'.format(oldView, recursionName)))
        else:
            print((("\nI really have no idea what you meant by '" + input) + "'... =\\\n"))
    def setCurrentView(self, view, oldView=None):
        """Move the red highlight mask from ``oldView`` (if any) onto ``view``."""
        if view:
            self.currentView = view
            if oldView:
                viewHelpers.unmaskView(oldView)
            viewHelpers.maskView(self.currentView, 'red', '0.4')
            print(fb.describeObject(view))
def test_robotstxt_test():
    """robotstxt_test returns a DataFrame with the expected per-check columns."""
    user_agents = ['Googlebot', 'Baiduspider', '*']
    urls_to_check = ['/', '/help', 'something.html']
    result = robotstxt_test(robots_file, user_agents, urls_to_check)
    assert isinstance(result, pd.core.frame.DataFrame)
    assert all(((col in result) for col in ['robotstxt_url', 'user_agent', 'url_path', 'can_fetch']))
def get_function(reply: Dict, response_json: Dict):
    """Extract (function_name, arguments) from a parsed LLM response.

    Side effect: on success, records 'function' and 'arguments' in ``reply``.
    Returns an ('Error:', <message>) tuple instead of raising on bad input.
    """
    try:
        # Bug fix: validate that response_json is a dict BEFORE doing a
        # membership test on it — previously a non-dict input either got
        # the wrong error message (e.g. a list containing 'function') or
        # raised TypeError into the generic handler.
        if not isinstance(response_json, dict):
            return ('Error:', f"'response_json' object is not dictionary {response_json}")
        if 'function' not in response_json:
            return ('Error:', "Missing 'function' object in JSON")
        function = response_json['function']
        if not isinstance(function, dict):
            return ('Error:', "'function' object is not a dictionary")
        if 'name' not in function:
            return ('Error:', "Missing 'name' field in 'function' object")
        function_name = function['name']
        arguments = function.get('args', {})
        reply['function'] = function_name
        reply['arguments'] = arguments
        return (function_name, arguments)
    except json.decoder.JSONDecodeError:
        # Kept for callers that pass lazily-decoded structures; presumably
        # unreachable with plain dict input — confirm.
        return ('Error:', 'Invalid JSON')
    except Exception as e:
        return ('Error:', str(e))
def count_coins_and_fragments(coins):
    """Count per-user coin totals and ownership-change boundaries.

    ``coins`` is a sequence of user ids, one per coin position. Returns
    (coin_count, frag_count): coin_count[u] is how many coins user u owns;
    frag_count[u] counts positions (after the first) where ownership
    switches TO user u. Raises ValueError on an empty sequence (max of
    empty input).
    """
    users = max(coins) + 1
    owned = [0] * users
    fragments = [0] * users
    previous = None
    for position, owner in enumerate(coins):
        owned[owner] += 1
        # A fragment boundary is a change of owner (position 0 excluded).
        if position > 0 and owner != previous:
            fragments[owner] += 1
        previous = owner
    return (owned, fragments)
# NOTE(review): the line below looks like the remnant of a stripped mock
# decorator (e.g. "@mock.patch('socket.create_connection')") — confirm.
('socket.create_connection')
def test_azure_metadata(mock_socket, monkeypatch):
    """Azure IMDS response is parsed into the APM cloud-metadata schema."""
    class MockPoolManager():
        # Stands in for urllib3.PoolManager: returns itself carrying the
        # canned AZURE_DATA payload instead of doing real HTTP.
        def request(self, *args, **kwargs):
            self.data = AZURE_DATA
            return self
    monkeypatch.setattr(urllib3, 'PoolManager', MockPoolManager)
    metadata = elasticapm.utils.cloud.azure_metadata()
    assert (metadata == {'account': {'id': '7657426d-c4c3-44ac-88a2-3b2cd59e6dba'}, 'instance': {'id': 'e11ebedc-019d-427f-84dd-56cd4388d3a8', 'name': 'basepi-test'}, 'project': {'name': 'basepi-testing'}, 'machine': {'type': 'Standard_D2s_v3'}, 'provider': 'azure', 'region': 'westus2'})
class AsyncMinHashLSH(object):
    """Asynchronous MinHash LSH index backed by async storages.

    Band hashtables and the key index live in asynchronous storage backends
    (aiomongo by default). The instance must be awaited — or entered as an
    async context manager — before use so the storages are initialized.

    Fixes restored here: ``batch_size`` is a property with a setter (the
    stripped ``_size.setter`` line was a NameError at class-body execution),
    and ``_H`` is a staticmethod (it is called as ``self._H(...)`` with a
    single argument).
    """

    def __init__(self, threshold: float=0.9, num_perm: int=128, weights: Tuple[(float, float)]=(0.5, 0.5), params: Tuple[(int, int)]=None, storage_config: Dict=None, prepickle: bool=None):
        if (storage_config is None):
            storage_config = {'type': 'aiomongo', 'mongo': {'host': 'localhost', 'port': 27017}}
        self._storage_config = storage_config.copy()
        # A stable basename namespaces all storages belonging to this index.
        self._storage_config['basename'] = self._storage_config.get('basename', _random_name(11))
        self._basename = self._storage_config['basename']
        self._batch_size = 10000
        self._threshold = threshold
        self._num_perm = num_perm
        self._weights = weights
        self._params = params
        # Redis keys are bytes, so keys are pickled by default on aioredis.
        self.prepickle = ((storage_config['type'] == 'aioredis') if (prepickle is None) else prepickle)
        if ((self._threshold > 1.0) or (self._threshold < 0.0)):
            raise ValueError('threshold must be in [0.0, 1.0]')
        if (self._num_perm < 2):
            raise ValueError('Too few permutation functions')
        if any((((w < 0.0) or (w > 1.0)) for w in self._weights)):
            raise ValueError('Weight must be in [0.0, 1.0]')
        if (sum(self._weights) != 1.0):
            raise ValueError('Weights must sum to 1.0')
        self.h = self._num_perm
        if (self._params is not None):
            (self.b, self.r) = self._params
            if ((self.b * self.r) > self._num_perm):
                raise ValueError('The product of b and r must be less than num_perm')
        else:
            # Choose bands (b) and rows (r) minimizing the weighted false
            # positive / false negative probabilities at the threshold.
            (false_positive_weight, false_negative_weight) = self._weights
            (self.b, self.r) = _optimal_param(self._threshold, self._num_perm, false_positive_weight, false_negative_weight)
        # Slice boundaries of each band within the minhash signature.
        self.hashranges = [((i * self.r), ((i + 1) * self.r)) for i in range(self.b)]
        self.hashtables = None
        self.keys = None
        self._lock = asyncio.Lock()
        self._initialized = False

    async def __async_init(self):
        # Guarded by the lock so concurrent awaiters initialize only once.
        async with self._lock:
            if (not self._initialized):
                (await self.init_storages())
                self._initialized = True
        return self

    def __await__(self):
        # Enables ``lsh = await AsyncMinHashLSH(...)``.
        return self.__async_init().__await__()

    async def __aenter__(self):
        return (await self)

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        (await self.close())

    def __getstate__(self):
        # Locks and live storage handles are not picklable; __setstate__
        # rebuilds them by re-running __init__.
        state = self.__dict__.copy()
        state['_initialized'] = False
        state.pop('_lock')
        state.pop('hashranges')
        state.pop('hashtables')
        state.pop('keys')
        return state

    def __setstate__(self, state):
        state['_lock'] = asyncio.Lock()
        self.__dict__ = state
        self.__init__(self._threshold, self._num_perm, self._weights, self._params, self._storage_config)

    @property
    def batch_size(self):
        """Batch size used when buffering storage operations."""
        return self._batch_size

    @batch_size.setter
    def batch_size(self, value):
        # Propagate the new batch size to the key storage and every hashtable.
        if (self.keys is not None):
            self.keys.batch_size = value
        else:
            raise AttributeError('AsyncMinHash is not initialized.')
        for t in self.hashtables:
            t.batch_size = value
        self._batch_size = value

    async def _create_storages(self):
        """Create the per-band unordered storages plus the ordered key storage."""
        # aioredis storages take bytes names; other backends take str names.
        if (self._storage_config['type'] == 'aioredis'):
            name_ordered = b''.join([self._basename, b'_keys'])
            fs = (async_unordered_storage(config=self._storage_config, name=b''.join([self._basename, b'_bucket_', bytes([i])])) for i in range(self.b))
        else:
            name_ordered = ''.join([self._basename.decode('utf-8'), '_keys'])
            fs = (async_unordered_storage(config=self._storage_config, name=''.join([self._basename.decode('utf-8'), '_bucket_', str(i)])) for i in range(self.b))
        fs = chain(fs, (async_ordered_storage(self._storage_config, name=name_ordered),))
        storages = (await asyncio.gather(*fs))
        # Last storage is the key index; the rest are the band hashtables.
        (*self.hashtables, self.keys) = storages

    async def init_storages(self):
        """Create storages if needed and finish any pending initialization."""
        if (self.keys is None):
            (await self._create_storages())
        if (not self.keys.initialized):
            (await self.keys)
        fs = (ht for ht in self.hashtables if (not ht.initialized))
        (await asyncio.gather(*fs))

    async def close(self):
        """Close all storages; the index must be re-awaited before reuse."""
        async with self._lock:
            for t in self.hashtables:
                (await t.close())
            if (self.keys is not None):
                (await self.keys.close())
            self._initialized = False

    async def insert(self, key, minhash, check_duplication=True):
        """Index ``minhash`` under ``key``; raises if the key already exists."""
        (await self._insert(key, minhash, check_duplication=check_duplication, buffer=False))

    def insertion_session(self, batch_size=10000):
        """Context manager for buffered batch insertion."""
        return AsyncMinHashLSHInsertionSession(self, batch_size=batch_size)

    def delete_session(self, batch_size=10000):
        """Context manager for buffered batch deletion."""
        return AsyncMinHashLSHDeleteSession(self, batch_size=batch_size)

    async def _insert(self, key, minhash, check_duplication=True, buffer=False):
        if (len(minhash) != self.h):
            raise ValueError(('Expecting minhash with length %d, got %d' % (self.h, len(minhash))))
        if self.prepickle:
            key = pickle.dumps(key)
        if (check_duplication and (await self.has_key(key))):
            raise ValueError('The given key already exists')
        # One band hash per (start, end) range; insert the key into each band's
        # bucket and record the band hashes under the key for later removal.
        Hs = [self._H(minhash.hashvalues[start:end]) for (start, end) in self.hashranges]
        fs = chain((self.keys.insert(key, *Hs, buffer=buffer),), (hashtable.insert(H, key, buffer=buffer) for (H, hashtable) in zip(Hs, self.hashtables)))
        (await asyncio.gather(*fs))

    async def query(self, minhash):
        """Return keys whose signatures collide with ``minhash`` in any band."""
        if (len(minhash) != self.h):
            raise ValueError(('Expecting minhash with length %d, got %d' % (self.h, len(minhash))))
        fs = (hashtable.get(self._H(minhash.hashvalues[start:end])) for ((start, end), hashtable) in zip(self.hashranges, self.hashtables))
        candidates = frozenset(chain.from_iterable((await asyncio.gather(*fs))))
        if self.prepickle:
            return [pickle.loads(key) for key in candidates]
        else:
            return list(candidates)

    async def has_key(self, key):
        """True if ``key`` is present in the index."""
        return (await self.keys.has_key(key))

    async def remove(self, key):
        """Remove ``key`` from the index; raises if it does not exist."""
        (await self._remove(key, buffer=False))

    async def _remove(self, key, buffer=False):
        if (not (await self.has_key(key))):
            raise ValueError('The given key does not exist')
        for (H, hashtable) in zip((await self.keys.get(key)), self.hashtables):
            (await hashtable.remove_val(H, key, buffer=buffer))
            # Drop the bucket itself once it becomes empty.
            if (not (await hashtable.get(H))):
                (await hashtable.remove(H, buffer=buffer))
        (await self.keys.remove(key, buffer=buffer))

    async def is_empty(self):
        """True if any band hashtable holds no entries."""
        for t in self.hashtables:
            if ((await t.size()) == 0):
                return True
        return False

    @staticmethod
    def _H(hs):
        # Byte-swapped raw bytes of a band's hash values serve as bucket key.
        return bytes(hs.byteswap().data)

    async def _query_b(self, minhash, b):
        """Query using only the first ``b`` bands of the index."""
        if (len(minhash) != self.h):
            raise ValueError(('Expecting minhash with length %d, got %d' % (self.h, len(minhash))))
        if (b > len(self.hashtables)):
            raise ValueError('b must be less or equal to the number of hash tables')
        fs = []
        for ((start, end), hashtable) in zip(self.hashranges[:b], self.hashtables[:b]):
            H = self._H(minhash.hashvalues[start:end])
            if (await hashtable.has_key(H)):
                fs.append(hashtable.get(H))
        candidates = set(chain.from_iterable((await asyncio.gather(*fs))))
        return candidates

    async def get_counts(self):
        """Per-hashtable bucket-size counts."""
        fs = (hashtable.itemcounts() for hashtable in self.hashtables)
        return (await asyncio.gather(*fs))

    async def get_subset_counts(self, *keys):
        """Bucket-size counts restricted to the given subset of keys."""
        key_set = list(set(keys))
        # Rebuild in-memory dict hashtables containing only the subset.
        hashtables = [unordered_storage({'type': 'dict'}) for _ in range(self.b)]
        Hss = (await self.keys.getmany(*key_set))
        for (key, Hs) in zip(key_set, Hss):
            for (H, hashtable) in zip(Hs, hashtables):
                hashtable.insert(H, key)
        return [hashtable.itemcounts() for hashtable in hashtables]
def interpolator(interpolator: str, create: type[Color], colors: Sequence[((ColorInput | stop) | Callable[(..., float)])], space: (str | None), out_space: (str | None), progress: ((Mapping[(str, Callable[(..., float)])] | Callable[(..., float)]) | None), hue: str, premultiplied: bool, extrapolate: bool, domain: (Vector | None)=None, padding: ((float | tuple[(float, float)]) | None)=None, carryforward: bool=False, powerless: bool=False, **kwargs: Any) -> Interpolator:
    """Build an ``Interpolator`` over ``colors`` via the named plugin.

    ``colors`` may mix color inputs, ``stop`` wrappers (color + explicit stop
    position) and easing callables; an easing callable applies to the segment
    that follows it. Raises ``ValueError`` for an unknown plugin name, an
    easing in first position, or fewer than two colors.
    """
    plugin = create.INTERPOLATE_MAP.get(interpolator)
    if (not plugin):
        raise ValueError("'{}' is not a recognized interpolator".format(interpolator))
    stops = {}  # color index -> explicit stop position, or None if unspecified
    if (space is None):
        space = create.INTERPOLATE
    # The first item must be a color (optionally wrapped in a stop); an easing
    # function has no preceding segment to act on.
    if isinstance(colors[0], stop):
        current = create(colors[0].color)
        stops[0] = colors[0].stop
    elif (not callable(colors[0])):
        current = create(colors[0])
        stops[0] = None
    else:
        raise ValueError('Cannot have an easing function as the first item in an interpolation list')
    if (out_space is None):
        out_space = space
    cs = current.CS_MAP[space]
    # Cylindrical spaces have a hue channel that needs special handling
    # (powerless hues become NaN for achromatic colors).
    is_cyl = isinstance(cs, Cylindrical)
    hue_index = (cs.hue_index() if is_cyl else (- 1))
    if carryforward:
        carryforward_convert(current, space, hue_index, powerless)
    elif (space != current.space()):
        current.convert(space, in_place=True)
    elif (powerless and is_cyl and current.is_achromatic()):
        current[hue_index] = math.nan
    easing = None
    easings = []
    coords = [current[:]]
    # ``i`` counts colors consumed so far (first color is index 0).
    i = 0
    for x in colors[1:]:
        if isinstance(x, stop):
            i += 1
            stops[i] = x.stop
            color = current.new(x.color)
        elif callable(x):
            # Remember the easing; it applies to the next color segment.
            easing = x
            continue
        else:
            i += 1
            color = current.new(x)
            stops[i] = None
        if carryforward:
            carryforward_convert(color, space, hue_index, powerless)
        elif (space != color.space()):
            color.convert(space, in_place=True)
        elif (powerless and is_cyl and color.is_achromatic()):
            color[hue_index] = math.nan
        coords.append(color[:])
        # Segment easing: the pending easing if one was given, else the
        # global ``progress``.
        easings.append((easing if (easing is not None) else progress))
        easing = None
        current = color
    # After the loop ``i`` is (number of colors - 1); bump it to the count.
    i += 1
    if (i < 2):
        raise ValueError('Need at least two colors to interpolate')
    # Fill in implicit stop positions between the explicit ones.
    stops = calc_stops(stops, i)
    kwargs['hue'] = hue
    return plugin.interpolator(coords, current._space.channels, create, easings, stops, space, out_space, process_mapping(progress, current._space.CHANNEL_ALIASES), premultiplied, extrapolate, domain, padding, **kwargs)
def load_config_files(paths):
    """Read, transform and validate configuration files into a final config.

    Runs the raw file contents through the fixed processing pipeline:
    arrange into a tree, rename legacy keys, build the node section,
    validate/convert to objects, dereference, and cross-validate.
    """
    config = _read_config_files(paths)
    pipeline = (
        _arrange_config_tree,
        _perform_key_renames,
        _build_node_section,
        _validate_and_convert,
        _dereference_config_tree,
        _cross_validate_final_config,
    )
    # Each stage consumes the previous stage's output.
    for stage in pipeline:
        config = stage(config)
    return config
def serialize_css(obj: 'Color', func: str='', color: bool=False, alpha: (bool | None)=None, precision: (int | None)=None, fit: ((bool | str) | dict[(str, Any)])=True, none: bool=False, percent: (bool | Sequence[bool])=False, hexa: bool=False, upper: bool=False, compress: bool=False, name: bool=False, legacy: bool=False, scale: float=1.0) -> str:
    """Serialize a color to a CSS string.

    Output forms are tried in priority order: the ``color()`` function form,
    a named color, hexadecimal notation, then an explicit CSS function.
    Raises ``RuntimeError`` when no form was requested.
    """
    # Fall back to the color object's default precision.
    if precision is None:
        precision = obj.PRECISION
    # `color(...)` form takes precedence over every other representation.
    if color:
        return color_function(obj, None, alpha, precision, fit, none, percent, False, 1.0)
    # A CSS color name, when requested and resolvable for this color.
    if name:
        named = named_color(obj, alpha, fit)
        if named is not None:
            return named
    # Hexadecimal notation (#rrggbb / #rrggbbaa).
    if hexa:
        return hexadecimal(obj, alpha, fit, upper, compress)
    # Explicit function form, e.g. rgb()/hsl().
    if func:
        return color_function(obj, func, alpha, precision, fit, none, percent, legacy, scale)
    raise RuntimeError('Could not identify a CSS format to serialize to')
class OptionPlotoptionsWaterfallSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Volume mapping options for waterfall sonification default instruments.

    Each option is exposed as a read/write property delegating to the shared
    ``_config_get``/``_config`` machinery. The ``@property``/``@<name>.setter``
    decorators are required — without them each setter def silently shadows
    its getter.
    """

    @property
    def mapFunction(self):
        """The ``mapFunction`` option value (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The ``mapTo`` option value (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """The ``max`` option value (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """The ``min`` option value (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The ``within`` option value (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesItemSonificationDefaultspeechoptionsMappingPlaydelay(Options):
    """Play-delay mapping options for series sonification speech defaults.

    Each option is exposed as a read/write property delegating to the shared
    ``_config_get``/``_config`` machinery. The ``@property``/``@<name>.setter``
    decorators are required — without them each setter def silently shadows
    its getter.
    """

    @property
    def mapFunction(self):
        """The ``mapFunction`` option value (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The ``mapTo`` option value (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """The ``max`` option value (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """The ``min`` option value (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The ``within`` option value (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def deprecated(instructions: str, is_property: bool=False, method_name: Optional[str]=None) -> Callable[([Callable[(..., Any)]], Any)]:
    """Create a decorator marking a function or property as deprecated.

    The returned decorator wraps the target so that every call emits a
    ``DeprecatedWarning`` containing ``instructions``.

    :param instructions: Guidance shown in the warning (what to use instead).
    :param is_property: Describe the target as a property rather than a
        function in the warning message.
    :param method_name: Optional explicit name for the message; defaults to
        the wrapped function's ``__name__``.
    """
    import functools

    def decorator(func: Callable[(..., Any)]) -> Callable[([Callable[(..., Any)]], Any)]:
        object_name = ('property' if is_property else 'function')
        post_name = ('' if is_property else '(...)')
        message = 'Call to deprecated {} {}{}. {}'.format(object_name, (method_name or func.__name__), post_name, instructions)

        # functools.wraps preserves the wrapped callable's metadata
        # (__name__, __doc__, __wrapped__); the original bare ``(func)``
        # expression was a no-op where this decorator line belonged.
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Callable[(..., Any)]:
            warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
class SingletonHasTraits(HasTraits):
    """SingletonHasTraits has been deprecated and will be removed in the future. Avoid using it"""

    # Classic singleton: the first instantiation is cached on the class and
    # every subsequent call returns the same object.
    def __new__(cls, *args, **traits):
        # Look up '_the_instance' in this class's own __dict__ (not via
        # inheritance) so each subclass gets its own singleton.
        if ('_the_instance' not in cls.__dict__):
            cls._the_instance = HasTraits.__new__(cls, *args, **traits)
        return cls._the_instance
# The original ``.django_db`` fragment is a syntax error left by a stripped
# decorator; restore the standard pytest-django marker.
@pytest.mark.django_db
def test_defc_date_filter(client, monkeypatch, elasticsearch_transaction_index):
    """spending_over_time with a DEFC filter only counts eligible transactions.

    Two transactions carry DEFC 'L'; the response is expected to aggregate
    only 10 (not 32) — presumably the transaction dated before the code's
    ``earliest_public_law_enactment_date`` (2020-03-06) is excluded; confirm
    against the endpoint's DEFC date-filtering logic.
    """
    # DEFC 'L' with an enactment date that post-dates one of the transactions.
    defc1 = baker.make('references.DisasterEmergencyFundCode', code='L', public_law='PUBLIC LAW FOR CODE L', title='TITLE FOR CODE L', group_name='covid_19', earliest_public_law_enactment_date='2020-03-06')
    baker.make('accounts.FederalAccount', id=99)
    baker.make('accounts.TreasuryAppropriationAccount', federal_account_id=99, treasury_account_identifier=99)
    baker.make('awards.FinancialAccountsByAwards', pk=1, award_id=99, disaster_emergency_fund=defc1, treasury_account_id=99)
    baker.make('search.AwardSearch', award_id=99, total_obligation=20, piid='0001', action_date='2020-01-01')
    # Transaction on/after the enactment date (should be counted).
    baker.make('search.TransactionSearch', transaction_id=99, action_date='2020-04-02', fiscal_action_date='2020-04-02', fiscal_year=2020, federal_action_obligation=10, generated_pragmatic_obligation=10, award_amount=20, award_id=99, is_fpds=True, type='A', piid='0001', disaster_emergency_fund_codes=['L'])
    # Transaction before the enactment date (should not contribute).
    baker.make('search.TransactionSearch', transaction_id=100, action_date='2020-01-01', fiscal_action_date='2020-01-01', federal_action_obligation=22, generated_pragmatic_obligation=22, award_amount=20, award_id=99, is_fpds=True, type='A', disaster_emergency_fund_codes=['L'])
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    resp = client.post('/api/v2/search/spending_over_time', content_type='application/json', data=json.dumps({'group': 'fiscal_year', 'filters': {'def_codes': ['L']}}))
    assert (resp.status_code == status.HTTP_200_OK)
    assert ({'aggregated_amount': 10, 'time_period': {'fiscal_year': '2020'}} in resp.json().get('results'))
def common_options(f):
    """Apply the CLI options shared by all geocoding commands to ``f``.

    Decorators are applied in reverse so that the options appear on the
    command in the order declared in the list below.
    """
    for decorator in reversed([click.argument('database', type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True), click.argument('table', type=click.STRING, required=True), click.option('-l', '--location', type=click.STRING, default='{location}', help='Location query format. See docs for examples.'), click.option('-d', '--delay', type=click.FLOAT, default=1.0, help='Delay between geocoding calls, in seconds.'), click.option('--latitude', type=click.STRING, default='latitude', help='Field name for latitude'), click.option('--longitude', type=click.STRING, default='longitude', help='Field name for longitude'), click.option('--geojson', type=click.BOOL, is_flag=True, default=False, help='Store results as GeoJSON. \nUsing this will add a geometry column instead of latitude and longitude columns.'), click.option('--spatialite', is_flag=True, default=False, help='Store results as a SpatiaLite geometry.\nUsing this will add a geometry column instead of latitude and longitude columns.'), click.option('--raw', is_flag=False, default='', flag_value='raw', help="Store raw geocoding results as JSON.\nThis column will be called 'raw' by default. Pass a value to rename it."), click.pass_context]):
        f = decorator(f)
    return f
def segment_dataset_config(db: Session, segment_connection_config: ConnectionConfig, segment_dataset: Dict[(str, Any)]) -> Generator:
    """Generator used as a pytest-style fixture wiring a Segment dataset to its
    connection config.

    Renames the connection config to the dataset's ``fides_key``, creates the
    CTL dataset and DatasetConfig rows, yields the DatasetConfig, then removes
    the created rows on teardown.
    """
    fides_key = segment_dataset['fides_key']
    # Align the connection config's name/key with the dataset it serves.
    segment_connection_config.name = fides_key
    segment_connection_config.key = fides_key
    segment_connection_config.save(db=db)
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, segment_dataset)
    dataset = DatasetConfig.create(db=db, data={'connection_config_id': segment_connection_config.id, 'fides_key': fides_key, 'ctl_dataset_id': ctl_dataset.id})
    (yield dataset)
    # Teardown: delete the rows created above.
    dataset.delete(db=db)
    ctl_dataset.delete(db=db)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.