code stringlengths 281 23.7M |
|---|
def test_adding_a_extra_volume_with_volume_mount():
    """extraVolumes / extraVolumeMounts from values must surface in the rendered statefulset."""
    config = '\nextraVolumes: |\n - name: extras\n emptyDir: {}\nextraVolumeMounts: |\n - name: extras\n mountPath: /usr/share/extras\n readOnly: true\n'
    rendered = helm_template(config)
    pod_spec = rendered['statefulset'][name]['spec']['template']['spec']
    # The extra volume must be attached to the pod...
    assert {'name': 'extras', 'emptyDir': {}} in pod_spec['volumes']
    # ...and mounted read-only into the first container.
    mounts = pod_spec['containers'][0]['volumeMounts']
    assert {'name': 'extras', 'mountPath': '/usr/share/extras', 'readOnly': True} in mounts
class Migration(migrations.Migration):
    """Add an ``is_current`` boolean flag (default True) to Product and Section."""

    dependencies = [('frontend', '0021_chemical_is_current')]

    operations = [migrations.AddField(model_name='product', name='is_current', field=models.BooleanField(default=True)), migrations.AddField(model_name='section', name='is_current', field=models.BooleanField(default=True))]
class GetForObjectReferenceDbTest(TestModelMixin, TestBase):
    """get_for_object_reference() must only see versions saved on the queried database."""

    # Both aliases must be declared or Django blocks queries against 'postgres'.
    databases = {'default', 'postgres'}

    def testGetForObjectReferenceModelDb(self):
        # The revision is written to the 'postgres' alias only.
        with reversion.create_revision(using='postgres'):
            obj = TestModel.objects.create()
        # Default DB: nothing was recorded there.
        self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk).count(), 0)
        # 'postgres' DB: exactly the one version created above.
        self.assertEqual(Version.objects.using('postgres').get_for_object_reference(TestModel, obj.pk).count(), 1)
def extractLuciferscansWordpressCom(item):
    """Build a release message for luciferscans.wordpress.com feed items, or None/False."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items with neither a chapter nor a volume number.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, series_name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def get_all_pipelines(app=''):
    """Fetch every pipeline configuration for *app* from the Gate API.

    Raises AssertionError when the request does not succeed.
    """
    endpoint = '/applications/{app}/pipelineConfigs'.format(app=app)
    response = gate_request(uri=endpoint)
    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)
    configs = response.json()
    LOG.debug('Pipelines:\n%s', configs)
    return configs
# Fix: the decorator was truncated to a bare `.parametrize(...)` expression
# (a syntax error); restore the full pytest parametrize marker.
@pytest.mark.parametrize('api_style', ('v4', 'build_filter'))
def test_event_filter_new_events_many_deployed_contracts(w3, emitter, emitter_contract_factory, wait_for_transaction, emitter_contract_event_ids, api_style, create_filter):
    """Filter on one emitter must only count its own events, not those of other deployed copies."""
    matching_transact = emitter.functions.logNoArgs(which=1).transact
    deployed_contract_addresses = deploy_contracts(w3, emitter_contract_factory, wait_for_transaction)

    def gen_non_matching_transact():
        # Endless stream of transact callables on randomly chosen other contracts.
        while True:
            contract_address = deployed_contract_addresses[random.randint(0, len(deployed_contract_addresses) - 1)]
            yield w3.eth.contract(address=contract_address, abi=emitter_contract_factory.abi).functions.logNoArgs(which=1).transact

    non_matching_transact = gen_non_matching_transact()
    if api_style == 'build_filter':
        builder = emitter.events.LogNoArguments.build_filter()
        builder.fromBlock = 'latest'
        event_filter = builder.deploy(w3)
    else:
        event_filter = emitter.events.LogNoArguments().create_filter(fromBlock='latest')
    expected_match_counter = 0
    # Randomly interleave matching and non-matching events until block 50.
    while w3.eth.block_number < 50:
        is_match = bool(random.randint(0, 1))
        if is_match:
            expected_match_counter += 1
            matching_transact()
            pad_with_transactions(w3)
            continue
        next(non_matching_transact)()
        pad_with_transactions(w3)
    assert len(event_filter.get_new_entries()) == expected_match_counter
class OptionSeriesPictorial(Options):
    """Configuration wrapper for the Highcharts ``series.pictorial`` options.

    Each option appears as a getter (returns the configured value via
    ``_config_get`` with the Highcharts default) paired with a setter of the
    same name (writes via ``_config``); sub-option groups return dedicated
    ``_config_sub_data`` wrapper objects.

    NOTE(review): the getter/setter pairs share one name with no ``@property``
    / ``@<name>.setter`` decorators visible, so as written each setter
    definition shadows its getter. This strongly suggests decorators were
    stripped from the original source — confirm against upstream and restore.
    """
    def accessibility(self) -> 'OptionSeriesPictorialAccessibility':
        return self._config_sub_data('accessibility', OptionSeriesPictorialAccessibility)
    def allowPointSelect(self):
        return self._config_get(False)
    def allowPointSelect(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self):
        return self._config_get(True)
    def animation(self, flag: bool):
        self._config(flag, js_type=False)
    def animationLimit(self):
        return self._config_get(None)
    def animationLimit(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def clip(self):
        return self._config_get(True)
    def clip(self, flag: bool):
        self._config(flag, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def colorAxis(self):
        return self._config_get(0)
    def colorAxis(self, num: float):
        self._config(num, js_type=False)
    def colorByPoint(self):
        return self._config_get(False)
    def colorByPoint(self, flag: bool):
        self._config(flag, js_type=False)
    def colorIndex(self):
        return self._config_get(None)
    def colorIndex(self, num: float):
        self._config(num, js_type=False)
    def colors(self):
        return self._config_get(None)
    def colors(self, value: Any):
        self._config(value, js_type=False)
    def cropThreshold(self):
        return self._config_get(50)
    def cropThreshold(self, num: float):
        self._config(num, js_type=False)
    def cursor(self):
        return self._config_get(None)
    def cursor(self, text: str):
        self._config(text, js_type=False)
    def custom(self):
        return self._config_get(None)
    def custom(self, value: Any):
        self._config(value, js_type=False)
    def data(self) -> 'OptionSeriesPictorialData':
        return self._config_sub_data('data', OptionSeriesPictorialData)
    def dataLabels(self) -> 'OptionSeriesPictorialDatalabels':
        return self._config_sub_data('dataLabels', OptionSeriesPictorialDatalabels)
    def description(self):
        return self._config_get(None)
    def description(self, text: str):
        self._config(text, js_type=False)
    def enableMouseTracking(self):
        return self._config_get(True)
    def enableMouseTracking(self, flag: bool):
        self._config(flag, js_type=False)
    def events(self) -> 'OptionSeriesPictorialEvents':
        return self._config_sub_data('events', OptionSeriesPictorialEvents)
    def findNearestPointBy(self):
        return self._config_get('x')
    def findNearestPointBy(self, text: str):
        self._config(text, js_type=False)
    def getExtremesFromAll(self):
        return self._config_get(False)
    def getExtremesFromAll(self, flag: bool):
        self._config(flag, js_type=False)
    def grouping(self):
        return self._config_get(True)
    def grouping(self, flag: bool):
        self._config(flag, js_type=False)
    def groupPadding(self):
        return self._config_get(0.2)
    def groupPadding(self, num: float):
        self._config(num, js_type=False)
    def groupZPadding(self):
        return self._config_get(1)
    def groupZPadding(self, num: float):
        self._config(num, js_type=False)
    def id(self):
        return self._config_get(None)
    def id(self, text: str):
        self._config(text, js_type=False)
    def inactiveOtherPoints(self):
        return self._config_get(False)
    def inactiveOtherPoints(self, flag: bool):
        self._config(flag, js_type=False)
    def includeInDataExport(self):
        return self._config_get(None)
    def includeInDataExport(self, flag: bool):
        self._config(flag, js_type=False)
    def index(self):
        return self._config_get(None)
    def index(self, num: float):
        self._config(num, js_type=False)
    def keys(self):
        return self._config_get(None)
    def keys(self, value: Any):
        self._config(value, js_type=False)
    def label(self) -> 'OptionSeriesPictorialLabel':
        return self._config_sub_data('label', OptionSeriesPictorialLabel)
    def legendIndex(self):
        return self._config_get(None)
    def legendIndex(self, num: float):
        self._config(num, js_type=False)
    def legendSymbol(self):
        return self._config_get('rectangle')
    def legendSymbol(self, text: str):
        self._config(text, js_type=False)
    def linkedTo(self):
        return self._config_get(None)
    def linkedTo(self, text: str):
        self._config(text, js_type=False)
    def maxPointWidth(self):
        return self._config_get(None)
    def maxPointWidth(self, num: float):
        self._config(num, js_type=False)
    def minPointLength(self):
        return self._config_get(0)
    def minPointLength(self, num: float):
        self._config(num, js_type=False)
    def name(self):
        return self._config_get(None)
    def name(self, text: str):
        self._config(text, js_type=False)
    def negativeColor(self):
        return self._config_get(None)
    def negativeColor(self, text: str):
        self._config(text, js_type=False)
    def opacity(self):
        return self._config_get(1)
    def opacity(self, num: float):
        self._config(num, js_type=False)
    def paths(self) -> 'OptionSeriesPictorialPaths':
        return self._config_sub_data('paths', OptionSeriesPictorialPaths)
    def point(self) -> 'OptionSeriesPictorialPoint':
        return self._config_sub_data('point', OptionSeriesPictorialPoint)
    def pointDescriptionFormat(self):
        return self._config_get(None)
    def pointDescriptionFormat(self, value: Any):
        self._config(value, js_type=False)
    def pointDescriptionFormatter(self):
        return self._config_get(None)
    def pointDescriptionFormatter(self, value: Any):
        self._config(value, js_type=False)
    def pointInterval(self):
        return self._config_get(1)
    def pointInterval(self, num: float):
        self._config(num, js_type=False)
    def pointIntervalUnit(self):
        return self._config_get(None)
    def pointIntervalUnit(self, value: Any):
        self._config(value, js_type=False)
    def pointPadding(self):
        return self._config_get(0.1)
    def pointPadding(self, num: float):
        self._config(num, js_type=False)
    def pointPlacement(self):
        return self._config_get(None)
    def pointPlacement(self, text: str):
        self._config(text, js_type=False)
    def pointRange(self):
        return self._config_get(None)
    def pointRange(self, num: float):
        self._config(num, js_type=False)
    def pointStart(self):
        return self._config_get(0)
    def pointStart(self, num: float):
        self._config(num, js_type=False)
    def pointWidth(self):
        return self._config_get(None)
    def pointWidth(self, num: float):
        self._config(num, js_type=False)
    def selected(self):
        return self._config_get(False)
    def selected(self, flag: bool):
        self._config(flag, js_type=False)
    def showCheckbox(self):
        return self._config_get(False)
    def showCheckbox(self, flag: bool):
        self._config(flag, js_type=False)
    def showInLegend(self):
        return self._config_get(None)
    def showInLegend(self, flag: bool):
        self._config(flag, js_type=False)
    def skipKeyboardNavigation(self):
        return self._config_get(None)
    def skipKeyboardNavigation(self, flag: bool):
        self._config(flag, js_type=False)
    def softThreshold(self):
        return self._config_get(True)
    def softThreshold(self, flag: bool):
        self._config(flag, js_type=False)
    def sonification(self) -> 'OptionSeriesPictorialSonification':
        return self._config_sub_data('sonification', OptionSeriesPictorialSonification)
    def stack(self):
        return self._config_get(None)
    def stack(self, num: float):
        self._config(num, js_type=False)
    def stacking(self):
        return self._config_get(None)
    def stacking(self, text: str):
        self._config(text, js_type=False)
    def states(self) -> 'OptionSeriesPictorialStates':
        return self._config_sub_data('states', OptionSeriesPictorialStates)
    def step(self):
        return self._config_get(None)
    def step(self, value: Any):
        self._config(value, js_type=False)
    def stickyTracking(self):
        return self._config_get(False)
    def stickyTracking(self, flag: bool):
        self._config(flag, js_type=False)
    def threshold(self):
        return self._config_get(0)
    def threshold(self, num: float):
        self._config(num, js_type=False)
    def tooltip(self) -> 'OptionSeriesPictorialTooltip':
        return self._config_sub_data('tooltip', OptionSeriesPictorialTooltip)
    def turboThreshold(self):
        return self._config_get(1000)
    def turboThreshold(self, num: float):
        self._config(num, js_type=False)
    def type(self):
        return self._config_get(None)
    def type(self, text: str):
        self._config(text, js_type=False)
    def visible(self):
        return self._config_get(True)
    def visible(self, flag: bool):
        self._config(flag, js_type=False)
    def xAxis(self):
        return self._config_get(0)
    def xAxis(self, num: float):
        self._config(num, js_type=False)
    def yAxis(self):
        return self._config_get(0)
    def yAxis(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(None)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
def build_hyperparameter_optimizer(hyperparameters=None, cv=10, n_iter=100, n_jobs=(-1), verbose=1):
    """Build a RandomizedSearchCV over a RandomForestClassifier.

    Scores candidates on accuracy plus custom model-size and compute-cost
    metrics, refitting on accuracy.

    Args:
        hyperparameters: param-distributions dict for the search
            (default: empty dict — bug fix: the original used a mutable
            default argument ``{}``, shared across calls).
        cv: number of cross-validation folds.
        n_iter: number of sampled parameter settings.
        n_jobs: parallel jobs for the search (-1 = all cores).
        verbose: sklearn verbosity level.

    Returns:
        An unfitted sklearn RandomizedSearchCV instance.
    """
    if hyperparameters is None:
        hyperparameters = {}
    search = sklearn.model_selection.RandomizedSearchCV(
        # The forest itself runs single-threaded; parallelism is at the search level.
        RandomForestClassifier(n_jobs=1),
        param_distributions=hyperparameters,
        scoring={
            'accuracy': sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score),
            'size': model_size_bytes,
            'compute': compute_cost_estimate,
        },
        refit='accuracy',
        n_iter=n_iter,
        cv=cv,
        return_train_score=True,
        n_jobs=n_jobs,
        verbose=verbose,
    )
    return search
def aggregate_output_types(cell_outputs: List[NotebookNode]) -> CELL_OUTPUTS_TO_PROCESS:
    """Bucket notebook cell outputs by renderer kind (bokeh/image/markdown/...).

    Returns a dict of lists; each entry records the output's original index
    and its payload so downstream processors can render in order.
    """
    (prioritized_cell_output_dtypes, plotly_flags) = prioritize_dtypes(cell_outputs)
    cell_outputs_to_process = {'bokeh': [], 'image': [], 'markdown': [], 'pandas': [], 'plain': [], 'plotly': [], 'tqdm': []}
    for (i, cell_output) in enumerate(cell_outputs):
        # Highest-priority MIME type selected for this output.
        prioritized_data_dtype = prioritized_cell_output_dtypes[i][0]
        # Stream outputs carry their payload under 'text' rather than 'data'.
        data = (cell_output['data'][prioritized_data_dtype] if ('data' in cell_output) else cell_output['text'])
        # NOTE(review): the checks below are independent `if`s, not `elif`s —
        # a single output can be routed to more than one bucket (e.g. HTML
        # containing 'dataframe' also lands in 'pandas').
        # Bokeh arrives either under a bokeh-specific MIME type or embedded in HTML.
        bokeh_check = (('bokeh' in prioritized_data_dtype) or ((prioritized_data_dtype == 'text/html') and ('Bokeh Application' in data)))
        if bokeh_check:
            aggregate_bokeh(prioritized_data_dtype, cell_output, data, cell_outputs_to_process, i)
        image_check = prioritized_data_dtype.startswith('image')
        if image_check:
            # Plotly figures also surface as images; plotly_flags disambiguates.
            aggregate_images_and_plotly(prioritized_data_dtype, cell_output, data, plotly_flags, cell_outputs_to_process, i)
        plain_check = (prioritized_data_dtype in ['text/plain', 'stream'])
        if plain_check:
            aggregate_plain_output(prioritized_data_dtype, cell_output, data, cell_outputs_to_process, i)
        if (prioritized_data_dtype == 'text/markdown'):
            cell_outputs_to_process['markdown'].append({'index': i, 'data': data})
        if ('dataframe' in data):
            cell_outputs_to_process['pandas'].append({'index': i, 'data': data})
        if (prioritized_data_dtype == 'application/vnd.jupyter.widget-view+json'):
            # Widget views (e.g. tqdm bars) are represented by their text fallback.
            data = cell_output['data']['text/plain']
            cell_outputs_to_process['tqdm'].append({'index': i, 'data': data})
    return cell_outputs_to_process
def test_medium2d(log_capture):
    """Medium2D round-trips: conductivity must scale inversely with the assumed thickness."""
    sigma = 0.45
    thickness = 0.01
    cond_med = td.Medium(conductivity=sigma)
    medium = td.Medium2D.from_medium(cond_med, thickness=thickness)
    _ = medium.plot_sigma(freqs=[.0, .0], ax=AX)
    plt.close()
    # Sheet conductivity = volume conductivity * thickness.
    assert np.isclose(medium.ss.to_medium().conductivity, (sigma * thickness), rtol=RTOL)
    aniso_medium = td.AnisotropicMedium(xx=td.Medium(permittivity=2), yy=cond_med, zz=td.Medium())
    medium = td.Medium2D.from_anisotropic_medium(aniso_medium, axis=2, thickness=thickness)
    # Re-inflate with 1.5x the original thickness: material responses dilute by 1.5.
    medium3d = medium.to_anisotropic_medium(axis=2, thickness=(1.5 * thickness))
    # Susceptibility (eps - 1) scales by 1/1.5 around vacuum.
    assert np.isclose(medium3d.xx.to_medium().permittivity, (1 + ((2 - 1) / 1.5)), rtol=RTOL)
    assert np.isclose(medium3d.yy.to_medium().conductivity, (sigma / 1.5), rtol=RTOL)
    assert np.isclose(medium3d.zz.permittivity, 1, rtol=RTOL)
    # to_medium/to_pole_residue average xx and yy: (0 + sigma)/2 then / 1.5 = sigma/3.
    assert np.isclose(medium.to_medium(thickness=(1.5 * thickness)).conductivity, (sigma / 3), rtol=RTOL)
    assert np.isclose(medium.to_pole_residue(thickness=(1.5 * thickness)).to_medium().conductivity, (sigma / 3), rtol=RTOL)
    # Structures with finite and zero extent along the 2D axis: no warnings expected yet.
    td.Structure(medium=medium3d, geometry=td.Box(size=(1, 1, 1)))
    td.Structure(medium=medium3d, geometry=td.Box(size=(1, 0, 1)))
    assert_log_level(log_capture, None)
    # Plotting an anisotropic-derived Medium2D is expected to emit a warning.
    _ = medium.plot(freqs=[.0, .0], ax=AX)
    plt.close()
    assert_log_level(log_capture, 'WARNING')
def extractJaptem(item):
    """Build a release message for Japtem feed items, or None/False when not applicable."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Previews and items without any chapter/volume info are skipped outright.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # (accepted tags, canonical series name) — checked in the original order.
    tagmap = [
        (('[Chinese] Shadow Rogue',), 'Shadow Rogue'),
        (('[Chinese] Unique Legend', 'Unique Legend'), 'Unique Legend'),
        (("[Japanese] Magi's Grandson", "[JP] Magi's Grandson"), "Magi's Grandson"),
        (('[Japanese / Hosted] Arifureta',), 'Arifureta'),
        (('[Korean] 21st Century Archmage',), '21st Century Archmage'),
        (('[Chinese] Kill No More',), 'Kill No More'),
        (("[JP] Duke's Daughter",), "Good Sense of a Duke's Daughter"),
    ]
    for tags, series_name in tagmap:
        if any(tag in item['tags'] for tag in tags):
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix)
    return False
class OptionSeriesXrangeSonificationTracksMappingTime(Options):
    """Highcharts ``sonification.tracks.mapping.time`` options for x-range series.

    NOTE(review): getter/setter pairs share one name with no ``@property`` /
    ``@<name>.setter`` decorators visible; as written each setter shadows its
    getter — decorators were likely stripped from the original source.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def extractMeanderingotakuWordpressCom(item):
    """Build a release message for meanderingotaku.wordpress.com items, or None/False."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Previews and items with no chapter/volume info are ignored.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    if 'WATTT' not in item['tags']:
        return False
    return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
def _make_tcp_client_connection(address: str, public_key: str, host: str, port: int):
    """Build a TCPClientConnection pointed at host:port for use in tests."""
    configuration = ConnectionConfig(address=host, port=port, connection_id=TCPClientConnection.connection_id)
    # data_dir is irrelevant here, so a MagicMock stands in.
    tcp_connection = TCPClientConnection(configuration=configuration, data_dir=MagicMock(), identity=Identity('name', address, public_key))
    # Route its logs under the canonical package logger name.
    tcp_connection._default_logger_name = 'aea.packages.fetchai.connections.tcp.tcp_client'
    return tcp_connection
def test_intra_function():
    """CSE must hoist a subexpression repeated within one instruction only when intra mode is on."""
    cfg = ControlFlowGraph()
    # One block: a call using expr1 three times, plus a branch condition on expr1.
    cfg.add_node((node := BasicBlock(0, instructions=[Assignment(Variable('a'), Call(imp_function_symbol('foo'), [expr1, expr1, expr1])), Branch(Condition(OperationType.equal, [expr1, Constant(1)]))])))
    # intra=False: repeats inside a single instruction don't count, nothing is hoisted.
    _run_cse(cfg, _generate_options(intra=False))
    assert (len(node.instructions) == 2)
    # Default options: expr1 is hoisted into a fresh variable c0#0 and all uses replaced.
    _run_cse(cfg)
    replacement = Variable('c0', ssa_label=0)
    assert (node.instructions == [Assignment(replacement.copy(), expr1.copy()), Assignment(Variable('a'), Call(imp_function_symbol('foo'), [replacement.copy(), replacement.copy(), replacement.copy()])), Branch(Condition(OperationType.equal, [replacement.copy(), Constant(1)]))])
class TestInstanceEditorDemo(unittest.TestCase):
    """UI-level smoke test for the InstanceEditor demo: edits propagate both ways."""

    def test_instance_editor_demo(self):
        # Execute the demo script and grab its top-level `demo` object.
        demo = runpy.run_path(DEMO_PATH)['demo']
        tester = UITester()
        with tester.create_ui(demo) as ui:
            simple = tester.find_by_id(ui, 'simple')
            custom = tester.find_by_id(ui, 'custom')
            # Typing into the custom editor's field updates the model.
            occupation = custom.find_by_name('occupation')
            occupation.perform(KeySequence('Job'))
            occupation.perform(KeyClick('Enter'))
            self.assertEqual(demo.sample_instance.occupation, 'Job')
            # The simple editor opens on click; typing updates the model too.
            simple.perform(MouseClick())
            name = simple.find_by_name('name')
            name.perform(KeySequence('ABC'))
            name.perform(KeyClick('Enter'))
            self.assertEqual(demo.sample_instance.name, 'ABC')
            # Model-side changes must be reflected in both editors' displays.
            demo.sample_instance.name = 'XYZ'
            simple_displayed = name.inspect(DisplayedText())
            custom_name = custom.find_by_name('name')
            custom_displayed = custom_name.inspect(DisplayedText())
            self.assertEqual(simple_displayed, 'XYZ')
            self.assertEqual(custom_displayed, 'XYZ')
def taplog_callback(frame, bp_loc, internal_dict):
    """LLDB breakpoint callback: log the gesture recognizers and view of the current touch."""
    # Expression for the first Objective-C object parameter of the stopped frame.
    parameterExpr = objc.functionPreambleExpressionForObjectParameterAtIndex(0)
    print('Gesture Recognizers:\n{}'.format(fb.describeObject(('[[[%s allTouches] anyObject] gestureRecognizers]' % parameterExpr))))
    print('View:\n{}'.format(fb.describeObject(('[[[%s allTouches] anyObject] view]' % parameterExpr))))
    # Force an early return from the interrupted frame so the app continues untouched.
    lldb.debugger.HandleCommand('thread return')
# Fix: the decorator was truncated to a bare tuple expression; restore the
# full pytest parametrize marker so each launcher override runs as its own case.
@pytest.mark.parametrize('overrides', ['hydra.launcher.batch_size=1', 'hydra.launcher.max_nbytes=10000', 'hydra.launcher.max_nbytes=1M', 'hydra.launcher.pre_dispatch=all', 'hydra.launcher.pre_dispatch=10', 'hydra.launcher.pre_dispatch=3*n_jobs'])
def test_example_app_launcher_overrides(hydra_sweep_runner: TSweepRunner, overrides: str) -> None:
    """Each joblib-launcher override must be accepted and produce a one-job sweep."""
    with hydra_sweep_runner(calling_file='example/my_app.py', calling_module=None, task_function=None, config_path='.', config_name='config', overrides=[overrides]) as sweep:
        assert ((sweep.returns is not None) and (len(sweep.returns[0]) == 1))
class CirnoAI(object):
    """Simple scripted AI for the Cirno character: attacks in action stage, answers prompts."""

    def __init__(self, trans, ilet):
        # trans: the current game transaction; ilet: the input request to answer.
        self.trans = trans
        self.ilet = ilet

    def entry(self):
        """Answer the pending input request; returns True when a result was set."""
        ilet = self.ilet
        trans = self.trans
        p = ilet.actor
        g = trans.game
        # Small pause so the AI doesn't act instantly.
        g.pause(1.2)
        if (trans.name == 'ActionStageAction'):
            tl = g.players[1:]
            cl = (list(p.showncards) + list(p.cards))
            # 40% chance to act at all this stage.
            if (random.random() > 0.6):
                return False
            # Play the first usable AttackCard against the first other player.
            for c in cl:
                if c.is_card(AttackCard):
                    if self.try_launch(c, tl[:1]):
                        return True
        elif ((trans.name == 'Action') and isinstance(ilet, ActionInputlet)):
            # Only attempt card selection when cards (not candidates) are requested.
            if (not (ilet.categories and (not ilet.candidates))):
                return True
            # Never feed cards to heal requests.
            if isinstance(ilet.initiator, AskForHeal):
                return False
            cond = cast(CardChooser, ilet.initiator).cond
            cl = (list(p.showncards) + list(p.cards))
            # _ = itertools.chain, C(r) = all r-card combinations of our hand.
            (_, C) = (chain, (lambda r: combinations(cl, r)))
            # Try single cards first, then pairs, and hand over the first set accepted.
            for c in _(C(1), C(2)):
                if cond(c):
                    ilet.set_result(skills=[], cards=c, characters=[])
                    return True
        elif (trans.name == 'ChoosePeerCard'):
            # Prefer hidden cards over shown ones when stealing/choosing.
            tgt = ilet.target
            if tgt.cards:
                ilet.set_card(tgt.cards[0])
            elif tgt.showncards:
                ilet.set_card(tgt.showncards[0])

    def try_launch(self, c, tl, skills=[]):
        """Set the input result to play card *c* on targets *tl* if the action can fire.

        NOTE(review): mutable default `skills=[]` — it is never mutated here,
        but consider `skills=()` upstream.
        """
        p = self.ilet.actor
        act = ActionStageLaunchCard(p, tl, c)
        if act.can_fire():
            self.ilet.set_result(skills=skills, cards=[c], characters=tl)
            return True
        return False

    def ai_main(cls, trans, ilet):
        # NOTE(review): takes `cls` but no @classmethod decorator is visible —
        # likely stripped from the original source; confirm upstream.
        cls(trans, ilet).entry()
def test_raises():
    """Invalid alert.new() arguments must raise and leave no alert registered."""
    # Non-callable target.
    with pytest.raises(TypeError):
        alert.new('foo')
    assert (len(alert.show()) == 0)
    # Negative repeat count.
    with pytest.raises(ValueError):
        alert.new(time.time, repeat=(- 1))
    assert (len(alert.show()) == 0)
    # Args that don't match the callable's signature (time.time takes none).
    with pytest.raises(TypeError):
        alert.new(time.time, args=('potato',))
    assert (len(alert.show()) == 0)
def read_utf16(io, size=(- 1)):
    """Read a NUL-terminated UTF-16LE string from a binary stream.

    Reads two-byte units until a 0x0000 terminator, EOF, or *size* bytes.
    When a fixed *size* is given, the stream is advanced to the end of the
    field even if the terminator came early. Undecodable units are replaced.
    """
    units = []
    consumed = 0
    while (size == (- 1)) or (consumed < size):
        unit = io.read(2)
        consumed += 2
        # Stop on EOF or on the UTF-16 NUL terminator.
        if unit in (b'', b'\x00\x00'):
            break
        units.append(unit)
    # Skip any remaining bytes of a fixed-size field.
    if consumed < size:
        io.seek(size - consumed, 1)
    return b''.join(units).decode('UTF-16le', 'replace')
class CsssDivBoxMarginBorder(CssStyle.Style):
    """Pre-wrap div box style: no margin, slim horizontal padding, themed border."""

    _attrs = {'margin': 0, 'padding': '0 2px 0 2px', 'white-space': 'pre-wrap'}

    def customize(self):
        # Border starts at the lightest theme grey and darkens (greys[5]) on hover.
        self.css({'border': ('1px solid %s' % self.page.theme.greys[0]), 'font-family': self.page.body.style.globals.font.family})
        self.hover.css({'border': ('1px solid %s' % self.page.theme.greys[5])})
class Logger():
    """Minimal leveled logger.

    Warning/error/critical (`w`/`e`/`c`) always print; debug/info (`d`/`i`)
    print only when the DEBUG env var is set and this logger's name ends
    with it. Output goes to *file* (stderr by default).

    Bug fix: the original called `name.endswith(debug)` unguarded, raising
    AttributeError whenever DEBUG was set and `name` was left at its
    default of None.
    """

    def __init__(self, name=None, file=sys.stderr):
        debug = os.getenv('DEBUG', '')
        # Guard `name` — it defaults to None and has no .endswith().
        if debug and name and name.endswith(debug):
            func = self.stderr
        else:
            func = self.null
        self.d = func
        self.i = func
        self.w = self.stderr
        self.e = self.stderr
        self.c = self.stderr
        self.file = file

    def null(self, *_args, end='\n'):
        """Swallow the message (disabled level)."""
        pass

    def stderr(self, *args, end='\n'):
        """Print the message to the configured file."""
        if self:
            print(*args, end=end, file=self.file)
class WeathercomHTMLParser(HTMLParser):
    """Scrape weather.com pages for the embedded `window.__data` JSON payload.

    Feeds the fetched HTML through HTMLParser; handle_data() looks for the
    script that assigns `window.__data` and stores the parsed result on
    self.weather_data.
    """

    # Spoof a browser UA; the site may serve different content to bots.
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:80.0) Gecko/ Firefox/80.0'

    def __init__(self, logger):
        self.logger = logger
        super(WeathercomHTMLParser, self).__init__()

    def get_weather_data(self, url):
        """Fetch *url* and parse it; the result lands in self.weather_data (or None)."""
        self.logger.debug(f'Making request to {url} to retrieve weather data')
        self.weather_data = None
        req = Request(url, headers={'User-Agent': self.user_agent})
        with urlopen(req) as content:
            try:
                # Honor the declared charset; fall back to UTF-8 when absent.
                content_type = dict(content.getheaders())['Content-Type']
                charset = re.search('charset=(.*)', content_type).group(1)
            except AttributeError:
                charset = 'utf-8'
            html = content.read().decode(charset)
        try:
            self.feed(html)
        except Exception:
            # Parsing is best-effort; log and leave weather_data as None.
            self.logger.exception('Exception raised while parsing forecast page', exc_info=True)

    def load_json(self, json_input):
        """Parse *json_input* as JSON, returning None (and logging) on failure."""
        self.logger.debug(f'Loading the following data as JSON: {json_input}')
        try:
            return json.loads(json_input)
        except json.decoder.JSONDecodeError as exc:
            self.logger.debug(f'Error loading JSON: {exc}')
            self.logger.debug(f'String that failed to load: {json_input}')
            return None

    def handle_data(self, content):
        """HTMLParser hook: inspect <script> bodies for the window.__data assignment."""
        if (self.weather_data is not None):
            return
        content = content.strip().rstrip(';')
        try:
            tag_text = self.get_starttag_text().lower()
        except AttributeError:
            # Data before any start tag: no enclosing tag to inspect.
            tag_text = ''
        if tag_text.startswith('<script'):
            begin = content.find('window.__data')
            if (begin != (- 1)):
                weather_data = None
                self.logger.debug('Located window.__data')
                # Take the right-hand side of the assignment.
                raw_json = content[begin:].split('=', 1)[1].lstrip()
                if re.match('^JSON\\.parse\\("', raw_json):
                    # The payload is a JS string passed to JSON.parse: strip the
                    # wrapper, then undo JS string escaping (\" and \\).
                    raw_json = re.sub('^JSON\\.parse\\("', '', raw_json)
                    raw_json = re.sub('"\\);?$', '', raw_json)
                    raw_json = raw_json.replace('\\"', '"').replace('\\\\', '\\')
                json_data = self.load_json(raw_json)
                if (json_data is not None):
                    try:
                        # The forecast payload lives under the 'dal' key.
                        weather_data = json_data['dal']
                    except KeyError:
                        pass
                if (weather_data is None):
                    self.logger.debug(f'Failed to locate weather data in the following data: {json_data}')
                else:
                    self.weather_data = weather_data
                return
class RangeFacet(Facet):
    """Facet bucketing documents into user-supplied ``(key, (from, to))`` ranges.

    Ranges are half-open: ``from`` is inclusive (gte), ``to`` exclusive (lt).
    Either bound may be None for an open end.
    """

    agg_type = 'range'

    def _range_to_dict(self, range_spec):
        """Convert one ``(key, (from, to))`` pair into an ES range-agg bucket dict.

        (Fix: the parameter was previously named ``range``, shadowing the builtin.)
        """
        (key, bounds) = range_spec
        out = {'key': key}
        if (bounds[0] is not None):
            out['from'] = bounds[0]
        if (bounds[1] is not None):
            out['to'] = bounds[1]
        return out

    def __init__(self, ranges, **kwargs):
        super().__init__(**kwargs)
        self._params['ranges'] = list(map(self._range_to_dict, ranges))
        # keyed=False keeps bucket order as a list rather than a dict.
        self._params['keyed'] = False
        self._ranges = dict(ranges)

    def get_value_filter(self, filter_value):
        """Return a Range query matching documents in the bucket named *filter_value*."""
        (start, end) = self._ranges[filter_value]
        limits = {}
        if (start is not None):
            limits['gte'] = start
        if (end is not None):
            limits['lt'] = end
        return Range(_expand__to_dot=False, **{self._params['field']: limits})
class CollectionStripArtistPreference(widgets.ListPreference):
    """Preference holding the list of prefixes stripped from artist names.

    Rendered as a space-separated text entry; its context menu gains a
    'Reset to Defaults' item.
    """

    default = _get_default_strip_list()
    name = 'collection/strip_list'

    def __init__(self, preferences, widget):
        widgets.ListPreference.__init__(self, preferences, widget)
        # Hook the entry's context menu so we can append our custom item.
        self.widget.connect('populate-popup', self._populate_popup_cb)

    def _get_value(self):
        # Space-separated entry text -> lowercase list, empty tokens dropped.
        values = [v.lower() for v in self.widget.get_text().split(' ') if v]
        return values

    def _populate_popup_cb(self, entry, menu):
        # Imported lazily to avoid requiring Gtk at module import time.
        from gi.repository import Gtk
        entry = Gtk.MenuItem.new_with_mnemonic(_('Reset to _Defaults'))
        entry.connect('activate', self._reset_to_defaults_cb)
        entry.show()
        sep = Gtk.SeparatorMenuItem()
        sep.show()
        menu.attach(entry, 0, 1, 0, 1)
        menu.attach(sep, 0, 1, 1, 2)

    def _reset_to_defaults_cb(self, item):
        self.widget.set_text(' '.join(_get_default_strip_list()))
def test_scalar_expression(f):
    """Integrals of f = y over the extruded-mesh surface measures match expected values."""
    xs = SpatialCoordinate(f.function_space().mesh())
    # Interpolate the vertical coordinate; expected integrals below assume the
    # fixture mesh is the unit square/cube — TODO confirm against the fixture.
    f.interpolate(xs[1])
    assert (abs((assemble((f * ds_t)) - 1.0)) < 1e-07)   # top surface
    assert (abs((assemble((f * ds_b)) - 0.0)) < 1e-07)   # bottom surface
    assert (abs((assemble((f * ds_tb)) - 1.0)) < 1e-07)  # top + bottom combined
    assert (abs((assemble((f * ds_v)) - 1.0)) < 1e-07)   # vertical sides
    # Interior facets: value must agree from both sides ('+' and '-').
    assert (abs((assemble((f('+') * dS_h)) - 1.5)) < 1e-07)
    assert (abs((assemble((f('-') * dS_h)) - 1.5)) < 1e-07)
    assert (abs((assemble((f('+') * dS_v)) - 1.5)) < 1e-07)
    assert (abs((assemble((f('-') * dS_v)) - 1.5)) < 1e-07)
class QueryRewriteParser(BaseOutputParser):
    """Parse an LLM query-rewrite response into a list of cleaned query strings."""

    def __init__(self, is_stream_out: bool, **kwargs):
        super().__init__(is_stream_out=is_stream_out, **kwargs)

    def parse_prompt_response(self, response, max_length: int = 128):
        """Split *response* into individual lowercased, stripped queries.

        Drops an optional leading 'queries:' prefix, splits on commas and —
        when no comma is present — on '?'. Returns [] on any parse error.

        NOTE: max_length is currently unused; kept for interface compatibility.
        """
        lowercase = True
        try:
            results = []
            response = response.strip()
            if response.startswith('queries:'):
                response = response[len('queries:'):]
            # Fix: the original performed each split twice; re-splitting the
            # same string on the same separator is a no-op, so split once and
            # fall back to '?' only when the comma split found nothing.
            queries = response.split(',')
            if (len(queries) == 1):
                queries = response.split('?')
            for k in queries:
                rk = k
                if lowercase:
                    rk = rk.lower()
                s = rk.strip()
                if (s == ''):
                    continue
                results.append(s)
        except Exception as e:
            logger.error(f'parse query rewrite prompt_response error: {e}')
            return []
        return results

    def parse_view_response(self, speak, data) -> str:
        """Pass the structured data straight through for rendering."""
        return data
('/javbus')  # NOTE(review): likely a stripped `@plugin.route('/javbus')` decorator — restore from upstream.
def javbus():
    """Top-level plugin menu for the javbus section.

    NOTE(review): this block is garbled — the route decorator above lost its
    `@plugin.route` prefix, the string assigned to javbusurl['base'] below is
    unterminated, and the menu labels / notify() text appear stripped (likely
    non-ASCII literals lost in extraction). Restore from the original source.
    """
    # Ensure a base URL is configured before building the menu.
    if (not ('base' in javbusurl.raw_dict())):
        javbusurl['base'] = '
        getjavbusurl()
    # Re-resolve the base URL when the configured one is unreachable.
    if (not url_is_alive(javbusurl['base'])):
        notify(', ')
        getjavbusurl()
    item = [{'label': '', 'path': plugin.url_for('javlist', qbbb='qb', filtertype='0', filterkey='0', page=1)}, {'label': '', 'path': plugin.url_for('javstarlist', qbbb='qb', page=1)}, {'label': '', 'path': plugin.url_for('javgernefilter', qbbb='qb')}, {'label': '', 'path': plugin.url_for('javlist', qbbb='qb', filtertype='genre', filterkey='sub', page=1)}, {'label': '', 'path': plugin.url_for('javlist', qbbb='bb', filtertype='0', filterkey='0', page=1)}, {'label': '', 'path': plugin.url_for('javstarlist', qbbb='bb', page=1)}, {'label': '', 'path': plugin.url_for('javgernefilter', qbbb='bb')}, {'label': '', 'path': plugin.url_for('javlist', qbbb='bb', filtertype='genre', filterkey='sub', page=1)}, {'label': '', 'path': plugin.url_for('javlist', qbbb='om', filtertype='0', filterkey='0', page=1)}, {'label': '', 'path': plugin.url_for('javstarlist', qbbb='om', page=1)}, {'label': '', 'path': plugin.url_for('javgernefilter', qbbb='om')}, {'label': '', 'path': plugin.url_for('searchinit', stypes='jav', sstr='0', modify='0', otherargs='{}')}, {'label': ('[COLOR FF00FFFF]%s[/COLOR]' % javbusurl['base']), 'path': plugin.url_for('getjavbusurl')}]
    return item
def zone_bounding_boxes(zones_config: dict[(ZoneKey, Any)]) -> dict[(ZoneKey, BoundingBox)]:
    """Collect the bounding boxes explicitly declared in the zone configs."""
    return {
        zone_id: zone_config['bounding_box']
        for (zone_id, zone_config) in zones_config.items()
        if 'bounding_box' in zone_config
    }
def get_builder(uifile: str) -> Gtk.Builder:
    """Load a Gtk.Builder from *uifile*, translating templates in-memory on Windows."""
    builder = Gtk.Builder()
    if (sys.platform != 'win32'):
        # Non-Windows: Gtk can load (and translate) the file directly.
        builder.add_from_file(uifile)
        return builder
    # Windows: read the raw template and apply translations before loading.
    with open(uifile, 'rb') as fp:
        raw_template = fp.read()
    builder.add_from_string(get_template_translated(raw_template))
    return builder
class MeeJeeFetch(PreemptProcessorBase.PreemptProcessorBase):
    """Preemptive fetcher for meejee.net: renders pages in Chromium until JS templates resolve."""

    log_name = 'Main.Processor.MeeJee'

    def preemptive_wants_url(lowerspliturl: tuple):
        # NOTE(review): no `self` parameter — presumably called unbound or
        # intended as a @staticmethod; confirm against the base class contract.
        return lowerspliturl.netloc.endswith('meejee.net')

    def premptive_handle_content(self, url):
        """Fetch *url* via Chromium and wait until client-side templates have rendered.

        Returns (content, filename, mimetype); raises GarbageSiteWrapper on timeout.
        NOTE(review): method name is misspelled ('premptive'); kept as-is since
        the base class may dispatch on exactly this name.
        """
        wrapper_step_through_timeout = 60  # seconds to wait for rendering
        loading_str = '{{'  # unrendered mustache/angular template marker
        with self.wg._chrome_context(url, extra_tid=False) as cr:
            self.wg._syncIntoChromium(cr)
            try:
                response = cr.blocking_navigate_and_get_source(url)
                # Derive the filename from the final (post-redirect) URL path.
                raw_url = cr.get_current_url()
                fileN = urllib.parse.unquote(urllib.parse.urlparse(raw_url)[2].split('/')[(- 1)])
                fileN = bs4.UnicodeDammit(fileN).unicode_markup
                if response['binary']:
                    # Binary payloads skip the render wait; sniff the MIME type.
                    return (response['content'], fileN, magic.from_buffer(response['content'], mime=True))
                self.log.info('Waiting for content to render...')
                # Poll once a second until the template marker disappears.
                for _ in range(wrapper_step_through_timeout):
                    body = cr.get_rendered_page_source()
                    if (loading_str not in body):
                        self.log.info('Content appears to have rendered!')
                        return (self.de_garbage_html(body), fileN, 'text/html')
                    time.sleep(1)
            finally:
                self.wg._syncOutOfChromium(cr)
        raise WebRequest.GarbageSiteWrapper('Could not render JS content!')
class ScopedRateThrottle(SimpleRateThrottle):
    """Throttle whose rate is looked up from the view's `throttle_scope` attribute."""

    scope_attr = 'throttle_scope'

    def __init__(self):
        # Deliberately skip SimpleRateThrottle.__init__: the rate cannot be
        # determined until the view is known, inside allow_request().
        pass

    def allow_request(self, request, view):
        # Views without a throttle_scope attribute are never throttled.
        self.scope = getattr(view, self.scope_attr, None)
        if (not self.scope):
            return True
        self.rate = self.get_rate()
        (self.num_requests, self.duration) = self.parse_rate(self.rate)
        return super().allow_request(request, view)

    def get_cache_key(self, request, view):
        """Build the cache key from the scope plus the user pk (or anonymous identity)."""
        if (request.user and request.user.is_authenticated):
            ident = request.user.pk
        else:
            ident = self.get_ident(request)
        return (self.cache_format % {'scope': self.scope, 'ident': ident})
# Fix: the decorator was truncated to a bare `.parametrize(...)` expression
# (a syntax error); restore the full pytest parametrize marker.
@pytest.mark.parametrize('pt,on_curve,is_infinity', [(G1, True, False), (multiply(G1, 5), True, False), (Z1, True, True), ((FQ(5566), FQ(5566), FQ.one()), False, None)])
def test_G1_compress_and_decompress_flags(pt, on_curve, is_infinity):
    """compress_G1 must encode the a/b/c flag bits correctly and round-trip on-curve points."""
    assert (on_curve == is_on_curve(pt, b))
    z = compress_G1(pt)
    if on_curve:
        # Layout: x in the low 381 bits, then a/b/c flags in bits 381..383.
        x = (z % POW_2_381)
        c_flag = ((z % POW_2_384) // POW_2_383)
        b_flag = ((z % POW_2_383) // POW_2_382)
        a_flag = ((z % POW_2_382) // POW_2_381)
        assert (x < q)
        assert (c_flag == 1)  # compression flag is always set
        if is_infinity:
            # Point at infinity: b set, a and x zero.
            assert (b_flag == 1)
            assert (a_flag == x == 0)
        else:
            assert (b_flag == 0)
            (pt_x, pt_y) = normalize(pt)
            # a encodes which of the two y roots was taken.
            assert (a_flag == ((pt_y.n * 2) // q))
            assert (x == pt_x.n)
        # Round trip.
        assert (normalize(decompress_G1(z)) == normalize(pt))
    else:
        # Off-curve input must be rejected on decompression.
        with pytest.raises(ValueError):
            decompress_G1(z)
def delete_single_doctype_from_es(index_name=None, doc_type=None, num_doc_id=None):
    """Delete documents matching both ``doc_type`` and ``doc_id`` from an ES index."""
    must_clauses = [
        {'match': {'type': doc_type}},
        {'match': {'doc_id': num_doc_id}},
    ]
    query = {'query': {'bool': {'must': must_clauses}}}
    es_client = create_es_client()
    es_client.delete_by_query(index=index_name, body=query)
    logger.info(' Successfully deleted doc_type={} and doc_id={} from index={} on Elasticsearch.'.format(doc_type, num_doc_id, index_name))
class PredictionCountEvaluator(DatasetEvaluator):
    """Accumulates per-image prediction counts and confidence scores and
    reports their means, optionally reduced across distributed workers."""

    def __init__(self, distributed: bool = True):
        # When True, results are gathered onto the main process in evaluate().
        self._distributed = distributed
        self.prediction_counts = []
        self.confidence_scores = []

    def reset(self):
        """Drop all accumulated statistics."""
        self.prediction_counts = []
        self.confidence_scores = []

    def process(self, inputs, outputs):
        """Record the instance count and confidence scores of each output."""
        for output_dict in outputs:
            instances = output_dict['instances']
            self.prediction_counts.append(len(instances))
            self.confidence_scores.extend(instances.get('scores').tolist())

    def evaluate(self):
        """Return mean predictions per image and mean confidence per prediction."""
        if self._distributed:
            comm.synchronize()
            # Gather the per-worker lists onto rank 0 and flatten them.
            prediction_counts = list(itertools.chain(*comm.gather(self.prediction_counts, dst=0)))
            confidence_scores = list(itertools.chain(*comm.gather(self.confidence_scores, dst=0)))
            if not comm.is_main_process():
                # Non-main workers report nothing.
                return {}
        else:
            prediction_counts = self.prediction_counts
            confidence_scores = self.confidence_scores
        mpi = np.mean(prediction_counts)
        mcp = np.mean(confidence_scores)
        output_metrics = OrderedDict({'false_positives': {'predictions_per_image': mpi, 'confidence_per_prediction': mcp}})
        logger.info(f'mean predictions per image: {mpi}')
        logger.info(f'mean confidence per prediction: {mcp}')
        return output_metrics
def get_main(main_module: tp.Optional[str] = None, package: tp.Optional[str] = None):
    """Resolve and import the project's ``main`` entry point.

    Falls back to the DORA_MAIN_MODULE / DORA_PACKAGE environment variables,
    then to package auto-detection, and validates that ``main`` was
    decorated with ``dora.main``.
    """
    if main_module is None:
        main_module = os.environ.get('DORA_MAIN_MODULE') or 'train'
    if package is None:
        package = os.environ.get('DORA_PACKAGE')
        if package is None:
            package = _find_package(main_module)
    module_name = f'{package}.{main_module}'
    # Make the current working directory importable.
    sys.path.insert(0, str(Path('.').resolve()))
    module = import_or_fatal(module_name)
    try:
        main = module.main
    except AttributeError:
        fatal(f'Could not find function `main` in {module_name}.')
    if not isinstance(main, DecoratedMain):
        fatal(f'{module_name}.main was not decorated with `dora.main`.')
    return main
class AnEditor(HasPrivateTraits):
    # Traits UI model: two code buffers, two text fields and an embedded
    # Python shell, laid out in a vertical split.
    code1 = Code()  # first source-code buffer
    code2 = Code()  # second source-code buffer
    name = Str('Mike Noggins')  # sample default name
    address = Str('1313 Drury Lane')  # sample default address
    shell = PythonValue  # namespace backing the Python shell view
    # VSplit layout: two empty spacer groups, the name/address form, and the
    # shell group; exported as 'editor', shown as a dockable subpanel with
    # no buttons and no field labels.
    traits_view = View(VSplit(VGroup('', '|<>'), VGroup('', '|<>'), VGroup('name', 'address'), VGroup('shell', '|{Python Shell}<>'), export='editor', show_labels=False), kind='subpanel', resizable=True, buttons=NoButtons, dock='horizontal')
class PickBallMode(BaseMode):
    """Interaction mode for choosing which ball to cue.

    While active, mouse motion moves the camera fixation; the cueable ball
    closest to the fixation point is highlighted, and releasing mouse1
    commits it as the cue ball before returning to aim mode.
    """
    name = Mode.pick_ball
    # Initial key states for this mode's actions.
    keymap = {Action.quit: False, Action.pick_ball: True, Action.done: False}

    def enter(self):
        # Relative mouse mode so dragging moves the camera fixation.
        mouse.mode(MouseMode.RELATIVE)
        self.closest_ball = None
        self.register_keymap_event('escape', Action.quit, True)
        self.register_keymap_event('q', Action.pick_ball, True)
        self.register_keymap_event('q-up', Action.pick_ball, False)
        self.register_keymap_event('mouse1-up', 'done', True)
        tasks.add(self.pick_ball_task, 'pick_ball_task')
        tasks.add(self.shared_task, 'shared_task')

    def exit(self):
        # Clear any active highlight before unscheduling the mode's tasks.
        self.remove_ball_highlight()
        tasks.remove('shared_task')
        tasks.remove('pick_ball_task')

    def pick_ball_task(self, task):
        # NOTE(review): deliberately disabled — everything below the raise
        # is unreachable until this mode is repaired upstream.
        raise NotImplementedError("Woops, this is in a broken state, don't press that")
        if (not self.keymap[Action.pick_ball]):
            # Key released without a selection: fall back to aim mode.
            Global.mode_mgr.change_mode(Mode.aim)
            return task.done
        cam.move_fixation_via_mouse()
        # Re-highlight whenever the nearest cueable ball changes.
        closest = self.find_closest_ball()
        if (closest != self.closest_ball):
            self.remove_ball_highlight()
            self.closest_ball = closest
            self.ball_highlight = self.closest_ball.get_node('pos')
            self.add_ball_highlight()
        if self.keymap['done']:
            self.remove_ball_highlight()
            ball_id = self.closest_ball._ball.id
            if (ball_id is not None):
                # Commit the selection and refocus the cue on the new ball.
                multisystem.active.cue.cue_ball_id = ball_id
                visual.cue.init_focus(visual.balls[ball_id])
                Global.game.log.add_msg(f'Now cueing the {multisystem.active.cue.cue_ball_id} ball', sentiment='neutral')
            Global.mode_mgr.change_mode(Mode.aim)
            return task.done
        return task.cont

    def remove_ball_highlight(self):
        # Undo the scale/shadow animation effects, if a highlight is active.
        if ((self.closest_ball is not None) and tasks.has('pick_ball_highlight_animation')):
            node = self.closest_ball.get_node('pos')
            node.setScale((node.getScale() / ani.ball_highlight['ball_factor']))
            self.closest_ball.get_node('shadow').setAlphaScale(1)
            self.closest_ball.get_node('shadow').setScale(1)
            self.closest_ball.set_render_state_as_object_state()
            tasks.remove('pick_ball_highlight_animation')

    def add_ball_highlight(self):
        # Enlarge the ball and start the bobbing animation task.
        if (self.closest_ball is not None):
            tasks.add(self.pick_ball_highlight_animation, 'pick_ball_highlight_animation')
            node = self.closest_ball.get_node('pos')
            node.setScale((node.getScale() * ani.ball_highlight['ball_factor']))

    def pick_ball_highlight_animation(self, task):
        # Bob the ball vertically and pulse its shadow with out-of-phase
        # sinusoids driven by the task clock.
        phase = (task.time * ani.ball_highlight['ball_frequency'])
        new_height = (ani.ball_highlight['ball_offset'] + (ani.ball_highlight['ball_amplitude'] * np.sin(phase)))
        self.ball_highlight.setZ(new_height)
        new_alpha = (ani.ball_highlight['shadow_alpha_offset'] + (ani.ball_highlight['shadow_alpha_amplitude'] * np.sin((- phase))))
        new_scale = (ani.ball_highlight['shadow_scale_offset'] + (ani.ball_highlight['shadow_scale_amplitude'] * np.sin(phase)))
        self.closest_ball.get_node('shadow').setAlphaScale(new_alpha)
        self.closest_ball.get_node('shadow').setScale(new_scale)
        return task.cont

    def find_closest_ball(self):
        """Return the cueable, unpocketed ball nearest the camera fixation."""
        cam_fixation = cam.fixation.getPos()
        d_min = np.inf
        closest = None
        for (ball_id, ball) in visual.balls.items():
            # Only balls the active player may cue are candidates.
            if (ball_id not in Global.game.active_player.can_cue):
                continue
            # Skip pocketed balls.
            if (ball._ball.state.s == c.pocketed):
                continue
            d = ptmath.norm3d((ball._ball.state.rvw[0] - cam_fixation))
            if (d < d_min):
                (d_min, closest) = (d, ball)
        return closest
class AsyncShieldCancellation():
    """Context manager shielding the enclosed block from cancellation on
    either trio or asyncio (via anyio), depending on the running backend."""

    def __init__(self) -> None:
        self._backend = current_async_library()
        # Pick the shield implementation matching the detected backend;
        # unknown backends get no shield at all.
        self._shield = None
        if self._backend == 'trio':
            self._shield = trio.CancelScope(shield=True)
        elif self._backend == 'asyncio':
            self._shield = anyio.CancelScope(shield=True)

    def __enter__(self) -> 'AsyncShieldCancellation':
        if self._shield is not None:
            self._shield.__enter__()
        return self

    def __exit__(self, exc_type: Optional[Type[BaseException]]=None, exc_value: Optional[BaseException]=None, traceback: Optional[TracebackType]=None) -> None:
        if self._shield is not None:
            self._shield.__exit__(exc_type, exc_value, traceback)
('cuda.conv2d_bias_few_channels.gen_function')
def conv2d_bias_few_channels_gen_function(func_attrs, exec_cond_template, shape_eval_template, shape_save_template):
    """Generate the conv2d+bias (few-channels) kernel source.

    Thin registry shim: delegates directly to the shared conv-bias-add
    code generator with the same templates.
    """
    return cba.gen_function(
        func_attrs=func_attrs,
        exec_cond_template=exec_cond_template,
        shape_eval_template=shape_eval_template,
        shape_save_template=shape_save_template,
    )
def get_form_for(command_path: str):
    """Render the HTML form for the command at ``command_path``.

    Aborts with 404 when the path does not resolve to a known command.
    """
    try:
        ctx_and_commands = _get_commands_by_path(command_path)
    except CommandNotFound as err:
        return abort(404, str(err))
    levels = _generate_form_data(ctx_and_commands)
    last_level = levels[-1]
    return render_template(
        'command_form.html.j2',
        levels=levels,
        command=last_level['command'],
        command_path=command_path,
    )
def loader(dataset, *args, shuffle=False, klass=DataLoader, **kwargs):
    """Build a data loader that is distributed-training aware.

    Outside distributed runs this is a plain ``klass(...)``. Inside them,
    shuffled loading delegates per-rank partitioning to a
    DistributedSampler, while unshuffled loading statically shards the
    dataset across ranks.
    """
    if not is_distributed():
        return klass(dataset, *args, shuffle=shuffle, **kwargs)
    if shuffle:
        # The sampler takes over both shuffling and rank partitioning.
        return klass(dataset, *args, sampler=DistributedSampler(dataset), **kwargs)
    # Deterministic round-robin shard: rank r takes items r, r+ws, r+2ws, ...
    shard = Subset(dataset, list(range(rank(), len(dataset), world_size())))
    return klass(shard, *args, shuffle=shuffle, **kwargs)
.parametrize('response,case_sensitive,single_match,gold_spans', [('PER: Jean', False, False, [('jean', 'PER'), ('Jean', 'PER'), ('Jean', 'PER')]), ('PER: Jean', False, True, [('jean', 'PER')]), ('PER: Jean', True, False, [('Jean', 'PER'), ('Jean', 'PER')]), ('PER: Jean', True, True, [('Jean', 'PER')])])
def test_spancat_matching(response, case_sensitive, single_match, gold_spans):
text = 'This guy jean (or Jean) is the president of the Jean Foundation.'
labels = 'PER,ORG,LOC'
llm_spancat = make_spancat_task_v2(labels=labels, case_sensitive_matching=case_sensitive, single_match=single_match)
nlp = spacy.blank('en')
doc_in = nlp.make_doc(text)
doc_out = list(llm_spancat.parse_responses([doc_in], [response]))[0]
pred_spans = [(span.text, span.label_) for span in doc_out.spans['sc']]
assert (pred_spans == gold_spans) |
class TestBlend(util.ColorAsserts, unittest.TestCase):
    """Tests for `compose` blend-mode handling."""

    def test_blend_no_mode(self):
        """`blend='normal'` is the default blend mode."""
        source = Color('blue').set('alpha', 0.5)
        backdrop = Color('yellow')
        self.assertEqual(source.compose(backdrop, blend='normal'), source.compose(backdrop))

    def test_blend_different_space(self):
        """Blending can be performed in a non-sRGB working space."""
        source = Color('blue').set('alpha', 0.5)
        backdrop = Color('yellow')
        result = source.compose(backdrop, blend='normal', space='display-p3')
        self.assertColorEqual(result, Color('color(display-p3 0.5 0.5 0.64524)'))

    def test_blend_different_space_and_output(self):
        """Blend space and output space can be set independently."""
        source = Color('blue').set('alpha', 0.5)
        backdrop = Color('yellow')
        result = source.compose(backdrop, blend='normal', space='display-p3', out_space='srgb')
        self.assertColorEqual(result, Color('rgb(127.5 127.5 167.63)'))

    def test_blend_bad_mode(self):
        """Unknown blend modes raise ValueError."""
        with self.assertRaises(ValueError):
            Color('blue').compose('red', blend='bad')

    def test_blend_in_place(self):
        """`in_place=True` mutates and returns the same color instance."""
        source = Color('blue').set('alpha', 0.5)
        backdrop = Color('yellow')
        returned = source.compose(backdrop, blend='normal', in_place=True)
        self.assertTrue(source is returned)
        self.assertEqual(source, Color('color(srgb 0.5 0.5 0.5)'))
def read_status(id):
    """Fetch step/pipeline event rows for a run id from the event-log DB.

    Returns the list of (event, dagster_event_type, timestamp) rows,
    False when no rows matched, or an error dict when the query failed.
    """
    connection = None
    cursor = None
    run_records = []
    try:
        connection = psycopg2.connect(**conf.DB_HANDLER_CONFIG, database='test_event_log_storage')
        cursor = connection.cursor()
        # Parameterized query: never interpolate the id into the SQL text.
        # Literal '%' in LIKE patterns must be doubled when parameters are
        # passed to psycopg2.
        query = """
            select event, dagster_event_type, timestamp from event_logs
            where run_id = %s AND
            (dagster_event_type LIKE 'STEP_%%' OR dagster_event_type LIKE 'PIPELINE_%%')
        """
        cursor.execute(query, (id,))
        run_records = cursor.fetchall()
    except (Exception, psycopg2.Error) as error:
        print('Error while fetching data from PostgreSQL', error)
        return {'ERROR:': 'Error in Query'}
    finally:
        # Close only the resources that were actually opened; previously a
        # failure in connection.cursor() raised NameError here.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
    if not run_records:
        return False
    return run_records
def is_arn_filter_match(arn: str, filter_arn: str) -> bool:
    """Return True when ``arn`` matches ``filter_arn`` segment-by-segment.

    Both strings are split on ':'; they must have the same number of
    segments, and each non-empty filter segment must equal the
    corresponding ARN segment (empty filter segments match anything).
    """
    arn_parts = arn.split(':')
    filter_parts = filter_arn.split(':')
    if len(arn_parts) != len(filter_parts):
        return False
    return all(not f or f == a for a, f in zip(arn_parts, filter_parts))
class AgentModel(nn.Module):
    """Two-layer policy network mapping observations to action probabilities."""

    def __init__(self, n_observations, n_actions, n_hidden):
        super().__init__()
        # Attribute names are kept: they define the state_dict layout.
        self.linear = nn.Linear(n_observations, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_actions)

    def forward(self, frame):
        """Return a softmax probability distribution over actions."""
        hidden = self.linear(frame)
        activated = torch.tanh(hidden)
        logits = self.linear2(activated)
        return torch.softmax(logits, dim=-1)
def test_A_from_dict():
    """An aggregation built from a dict keeps its params and nested aggs."""
    source = {'terms': {'field': 'tags'}, 'aggs': {'per_author': {'terms': {'field': 'author.raw'}}}}
    agg = aggs.A(source)
    per_author = aggs.A('terms', field='author.raw')
    assert isinstance(agg, aggs.Terms)
    assert agg._params == {'field': 'tags', 'aggs': {'per_author': per_author}}
    # Nested aggregations are reachable both by item and attribute access.
    assert agg['per_author'] == per_author
    assert agg.aggs.per_author == per_author
def sstore(evm: Evm) -> None:
    """SSTORE: pop a key and value and write the value to the current
    account's storage, applying net gas metering and refund adjustments."""
    key = pop(evm.stack).to_be_bytes32()
    new_value = pop(evm.stack)
    # Refuse to run with no more gas left than the call stipend.
    ensure((evm.gas_left > GAS_CALL_STIPEND), OutOfGasError)
    # Net metering compares the slot's value at transaction start
    # (original) with its value right now (current).
    original_value = get_storage_original(evm.env.state, evm.message.current_target, key)
    current_value = get_storage(evm.env.state, evm.message.current_target, key)
    gas_cost = Uint(0)
    # First touch of this slot in the transaction: cold-access surcharge.
    if ((evm.message.current_target, key) not in evm.accessed_storage_keys):
        evm.accessed_storage_keys.add((evm.message.current_target, key))
        gas_cost += GAS_COLD_SLOAD
    if ((original_value == current_value) and (current_value != new_value)):
        # First real modification of the slot in this transaction.
        if (original_value == 0):
            gas_cost += GAS_STORAGE_SET
        else:
            gas_cost += (GAS_STORAGE_UPDATE - GAS_COLD_SLOAD)
    else:
        # No-op write, or a slot that was already modified this transaction.
        gas_cost += GAS_WARM_ACCESS
    if (current_value != new_value):
        # Refund bookkeeping for clearing slots (and undoing a clear).
        if ((original_value != 0) and (current_value != 0) and (new_value == 0)):
            evm.refund_counter += int(GAS_STORAGE_CLEAR_REFUND)
        if ((original_value != 0) and (current_value == 0)):
            evm.refund_counter -= int(GAS_STORAGE_CLEAR_REFUND)
        if (original_value == new_value):
            # Slot restored to its original value: refund the difference
            # between what was charged for the first write and warm access.
            if (original_value == 0):
                evm.refund_counter += int((GAS_STORAGE_SET - GAS_WARM_ACCESS))
            else:
                evm.refund_counter += int(((GAS_STORAGE_UPDATE - GAS_COLD_SLOAD) - GAS_WARM_ACCESS))
    charge_gas(evm, gas_cost)
    # Storage writes are forbidden inside a static call context.
    ensure((not evm.message.is_static), WriteInStaticContext)
    set_storage(evm.env.state, evm.message.current_target, key, new_value)
    evm.pc += 1
def scan_binary(apkfile):
    """Scan an APK's embedded classes for known non-free code signatures.

    Warnings are only logged; the number of error-level ("problem")
    matches is returned.
    """
    logging.info(_('Scanning APK with dexdump for known non-free classes.'))
    result = get_embedded_classes(apkfile)
    problems, warnings = 0, 0
    # Hoist the signature regex tables out of the per-class loop; they do
    # not change between iterations.
    regexs = _get_tool().regexs
    warn_signatures = regexs['warn_code_signatures']
    err_signatures = regexs['err_code_signatures']
    for classname in result:
        for suspect, regexp in warn_signatures.items():
            if regexp.match(classname):
                logging.debug("Warning: found class '%s'" % classname)
                warnings += 1
        for suspect, regexp in err_signatures.items():
            if regexp.match(classname):
                logging.debug("Problem: found class '%s'" % classname)
                problems += 1
    # The filename placeholder was missing although the kwarg was passed;
    # restore it so the messages identify the scanned file.
    if warnings:
        logging.warning(_('Found {count} warnings in {filename}').format(count=warnings, filename=apkfile))
    if problems:
        logging.critical(_('Found {count} problems in {filename}').format(count=problems, filename=apkfile))
    return problems
class TestMain(BasePyTestCase):
    """Tests for the clean_old_composes task's directory pruning."""

    def setup_method(self, method):
        super().setup_method(method)
        # Fresh scratch compose directory for each test.
        self.compose_dir = tempfile.mkdtemp()

    def teardown_method(self, method):
        shutil.rmtree(self.compose_dir)
        super().teardown_method(method)

    # NOTE(review): the bare string below looks like a stripped
    # @mock.patch(...) decorator supplying the `log` argument — confirm.
    ('bodhi.server.tasks.clean_old_composes.log')
    def test_main(self, log):
        """main(2) keeps only the newest composes per prefix while leaving
        unrelated files and directories untouched."""
        # Directory names to create; repeated names share a compose prefix.
        # NOTE(review): the names look truncated (trailing '-'), so some
        # entries are duplicates — verify against the original fixture.
        dirs = ['dist-5E-epel-', 'dist-5E-epel-', 'dist-5E-epel-', 'dist-5E-epel-', 'dist-5E-epel-testing-', 'dist-5E-epel-testing-', 'dist-5E-epel-testing-', 'dist-6E-epel-', 'dist-6E-epel-', 'dist-6E-epel-testing-', 'epel7-', 'epel7-', 'epel7-', 'epel7-', 'epel7-testing-', 'epel7-testing-', 'epel7-testing-', 'f23-updates-', 'f23-updates-', 'f23-updates-', 'f23-updates-', 'f23-updates-testing-', 'f23-updates-testing-', 'f23-updates-testing-', 'f24-updates-', 'f24-updates-', 'f24-updates-testing-', 'this_should_get_left_alone', 'f23-updates-should_be_untouched', 'f23-updates.repocache', 'f23-updates-testing-blank']
        [os.makedirs(os.path.join(self.compose_dir, d)) for d in dirs]
        # A leftover file inside one compose dir and a loose file at the top
        # level; the latter must survive the cleanup.
        with open(os.path.join(self.compose_dir, 'dist-5E-epel-', 'oops.txt'), 'w') as oops:
            oops.write('This compose failed to get cleaned and left this file around, oops!')
        with open(os.path.join(self.compose_dir, 'COOL_FILE.txt'), 'w') as cool_file:
            cool_file.write("This file should be allowed to hang out here because it's cool.")
        # Run the task against the scratch dir, keeping 2 composes each.
        with patch.dict(config.config, {'compose_dir': self.compose_dir}):
            clean_old_composes_main(2)
        expected_dirs = {'dist-5E-epel-', 'dist-5E-epel-', 'dist-5E-epel-testing-', 'dist-5E-epel-testing-', 'dist-6E-epel-', 'dist-6E-epel-', 'dist-6E-epel-testing-', 'epel7-', 'epel7-', 'epel7-testing-', 'epel7-testing-', 'f23-updates-', 'f23-updates-', 'f23-updates-testing-', 'f23-updates-testing-', 'f24-updates-', 'f24-updates-', 'f24-updates-testing-', 'this_should_get_left_alone', 'f23-updates-should_be_untouched', 'f23-updates.repocache', 'f23-updates-testing-blank'}
        actual_dirs = set([d for d in os.listdir(self.compose_dir) if os.path.isdir(os.path.join(self.compose_dir, d))])
        assert (actual_dirs == expected_dirs)
        actual_files = [f for f in os.listdir(self.compose_dir) if os.path.isfile(os.path.join(self.compose_dir, f))]
        assert (actual_files == ['COOL_FILE.txt'])
        # Every removed directory must have been logged before deletion.
        expected_output = (set(dirs) - expected_dirs)
        expected_output = {os.path.join(self.compose_dir, d) for d in expected_output}
        expected_output = (expected_output | {'Deleting the following directories:'})
        logged = set([c[0][0] for c in log.info.call_args_list])
        assert (logged == expected_output)
class JsIterable():
    """Builder for a JavaScript ``for`` loop over an iterable.

    Options: ``var`` (loop variable name, default 'x') and ``type``
    (loop keyword such as 'in' or 'of', default 'in').
    """

    def __init__(self, iterable: Union[(primitives.JsDataModel, str)], options: Optional[dict]=None, profile: Optional[Union[(dict, bool)]]=None):
        self.__js_it = iterable
        # Loop-body fragments; previously never initialized, so toStr() or
        # fncs(reset=False) raised AttributeError before fncs(reset=True).
        self.__js_funcs = []
        self.options = {'var': 'x', 'type': 'in'}
        if options is not None:
            self.options.update(options)
        self.profile = profile

    # The duplicated `var` definitions were a getter/setter pair whose
    # property decorators were lost; restored here so `self.var` yields the
    # variable name instead of a bound method in toStr().
    @property
    def var(self):
        """Name of the JavaScript loop variable."""
        return self.options['var']

    @var.setter
    def var(self, value: str):
        self.options['var'] = value

    def fncs(self, js_funcs: Union[(list, str)], reset: bool=True, profile: Optional[Union[(dict, bool)]]=None):
        """Set (``reset=True``) or append to the loop body; returns self."""
        if not isinstance(js_funcs, list):
            js_funcs = [js_funcs]
        if reset:
            self.__js_funcs = js_funcs
        else:
            self.__js_funcs.extend(js_funcs)
        self.profile = profile
        return self

    def toStr(self):
        """Emit the JavaScript for-loop source string."""
        js_n_funcs = JsUtils.jsConvertFncs(self.__js_funcs, toStr=True, profile=self.profile)
        js_iter = JsUtils.jsConvertData(self.__js_it, None)
        return 'for(var %s %s %s){%s}' % (self.var, self.options['type'], js_iter, js_n_funcs)
def generateActF3(iterationsMap, iteration, t):
    """Build an EiffelActivityFinishedEvent (ActF3) linked to ActT3, with an
    outcome derived from the iteration's TSF1 verdict."""
    msg = generateGenericMessage('EiffelActivityFinishedEvent', t, '1.0.0', 'ActF3', iteration)
    iteration_data = iterationsMap[iteration]
    link(msg, iteration_data['ActT3'], 'ACTIVITY_EXECUTION')
    conclusion = getOutcomeValuesFromVerdicts([iteration_data['TSF1']], 'SUCCESSFUL', 'UNSUCCESSFUL')
    msg['data']['outcome'] = {'conclusion': conclusion}
    return msg
class TrackTitleFormatPreference(widgets.ComboEntryPreference):
    """Combo-entry preference for the Mini Mode track title format string."""
    # Preference key in the settings store.
    name = 'plugin/minimode/track_title_format'
    # Tag placeholders offered by autocompletion, mapped to display labels.
    completion_items = {'$tracknumber': _('Track number'), '$title': _('Title'), '$artist': _('Artist'), '$composer': _('Composer'), '$album': _('Album'), '$__length': _('Length'), '$discnumber': _('Disc number'), '$__rating': _('Rating'), '$date': _('Date'), '$genre': _('Genre'), '$bitrate': _('Bitrate'), '$__loc': _('Location'), '$filename': _('Filename'), '$__playcount': _('Play count'), '$__last_played': _('Last played'), '$bpm': _('BPM')}
    # Ready-made format presets offered in the combo box.
    preset_items = [_('$tracknumber - $title'), _('$title by $artist'), _('$title ($__length)')]
    # Fallback format used when the preference is unset.
    default = _('$tracknumber - $title')
class InteractiveItem(QGraphicsRectItem):
    """Hoverable rect item that shows its linked node's name in a small
    white label while the pointer is over it."""

    def __init__(self, *arg, **karg):
        super().__init__(*arg, **karg)
        self.node = None
        self.label = None
        self.setCursor(QtCore.Qt.PointingHandCursor)
        self.setAcceptsHoverEvents(True)

    def hoverEnterEvent(self, e):
        if self.label is None:
            # Lazily build the label: a white box with the node name on top,
            # sized to the rendered text.
            box = QGraphicsRectItem()
            box.setParentItem(self)
            box.setZValue(1)
            box.setBrush(QBrush(QColor('white')))
            box.text = QGraphicsSimpleTextItem()
            box.text.setParentItem(box)
            box.text.setText(self.node.name)
            box.setRect(box.text.boundingRect())
            self.label = box
        self.label.setVisible(True)

    def hoverLeaveEvent(self, e):
        if self.label:
            self.label.setVisible(False)
class Selection(OrderOrSelection):
    """Element filter: each keyword maps a metadata key to an accepted
    value set, a callable predicate, or None/ALL (match anything)."""

    def __init__(self, kwargs, remapping=None):
        # Remapping translates raw metadata keys/values before matching.
        self.remapping = build_remapping(remapping)

        class InList():
            # Membership predicate that lazily casts its candidate list to
            # the type of the first non-None value it is tested against, so
            # e.g. string-typed config values can match int metadata.
            def __init__(self, lst):
                self.first = True
                self.lst = lst

            def __call__(self, x):
                if (self.first and (x is not None)):
                    cast = type(x)
                    self.lst = [cast(y) for y in self.lst]
                    self.first = False
                return (x in self.lst)

        # Build one predicate per keyword.
        self.actions = {}
        for (k, v) in kwargs.items():
            if ((v is None) or (v is cml.ALL)):
                # Wildcard: always matches.
                self.actions[k] = (lambda x: True)
                continue
            if callable(v):
                # User-supplied predicate used as-is.
                self.actions[k] = v
                continue
            if (not isinstance(v, (list, tuple, set))):
                # Scalar values become one-element candidate lists.
                v = [v]
            v = set(v)
            self.actions[k] = InList(v)

    def match_element(self, element):
        """True when every predicate accepts the element's (remapped)
        metadata value for its key."""
        metadata = self.remapping(element.metadata)
        return all((v(metadata(k)) for (k, v) in self.actions.items()))
def extractCnobsessionsWordpressCom(item):
    """Parse a release title from cnobsessions.wordpress.com.

    Returns None for previews or unparseable titles, a release message
    when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class FalseNode(ConditionalNode):
    """AST node representing the negative branch of a ConditionNode."""

    def __str__(self) -> str:
        return 'FalseNode'

    def __repr__(self) -> str:
        child_part = type(self.child) if self.child else ''
        return f'FalseNode({self.reaching_condition})\n{child_part}'

    def copy(self) -> FalseNode:
        """Return a new FalseNode sharing this node's reaching condition."""
        return FalseNode(self.reaching_condition)

    def branch_condition(self) -> LogicCondition:
        """The branch condition is the negation of the parent's condition."""
        assert isinstance(self.parent, ConditionNode), 'True and False Nodes must have a ConditionNode as parent!'
        return ~self.parent.condition

    def accept(self, visitor: ASTVisitorInterface[T]) -> T:
        return visitor.visit_false_node(self)
def test_place_electrodes_equal_spacing():
    """Electrode placement matches the reference node indices in both the
    default (clockwise) and counter-clockwise orientations."""
    mesh_obj = load_mesh(parent_dir + '/data/Rectangle.STL')
    plotting_obj = {}
    cw_nodes = place_electrodes_equal_spacing(mesh_obj, n_electrodes=8, starting_angle=np.pi, starting_offset=0, output_obj=plotting_obj)
    ccw_nodes = place_electrodes_equal_spacing(mesh_obj, n_electrodes=8, starting_angle=np.pi, starting_offset=0, counter_clockwise=True)
    assert np.all(cw_nodes == [0, 0, 0, 1, 1, 3, 3, 2])
    assert np.all(ccw_nodes == [0, 2, 3, 3, 1, 1, 0, 0])
class AsnDB():
    """Lazy singleton wrapper around a pyasn IP -> ASN database.

    All lookup helpers degrade gracefully (return empty strings) when the
    database could not be loaded.
    """
    __instance = None
    asndb = None

    @staticmethod
    def instance():
        """Return the shared AsnDB, creating it on first use.

        (Declared static so it also works when invoked on an instance.)
        """
        if AsnDB.__instance is None:
            AsnDB.__instance = AsnDB()
        return AsnDB.__instance

    def __init__(self):
        self.ASN_AVAILABLE = True
        self.load()

    def is_available(self):
        """True when the ASN database loaded successfully."""
        return self.ASN_AVAILABLE

    def load(self):
        """Load the ipasn database, preferring the user's config files and
        falling back to the system-wide data files."""
        try:
            if self.asndb is not None:
                # Already loaded; nothing to do.
                return
            import pyasn
            IPASN_DB_PATH = os.path.expanduser('~/.config/opensnitch/ipasn_db.dat.gz')
            AS_NAMES_FILE_PATH = os.path.expanduser('~/.config/opensnitch/asnames.json')
            if not os.path.isfile(IPASN_DB_PATH):
                IPASN_DB_PATH = '/usr/lib/python3/dist-packages/data/ipasn__v12.dat.gz'
            if not os.path.isfile(AS_NAMES_FILE_PATH):
                AS_NAMES_FILE_PATH = '/usr/lib/python3/dist-packages/data/asnames.json'
            print('using IPASN DB:', IPASN_DB_PATH)
            self.asndb = pyasn.pyasn(IPASN_DB_PATH, as_names_file=AS_NAMES_FILE_PATH)
        except Exception as e:
            self.ASN_AVAILABLE = False
            print('exception loading ipasn db:', e)
            print("Install python3-pyasn to display IP's network name.")

    def lookup(self, ip):
        """Return (asn, prefix) for ``ip``, or ('', '') on any failure."""
        try:
            return self.asndb.lookup(ip)
        except Exception:
            return ('', '')

    def get_as_name(self, asn):
        """Return the AS name for ``asn``, or '' when unknown/unavailable."""
        try:
            asname = self.asndb.get_as_name(asn)
            return asname if asname is not None else ''
        except Exception:
            return ''

    def get_asn(self, ip):
        """Return the AS name for ``ip``, or '' when unavailable."""
        try:
            (asn, prefix) = self.lookup(ip)
            return self.get_as_name(asn)
        except Exception:
            return ''
class GptAgent(BaseAgent):
    """Chat agent backed by a GPT chat-completion API with function calling.

    Keeps a rolling message history (with memoized token counts) and packs
    as much of it as fits into each request's token budget.
    """

    _system_prompt: str
    _full_message_history: List[dict]
    _message_tokens: List[int]

    def __init__(self, caller_context: CallerContext):
        super().__init__(caller_context)
        self._system_prompt = _generate_first_prompt()
        # Per-instance history state. These were previously class-level
        # mutable defaults, silently shared by every GptAgent instance.
        self._full_message_history = []
        self._message_tokens = []
        logger.debug(f'Using GptAgent, system prompt is: {self._system_prompt}')
        logger.debug(f'{json.dumps(moduleRegistry.to_json_schema())}')

    async def _feed_prompt_to_get_response(self, prompt):
        """Send one prompt and normalize the model's reply.

        Returns {'speak': text} for plain content replies, or
        {'function': name, 'arguments': dict} for function calls.
        """
        (reply_type, assistant_reply) = (await self._chat_with_ai(self._system_prompt, prompt, CFG.token_limit))
        if reply_type == 'content':
            return {'speak': assistant_reply}
        elif reply_type == 'function_call':
            arguments_string = assistant_reply['arguments']
            try:
                arguments = json.loads(arguments_string)
            except Exception:
                # The model sometimes emits malformed JSON; try to repair it
                # instead of failing. (Was a bare `except:`.)
                arguments = (await fix_json_using_multiple_techniques())
            return {'function': assistant_reply['name'], 'arguments': arguments}

    async def feed_prompt(self, prompt):
        """Handle one user prompt end-to-end: query the model (with up to
        three attempts), then speak the reply or run the requested function."""
        logger.debug(f'Trigger: {prompt}')
        reply: Dict = None
        for i in range(3):
            try:
                if i == 0:
                    reply = (await self._feed_prompt_to_get_response(prompt))
                else:
                    # On retry, nudge the model back to the JSON protocol.
                    reply = (await self._feed_prompt_to_get_response(prompt + '. Remember to reply using the specified JSON form'))
                break
            except Exception as e:
                logger.debug(f'Failed to get reply, try again! {str(e)}')
                continue
        if reply is None:
            (await self._caller_context.reply_text("Sorry, but I don't understand what you want me to do."))
            return
        function_name: str = reply.get('function')
        if function_name is None:
            (await self._caller_context.reply_text(reply['speak']))
        else:
            arguments: Dict = reply['arguments']
            function_result = 'Failed'
            try:
                function_result = (await execute_function(self._caller_context, function_name, **arguments))
            finally:
                # Record the function outcome so the model sees it next turn.
                result = f'{function_result}'
                if result is not None:
                    self.append_history_message_raw({'role': 'function', 'name': function_name, 'content': result})
                    logger.debug(f'function: {result}')
                else:
                    self.append_history_message_raw({'role': 'function', 'name': function_name, 'content': 'Unable to execute function'})
                    logger.debug('function: Unable to execute function')

    def append_history_message(self, role: str, content: str):
        """Append a role/content message; its token count is computed lazily."""
        self._full_message_history.append({'role': role, 'content': content})
        self._message_tokens.append(-1)

    def append_history_message_raw(self, msg: dict):
        """Append an already-shaped message dict; token count computed lazily."""
        self._full_message_history.append(msg)
        self._message_tokens.append(-1)

    def clear_history_messages(self):
        """Forget the whole conversation history."""
        self._full_message_history.clear()
        self._message_tokens.clear()

    def save_history(self, to_where):
        """Persist [token_counts, messages] as JSON at path ``to_where``."""
        with open(to_where, 'w') as f:
            assert len(self._message_tokens) == len(self._full_message_history)
            s = json.dumps([self._message_tokens, self._full_message_history])
            f.write(s)

    def load_history(self, from_where):
        """Best-effort restore of a history saved by save_history()."""
        with contextlib.suppress(Exception):
            with open(from_where, 'r') as f:
                tmp = json.loads(f.read())
                if isinstance(tmp, list) and (len(tmp[0]) == len(tmp[1])):
                    self._message_tokens = tmp[0]
                    self._full_message_history = tmp[1]

    async def _chat_with_ai(self, prompt, user_input, token_limit):
        """Call the chat-completion API with as much history as fits.

        Retries indefinitely on rate limiting. Returns (reply_type, reply).
        """
        while True:
            try:
                model = CFG.llm_model
                # Reserve 1000 tokens for the model's answer.
                send_token_limit = token_limit - 1000
                (next_message_to_add_index, current_tokens_used, insertion_index, current_context) = (await self._generate_context(prompt, model))
                current_tokens_used += (await token_counter.count_message_tokens([{'role': 'user', 'content': user_input}], model))
                current_tokens_used += (await token_counter.count_message_tokens([{'role': 'user', 'content': json.dumps(moduleRegistry.to_json_schema())}], model))
                # Insert history newest-first until the budget is exhausted.
                while next_message_to_add_index >= 0:
                    tokens_to_add = (await self._get_history_message_tokens(next_message_to_add_index, model))
                    if (current_tokens_used + tokens_to_add) > send_token_limit:
                        break
                    message_to_add = self._full_message_history[next_message_to_add_index]
                    current_context.insert(insertion_index, message_to_add)
                    current_tokens_used += tokens_to_add
                    next_message_to_add_index -= 1
                current_context.extend([{'role': 'user', 'content': user_input}])
                tokens_remaining = token_limit - current_tokens_used
                assert tokens_remaining >= 0

                async def on_single_chat_timeout(will_retry):
                    (await self._caller_context.push_notification(f"Thinking timeout{(', retry' if will_retry else ', give up')}."))
                (reply_type, assistant_reply) = (await gpt.acreate_chat_completion(model=model, messages=current_context, temperature=CFG.temperature, max_tokens=tokens_remaining, on_single_request_timeout=on_single_chat_timeout, functions=moduleRegistry.to_json_schema()))
                # Record the exchange so future requests can include it.
                if reply_type == 'content':
                    self.append_history_message('user', user_input)
                    self.append_history_message('assistant', assistant_reply)
                elif reply_type == 'function_call':
                    self.append_history_message('user', user_input)
                    self.append_history_message_raw({'role': 'assistant', 'function_call': assistant_reply, 'content': None})
                else:
                    assert False, 'Unexpected reply type'
                return (reply_type, assistant_reply)
            except RateLimitError:
                print('Error: ', 'API Rate Limit Reached. Waiting 10 seconds...')
                (await asyncio.sleep(10))

    async def _generate_context(self, prompt, model):
        """Build the fixed context head (system prompt + current time).

        Returns (next_history_index, tokens_used, insertion_index, context).
        """
        # Shift to the caller's timezone before formatting the clock string.
        timestamp = (time.time() + time.timezone) + (self._caller_context.get_tz_offset() * 3600)
        time_str = time.strftime('%c', time.localtime(timestamp))
        current_context = [{'role': 'system', 'content': prompt}, {'role': 'system', 'content': f'The current time and date is {time_str}'}]
        next_message_to_add_index = len(self._full_message_history) - 1
        insertion_index = len(current_context)
        current_tokens_used = (await token_counter.count_message_tokens(current_context, model))
        return (next_message_to_add_index, current_tokens_used, insertion_index, current_context)

    async def _get_history_message_tokens(self, index, model: str = 'gpt-3.5-turbo-0301') -> int:
        """Token count for one history message, memoized in _message_tokens."""
        if self._message_tokens[index] == -1:
            self._message_tokens[index] = (await token_counter.count_message_tokens([self._full_message_history[index]], model))
        return self._message_tokens[index]
.parametrize('filename, expected_format', (list(files_formats.items()) + [(pathlib.Path('README.md'), 'unknown')]))
def test_detect_fformat_suffix_only(testpath, filename, expected_format):
xtgeo_file = xtgeo._XTGeoFile((testpath / filename))
assert (xtgeo_file.detect_fformat(suffixonly=True) == expected_format) |
def filter_firewall_region_data(json):
    """Keep only the known region attributes ('city', 'id', 'name') that are
    present with non-None values, after stripping invalid fields."""
    option_list = ['city', 'id', 'name']
    json = remove_invalid_fields(json)
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
class OptionPlotoptionsArcdiagramSonificationDefaultspeechoptionsActivewhen(Options):
    """Accessors for the `activeWhen` default speech options.

    Each option is a property: reading returns the configured value
    (default None); assigning stores it. The duplicated method definitions
    were getter/setter pairs whose property decorators had been lost,
    leaving the getters as dead code — restored here.
    """

    @property
    def crossingDown(self):
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class PublishedAwardFinancial():
    """Chunked access to published award financial rows for one submission."""

    def __init__(self, submission_attributes, db_cursor, chunk_size):
        self.submission_attributes = submission_attributes
        self.db_cursor = db_cursor
        self.chunk_size = chunk_size

    # The garbled `_property` marker was a stripped @property decorator.
    @property
    def count(self):
        """Number of matching rows for this submission."""
        sql = f'select count(*) {self.get_from_where(self.submission_attributes.submission_id)}'
        self.db_cursor.execute(sql)
        return self.db_cursor.fetchall()[0][0]

    def account_nums(self):
        """Distinct non-null account numbers for this submission."""
        sql = f'''
        select distinct c.account_num
        {self.get_from_where(self.submission_attributes.submission_id)}
        and c.account_num is not null
        '''
        self.db_cursor.execute(sql)
        return dictfetchall(self.db_cursor)

    @staticmethod
    def get_from_where(submission_id):
        """FROM/WHERE clause selecting rows with any non-zero amount.

        Declared static: previously `self.get_from_where(x)` passed `self`
        as ``submission_id`` and raised TypeError on the extra argument.
        """
        return f'''
        from published_award_financial c
        inner join submission s on s.submission_id = c.submission_id
        where s.submission_id = {submission_id} and
        (
            COALESCE(c.transaction_obligated_amou, 0) != 0
            or COALESCE(c.gross_outlay_amount_by_awa_cpe, 0) != 0
            or COALESCE(c.ussgl487200_downward_adjus_cpe, 0) != 0
            or COALESCE(c.ussgl497200_downward_adjus_cpe, 0) != 0
        )
        '''

    def __iter__(self):
        return PublishedAwardFinancialIterator(self.submission_attributes, self.chunk_size)
def test_should_guess_all_matching_statements():
    """`guess` expands statements with related actions while keeping the
    original statements intact and in order."""
    input_policy = PolicyDocument(
        Version='2012-10-17',
        Statement=[
            Statement(Effect='Allow', Action=[Action('autoscaling', 'DescribeLaunchConfigurations')], Resource=['*']),
            Statement(Effect='Allow', Action=[Action('sts', 'AssumeRole')], Resource=['arn:aws:iam:::role/someRole']),
        ],
    )
    expected_output = PolicyDocument(
        Version='2012-10-17',
        Statement=[
            Statement(Effect='Allow', Action=[Action('autoscaling', 'DescribeLaunchConfigurations')], Resource=['*']),
            Statement(Effect='Allow', Action=[Action('autoscaling', 'CreateLaunchConfiguration'), Action('autoscaling', 'DeleteLaunchConfiguration')], Resource=['*']),
            Statement(Effect='Allow', Action=[Action('sts', 'AssumeRole')], Resource=['arn:aws:iam:::role/someRole']),
        ],
    )
    cli_result = CliRunner().invoke(cli.root_group, args=['guess'], input=input_policy.to_json())
    assert cli_result.exit_code == 0
    assert parse_policy_document(cli_result.output) == expected_output
def StockCutter1D(child_rolls, parent_rolls, output_json=True, large_model=True):
    """Solve a one-dimensional cutting-stock problem.

    Args:
        child_rolls: demanded rolls as (quantity, width) pairs.
        parent_rolls: stock rolls; only the width of the first entry is used
            (all parent rolls are assumed to share one width).
        output_json: when True return the solution as a JSON string, otherwise
            return the raw list of consumed rolls.
        large_model: when True use the large (column-based) model, otherwise
            the small direct model.

    Returns:
        [] when a demand exceeds the parent width; otherwise the solution in
        the format selected by ``output_json``.
    """
    parent_width = parent_rolls[0][1]
    if not checkWidths(demands=child_rolls, parent_width=parent_width):
        return []
    print('child_rolls', child_rolls)
    print('parent_rolls', parent_rolls)
    if not large_model:
        print('Running Small Model...')
        (status, numRollsUsed, consumed_big_rolls, unused_roll_widths, wall_time) = solve_model(demands=child_rolls, parent_width=parent_width)
        print('consumed_big_rolls before adjustment: ', consumed_big_rolls)
        # Normalize the solver output: drop rolls with no cuts and flatten any
        # nested sub-roll lists.  BUGFIX: the previous code removed empty
        # entries from ``consumed_big_rolls`` while iterating over it, which
        # silently skips the element following each removal; building a fresh
        # list avoids mutating the sequence being iterated.
        new_consumed_big_rolls = []
        for big_roll in consumed_big_rolls:
            if len(big_roll) < 2:
                # No cuts were placed on this roll - nothing to report.
                continue
            unused_width = big_roll[0]
            subrolls = []
            for subitem in big_roll[1:]:
                if isinstance(subitem, list):
                    subrolls.extend(subitem)
                else:
                    subrolls.append(subitem)
            new_consumed_big_rolls.append([unused_width, subrolls])
        print('consumed_big_rolls after adjustment: ', new_consumed_big_rolls)
        consumed_big_rolls = new_consumed_big_rolls
    else:
        print('Running Large Model...')
        (status, A, y, consumed_big_rolls) = solve_large_model(demands=child_rolls, parent_width=parent_width)
        numRollsUsed = len(consumed_big_rolls)
    STATUS_NAME = ['OPTIMAL', 'FEASIBLE', 'INFEASIBLE', 'UNBOUNDED', 'ABNORMAL', 'NOT_SOLVED']
    # NOTE(review): for the small model, numRollsUsed is the solver's count and
    # may exceed len(consumed_big_rolls) after empty rolls are dropped above.
    output = {'statusName': STATUS_NAME[status], 'numSolutions': '1', 'numUniqueSolutions': '1', 'numRollsUsed': numRollsUsed, 'solutions': consumed_big_rolls}
    print('numRollsUsed', numRollsUsed)
    print('Status:', output['statusName'])
    print('Solutions found :', output['numSolutions'])
    print('Unique solutions: ', output['numUniqueSolutions'])
    if output_json:
        return json.dumps(output)
    else:
        return consumed_big_rolls
def test_form_and_submission_deletion(client, msend):
    """End-to-end check of submission deletion and form-deletion authorization.

    Flow: register a gold user, create a form via the API, confirm it, post
    submissions, delete one submission, then verify that only the owner can
    delete the form itself.
    """
    # Register the owner and upgrade to the gold plan (required for API forms).
    r = client.post('/register', data={'email': '', 'password': 'friend'})
    assert (r.status_code == 302)
    assert (1 == User.query.count())
    user = User.query.filter_by(email='').first()
    user.plan = Plan.gold
    DB.session.add(user)
    DB.session.commit()
    # Create a form through the internal API.
    r = client.post('/api-int/forms', headers={'Accept': 'application/json', 'Content-type': 'application/json', 'Referer': settings.SERVICE_URL}, data=json.dumps({'email': ''}))
    resp = json.loads(r.data.decode('utf-8'))
    assert (r.status_code == 200)
    assert ('submission_url' in resp)
    assert ('hashid' in resp)
    form_endpoint = resp['hashid']
    assert (resp['hashid'] in resp['submission_url'])
    assert (1 == Form.query.count())
    assert (Form.query.first().id == Form.get_with_hashid(resp['hashid']).id)
    # First post triggers the confirmation flow; the submission is not stored.
    r = client.post(('/' + form_endpoint), headers={'Referer': 'formspree.io'}, data={'name': 'bruce'})
    form = Form.query.first()
    client.get(('/confirm/%s:%s' % (HASH(form.email, str(form.id)), form.hashid)))
    assert Form.query.first().confirmed
    assert (0 == Submission.query.count())
    # Post five submissions with a raised archive limit so all are kept.
    old_submission_limit = settings.ARCHIVED_SUBMISSIONS_LIMIT
    settings.ARCHIVED_SUBMISSIONS_LIMIT = 10
    for i in range(5):
        r = client.post(('/' + form_endpoint), headers={'Referer': 'formspree.io'}, data={'name': 'ana', 'submission': ('__%s__' % i)})
    assert (5 == Submission.query.count())
    # Deleting one submission removes exactly that row.
    first_submission = Submission.query.first()
    r = client.delete(((('/api-int/forms/' + form_endpoint) + '/submissions/') + str(first_submission.id)), headers={'Referer': settings.SERVICE_URL})
    assert (200 == r.status_code)
    assert (4 == Submission.query.count())
    assert (DB.session.query(Submission.id).filter_by(id='0').scalar() is None)
    # Logged-out users cannot delete the form.
    client.get('/logout')
    r = client.delete(('/api-int/forms/' + form_endpoint), headers={'Referer': settings.SERVICE_URL})
    assert (401 == r.status_code)
    assert (1 == Form.query.count())
    # A different user cannot delete it either.
    r = client.post('/register', data={'email': '', 'password': 'america'})
    r = client.delete(('/api-int/forms/' + form_endpoint), headers={'Referer': settings.SERVICE_URL})
    assert (401 == r.status_code)
    assert (1 == Form.query.count())
    # The owner can delete the form.
    client.get('/logout')
    r = client.post('/login', data={'email': '', 'password': 'friend'})
    r = client.delete(('/api-int/forms/' + form_endpoint), headers={'Referer': settings.SERVICE_URL})
    assert (200 == r.status_code)
    assert (0 == Form.query.count())
    settings.ARCHIVED_SUBMISSIONS_LIMIT = old_submission_limit
class OptionPlotoptionsStreamgraphZones(Options):
    """Highcharts ``plotOptions.streamgraph.zones`` configuration options.

    NOTE(review): the extracted source showed each option as two undecorated
    ``def`` statements of the same name (the second silently shadowing the
    first); restored the @property/@<name>.setter pairs the getter/setter
    bodies clearly imply.
    """

    @property
    def className(self):
        """CSS class name for the zone (styled mode)."""
        return self._config_get(None)

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        """Color of the series in this zone."""
        return self._config_get(None)

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def dashStyle(self):
        """Dash style of the graph in this zone."""
        return self._config_get(None)

    @dashStyle.setter
    def dashStyle(self, text: str):
        self._config(text, js_type=False)

    @property
    def fillColor(self):
        """Fill color of the series in this zone."""
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)
def push_wire_types_data(uclass):
    """Build the data needed to generate wire-type "push" code for *uclass*.

    Returns None for virtual classes or classes without type members.
    """
    if (uclass.virtual or (not uclass.has_type_members)):
        return None
    # For each wire version, collect the constant type members to be written.
    type_members_by_version = {}
    for (version, ofclass) in sorted(uclass.version_classes.items()):
        pwtms = []
        for m in ofclass.members:
            if isinstance(m, ir.OFTypeMember):
                if ((m.name == 'version') and (m.value == version.wire_version)):
                    # The version field is pushed from the runtime object, not a constant.
                    pwtms.append(PushWireTypesMember(m.name, m.offset, m.length, 'obj->version'))
                else:
                    pwtms.append(PushWireTypesMember(m.name, m.offset, m.length, hex(m.value)))
        type_members_by_version[version] = pwtms
    all_versions = sorted(type_members_by_version.keys())
    versioned_type_members = []
    # groupby yields (key, group): consecutive versions that share an identical
    # member list are collapsed into a single (members, [versions]) entry.
    for (pwtms, versions) in groupby(all_versions, type_members_by_version.get):
        versioned_type_members.append((pwtms, list(versions)))
    return PushWireTypesData(class_name=uclass.name, versioned_type_members=versioned_type_members)
class Session():
    """A client session that fans sample batches out to its registered streams.

    Tracks, per stream id, the registered descriptor and a running batch
    counter used to sequence outgoing ``stream_batch`` messages.
    """
    session_id: int
    _active: bool
    _stream_id_to_stream_desc: Dict
    _stream_id_to_batch_num: DefaultDict

    def __init__(self, session_id: int, enums: object):
        self.session_id = session_id
        self.enums = enums
        self._active = True
        self._stream_id_to_batch_num = defaultdict(int)
        self._stream_id_to_stream_desc = {}

    async def send_samples(self, samples: List, stream_name: str) -> bool:
        """Send *samples* to every registered stream named *stream_name*.

        Returns False when the session is inactive or any write fails.
        Streams whose write fails are removed from the session.
        """
        if not self._active:
            return False
        success = True
        for stream_desc in self.get_streams(stream_name):
            if samples is None:
                continue
            message = json.dumps({'stream_batch': {'stream_id': stream_desc.stream_id, stream_name: {'samples': samples, 'batch_num': self.increment_and_get_batch_num(stream_desc.stream_id)}}})
            if not (await self.write(data=message)):
                self.remove_stream(stream_desc)
                success = False
        return success

    async def write(self, data: str) -> bool:
        """Transport-specific write; subclasses must implement."""
        raise NotImplementedError()

    def get_streams(self, stream_name: str) -> List[ApiStreamDesc]:
        """All registered descriptors whose stream name matches *stream_name*."""
        return [desc for desc in self._stream_id_to_stream_desc.values() if desc.stream_name == stream_name]

    def increment_and_get_batch_num(self, stream_id: str) -> int:
        """Advance and return the batch counter for *stream_id* (first call -> 1)."""
        self._stream_id_to_batch_num[stream_id] += 1
        return self._stream_id_to_batch_num[stream_id]

    def is_active(self) -> bool:
        return self._active

    def is_stream_name_active(self, stream_name: str) -> bool:
        """True when at least one stream with this name is registered."""
        return any(desc.stream_name == stream_name for desc in self._stream_id_to_stream_desc.values())

    def get_stream_name(self, stream_name: str) -> ApiStreamDesc:
        """First descriptor registered under *stream_name*, or None."""
        return next((desc for desc in self._stream_id_to_stream_desc.values() if desc.stream_name == stream_name), None)

    def add_stream(self, stream_desc: ApiStreamDesc) -> None:
        """Register *stream_desc* under its stream id."""
        self._stream_id_to_stream_desc[stream_desc.stream_id] = stream_desc

    def remove_stream(self, stream_desc: ApiStreamDesc) -> bool:
        """Forget *stream_desc*; returns True when it was registered.

        BUGFIX: the batch counter is discarded with ``pop(..., None)`` — the
        previous plain ``del`` raised KeyError for streams that never sent a
        batch, because defaultdict only creates entries on read access.
        """
        stream_id = stream_desc.stream_id
        if stream_id not in self._stream_id_to_stream_desc:
            return False
        del self._stream_id_to_stream_desc[stream_id]
        self._stream_id_to_batch_num.pop(stream_id, None)
        return True
class VisualizationEvaluator(DatasetEvaluator):
    """Writes ground-truth and prediction images to TensorBoard during evaluation.

    NOTE(review): depends on detectron2-style catalogs and a TensorBoard writer
    supplied by the surrounding project; behavior below is documented from this
    block alone.
    """
    # Distinguishes successive evaluator instances when no train_iter is given.
    _counter = 0

    def __init__(self, cfg, tbx_writer, dataset_mapper, dataset_name, train_iter=None, tag_postfix=None, visualizer: Optional[Type[VisualizerWrapper]]=None):
        self.tbx_writer = tbx_writer
        self.dataset_mapper = dataset_mapper
        self.dataset_name = dataset_name
        self._visualizer = (visualizer(cfg) if visualizer else VisualizerWrapper(cfg))
        # Fall back to the instance counter so repeated evaluations get distinct tags.
        self.train_iter = (train_iter or VisualizationEvaluator._counter)
        self.tag_postfix = (tag_postfix or '')
        self.log_limit = max(cfg.TENSORBOARD.TEST_VIS_MAX_IMAGES, 0)
        self.log_frequency = cfg.TENSORBOARD.TEST_VIS_WRITE_PERIOD
        self._metadata = None
        self._dataset_dict = None
        self._file_name_to_dataset_dict = None
        # Only load the dataset dict when images will actually be logged.
        if (self.log_limit > 0):
            self._initialize_dataset_dict(dataset_name)
        VisualizationEvaluator._counter += 1
        self.reset()

    def _initialize_dataset_dict(self, dataset_name: str) -> None:
        """Cache the dataset dicts, indexed by file name for O(1) lookups."""
        self._metadata = MetadataCatalog.get(dataset_name)
        self._dataset_dict = DatasetCatalog.get(dataset_name)
        self._file_name_to_dataset_dict = {dic['file_name']: dic for dic in self._dataset_dict}

    def reset(self):
        """Restart the per-evaluation iteration counter and remaining-image budget."""
        self._iter = 0
        self._log_remaining = self.log_limit

    def process(self, inputs, outputs):
        """Log GT and prediction visualizations for this batch, subject to
        the write-period and remaining-image budget."""
        if ((self.log_frequency == 0) or ((self._iter % self.log_frequency) != 0) or (self._log_remaining <= 0)):
            self._iter += 1
            return
        for (input, output) in zip(inputs, outputs):
            file_name = input['file_name']
            dataset_dict = self._file_name_to_dataset_dict[file_name]
            gt_img = self._visualizer.visualize_dataset_dict(self.dataset_name, self.dataset_mapper, dataset_dict)
            pred_img = self._visualizer.visualize_test_output(self.dataset_name, self.dataset_mapper, input, output)
            tag_base = f'{self.dataset_name}{self.tag_postfix}/eval_iter_{self._iter}/{file_name}'
            self.tbx_writer._writer.add_image(f'{tag_base}/GT', gt_img, self.train_iter, dataformats='HWC')
            # Visualizers may return a single image or a dict of named images.
            if (not isinstance(pred_img, dict)):
                pred_img = {'Pred': pred_img}
            for img_type in pred_img.keys():
                self.tbx_writer._writer.add_image(f'{tag_base}/{img_type}', pred_img[img_type], self.train_iter, dataformats='HWC')
            self._log_remaining -= 1
        self._iter += 1

    def has_finished_process(self):
        # Logging happens entirely in process(); nothing to aggregate.
        return True
class OptionSeriesLineSonificationTracksMappingVolume(Options):
    """Highcharts ``series.line.sonification.tracks.mapping.volume`` options.

    NOTE(review): the extracted source showed each option as two undecorated
    ``def`` statements of the same name (the second silently shadowing the
    first); restored the @property/@<name>.setter pairs the getter/setter
    bodies clearly imply.
    """

    @property
    def mapFunction(self):
        """Mapping function (e.g. 'linear' or 'logarithmic')."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property to map the volume to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped volume range."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped volume range."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Data-range context the mapping operates within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def cmd_error(obj, fun_name, exc):
    """Normalize *exc* into a command error response and run best-effort cleanup.

    Anything that is not already a CmdError (including None) is wrapped in a
    generic ERR_UNKNOWN CmdError.  Cleanup failures are logged, never raised.
    """
    # None fails the isinstance check too, so one test covers both cases.
    if not isinstance(exc, CmdError):
        detail = CmdError.exception_to_string(exc, debug=obj.verbose)
        exc = CmdError(Cmd.ERR_UNKNOWN, detail)
    response = exc.cmd_output()
    try:
        obj.cleanup(fun_name, response)
    except Exception as cleanup_exc:
        reason = CmdError.exception_to_string(cleanup_exc, debug=obj.verbose)
        obj.log('Cleanup failed! (%s)' % reason)
    return response
@pytest.mark.parametrize('rng_seed', np.arange(0, 15))
def test_graphene(rng_seed, log_capture):
    """Graphene's fitted medium must match its numerical conductivity.

    NOTE(review): the extracted source showed a bare ``.parametrize(...)``
    line here; restored as ``@pytest.mark.parametrize``, the decorator the
    remnant and the ``rng_seed`` parameter clearly imply.
    """
    rng = default_rng(rng_seed)
    # Draw gamma, chemical potential, and temperature uniformly from the
    # supported fitting ranges.
    gamma_min = GRAPHENE_GAMMA_MIN
    gamma_max = GRAPHENE_GAMMA_MAX
    mu_min = GRAPHENE_MU_C_MIN
    mu_max = GRAPHENE_MU_C_MAX
    temp_min = GRAPHENE_TEMP_MIN
    temp_max = GRAPHENE_TEMP_MAX
    freqs = np.linspace(1, GRAPHENE_FIT_FREQ_MAX, GRAPHENE_FIT_NUM_FREQS)
    gamma = (gamma_min + ((gamma_max - gamma_min) * rng.random()))
    mu_c = (mu_min + ((mu_max - mu_min) * rng.random()))
    temp = (temp_min + ((temp_max - temp_min) * rng.random()))
    print(f"Graphene(gamma='{gamma:.6f}', mu_c='{mu_c:.2f}', temp='{temp:.0f}')")
    # Fitted model vs direct numerical conductivity (full model).
    graphene = Graphene(gamma=gamma, mu_c=mu_c, temp=temp)
    sigma1 = graphene.medium.sigma_model(freqs)
    sigma2 = graphene.numerical_conductivity(freqs)
    assert np.allclose(sigma1, sigma2, rtol=0, atol=GRAPHENE_FIT_ATOL)
    # With interband terms disabled, the fit must reduce to the Drude part.
    graphene = Graphene(gamma=gamma, mu_c=mu_c, temp=temp, include_interband=False)
    sigma1 = graphene.medium.sigma_model(freqs)
    sigma2 = graphene.intraband_drude.sigma_model(freqs)
    assert np.allclose(sigma1, sigma2, rtol=0, atol=GRAPHENE_FIT_ATOL)
def test_basic_forwarding3(golden):
    """Schedule a 1-D filter and check cursor forwarding through the rewrites.

    NOTE(review): ``filter1D`` uses Exo DSL types (``size``, ``f32``); the
    extraction may have dropped an ``@proc`` decorator on the inner def —
    confirm against the original test file.
    """
    def filter1D(ow: size, kw: size, x: f32[((ow + kw) - 1)], y: f32[ow], w: f32[kw]):
        for o in seq(0, ow):
            sum: f32
            sum = 0.0
            for k in seq(0, kw):
                sum += (x[(o + k)] * w[k])
            y[o] = sum
    # Split the output loop by 4, cutting and guarding the remainder.
    filter1D = divide_loop(filter1D, 'o', 4, ['outXo', 'outXi'], tail='cut_and_guard')
    sum_c = filter1D.find('sum:_')
    # Widen the scalar accumulator across the inner tile, then hoist the alloc;
    # the cursor must forward correctly through both rewrites.
    filter1D = expand_dim(filter1D, sum_c, '4', 'outXi')
    filter1D = lift_alloc(filter1D, filter1D.forward(sum_c))
    assert (str(filter1D.forward(sum_c)) == golden)
class OptionZaxisTitle(Options):
    """Highcharts ``zAxis.title`` configuration options.

    NOTE(review): the extracted source showed undecorated duplicate ``def``
    pairs plus stray ``.setter`` remnants, confirming that @property and
    @<name>.setter decorators were stripped; they are restored here.
    """

    @property
    def align(self):
        """Alignment of the title relative to the axis; defaults to 'middle'."""
        return self._config_get('middle')

    @align.setter
    def align(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def margin(self):
        return self._config_get(None)

    @margin.setter
    def margin(self, num: float):
        self._config(num, js_type=False)

    @property
    def offset(self):
        return self._config_get(None)

    @offset.setter
    def offset(self, num: float):
        self._config(num, js_type=False)

    @property
    def position3d(self):
        return self._config_get(None)

    @position3d.setter
    def position3d(self, value: Any):
        self._config(value, js_type=False)

    @property
    def reserveSpace(self):
        """Whether to reserve space for the title; defaults to True."""
        return self._config_get(True)

    @reserveSpace.setter
    def reserveSpace(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def rotation(self):
        """Rotation of the title in degrees; defaults to 0."""
        return self._config_get(0)

    @rotation.setter
    def rotation(self, num: float):
        self._config(num, js_type=False)

    @property
    def skew3d(self):
        return self._config_get(None)

    @skew3d.setter
    def skew3d(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def style(self) -> 'OptionZaxisTitleStyle':
        """Nested style options sub-object (getter only)."""
        return self._config_sub_data('style', OptionZaxisTitleStyle)

    @property
    def text(self):
        return self._config_get(None)

    @text.setter
    def text(self, text: str):
        self._config(text, js_type=False)

    @property
    def textAlign(self):
        return self._config_get(None)

    @textAlign.setter
    def textAlign(self, text: str):
        self._config(text, js_type=False)

    @property
    def useHTML(self):
        """Whether to render the title as HTML; defaults to False."""
        return self._config_get(False)

    @useHTML.setter
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def x(self):
        """Horizontal pixel offset of the title; defaults to 0."""
        return self._config_get(0)

    @x.setter
    def x(self, num: float):
        self._config(num, js_type=False)

    @property
    def y(self):
        """Vertical pixel offset of the title; defaults to 0."""
        return self._config_get(0)

    @y.setter
    def y(self, num: float):
        self._config(num, js_type=False)
def export_registry(plugin, args, opts_parser, registry):
    """Serialize all non-built-in commands plus common parser options to JSON."""
    commands = []
    for cmd in registry.get_all_commands():
        # Built-in commands are not part of the exported surface.
        if cmd.built_in:
            continue
        meta = cmd.metadata
        if not isinstance(meta, FunctionInspection):
            logger.warning('Command %s is not instance of FunctionInspection', cmd)
            continue
        commands.append(_fn_to_dict(meta))
    model = {'commands': commands, 'options': _dump_opts_parser_common(opts_parser, plugin)}
    return json.dumps(model)
def get_data_after_challenge() -> list[dict[(str, int)]]:
    """Read the post-challenge save-data block.

    Every value read is appended to ``data`` in the exact order it is consumed
    from the save stream; the two loop counts come from fields read earlier.
    """
    data: list[dict[str, int]] = []

    def read(num_bytes: int) -> dict:
        entry = next_int_len(num_bytes)
        data.append(entry)
        return entry

    read(4)                    # val_22
    read(4)                    # gv_69
    inner_count = read(4)      # val_54: number of 1-byte entries that follow
    read(4)                    # val_118
    for _ in range(inner_count['Value']):
        read(1)                # val_15
    outer_count = read(4)      # val_118: number of 8-byte entries that follow
    read(4)                    # val_54
    for _ in range(outer_count['Value']):
        read(8)                # val_65
    read(4)                    # val_54
    return data
def test_custom_form_base():
    """A user-supplied form_base_class must be used when building create forms."""
    app, db, admin = setup()

    class MyBaseForm(form.BaseForm):
        pass

    Model1, _ = create_models(db)
    view = CustomModelView(Model1, form_base_class=MyBaseForm)
    admin.add_view(view)
    # The generated form class still carries the scaffolded model fields...
    assert hasattr(view._create_form_class, 'test1')
    # ...and instances derive from the custom base class.
    create_form = view.create_form()
    assert isinstance(create_form, MyBaseForm)
def map_vae(pt_module, device='cuda', dtype='float16'):
    """Map Diffusers VAE decoder weights onto AIT parameter names.

    Args:
        pt_module: a torch module or a plain dict of named parameters.
        device: device the converted tensors are moved to.
        dtype: dtype name, resolved via ``torch_dtype_from_str``.

    Returns:
        Dict of AIT parameter name -> tensor.  Encoder and quantization
        weights are skipped; conv kernels are permuted NCHW -> NHWC; attention
        parameters are renamed to the AIT convention.
    """
    if not isinstance(pt_module, dict):
        pt_params = dict(pt_module.named_parameters())
    else:
        pt_params = pt_module
    # Attention parameter renames, checked in order; the first matching suffix
    # wins.  Both the legacy ("query_weight") and the current ("to_q_weight")
    # Diffusers naming conventions map onto the same AIT attention names.
    # This table replaces a 16-branch elif chain with identical semantics.
    attn_suffix_map = (
        ('proj_attn_weight', 'attention_proj_weight'),
        ('to_out_0_weight', 'attention_proj_weight'),
        ('proj_attn_bias', 'attention_proj_bias'),
        ('to_out_0_bias', 'attention_proj_bias'),
        ('query_weight', 'attention_proj_q_weight'),
        ('to_q_weight', 'attention_proj_q_weight'),
        ('query_bias', 'attention_proj_q_bias'),
        ('to_q_bias', 'attention_proj_q_bias'),
        ('key_weight', 'attention_proj_k_weight'),
        ('key_bias', 'attention_proj_k_bias'),
        ('value_weight', 'attention_proj_v_weight'),
        ('value_bias', 'attention_proj_v_bias'),
        ('to_k_weight', 'attention_proj_k_weight'),
        ('to_v_weight', 'attention_proj_v_weight'),
        ('to_k_bias', 'attention_proj_k_bias'),
        ('to_v_bias', 'attention_proj_v_bias'),
    )
    params_ait = {}
    for key, arr in pt_params.items():
        # Only the decoder is mapped; encoder and quantization weights are unused.
        if key.startswith('encoder') or key.startswith('quant'):
            continue
        arr = arr.to(device, dtype=torch_dtype_from_str(dtype))
        key = key.replace('.', '_')
        if ('conv' in key) and ('norm' not in key) and key.endswith('_weight') and (len(arr.shape) == 4):
            # AIT conv kernels are NHWC; permute from PyTorch's NCHW layout.
            params_ait[key] = torch.permute(arr, [0, 2, 3, 1]).contiguous()
            continue
        for old_suffix, new_suffix in attn_suffix_map:
            if key.endswith(old_suffix):
                key = key[:-len(old_suffix)] + new_suffix
                break
        params_ait[key] = arr
    return params_ait
def test_GlyphSet_writeGlyph_formatVersion(tmp_path):
    """Writing into a UFO2 glyph set must emit GLIF 1 and reject other versions."""
    source_set = GlyphSet(GLYPHSETDIR)
    target_set = GlyphSet(tmp_path, ufoFormatVersion=(2, 0))
    glyph = source_set['A']
    # By default a UFO2 glyph set writes GLIF format 1 with no minor version.
    target_set.writeGlyph('A', glyph)
    glif = target_set.getGLIF('A')
    assert (b'format="1"' in glif)
    assert (b'formatMinor' not in glif)
    # An unknown GLIF version is rejected outright...
    with pytest.raises(UnsupportedGLIFFormat):
        target_set.writeGlyph('A', glyph, formatVersion=(0, 0))
    # ...as is a valid GLIF version the UFO format cannot contain.
    with pytest.raises(UnsupportedGLIFFormat, match='Unsupported GLIF format version .*for UFO format version'):
        target_set.writeGlyph('A', glyph, formatVersion=(2, 0))
class LoggingAddressAndPort(ModelNormal):
    """Generated OpenAPI model holding a logging ``address``/``port`` pair.

    NOTE(review): the extracted source showed bare ``_property`` /
    ``_js_args_to_python_args`` statements where decorators belong; restored
    the ``@cached_property`` / ``@convert_js_args_to_python_args`` (plus
    ``@classmethod`` on the alternate constructor) that this generated-model
    layout uses — confirm against the generator template.
    """

    allowed_values = {}
    validations = {}

    @cached_property
    def additional_properties_type():
        # Any type is accepted for undeclared properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    @cached_property
    def openapi_types():
        """Declared attribute types: attribute name -> tuple of accepted types."""
        return {'address': (str,), 'port': (int,)}

    @cached_property
    def discriminator():
        return None

    attribute_map = {'address': 'address', 'port': 'port'}
    read_only_vars = {}
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Alternate constructor used when deserializing server payloads."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Drop unknown keys when the configuration asks for it.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct the model from keyword attributes; positional args are rejected."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class PlayerWidget(QFrame):
    """GUI panel showing one CDJ player: metadata, time, BPM, and waveforms.

    The frame border turns red while the player is on air (see setOnAir).
    """
    # Emitted when the elapsed/remaining time display mode is toggled.
    time_mode_remain_changed_signal = pyqtSignal(bool)

    def __init__(self, player_number, parent):
        super().__init__(parent)
        self.setObjectName('PlayerFrame')
        self.setProperty('on_air', False)
        self.setStyleSheet('#PlayerFrame { border: 3px solid white; } #PlayerFrame[on_air=true] { border: 3px solid red; }')
        self.labels = {}
        self.browse_dialog = None
        self.time_mode_remain = False
        self.show_color_waveform = parent.show_color_waveform
        self.show_color_preview = parent.show_color_preview
        self.parent_gui = parent
        # --- track metadata labels ---
        self.labels['title'] = QLabel(self)
        self.labels['title'].setStyleSheet('QLabel { color: white; font: bold 16pt; }')
        self.labels['title'].setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
        self.labels['artist'] = QLabel(self)
        self.labels['artist'].setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
        self.labels['artist'].setStyleSheet('QLabel { color: white; }')
        self.labels['album'] = QLabel(self)
        self.labels['album'].setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
        self.labels['album'].setStyleSheet('QLabel { color: white; }')
        self.labels['info'] = QLabel(self)
        self.labels['info'].setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
        self.labels['info'].setStyleSheet('QLabel { color: white; }')
        self.labels['player_number'] = QLabel(self)
        self.labels['player_number'].setStyleSheet('QLabel { font: bold 12pt; qproperty-alignment: AlignCenter; background-color: white; color: black; }')
        self.setPlayerNumber(player_number)
        # Artwork placeholder: dark gray square until real art arrives.
        self.labels['artwork'] = QLabel(self)
        self.pixmap_empty = QPixmap(80, 80)
        self.pixmap_empty.fill(QColor(40, 40, 40))
        self.labels['artwork'].setPixmap(self.pixmap_empty)
        # --- menu button and its actions ---
        self.menu_button = QPushButton('MENU', self)
        self.menu_button.setFlat(True)
        self.menu_button.setStyleSheet('QPushButton { color: white; font: 10px; background-color: black; padding: 1px; border-style: outset; border-radius: 2px; border-width: 1px; border-color: gray; }')
        self.menu = QMenu(self.menu_button)
        action_browse = self.menu.addAction('Browse Media')
        action_browse.triggered.connect(self.openBrowseDialog)
        action_download = self.menu.addAction('Download track')
        action_download.triggered.connect(self.downloadTrack)
        action_start = self.menu.addAction('Start playback')
        action_start.triggered.connect(self.playbackStart)
        action_stop = self.menu.addAction('Stop playback')
        action_stop.triggered.connect(self.playbackStop)
        self.menu_button.setMenu(self.menu)
        self.labels['play_state'] = QLabel(self)
        self.labels['play_state'].setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
        buttons_layout = QHBoxLayout()
        buttons_layout.addWidget(self.menu_button)
        buttons_layout.addWidget(self.labels['play_state'])
        buttons_layout.setStretch(1, 1)
        buttons_layout.setSpacing(3)
        # --- time display: clickable ELAPSED/REMAIN toggle + beat bar ---
        self.elapsed_label = ClickableLabel('ELAPSED', self)
        self.elapsed_label.setStyleSheet('QLabel { color: white; } QLabel:disabled { color: gray; }')
        self.remain_label = ClickableLabel('REMAIN', self)
        self.remain_label.setStyleSheet('QLabel { color: white; } QLabel:disabled { color: gray; }')
        self.remain_label.setEnabled(False)
        self.time = ClickableLabel(self)
        self.time.setStyleSheet('QLabel { color: white; font: 32px; qproperty-alignment: AlignRight; }')
        self.time.setMaximumHeight(32)
        self.total_time_label = QLabel('TOTAL', self)
        self.total_time = QLabel(self)
        self.total_time.setStyleSheet('QLabel { color: white; font: 32px; qproperty-alignment: AlignRight; }')
        self.total_time.setMaximumHeight(32)
        self.beat_bar = BeatBarWidget(self)
        time_layout = QGridLayout()
        time_layout.addWidget(self.elapsed_label, 0, 0)
        time_layout.addWidget(self.remain_label, 1, 0)
        time_layout.addWidget(self.time, 0, 1, 2, 1)
        time_layout.addWidget(self.total_time_label, 2, 0)
        time_layout.addWidget(self.total_time, 2, 1, 2, 1)
        time_layout.addWidget(self.beat_bar, 4, 0, 1, 2)
        time_layout.addLayout(buttons_layout, 5, 0, 1, 2)
        time_layout.setHorizontalSpacing(0)
        # Clicking any time element toggles elapsed/remaining display.
        self.elapsed_label.clicked.connect(self.toggleTimeMode)
        self.remain_label.clicked.connect(self.toggleTimeMode)
        self.time.clicked.connect(self.toggleTimeMode)
        self.time_mode_remain_changed_signal.connect(self.setTimeMode)
        # --- waveform displays ---
        self.waveform = GLWaveformWidget(self)
        self.waveform.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.preview_waveform = PreviewWaveformWidget(self)
        qsp = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
        qsp.setHeightForWidth(True)
        self.preview_waveform.setSizePolicy(qsp)
        # --- BPM / pitch / master / sync box ---
        bpm_label = QLabel('BPM', self)
        bpm_label.setContentsMargins(4, 0, 4, 0)
        bpm_label.setStyleSheet('QLabel { color: white; font: bold 8pt; qproperty-alignment: AlignLeft; }')
        self.labels['slot'] = QLabel('', self)
        self.labels['slot'].setContentsMargins(4, 0, 4, 0)
        self.labels['slot'].setStyleSheet('QLabel { color: white; font: bold 8pt; qproperty-alignment: AlignLeft; }')
        self.labels['bpm'] = QLabel(self)
        self.labels['bpm'].setContentsMargins(4, 0, 4, 0)
        self.labels['bpm'].setStyleSheet('QLabel { color: white; font: bold 16pt; qproperty-alignment: AlignRight; }')
        # '+80.00%' is a worst-case-width placeholder used to fix the minimum size below.
        self.labels['pitch'] = QLabel('+80.00%', self)
        self.labels['pitch'].setContentsMargins(4, 0, 4, 0)
        self.labels['pitch'].setStyleSheet('QLabel { color: white; font: bold 14pt; qproperty-alignment: AlignRight; }')
        self.labels['pitch'].show()
        self.labels['pitch'].setMinimumSize(self.labels['pitch'].size())
        self.labels['master'] = QLabel('MASTER', self)
        self.labels['master'].setStyleSheet('QLabel { font: bold; qproperty-alignment: AlignCenter; background-color: green; color: black; } QLabel:disabled { background-color: gray; }')
        self.labels['sync'] = QLabel('SYNC', self)
        self.labels['sync'].setStyleSheet('QLabel { font: bold; qproperty-alignment: AlignCenter; background-color: blue; color: black; } QLabel:disabled { background-color: gray; }')
        speed_top_layout = QHBoxLayout()
        speed_top_layout.addWidget(bpm_label)
        speed_top_layout.addWidget(self.labels['slot'])
        speed_top_layout.setSpacing(1)
        bpm_box = QFrame(self)
        bpm_box.setFrameStyle((QFrame.Box | QFrame.Plain))
        speed_layout = QVBoxLayout(bpm_box)
        speed_layout.addLayout(speed_top_layout)
        speed_layout.addWidget(self.labels['bpm'])
        speed_layout.addWidget(self.labels['pitch'])
        speed_layout.addWidget(self.labels['master'])
        speed_layout.addWidget(self.labels['sync'])
        speed_layout.setSpacing(0)
        speed_layout.setContentsMargins(0, 0, 0, 0)
        # --- overall grid layout ---
        layout = QGridLayout(self)
        layout.addWidget(self.labels['player_number'], 0, 0)
        layout.addWidget(self.labels['artwork'], 1, 0, 3, 1)
        layout.addWidget(self.labels['title'], 0, 1)
        layout.addWidget(self.labels['artist'], 1, 1)
        layout.addWidget(self.labels['album'], 2, 1)
        layout.addWidget(self.labels['info'], 3, 1)
        layout.addLayout(time_layout, 0, 2, 4, 1)
        layout.addWidget(bpm_box, 0, 3, 4, 1)
        layout.addWidget(self.waveform, 4, 0, 1, 4)
        layout.addWidget(self.preview_waveform, 5, 0, 1, 4)
        layout.setColumnStretch(1, 2)
        self.reset()

    def unload(self):
        """Clear all track-specific display state (metadata, times, waveforms)."""
        self.setMetadata('Not loaded', '', '')
        self.setArtwork(None)
        self.setTime(None)
        self.setTotalTime(None)
        self.beat_bar.setBeat(0)
        self.waveform.clear()
        self.preview_waveform.clear()

    def reset(self):
        """Reset to the disconnected state: unload and clear player status."""
        self.unload()
        self.labels['info'].setText('No player connected')
        self.setSpeed('')
        self.setMaster(False)
        self.setSync(False)
        self.track_id = 0

    def setPlayerNumber(self, player_number):
        """Update the displayed player number and forward it to an open browser."""
        self.player_number = player_number
        self.labels['player_number'].setText('PLAYER {}'.format(self.player_number))
        if (self.browse_dialog is not None):
            self.browse_dialog.setPlayerNumber(player_number)

    def setMaster(self, master):
        # Enabled/disabled drives the green/gray stylesheet state.
        self.labels['master'].setEnabled(master)

    def setSync(self, sync):
        # Enabled/disabled drives the blue/gray stylesheet state.
        self.labels['sync'].setEnabled(sync)

    def setPlayerInfo(self, model, ip_addr, fw=''):
        self.labels['info'].setText('{} {} {}'.format(model, fw, ip_addr))

    def setSpeed(self, bpm, pitch=None):
        """Display pitch percentage and the pitch-adjusted BPM.

        A non-numeric *bpm* (e.g. the empty string from reset) shows '--.--'.
        """
        if (pitch is None):
            pitch = 1
        self.labels['pitch'].setText('{:+.2f}%'.format(((pitch - 1) * 100)))
        if isinstance(bpm, str):
            self.labels['bpm'].setText('--.--')
        else:
            pitched_bpm = (bpm * pitch)
            self.labels['bpm'].setText('{:.2f}'.format(pitched_bpm))

    def setMetadata(self, title, artist, album):
        self.labels['title'].setText(title)
        self.labels['artist'].setText(artist)
        self.labels['album'].setText(album)

    def setArtwork(self, data):
        """Show artwork from raw image bytes, or the gray placeholder for None."""
        if (data is None):
            self.labels['artwork'].setPixmap(self.pixmap_empty)
        else:
            p = QPixmap()
            p.loadFromData(data)
            self.labels['artwork'].setPixmap(p)

    def setTime(self, seconds, total=None):
        """Show elapsed time, or remaining time (prefixed '-') when in remain mode.

        NOTE(review): when *total* is unknown, remain mode still prefixes '-'
        to the elapsed value — confirm this is intended.
        """
        if (seconds is not None):
            if ((total is not None) and self.time_mode_remain):
                seconds = ((total - seconds) if (total > seconds) else 0)
            self.time.setText('{}{:02d}:{:02d}'.format(('' if (self.time_mode_remain == False) else '-'), int((seconds // 60)), (int(seconds) % 60)))
        else:
            self.time.setText('00:00')

    def setTotalTime(self, seconds):
        self.total_time.setText('{:02d}:{:02d}'.format(int((seconds // 60)), (int(seconds) % 60))) if (seconds is not None) else None
        if (seconds is not None):
            pass
        else:
            self.total_time.setText('00:00')

    def setPlayState(self, state):
        self.labels['play_state'].setText(printableField(state))

    def setSlotInfo(self, player, slot):
        self.labels['slot'].setText(f'{player} {slot.upper()}')

    def toggleTimeMode(self):
        # Broadcast so all interested widgets switch mode together.
        self.time_mode_remain_changed_signal.emit((not self.time_mode_remain))

    def setTimeMode(self, time_mode_remain):
        """Switch elapsed/remaining display and highlight the active label."""
        self.time_mode_remain = time_mode_remain
        self.elapsed_label.setEnabled((not self.time_mode_remain))
        self.remain_label.setEnabled(self.time_mode_remain)

    def openBrowseDialog(self):
        # NOTE(review): uses self.parent() here but self.parent_gui elsewhere —
        # presumably the same object; confirm.
        if (self.browse_dialog is None):
            self.browse_dialog = Browser(self.parent().prodj, self.player_number)
        self.browse_dialog.show()

    def downloadTrack(self):
        """Request an NFS download of the currently loaded track."""
        logging.info('Player %d track download requested', self.player_number)
        c = self.parent().prodj.cl.getClient(self.player_number)
        if (c is None):
            logging.error('Download failed, player %d unknown', self.player_number)
            return
        self.parent().prodj.data.get_mount_info(c.loaded_player_number, c.loaded_slot, c.track_id, self.parent().prodj.nfs.enqueue_download_from_mount_info)

    def playbackStart(self):
        self.parent_gui.prodj.vcdj.command_fader_start_single(self.player_number, start=True)

    def playbackStop(self):
        self.parent_gui.prodj.vcdj.command_fader_start_single(self.player_number, start=False)

    def hideEvent(self, event):
        # Close the child browser along with this widget.
        if (self.browse_dialog is not None):
            self.browse_dialog.close()
        event.accept()

    def setOnAir(self, on_air):
        """Toggle the red on-air border by re-polishing the dynamic property."""
        self.setProperty('on_air', on_air)
        self.style().unpolish(self)
        self.style().polish(self)
        self.update()
class TestSystem:
    """Validation tests for the System model and its PrivacyDeclarations."""

    def test_system_valid(self) -> None:
        """A fully-specified System with cookies and flows constructs successfully."""
        assert System(description='Test Policy', egress=[DataFlow(fides_key='test_system_2', type='system', data_categories=[])], fides_key='test_system', ingress=[DataFlow(fides_key='test_system_3', type='system', data_categories=[])], meta={'some': 'meta stuff'}, name='Test System', organization_fides_key=1, cookies=[{'name': 'test_cookie'}], privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', egress=['test_system_2'], ingress=['test_system_3'], name='declaration-name', cookies=[{'name': 'test_cookie', 'path': '/', 'domain': 'example.com'}])], system_type='SYSTEM', tags=['some', 'tags'])

    def test_system_valid_nested_meta(self) -> None:
        """Nested dictionaries are accepted in the free-form meta field."""
        # NOTE(review): the key 'some' appears twice in this literal; the second
        # (nested) value silently wins -- probably meant to be distinct keys.
        assert System(description='Test Policy', egress=[DataFlow(fides_key='test_system_2', type='system', data_categories=[])], fides_key='test_system', ingress=[DataFlow(fides_key='test_system_3', type='system', data_categories=[])], meta={'some': 'meta stuff', 'some': {'nested': 'meta stuff', 'more nested': 'meta stuff'}, 'some more': {'doubly': {'nested': 'meta stuff'}}}, name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', egress=['test_system_2'], ingress=['test_system_3'], name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])

    def test_system_valid_no_meta(self) -> None:
        """Omitting meta leaves it as None."""
        system = System(description='Test Policy', egress=[DataFlow(fides_key='test_system_2', type='system', data_categories=[])], fides_key='test_system', ingress=[DataFlow(fides_key='test_system_3', type='system', data_categories=[])], name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', egress=['test_system_2'], ingress=['test_system_3'], name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])
        assert system.meta is None

    def test_system_valid_no_egress_or_ingress(self) -> None:
        """A System whose declarations specify no flows needs no system-level flows."""
        assert System(description='Test Policy', fides_key='test_system', meta={'some': 'meta stuff'}, name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])

    def test_system_no_egress(self) -> None:
        """A declaration egress without a matching system-level egress is rejected."""
        with raises(ValueError):
            assert System(description='Test Policy', fides_key='test_system', ingress=[DataFlow(fides_key='test_system_3', type='system', data_categories=[])], meta={'some': 'meta stuff'}, name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', egress=['test_system_2'], ingress=['test_system_3'], name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])

    def test_system_no_ingress(self) -> None:
        """A declaration ingress without a matching system-level ingress is rejected."""
        with raises(ValueError):
            assert System(description='Test Policy', egress=[DataFlow(fides_key='test_system_2', type='system', data_categories=[])], fides_key='test_system', meta={'some': 'meta stuff'}, name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', egress=['test_system_2'], ingress=['test_system_3'], name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])

    def test_system_user_ingress_valid(self) -> None:
        """An ingress from the special 'user' source needs no system-level egress."""
        assert System(description='Test Policy', fides_key='test_system', ingress=[DataFlow(fides_key='user', type='user', data_categories=[])], meta={'some': 'meta stuff'}, name='Test System', organization_fides_key=1, privacy_declarations=[PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', ingress=['user'], name='declaration-name')], system_type='SYSTEM', tags=['some', 'tags'])

    def test_expanded_system(self):
        """A System exercising every extended (GVL/TCF-style) field constructs successfully."""
        assert System(
            fides_key='test_system',
            organization_fides_key=1,
            tags=['some', 'tags'],
            name='Exponential Interactive, Inc d/b/a VDX.tv',
            description='My system test',
            meta={'some': 'meta stuff'},
            system_type='SYSTEM',
            egress=[DataFlow(fides_key='test_system_2', type='system', data_categories=[])],
            ingress=[DataFlow(fides_key='test_system_3', type='system', data_categories=[])],
            privacy_declarations=[PrivacyDeclaration(
                name='declaration-name',
                data_categories=['user.device.ip_address', 'user.device.cookie_id', 'user.device.device_id', 'user.id.pseudonymous', 'user.behavior.purchase_history', 'user.behavior', 'user.behavior.browsing_history', 'user.behavior.media_consumption', 'user.behavior.search_history', 'user.location.imprecise', 'user.demographic', 'user.privacy_preferences'],
                data_use='functional.storage',
                data_subjects=[],
                egress=['test_system_2'],
                ingress=['test_system_3'],
                features=['Match and combine data from other data sources', 'Link different devices', 'Receive and use automatically-sent device characteristics for identification'],
                legal_basis_for_processing='Legitimate interests',
                impact_assessment_location='www.example.com/impact_asessment_location',
                retention_period='3-5 years',
                processes_special_category_data=True,
                special_category_legal_basis='Reasons of substantial public interest (with a basis in law)',
                data_shared_with_third_parties=True,
                third_parties='advertising; marketing',
                shared_categories=[],
                flexible_legal_basis_for_processing=True,
                cookies=[{'name': 'ANON_ID', 'path': '/', 'domain': 'tribalfusion.com'}],
            )],
            vendor_id='gvl.1',
            dataset_references=['test_fides_key_dataset'],
            processes_personal_data=True,
            exempt_from_privacy_regulations=False,
            reason_for_exemption=None,
            uses_profiling=True,
            legal_basis_for_profiling=['Explicit consent', 'Contract'],
            does_international_transfers=True,
            legal_basis_for_transfers=['Adequacy Decision', 'SCCs', 'New legal basis'],
            requires_data_protection_assessments=True,
            dpa_location='www.example.com/dpa_location',
            # NOTE(review): the original URL string literals were corrupted in the
            # source (unbalanced quotes, URLs lost); placeholders restored below
            # to make the module parse -- confirm the intended URLs upstream.
            privacy_policy='https://www.example.com/privacy_policy',
            legal_name='Exponential Interactive, Inc d/b/a VDX.tv',
            legal_address='Exponential Interactive Spain S.L.;General Martinez Campos Num 41;Madrid;28010;Spain',
            administrating_department='Privacy Department',
            responsibility=[DataResponsibilityTitle.CONTROLLER],
            dpo='',
            data_security_practices=None,
            cookie_max_age_seconds='',
            uses_cookies=True,
            cookie_refresh=True,
            uses_non_cookie_access=True,
            legitimate_interest_disclosure_url='https://www.example.com/legitimate_interest_disclosure',
            previous_vendor_id='gacp.10',
            cookies=[{'name': 'COOKIE_ID_EXAMPLE', 'path': '/', 'domain': 'example.com/cookie'}],
        )

    def test_flexible_legal_basis_default(self):
        """flexible_legal_basis_for_processing defaults to True when omitted."""
        pd = PrivacyDeclaration(data_categories=[], data_subjects=[], data_use='provide', ingress=['user'], name='declaration-name')
        assert pd.flexible_legal_basis_for_processing
class OptionSeriesScatter3dSonificationDefaultspeechoptionsMapping(Options):
    """Accessors for the sonification default speech-options mapping of a
    scatter3d series.

    NOTE(review): looks auto-generated. The ``text`` accessor pair shares one
    name; without decorators the second ``def`` (the setter) is the one bound
    on the class -- presumably ``@property``/``@x.setter`` decorators were
    stripped from this copy. TODO confirm against the generator output.
    """

    def pitch(self) -> 'OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingPitch':
        """Sub-configuration object for pitch mapping."""
        return self._config_sub_data('pitch', OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingPitch)

    def playDelay(self) -> 'OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingPlaydelay':
        """Sub-configuration object for play-delay mapping."""
        return self._config_sub_data('playDelay', OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingPlaydelay)

    def rate(self) -> 'OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingRate':
        """Sub-configuration object for speech-rate mapping."""
        return self._config_sub_data('rate', OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingRate)

    def text(self):
        """Getter for the spoken text template (no default)."""
        return self._config_get(None)

    def text(self, text: str):
        """Setter for the spoken text template."""
        self._config(text, js_type=False)

    def time(self) -> 'OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingTime':
        """Sub-configuration object for time mapping."""
        return self._config_sub_data('time', OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingTime)

    def volume(self) -> 'OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingVolume':
        """Sub-configuration object for volume mapping."""
        return self._config_sub_data('volume', OptionSeriesScatter3dSonificationDefaultspeechoptionsMappingVolume)
def test_point_in_tetrahedron():
    """Classify known inside/outside points against a fixed tetrahedron."""
    vertices = [0, 0, 0, 2, 1, 0, 0, 2, 0, 0, 0, 2]
    # The four vertices themselves count as inside.
    for x, y, z in [(0, 0, 0), (2, 1, 0), (0, 2, 0), (0, 0, 2)]:
        assert xcalc.point_in_tetrahedron(x, y, z, vertices) is True
    assert xcalc.point_in_tetrahedron(-1, 0, 0, vertices) is False
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='Flaky bash mock on mac')
def test_ecl100_retries_once_on_license_failure(tmp_path, monkeypatch):
    """A run that ends in an Eclipse LICENSE FAILURE is retried exactly once.

    A mock eclipse executable always writes a license-failure PRT file and
    appends to a log; after the retry the run must still raise, and the log
    must show exactly two invocations.
    """
    # NOTE(review): the original decorator line was a dangling '.skipif(...)'
    # fragment (syntax error); reconstructed as @pytest.mark.skipif.
    mock_eclipse_path = (tmp_path / 'mock_eclipse100')
    # Minimal site config mapping version 2015.2 to the mock executable.
    with open((tmp_path / 'mock_config.yaml'), 'w', encoding='utf-8') as fp:
        yaml.dump({'versions': {'2015.2': {'scalar': {'executable': str(mock_eclipse_path)}}}}, fp)
    case_path = (tmp_path / 'CASE.DATA')
    case_path.write_text('', encoding='utf-8')
    mock_eclipse_path.write_text(dedent(" #!/usr/bin/bash\n echo 'Errors 1\n Bugs 0\n -- ERROR AT TIME 0.0 DAYS ( 1-JAN-2000):\n LICENSE FAILURE: ERROR NUMBER IS -33' > CASE.PRT\n echo 'Called mock' >> mock_log\n "), encoding='utf-8')
    mock_eclipse_path.chmod(((stat.S_IEXEC | stat.S_IWUSR) | mock_eclipse_path.stat().st_mode))
    monkeypatch.setenv('ECL100_SITE_CONFIG', str((tmp_path / 'mock_config.yaml')))
    econfig = ecl_config.Ecl100Config()
    sim = econfig.sim('2015.2')
    erun = ecl_run.EclRun(str(case_path), sim)
    # Shorten the license-retry back-off so the test stays fast.
    erun.LICENSE_FAILURE_SLEEP_SECONDS = 1
    with pytest.raises(RuntimeError, match='LICENSE FAILURE'):
        erun.runEclipse()
    max_attempts = 2
    assert ((tmp_path / 'mock_log').read_text() == ('Called mock\n' * max_attempts))
def get_venv_status(rootdir):
    """Classify *rootdir* as a virtualenv root.

    Returns a (status, fskind) tuple where status is one of 'missing',
    'not-dir', 'invalid' or 'valid'; fskind is None only for 'missing'.
    """
    fskind = _fs.check_file(rootdir)
    if not fskind:
        return ('missing', None)
    if fskind not in ('dir', 'dir symlink'):
        return ('not-dir', fskind)
    # A venv is only considered valid if it carries an executable python.
    if not resolve_venv_file(rootdir, 'bin', 'python', checkexists='exe'):
        return ('invalid', fskind)
    return ('valid', fskind)
class OptionSeriesWindbarbDatalabels(Options):
    """Data-label options for a windbarb series.

    Each option is exposed as a getter/setter pair sharing one method name;
    getters return the documented default via ``_config_get`` and setters
    forward to ``_config``. NOTE(review): looks auto-generated -- without
    decorators the second ``def`` of each pair (the setter) is the binding
    that survives on the class; presumably ``@property``/``@x.setter``
    decorators were stripped from this copy. TODO confirm against the
    generator output. Some getters default to the literal string
    'undefined' rather than Python ``None`` -- that mirrors the JS option
    default and is intentional here.
    """

    def align(self):
        return self._config_get('undefined')

    def align(self, text: str):
        self._config(text, js_type=False)

    def allowOverlap(self):
        return self._config_get(False)

    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)

    def animation(self) -> 'OptionSeriesWindbarbDatalabelsAnimation':
        # Nested sub-configuration object, created on first access.
        return self._config_sub_data('animation', OptionSeriesWindbarbDatalabelsAnimation)

    def backgroundColor(self):
        return self._config_get(None)

    def backgroundColor(self, text: str):
        self._config(text, js_type=False)

    def borderColor(self):
        return self._config_get(None)

    def borderColor(self, text: str):
        self._config(text, js_type=False)

    def borderRadius(self):
        return self._config_get(0)

    def borderRadius(self, num: float):
        self._config(num, js_type=False)

    def borderWidth(self):
        return self._config_get(0)

    def borderWidth(self, num: float):
        self._config(num, js_type=False)

    def className(self):
        return self._config_get(None)

    def className(self, text: str):
        self._config(text, js_type=False)

    def color(self):
        return self._config_get(None)

    def color(self, text: str):
        self._config(text, js_type=False)

    def crop(self):
        return self._config_get(True)

    def crop(self, flag: bool):
        self._config(flag, js_type=False)

    def defer(self):
        return self._config_get(True)

    def defer(self, flag: bool):
        self._config(flag, js_type=False)

    def enabled(self):
        return self._config_get(False)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def filter(self) -> 'OptionSeriesWindbarbDatalabelsFilter':
        return self._config_sub_data('filter', OptionSeriesWindbarbDatalabelsFilter)

    def format(self):
        # Default label format string: the point's value.
        return self._config_get('point.value')

    def format(self, text: str):
        self._config(text, js_type=False)

    def formatter(self):
        return self._config_get(None)

    def formatter(self, value: Any):
        self._config(value, js_type=False)

    def inside(self):
        return self._config_get(None)

    def inside(self, flag: bool):
        self._config(flag, js_type=False)

    def nullFormat(self):
        return self._config_get(None)

    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)

    def nullFormatter(self):
        return self._config_get(None)

    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)

    def overflow(self):
        return self._config_get('justify')

    def overflow(self, text: str):
        self._config(text, js_type=False)

    def padding(self):
        return self._config_get(5)

    def padding(self, num: float):
        self._config(num, js_type=False)

    def position(self):
        return self._config_get('center')

    def position(self, text: str):
        self._config(text, js_type=False)

    def rotation(self):
        return self._config_get(0)

    def rotation(self, num: float):
        self._config(num, js_type=False)

    def shadow(self):
        return self._config_get(False)

    def shadow(self, flag: bool):
        self._config(flag, js_type=False)

    def shape(self):
        return self._config_get('square')

    def shape(self, text: str):
        self._config(text, js_type=False)

    def style(self):
        return self._config_get(None)

    def style(self, value: Any):
        self._config(value, js_type=False)

    def textPath(self) -> 'OptionSeriesWindbarbDatalabelsTextpath':
        return self._config_sub_data('textPath', OptionSeriesWindbarbDatalabelsTextpath)

    def useHTML(self):
        return self._config_get(False)

    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)

    def verticalAlign(self):
        return self._config_get('undefined')

    def verticalAlign(self, text: str):
        self._config(text, js_type=False)

    def x(self):
        return self._config_get(0)

    def x(self, num: float):
        self._config(num, js_type=False)

    def y(self):
        return self._config_get('undefined')

    def y(self, num: float):
        self._config(num, js_type=False)

    def zIndex(self):
        return self._config_get(6)

    def zIndex(self, num: float):
        self._config(num, js_type=False)
class OFConfigClient(app_manager.RyuApp):
    """Ryu app exercising OF-Config against a capable switch.

    On startup it spawns :meth:`_do_of_config`, which performs get /
    get-config / edit-config operations and finally brings all switch ports
    administratively down.
    """

    def __init__(self, *args, **kwargs):
        super(OFConfigClient, self).__init__(*args, **kwargs)
        # unknown_host_cb accepts any SSH host key -- acceptable for this
        # demo/test client only, never for production use.
        self.switch = capable_switch.OFCapableSwitch(host=HOST, port=PORT, username=USERNAME, password=PASSWORD, unknown_host_cb=(lambda host, fingeprint: True))
        hub.spawn(self._do_of_config)

    def _validate(self, tree):
        """Validate *tree* against the OF-Config schema, logging failures.

        Validation problems are printed rather than raised so the demo keeps
        going on schema mismatches.
        """
        xmlschema = _get_schema()
        try:
            xmlschema.assertValid(tree)
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate.
            traceback.print_exc()

    def _do_get(self):
        """Fetch, validate and return the full switch state tree.

        Also prints the set of XML namespaces seen in the response.
        """
        data_xml = self.switch.raw_get()
        tree = lxml.etree.fromstring(data_xml)
        self._validate(tree)
        name_spaces = set()
        # getiterator() is deprecated in lxml; iter() is the supported API.
        for e in tree.iter():
            name_spaces.add(capable_switch.get_ns_tag(e.tag)[0])
        print(name_spaces)
        return tree

    def _do_get_config(self, source):
        """Fetch and validate the configuration datastore named *source*."""
        print(('source = %s' % source))
        config_xml = self.switch.raw_get_config(source)
        tree = lxml.etree.fromstring(config_xml)
        self._validate(tree)

    def _do_edit_config(self, config):
        """Validate *config* and apply it to the 'running' datastore."""
        tree = lxml.etree.fromstring(config)
        self._validate(tree)
        self.switch.raw_edit_config(target='running', config=config)

    def _print_ports(self, tree, ns):
        """Pretty-print every port element found under resources."""
        for port in tree.findall(('{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES, ns, ofc_consts.PORT))):
            print(lxml.etree.tostring(port, pretty_print=True))

    def _set_ports_down(self):
        """Build and send an edit-config that sets every port admin-state down."""
        tree = self._do_get()
        print(lxml.etree.tostring(tree, pretty_print=True))
        qname = lxml.etree.QName(tree.tag)
        ns = qname.namespace
        self._print_ports(tree, ns)
        # Template elements copied from the current state; their tags are
        # reused to build the edit-config request below.
        switch_id = tree.find(('{%s}%s' % (ns, ofc_consts.ID)))
        resources = tree.find(('{%s}%s' % (ns, ofc_consts.RESOURCES)))
        configuration = tree.find(('{%s}%s/{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES, ns, ofc_consts.PORT, ns, ofc_consts.CONFIGURATION)))
        admin_state = tree.find(('{%s}%s/{%s}%s/{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES, ns, ofc_consts.PORT, ns, ofc_consts.CONFIGURATION, ns, ofc_consts.ADMIN_STATE)))
        config_ = lxml.etree.Element(('{%s}%s' % (ncclient.xml_.BASE_NS_1_0, nc_consts.CONFIG)))
        capable_switch_ = lxml.etree.SubElement(config_, tree.tag)
        switch_id_ = lxml.etree.SubElement(capable_switch_, switch_id.tag)
        switch_id_.text = switch_id.text
        resources_ = lxml.etree.SubElement(capable_switch_, resources.tag)
        for port in tree.findall(('{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES, ns, ofc_consts.PORT))):
            resource_id = port.find(('{%s}%s' % (ns, ofc_consts.RESOURCE_ID)))
            port_ = lxml.etree.SubElement(resources_, port.tag)
            resource_id_ = lxml.etree.SubElement(port_, resource_id.tag)
            resource_id_.text = resource_id.text
            configuration_ = lxml.etree.SubElement(port_, configuration.tag)
            # merge operation: only the admin-state field is changed per port.
            configuration_.set(ofc_consts.OPERATION, nc_consts.MERGE)
            admin_state_ = lxml.etree.SubElement(configuration_, admin_state.tag)
            admin_state_.text = ofc_consts.DOWN
        self._do_edit_config(lxml.etree.tostring(config_, pretty_print=True))
        tree = self._do_get()
        self._print_ports(tree, ns)

    def _do_of_config(self):
        """Run the demo sequence: reads, sample edits, then ports down."""
        self._do_get()
        self._do_get_config('running')
        self._do_get_config('startup')
        try:
            # Not every switch supports the candidate datastore.
            self._do_get_config('candidate')
        except ncclient.NCClientError:
            traceback.print_exc()
        self._do_edit_config(SWITCH_PORT_DOWN)
        self._do_edit_config(SWITCH_ADVERTISED)
        self._do_edit_config(SWITCH_CONTROLLER)
        self._set_ports_down()
        self.switch.close_session()
class OptionPlotoptionsPackedbubbleSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Volume-mapping options for packedbubble sonification default instruments.

    NOTE(review): looks auto-generated. Each getter/setter pair shares a
    name; without decorators the second ``def`` (the setter) is the one
    bound on the class -- presumably ``@property``/``@x.setter`` decorators
    were stripped from this copy. TODO confirm against the generator.
    """

    def mapFunction(self):
        """Getter for the mapping function (no default)."""
        return self._config_get(None)

    def mapFunction(self, value: Any):
        """Setter for the mapping function."""
        self._config(value, js_type=False)

    def mapTo(self):
        """Getter for the data property mapped to volume (no default)."""
        return self._config_get(None)

    def mapTo(self, text: str):
        """Setter for the data property mapped to volume."""
        self._config(text, js_type=False)

    def max(self):
        """Getter for the maximum mapped volume (no default)."""
        return self._config_get(None)

    def max(self, num: float):
        """Setter for the maximum mapped volume."""
        self._config(num, js_type=False)

    def min(self):
        """Getter for the minimum mapped volume (no default)."""
        return self._config_get(None)

    def min(self, num: float):
        """Setter for the minimum mapped volume."""
        self._config(num, js_type=False)

    def within(self):
        """Getter for the value range the mapping applies within (no default)."""
        return self._config_get(None)

    def within(self, value: Any):
        """Setter for the value range the mapping applies within."""
        self._config(value, js_type=False)
class SignatureTestingMeta:
    """Checks signature (yara-style) rule files for required meta fields.

    Collects a list of '<field> in <rule>' strings for every missing field.
    """

    # Fields every rule's meta section must declare.
    META_FIELDS = ['software_name', 'open_source', 'website', 'description']

    def __init__(self):
        # Per-instance result list. The original used a mutable CLASS
        # attribute, so results leaked between instances.
        self.missing_meta_fields = []

    def check_meta_fields(self, sig_path: Path):
        """Check every file in *sig_path* and return the accumulated misses."""
        for file in sig_path.iterdir():
            self.check_for_file(file)
        return self.missing_meta_fields

    def check_for_file(self, file_path: Path):
        """Check each rule contained in *file_path*."""
        rules = self._split_rules(file_path.read_text())
        for rule in rules:
            self.check_meta_fields_of_rule(rule)

    @staticmethod
    def _split_rules(raw_rules: str) -> list[str]:
        """Split a rule file's text into one joined string per rule.

        The original lacked both ``self`` and ``@staticmethod``, so calling
        it via ``self._split_rules(...)`` raised a TypeError.
        """
        rule_lines = raw_rules.splitlines()
        rule_start_indices = [i for i in range(len(rule_lines)) if rule_lines[i].startswith('rule ')]
        return [''.join(rule_lines[start:end]) for (start, end) in zip(rule_start_indices, (rule_start_indices[1:] + [len(rule_lines)]))]

    def check_meta_fields_of_rule(self, rule: str):
        """Register missing required meta fields for a single rule string."""
        rule_components = [s.strip() for s in rule.split()]
        rule_name = rule_components[1].replace('{', '')
        if ('meta:' not in rule_components):
            # No meta section at all: every field is missing.
            self._register_missing_field('ALL', rule_name)
            return
        for required_field in self.META_FIELDS:
            if (required_field not in rule_components):
                self._register_missing_field(required_field, rule_name)

    def _register_missing_field(self, missing_field: str, rule_name: str):
        """Record one missing field and log it."""
        self.missing_meta_fields.append(f'{missing_field} in {rule_name}')
        logging.error(f'CST: No meta field {missing_field} for rule {rule_name}.')
def get_solar_capacity_at(date: datetime) -> float:
    """Return the installed solar capacity in effect at *date*.

    Dates before 2015 fall back to the first recorded value; otherwise the
    most recent capacity at or before *date* is returned.
    NOTE: *date* is compared against a UTC DatetimeIndex, so it should be
    timezone-aware.
    """
    records = [
        ('2015-01-01', 1393), ('2016-01-01', 1646), ('2017-01-01', 1859),
        ('2018-01-01', 2090), ('2019-01-01', 2375), ('2020-01-01', 2795),
        ('2021-01-01', 3314), ('2022-01-01', 3904),
    ]
    historical_capacities = pd.DataFrame.from_records(records, columns=['datetime', 'capacity.solar']).set_index('datetime')
    historical_capacities.index = pd.DatetimeIndex(historical_capacities.index, tz='UTC')
    if date.year < 2015:
        return historical_capacities.loc[('2015-01-01', 'capacity.solar')]
    applicable = historical_capacities[historical_capacities.index <= date]
    return applicable.iloc[-1].loc['capacity.solar']
def calculate_score(cluster, std_span, std_pos, span, type):
    """Compute a support score for a cluster of SV signatures.

    The score is the (capped) number of supporting signatures plus two
    deviation bonuses of up to num/8 each, rewarding tight span and position
    agreement. For inversions ('INV'), only signatures balanced between left
    and right orientations (plus 'all') count as support.

    NOTE: the parameter name ``type`` shadows the builtin; kept for caller
    compatibility.
    """
    if std_span is None or std_pos is None:
        # No deviation statistics available: only the count contributes.
        span_deviation_score = 0
        pos_deviation_score = 0
    else:
        span_deviation_score = 1 - min(1, std_span / span)
        pos_deviation_score = 1 - min(1, std_pos / span)
    if type == 'INV':
        # Count signatures per orientation; unknown orientations are ignored.
        counts = {'left_fwd': 0, 'left_rev': 0, 'right_fwd': 0, 'right_rev': 0, 'all': 0}
        for signature in cluster:
            if signature.direction in counts:
                counts[signature.direction] += 1
        left_signatures = counts['left_fwd'] + counts['left_rev']
        right_signatures = counts['right_fwd'] + counts['right_rev']
        # An inversion needs support from both sides; 'all' supports either.
        valid_signatures = min(left_signatures, right_signatures) + counts['all']
        num_signatures = min(80, valid_signatures)
    else:
        num_signatures = min(80, len(cluster))
    return num_signatures + (span_deviation_score * (num_signatures / 8)) + (pos_deviation_score * (num_signatures / 8))
class KiwoomOpenApiPlusService():
    """Client-side convenience wrappers for the KiwoomOpenApiPlusService RPC
    service, each forwarding to the :mod:`grpc.experimental` single-call API.

    NOTE(review): looks like grpcio-generated stub code; the methods take
    ``request`` (not ``self``) as first argument and are presumably meant to
    be ``@staticmethod``s whose decorators were stripped from this copy --
    calling them on an instance would misbind ``request``. TODO confirm
    against the generated ``*_pb2_grpc`` module.
    """

    def Call(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Unary-unary Call RPC."""
        return grpc.experimental.unary_unary(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/Call', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.CallRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.CallResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def Listen(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming Listen RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/Listen', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def BidirectionalListen(request_iterator, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Bidirectional-streaming Listen RPC."""
        return grpc.experimental.stream_stream(request_iterator, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/BidirectionalListen', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.BidirectionalListenRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def LoginCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming login RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/LoginCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.LoginRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def TransactionCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming transaction RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/TransactionCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.TransactionRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def OrderCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming order RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/OrderCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.OrderRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def RealCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming realtime-data RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/RealCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.RealRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def LoadConditionCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming load-condition RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/LoadConditionCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.LoadConditionRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def ConditionCall(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming condition RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/ConditionCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ConditionRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def BidirectionalRealCall(request_iterator, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Bidirectional-streaming realtime-data RPC."""
        return grpc.experimental.stream_stream(request_iterator, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/BidirectionalRealCall', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.BidirectionalRealRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def OrderListen(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming order-event listener RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/OrderListen', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def CustomListen(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming custom-event listener RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/CustomListen', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.ListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def CustomCallAndListen(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Server-streaming combined call-and-listen RPC."""
        return grpc.experimental.unary_stream(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/CustomCallAndListen', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.CallAndListenRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.CallAndListenResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def SetLogLevel(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        """Unary-unary log-level configuration RPC."""
        return grpc.experimental.unary_unary(request, target, '/koapy.backend.kiwoom_open_api_plus.grpc.KiwoomOpenApiPlusService/SetLogLevel', koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.SetLogLevelRequest.SerializeToString, koapy_dot_backend_dot_kiwoom__open__api__plus_dot_grpc_dot_KiwoomOpenApiPlusService__pb2.SetLogLevelResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class FileInterface(Interface):
    """Interface implementation backed by the plain local file system.

    Path-adaptation hooks are identity functions here; subclasses for other
    back-ends can override them.
    """

    def __init__(self, config):
        Interface.__init__(self, config)

    def _adapt_dir_path(self, directory):
        """Identity: local directories need no path translation."""
        return directory

    def _adapt_ext_path(self, directory):
        """Identity: local directories need no external-path translation."""
        return directory

    def _extended_directory_check(self, directory):
        """No extra checks required for local directories."""
        return

    def _setup_directories(self, cfg):
        """Collect the configured directories per artifact type.

        Returns a dict mapping each configured type ('requirements',
        'topics', 'constraints', 'testcases') to its adapted directory list.
        Unconfigured types are skipped with an info log.
        """
        all_dirs = {}
        tracer.debug('Called.')
        for dir_type in ['requirements', 'topics', 'constraints', 'testcases']:
            config_dirs = cfg.get_rvalue_default((dir_type + '_dirs'), None)
            if config_dirs is None:
                tracer.info('Directory [%s] not configured - skipping.', dir_type)
                continue
            adapted = [self._adapt_dir_path(d) for d in config_dirs]
            self._check_list_of_strings(dir_type, adapted)
            checked = []
            for d in adapted:
                self._extended_directory_check(d)
                checked.append(self._adapt_ext_path(d))
            all_dirs[dir_type] = checked
        for (dir_type, directory) in iteritems(all_dirs):
            tracer.debug('[%s] directories [%s]', dir_type, directory)
        return all_dirs
class Identity():
    """The identity of an agent: a name plus either a single
    (address, public key) pair or per-ledger dictionaries of both, with a
    designated default ledger key.
    """

    __slots__ = ('_name', '_address', '_public_key', '_public_keys', '_addresses', '_default_address_key')

    def __init__(self, name: SimpleIdOrStr, address: Optional[str]=None, public_key: Optional[str]=None, addresses: Optional[Dict[(str, Address)]]=None, public_keys: Optional[Dict[(str, str)]]=None, default_address_key: str=DEFAULT_LEDGER) -> None:
        """Initialize the identity.

        Exactly one of *address* (with *public_key*) or *addresses* (with
        *public_keys*) must be provided; the single form is expanded into
        one-entry dictionaries keyed by *default_address_key*.

        :raises ValueError: on any inconsistent combination of arguments.
        """
        self._name = SimpleId(name)
        if (default_address_key is None):
            raise ValueError('Provide a key for the default address.')
        # XOR: exactly one of the two input styles must be used.
        if ((address is None) == (addresses is None)):
            raise ValueError('Either provide a single address or a dictionary of addresses, and not both.')
        if (address is None):
            # Dictionary form: addresses and public_keys must be consistent.
            if ((addresses is None) or (len(addresses) == 0)):
                raise ValueError('Provide at least one pair of addresses.')
            if (public_key is not None):
                raise ValueError('If you provide a dictionary of addresses, you must not provide a single public key.')
            if (public_keys is None):
                raise ValueError('If you provide a dictionary of addresses, you must provide its corresponding dictionary of public keys.')
            enforce((public_keys.keys() == addresses.keys()), 'Keys in public keys and addresses dictionaries do not match. They must be identical.')
            enforce(((default_address_key in addresses) and (default_address_key in public_keys)), 'The default address key must exist in both addresses and public keys dictionaries.')
            # Derive the scalar default address/key from the dictionaries.
            address = addresses[default_address_key]
            public_key = public_keys[default_address_key]
        if (addresses is None):
            # Single form: expand into one-entry dictionaries.
            if (public_keys is not None):
                raise ValueError('If you provide a single address, you must not provide a dictionary of public keys.')
            if (public_key is None):
                raise ValueError('If you provide a single address, you must provide its corresponding public key.')
            addresses = {default_address_key: address}
            public_keys = {default_address_key: public_key}
        self._address = address
        self._addresses = addresses
        self._public_key = public_key
        self._public_keys = public_keys
        self._default_address_key = default_address_key

    # NOTE(review): the accessors below read like properties; presumably
    # @property decorators were stripped from this copy -- TODO confirm.
    def default_address_key(self) -> str:
        """Return the key naming the default ledger."""
        return self._default_address_key

    def name(self) -> str:
        """Return the identity name as a plain string."""
        return str(self._name)

    def addresses(self) -> Dict[(str, Address)]:
        """Return the per-ledger address dictionary."""
        return self._addresses

    def address(self) -> Address:
        """Return the default address."""
        return self._address

    def public_keys(self) -> Dict[(str, str)]:
        """Return the per-ledger public-key dictionary."""
        return self._public_keys

    def public_key(self) -> str:
        """Return the default public key."""
        return self._public_key
class SACRunner(TrainingRunner):
    """Runner that wires up and launches Soft Actor-Critic (SAC) training.

    NOTE(review): the annotated class-level fields together with
    ``__post_init__`` suggest a stripped ``@dataclass`` decorator — confirm
    against the original file.
    """

    # Number of concurrent evaluation environments; a value <= 0 means
    # "use the machine's CPU count" (resolved in __post_init__).
    eval_concurrency: int
    # Config describing recorded demonstration trajectories used to pre-fill
    # the replay buffer (consumed by load_replay_buffer when truthy).
    initial_demonstration_trajectories: DictConfig

    def __post_init__(self):
        # Fall back to the CPU count when no explicit positive concurrency is given.
        if (self.eval_concurrency <= 0):
            self.eval_concurrency = query_cpu()

    # NOTE(review): stray expression — likely the remnant of a stripped
    # ``@override(TrainingRunner)`` decorator on ``setup``. At class-body
    # level it merely evaluates the name and discards it (a no-op).
    (TrainingRunner)
def setup(self, cfg: DictConfig) -> None:
    """Build trainer, rollout workers, replay buffer and optional evaluator from the config.

    :param cfg: Full job configuration; the ``algorithm`` and ``runner``
        sections are read here.
    """
    super().setup(cfg)
    # SAC requires a state-action critic (Q-function), not a state-value critic.
    assert isinstance(self._model_composer.critic, TorchStateActionCritic), 'Please specify a state action critic for SAC.'
    # Workers roll out on CPU; the learner gets its own deep copy of the policy
    # on the configured device.
    worker_policy = self._model_composer.policy
    worker_policy.to('cpu')
    model = TorchActorCritic(policy=copy.deepcopy(self._model_composer.policy), critic=self._model_composer.critic, device=cfg.algorithm.device)
    # Per-actor and buffer seeds derived from the master seeding object.
    worker_env_instance_seeds = [self.maze_seeding.generate_env_instance_seed() for _ in range(cfg.algorithm.num_actors)]
    replay_buffer_seed = self.maze_seeding.generate_env_instance_seed()
    self._model_selection = BestModelSelection(dump_file=self.state_dict_dump_file, model=model, dump_interval=self.dump_interval)
    # Rollout evaluation is optional: only build eval envs when episodes are requested.
    evaluator = None
    if (cfg.algorithm.rollout_evaluator.n_episodes > 0):
        eval_env = self.create_distributed_eval_env(self.env_factory, self.eval_concurrency, logging_prefix='eval')
        eval_env_instance_seeds = [self.maze_seeding.generate_env_instance_seed() for _ in range(self.eval_concurrency)]
        eval_env.seed(eval_env_instance_seeds)
        evaluator = Factory(base_type=RolloutEvaluator).instantiate(cfg.algorithm.rollout_evaluator, eval_env=eval_env, model_selection=self._model_selection)
    replay_buffer = UniformReplayBuffer(cfg.algorithm.replay_buffer_size, seed=replay_buffer_seed)
    # Pre-fill the buffer either from recorded demonstrations or by sampling
    # with an initial policy.
    if cfg.runner.initial_demonstration_trajectories:
        self.load_replay_buffer(replay_buffer=replay_buffer, cfg=cfg)
    else:
        self.init_replay_buffer(replay_buffer=replay_buffer, initial_sampling_policy=cfg.algorithm.initial_sampling_policy, initial_buffer_size=cfg.algorithm.initial_buffer_size, replay_buffer_seed=replay_buffer_seed, split_rollouts_into_transitions=cfg.algorithm.split_rollouts_into_transitions, n_rollout_steps=cfg.algorithm.n_rollout_steps, env_factory=self.env_factory)
    distributed_actors = self.create_distributed_rollout_workers(env_factory=self.env_factory, worker_policy=worker_policy, n_rollout_steps=cfg.algorithm.n_rollout_steps, n_workers=cfg.algorithm.num_actors, batch_size=cfg.algorithm.batch_size, rollouts_per_iteration=cfg.algorithm.rollouts_per_iteration, split_rollouts_into_transitions=cfg.algorithm.split_rollouts_into_transitions, env_instance_seeds=worker_env_instance_seeds, replay_buffer=replay_buffer)
    self._trainer = SAC(algorithm_config=cfg.algorithm, learner_model=model, distributed_actors=distributed_actors, model_selection=self._model_selection, evaluator=evaluator)
    # Optionally resume training from a previous run's dumped state dict.
    self._init_trainer_from_input_dir(trainer=self._trainer, state_dict_dump_file=self.state_dict_dump_file, input_dir=cfg.input_dir)
def load_replay_buffer(self, replay_buffer: BaseReplayBuffer, cfg: DictConfig) -> None:
    """Pre-fill the given replay buffer with recorded demonstration trajectories.

    :param replay_buffer: Buffer to fill.
    :param cfg: Full job configuration (input directory and split flag are read here).
    """
    print(f'******* Starting to fill the replay buffer with trajectories from path: {self.initial_demonstration_trajectories.input_data} *******')
    # Dataset paths are resolved relative to the job's input directory.
    with SwitchWorkingDirectoryToInput(cfg.input_dir):
        dataset = Factory(base_type=Dataset).instantiate(self.initial_demonstration_trajectories, conversion_env_factory=self.env_factory)
    assert isinstance(dataset, InMemoryDataset), 'Only in memory dataset supported at this point'
    if not cfg.algorithm.split_rollouts_into_transitions:
        # Keep whole trajectories together: one buffer entry per recorded trajectory.
        for traj_id, reference in enumerate(dataset.trajectory_references):
            record = SpacesTrajectoryRecord(id=traj_id)
            record.step_records = dataset.step_records[reference]
            replay_buffer.add_transition(record)
    else:
        # Store individual transitions; SAC's one-step bootstrapping needs next observations.
        for step in dataset.step_records:
            assert (step.next_observations is not None), 'Next observations are required for sac'
            assert all((obs is not None) for obs in step.next_observations), 'Next observations are required for sac'
            replay_buffer.add_transition(step)
@staticmethod
def init_replay_buffer(replay_buffer: BaseReplayBuffer, initial_sampling_policy: Union[DictConfig, Policy], initial_buffer_size: int, replay_buffer_seed: int, split_rollouts_into_transitions: bool, n_rollout_steps: int, env_factory: Callable[[], MazeEnv]) -> None:
    """Fill the replay buffer up to ``initial_buffer_size`` by rolling out an initial sampling policy.

    Bug fix: this method takes no ``self`` parameter yet is invoked as
    ``self.init_replay_buffer(replay_buffer=..., ...)`` in ``setup``; without
    ``@staticmethod`` the bound instance is passed as the first positional
    argument and Python raises ``TypeError: got multiple values for argument
    'replay_buffer'``. Declaring it a static method restores the intended
    call semantics without changing any caller.

    :param replay_buffer: Buffer to fill.
    :param initial_sampling_policy: Policy (or its config) used to generate the initial transitions.
    :param initial_buffer_size: Minimum number of buffer entries to collect before returning.
    :param replay_buffer_seed: Seed applied to the sampling env and policy.
    :param split_rollouts_into_transitions: If True, store rollouts as individual transitions;
        otherwise store each trajectory as a single entry.
    :param n_rollout_steps: Maximum number of steps per generated rollout.
    :param env_factory: Factory creating the environment used for sampling.
    """
    # Aggregate episode statistics of the warm-up rollouts under a dedicated logging key.
    epoch_stats = LogStatsAggregator(LogStatsLevel.EPOCH)
    replay_stats_logger = get_stats_logger('init_replay_buffer')
    epoch_stats.register_consumer(replay_stats_logger)
    dummy_env = env_factory()
    dummy_env.seed(replay_buffer_seed)
    sampling_policy: Policy = Factory(Policy).instantiate(initial_sampling_policy, action_spaces_dict=dummy_env.action_spaces_dict)
    sampling_policy.seed(replay_buffer_seed)
    rollout_generator = RolloutGenerator(env=dummy_env, record_next_observations=True, record_episode_stats=True)
    print(f'******* Starting to fill the replay buffer with {initial_buffer_size} transitions *******')
    while (len(replay_buffer) < initial_buffer_size):
        trajectory = rollout_generator.rollout(policy=sampling_policy, n_steps=n_rollout_steps)
        if split_rollouts_into_transitions:
            replay_buffer.add_rollout(trajectory)
        else:
            replay_buffer.add_transition(trajectory)
        # Forward any completed-episode stats to the aggregator.
        for step_record in trajectory.step_records:
            if (step_record.episode_stats is not None):
                epoch_stats.receive(step_record.episode_stats)
    # Emit the aggregated warm-up statistics once, then detach the logger again.
    epoch_stats.reduce()
    epoch_stats.remove_consumer(replay_stats_logger)
def create_distributed_eval_env(self, env_factory: Callable[([], MazeEnv)], eval_concurrency: int, logging_prefix: str) -> StructuredVectorEnv:
def create_distributed_rollout_workers(self, env_factory: Callable[([], MazeEnv)], worker_policy: TorchPolicy, n_rollout_steps: int, n_workers: int, batch_size: int, rollouts_per_iteration: int, split_rollouts_into_transitions: bool, env_instance_seeds: List[int], replay_buffer: BaseReplayBuffer) -> BaseDistributedWorkersWithBuffer: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.