class Pig():
def __init__(self, x, y, space):
self.life = 20
mass = 5
radius = 14
inertia = pm.moment_for_circle(mass, 0, radius, (0, 0))
body = pm.Body(mass, inertia)
body.position = (x, y)
shape = pm.Circle(body, radius, (0, 0))
shape.elasticity = 0.95
shape.friction = 1
shape.collision_type = 1
space.add(body, shape)
self.body = body
self.shape = shape |
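# Usage sketch for the Pig class above, assuming pymunk is imported as `pm`
# (as the class body implies): build a space, add a pig, step the simulation.
import pymunk as pm

space = pm.Space()
space.gravity = (0, -900)
pig = Pig(100, 200, space)
for _ in range(60):
    space.step(1 / 60)  # advance physics by one 60 Hz frame
print(pig.body.position) |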
class OptionSeriesTimelineSonificationContexttracks(Options):
    @property
    def activeWhen(self) -> 'OptionSeriesTimelineSonificationContexttracksActivewhen':
        return self._config_sub_data('activeWhen', OptionSeriesTimelineSonificationContexttracksActivewhen)
    @property
    def instrument(self):
        return self._config_get('piano')
    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)
    @property
    def mapping(self) -> 'OptionSeriesTimelineSonificationContexttracksMapping':
        return self._config_sub_data('mapping', OptionSeriesTimelineSonificationContexttracksMapping)
    @property
    def midiName(self):
        return self._config_get(None)
    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)
    @property
    def pointGrouping(self) -> 'OptionSeriesTimelineSonificationContexttracksPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesTimelineSonificationContexttracksPointgrouping)
    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)
    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showPlayMarker(self):
        return self._config_get(True)
    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def timeInterval(self):
        return self._config_get(None)
    @timeInterval.setter
    def timeInterval(self, num: float):
        self._config(num, js_type=False)
    @property
    def type(self):
        return self._config_get('instrument')
    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)
    @property
    def valueInterval(self):
        return self._config_get(None)
    @valueInterval.setter
    def valueInterval(self, num: float):
        self._config(num, js_type=False)
    @property
    def valueMapFunction(self):
        return self._config_get('linear')
    @valueMapFunction.setter
    def valueMapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def valueProp(self):
        return self._config_get('"x"')
    @valueProp.setter
    def valueProp(self, text: str):
        self._config(text, js_type=False) |
class OptionSeriesGaugeSonificationDefaultspeechoptionsMapping(Options):
    @property
    def pitch(self) -> 'OptionSeriesGaugeSonificationDefaultspeechoptionsMappingPitch':
        return self._config_sub_data('pitch', OptionSeriesGaugeSonificationDefaultspeechoptionsMappingPitch)
    @property
    def playDelay(self) -> 'OptionSeriesGaugeSonificationDefaultspeechoptionsMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionSeriesGaugeSonificationDefaultspeechoptionsMappingPlaydelay)
    @property
    def rate(self) -> 'OptionSeriesGaugeSonificationDefaultspeechoptionsMappingRate':
        return self._config_sub_data('rate', OptionSeriesGaugeSonificationDefaultspeechoptionsMappingRate)
    @property
    def text(self):
        return self._config_get(None)
    @text.setter
    def text(self, text: str):
        self._config(text, js_type=False)
    @property
    def time(self) -> 'OptionSeriesGaugeSonificationDefaultspeechoptionsMappingTime':
        return self._config_sub_data('time', OptionSeriesGaugeSonificationDefaultspeechoptionsMappingTime)
    @property
    def volume(self) -> 'OptionSeriesGaugeSonificationDefaultspeechoptionsMappingVolume':
        return self._config_sub_data('volume', OptionSeriesGaugeSonificationDefaultspeechoptionsMappingVolume) |
class Platonic(Boxes):
ui_group = 'Unstable'
description = '\n'
    SOLIDS = {'tetrahedron': (4, 3), 'cube': (6, 4), 'octahedron': (8, 3), 'dodecahedron': (12, 5), 'icosahedron': (20, 3)}
def __init__(self) -> None:
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings, surroundingspaces=0)
self.buildArgParser(x=60, outside=True)
self.argparser.add_argument('--type', action='store', type=str, default=list(self.SOLIDS)[0], choices=list(self.SOLIDS), help='type of platonic solid')
def render(self):
e = self.x
t = self.thickness
(faces, corners) = self.SOLIDS[self.type]
u = UnevenFingerJointEdge(self, self.edges['f'].settings)
self.addPart(u)
uc = UnevenFingerJointEdgeCounterPart(self, self.edges['f'].settings)
self.addPart(uc)
for _ in range(faces):
self.regularPolygonWall(corners, side=e, edges='u', move='right') |
class SystemMetadata(_common_models.FlyteIdlEntity):
def __init__(self, execution_cluster: str):
self._execution_cluster = execution_cluster
    @property
    def execution_cluster(self) -> str:
return self._execution_cluster
def to_flyte_idl(self) -> flyteidl.admin.execution_pb2.SystemMetadata:
return _execution_pb2.SystemMetadata(execution_cluster=self.execution_cluster)
    @classmethod
    def from_flyte_idl(cls, pb2_object: flyteidl.admin.execution_pb2.SystemMetadata) -> SystemMetadata:
return cls(execution_cluster=pb2_object.execution_cluster) |
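# Round-trip sketch for SystemMetadata (assumes the flyteidl protobuf stubs are
# available): to_flyte_idl and from_flyte_idl should be inverses.
meta = SystemMetadata(execution_cluster='cluster-a')
assert SystemMetadata.from_flyte_idl(meta.to_flyte_idl()).execution_cluster == 'cluster-a' |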
def _create_unfiltered_gain_and_loss_set(configuration: Configuration, accounting_engine: AccountingEngine, input_data: InputData, unfiltered_taxable_event_set: TransactionSet) -> GainLossSet:
gain_loss_set: GainLossSet = GainLossSet(configuration, input_data.asset, MIN_DATE, MAX_DATE)
new_accounting_engine: AccountingEngine = accounting_engine.__class__(accounting_engine.years_2_methods)
taxable_event_iterator: Iterator[AbstractTransaction] = iter(cast(Iterable[AbstractTransaction], unfiltered_taxable_event_set))
acquired_lot_iterator: Iterator[InTransaction] = iter(cast(Iterable[InTransaction], input_data.unfiltered_in_transaction_set))
new_accounting_engine.initialize(taxable_event_iterator, acquired_lot_iterator)
try:
gain_loss: GainLoss
taxable_event: AbstractTransaction
acquired_lot: Optional[InTransaction]
taxable_event_amount: RP2Decimal
acquired_lot_amount: RP2Decimal
total_amount: RP2Decimal = ZERO
(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount) = _get_next_taxable_event_and_acquired_lot(new_accounting_engine, None, None, ZERO, ZERO)
while taxable_event:
AbstractTransaction.type_check('taxable_event', taxable_event)
if (acquired_lot is None):
raise RP2RuntimeError("Parameter 'acquired_lot' is None")
InTransaction.type_check('acquired_lot', acquired_lot)
Configuration.type_check_positive_decimal('taxable_event_amount', taxable_event_amount)
Configuration.type_check_positive_decimal('acquired_lot_amount', acquired_lot_amount)
if taxable_event.transaction_type.is_earn_type():
gain_loss = GainLoss(configuration, taxable_event_amount, taxable_event, None)
LOGGER.debug('tax_engine: taxable is earn: %s / %s + %s = %s: %s', taxable_event_amount, total_amount, taxable_event_amount, (total_amount + taxable_event_amount), gain_loss)
total_amount += taxable_event_amount
gain_loss_set.add_entry(gain_loss)
(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount) = new_accounting_engine.get_next_taxable_event_and_amount(taxable_event, acquired_lot, ZERO, acquired_lot_amount)
continue
if (taxable_event_amount == acquired_lot_amount):
gain_loss = GainLoss(configuration, taxable_event_amount, taxable_event, acquired_lot)
LOGGER.debug('tax_engine: taxable == acquired: %s == %s / %s + %s = %s: %s', taxable_event_amount, acquired_lot_amount, total_amount, taxable_event_amount, (total_amount + taxable_event_amount), gain_loss)
total_amount += taxable_event_amount
gain_loss_set.add_entry(gain_loss)
(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount) = _get_next_taxable_event_and_acquired_lot(new_accounting_engine, taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount)
elif (taxable_event_amount < acquired_lot_amount):
gain_loss = GainLoss(configuration, taxable_event_amount, taxable_event, acquired_lot)
LOGGER.debug('tax_engine: taxable < acquired: %s < %s / %s + %s = %s: %s', taxable_event_amount, acquired_lot_amount, total_amount, taxable_event_amount, (total_amount + taxable_event_amount), gain_loss)
total_amount += taxable_event_amount
gain_loss_set.add_entry(gain_loss)
(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount) = new_accounting_engine.get_next_taxable_event_and_amount(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount)
else:
gain_loss = GainLoss(configuration, acquired_lot_amount, taxable_event, acquired_lot)
LOGGER.debug('tax_engine: taxable > acquired: %s > %s / %s + %s = %s: %s', taxable_event_amount, acquired_lot_amount, total_amount, acquired_lot_amount, (total_amount + acquired_lot_amount), gain_loss)
total_amount += acquired_lot_amount
gain_loss_set.add_entry(gain_loss)
(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount) = new_accounting_engine.get_acquired_lot_for_taxable_event(taxable_event, acquired_lot, taxable_event_amount, acquired_lot_amount)
except AcquiredLotsExhaustedException:
raise RP2ValueError('Total in-transaction crypto value < total taxable crypto value') from None
except TaxableEventsExhaustedException:
pass
return gain_loss_set |
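# Minimal sketch of the three-way matching performed above, stripped of the
# accounting-engine plumbing; all names here are hypothetical. Each taxable-event
# amount is paired against acquired-lot amounts, splitting whichever side is larger,
# and exhausting the lots first is an error (mirroring AcquiredLotsExhaustedException).
def match_amounts(event_amounts, lot_amounts):
    events, lots = iter(event_amounts), iter(lot_amounts)
    event, lot = next(events, None), next(lots, None)
    pairs = []
    while event is not None and lot is not None:
        matched = min(event, lot)  # the == case consumes both sides at once
        pairs.append(matched)
        event -= matched
        lot -= matched
        if event == 0:
            event = next(events, None)
        if lot == 0:
            lot = next(lots, None)
    if event is not None:
        raise ValueError('Total acquired-lot amount < total taxable amount')
    return pairs

assert match_amounts([5, 3], [4, 4]) == [4, 1, 3] |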
class WebSocketOutputWriter(BaseOutputWriter):
def __init__(self, socket):
super().__init__(allows_input=False)
self.socket = socket
async def add_interpreter_head_state(self, variable, head, prompt, where, trace, is_valid, is_final, mask, num_tokens, program_variables):
(await self.socket.send_str(json.dumps({'prompt': prompt, 'variables': program_variables.variable_values}))) |
class MaterialLibray(Queryable, smart_union=True):
id: str = Field(title='Material Library ID', description='Material Library ID')
name: str = Field(title='Material Library Name', description='Material Library Name')
medium: Optional[MediumType] = Field(title='medium', description='medium', alias='calcResult')
medium_type: Optional[str] = Field(title='medium type', description='medium type', alias='mediumType')
json_input: Optional[dict] = Field(title='json input', description='original input', alias='jsonInput')
    @validator('medium', 'json_input', pre=True)
    def parse_result(cls, values):
        return json.loads(values)
    @classmethod
    def list(cls) -> List[MaterialLibray]:
        resp = ...  # original API call elided in the source
        return parse_obj_as(List[MaterialLibray], resp) if resp else None |
class Connect(object):
def __init__(self, core: Core):
self.core = core
self._wsconn = None
core.events.game_created += self.refresh_status
core.events.game_started += self.refresh_status
core.events.game_ended += self.refresh_status
core.events.game_aborted += self.refresh_status
core.events.user_state_transition += self.refresh_status
def __repr__(self) -> str:
return self.__class__.__name__
def refresh_status(self, ev: Any) -> Any:
self._refresh_status()
return ev
def speaker(self, name: str, text: str) -> None:
core = self.core
self._wssend({'op': 'Message', 'arg': {'entity': 'Speaker', 'channel': core.options.node, 'text': text}})
        core.runner.sleep(1.5)  # assumed: the call target was stripped in the source; runner.sleep matches the retry loop in _wssend below
def _refresh_status(self) -> None:
core = self.core
self._wssend({'op': 'Message', 'arg': {'entity': 'Interconnect', 'channel': 'users', 'text': json.dumps([core.view.User(u) for u in core.lobby.all_users()])}})
def _wssend(self, v: Any) -> None:
core = self.core
if (not core.options.interconnect):
return
for i in range(3):
try:
conn = self._wsconn
if ((not conn) or (not conn.connected)):
conn = websocket.create_connection(core.options.interconnect)
self._wsconn = conn
conn.send(json.dumps(v))
return
except Exception:
log.exception('Error sending interconnect message')
core.runner.sleep(3)
log.error('WebSocket send with multiple failed attempts, giving up, message: %s', v) |
class TestAddProtocolFailsWhenDifferentPublicId():
    @classmethod
    def setup_class(cls):
cls.runner = CliRunner()
cls.agent_name = 'myagent'
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
cls.protocol_id = 'different_author/default:1.0.0'
shutil.copytree(Path(CUR_PATH, '..', 'packages'), Path(cls.t, 'packages'))
os.chdir(cls.t)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'init', '--author', AUTHOR])
assert (result.exit_code == 0)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'create', '--local', cls.agent_name], standalone_mode=False)
assert (result.exit_code == 0)
os.chdir(cls.agent_name)
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'protocol', cls.protocol_id], standalone_mode=False)
def test_exit_code_equal_to_1(self):
assert (self.result.exit_code == 1)
def test_error_message_protocol_wrong_public_id(self):
s = "Cannot find protocol: '{}'.".format(self.protocol_id)
assert (self.result.exception.message == s)
    @classmethod
    def teardown_class(cls):
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass |
class RefreshMixin(RefreshTokenMutationMixin, RefreshTokenMixin):
def test_refresh_token(self):
with catch_signal(refresh_token_rotated) as refresh_token_rotated_handler, back_to_the_future(seconds=1):
response = self.execute({'refreshToken': self.refresh_token.token})
data = response.data['refreshToken']
token = data['token']
refresh_token = get_refresh_token(data['refreshToken'])
payload = data['payload']
self.assertIsNone(response.errors)
self.assertEqual(refresh_token_rotated_handler.call_count, 1)
self.assertUsernameIn(payload)
self.assertNotEqual(token, self.token)
self.assertGreater(payload['exp'], self.payload['exp'])
self.assertNotEqual(refresh_token.token, self.refresh_token.token)
self.assertEqual(refresh_token.user, self.user)
self.assertGreater(refresh_token.created, self.refresh_token.created)
    @override_jwt_settings(JWT_REUSE_REFRESH_TOKENS=True)
def test_reuse_refresh_token(self):
with catch_signal(refresh_token_rotated) as refresh_token_rotated_handler, back_to_the_future(seconds=1):
response = self.execute({'refreshToken': self.refresh_token.token})
data = response.data['refreshToken']
token = data['token']
refresh_token = get_refresh_token(data['refreshToken'])
payload = data['payload']
self.assertIsNone(response.errors)
self.assertEqual(refresh_token_rotated_handler.call_count, 1)
self.assertUsernameIn(payload)
self.assertNotEqual(token, self.token)
self.assertNotEqual(refresh_token.token, self.refresh_token.token)
def test_missing_refresh_token(self):
response = self.execute({})
self.assertIsNotNone(response.errors)
def test_refresh_token_expired(self):
with refresh_expired():
response = self.execute({'refreshToken': self.refresh_token.token})
self.assertIsNotNone(response.errors) |
class TestRestoreState(TestCase):
def setUp(self) -> None:
self.restore_state = RestoreRunState()
def test_split_path(self) -> None:
with self.subTest('Valid path'):
self.assertEqual(('fb-pc-data-bucket-wnm6', 'logging/logs_T201923/last'), self.restore_state._split_path('s3://fb-pc-data-bucket-wnm6/logging/logs_T201923/last'))
with self.subTest('Valid path 2'):
self.assertEqual(('fb-pc-data-bucket-wnm6', 'last'), self.restore_state._split_path('s3://fb-pc-data-bucket-wnm6/last'))
with self.subTest('Invalid path'):
self.assertIsNone(self.restore_state._split_path('s3://fb-pc-data-bucket-wnm6'))
with self.subTest('Invalid path 2'):
self.assertIsNone(self.restore_state._split_path('fb-pc-data-bucket-wnm6/logging/logs_T201923/last'))
def test_copy_files(self) -> None:
objects = [mock.Mock(key='key1'), mock.Mock(key='key2/')]
self.restore_state.s3 = mock.MagicMock()
bucket = mock.MagicMock()
bucket.objects = mock.MagicMock()
bucket.objects.filter = mock.MagicMock(return_value=objects)
self.restore_state.s3.Bucket = mock.MagicMock(return_value=bucket)
self.restore_state._copy_files('s3://fb-pc-data-bucket-wnm6/logging/last', '/tmp')
bucket.download_file.assert_called_once_with('key1', ANY) |
@pytest.mark.parametrize('ecl_like_file, some_valid_kwords, some_notpresent_kwords', [('E100_BO.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN', 'NNC1'], []), ('E100_BO.GRID', ['DIMENS', 'GRIDUNIT', 'COORDS'], ['FILEHEAD']), ('E300_BO.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN', 'NNC1'], []), ('E300_BO.GRID', ['DIMENS', 'GRIDUNIT', 'COORDS'], ['FILEHEAD']), ('E300_COMP.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN', 'NNC1'], []), ('IX_BO.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN', 'NNC1'], []), ('IX_BO.GRID', ['DIMENS', 'GRIDUNIT', 'COORDS'], ['FILEHEAD']), ('IX_COMP.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN', 'NNC1'], []), ('IX_COMP_GRIDREPORT.EGRID', ['FILEHEAD', 'MAPAXES', 'ZCORN'], ['NNC1'])])
def test_refo_read_grids(ecl_like_file, some_valid_kwords, some_notpresent_kwords):
kwords = []
ktypes = []
for item in resfo.lazy_read((SIMPLEB8_PATH / ecl_like_file)):
kwords.append(item.read_keyword().strip())
ktypes.append(item.read_type().strip())
for kword in some_valid_kwords:
assert (kword in kwords)
for kword in some_notpresent_kwords:
assert (kword not in kwords) |
def create_functions_section(functions):
    section = ''
    for f in functions:
        section += f"<span class='separator'>[</span><span class='function_name'><a href='#{f[0]}'>{f[0]}</a></span><span class='separator'>] <span/>"
    if section != '':
        section = f"</pre><code><span class='section_heading'>Functions</span><br /><br />{section}<br /> <br /><br /><br /></code><pre>"
    return section |
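# Usage sketch for create_functions_section: each entry is a sequence whose first
# element is the function name; an empty input yields an empty string.
print(create_functions_section([('foo',), ('bar',)]))
print(create_functions_section([]))  # -> '' |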
class Faq(SoftDeletionModel):
id = db.Column(db.Integer, primary_key=True)
question = db.Column(db.String, nullable=False)
answer = db.Column(db.String, nullable=False)
event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
faq_type_id = db.Column(db.Integer, db.ForeignKey('faq_types.id', ondelete='CASCADE'))
def __repr__(self):
return ('<FAQ %r>' % self.question) |
class InlineQueryResultCachedMpeg4Gif(InlineQueryResultCachedBase):
def __init__(self, id, mpeg4_file_id, title=None, description=None, caption=None, caption_entities=None, parse_mode=None, reply_markup=None, input_message_content=None):
InlineQueryResultCachedBase.__init__(self)
self.type = 'mpeg4_gif'
self.id = id
self.mpeg4_file_id = mpeg4_file_id
self.title = title
self.description = description
self.caption = caption
self.caption_entities = caption_entities
self.reply_markup = reply_markup
self.input_message_content = input_message_content
self.parse_mode = parse_mode
self.payload_dic['mpeg4_file_id'] = mpeg4_file_id |
def test_py_func_task_get_container():
def foo(i: int):
pass
default_img = Image(name='default', fqn='xyz.com/abc', tag='tag1')
other_img = Image(name='other', fqn='xyz.com/other', tag='tag-other')
cfg = ImageConfig(default_image=default_img, images=[default_img, other_img])
settings = SerializationSettings(project='p', domain='d', version='v', image_config=cfg, env={'FOO': 'bar'})
pytask = PythonFunctionTask(None, foo, None, environment={'BAZ': 'baz'})
c = pytask.get_container(settings)
assert (c.image == 'xyz.com/abc:tag1')
assert (c.env == {'FOO': 'bar', 'BAZ': 'baz'}) |
def extractGuavareadCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
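# Usage sketch for the extract* feed handlers (the item layout is inferred from the
# code above; extractVolChapterFragmentPostfix and buildReleaseMessageWithType come
# from the surrounding scraper framework):
tagged = {'title': 'Some Novel - Chapter 12', 'tags': ['PRC']}
release = extractGuavareadCom(tagged)  # a release message for the 'PRC' group
assert extractGuavareadCom({'title': 'Chapter 3 preview', 'tags': []}) is None |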
class FieldType(ABC, Generic[T]):
    @abstractmethod
    def isinstance(self, obj: Any) -> bool:
        raise NotImplementedError()
    @abstractmethod
    def description(self) -> str:
        raise NotImplementedError()
    @abstractmethod
    def python_type(self) -> type:
        raise NotImplementedError()
    @abstractmethod
    def size(self) -> Optional[int]:
        raise NotImplementedError()
def preprocess(self, value: T) -> Any:
return value
def postprocess(self, value: Any) -> T:
assert self.isinstance(value)
return value |
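# Hedged sketch of a concrete FieldType subclass (illustrative only, not from the
# original codebase): a fixed-size integer field implementing the abstract methods.
class IntFieldType(FieldType[int]):
    def isinstance(self, obj: Any) -> bool:
        return isinstance(obj, int)  # resolves to the builtin, not this method
    def description(self) -> str:
        return 'a signed 64-bit integer'
    def python_type(self) -> type:
        return int
    def size(self) -> Optional[int]:
        return 8 |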
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--model', type=str, default='pix2pix', help='which model to use')
parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--blur', type=float, default=0.005, help='blur in OT')
parser.add_argument('--correspondence', type=str, default='ot', help='ot, euc')
parser.add_argument('--ot_weight', action='store_true', help='use euc distance as weight of ot')
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=('resize_and_crop', 'crop', 'scale_width', 'scale_width_and_crop', 'scale_shortside', 'scale_shortside_and_crop', 'fixed', 'none'))
parser.add_argument('--load_size', type=int, default=256, help='Scale images to this size. The final image will be cropped to --crop_size.')
parser.add_argument('--crop_size', type=int, default=256, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dontcare_label.')
parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
parser.add_argument('--dataroot', type=str, default='/mnt/blob/Dataset/ADEChallengeData2016/images')
parser.add_argument('--dataset_mode', type=str, default='ade20k')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
parser.add_argument('--display_winsize', type=int, default=400, help='display window size')
parser.add_argument('--netG', type=str, default='seace', help='selects model to use for netG (pix2pixhd | spade | seace)')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
parser.add_argument('--z_dim', type=int, default=256, help='dimension of the latent z vector')
parser.add_argument('--CBN_intype', type=str, default='warp_mask', help='type of CBN input for framework, warp/mask/warp_mask')
parser.add_argument('--maskmix', action='store_true', help='use mask in correspondence net')
parser.add_argument('--use_attention', action='store_true', help='and nonlocal block in G and D')
parser.add_argument('--warp_mask_losstype', type=str, default='none', help='type of warped mask loss, none/direct/cycle')
parser.add_argument('--show_warpmask', action='store_true', help='save warp mask')
parser.add_argument('--match_kernel', type=int, default=3, help='correspondence matrix match kernel size')
parser.add_argument('--adaptor_kernel', type=int, default=3, help='kernel size in domain adaptor')
parser.add_argument('--PONO', action='store_true', help='use positional normalization ')
parser.add_argument('--PONO_C', action='store_true', help='use C normalization in corr module')
parser.add_argument('--eqlr_sn', action='store_true', help='if true, use equlr, else use sn')
parser.add_argument('--vgg_normal_correct', action='store_true', help='if true, correct vgg normalization and replace vgg FM model with ctx model')
parser.add_argument('--weight_domainC', type=float, default=0.0, help='weight of Domain classification loss for domain adaptation')
parser.add_argument('--domain_rela', action='store_true', help='if true, use Relativistic loss in domain classifier')
parser.add_argument('--use_ema', action='store_true', help='if true, use EMA in G')
parser.add_argument('--ema_beta', type=float, default=0.999, help='beta in ema setting')
parser.add_argument('--warp_cycle_w', type=float, default=0.0, help='push warp cycle to ref')
parser.add_argument('--two_cycle', action='store_true', help='input to ref and back')
parser.add_argument('--apex', action='store_true', help='if true, use apex')
parser.add_argument('--warp_bilinear', action='store_true', help='if true, upsample warp by bilinear')
parser.add_argument('--adaptor_res_deeper', action='store_true', help='if true, use 6 res block in domain adaptor')
parser.add_argument('--adaptor_nonlocal', action='store_true', help='if true, use nonlocal block in domain adaptor')
parser.add_argument('--adaptor_se', action='store_true', help='if true, use se layer in domain adaptor')
parser.add_argument('--dilation_conv', action='store_true', help='if true, use dilation conv in domain adaptor when adaptor_res_deeper is True')
parser.add_argument('--use_coordconv', action='store_true', help='if true, use coordconv in CorrNet')
parser.add_argument('--warp_patch', action='store_true', help='use corr matrix to warp 4*4 patch')
parser.add_argument('--warp_stride', type=int, default=4, help='corr matrix 256 / warp_stride')
parser.add_argument('--mask_noise', action='store_true', help='use noise with mask')
parser.add_argument('--noise_for_mask', action='store_true', help='replace mask with noise')
parser.add_argument('--video_like', action='store_true', help='useful in deepfashion')
self.initialized = True
return parser
def gather_options(self):
if (not self.initialized):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
(opt, unknown) = parser.parse_known_args()
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
dataset_mode = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_mode)
parser = dataset_option_setter(parser, self.isTrain)
(opt, unknown) = parser.parse_known_args()
if opt.load_from_opt_file:
parser = self.update_options_from_file(parser, opt)
opt = parser.parse_args()
self.parser = parser
return opt
def print_options(self, opt):
message = ''
message += ' Options \n'
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += ' End '
print(message)
def option_file_path(self, opt, makedir=False):
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
if makedir:
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt')
return file_name
def save_options(self, opt):
file_name = self.option_file_path(opt, makedir=True)
with open((file_name + '.txt'), 'wt') as opt_file:
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
with open((file_name + '.pkl'), 'wb') as opt_file:
pickle.dump(opt, opt_file)
def update_options_from_file(self, parser, opt):
new_opt = self.load_options(opt)
for (k, v) in sorted(vars(opt).items()):
if (hasattr(new_opt, k) and (v != getattr(new_opt, k))):
new_val = getattr(new_opt, k)
parser.set_defaults(**{k: new_val})
return parser
def load_options(self, opt):
file_name = self.option_file_path(opt, makedir=False)
new_opt = pickle.load(open((file_name + '.pkl'), 'rb'))
return new_opt
def parse(self, save=False):
opt = self.gather_options()
opt.isTrain = self.isTrain
self.print_options(opt)
if opt.isTrain:
self.save_options(opt)
opt.semantic_nc = (opt.label_nc + (1 if opt.contain_dontcare_label else 0))
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
opt.gpu_ids.append(id)
if (len(opt.gpu_ids) > 0):
torch.cuda.set_device(opt.gpu_ids[0])
assert ((len(opt.gpu_ids) == 0) or ((opt.batchSize % len(opt.gpu_ids)) == 0)), ('Batch size %d is wrong. It must be a multiple of # GPUs %d.' % (opt.batchSize, len(opt.gpu_ids)))
self.opt = opt
return self.opt |
def llvm_build_dir(tool):
generator_suffix = cmake_generator_prefix()
bitness_suffix = ('_32' if (tool.bitness == 32) else '_64')
if hasattr(tool, 'git_branch'):
build_dir = ((('build_' + tool.git_branch.replace(os.sep, '-')) + generator_suffix) + bitness_suffix)
else:
build_dir = ((('build_' + tool.version) + generator_suffix) + bitness_suffix)
return build_dir |
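# Usage sketch for llvm_build_dir; cmake_generator_prefix() is a module-level helper
# assumed by the function, and the tool object is stubbed here.
from types import SimpleNamespace

tool = SimpleNamespace(bitness=64, git_branch='release/17.x')
print(llvm_build_dir(tool))  # e.g. 'build_release-17.x<generator>_64' |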
class LineSegmentTool(AbstractOverlay):
component = Instance(Component)
line = Instance(Line, args=())
points = List
event_state = Enum('normal', 'selecting', 'dragging')
proximity_distance = Int(4)
mouse_position = Optional(Tuple)
_dragged = Optional(Int)
_drag_new_point = Bool(False)
_prev_event_state = Any
original_cursor = Pointer('arrow')
normal_cursor = Pointer('pencil')
delete_cursor = Pointer('bullseye')
move_cursor = Pointer('sizing')
visible = Bool(False)
def __init__(self, component=None, **kwtraits):
if ('component' in kwtraits):
component = kwtraits['component']
super().__init__(**kwtraits)
self.component = component
self.reset()
self.line.line_dash = (4.0, 2.0)
def reset(self):
self.points = []
self.event_state = 'normal'
self.visible = False
self.request_redraw()
def _activate(self):
pass
def _deactivate(self, component=None):
self.reset()
def add_point(self, point):
self.points.append(self._map_data(point))
def get_point(self, index):
return self._map_screen(self.points[index])
def set_point(self, index, point):
self.points[index] = self._map_data(point)
def remove_point(self, index):
del self.points[index]
def normal_left_down(self, event):
over = self._over_point(event, self.line.points)
if (over is not None):
if event.control_down:
self.points.pop(over)
self.line.points = list(self.component.map_screen(array(self.points)))
self.request_redraw()
else:
self.event_state = 'dragging'
self._dragged = over
self._drag_new_point = False
self.dragging_mouse_move(event)
else:
self.points.append(self._map_data((event.x, event.y)))
self._dragged = (- 1)
self._drag_new_point = True
self.visible = True
self.event_state = 'dragging'
self.dragging_mouse_move(event)
def normal_mouse_move(self, event):
over = self._over_point(event, self.line.points)
if (over is not None):
if event.control_down:
event.window.set_pointer(self.delete_cursor)
else:
event.window.set_pointer(self.move_cursor)
else:
event.handled = False
event.window.set_pointer(self.normal_cursor)
self.request_redraw()
def normal_draw(self, gc):
self.line.points = list(self.component.map_screen(array(self.points)))
self.line._draw(gc)
def normal_key_pressed(self, event):
if (event.character == 'Enter'):
self._finalize_selection()
self.reset()
def normal_mouse_leave(self, event):
event.window.set_pointer('arrow')
def dragging_mouse_move(self, event):
mouse_position = self._map_data((event.x, event.y))
self.points[self._dragged] = mouse_position
self.line.points = list(self.component.map_screen(array(self.points)))
self.request_redraw()
def dragging_draw(self, gc):
self.line._draw(gc)
def dragging_left_up(self, event):
self.event_state = 'normal'
self._dragged = None
self.updated = self
def dragging_key_pressed(self, event):
if (event.character == 'Esc'):
self._cancel_drag()
def dragging_mouse_leave(self, event):
self._cancel_drag()
event.window.set_pointer('arrow')
def _cancel_drag(self):
        if self._dragged is not None:
if self._drag_new_point:
self.points.pop(self._dragged)
self._dragged = None
self.mouse_position = None
self.event_state = 'normal'
self.request_redraw()
def overlay(self, component, gc, view_bounds, mode='normal'):
draw_func = getattr(self, (self.event_state + '_draw'), None)
if draw_func:
with gc:
gc.clip_to_rect(component.x, component.y, (component.width - 1), (component.height - 1))
draw_func(gc)
def request_redraw(self):
self.component.invalidate_draw()
self.component.request_redraw()
def _map_data(self, point):
x_mapper = self.component.x_mapper
y_mapper = self.component.y_mapper
if (self.component.orientation == 'h'):
ndx = x_mapper.map_data(point[0])
val = y_mapper.map_data(point[1])
else:
val = x_mapper.map_data(point[0])
ndx = y_mapper.map_data(point[1])
return (ndx, val)
def _map_screen(self, point):
x_mapper = self.component.x_mapper
y_mapper = self.component.y_mapper
if (self.component.orientation == 'h'):
x = x_mapper.map_screen(point[0])
y = y_mapper.map_screen(point[1])
else:
x = x_mapper.map_screen(point[1])
y = y_mapper.map_screen(point[0])
return (x, y)
def _is_near_point(self, point, event):
event_point = (event.x, event.y)
return ((abs((point[0] - event_point[0])) + abs((point[1] - event_point[1]))) <= self.proximity_distance)
def _over_point(self, event, points):
for (i, point) in enumerate(points):
if self._is_near_point(point, event):
result = i
break
else:
result = None
return result
def _finalize_selection(self):
pass
def _component_changed(self, old, new):
if new:
self.container = new |
class OptionSeriesTreemapSonificationContexttracksMappingGapbetweennotes(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class SnapshotDefineView(APIView):
order_by_default = ('vm__hostname', '-id')
order_by_fields = ('name', 'disk_id')
order_by_field_map = {'hostname': 'vm__hostname', 'created': 'id'}
def get(self, vm, define, many=False, extended=False):
if extended:
ser_class = ExtendedSnapshotDefineSerializer
else:
ser_class = SnapshotDefineSerializer
if many:
if (self.full or self.extended):
if define:
res = ser_class(self.request, define, many=True).data
else:
res = []
else:
res = list(define.values_list('name', flat=True))
else:
res = ser_class(self.request, define).data
return SuccessTaskResponse(self.request, res, vm=vm)
    @_vm_operational
def post(self, vm, define, **kwargs):
data2 = define_schedule_defaults(define.name)
data2.update(self.data)
ser = SnapshotDefineSerializer(self.request, define, data=data2)
if (not ser.is_valid()):
return FailureTaskResponse(self.request, ser.errors, vm=vm)
ser.object.save()
return SuccessTaskResponse(self.request, ser.data, status=scode.HTTP_201_CREATED, vm=vm, detail_dict=detail_dict('snapdef', ser), msg=LOG_SNAPDEF_CREATE)
    @_vm_operational
def put(self, vm, define, **kwargs):
ser = SnapshotDefineSerializer(self.request, define, data=self.data, partial=True)
if (not ser.is_valid()):
return FailureTaskResponse(self.request, ser.errors, vm=vm)
ser.object.save()
return SuccessTaskResponse(self.request, ser.data, vm=vm, detail_dict=detail_dict('snapdef', ser), msg=LOG_SNAPDEF_UPDATE)
    @_vm_operational
def delete(self, vm, define, **kwargs):
ser = SnapshotDefineSerializer(self.request, define)
ser.object.delete()
return SuccessTaskResponse(self.request, None, vm=vm, detail_dict=detail_dict('snapdef', ser, data={}), msg=LOG_SNAPDEF_DELETE)
    @classmethod
    def create_from_template(cls, request, vm, vm_define_snapshot, log=logger):
if (vm_define_snapshot and isinstance(vm_define_snapshot, list)):
request = set_request_method(request, 'POST')
for (i, data) in enumerate(vm_define_snapshot):
try:
try:
snapdef = data['snapdef']
except KeyError:
snapdef = data['name']
(disk_id, real_disk_id, zfs_filesystem) = get_disk_id(request, vm, data)
log.info('Creating snapshot definition [%d] "%s" for vm=%s, disk_id=%d defined by template %s', i, snapdef, vm, disk_id, vm.template)
define = get_object(request, SnapshotDefine, {'name': snapdef, 'vm': vm, 'disk_id': real_disk_id}, sr=('vm', 'periodic_task', 'periodic_task__crontab'))
res = cls(request, data=data).post(vm, define)
if (res.status_code != scode.HTTP_201_CREATED):
raise APIError(('vm_define_snapshot error [%s]: %s' % (res.status_code, res.data)))
except Exception as ex:
log.warn('Failed to create snapshot definition [%d] for vm=%s defined by template %s with data="%s". Error: %s', i, vm, vm.template, data, ex) |
def draw_frame(image, face_landmarks):
mp_drawing.draw_landmarks(image=image, landmark_list=face_landmarks, connections=mp_face_mesh.FACEMESH_TESSELATION, landmark_drawing_spec=None, connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style())
mp_drawing.draw_landmarks(image=image, landmark_list=face_landmarks, connections=mp_face_mesh.FACEMESH_CONTOURS, landmark_drawing_spec=None, connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style())
mp_drawing.draw_landmarks(image=image, landmark_list=face_landmarks, connections=mp_face_mesh.FACEMESH_IRISES, landmark_drawing_spec=None, connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_iris_connections_style())
frame = cv2.flip(image, 1)
cv2.putText(frame, current_morse, (620, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.imshow('face', frame) |
def test_setup_load_lists(monkeypatch):
test_object = {'table': {'val1': 4, 'string_val': 'bob'}, 'wrong_table': {'val': 'wrong'}}
(columns, values, pairs) = format_insert_or_update_column_sql(mock_cursor(monkeypatch, 'mogrified'), test_object, 'table')
assert ((columns == '("val1","string_val")') or (columns == '("string_val","val1")'))
assert ((values == '(4,bob)') or (values == '(bob,4)'))
assert ((pairs == ' val1=4, string_val=bob') or (pairs == ' string_val=bob, val1=4')) |
class RelocationSection(Section):
def __init__(self, header, name, elffile):
super(RelocationSection, self).__init__(header, name, elffile)
if (self.header['sh_type'] == 'SHT_REL'):
expected_size = self.structs.Elf_Rel.sizeof()
self.entry_struct = self.structs.Elf_Rel
elif (self.header['sh_type'] == 'SHT_RELA'):
expected_size = self.structs.Elf_Rela.sizeof()
self.entry_struct = self.structs.Elf_Rela
else:
elf_assert(False, 'Unknown relocation type section')
elf_assert((self.header['sh_entsize'] == expected_size), ('Expected sh_entsize of SHT_REL section to be %s' % expected_size))
def is_RELA(self):
return (self.header['sh_type'] == 'SHT_RELA')
def num_relocations(self):
return (self['sh_size'] // self['sh_entsize'])
def get_relocation(self, n):
entry_offset = (self['sh_offset'] + (n * self['sh_entsize']))
entry = struct_parse(self.entry_struct, self.stream, stream_pos=entry_offset)
return Relocation(entry, self.elffile)
def iter_relocations(self):
for i in range(self.num_relocations()):
(yield self.get_relocation(i)) |
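# Usage sketch with pyelftools: locate a relocation section in an ELF and walk its
# entries (the path and section name are placeholders).
from elftools.elf.elffile import ELFFile

with open('/bin/ls', 'rb') as f:
    elf = ELFFile(f)
    sec = elf.get_section_by_name('.rela.dyn')
    if sec is not None:
        for reloc in sec.iter_relocations():
            print(hex(reloc['r_offset']), reloc['r_info_type']) |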
class TestAllLoaders(unittest.TestCase):
def test_rc4(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/loader_rc4_static_key_in_key_class.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderRc4(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/loader_rc4_second_key_0.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderRc4(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/loader_rc4_key_0.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderRc4(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/loader_rc4_multiple_stage.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderRc4(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
def test_inflate(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/inflate.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderMultidex(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/inflate2.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderMultidex(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/default_dex_protector.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderMultidex(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
def test_inflate_second(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/protect_key_chines_manifest_without_zlib.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderMultidex(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
def test_subapp(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/subapp.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
rc4 = LoaderSubapp(apk_object, dvms, output_dir=None)
res = rc4.main()
assert (res['status'] == 'success')
if rc4.decrypted_payload_path:
os.remove(rc4.decrypted_payload_path)
def test_moqhao(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/moqhao.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
moqhao = LoaderMoqhao(apk_object, dvms, output_dir=None)
res = moqhao.main()
assert (res['status'] == 'success')
if moqhao.decrypted_payload_path:
os.remove(moqhao.decrypted_payload_path)
def test_coper(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/coper.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
coper = LoaderCoper(apk_object, dvms, output_dir=None)
res = coper.main()
assert (res['status'] == 'success')
if coper.decrypted_payload_path:
os.remove(coper.decrypted_payload_path)
def test_sesdex(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/sesdex.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
sesdex = LoaderSesdex(apk_object, dvms, output_dir=None)
res = sesdex.main()
assert (res['status'] == 'success')
if sesdex.decrypted_payload_path:
os.remove(sesdex.decrypted_payload_path)
def test_multidex_header(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/multidex_without_header.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
mwheader = LoaderMultidexHeader(apk_object, dvms, output_dir=None)
res = mwheader.main()
assert (res['status'] == 'success')
if mwheader.decrypted_payload_path:
os.remove(mwheader.decrypted_payload_path)
def test_simple_xor(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/simplexor.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
sxorzlib = LoaderSimpleXor(apk_object, dvms, output_dir=None)
res = sxorzlib.main()
assert (res['status'] == 'success')
if sxorzlib.decrypted_payload_path:
os.remove(sxorzlib.decrypted_payload_path)
def test_simple_xor2(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/simple_xor2.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
sxor2 = LoaderSimpleXor2(apk_object, dvms, output_dir=None)
res = sxor2.main()
assert (res['status'] == 'success')
if sxor2.decrypted_payload_path:
os.remove(sxor2.decrypted_payload_path)
def test_simple_xor_zlib(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/simple_xor_zlib_base64.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
sxorzlib = LoaderSimpleXorZlib(apk_object, dvms, output_dir=None)
res = sxorzlib.main()
assert (res['status'] == 'success')
if sxorzlib.decrypted_payload_path:
os.remove(sxorzlib.decrypted_payload_path)
filename = os.path.join(os.path.dirname(__file__), './test_apk/simple_skip4_zlib_base64.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
sxorzlib = LoaderSimpleXorZlib(apk_object, dvms, output_dir=None)
res = sxorzlib.main()
assert (res['status'] == 'success')
if sxorzlib.decrypted_payload_path:
os.remove(sxorzlib.decrypted_payload_path)
def test_simple_aes(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/simpleaes.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
saes = LoaderSimpleAes(apk_object, dvms, output_dir=None)
res = saes.main()
assert (res['status'] == 'success')
if saes.decrypted_payload_path:
os.remove(saes.decrypted_payload_path)
def test_kangapack(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/kangapack.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
skanga = LoaderKangaPack(apk_object, dvms, output_dir=None)
res = skanga.main()
assert (res['status'] == 'success')
if skanga.decrypted_payload_path:
os.remove(skanga.decrypted_payload_path)
def test_pronlocker(self):
filename = os.path.join(os.path.dirname(__file__), './test_apk/pronlocker.apk')
apk_object = APK(filename)
dvms = [DEX(dex) for dex in apk_object.get_all_dex()]
spron = LoaderPr0nLocker(apk_object, dvms, output_dir=None)
res = spron.main()
assert (res['status'] == 'success')
if spron.decrypted_payload_path:
os.remove(spron.decrypted_payload_path) |
(_gemm_matmul)
def do_matmul_i8(N: size, M: size, K: size, A: [i8][N, 16] @ GEMM_SCRATCH, B: [i8][K, 16] @ GEMM_SCRATCH, C: [i32][N, 16] @ GEMM_ACCUM):
assert (N <= 16)
assert (M <= 16)
assert (K <= 16)
for i in seq(0, N):
for j in seq(0, M):
C[(i, j)] = 0.0
for k in seq(0, K):
a: i32
b: i32
a = A[(i, k)]
b = B[(k, j)]
C[(i, j)] += (a * b) |
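# Plain-Python reference for the Exo proc above (a hypothetical helper, for clarity
# only): zero-initialize C, then accumulate the N x K by K x M product in int32.
def matmul_i8_ref(N, M, K, A, B):
    C = [[0] * M for _ in range(N)]
    for i in range(N):
        for j in range(M):
            for k in range(K):
                C[i][j] += A[i][k] * B[k][j]
    return C |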
def test_fdata_fast_channel_index_conversion(tmpdir, merge_lis_prs):
fpath = os.path.join(str(tmpdir), 'fast-channels-index-conversion.lis')
content = ((headers + ['data/lis/records/curves/dfsr-fast-conversion.lis.part', 'data/lis/records/curves/fdata-fast-conversion.lis.part']) + trailers)
merge_lis_prs(fpath, content)
with lis.load(fpath) as (f,):
dfs = f.data_format_specs()[0]
with pytest.raises(RuntimeError) as exc:
_ = lis.curves(f, dfs, sample_rate=2)
assert ('Unable to create integral index' in str(exc.value)) |
def test_fdata_suppressed_bad(tmpdir, merge_lis_prs):
fpath = os.path.join(str(tmpdir), 'suppressed-bad.lis')
content = ((headers + ['data/lis/records/curves/dfsr-suppressed-bad.lis.part']) + trailers)
merge_lis_prs(fpath, content)
with lis.load(fpath) as (f,):
dfs = f.data_format_specs()[0]
with pytest.raises(ValueError) as exc:
_ = lis.curves(f, dfs)
assert ('Invalid number of entries per sample' in str(exc.value)) |
class TestSF1DailyData():
data_classes = []
if os.path.exists(config['sf1_data_path']):
data_classes.append(SF1DailyData)
if (secrets['mongodb_adminusername'] is not None):
data_classes.append(SF1DailyDataMongo)
    @pytest.mark.parametrize('data_loader_class', data_classes)
    @pytest.mark.parametrize(['tickers', 'days_count'], [(['AAPL', 'ZRAN', 'TSLA', 'WORK'], 100), (['INTC', 'ZRAN', 'XRDC', 'XOM', 'PNK'], 50), (['INTC', 'ZRAN', 'XRDC', 'XOM'], None), (['NVDA'], 100), (['ZRAN'], 10)])
def test_load(self, tickers, days_count, data_loader_class):
data_loader = data_loader_class(days_count=days_count)
daily_df = data_loader.load(tickers)
assert (type(daily_df) == pd.DataFrame)
assert ('ticker' in daily_df.columns)
assert ('date' in daily_df.columns)
assert (len(daily_df.drop_duplicates(['ticker', 'date'])) == len(daily_df))
daily_df['date_'] = daily_df['date'].astype(np.datetime64)
daily_df['def_order'] = range(len(daily_df))[::(- 1)]
expected_dates_order = daily_df.sort_values(['ticker', 'date_'], ascending=False)['date'].values
real_dates_order = daily_df.sort_values(['ticker', 'def_order'], ascending=False)['date'].values
np.testing.assert_array_equal(expected_dates_order, real_dates_order)
diffs = (daily_df.groupby('ticker')['date_'].shift(1) - daily_df['date_'])
assert (diffs.dropna() <= np.timedelta64(14, 'D')).min()
if (days_count is not None):
for cnt in daily_df.groupby('ticker').size():
assert (cnt == days_count) |
def create_reg_sphere(settings, subject_id, meshes):
(FS_reg_sphere_name, MSMSulc_reg_sphere_name) = get_reg_sphere_names()
run_fs_reg_LR(subject_id, settings.ciftify_data_dir, settings.high_res, FS_reg_sphere_name, meshes['AtlasSpaceNative'])
if (settings.reg_name == 'MSMSulc'):
reg_sphere_name = MSMSulc_reg_sphere_name
run_MSMSulc_registration(subject_id, settings.ciftify_data_dir, meshes, reg_sphere_name, FS_reg_sphere_name, settings.msm_config)
else:
reg_sphere_name = FS_reg_sphere_name
return reg_sphere_name |
def save_oura_token(token_dict):
app.session.execute(delete(apiTokens).where((apiTokens.service == 'Oura')))
try:
app.session.add(apiTokens(date_utc=datetime.utcnow(), service='Oura', tokens=pickle.dumps(token_dict)))
app.session.commit()
    except Exception:
app.session.rollback()
app.session.remove() |
def test_validate_with_no_runtime_config(jp_environ):
runner = CliRunner()
with runner.isolated_filesystem():
pipeline_file_path = (((Path(__file__).parent / 'resources') / 'pipelines') / 'kfp_3_node_custom.pipeline')
result = runner.invoke(pipeline, ['validate', str(pipeline_file_path)])
assert ('Validating pipeline...' in result.output)
assert ("[Error] - This pipeline contains at least one runtime-specific component, but pipeline runtime is 'local'" in result.output)
assert (result.exit_code != 0) |
class Purge(commands.Cog, name='Purge'):
def __init__(self, client):
self.client = client
async def cog_check(self, ctx):
return self.client.user_is_admin(ctx.author)
    @commands.command(name='purge', hidden=True)
async def purge(self, ctx, num_messages: int):
channel = ctx.message.channel
(await ctx.message.delete())
(await channel.purge(limit=num_messages))
return True
    @commands.command(name='purge_until', hidden=True)
async def purge_until(self, ctx, message_id: int):
channel = ctx.message.channel
try:
message = (await channel.fetch_message(message_id))
except errors.NotFound:
(await ctx.send('Message could not be found in this channel'))
return
(await ctx.message.delete())
(await channel.purge(after=message))
return True
    @commands.command(name='purge_user', hidden=True, aliases=['purgeuser'])
async def purge_user(self, ctx, user: User, num_minutes: typing.Optional[int]=5):
after = (ctx.message.created_at - timedelta(minutes=num_minutes))
def check(msg):
return (msg.author.id == user.id)
for channel in (await ctx.guild.fetch_channels()):
if (type(channel) is TextChannel):
try:
(await channel.purge(limit=(10 * num_minutes), check=check, after=after))
except Forbidden:
continue |
class CDCDUT(ConverterDUT):
def do_finalize(self):
self.write_user_port.clock_domain = 'user'
self.read_user_port.clock_domain = 'user'
self.write_crossbar_port.clock_domain = 'native'
self.read_crossbar_port.clock_domain = 'native'
self.submodules.write_converter = LiteDRAMNativePortCDC(port_from=self.write_user_port, port_to=self.write_crossbar_port)
self.submodules.read_converter = LiteDRAMNativePortCDC(port_from=self.read_user_port, port_to=self.read_crossbar_port) |
@_ARCH_REGISTRY.register()
class MetaArchForTest(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
    @property
    def device(self):
return self.conv.weight.device
def forward(self, inputs):
if (not self.training):
return self.inference(inputs)
images = [x['image'] for x in inputs]
images = ImageList.from_tensors(images, 1).to(self.device)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return {'loss': ret.norm()}
def inference(self, inputs):
instance = Instances((10, 10))
instance.pred_boxes = Boxes(torch.tensor([[2.5, 2.5, 7.5, 7.5]]))
instance.scores = torch.tensor([0.9])
instance.pred_classes = torch.tensor([1], dtype=torch.int32)
ret = [{'instances': instance}]
return ret |
def CreateSoftmaxOperator(manifest, rank=3):
operation_kind = library.OperationKind.Softmax
in_dtype = library.DataType.f16
out_dtype = library.DataType.f16
tile_descriptions = [softmax.TileDesc(256, 8, 32, 1, 8, 1, 1, 1), softmax.TileDesc(256, 8, 32, 1, 8, 1, 8, 8), softmax.TileDesc(256, 4, 64, 1, 8, 1, 8, 8), softmax.TileDesc(256, 2, 128, 1, 8, 1, 8, 8), softmax.TileDesc(256, 2, 128, 1, 16, 1, 8, 8), softmax.TileDesc(256, 2, 128, 1, 32, 1, 8, 8), softmax.TileDesc(256, 1, 256, 1, 8, 1, 8, 8), softmax.TileDesc(256, 1, 256, 1, 16, 1, 8, 8), softmax.TileDesc(256, 1, 256, 1, 32, 1, 8, 8)]
operations = []
for tile_desc in tile_descriptions:
new_operation = softmax.SoftmaxOperation(operation_kind=operation_kind, extra_kind=rank, In=in_dtype, Out=out_dtype, Rank=rank, NumReduceDim=1, tile_desc=tile_desc)
manifest.append(new_operation)
operations.append(new_operation)
return operations |
class OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
def extractWeabooDesu(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False |
def test_to_from_bytes():
bf = BloomFilter(size=100, hash_funcs=2)
for ii in range(0, 1000, 20):
bf.add(ii)
data = bf.to_bytes()
bf2 = BloomFilter()
for ii in range(0, 1000, 20):
assert (ii not in bf2)
bf2.from_bytes(data)
for ii in range(0, 1000, 20):
assert (ii in bf2)
assert (bf2.to_bytes() == data) |
@pytest.mark.parametrize('contract_fn,event_name,call_args,expected_args', (('logNoArgs', 'LogAnonymous', [], {}), ('logNoArgs', 'LogNoArguments', [], {}), ('logSingle', 'LogSingleArg', [12345], {'arg0': 12345}), ('logSingle', 'LogSingleWithIndex', [12345], {'arg0': 12345}), ('logSingle', 'LogSingleAnonymous', [12345], {'arg0': 12345}), ('logDouble', 'LogDoubleArg', [12345, 54321], {'arg0': 12345, 'arg1': 54321}), ('logDouble', 'LogDoubleAnonymous', [12345, 54321], {'arg0': 12345, 'arg1': 54321}), ('logDouble', 'LogDoubleWithIndex', [12345, 54321], {'arg0': 12345, 'arg1': 54321}), ('logTriple', 'LogTripleArg', [12345, 54321, 98765], {'arg0': 12345, 'arg1': 54321, 'arg2': 98765}), ('logTriple', 'LogTripleWithIndex', [12345, 54321, 98765], {'arg0': 12345, 'arg1': 54321, 'arg2': 98765}), ('logQuadruple', 'LogQuadrupleArg', [12345, 54321, 98765, 56789], {'arg0': 12345, 'arg1': 54321, 'arg2': 98765, 'arg3': 56789}), ('logQuadruple', 'LogQuadrupleWithIndex', [12345, 54321, 98765, 56789], {'arg0': 12345, 'arg1': 54321, 'arg2': 98765, 'arg3': 56789})))
def test_event_data_extraction(w3, emitter, wait_for_transaction, emitter_contract_log_topics, emitter_contract_event_ids, contract_fn, event_name, call_args, expected_args):
emitter_fn = emitter.functions[contract_fn]
event_id = getattr(emitter_contract_event_ids, event_name)
txn_hash = emitter_fn(event_id, *call_args).transact()
txn_receipt = wait_for_transaction(w3, txn_hash)
assert (len(txn_receipt['logs']) == 1)
log_entry = txn_receipt['logs'][0]
event_abi = emitter._find_matching_event_abi(event_name)
event_topic = getattr(emitter_contract_log_topics, event_name)
is_anonymous = event_abi['anonymous']
if is_anonymous:
assert (event_topic not in log_entry['topics'])
else:
assert (event_topic in log_entry['topics'])
event_data = get_event_data(w3.codec, event_abi, log_entry)
assert (event_data['args'] == expected_args)
assert (event_data['blockHash'] == txn_receipt['blockHash'])
assert (event_data['blockNumber'] == txn_receipt['blockNumber'])
assert (event_data['transactionIndex'] == txn_receipt['transactionIndex'])
assert is_same_address(event_data['address'], emitter.address)
assert (event_data['event'] == event_name) |
class OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremolo(Options):
def depth(self) -> 'OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremoloDepth':
return self._config_sub_data('depth', OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremoloDepth)
def speed(self) -> 'OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremoloSpeed':
return self._config_sub_data('speed', OptionSeriesFunnel3dSonificationDefaultinstrumentoptionsMappingTremoloSpeed) |
class AppEngineAppIterator(ResourceIterator):
def iter(self):
gcp = self.client
if self.resource.enumerable():
try:
(data, metadata) = gcp.fetch_gae_app(project_id=self.resource['projectId'])
if data:
(yield FACTORIES['appengine_app'].create_new(data, metadata=metadata))
except ResourceNotSupported as e:
LOGGER.debug(e) |
class ResponseValidationError(PySOAServerError):
def __init__(self, action, errors):
self.action = action
self.errors = errors
def __str__(self):
return '{} had an invalid response:\n\t{}'.format(self.action, '\n\t'.join(('{} {}: {}'.format(error.pointer, error.code, error.message) for error in self.errors))) |
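# Hypothetical usage of ResponseValidationError above; the namedtuple is a
# stand-in for PySOA's Error type, which the class only reads through
# .pointer, .code and .message.
from collections import namedtuple
Error = namedtuple('Error', 'pointer code message')
exc = ResponseValidationError('get_user', [Error('body.id', 'INVALID', 'not an integer')])
print(str(exc))
# get_user had an invalid response:
#     body.id INVALID: not an integer |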
_OptParam.register_type(BGP_OPT_CAPABILITY)
class _OptParamCapability(_OptParam, TypeDisp):
_CAP_HDR_PACK_STR = '!BB'
def __init__(self, cap_code=None, cap_value=None, cap_length=None, type_=None, length=None):
super(_OptParamCapability, self).__init__(type_=BGP_OPT_CAPABILITY, length=length)
if (cap_code is None):
cap_code = self._rev_lookup_type(self.__class__)
self.cap_code = cap_code
if (cap_value is not None):
self.cap_value = cap_value
if (cap_length is not None):
self.cap_length = cap_length
@classmethod
def parse_value(cls, buf):
caps = []
while (len(buf) > 0):
(code, length) = struct.unpack_from(cls._CAP_HDR_PACK_STR, six.binary_type(buf))
value = buf[struct.calcsize(cls._CAP_HDR_PACK_STR):]
buf = buf[(length + 2):]
kwargs = {'cap_code': code, 'cap_length': length}
subcls = cls._lookup_type(code)
kwargs.update(subcls.parse_cap_value(value))
caps.append(subcls(type_=BGP_OPT_CAPABILITY, length=(length + 2), **kwargs))
return caps
def serialize_value(self):
cap_value = self.serialize_cap_value()
self.cap_length = len(cap_value)
buf = bytearray()
msg_pack_into(self._CAP_HDR_PACK_STR, buf, 0, self.cap_code, self.cap_length)
return (buf + cap_value) |
@patch.object(EcsClient, '__init__')
def test_update_service(client, service):
client.update_service.return_value = RESPONSE_SERVICE
action = EcsAction(client, CLUSTER_NAME, SERVICE_NAME)
new_service = action.update_service(service)
assert isinstance(new_service, EcsService)
client.update_service.assert_called_once_with(cluster=service.cluster, service=service.name, desired_count=None, task_definition=service.task_definition) |
class Solution():
def shortestDistance(self, grid: List[List[int]]) -> int:
def find_neighbors(pos, grid, visited):
deltas = [((- 1), 0), (0, (- 1)), (0, 1), (1, 0)]
for (yd, xd) in deltas:
(y, x) = ((pos[0] + yd), (pos[1] + xd))
if (not (0 <= y < len(grid))):
continue
if (not (0 <= x < len(grid[0]))):
continue
if ((y, x) in visited):
continue
if (grid[y][x] == 0):
(yield (y, x))
buildings = []
for (i, row) in enumerate(grid):
for (j, elem) in enumerate(row):
if (elem == 1):
buildings.append((i, j))
visited = dict(((building, {}) for building in buildings))
queue = deque()
for building in buildings:
queue.append((building, 0, building))
while queue:
(pos, step, start) = queue.popleft()
if (pos in visited[start]):
continue
visited[start][pos] = step
for ne in find_neighbors(pos, grid, visited[start]):
queue.append((ne, (step + 1), start))
mm = None
track = {}
for (start, costs) in visited.items():
for (pos, step) in costs.items():
if (pos in visited):
continue
if (pos not in track):
track[pos] = [step, 1]
else:
track[pos] = [(track[pos][0] + step), (track[pos][1] + 1)]
if (track[pos][1] == len(visited)):
if ((mm is None) or (track[pos][0] < mm)):
mm = track[pos][0]
return (mm if mm else (- 1)) |
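# Quick check for the BFS Solution above (assumed in scope along with its
# deque/List imports), using the classic grid where 0 is empty land, 1 a
# building and 2 an obstacle; the known shortest total distance is 7.
grid = [[1, 0, 2, 0, 1], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0]]
print(Solution().shortestDistance(grid))  # expected: 7 |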
class GustafssonFullNewton_dt_controller(SC_base):
def __init__(self, model, nOptions):
from .LinearAlgebraTools import WeightedNorm
import copy
SC_base.__init__(self, model, nOptions)
self.nonlinearGrowthRateMax = 2
self.nonlinearGrowthRateMin = 0.5
self.nonlinearConvergenceRate_ref = 0.2
self.nonlinearIterations_ref = 4
self.useInitialGuessPredictor = False
self.predictorHistoryIsValid = False
self.errorGrowthRateMax = 2
self.errorGrowthRateMin = 0.5
self.errorSafetyFactor = 0.8
self.atol_u = nOptions.atol_u
self.rtol_u = nOptions.rtol_u
self.timeEps = 1e-12
for flag in ['nonlinearGrowthRateMax', 'nonlinearGrowthRateMin', 'nonlinearConvergenceRate_ref', 'nonlinearIterations_ref', 'useInitialGuessPredictor', 'stepExact', 'errorGrowthRateMax', 'errorGrowthRateMin', 'errorSafetyFactor']:
if (flag in dir(nOptions)):
val = getattr(nOptions, flag)
setattr(self, flag, val)
self.dt_nm1 = None
self.unm1ListSave = []
for (mi, u) in zip(self.model.levelModelList, self.model.uList):
self.unm1ListSave.append(copy.deepcopy(u))
nOptions.computeNonlinearSolverRates = True
self.useTemporalErrorEstimate = self.model.levelModelList[(- 1)].timeIntegration.isAdaptive
self.errorNorm = None
if (self.useTemporalErrorEstimate and (self.model.levelModelList[(- 1)].timeIntegration.error_estimate is not None)):
self.errorNorm = {}
for ci in list(self.model.levelModelList[(- 1)].timeIntegration.error_estimate.keys()):
self.errorNorm[ci] = WeightedNorm(self.model.levelModelList[(- 1)].timeIntegration.error_estimate[ci].shape, self.atol_u[ci], self.rtol_u[ci])
self.timeErrorTolerance = 1.0
self.use_cfl_for_initial_dt = True
self.cfl_for_initial_dt = 0.001
def setInitialGuess(self, uList, rList):
for m in self.model.levelModelList:
m.timeIntegration.setInitialGuess()
if (self.useInitialGuessPredictor and self.predictorHistoryIsValid):
assert ((len(self.unm1ListSave) == len(uList)) and (len(uList) == len(self.uListSave)))
for (m, r, u, un, unm1) in zip(self.model.levelModelList, rList, uList, self.uListSave, self.unm1ListSave):
u[:] = un[:]
u -= unm1
u *= old_div((self.t_model - self.t_model_last), (self.dt_nm1 + 1e-16))
u += un
m.setFreeDOF(u)
m.getResidual(u, r)
def saveSolution(self):
for (u, r, uSave, rSave, unSave) in zip(self.model.uList, self.model.rList, self.uListSave, self.rListSave, self.unm1ListSave):
unSave[:] = uSave
uSave[:] = u
rSave[:] = r
def retryStep_solverFailure(self):
self.solverFailures += 1
retry = False
if (self.solverFailures < self.maxSolverFailures):
self.resetSolution()
self.dt_model = self.choose_dt_solverFailure(self.dt_model)
if (self.dt_model > (self.t_model_last * 1e-08)):
self.set_dt_allLevels()
self.setSubsteps([self.t_model])
retry = True
else:
logEvent('Time step reduced to machine precision', level=1)
self.writeSolverStatisticsForStep()
else:
self.writeSolverStatisticsForStep()
return retry
def choose_dt_solverFailure(self, dt):
alpha = self.model.solver.solverList[(- 1)].gustafsson_alpha
nnl = self.model.solver.solverList[(- 1)].its
alpha_ref = self.nonlinearConvergenceRate_ref
nnl_ref = float(self.nonlinearIterations_ref)
r_a = 1.0
if (alpha_ref > alpha):
assert (nnl > 0.0)
r_a = self.phi(old_div(nnl_ref, nnl))
else:
assert (alpha > 0.0)
r_a = self.phi(old_div(alpha_ref, alpha))
r = min(self.nonlinearGrowthRateMax, max(self.nonlinearGrowthRateMin, r_a))
dtout = (dt * r)
if (r >= 1.0):
import pdb
pdb.set_trace()
assert (r < 1.0), ('Gustafsson solver failure r= %s should have r decrease dt_in= %s alpha=%s alpha_ref=%s nnl=%s nnl_ref=%s r_a=%s, dtout=%s ' % (r, dt, alpha, alpha_ref, nnl, nnl_ref, r_a, dtout))
logEvent(('Gustafsson solver failure dt_in= %s alpha=%s alpha_ref=%s nnl=%s nnl_ref=%s r_a=%s, dtout=%s ' % (dt, alpha, alpha_ref, nnl, nnl_ref, r_a, dtout)), level=1)
return dtout
def choose_dt_solverSuccess(self, dt):
alpha = self.model.solver.solverList[(- 1)].gustafsson_alpha
nnl = self.model.solver.solverList[(- 1)].its
alpha_ref = self.nonlinearConvergenceRate_ref
nnl_ref = float(self.nonlinearIterations_ref)
assert ((alpha >= 0.0) or (nnl == 1)), ('invalid alpha = %s nnl = %d ' % (alpha, nnl))
if (alpha <= 0.0):
r_a = self.nonlinearGrowthRateMax
else:
r_a = self.phi(old_div(alpha_ref, alpha))
r = min(self.nonlinearGrowthRateMax, max(self.nonlinearGrowthRateMin, r_a))
dtout = (dt * r)
logEvent(('Gustafsson solver success dt_in= %s alpha=%s alpha_ref=%s nnl=%s nnl_ref=%s r_a=%s, dtout=%s ' % (dt, alpha, alpha_ref, nnl, nnl_ref, r_a, dtout)), level=1)
return dtout
def retryStep_errorFailure(self):
self.errorFailures += 1
retry = False
if (self.errorFailures < self.maxErrorFailures):
self.resetSolution()
dt_e = self.choose_dt_fromError(self.dt_model)
dt_a = self.choose_dt_solverSuccess(self.dt_model)
if self.useTemporalErrorEstimate:
self.dt_model = min(dt_e, dt_a)
else:
self.dt_model = dt_a
if (self.dt_model > (self.t_model_last * 1e-08)):
self.set_dt_allLevels()
self.setSubsteps([self.t_model])
logEvent(('Gustafsson error failure dt_e= %s dt_a=%s dt_model=%s' % (dt_e, dt_a, self.dt_model)), level=1)
retry = True
else:
logEvent('Time step reduced to machine precision', level=1)
self.writeSolverStatisticsForStep()
else:
self.writeSolverStatisticsForStep()
return retry
def estimateError(self):
mFine = self.model.levelModelList[(- 1)]
if ((not mFine.timeIntegration.provides_dt_estimate) and (mFine.timeIntegration.error_estimate is not None) and self.useTemporalErrorEstimate):
error = mFine.timeIntegration.error_estimate
localError = {}
for ci in list(error.keys()):
localError[ci] = self.errorNorm[ci].norm(error[ci], 2)
self.errorEstimate = max(localError.values())
else:
self.errorEstimate = None
logEvent(('Gustafsson estimateError t=%s dt=%s error= %s' % (self.t_model, self.dt_model, self.errorEstimate)))
def choose_dt_fromError(self, dtIn):
for m in self.model.levelModelList:
m.timeIntegration.choose_dt()
mFine = self.model.levelModelList[(- 1)]
if ((not mFine.timeIntegration.provides_dt_estimate) and (mFine.timeIntegration.error_estimate is not None)):
ordInv = old_div(1.0, (mFine.timeIntegration.timeOrder + 1.0))
minErr = max(self.errorEstimate, self.timeEps)
r = (self.errorSafetyFactor * (old_div(self.timeErrorTolerance, minErr) ** ordInv))
r_e = min(self.errorGrowthRateMax, max(self.errorGrowthRateMin, r))
dt_e = (r_e * dtIn)
logEvent(('Gustafsson choose_dt_fromError self t=%s dt=%s error= %s minErr= %s r_e=%s r= %s' % (self.t_model, self.dt_model, self.errorEstimate, minErr, r_e, r)))
return dt_e
else:
assert mFine.timeIntegration.provides_dt_estimate
return mFine.timeIntegration.dt
def initialize_dt_model(self, t0, tOut):
self.saveSolution()
self.t_model_last = t0
for m in self.model.levelModelList:
m.timeIntegration.initialize_dt(t0, tOut, m.q)
if self.use_cfl_for_initial_dt:
maxCFL = 1e-06
for ci in range(m.nc):
if (('cfl', ci) in m.q):
maxCFL = max(maxCFL, globalMax(m.q[('cfl', ci)].max()))
logEvent(('Gustafsson cfl initial step ci = %s maxCFL= %s ' % (ci, maxCFL)))
self.dt_model = min(old_div(self.cfl_for_initial_dt, maxCFL), m.timeIntegration.dt)
else:
self.dt_model = m.timeIntegration.dt
self.dt_model = min(self.dt_model, (0.001 * (tOut - t0)))
self.set_dt_allLevels()
self.setSubsteps([self.t_model])
logEvent(('Gustafsson Initializing time step on model %s to dt = %12.5e t_model_last= %s' % (self.model.name, self.dt_model, self.t_model_last)), level=1)
def choose_dt_model(self):
dt_e = self.choose_dt_fromError(self.dt_model)
dt_a = self.choose_dt_solverSuccess(self.dt_model)
if self.useTemporalErrorEstimate:
self.dt_model = min(dt_e, dt_a)
else:
self.dt_model = dt_a
self.set_dt_allLevels()
self.setSubsteps([self.t_model])
logEvent(('Gustafsson choose_dt_model dt_e= %s dt_a=%s t_model_last= %s dt_model=%s t_model= %s' % (dt_e, dt_a, self.t_model_last, self.dt_model, self.t_model)), level=1)
def updateTimeHistory(self, resetFromDOF=False):
logEvent(('Gustafsson updateTimeHistory t_model_last= %s dt_model=%s t_model=%s' % (self.t_model_last, self.dt_model, self.t_model)), level=1)
self.writeSolverStatisticsForStep()
self.solverFailures = 0
self.errorFailures = 0
self.saveSolution()
self.predictorHistoryIsValid = True
self.dt_nm1 = self.dt_model
self.t_model_last = self.t_model
for m in self.model.levelModelList:
m.updateTimeHistory(resetFromDOF)
m.timeIntegration.updateTimeHistory(resetFromDOF)
if (self.errorNorm is not None):
for ci in range(self.model.levelModelList[(- 1)].nc):
if (('m', ci) in self.model.levelModelList[(- 1)].q):
self.errorNorm[ci].setWeight(self.model.levelModelList[(- 1)].q[('m', ci)])
def errorFailure(self):
mFine = self.model.levelModelList[(- 1)]
timeIntegratorOk = mFine.timeIntegration.lastStepErrorOk()
self.estimateError()
if mFine.timeIntegration.provides_dt_estimate:
return (not timeIntegratorOk)
return (self.errorEstimate >= self.timeErrorTolerance)
def phi(self, x):
return float(x)
def set_dt_allLevels(self):
self.t_model = (self.t_model_last + self.dt_model)
for m in self.model.levelModelList:
m.timeIntegration.set_dt(self.dt_model)
def stepExact_model(self, tOut):
if (self.t_model > (tOut - (tOut * 1e-08))):
logEvent(('StepControl Gustafsson stepExact t_model= %s tOut= %s t_model_last= %s dt_model= %s setting to %s ' % (self.t_model, tOut, self.t_model_last, self.dt_model, (tOut - self.t_model_last))), 1)
self.dt_model = (tOut - self.t_model_last)
self.set_dt_allLevels()
self.setSubsteps([self.t_model]) |
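# Worked example of the step-size clamp used by choose_dt_solverSuccess above:
# with alpha_ref = 0.2, alpha = 0.8 and growth bounds [0.5, 2.0], the raw
# ratio 0.25 is clamped up to 0.5, so the next time step is halved.
alpha_ref, alpha, dt = 0.2, 0.8, 0.01
r = min(2.0, max(0.5, alpha_ref / alpha))
print(r, dt * r)  # 0.5 0.005 |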
class RateLimiter(logging.Filter):
def __init__(self, rate: int=3600):
self.rate = rate
self._sent = {}
def filter(self, record: logging.LogRecord) -> bool:
key = '{}:{}'.format(record.pathname, record.lineno)
try:
if (self.rate > (record.created - self._sent[key])):
return False
except KeyError:
pass
self._sent[key] = record.created
return True |
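# Usage sketch for the RateLimiter filter above: records from the same
# pathname:lineno call site are suppressed for `rate` seconds (60 here,
# chosen only for the example).
import logging
logger = logging.getLogger('noisy')
logger.addFilter(RateLimiter(rate=60))
for _ in range(3):
    logger.warning('logged at most once per minute per call site') |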
@pytest.mark.django_db
def test_sibling_filters_on_both_siblings(client, monkeypatch, elasticsearch_award_index, multiple_awards_with_sibling_tas):
_setup_es(client, monkeypatch, elasticsearch_award_index)
resp = query_by_tas(client, {'require': [_tas_path(SISTER_TAS[0]), _tas_path(SISTER_TAS[1])]})
results = sorted(resp.json()['results'], key=(lambda elem: elem['internal_id']))
expected = sorted([_award1(), _award2()], key=(lambda elem: elem['internal_id']))
assert (results == expected)  # list.sort() returns None, so compare sorted copies instead |
@mark.parametrize('input_,is_primitive,expected', [param({'value': 99, 'obj': SimpleClassPrimitiveConf(a={'foo': '${value}'}, b=[1, '${value}'])}, True, SimpleClass(a={'foo': 99}, b=[1, 99]), id='primitive_specified_true'), param({'value': 99, 'obj': SimpleClassNonPrimitiveConf(a={'foo': '${value}'}, b=[1, '${value}'])}, False, SimpleClass(a={'foo': 99}, b=[1, 99]), id='primitive_specified_false'), param({'value': 99, 'obj': SimpleClassDefaultPrimitiveConf(a={'foo': '${value}'}, b=[1, '${value}'])}, False, SimpleClass(a={'foo': 99}, b=[1, 99]), id='default_behavior')])
def test_convert_in_config(instantiate_func: Any, input_: Any, is_primitive: bool, expected: Any) -> None:
cfg = OmegaConf.create(input_)
ret = instantiate_func(cfg.obj)
assert (ret == expected)
if is_primitive:
assert isinstance(ret.a, dict)
assert isinstance(ret.b, list)
else:
assert isinstance(ret.a, DictConfig)
assert isinstance(ret.b, ListConfig) |
def get_created_same_day(nid, pinned, limit) -> List[IndexNote]:
try:
nidMinusOneDay = (nid - (((24 * 60) * 60) * 1000))
nidPlusOneDay = (nid + (((24 * 60) * 60) * 1000))
res = mw.col.db.all(('select distinct notes.id, flds, tags, did, mid from notes left join cards on notes.id = cards.nid where nid > %s and nid < %s order by nid desc' % (nidMinusOneDay, nidPlusOneDay)))
dayOfNote = int(time.strftime('%d', time.localtime((nid / 1000))))
rList = []
c = 0
for r in res:
dayCreated = int(time.strftime('%d', time.localtime((int(r[0]) / 1000))))
if (dayCreated != dayOfNote):
continue
if (not (str(r[0]) in pinned)):
rList.append(IndexNote((r[0], r[1], r[2], r[3], r[1], (- 1), r[4], '')))
c += 1
if (c >= limit):
break
return rList
except:
return [] |
class TestProcessValue(object):
@pytest.fixture
def setting_info(self):
return {'value_type': 'range', 'input_range': [100, 2000, 100], 'output_range': [2, 40, 2]}
@pytest.fixture
def setting_info2(self):
return {'value_type': 'range', 'input_range': [1, 20, 1], 'output_range': [1000, 40000, 2000], 'range_length_byte': 2}
@pytest.mark.parametrize('input_,expected_output', [(100, 2), (200, 4), (2000, 40), (149, 2), (150, 4), ('300', 6)])
def test_range_values(self, setting_info, input_, expected_output):
assert (range_.process_value(setting_info, input_) == [expected_output])
@pytest.mark.parametrize('input_,expected_output', [(1, [232, 3]), (2, [184, 11]), (20, [88, 152]), (14, [120, 105])])
def test_range_values2(self, setting_info2, input_, expected_output):
assert (range_.process_value(setting_info2, input_) == expected_output) |
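# Editor's sketch of a process_value consistent with the expectations in
# TestProcessValue above (the real range_ module may differ): snap the input
# onto its step grid, map it to the output range, then emit the result as a
# little-endian byte list sized by range_length_byte (default 1).
def process_value(setting_info, value):
    in_lo, _in_hi, in_step = setting_info['input_range']
    out_lo, _out_hi, out_step = setting_info['output_range']
    steps = int((int(value) - in_lo + in_step / 2) // in_step)  # round to nearest step
    out = out_lo + steps * out_step
    n_bytes = setting_info.get('range_length_byte', 1)
    return list(out.to_bytes(n_bytes, 'little')) |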
class ClockSources(object):
def __init__(self):
self.sources = {}
self.merged_sources = {}
self.source_to_cmt = {}
self.used_sources_from_cmt = {}
def add_clock_source(self, source, cmt):
if (cmt not in self.sources):
self.sources[cmt] = []
self.sources[cmt].append(source)
assert ((source not in self.source_to_cmt) or (self.source_to_cmt[source] == cmt)), source
self.source_to_cmt[source] = cmt
def get_random_source(self, cmt):
if (cmt not in self.merged_sources):
choices = []
if ('ANY' in self.sources):
choices.extend(self.sources['ANY'])
if (cmt in self.sources):
choices.extend(self.sources[cmt])
(x, y) = CMT_XY_FUN(cmt)
if ((x % 2) == 0):
x += 1
else:
x -= 1
paired_cmt = 'X{}Y{}'.format(x, y)
if (paired_cmt in self.sources):
choices.extend(self.sources[paired_cmt])
self.merged_sources[cmt] = choices
if self.merged_sources[cmt]:
source = random.choice(self.merged_sources[cmt])
source_cmt = self.source_to_cmt[source]
if (source_cmt not in self.used_sources_from_cmt):
self.used_sources_from_cmt[source_cmt] = set()
self.used_sources_from_cmt[source_cmt].add(source)
if ((source_cmt != 'ANY') and (len(self.used_sources_from_cmt[source_cmt]) > 14)):
print('//', self.used_sources_from_cmt)
self.used_sources_from_cmt[source_cmt].remove(source)
return None
else:
return source |
class ChannelStatsCollector():
def __init__(self):
self.reset_channel_stats()
def reset_channel_stats(self):
self.communication_stats = {ChannelDirection.CLIENT_TO_SERVER: RandomVariableStatsTracker(), ChannelDirection.SERVER_TO_CLIENT: RandomVariableStatsTracker()}
def get_channel_stats(self):
return self.communication_stats
def collect_channel_stats(self, message_size_bytes: float, client_to_server: bool=True):
direction = (ChannelDirection.CLIENT_TO_SERVER if client_to_server else ChannelDirection.SERVER_TO_CLIENT)
self.communication_stats[direction].update(message_size_bytes) |
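# Call-pattern sketch for ChannelStatsCollector above; ChannelDirection and
# RandomVariableStatsTracker come from the surrounding codebase and are
# assumed to be importable here.
stats = ChannelStatsCollector()
stats.collect_channel_stats(1024.0, client_to_server=True)
stats.collect_channel_stats(2048.0, client_to_server=False)
per_direction = stats.get_channel_stats()
tracker = per_direction[ChannelDirection.CLIENT_TO_SERVER]  # one tracker per direction |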
def get_bpftrace_basic_examples(query: str) -> str:
loader = JSONLoader(file_path='./tools/examples.json', jq_schema='.data[].content', json_lines=True)
documents = loader.load()
embeddings = OpenAIEmbeddings()
if (not (os.path.exists('./data_save/vector_db.faiss') and os.path.exists('./data_save/vector_db.pkl'))):
db = FAISS.from_documents(documents, embeddings)
db.save_local('./data_save', index_name='vector_db')
else:
db = FAISS.load_local('./data_save', index_name='vector_db', embeddings=embeddings)
results = db.search(query, search_type='similarity')
results = [result.page_content for result in results]
return '\n'.join(results[:2]) |
class CRawHead(ctypes.Structure):
_fields_ = [('headPosX', ctypes.c_double), ('headPosY', ctypes.c_double), ('headPosZ', ctypes.c_double), ('headYaw', ctypes.c_double), ('headPitch', ctypes.c_double), ('headRoll', ctypes.c_double), ('headPoseConfidence', ctypes.c_double), ('headTranslationSpeedX', ctypes.c_double), ('headTranslationSpeedY', ctypes.c_double), ('headTranslationSpeedZ', ctypes.c_double), ('headRotationSpeedX', ctypes.c_double), ('headRotationSpeedY', ctypes.c_double), ('headRotationSpeedZ', ctypes.c_double), ('headSpeedConfidence', ctypes.c_double)] |
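# Round-trip illustration for the ctypes struct above: pack 14 doubles and
# reinterpret them through CRawHead (the values are arbitrary).
import ctypes
import struct
raw = struct.pack('14d', *range(14))
head = CRawHead.from_buffer_copy(raw)
print(ctypes.sizeof(CRawHead), head.headYaw)  # 112 3.0 |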
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'policyid'
fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'firewall_dos_policy': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
for attribute_name in module_spec['options']:
fields['firewall_dos_policy']['options'][attribute_name] = module_spec['options'][attribute_name]
if (mkeyname and (mkeyname == attribute_name)):
fields['firewall_dos_policy']['options'][attribute_name]['required'] = True
module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
check_legacy_fortiosapi(module)
is_error = False
has_changed = False
result = None
diff = None
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if ('access_token' in module.params):
connection.set_option('access_token', module.params['access_token'])
if ('enable_log' in module.params):
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_dos_policy')
(is_error, has_changed, result, diff) = fortios_firewall(module.params, fos, module.check_mode)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if (versions_check_result and (versions_check_result['matched'] is False)):
module.warn('Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv')
if (not is_error):
if (versions_check_result and (versions_check_result['matched'] is False)):
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
else:
module.exit_json(changed=has_changed, meta=result, diff=diff)
elif (versions_check_result and (versions_check_result['matched'] is False)):
module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg='Error in repo', meta=result) |
class TestXLSWorkSheet(unittest.TestCase):
def setUp(self):
self.ws = XLSWorkSheet('Test Worksheet')
def test_work_sheet(self):
ws = self.ws
self.assertEqual(ws.title, 'Test Worksheet')
self.assertEqual(ws.last_column, 'A')
self.assertEqual(ws.last_row, 1)
def test_work_sheet_truncate_long_title(self):
short_title = 'Short'
ws = XLSWorkSheet(short_title)
self.assertEqual(ws.title, short_title)
long_title = 'This is a very very very very very very long title'
ws = XLSWorkSheet(long_title)
self.assertEqual(ws.title, long_title[0:31])
def test_insert_single_items(self):
ws = self.ws
self.assertEqual(ws['A1'], None)
self.assertEqual(ws['AB2'], None)
ws['A1'] = 'Some data'
self.assertEqual(ws['A1'], 'Some data')
self.assertEqual(ws['A']['1'], 'Some data')
ws['A1'] = 'Updated data'
self.assertEqual(ws['A1'], 'Updated data')
ws['A']['1'] = 'Updated again'
self.assertEqual(ws['A']['1'], 'Updated again')
def test_del_single_item(self):
ws = self.ws
self.assertEqual(ws['B4'], None)
ws['B4'] = 'Some data'
self.assertEqual(ws['B4'], 'Some data')
del ws['B4']
self.assertEqual(ws['B4'], None)
def test_last_column_and_row(self):
ws = self.ws
ws['A1'] = 'Some data'
self.assertEqual(ws.last_column, 'A')
self.assertEqual(ws.last_row, 1)
ws['A4'] = 'More data'
self.assertEqual(ws.last_column, 'A')
self.assertEqual(ws.last_row, 4)
ws['D2'] = 'Even more data'
self.assertEqual(ws.last_column, 'D')
self.assertEqual(ws.last_row, 4)
ws['E11'] = 'More data again'
self.assertEqual(ws.last_column, 'E')
self.assertEqual(ws.last_row, 11)
del ws['E11']
self.assertEqual(ws.last_column, 'D')
self.assertEqual(ws.last_row, 4)
def test_column_is_empty(self):
ws = self.ws
self.assertTrue(ws.column_is_empty('A'))
self.assertTrue(ws.column_is_empty('D'))
ws['A1'] = 'Some data'
self.assertFalse(ws.column_is_empty('A'))
self.assertTrue(ws.column_is_empty('D'))
ws['D3'] = 'More data'
self.assertFalse(ws.column_is_empty('A'))
self.assertFalse(ws.column_is_empty('D'))
def test_row_is_empty(self):
ws = self.ws
self.assertTrue(ws.row_is_empty(1))
self.assertTrue(ws.row_is_empty(3))
ws['A1'] = 'Some data'
self.assertFalse(ws.row_is_empty(1))
self.assertTrue(ws.row_is_empty(3))
ws['D3'] = 'More data'
self.assertFalse(ws.row_is_empty(1))
self.assertFalse(ws.row_is_empty(3))
def test_columnof(self):
ws = self.ws
ws.insert_row_data(1, ('hello', 'goodbye', 'whatev'))
self.assertEqual(ws.columnof('hello'), 'A')
self.assertEqual(ws.columnof('goodbye'), 'B')
self.assertEqual(ws.columnof('whatev'), 'C')
self.assertRaises(LookupError, ws.columnof, 'nowhere')
def test_insert_column(self):
ws = self.ws
ws.insert_row_data(1, ('hello', 'goodbye', 'whatev'))
self.assertEqual(ws['A1'], 'hello')
self.assertEqual(ws['B1'], 'goodbye')
self.assertEqual(ws['C1'], 'whatev')
self.assertEqual(ws.last_column, 'C')
ws.insert_column('A', text='bonjour')
self.assertEqual(ws.last_column, 'D')
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['B1'], 'hello')
self.assertEqual(ws['C1'], 'goodbye')
self.assertEqual(ws['D1'], 'whatev')
ws.insert_column('C', data=('au revoir',))
self.assertEqual(ws.last_column, 'E')
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['B1'], 'hello')
self.assertEqual(ws['C1'], 'au revoir')
self.assertEqual(ws['D1'], 'goodbye')
self.assertEqual(ws['E1'], 'whatev')
ws.insert_column('H', data=('hola',))
self.assertEqual(ws.last_column, 'H')
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['B1'], 'hello')
self.assertEqual(ws['C1'], 'au revoir')
self.assertEqual(ws['D1'], 'goodbye')
self.assertEqual(ws['E1'], 'whatev')
self.assertEqual(ws['F1'], None)
self.assertEqual(ws['G1'], None)
self.assertEqual(ws['H1'], 'hola')
ws.insert_column('G', data=('adios',))
self.assertEqual(ws.last_column, 'I')
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['B1'], 'hello')
self.assertEqual(ws['C1'], 'au revoir')
self.assertEqual(ws['D1'], 'goodbye')
self.assertEqual(ws['E1'], 'whatev')
self.assertEqual(ws['F1'], None)
self.assertEqual(ws['G1'], 'adios')
self.assertEqual(ws['H1'], None)
self.assertEqual(ws['I1'], 'hola')
def test_append_column(self):
ws = self.ws
ws.insert_row_data(1, ('hello', 'goodbye', 'whatev'))
self.assertEqual(ws['A1'], 'hello')
self.assertEqual(ws['B1'], 'goodbye')
self.assertEqual(ws['C1'], 'whatev')
ws.append_column(data=('au revoir',))
self.assertEqual(ws['A1'], 'hello')
self.assertEqual(ws['B1'], 'goodbye')
self.assertEqual(ws['C1'], 'whatev')
self.assertEqual(ws['D1'], 'au revoir')
def test_write_column_with_list(self):
ws = self.ws
col_data = ['hello', 'goodbye', 'whatev']
exp_cell = ['B1', 'B2', 'B3']
ws.write_column('B', data=col_data)
self.assertEqual(ws.last_column, 'B')
self.assertEqual(ws.last_row, 3)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
exp_cell = ['C3', 'C4', 'C5']
ws.write_column('C', data=col_data, from_row=3)
self.assertEqual(ws.last_column, 'C')
self.assertEqual(ws.last_row, 5)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
def test_write_column_with_text(self):
ws = self.ws
col_data = ['hello', 'goodbye', 'whatev']
exp_cell = ['A1', 'A2', 'A3']
ws.write_column('A', text='hello\ngoodbye\nwhatev')
self.assertEqual(ws.last_column, 'A')
self.assertEqual(ws.last_row, 3)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
exp_cell = ['M7', 'M8', 'M9']
ws.write_column('M', text='hello\ngoodbye\nwhatev', from_row=7)
self.assertEqual(ws.last_column, 'M')
self.assertEqual(ws.last_row, 9)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
def test_write_column_with_fill(self):
ws = self.ws
ws.write_column('A', data=['some', 'random', 'items'])
ws.write_column('L', fill='50')
self.assertEqual(ws.last_column, 'L')
self.assertEqual(ws.last_row, 3)
for idx in ('L1', 'L2', 'L3'):
self.assertEqual(ws[idx], '50')
ws.write_column('M', fill='50', from_row=2)
self.assertEqual(ws.last_column, 'M')
self.assertEqual(ws.last_row, 3)
self.assertEqual(ws['M1'], None)
for idx in ('M2', 'M3'):
self.assertEqual(ws[idx], '50')
def test_insert_column_data(self):
ws = self.ws
col_data = ['hello', 'goodbye', 'whatev']
exp_cell = ['B1', 'B2', 'B3']
ws.insert_column_data('B', col_data)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
exp_cell = ['C3', 'C4', 'C5']
ws.insert_column_data('C', col_data, start=3)
for i in range(3):
self.assertEqual(ws[exp_cell[i]], col_data[i])
def test_insert_row(self):
ws = self.ws
ws.insert_column_data('A', ('hello', 'goodbye', 'whatev'))
self.assertEqual(ws['A1'], 'hello')
self.assertEqual(ws['A2'], 'goodbye')
self.assertEqual(ws['A3'], 'whatev')
self.assertEqual(ws.last_row, 3)
ws.insert_row(1, text='bonjour')
self.assertEqual(ws.last_row, 4)
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['A2'], 'hello')
self.assertEqual(ws['A3'], 'goodbye')
self.assertEqual(ws['A4'], 'whatev')
ws.insert_row(3, data=('au revoir',))
self.assertEqual(ws.last_row, 5)
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['A2'], 'hello')
self.assertEqual(ws['A3'], 'au revoir')
self.assertEqual(ws['A4'], 'goodbye')
self.assertEqual(ws['A5'], 'whatev')
ws.insert_row(8, data=('hola',))
self.assertEqual(ws.last_row, 8)
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['A2'], 'hello')
self.assertEqual(ws['A3'], 'au revoir')
self.assertEqual(ws['A4'], 'goodbye')
self.assertEqual(ws['A5'], 'whatev')
self.assertEqual(ws['A6'], None)
self.assertEqual(ws['A7'], None)
self.assertEqual(ws['A8'], 'hola')
ws.insert_row(7, data=('adios',))
self.assertEqual(ws.last_row, 9)
self.assertEqual(ws['A1'], 'bonjour')
self.assertEqual(ws['A2'], 'hello')
self.assertEqual(ws['A3'], 'au revoir')
self.assertEqual(ws['A4'], 'goodbye')
self.assertEqual(ws['A5'], 'whatev')
self.assertEqual(ws['A6'], None)
self.assertEqual(ws['A7'], 'adios')
self.assertEqual(ws['A8'], None)
self.assertEqual(ws['A9'], 'hola')
def test_write_row_with_list(self):
ws = self.ws
row_data = ['Dozy', 'Beaky', 'Mick', 'Titch']
exp_cell = ['A4', 'B4', 'C4', 'D4']
ws.write_row(4, data=row_data)
self.assertEqual(ws.last_column, 'D')
self.assertEqual(ws.last_row, 4)
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
exp_cell = ['E5', 'F5', 'G5', 'H5']
ws.write_row(5, data=row_data, from_column='E')
self.assertEqual(ws.last_column, 'H')
self.assertEqual(ws.last_row, 5)
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
def test_write_row_with_text(self):
ws = self.ws
row_data = ['Dozy', 'Beaky', 'Mick', 'Titch']
row_text = 'Dozy\tBeaky\tMick\tTitch'
exp_cell = ['A4', 'B4', 'C4', 'D4']
ws.write_row(4, text=row_text)
self.assertEqual(ws.last_column, 'D')
self.assertEqual(ws.last_row, 4)
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
exp_cell = ['E5', 'F5', 'G5', 'H5']
ws.write_row(5, text=row_text, from_column='E')
self.assertEqual(ws.last_column, 'H')
self.assertEqual(ws.last_row, 5)
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
def test_insert_row_data(self):
ws = self.ws
row_data = ['Dozy', 'Beaky', 'Mick', 'Titch']
exp_cell = ['A4', 'B4', 'C4', 'D4']
ws.insert_row_data(4, row_data)
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
exp_cell = ['E5', 'F5', 'G5', 'H5']
ws.insert_row_data(5, row_data, start='E')
for i in range(4):
self.assertEqual(ws[exp_cell[i]], row_data[i])
def test_insert_block_data(self):
ws = self.ws
expected = {'A1': 'This', 'B1': 'is', 'C1': None, 'D1': 'some', 'A2': None, 'B2': 'random', 'C2': None, 'D2': None, 'A3': None, 'B3': 'data', 'C3': None, 'D3': None}
ws.insert_block_data('This\tis\t\tsome\n\trandom\n\tdata')
for idx in expected:
self.assertEqual(ws[idx], expected[idx])
expected = {'M7': 'This', 'N7': 'is', 'O7': None, 'P7': 'some', 'M8': None, 'N8': 'MORE', 'O8': 'random', 'P8': None, 'M9': None, 'N9': 'data', 'O9': None, 'P9': None}
ws.insert_block_data('This\tis\t\tsome\n\tMORE\trandom\n\tdata', col='M', row=7)
for idx in expected:
self.assertEqual(ws[idx], expected[idx])
def test_fill_column(self):
ws = self.ws
ws.insert_column_data('A', ['some', 'random', 'items'])
ws.fill_column('L', '50')
for idx in ('L1', 'L2', 'L3'):
self.assertEqual(ws[idx], '50')
def test_fill_column_empty_worksheet(self):
ws = self.ws
ws.fill_column('L', '50')
self.assertEqual(ws['L1'], None)
ws.fill_column('L', '50', start=1)
self.assertEqual(ws['L1'], None)
ws.fill_column('L', '50', end=3)
self.assertEqual(ws['L1'], None)
ws.fill_column('L', '50', start=1, end=3)
for idx in ('L1', 'L2', 'L3'):
self.assertEqual(ws[idx], '50')
def test_columns_and_rows(self):
ws = self.ws
self.assertEqual(ws.columns, [])
self.assertEqual(ws.rows, [])
self.assertEqual(ws.next_column, 'A')
self.assertEqual(ws.next_row, 1)
ws['B12'] = 'A value'
self.assertEqual(ws.columns, ['B'])
self.assertEqual(ws.rows, [12])
self.assertEqual(ws.next_column, 'C')
self.assertEqual(ws.next_row, 13)
ws['F5'] = 'Another value'
self.assertEqual(ws.columns, ['B', 'F'])
self.assertEqual(ws.rows, [5, 12])
self.assertEqual(ws.next_column, 'G')
self.assertEqual(ws.next_row, 13)
ws['AZ93'] = 'Yet another value'
self.assertEqual(ws.columns, ['B', 'F', 'AZ'])
self.assertEqual(ws.rows, [5, 12, 93])
self.assertEqual(ws.next_column, 'BA')
self.assertEqual(ws.next_row, 94)
def test_render_cell(self):
self.ws.insert_column_data(self.ws.next_column, ['4.5'])
self.assertEqual(self.ws.render_cell('A1'), '4.5')
def test_render_cell_formula(self):
self.ws.insert_column_data(self.ws.next_column, ['4.5', '6.7', '=A1+A2'])
self.assertEqual(self.ws.render_cell('A3'), '=A1+A2')
self.assertEqual(self.ws.render_cell('A3', eval_formulae=True), '11.2')
def test_render_cell_formula_with_column_substitution(self):
self.ws.insert_column_data(self.ws.next_column, ['4.5', '6.7', '=#1+#2'])
self.assertEqual(self.ws.render_cell('A3'), '=A1+A2')
self.assertEqual(self.ws.render_cell('A3', eval_formulae=True), '11.2')
def test_render_cell_formula_with_row_substitution(self):
self.ws.insert_column_data(self.ws.next_column, ['4.5'])
self.ws.insert_column_data(self.ws.next_column, ['6.7'])
self.ws.insert_column_data(self.ws.next_column, ['=A?+B?'])
self.assertEqual(self.ws.render_cell('C1'), '=A1+B1')
self.assertEqual(self.ws.render_cell('C1', eval_formulae=True), '11.2')
def test_render_cell_formula_with_formatting(self):
self.ws.insert_column_data(self.ws.next_column, ['1.0', '2.0', '=A1/A2'])
self.ws.set_style(XLSStyle(number_format=NumberFormats.PERCENTAGE), 'A3')
self.assertEqual(self.ws.render_cell('A3'), '=A1/A2')
self.assertEqual(self.ws.render_cell('A3', eval_formulae=True), '0.5')
self.assertEqual(self.ws.render_cell('A3', eval_formulae=True, apply_format=True), '50.0%') |
class GroupService(BaseService):
@classmethod
def get(cls, group_id, db_session=None):
db_session = get_db_session(db_session)
return db_session.query(cls.model).get(group_id)
@classmethod
def by_group_name(cls, group_name, db_session=None):
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter((cls.model.group_name == group_name))
return query.first()
@classmethod
def get_user_paginator(cls, instance, page=1, item_count=None, items_per_page=50, user_ids=None, GET_params=None):
if (not GET_params):
GET_params = {}
GET_params.pop('page', None)
query = instance.users_dynamic
if user_ids:
query = query.filter(cls.models_proxy.UserGroup.user_id.in_(user_ids))
return SqlalchemyOrmPage(query, page=page, item_count=item_count, items_per_page=items_per_page, **GET_params)
@classmethod
def resources_with_possible_perms(cls, instance, perm_names=None, resource_ids=None, resource_types=None, db_session=None):
db_session = get_db_session(db_session, instance)
query = db_session.query(cls.models_proxy.GroupResourcePermission.perm_name, cls.models_proxy.Group, cls.models_proxy.Resource)
query = query.filter((cls.models_proxy.Resource.resource_id == cls.models_proxy.GroupResourcePermission.resource_id))
query = query.filter((cls.models_proxy.Group.id == cls.models_proxy.GroupResourcePermission.group_id))
if resource_ids:
query = query.filter(cls.models_proxy.GroupResourcePermission.resource_id.in_(resource_ids))
if resource_types:
query = query.filter(cls.models_proxy.Resource.resource_type.in_(resource_types))
if ((perm_names not in ([ANY_PERMISSION], ANY_PERMISSION)) and perm_names):
query = query.filter(cls.models_proxy.GroupResourcePermission.perm_name.in_(perm_names))
query = query.filter((cls.models_proxy.GroupResourcePermission.group_id == instance.id))
perms = [PermissionTuple(None, row.perm_name, 'group', instance, row.Resource, False, True) for row in query]
for resource in instance.resources:
perms.append(PermissionTuple(None, ALL_PERMISSIONS, 'group', instance, resource, True, True))
return perms |
def _snapshot_initial(shot: System, event: Event) -> None:
for agent in event.agents:
if (agent.agent_type == AgentType.CUE):
agent.set_initial(shot.cue)
elif (agent.agent_type == AgentType.BALL):
agent.set_initial(shot.balls[agent.id])
elif (agent.agent_type == AgentType.POCKET):
agent.set_initial(shot.table.pockets[agent.id])
elif (agent.agent_type == AgentType.LINEAR_CUSHION_SEGMENT):
agent.set_initial(shot.table.cushion_segments.linear[agent.id])
elif (agent.agent_type == AgentType.CIRCULAR_CUSHION_SEGMENT):
agent.set_initial(shot.table.cushion_segments.circular[agent.id]) |
class KeyInstanceTable(BaseWalletStore):
LOGGER_NAME = 'db-table-keyinstance'
CREATE_SQL = 'INSERT INTO KeyInstances (keyinstance_id, account_id, masterkey_id, derivation_type, derivation_data, script_type, flags, description, date_created, date_updated) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
READ_SQL = 'SELECT keyinstance_id, account_id, masterkey_id, derivation_type, derivation_data, script_type, flags, description FROM KeyInstances'
UPDATE_DERIVATION_DATA_SQL = 'UPDATE KeyInstances SET date_updated=?, derivation_data=? WHERE keyinstance_id=?'
UPDATE_DESCRIPTION_SQL = 'UPDATE KeyInstances SET date_updated=?, description=? WHERE keyinstance_id=?'
UPDATE_FLAGS_SQL = 'UPDATE KeyInstances SET date_updated=?, flags=? WHERE keyinstance_id=?'
UPDATE_SCRIPT_TYPE_SQL = 'UPDATE KeyInstances SET date_updated=?, script_type=? WHERE keyinstance_id=?'
DELETE_SQL = 'DELETE FROM KeyInstances WHERE keyinstance_id=?'
def create(self, entries: Iterable[KeyInstanceRow], completion_callback: Optional[CompletionCallbackType]=None) -> None:
timestamp = self._get_current_timestamp()
entries = list(entries)  # materialise: the iterable is consumed twice below
datas = [(*t, timestamp, timestamp) for t in entries]
size_hint = sum((len(t[4]) for t in entries))
def _write(db: sqlite3.Connection):
db.executemany(self.CREATE_SQL, datas)
self._db_context.queue_write(_write, completion_callback, size_hint)
def read(self, mask: Optional[KeyInstanceFlag]=None, key_ids: Optional[List[int]]=None) -> List[KeyInstanceRow]:
results: List[KeyInstanceRow] = []
def _collect_results(cursor: sqlite3.Cursor, results: List[KeyInstanceRow]) -> None:
rows = cursor.fetchall()
cursor.close()
for row in rows:
results.append(KeyInstanceRow(row[0], row[1], row[2], DerivationType(row[3]), row[4], ScriptType(row[5]), KeyInstanceFlag(row[6]), row[7]))
query = self.READ_SQL
params: List[int] = []
if (mask is not None):
query += ' WHERE (flags & ?) != 0'
params = [mask]
if key_ids:
keyword = (' AND' if len(params) else ' WHERE')
batch_size = (SQLITE_MAX_VARS - len(params))
while len(key_ids):
batch_ids = key_ids[:batch_size]
param_str = ','.join(('?' for k in batch_ids))
batch_query = (query + f'{keyword} keyinstance_id IN ({param_str})')
cursor = self._db.execute(batch_query, (params + batch_ids))
_collect_results(cursor, results)
key_ids = key_ids[batch_size:]
else:
cursor = self._db.execute(query, params)
_collect_results(cursor, results)
return results
def update_derivation_data(self, entries: Iterable[Tuple[(bytes, int)]], date_updated: Optional[int]=None, completion_callback: Optional[CompletionCallbackType]=None) -> None:
if (date_updated is None):
date_updated = self._get_current_timestamp()
entries = list(entries)  # materialise: consumed once for datas, once for size_hint
datas = [((date_updated,) + entry) for entry in entries]
size_hint = sum((len(entry[0]) for entry in entries))
def _write(db: sqlite3.Connection):
db.executemany(self.UPDATE_DERIVATION_DATA_SQL, datas)
self._db_context.queue_write(_write, completion_callback)
def update_descriptions(self, entries: Iterable[Tuple[(str, int)]], date_updated: Optional[int]=None, completion_callback: Optional[CompletionCallbackType]=None) -> None:
if (date_updated is None):
date_updated = self._get_current_timestamp()
datas = [((date_updated,) + entry) for entry in entries]
def _write(db: sqlite3.Connection):
db.executemany(self.UPDATE_DESCRIPTION_SQL, datas)
self._db_context.queue_write(_write, completion_callback)
def update_flags(self, entries: Iterable[Tuple[(KeyInstanceFlag, int)]], date_updated: Optional[int]=None, completion_callback: Optional[CompletionCallbackType]=None) -> None:
if (date_updated is None):
date_updated = self._get_current_timestamp()
datas = [((date_updated,) + entry) for entry in entries]
def _write(db: sqlite3.Connection):
db.executemany(self.UPDATE_FLAGS_SQL, datas)
self._db_context.queue_write(_write, completion_callback)
def update_script_types(self, entries: Iterable[Tuple[(ScriptType, int)]], date_updated: Optional[int]=None, completion_callback: Optional[CompletionCallbackType]=None) -> None:
if (date_updated is None):
date_updated = self._get_current_timestamp()
datas = [((date_updated,) + entry) for entry in entries]
def _write(db: sqlite3.Connection):
db.executemany(self.UPDATE_SCRIPT_TYPE_SQL, datas)
self._db_context.queue_write(_write, completion_callback)
def delete(self, key_ids: Iterable[int], completion_callback: Optional[CompletionCallbackType]=None) -> None:
datas = [(key_id,) for key_id in key_ids]
def _write(db: sqlite3.Connection):
db.executemany(self.DELETE_SQL, datas)
self._db_context.queue_write(_write, completion_callback) |
class RequirementStatusAssigned(RequirementStatusBaseExt):
tval = 'assigned'
def __init__(self, _config, rid, txt):
txt_split = txt.split(':')
if (len(txt_split) != 3):
raise RMTException(93, ("%s: Assigned values invalid '%s'" % (rid, txt)))
assert (txt_split[0] == self.tval)
RequirementStatusBaseExt.__init__(self, txt_split[1], parse_date(rid, txt_split[2]))
def get_output_string(self):
return ('%s (%s, %s)' % (self.tval, self.get_person(), self.get_date_str())) |
class StructuredTransforms1DPeriodicInterfacesRight(TestCase, Common):
def setUp(self):
super().setUp()
self.seq = nutils.transformseq.StructuredTransforms(x1, (nutils.transformseq.IntAxis(0, 4, 4, 0, True),), 0)
self.check = ((x1, i10, e0), (x1, i11, e0), (x1, i12, e0), (x1, i13, e0))
self.checkmissing = ((x1, i10, e1), (x1, i11, e1), (x1, i12, e1), (x1, i13, e1), (x1, i14, e1))
self.checkrefs = References.uniform(point, 4)
self.checktodims = 1
self.checkfromdims = 0 |
class GethMempoolStrategy(BlockGasStrategy):
def __init__(self, position: int=500, graphql_endpoint: str=None, block_duration: int=2, max_gas_price: Wei=None):
super().__init__(block_duration)
self.position = position
if (graphql_endpoint is None):
graphql_endpoint = f'{web3.provider.endpoint_uri}/graphql'
self.graphql_endpoint = graphql_endpoint
self.max_gas_price = (Wei(max_gas_price) or ((2 ** 256) - 1))
def get_gas_price(self) -> Generator[(int, None, None)]:
query = '{ pending { transactions { gasPrice }}}'
while True:
response = requests.post(self.graphql_endpoint, json={'query': query})
response.raise_for_status()
if ('error' in response.json()):
raise RPCRequestError('could not fetch mempool, run geth with `--graphql` flag')
data = response.json()['data']['pending']['transactions']
prices = sorted((int(x['gasPrice'], 16) for x in data), reverse=True)
(yield min(prices[:self.position][(- 1)], self.max_gas_price)) |
class SplitTest(unittest.TestCase):
def test_split_block(self) -> None:
mod = cst.parse_module(b'from a import x\nfrom b import y\nfrom b import y\nfrom c import x\n')
x = ImportSorter(module=mod, path=Path(), config=Config())
blocks = x.sortable_blocks(mod.children)
self.assertEqual(2, len(blocks))
self.assertEqual({'x': 'a.x'}, blocks[0].imported_names)
self.assertEqual({'y': 'b.y', 'x': 'c.x'}, blocks[1].imported_names) |
def test_redis_handler_backend_clear_next_step_handler(telegram_bot, private_chat, update_type):
if (not REDIS_TESTS):
pytest.skip('please install redis and configure redis server, then enable REDIS_TESTS')
telegram_bot.next_step_backend = RedisHandlerBackend(prefix='pyTelegramBotApi:step_backend2')
_bot.message_handler(commands=['start'])
def start(message):
message.text = 'entered start'
telegram_bot.register_next_step_handler_by_chat_id(message.chat.id, next_handler)
telegram_bot.process_new_updates([update_type])
assert (update_type.message.text == 'entered start')
telegram_bot.clear_step_handler_by_chat_id(private_chat.id)
telegram_bot.process_new_updates([update_type])
assert (update_type.message.text == 'entered start') |
def _get_caller_vars() -> Tuple[(Dict[(str, Any)], Dict[(str, Any)])]:
def _should_skip_frame(frame: FrameType) -> bool:
is_testslide = ((os.path.dirname(__file__) in frame.f_code.co_filename) and ('/tests/' not in frame.f_code.co_filename))
is_typeguard = (os.path.dirname(typeguard.__file__) in frame.f_code.co_filename)
return (is_testslide or is_typeguard)
next_stack_count = 1
next_frame = sys._getframe(next_stack_count)
while _should_skip_frame(next_frame):
next_stack_count += 1
next_frame = sys._getframe(next_stack_count)
return (next_frame.f_globals, next_frame.f_locals) |
def test_hasBothOrNeitherAngleBrackets_1():
assert hasBothOrNeitherAngleBrackets('<>')
assert hasBothOrNeitherAngleBrackets('<foo>')
assert hasBothOrNeitherAngleBrackets('< foo >')
assert hasBothOrNeitherAngleBrackets('foo')
assert (not hasBothOrNeitherAngleBrackets('<'))
assert (not hasBothOrNeitherAngleBrackets('<foo'))
assert (not hasBothOrNeitherAngleBrackets('foo<'))
assert (not hasBothOrNeitherAngleBrackets('<foo<'))
assert (not hasBothOrNeitherAngleBrackets('>'))
assert (not hasBothOrNeitherAngleBrackets('>foo'))
assert (not hasBothOrNeitherAngleBrackets('foo>'))
assert (not hasBothOrNeitherAngleBrackets('>foo>')) |
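# Minimal implementation consistent with every assertion above (an editor's
# sketch reusing the tested name, not necessarily the original): the string
# passes iff '<' and '>' occur together or not at all.
def hasBothOrNeitherAngleBrackets(s: str) -> bool:
    return ('<' in s) == ('>' in s) |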
def extractTambutranslationsBlogspotCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
@nox.session()
def teardown(session: nox.Session, volumes: bool=False, images: bool=False) -> None:
for compose_file in COMPOSE_FILE_LIST:
teardown_command = ('docker', 'compose', '-f', compose_file, 'down', '--remove-orphans')
if (volumes or ('volumes' in session.posargs)):
teardown_command = (*teardown_command, '--volumes')
if images:
teardown_command = (*teardown_command, '--rmi', 'all')
try:
session.run(*teardown_command, external=True)
except nox.command.CommandFailed:
session.warn(f"Teardown failed: '{teardown_command}'")
session.log('Teardown complete') |
def test_snapshot_revert_to_specific(w3):
w3.testing.mine(5)
block_before_snapshot = w3.eth.get_block('latest')
snapshot_idx = w3.testing.snapshot()
block_after_snapshot = w3.eth.get_block('latest')
w3.testing.mine()
w3.testing.snapshot()
w3.testing.mine()
w3.testing.snapshot()
w3.testing.mine()
w3.testing.snapshot()
block_after_mining = w3.eth.get_block('latest')
w3.testing.revert(snapshot_idx)
block_after_revert = w3.eth.get_block('latest')
assert (block_after_mining['number'] > block_before_snapshot['number'])
assert (block_before_snapshot['hash'] == block_after_snapshot['hash'])
assert (block_after_snapshot['hash'] == block_after_revert['hash']) |
def filter_log_syslogd2_setting_data(json):
option_list = ['certificate', 'custom_field_name', 'enc_algorithm', 'facility', 'format', 'interface', 'interface_select_method', 'max_log_rate', 'mode', 'port', 'priority', 'server', 'source_ip', 'ssl_min_proto_version', 'status', 'syslog_type']
json = remove_invalid_fields(json)
dictionary = {}
for attribute in option_list:
if ((attribute in json) and (json[attribute] is not None)):
dictionary[attribute] = json[attribute]
return dictionary |
def main(page: Page):
(fig, ax) = plt.subplots()
fruits = ['apple', 'blueberry', 'cherry', 'orange']
counts = [40, 100, 30, 55]
bar_labels = ['red', 'blue', '_red', 'orange']
bar_colors = ['tab:red', 'tab:blue', 'tab:red', 'tab:orange']
ax.bar(fruits, counts, label=bar_labels, color=bar_colors)
ax.set_ylabel('fruit supply')
ax.set_title('Fruit supply by kind and color')
ax.legend(title='Fruit color')
page.add(MatplotlibChart(fig, expand=True)) |
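# Hedged launch stub for the Flet page handler above, assuming the usual flet
# entry point (MatplotlibChart lives in flet.matplotlib_chart).
import flet as ft
ft.app(target=main) |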
def upgrade():
op.create_table('email_templates', sa.Column('id', sa.Integer(), nullable=False), sa.Column('form_id', sa.Integer(), nullable=False), sa.Column('subject', sa.Text(), nullable=False), sa.Column('from_name', sa.Text(), nullable=False), sa.Column('style', sa.Text(), nullable=False), sa.Column('body', sa.Text(), nullable=False), sa.ForeignKeyConstraint(['form_id'], ['forms.id']), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('form_id'))
op.alter_column('submissions', 'form_id', existing_type=sa.INTEGER(), nullable=False)
plans.create(op.get_bind(), checkfirst=True)
op.add_column('users', sa.Column('plan', plans, nullable=True))
op.execute("UPDATE users SET plan = 'v1_gold' WHERE upgraded")
op.execute("UPDATE users SET plan = 'v1_free' WHERE NOT upgraded")
op.drop_column('users', 'upgraded') |
@copy_fixtures_to_matrixstore
class MeasuresTests(SeleniumTestCase):
maxDiff = None
fixtures = ['functional-measures-dont-edit']
@classmethod
def setUpClass(cls):
call_command('loaddata', *cls.fixtures, **{'verbosity': 0})
super(MeasuresTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
call_command('flush', verbosity=0, interactive=False, reset_sequences=False)
super(MeasuresTests, cls).tearDownClass()
def _fixture_setup(self):
pass
def _fixture_teardown(self):
pass
def _get(self, path):
url = (self.live_server_url + path)
rsp = requests.get(url)
rsp.raise_for_status()
self.browser.get(url)
def _verify_link(self, base_element, css_selector, exp_text, exp_path):
element = base_element.find_element(By.CSS_SELECTOR, css_selector)
a_element = element.find_element(By.TAG_NAME, 'a')
self.assertEqual(a_element.text, exp_text)
href = _normalize_url(a_element.get_attribute('href'))
expected_href = _normalize_url((self.live_server_url + exp_path))
self.assertEqual(href, expected_href)
def _verify_num_elements(self, base_element, css_selector, exp_num):
self.assertEqual(len(base_element.find_elements(By.CSS_SELECTOR, css_selector)), exp_num)
def _find_measure_panel(self, id_):
return self.find_by_css('#{} .panel'.format(id_))
def test_all_england(self):
self._get('/national/england/')
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/national/england/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/national/england/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/')
self._verify_num_elements(panel_element, '.inner li', 5)
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/national/england/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_all_england_low_priority(self):
self._get('/national/england/?tags=lowpriority')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/national/england/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_practice_home_page(self):
self._get('/practice/P00000/')
practice = Practice.objects.get(code='P00000')
mvs = MeasureValue.objects.filter_by_org_type('practice').filter(practice=practice)
extreme_measure = _get_extreme_measure(mvs)
panel_element = self._find_measure_panel('top-measure-container')
self._verify_link(panel_element, '.measure-panel-title', extreme_measure.name, '/measure/{}/practice/P00000/'.format(extreme_measure.id))
panel_element = self._find_measure_panel('lpzomnibus-container')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/practice/P00000/')
def test_pcn_home_page(self):
self._get('/pcn/E/')
pcn = PCN.objects.get(code='E')
mvs = MeasureValue.objects.filter_by_org_type('pcn').filter(pcn=pcn)
extreme_measure = _get_extreme_measure(mvs)
panel_element = self._find_measure_panel('top-measure-container')
self._verify_link(panel_element, '.measure-panel-title', extreme_measure.name, '/measure/{}/pcn/E/'.format(extreme_measure.id))
panel_element = self._find_measure_panel('lpzomnibus-container')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/pcn/E/')
def test_ccg_home_page(self):
self._get('/sicbl/AAA/')
ccg = PCT.objects.get(code='AAA')
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(pct=ccg)
extreme_measure = _get_extreme_measure(mvs)
panel_element = self._find_measure_panel('top-measure-container')
self._verify_link(panel_element, '.measure-panel-title', extreme_measure.name, '/measure/{}/sicbl/AAA/'.format(extreme_measure.id))
panel_element = self._find_measure_panel('lpzomnibus-container')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/sicbl/AAA/')
def test_stp_home_page(self):
self._get('/icb/E00/')
stp = STP.objects.get(code='E00')
mvs = MeasureValue.objects.filter_by_org_type('stp').filter(stp=stp)
extreme_measure = _get_extreme_measure(mvs)
panel_element = self._find_measure_panel('top-measure-container')
self._verify_link(panel_element, '.measure-panel-title', extreme_measure.name, '/measure/{}/icb/E00/'.format(extreme_measure.id))
panel_element = self._find_measure_panel('lpzomnibus-container')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/icb/E00/')
def test_regional_team_home_page(self):
self._get('/regional-team/Y01/')
rt = RegionalTeam.objects.get(code='Y01')
mvs = MeasureValue.objects.filter_by_org_type('regional_team').filter(regional_team=rt)
extreme_measure = _get_extreme_measure(mvs)
panel_element = self._find_measure_panel('top-measure-container')
self._verify_link(panel_element, '.measure-panel-title', extreme_measure.name, '/measure/{}/regional-team/Y01/'.format(extreme_measure.id))
panel_element = self._find_measure_panel('lpzomnibus-container')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/regional-team/Y01/')
def test_measures_for_one_practice(self):
self._get('/practice/P00000/measures/')
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.measure-panel-title', 'Core measure 0', '/measure/core_0/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all practices in this Sub-ICB Location on this measure', '/sicbl/AAA/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/practice/P00000/measures/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all practices in this Sub-ICB Location on this measure', '/sicbl/AAA/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(6)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_num_elements(panel_element, '.inner li', 6)
def test_measures_for_one_practice_low_priority(self):
self._get('/practice/P00000/measures/?tags=lowpriority')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all practices in this Sub-ICB Location on this measure', '/sicbl/AAA/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
def test_measures_for_one_ccg(self):
self._get('/sicbl/AAA/measures/')
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.measure-panel-title', 'Core measure 0', '/measure/core_0/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual practices', '/sicbl/AAA/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/sicbl/AAA/measures/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Split the measure into charts for individual practices', '/sicbl/AAA/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(6)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_num_elements(panel_element, '.inner li', 6)
def test_measures_for_one_ccg_low_priority(self):
self._get('/sicbl/AAA/measures/?tags=lowpriority')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual practices', '/sicbl/AAA/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
def test_measures_for_one_pcn(self):
self._get('/pcn/E/measures/')
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.measure-panel-title', 'Core measure 0', '/measure/core_0/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual practices', '/pcn/E/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/pcn/E/measures/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Split the measure into charts for individual practices', '/pcn/E/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
def test_measures_for_one_pcn_low_priority(self):
self._get('/pcn/E/measures/?tags=lowpriority')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual practices', '/pcn/E/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_measures_for_one_stp(self):
self._get('/icb/E00/measures/')
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.measure-panel-title', 'Core measure 0', '/measure/core_0/icb/E00/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/icb/E00/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual Sub-ICB Locations', '/icb/E00/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all ICBs in England on this measure', '/measure/core_0/icb/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/icb/E00/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/icb/E00/measures/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/icb/E00/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Split the measure into charts for individual Sub-ICB Locations', '/icb/E00/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'Compare all ICBs in England on this measure', '/measure/lpzomnibus/icb/')
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(6)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_num_elements(panel_element, '.inner li', 6)
def test_measures_for_one_regional_team(self):
self._get('/regional-team/Y01/measures/')
panel_element = self._find_measure_panel('measure_core_0')
self._verify_link(panel_element, '.measure-panel-title', 'Core measure 0', '/measure/core_0/regional-team/Y01/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/core_0/regional-team/Y01/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Split the measure into charts for individual Sub-ICB Locations', '/regional-team/Y01/core_0/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Compare all Regional Teams in England on this measure', '/measure/core_0/regional-team/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View technical details for this measure', '/measure/core_0/definition/')
self._verify_num_elements(panel_element, '.inner li', 5)
panel_element = self._find_measure_panel('measure_lpzomnibus')
self._verify_link(panel_element, '.measure-panel-title', 'LP omnibus measure', '/measure/lpzomnibus/regional-team/Y01/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Break it down into its constituent measures.', '/regional-team/Y01/measures/?tags=lowpriority')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/regional-team/Y01/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'Split the measure into charts for individual Sub-ICB Locations', '/regional-team/Y01/lpzomnibus/')
self._verify_link(panel_element, '.inner li:nth-child(4)', 'Compare all Regional Teams in England on this measure', '/measure/lpzomnibus/regional-team/')
self._verify_link(panel_element, '.inner li:nth-child(5)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(6)', 'View technical details for this measure', '/measure/lpzomnibus/definition/')
self._verify_num_elements(panel_element, '.inner li', 6)
def test_measure_for_all_ccgs(self):
self._get('/measure/core_0/')
panel_element = self._find_measure_panel('ccg_AAA')
self._verify_link(panel_element, '.measure-panel-title', 'AAA: CCG 0/0/0', '/sicbl/AAA/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Split the measure into charts for individual practices', '/sicbl/AAA/core_0/')
self._verify_link(panel_element, '.explanation li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/core_0/sicbl/AAA/')
self._verify_link(panel_element, '.explanation li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_num_elements(panel_element, '.explanation li', 3)
def test_measure_for_all_ccgs_with_tags_focus(self):
self._get('/measure/lpzomnibus/')
panel_element = self._find_measure_panel('ccg_AAA')
self._verify_link(panel_element, '.measure-panel-title', 'AAA: CCG 0/0/0', '/sicbl/AAA/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Split the measure into charts for individual practices', '/sicbl/AAA/lpzomnibus/')
self._verify_link(panel_element, '.explanation li:nth-child(2)', 'Break it down into its constituent measures', '/sicbl/AAA/measures/?tags=lowpriority')
self._verify_link(panel_element, '.explanation li:nth-child(3)', 'Break the overall score down into individual presentations', '/measure/lpzomnibus/sicbl/AAA/')
self._verify_link(panel_element, '.explanation li:nth-child(4)', 'View this measure on the analyse page', MEASURE_LPZOMNIBUS_ANALYSE_URL)
self._verify_num_elements(panel_element, '.explanation li', 4)
def test_measure_for_all_pcns(self):
self._get('/measure/core_0/pcn/')
panel_element = self._find_measure_panel('pcn_E')
self._verify_link(panel_element, '.measure-panel-title', 'E: PCN 0/0/0', '/pcn/E/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Split the measure into charts for individual practices', '/pcn/E/core_0/')
self._verify_link(panel_element, '.explanation li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/core_0/pcn/E/')
self._verify_link(panel_element, '.explanation li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_num_elements(panel_element, '.explanation li', 3)
def test_measure_for_all_stps(self):
self._get('/measure/core_0/icb/')
panel_element = self._find_measure_panel('stp_E00')
self._verify_link(panel_element, '.measure-panel-title', 'E00: STP 0/0', '/icb/E00/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Split the measure into charts for individual Sub-ICB Locations', '/icb/E00/core_0/')
self._verify_link(panel_element, '.explanation li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/core_0/icb/E00/')
self._verify_link(panel_element, '.explanation li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_num_elements(panel_element, '.explanation li', 3)
def test_measure_for_all_regional_teams(self):
self._get('/measure/core_0/regional-team/')
panel_element = self._find_measure_panel('regional_team_Y01')
self._verify_link(panel_element, '.measure-panel-title', 'Y01: Region 1', '/regional-team/Y01/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Split the measure into charts for individual Sub-ICB Locations', '/regional-team/Y01/core_0/')
self._verify_link(panel_element, '.explanation li:nth-child(2)', 'Break the overall score down into individual presentations', '/measure/core_0/regional-team/Y01/')
self._verify_link(panel_element, '.explanation li:nth-child(3)', 'View this measure on the analyse page', MEASURE_CORE_0_ANALYSE_URL)
self._verify_num_elements(panel_element, '.explanation li', 3)
def test_measure_for_all_england(self):
self._get('/measure/lp_2/national/england/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 3)
def test_measure_for_one_practice(self):
self._get('/measure/lp_2/practice/P00000/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/practice/P00000/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Compare all practices in this Sub-ICB Location on this measure', '/sicbl/AAA/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_measure_for_one_ccg(self):
self._get('/measure/lp_2/sicbl/AAA/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/sicbl/AAA/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Split the measure into charts for individual practices', '/sicbl/AAA/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all Sub-ICB Locations in England on this measure', '/measure/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_measure_for_one_pcn(self):
self._get('/measure/lp_2/pcn/E/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/pcn/E/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Split the measure into charts for individual practices', '/pcn/E/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 3)
def test_measure_for_one_stp(self):
self._get('/measure/lp_2/icb/E00/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/icb/E00/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Split the measure into charts for individual Sub-ICB Locations', '/icb/E00/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all ICBs in England on this measure', '/measure/lp_2/icb/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_measure_for_one_regional_team(self):
self._get('/measure/lp_2/regional-team/Y01/')
panel_element = self._find_measure_panel('measure_lp_2')
self._verify_link(panel_element, '.measure-panel-title', 'LP measure 2', '/measure/lp_2/regional-team/Y01/')
self._verify_link(panel_element, '.inner li:nth-child(1)', 'Split the measure into charts for individual Sub-ICB Locations', '/regional-team/Y01/lp_2/')
self._verify_link(panel_element, '.inner li:nth-child(2)', 'Compare all Regional Teams in England on this measure', '/measure/lp_2/regional-team/')
self._verify_link(panel_element, '.inner li:nth-child(3)', 'View this measure on the analyse page', MEASURE_LP_2_ANALYSE_URL)
self._verify_link(panel_element, '.inner li:nth-child(4)', 'View technical details for this measure', '/measure/lp_2/definition/')
self._verify_num_elements(panel_element, '.inner li', 4)
def test_measure_for_practices_in_ccg(self):
self._get('/sicbl/AAA/lp_2/')
panel_element = self._find_measure_panel('practice_P00000')
self._verify_link(panel_element, '.measure-panel-title', 'P00000: Practice 0/0/0/0', '/practice/P00000/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/practice/P00000/')
self._verify_num_elements(panel_element, '.explanation li', 1)
def test_measure_for_practices_in_pcn(self):
self._get('/pcn/E/lp_2/')
panel_element = self._find_measure_panel('practice_P00000')
self._verify_link(panel_element, '.measure-panel-title', 'P00000: Practice 0/0/0/0', '/practice/P00000/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/practice/P00000/')
self._verify_num_elements(panel_element, '.explanation li', 1)
def test_measure_for_ccgs_in_stp(self):
self._get('/icb/E00/lp_2/')
panel_element = self._find_measure_panel('ccg_AAA')
self._verify_link(panel_element, '.measure-panel-title', 'AAA: CCG 0/0/0', '/sicbl/AAA/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/sicbl/AAA/')
self._verify_num_elements(panel_element, '.explanation li', 1)
def test_measure_for_ccgs_in_regional_team(self):
self._get('/regional-team/Y01/lp_2/')
panel_element = self._find_measure_panel('ccg_111')
self._verify_link(panel_element, '.measure-panel-title', '111: CCG 1/1/1', '/sicbl/111/measures/')
self._verify_link(panel_element, '.explanation li:nth-child(1)', 'Break the overall score down into individual presentations', '/measure/lp_2/sicbl/111/')
self._verify_num_elements(panel_element, '.explanation li', 1)
def test_explanation_for_all_england(self):
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(measure_id='core_0', month__gte='2018-03-01')
cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs if (mv.cost_savings['10'] > 0)))
cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs if (mv.cost_savings['50'] > 0)))
self._get('/national/england/')
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
exp_text = 'Performance: If all Sub-ICB Locations in England had prescribed in line with the median, the NHS would have spent {} less over the past 6 months. If they had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(cost_saving_50), _humanize(cost_saving_10))
self.assertEqual(perf_element.text, exp_text)
perf_summary_element = self.find_by_xpath("//*[@id='perfsummary']")
self.assertEqual(perf_summary_element.text.strip(), '')
def test_explanation_for_practice(self):
pp = []
for p in Practice.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('practice').filter(practice=p, measure_id='core_0', month__gte='2018-03-01')
p.cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs))
p.cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs))
pp.append(p)
assert ([p for p in pp if ((p.cost_saving_10 < 0) and (p.cost_saving_50 > 0))] == [])
p1 = [p for p in pp if ((p.cost_saving_10 < 0) and (p.cost_saving_50 < 0))][0]
p2 = [p for p in pp if ((p.cost_saving_10 > 0) and (p.cost_saving_50 < 0))][0]
p3 = [p for p in pp if ((p.cost_saving_10 > 0) and (p.cost_saving_50 > 0))][0]
p1_exp_text = 'By prescribing better than the median, this practice has saved the NHS {} over the past 6 months.'.format(_humanize(p1.cost_saving_50))
p2_exp_text = 'By prescribing better than the median, this practice has saved the NHS {} over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(p2.cost_saving_50), _humanize(p2.cost_saving_10))
p3_exp_text = 'If it had prescribed in line with the median, this practice would have spent {} less over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(p3.cost_saving_50), _humanize(p3.cost_saving_10))
self._get('/measure/core_0/practice/{}/'.format(p1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p1_exp_text, perf_element.text)
self._get('/measure/core_0/practice/{}/'.format(p2.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p2_exp_text, perf_element.text)
self._get('/measure/core_0/practice/{}/'.format(p3.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p3_exp_text, perf_element.text)
self._get('/practice/{}/measures/'.format(p1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p1_exp_text, perf_element.text)
ccg = p1.ccg
self._get('/sicbl/{}/core_0/'.format(ccg.code))
panel_element = self._find_measure_panel('practice_{}'.format(p1.code))
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(p1_exp_text, perf_element.text)
self._get('/practice/{}/'.format(p1.code))
panel_element = self._find_measure_panel('top-measure-container')
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(p1_exp_text, perf_element.text)
def test_explanation_for_pcn(self):
pp = []
for p in PCN.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('pcn').filter(pcn=p, measure_id='core_0', month__gte='2018-03-01')
p.cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs))
p.cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs))
pp.append(p)
assert ([p for p in pp if ((p.cost_saving_10 < 0) and (p.cost_saving_50 > 0))] == [])
p2 = [p for p in pp if ((p.cost_saving_10 > 0) and (p.cost_saving_50 < 0))][0]
p1 = [p for p in pp if ((p.cost_saving_10 < 0) and (p.cost_saving_50 < 0))][0]
p3 = [p for p in pp if ((p.cost_saving_10 > 0) and (p.cost_saving_50 > 0))][0]
p1_exp_text = 'By prescribing better than the median, this PCN has saved the NHS {} over the past 6 months.'.format(_humanize(p1.cost_saving_50))
p2_exp_text = 'By prescribing better than the median, this PCN has saved the NHS {} over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(p2.cost_saving_50), _humanize(p2.cost_saving_10))
p3_exp_text = 'If it had prescribed in line with the median, this PCN would have spent {} less over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(p3.cost_saving_50), _humanize(p3.cost_saving_10))
self._get('/measure/core_0/pcn/{}/'.format(p1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p1_exp_text, perf_element.text)
self._get('/measure/core_0/pcn/{}/'.format(p2.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p2_exp_text, perf_element.text)
self._get('/measure/core_0/pcn/{}/'.format(p3.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p3_exp_text, perf_element.text)
self._get('/pcn/{}/measures/'.format(p1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(p1_exp_text, perf_element.text)
self._get('/measure/core_0/pcn/')
panel_element = self._find_measure_panel('pcn_{}'.format(p1.code))
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(p1_exp_text, perf_element.text)
self._get('/pcn/{}/'.format(p1.code))
panel_element = self._find_measure_panel('top-measure-container')
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(p1_exp_text, perf_element.text)
def test_explanation_for_ccg(self):
cc = []
for c in PCT.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(pct=c, measure_id='core_0', month__gte='2018-03-01')
c.cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs))
c.cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs))
cc.append(c)
assert ([c for c in cc if ((c.cost_saving_10 < 0) and (c.cost_saving_50 > 0))] == [])
c1 = [c for c in cc if ((c.cost_saving_10 < 0) and (c.cost_saving_50 < 0))][0]
c2 = [c for c in cc if ((c.cost_saving_10 > 0) and (c.cost_saving_50 < 0))][0]
c3 = [c for c in cc if ((c.cost_saving_10 > 0) and (c.cost_saving_50 > 0))][0]
c1_exp_text = 'By prescribing better than the median, this Sub-ICB Location has saved the NHS {} over the past 6 months.'.format(_humanize(c1.cost_saving_50))
c2_exp_text = 'By prescribing better than the median, this Sub-ICB Location has saved the NHS {} over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(c2.cost_saving_50), _humanize(c2.cost_saving_10))
c3_exp_text = 'If it had prescribed in line with the median, this Sub-ICB Location would have spent {} less over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(c3.cost_saving_50), _humanize(c3.cost_saving_10))
self._get('/measure/core_0/sicbl/{}/'.format(c1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(c1_exp_text, perf_element.text)
self._get('/measure/core_0/sicbl/{}/'.format(c2.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(c2_exp_text, perf_element.text)
self._get('/measure/core_0/sicbl/{}/'.format(c3.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(c3_exp_text, perf_element.text)
self._get('/sicbl/{}/measures/'.format(c1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(c1_exp_text, perf_element.text)
self._get('/measure/core_0/')
panel_element = self._find_measure_panel('ccg_{}'.format(c1.code))
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(c1_exp_text, perf_element.text)
self._get('/sicbl/{}/'.format(c1.code))
panel_element = self._find_measure_panel('top-measure-container')
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(c1_exp_text, perf_element.text)
def test_explanation_for_stp(self):
ss = []
for s in STP.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('stp').filter(stp=s, measure_id='core_0', month__gte='2018-03-01')
s.cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs))
s.cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs))
ss.append(s)
assert ([s for s in ss if ((s.cost_saving_10 < 0) and (s.cost_saving_50 > 0))] == [])
s1 = [s for s in ss if ((s.cost_saving_10 < 0) and (s.cost_saving_50 < 0))][0]
s2 = [s for s in ss if ((s.cost_saving_10 > 0) and (s.cost_saving_50 < 0))][0]
s3 = [s for s in ss if ((s.cost_saving_10 > 0) and (s.cost_saving_50 > 0))][0]
s1_exp_text = 'By prescribing better than the median, this ICB has saved the NHS {} over the past 6 months.'.format(_humanize(s1.cost_saving_50))
s2_exp_text = 'By prescribing better than the median, this ICB has saved the NHS {} over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(s2.cost_saving_50), _humanize(s2.cost_saving_10))
s3_exp_text = 'If it had prescribed in line with the median, this ICB would have spent {} less over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(s3.cost_saving_50), _humanize(s3.cost_saving_10))
self._get('/measure/core_0/icb/{}/'.format(s1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(s1_exp_text, perf_element.text)
self._get('/measure/core_0/icb/{}/'.format(s2.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(s2_exp_text, perf_element.text)
self._get('/measure/core_0/icb/{}/'.format(s3.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(s3_exp_text, perf_element.text)
self._get('/icb/{}/measures/'.format(s1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(s1_exp_text, perf_element.text)
self._get('/measure/core_0/icb/')
panel_element = self._find_measure_panel('stp_{}'.format(s1.code))
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(s1_exp_text, perf_element.text)
self._get('/icb/{}/'.format(s1.code))
panel_element = self._find_measure_panel('top-measure-container')
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(s1_exp_text, perf_element.text)
def test_explanation_for_regional_team(self):
rr = []
for r in RegionalTeam.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('regional_team').filter(regional_team=r, measure_id='core_0', month__gte='2018-03-01')
r.cost_saving_10 = sum((mv.cost_savings['10'] for mv in mvs))
r.cost_saving_50 = sum((mv.cost_savings['50'] for mv in mvs))
rr.append(r)
assert ([r for r in rr if ((r.cost_saving_10 < 0) and (r.cost_saving_50 > 0))] == [])
r1 = [r for r in rr if ((r.cost_saving_10 < 0) and (r.cost_saving_50 < 0))][0]
r3 = [r for r in rr if ((r.cost_saving_10 > 0) and (r.cost_saving_50 > 0))][0]
r1_exp_text = 'By prescribing better than the median, this Regional Team has saved the NHS {} over the past 6 months.'.format(_humanize(r1.cost_saving_50))
r3_exp_text = 'If it had prescribed in line with the median, this Regional Team would have spent {} less over the past 6 months. If it had prescribed in line with the best 10%, it would have spent {} less.'.format(_humanize(r3.cost_saving_50), _humanize(r3.cost_saving_10))
self._get('/measure/core_0/regional-team/{}/'.format(r1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(r1_exp_text, perf_element.text)
self._get('/measure/core_0/regional-team/{}/'.format(r3.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(r3_exp_text, perf_element.text)
self._get('/regional-team/{}/measures/'.format(r1.code))
perf_element = self.find_by_xpath("//*[@id='measure_core_0']//strong[text()='Performance:']/..")
self.assertIn(r1_exp_text, perf_element.text)
self._get('/measure/core_0/regional-team/')
panel_element = self._find_measure_panel('regional_team_{}'.format(r1.code))
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(r1_exp_text, perf_element.text)
self._get('/regional-team/{}/'.format(r1.code))
panel_element = self._find_measure_panel('top-measure-container')
perf_element = panel_element.find_element(By.CLASS_NAME, 'explanation')
self.assertIn(r1_exp_text, perf_element.text)
def test_performance_summary_for_measure_for_all_pcns(self):
mvs = MeasureValue.objects.filter_by_org_type('pcn').filter(measure_id='core_0', month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='pcn_id')
self._get('/measure/core_0/pcn/')
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all PCNs had prescribed at the median ratio or better, then NHS England would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_all_ccgs(self):
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(measure_id='core_0', month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='pct_id')
self._get('/measure/core_0/')
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all Sub-ICB Locations had prescribed at the median ratio or better, then NHS England would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_all_stps(self):
mvs = MeasureValue.objects.filter_by_org_type('stp').filter(measure_id='core_0', month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='stp_id')
self._get('/measure/core_0/icb/')
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all ICBs had prescribed at the median ratio or better, then NHS England would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_all_regional_teams(self):
mvs = MeasureValue.objects.filter_by_org_type('regional_team').filter(measure_id='core_0', month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='regional_team_id')
self._get('/measure/core_0/regional-team/')
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all Regional Teams had prescribed at the median ratio or better, then NHS England would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_practices_in_pcn(self):
for c in PCN.objects.all():
cost_saving = 0
for p in c.practice_set.all():
mvs = MeasureValue.objects.filter_by_org_type('practice').filter(practice=p, measure_id='core_0', month__gte='2018-03-01')
cost_saving += _get_cost_savings(mvs)
if (cost_saving > 0):
break
else:
assert False, 'Could not find PCN with cost saving!'
self._get('/pcn/{}/core_0/'.format(c.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all practices had prescribed at the median ratio or better, then this PCN would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_practices_in_ccg(self):
for c in PCT.objects.all():
cost_saving = 0
for p in c.practice_set.all():
mvs = MeasureValue.objects.filter_by_org_type('practice').filter(practice=p, measure_id='core_0', month__gte='2018-03-01')
cost_saving += _get_cost_savings(mvs)
if (cost_saving > 0):
break
else:
assert False, 'Could not find Sub-ICB Location with cost saving!'
self._get('/sicbl/{}/core_0/'.format(c.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all practices had prescribed at the median ratio or better, then this Sub-ICB Location would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_ccgs_in_stp(self):
for r in STP.objects.all():
cost_saving = 0
for c in r.pct_set.all():
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(pct=c, measure_id='core_0', month__gte='2018-03-01')
cost_saving += _get_cost_savings(mvs)
if (cost_saving > 0):
break
else:
assert False, 'Could not find ICB with cost saving!'
self._get('/icb/{}/core_0/'.format(r.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all Sub-ICB Locations had prescribed at the median ratio or better, then this ICB would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measure_for_ccgs_in_regional_team(self):
for r in RegionalTeam.objects.all():
cost_saving = 0
for c in r.pct_set.all():
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(pct=c, measure_id='core_0', month__gte='2018-03-01')
cost_saving += _get_cost_savings(mvs)
if (cost_saving > 0):
break
else:
assert False, 'Could not find RegionalTeam with cost saving!'
self._get('/regional-team/{}/core_0/'.format(r.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if all Sub-ICB Locations had prescribed at the median ratio or better, then this Regional Team would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measures_for_one_pcn(self):
for c in PCN.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('pcn').filter(pcn=c, measure_id__in=['core_0', 'core_1', 'lpzomnibus'], month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='measure_id')
if (cost_saving > 0):
break
else:
assert False, 'Could not find PCN with cost saving!'
self._get('/pcn/{}/measures/'.format(c.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if this PCN had prescribed at the median ratio or better on all cost-saving measures below, then it would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measures_for_one_ccg(self):
for c in PCT.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('ccg').filter(pct=c, measure_id__in=['core_0', 'core_1', 'lpzomnibus'], month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='measure_id')
if (cost_saving > 0):
break
else:
assert False, 'Could not find Sub-ICB Location with cost saving!'
self._get('/sicbl/{}/measures/'.format(c.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if this Sub-ICB Location had prescribed at the median ratio or better on all cost-saving measures below, then it would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measures_for_one_practice(self):
for p in Practice.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('practice').filter(practice=p, measure_id__in=['core_0', 'core_1', 'lpzomnibus'], month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='measure_id')
if (cost_saving > 0):
break
else:
assert False, 'Could not find practice with cost saving!'
self._get('/practice/{}/measures/'.format(p.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if this practice had prescribed at the median ratio or better on all cost-saving measures below, then it would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measures_for_one_stp(self):
for r in STP.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('stp').filter(stp=r, measure_id__in=['core_0', 'core_1', 'lpzomnibus'], month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='measure_id')
if (cost_saving > 0):
break
else:
assert False, 'Could not find ICB with cost saving!'
self._get('/icb/{}/measures/'.format(r.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if this ICB had prescribed at the median ratio or better on all cost-saving measures below, then it would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text)
def test_performance_summary_for_measures_for_one_regional_team(self):
for r in RegionalTeam.objects.all():
mvs = MeasureValue.objects.filter_by_org_type('regional_team').filter(regional_team=r, measure_id__in=['core_0', 'core_1', 'lpzomnibus'], month__gte='2018-03-01')
cost_saving = _get_cost_savings(mvs, rollup_by='measure_id')
if (cost_saving > 0):
break
else:
assert False, 'Could not find RegionalTeam with cost saving!'
self._get('/regional-team/{}/measures/'.format(r.code))
perf_summary_element = self.find_by_xpath('//*[@id="perfsummary"][not(contains(text(), "Loading..."))]')
exp_text = 'Over the past 6 months, if this Regional Team had prescribed at the median ratio or better on all cost-saving measures below, then it would have spent {} less.'.format(_humanize(cost_saving))
self.assertIn(exp_text, perf_summary_element.text) |
class UploadDownloadFiles():
def __init__(self, args):
self.args = args
self.file_handles = getFileHandles()
file_storage = self.args.file_storage
self.obj = None
if (file_storage in self.file_handles):
self.obj = self.file_handles[file_storage](self.args)
def upload(self, **kwargs):
res = ''
if (self.obj is not None):
res = self.obj.upload(**kwargs)
return res
def download(self, **kwargs):
if (self.obj is not None):
self.obj.download(**kwargs) |
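# Usage sketch (hypothetical keyword names; assumes getFileHandles() maps a
# backend name such as 's3' to a handler class taking args): the wrapper
# dispatches to whichever backend args.file_storage names, and no-ops when
# the name is unknown.
import argparse

args = argparse.Namespace(file_storage='s3')
transfer = UploadDownloadFiles(args)
transfer.upload(local_path='model.bin', remote_key='models/model.bin')
transfer.download(remote_key='models/model.bin', local_path='model.bin')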
class IPv4TCPSrcMasked(MatchTest):
def runTest(self):
match = ofp.match([ofp.oxm.eth_type(2048), ofp.oxm.ip_proto(6), ofp.oxm.tcp_src_masked(52, 254)])
matching = {'tcp sport=53': simple_tcp_packet(tcp_sport=53), 'tcp sport=52': simple_tcp_packet(tcp_sport=52)}
nonmatching = {'tcp sport=54': simple_tcp_packet(tcp_sport=54), 'tcp sport=51': simple_tcp_packet(tcp_sport=51)}
self.verify_match(match, matching, nonmatching) |
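# Why the masked match splits the ports this way: tcp_src_masked(52, 254)
# matches any source port where (port & 254) == (52 & 254). Mask 254 is
# 0b11111110, so the lowest bit is ignored and 52 and 53 collapse to the
# same value, while 54 and 51 do not.
assert (52 & 254) == 52 and (53 & 254) == 52   # matching packets
assert (54 & 254) != 52 and (51 & 254) != 52   # nonmatching packets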
class Test_Log_Slow_Processing(Test_verify_event_path_base):
def test_log_slow_processing_stream(self, cthread: AIOKafkaConsumerThread, tp: TP, logger):
cthread._log_slow_processing_stream(SLOW_PROCESSING_STREAM_IDLE_SINCE_START, tp, '3 seconds ago')
expected_msg = (SLOW_PROCESSING_STREAM_IDLE_SINCE_START + ' ' + (SLOW_PROCESSING_EXPLAINED % {'setting': 'stream_processing_timeout', 'current_value': 300.0}) + ' ' + text.enumeration([SLOW_PROCESSING_CAUSE_STREAM, SLOW_PROCESSING_CAUSE_AGENT], start=2, sep='\n\n'))
logger.error.assert_called_with(expected_msg, tp, '3 seconds ago')
def fortios_firewall(data, fos, check_mode):
fos.do_member_operation('firewall', 'internet-service-group')
if data['firewall_internet_service_group']:
resp = firewall_internet_service_group(data, fos, check_mode)
else:
fos._module.fail_json(msg=('missing task body: %s' % 'firewall_internet_service_group'))
if check_mode:
return resp
return ((not is_successful_status(resp)), (is_successful_status(resp) and (resp['revision_changed'] if ('revision_changed' in resp) else True)), resp, {}) |
def get_arch(metadata=None):
if metadata:
platform = metadata['platform'].lower()
if ('x86_64' in platform):
return 'x86_64'
elif ('amd64' in platform):
return 'amd64'
procinfo = metadata['cpu_model_name'].lower()
if ('aarch64' in procinfo):
return 'arm64'
elif ('arm' in procinfo):
if ('64' in procinfo):
return 'arm64'
else:
return 'arm32'
elif ('intel' in procinfo):
return 'x86_64'
else:
raise NotImplementedError((platform, procinfo))
else:
uname = platform.uname()
machine = uname.machine.lower()
if (machine in ('amd64', 'x86_64')):
return machine
elif (machine == 'aarch64'):
return 'arm64'
elif ('arm' in machine):
return 'arm'
else:
raise NotImplementedError(machine) |
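# Usage sketch: get_arch() normalises either a benchmark metadata dict or the
# local platform.uname() result to a small set of architecture labels.
print(get_arch({'platform': 'Linux-x86_64', 'cpu_model_name': 'Intel Xeon'}))      # 'x86_64'
print(get_arch({'platform': 'Linux-raspbian', 'cpu_model_name': 'ARMv8 64-bit'}))  # 'arm64'
print(get_arch())  # inspects platform.uname() on the current machine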
def upgrade():
op.create_table('providedidentity', sa.Column('id', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), sa.Column('privacy_request_id', sa.String(), nullable=False), sa.Column('field_name', sa.Enum('email', 'phone_number', name='providedidentitytype'), nullable=False), sa.Column('hashed_value', sa.String(), nullable=True), sa.Column('encrypted_value', sqlalchemy_utils.types.encrypted.encrypted_type.StringEncryptedType(), nullable=True), sa.ForeignKeyConstraint(['privacy_request_id'], ['privacyrequest.id']), sa.PrimaryKeyConstraint('id'))
op.create_index(op.f('ix_providedidentity_hashed_value'), 'providedidentity', ['hashed_value'], unique=False)
op.create_index(op.f('ix_providedidentity_id'), 'providedidentity', ['id'], unique=False) |
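# A plausible companion downgrade() for this migration (not part of the
# source): undo the operations in reverse order, then drop the enum type that
# the Enum column creates on PostgreSQL.
def downgrade():
    op.drop_index(op.f('ix_providedidentity_id'), table_name='providedidentity')
    op.drop_index(op.f('ix_providedidentity_hashed_value'), table_name='providedidentity')
    op.drop_table('providedidentity')
    sa.Enum(name='providedidentitytype').drop(op.get_bind(), checkfirst=True)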
def configuration_file():
if os.path.isfile('./emissionsapi.yml'):
return './emissionsapi.yml'
expanded_file = os.path.expanduser('~/emissionsapi.yml')
if os.path.isfile(expanded_file):
return expanded_file
if os.path.isfile('/etc/emissionsapi.yml'):
return '/etc/emissionsapi.yml' |
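# Usage sketch (assumes PyYAML): configuration_file() returns the first path
# that exists — ./emissionsapi.yml, then ~/emissionsapi.yml, then
# /etc/emissionsapi.yml — or None when no config file is present.
import yaml

path = configuration_file()
config = {}
if path is not None:
    with open(path) as f:
        config = yaml.safe_load(f) or {}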
@patch('subprocess.run')
def test_update_sent_alerts(mock_subprocess_run, alerts_fetcher_mock: MockAlertsFetcher):
mock_alerts_ids_to_update = (['mock_alert_id'] * 60)
resource_type = ResourceType.TEST
alerts_fetcher_mock.update_sent_alerts(alert_ids=mock_alerts_ids_to_update, resource_type=resource_type)
assert (mock_subprocess_run.call_count == 2)
calls_args = mock_subprocess_run.call_args_list
for call_args in calls_args:
assert (call_args[0][0][1] == 'run')
assert (call_args[0][0][2] == '-s')
assert (call_args[0][0][3] == 'elementary_cli.update_alerts.update_sent_alerts')
dbt_run_params = json.loads(call_args[0][0][9])
assert ('alert_ids' in dbt_run_params)
assert ('table_name' in dbt_run_params)
assert ('sent_at' in dbt_run_params) |
def add_fsdp_configs(_C: CN):
_C.FSDP = CN()
_C.FSDP.ALGORITHM = 'grad_optim'
_C.FSDP.CPU_OFFLOAD = False
_C.FSDP.BACKWARD_PREFETCH = True
_C.FSDP.USE_ORIG_PARAMS = False
_C.FSDP.AUTO_WRAP_POLICY = 'never_wrap_policy'
_C.FSDP.AUTO_WRAP_MIN_PARAMS = int(10000.0)
_C.FSDP.AUTO_WRAP_LAYER_CLS = []
_C.FSDP.USE_LOCAL_STATE_DICT = True
_C.FSDP.STATE_DICT_TYPE = 'SHARDED_STATE_DICT'
_C.FSDP.STATE_DICT_CPU_OFFLOAD = False
_C.FSDP.STATE_DICT_RANK0_ONLY = True
_C.FSDP.IGNORED_MODULES = None
_C.FSDP.FORWARD_PREFETCH_OPTION = 'no'
_C.FSDP.LIMIT_ALL_GATHERS = False |
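# Usage sketch (assumes CN is yacs.config.CfgNode, matching the CN() calls
# above): attach the FSDP defaults to a root node, then override selected keys.
from yacs.config import CfgNode as CN

root = CN()
add_fsdp_configs(root)
root.merge_from_list(['FSDP.CPU_OFFLOAD', True, 'FSDP.LIMIT_ALL_GATHERS', True])
print(root.FSDP.ALGORITHM)  # 'grad_optim'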
def adc_info(esp, efuses, args):
print('')
if (efuses['BLK_VERSION_MINOR'].get() == 1):
print(' RF_REF_I_BIAS_CONFIG: {}'.format(efuses['RF_REF_I_BIAS_CONFIG'].get()))
print(' LDO_VOL_BIAS_CONFIG_LOW: {}'.format(efuses['LDO_VOL_BIAS_CONFIG_LOW'].get()))
print(' LDO_VOL_BIAS_CONFIG_HIGH: {}'.format(efuses['LDO_VOL_BIAS_CONFIG_HIGH'].get()))
print(' PVT_LOW: {}'.format(efuses['PVT_LOW'].get()))
print(' PVT_HIGH: {}'.format(efuses['PVT_HIGH'].get()))
print(' ADC_CALIBRATION_0: {}'.format(efuses['ADC_CALIBRATION_0'].get()))
print(' ADC_CALIBRATION_1: {}'.format(efuses['ADC_CALIBRATION_1'].get()))
print(' ADC_CALIBRATION_2: {}'.format(efuses['ADC_CALIBRATION_2'].get()))
else:
print('BLK_VERSION_MINOR = {}'.format(efuses['BLK_VERSION_MINOR'].get_meaning())) |
def printMatchesInViewOutputStringAndCopyFirstToClipboard(needle, haystack):
first = None
for match in re.finditer((('.*<.*(' + needle) + ')\\S*: (0x[0-9a-fA-F]*);.*'), haystack, re.IGNORECASE):
view = match.groups()[(- 1)]
className = fb.evaluateExpressionValue((('(id)[(' + view) + ') class]')).GetObjectDescription()
print('{} {}'.format(view, className))
if (first is None):
first = view
cmd = ('echo %s | tr -d "\n" | pbcopy' % view)
os.system(cmd) |
class S3Path():
region: str
bucket: str
key: str
def __init__(self, fileURL: str) -> None:
(self.region, self.bucket, self.key) = self._get_region_bucket_key(fileURL)
def __eq__(self, other: 'S3Path') -> bool:
return ((self.region == other.region) and (self.bucket == other.bucket) and (self.key == other.key))
def _get_region_bucket_key(self, fileURL: str) -> Tuple[(str, str, str)]:
# NOTE: the original URL pattern was elided in the source; this reconstruction
# assumes virtual-hosted-style URLs: https://<bucket>.s3.<region>.amazonaws.com/<key>
match = re.search('^https://(.+)\\.s3[.-]([a-z0-9-]+)\\.amazonaws\\.com(/.*)$', fileURL)
if (not match):
raise ValueError(f'Could not parse {fileURL} as an S3Path')
(bucket, region, key) = (match.group(1).strip('/'), match.group(2), match.group(3).strip('/'))
return (region, bucket, key) |
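# Usage sketch (depends on the URL pattern reconstructed above being correct):
path = S3Path('https://my-bucket.s3.eu-west-1.amazonaws.com/data/file.csv')
print(path.region, path.bucket, path.key)  # eu-west-1 my-bucket data/file.csv
assert path == S3Path('https://my-bucket.s3.eu-west-1.amazonaws.com/data/file.csv')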
class OptionSeriesColumnrangeSonificationContexttracksMappingPlaydelay(Options):
def mapFunction(self):
return self._config_get(None)
def mapFunction(self, value: Any):
self._config(value, js_type=False)
def mapTo(self):
return self._config_get(None)
def mapTo(self, text: str):
self._config(text, js_type=False)
def max(self):
return self._config_get(None)
def max(self, num: float):
self._config(num, js_type=False)
def min(self):
return self._config_get(None)
def min(self, num: float):
self._config(num, js_type=False)
def within(self):
return self._config_get(None)
def within(self, value: Any):
self._config(value, js_type=False) |
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(grids, st.data())
def test_gridprop_to_from_file_is_identity(tmp_path, grid, data):
filepath = (tmp_path / 'gridprop.grdecl')
prop = data.draw(st.sampled_from(grid.get_xyz_corners()))
prop.to_file(filepath, fformat='grdecl')
prop_from_file = xtgeo.gridproperty_from_file(filepath, name=prop.name, fformat='grdecl', grid=grid)
assert_allclose(prop.get_npvalues1d(), prop_from_file.get_npvalues1d(), atol=0.001) |
def _find_files(project_root):
path_exclude_pattern = '\\.git($|\\/)|venv|_build'
file_exclude_pattern = 'fill_template_vars\\.py|\\.swp$'
filepaths = []
for (dir_path, _dir_names, file_names) in os.walk(project_root):
if (not re.search(path_exclude_pattern, dir_path)):
for file in file_names:
if (not re.search(file_exclude_pattern, file)):
filepaths.append(str(Path(dir_path, file)))
return filepaths |
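# Usage sketch: collect every file under a project root except those inside
# .git, venv or _build directories, plus the excluded helper and swap files.
for filepath in _find_files('.'):
    print(filepath)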
class BasicsCharacteristic(Characteristic):
def __init__(self, bus, index, service):
Characteristic.__init__(self, bus, index, UART_BE_CHARACTERISTIC_UUID, ['write', 'read'], service)
def WriteValue(self, value, options):
global get_app_command
get_app_command = bytes(value)
print('[BASICS]remote: {}'.format(get_app_command))
if (get_app_command not in [b'\xfe\xfe\x02 \xfa', b'\xfe\xfe\x02#\xfa']):
serial_obj.write(get_app_command)
serial_obj.flush()
print('write serial end')
def ReadValue(self, options):
v = [10, 10, 10, 10, 10, 10]
value = []
for b in v:
value.append(dbus.Byte(b))
return value |
def set_sat(rgb: Vector, s: float) -> Vector:
final = ([0.0] * 3)
(indices, rgb_sort) = zip(*sorted(enumerate(rgb), key=itemgetter(1)))
if (rgb_sort[2] > rgb_sort[0]):
final[indices[1]] = (((rgb_sort[1] - rgb_sort[0]) * s) / (rgb_sort[2] - rgb_sort[0]))
final[indices[2]] = s
else:
final[indices[1]] = 0
final[indices[2]] = 0
final[indices[0]] = 0
return final |
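# Worked example: set_sat() rescales the channels so the minimum goes to 0 and
# the maximum to s, keeping the middle channel's relative position. For
# rgb = [0.2, 0.5, 0.8] and s = 0.6, the mid channel maps to
# (0.5 - 0.2) * 0.6 / (0.8 - 0.2) = 0.3.
print(set_sat([0.2, 0.5, 0.8], 0.6))  # ≈ [0.0, 0.3, 0.6]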
def test_iter_sse_whatwg_example1() -> None:
class Body(httpx.SyncByteStream):
def __iter__(self) -> Iterator[bytes]:
(yield b'data: YH00\n')
(yield b'data: +2\n')
(yield b'data: 10\n')
(yield b'\n')
response = httpx.Response(200, headers={'content-type': 'text/event-stream'}, stream=Body())
events = list(EventSource(response).iter_sse())
assert (len(events) == 1)
assert (events[0].event == 'message')
assert (events[0].data == 'YH00\n+2\n10')
assert (events[0].id == '')
assert (events[0].retry is None) |
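# The WHATWG framing this test exercises: consecutive 'data:' lines accumulate
# and are joined with '\n', and the blank line dispatches the event — so the
# four yielded chunks produce exactly one event with data == 'YH00\n+2\n10'.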
def get_qubes_version():
is_qubes = False
version = None
try:
with open('/etc/os-release') as f:
for line in f:
try:
(key, value) = line.rstrip().split('=')
except ValueError:
continue
if ((key == 'NAME') and ('qubes' in value.lower())):
is_qubes = True
if (key == 'VERSION_ID'):
version = value
except FileNotFoundError:
return None
if (not is_qubes):
return None
return version |
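# Usage sketch: returns the VERSION_ID value from /etc/os-release when the
# NAME field identifies Qubes, and None on other systems or when the file is
# missing.
version = get_qubes_version()
if version:
    print('Running under Qubes OS', version)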
def kill_on_exception(logname):
def _koe(func):
@functools.wraps(func)
def __koe(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
logging.getLogger(logname).exception('Unhandled exception, killing RYU')
logging.shutdown()
os.kill(os.getpid(), signal.SIGTERM)
return __koe
return _koe |
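# Usage sketch (hypothetical logger name): any unhandled exception in the
# decorated handler is logged to the named logger, logging is flushed, and the
# whole process receives SIGTERM instead of limping on with a dead handler.
@kill_on_exception('ryu.base')
def handler(event):
    raise RuntimeError('boom')  # logged, then the process is terminated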
class OptionSeriesSplineDragdropGuideboxDefault(Options):
def className(self):
return self._config_get('highcharts-drag-box-default')
def className(self, text: str):
self._config(text, js_type=False)
def color(self):
return self._config_get('rgba(0, 0, 0, 0.1)')
def color(self, text: str):
self._config(text, js_type=False)
def cursor(self):
return self._config_get('move')
def cursor(self, text: str):
self._config(text, js_type=False)
def lineColor(self):
return self._config_get('#888')
def lineColor(self, text: str):
self._config(text, js_type=False)
def lineWidth(self):
return self._config_get(1)
def lineWidth(self, num: float):
self._config(num, js_type=False)
def zIndex(self):
return self._config_get(900)
def zIndex(self, num: float):
self._config(num, js_type=False) |
@pytest.mark.parametrize('task_config', [{'hydra': {'run': {'dir': '${now:%Y%m%d_%H%M%S_%f}'}}}])
def test_run_dir_microseconds(tmpdir: Path, task_config: DictConfig) -> None:
cfg = OmegaConf.create(task_config)
assert isinstance(cfg, DictConfig)
integration_test(tmpdir=tmpdir, task_config=cfg, overrides=[], prints="str('%f' not in os.getcwd())", expected_outputs='True') |