code stringlengths 281 23.7M |
|---|
class Local_Stability_ComputeNode(ComputeNode):
    """Compute node that executes TEXT_2_IMAGE tasks against a local
    Stability (stable-diffusion-webui style) HTTP endpoint.

    Configuration comes from environment variables (LOCAL_STABILITY_URL,
    TEXT2IMG_OUTPUT_DIR, TEXT2IMG_DEFAULT_MODEL) with user-config fallbacks.
    """

    _instance = None  # process-wide singleton instance

    # NOTE(fix): get_instance/declare_user_config take `cls` and are called
    # without an instance — they must be classmethods.
    @classmethod
    def get_instance(cls):
        """Return the singleton node, creating it on first use."""
        if cls._instance is None:
            cls._instance = Local_Stability_ComputeNode()
        return cls._instance

    @classmethod
    def declare_user_config(cls):
        """Declare user-config entries for settings not provided via env vars."""
        user_config = AIStorage.get_instance().get_user_config()
        if os.getenv('LOCAL_STABILITY_URL') is None:
            user_config.add_user_config('local_stability_url', 'local stability url', True, None)
        if os.getenv('TEXT2IMG_OUTPUT_DIR') is None:
            # Default output dir is ~/text2img_output; create it if missing.
            output_dir = Path.home() / 'text2img_output'
            output_dir.mkdir(exist_ok=True)
            user_config.add_user_config('text2img_output_dir', 'text2image output dir', True, output_dir)
        if os.getenv('TEXT2IMG_DEFAULT_MODEL') is None:
            user_config.add_user_config('text2img_default_model', 'text2img default model', True, 'v1-5-pruned-emaonly')

    def __init__(self) -> None:
        super().__init__()
        self.is_start = False          # guards against starting the worker loop twice
        self.node_id = 'local_stability_node'
        self.url = None                # base URL of the local stability server
        self.default_model = None      # checkpoint name used when none requested
        self.output_dir = None         # where generated PNGs are written
        self.task_queue = Queue()      # asyncio queue consumed by the worker loop

    async def initial(self):
        """Resolve configuration and start the worker loop.

        Returns True on success, False when a mandatory setting is missing.
        """
        if os.getenv('LOCAL_STABILITY_URL') is not None:
            self.url = os.getenv('LOCAL_STABILITY_URL')
        else:
            self.url = AIStorage.get_instance().get_user_config().get_value('local_stability_url')
        if os.getenv('TEXT2IMG_OUTPUT_DIR') is not None:
            self.output_dir = os.getenv('TEXT2IMG_OUTPUT_DIR')
        else:
            self.output_dir = AIStorage.get_instance().get_user_config().get_value('text2img_output_dir')
        if os.getenv('TEXT2IMG_DEFAULT_MODEL') is not None:
            self.default_model = os.getenv('TEXT2IMG_DEFAULT_MODEL')
        else:
            self.default_model = AIStorage.get_instance().get_user_config().get_value('text2img_default_model')
        if self.url is None:
            logger.error('local stability url is None!')
            return False
        if self.default_model is None:
            logger.error('local stability default model is None!')
            return False
        if self.output_dir is None:
            # Output dir is optional; fall back to the current directory.
            self.output_dir = './'
        self.output_dir = os.path.abspath(self.output_dir)
        self.start()
        return True

    async def push_task(self, task: ComputeTask, proiority: int = 0):
        """Enqueue a task for the worker loop.

        `proiority` (sic — kept for caller compatibility) is currently ignored.
        """
        logger.info(f'stability_node push task: {task.display()}')
        self.task_queue.put_nowait(task)

    async def remove_task(self, task_id: str):
        # Task removal is not supported for the local queue.
        pass

    def _make_post_request(self, url, json) -> Tuple[str, requests.Response]:
        """POST `json` to `url`; return (error_message, response).

        Exactly one element of the tuple is non-None.
        """
        try:
            response = requests.post(url, json=json)
            if response.status_code != 200:
                return (f'{response.status_code}, {response.json()}', None)
            return (None, response)
        except Exception as e:
            return (f'{e}', None)

    def _run_task(self, task: ComputeTask):
        """Synchronously run one text2img task and return its ComputeTaskResult."""
        task.state = ComputeTaskState.RUNNING
        result = ComputeTaskResult()
        result.result_code = ComputeTaskResultCode.ERROR
        result.set_from_task(task)
        model_name = task.params['model_name']
        prompt = task.params['prompt']
        negative_prompt = task.params['negative_prompt']
        if negative_prompt is None or negative_prompt == '':
            # Opinionated default negative prompt used when the caller supplies none.
            negative_prompt = 'sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, duplicate, mutated hands, mutated legs, (blurry:1.3), (bad anatomy:1.2), bad proportions, extra limbs, more than 2 nipples, extra legs, fused fingers, missing fingers, jpeg artifacts, signature, watermark, username, artist name, heterochromia, muscular legs, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, skin spots, acnes, logo, badhandv4, easynegative, cropped image, patreon,lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, ng_deepnegative_v1_75t, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry,(Tiptoe:1.3),looking at viewer, Twisted eyes'
        prompt += ',masterpiece, best quality:1.3'
        logging.info(f'call local stability {model_name} prompts: {prompt}, nagative_prompt: {negative_prompt}')
        if model_name is not None:
            # Switch the server to the requested checkpoint before generating.
            payload = {'sd_model_checkpoint': model_name}
            (err, resp) = self._make_post_request(f'{self.url}/sdapi/v1/options', payload)
            if err is not None:
                task.state = ComputeTaskState.ERROR
                err_msg = f'Set local stability model failed. err:{err}'
                logger.error(err_msg)
                task.error_str = err_msg
                result.error_str = err_msg
                return result
            logging.info(f'set local stability model {model_name} success')
        payload = {'prompt': prompt, 'negative_prompt': negative_prompt, 'steps': 20}
        (err, resp) = self._make_post_request(f'{self.url}/sdapi/v1/txt2img', payload)
        if err is not None:
            task.state = ComputeTaskState.ERROR
            err_msg = f'Failed. err:{err}'
            logger.error(err_msg)
            task.error_str = err_msg
            result.error_str = err_msg
            return result
        r = resp.json()
        for i in r['images']:
            # NOTE(review): only the first image is ever saved/returned — the
            # loop returns on its first iteration. Kept as-is; confirm whether
            # multi-image responses should be saved under distinct names.
            image = Image.open(io.BytesIO(base64.b64decode(i.split(',', 1)[0])))
            file_name = os.path.join(self.output_dir, task.task_id + '.png')
            image.save(file_name)
            task.state = ComputeTaskState.DONE
            result.result_code = ComputeTaskResultCode.OK
            result.worker_id = self.node_id
            result.result = {'file': file_name}
            return result
        # Reached only when the server returned no images at all.
        task.error_str = 'Unknown error!'
        result.error_str = 'Unknown error!'
        task.state = ComputeTaskState.ERROR
        return result

    def start(self):
        """Spawn the background task-consuming loop (idempotent)."""
        if self.is_start:
            return
        self.is_start = True

        async def _run_task_loop():
            while True:
                logger.info('local_stability_node is waiting for task...')
                task = await self.task_queue.get()
                logger.info(f'stability_node get task: {task.display()}')
                # _run_task mutates the task/result in place; the return
                # value is not consumed here.
                result = self._run_task(task)
        asyncio.create_task(_run_task_loop())

    def display(self) -> str:
        return f'Stability_ComputeNode: {self.node_id}'

    def get_task_state(self, task_id: str):
        # Not implemented for the local node.
        pass

    def get_capacity(self):
        # Not implemented for the local node.
        pass

    def is_support(self, task: ComputeTask) -> bool:
        """Only text-to-image tasks are supported."""
        return task.task_type == ComputeTaskType.TEXT_2_IMAGE

    def is_local(self) -> bool:
        # NOTE(review): returns False despite being a "local" node — kept as
        # original behavior; confirm intent with the scheduler's semantics.
        return False
def award_data_old_and_new(db):
    """Seed award test data and yield two award ids of interest.

    Presumably a pytest fixture (generator + `db` argument) whose decorator
    was lost in extraction — TODO confirm `@pytest.fixture` upstream.

    Creates a COVID-19 DEFC, two specifically-numbered awards plus nine
    cycled ones, attaches a FinancialAccountsByAwards row to each
    (alternating submissions 10 and 12), and back-dates every award's
    update_date to OLD_DATE.
    """
    defc = baker.make('references.DisasterEmergencyFundCode', code='M', group_name='covid_19')
    award_id_too_old = 988
    award_id_too_new = 989
    awards = [baker.make('search.AwardSearch', award_id=award_id_too_old, action_date='2020-01-01'), baker.make('search.AwardSearch', award_id=award_id_too_new, action_date='2020-01-01'), *baker.make('search.AwardSearch', award_id=cycle([1, 2, 3, 4, 5, 6, 7, 8, 9]), _quantity=9, action_date='2020-01-01')]
    for (index, award) in enumerate(awards):
        # Alternate the submission id so both submissions have linked rows.
        if ((index % 2) == 0):
            baker.make('awards.FinancialAccountsByAwards', submission_id=10, award=award, disaster_emergency_fund=defc)
        else:
            baker.make('awards.FinancialAccountsByAwards', submission_id=12, award=award, disaster_emergency_fund=defc)
        # Force every award to look stale so delta/incremental logic triggers.
        AwardSearch.objects.filter(award_id=award.award_id).update(update_date=OLD_DATE)
    (yield (award_id_too_old, award_id_too_new))
class TestSaveLoadData():
    """Round-trip tests for empymod's save_data/load_data (txt and json)."""

    # Shared dipole-model inputs used by every test.
    inp = {'src': ([0, 111, 1111], [0, 0, 0], 250),
           'rec': [(np.arange(1, 8) * 1000), np.zeros(7), 300],
           'depth': [0, 300],
           'res': [.0, 0.3, 1],
           'htarg': {'pts_per_dec': (- 1)},
           'verb': 1}

    # FIX: this bare `.parametrize(...)` line was the residue of a stripped
    # `@pytest.mark.parametrize` decorator (a lone attribute access is a
    # syntax/Name error); restored so the test collects and runs.
    @pytest.mark.parametrize('extra', [{'signal': None, 'freqtime': [0.1, 1.0]},
                                       {'signal': 0, 'freqtime': [1.0, 5.0]},
                                       {'signal': None, 'freqtime': [(- 0.1), (- 1.0)]}])
    def test_basic(self, tmpdir, extra):
        # `extra` is injected by parametrize but not consumed by this body
        # (kept for signature compatibility with the parametrization).
        orig = empymod.dipole(**self.inp, freqtime=[0.1, 1, 10, 100])
        io.save_data((tmpdir + 'test.txt'), orig, info='Additional info')
        io.save_data((tmpdir + 'test.json'), orig, info='Additional info')
        orig_txt = io.load_data((tmpdir + 'test.txt'))
        orig_json = io.load_data((tmpdir + 'test.json'))
        assert_allclose(orig, orig_txt)
        assert_allclose(orig, orig_json)
        # Both file formats must embed the standard metadata header.
        for ending in ['txt', 'json']:
            with open(((tmpdir + 'test.') + ending), 'r') as f:
                text = f.read()
                assert ('date' in text)
                assert ('empymod v' in text)
                assert ('shape' in text)
                assert ('(4, 7, 3)' in text)
                assert (str(orig.dtype) in text)
                assert ('Additional info' in text)

    def test_text(self, tmpdir):
        """Saving without the optional `info` argument must also work."""
        orig = empymod.dipole(**self.inp, freqtime=[0.1, 1, 10, 100])
        io.save_data((tmpdir + 'test.txt'), orig)
        io.save_data((tmpdir + 'test.json'), orig)

    def test_errors(self, tmpdir):
        """Non-3D data and unknown extensions must raise ValueError."""
        with pytest.raises(ValueError, match='must be 3D'):
            io.save_data((tmpdir + '/test.json'), data=np.ones((1, 1)))
        with pytest.raises(ValueError, match="Unknown extension '.abc'"):
            io.save_data((tmpdir + '/test.abc'), data=np.ones((1, 1, 1)))
        with pytest.raises(ValueError, match="Unknown extension '.abc'"):
            io.load_data((tmpdir + '/test.abc'))
def extractSinKnoxtSpace(item):
    """Build a release message for a 'Sin Knoxt Space' feed item.

    Returns None for previews or titles with neither chapter nor volume,
    False when no known tag matches, otherwise the release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    is_preview = 'preview' in item['title'].lower()
    if not (chp or vol) or is_preview:
        return None
    # (tag to look for, series name to emit, translation type)
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in known_tags:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(
            item, name, vol, chp,
            frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class MwaaWorkflowService(WorkflowService):
    """WorkflowService backed by AWS Managed Workflows for Apache Airflow.

    Workflows are driven by POSTing Airflow CLI commands to the MWAA web
    server, authenticated with a short-lived CLI token from boto3.
    """

    def __init__(self, region: str = 'us-west-2', access_key_id: Optional[str] = None,
                 access_key_data: Optional[str] = None,
                 config: Optional[Dict[str, Any]] = None,
                 session_token: Optional[str] = None) -> None:
        # Explicit credentials override entries already present in `config`;
        # anything left unset falls back to boto3's normal credential chain.
        self.config: Dict[str, Any] = (config or {})
        if access_key_id is not None:
            self.config['aws_access_key_id'] = access_key_id
        if access_key_data is not None:
            self.config['aws_secret_access_key'] = access_key_data
        if session_token is not None:
            self.config['aws_session_token'] = session_token
        self.client: BaseClient = boto3.client('mwaa', region_name=region, **self.config)

    def start_workflow(self, workflow_conf: Dict[str, str], run_id: str,
                       run_conf: Optional[Dict[str, Any]] = None) -> str:
        """Trigger a DAG run via `dags trigger`; return `run_id` on success.

        Raises Exception when the run id already exists or the CLI fails.
        """
        payload: str = 'dags trigger ' + workflow_conf['dag_id'] + ' -r ' + run_id
        if run_conf:
            # BUG FIX: original mixed an f-string with .format()
            # (f' -c {0}'.format(...)), which always appended the literal '0'
            # instead of the serialized run_conf.
            payload += f' -c {json.dumps(run_conf)}'
        mwaa_response: Response = self.trigger_airflow_cli(workflow_conf['env_name'], payload)
        (res_stdout, res_stderr) = self.parse_response_plain_result(mwaa_response)
        if TRIGGER_SUCCESS_LOG in res_stdout:
            # BUG FIX: same f-string/.format mix-up logged '0' for the run id.
            logging.info('Trigger run_id %s successfully', run_id)
        elif RUN_ID_ALREADY_EXISTS_LOG in res_stderr:
            logging.error('run_id %s already exists', run_id)
            raise Exception(f'run_id {run_id} already exists')
        else:
            logging.error(res_stderr)
            raise Exception(res_stderr)
        return run_id

    def get_workflow_status(self, workflow_conf: Dict[str, str], run_id: str) -> WorkflowStatus:
        """Map the per-task states of a DAG run onto a WorkflowStatus.

        Any failure to fetch/parse task states yields UNKNOWN rather than
        raising — status polling is best-effort.
        """
        payload: str = ('tasks states-for-dag-run ' + workflow_conf['dag_id']
                        + ' ' + run_id + ' -o json')
        mwaa_response: Response = self.trigger_airflow_cli(workflow_conf['env_name'], payload)
        try:
            tasks = self.parse_response_json_result(mwaa_response)
            statuses: List[str] = [task['state'] for task in tasks]
            if not statuses:
                return WorkflowStatus.UNKNOWN
            if MwaaReturnStatus.FAILED in statuses:
                return WorkflowStatus.FAILED
            # Still running: a task is RUNNING, or some succeeded while
            # others have no state yet (None).
            if (MwaaReturnStatus.RUNNING in statuses) or ((MwaaReturnStatus.SUCCESS in statuses) and (None in statuses)):
                return WorkflowStatus.STARTED
            # Completed only when every task reports SUCCESS.
            if (MwaaReturnStatus.SUCCESS in statuses) and (len(set(statuses)) == 1):
                return WorkflowStatus.COMPLETED
            return WorkflowStatus.CREATED
        except Exception:
            return WorkflowStatus.UNKNOWN

    def trigger_airflow_cli(self, env_name: str, payload: str) -> Response:
        """POST a raw Airflow CLI command to the MWAA environment's web server."""
        mwaa_cli_token: Dict[str, str] = self.client.create_cli_token(Name=env_name)
        headers: Dict[str, str] = {
            'Authorization': AUTHORIZATION_TOKEN.format(mwaa_cli_token['CliToken']),
            'Content-Type': 'text/plain',
        }
        url: str = AIRFLOW_URL.format(mwaa_cli_token['WebServerHostname'])
        return requests.post(url, data=payload, headers=headers)

    def parse_response_plain_result(self, response: Response) -> Tuple[str, str]:
        """Return the (stdout, stderr) of a CLI response, base64-decoded."""
        return (base64.b64decode(response.json()['stdout']).decode('utf8'),
                base64.b64decode(response.json()['stderr']).decode('utf8'))

    def parse_response_json_result(self, response: Response) -> List[Dict[str, str]]:
        """Decode the CLI stdout and parse it as JSON (used with `-o json`)."""
        return json.loads(base64.b64decode(response.json()['stdout']).decode('utf8'))
def create_app():
    """Application factory: configure and return the (module-global) Flask app.

    Uses the module-level `app_created` flag to make repeated calls
    idempotent for the one-time setup steps (blueprint manager, graphql,
    Sentry).
    """
    global app_created
    if (not app_created):
        # One-time registrations that must not run twice on the same app.
        BlueprintsManager.register(app)
        graphql_views.init_app(app)
    Migrate(app, db)
    # Config class comes from APP_CONFIG; defaults to production settings.
    app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))
    if (not app.config['SECRET_KEY']):
        if app.config['PRODUCTION']:
            # Refuse to run production without an explicit secret.
            app.logger.error('SECRET_KEY must be set in .env or environment variables in production')
            exit(1)
        else:
            # Dev convenience: generate a throwaway secret (sessions won't
            # survive restarts).
            random_secret = secrets.token_hex()
            app.logger.warning(f'Using random secret "{random_secret}" for development server. This is NOT recommended. Set proper SECRET_KEY in .env or environment variables')
            app.config['SECRET_KEY'] = random_secret
    db.init_app(app)
    # Cache backend: simple in-memory when enabled, no-op otherwise.
    if app.config['CACHING']:
        cache.init_app(app, config={'CACHE_TYPE': 'simple'})
    else:
        cache.init_app(app, config={'CACHE_TYPE': 'null'})
    stripe.api_key = 'SomeStripeKey'
    app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
    app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
    app.logger.addHandler(logging.StreamHandler(sys.stdout))
    app.logger.setLevel(logging.ERROR)
    # JWT setup: cookie+header tokens, refresh-token blacklisting enabled.
    app.config['JWT_HEADER_TYPE'] = 'JWT'
    app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=1)
    app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(days=365)
    app.config['JWT_ERROR_MESSAGE_KEY'] = 'error'
    app.config['JWT_TOKEN_LOCATION'] = ['cookies', 'headers']
    app.config['JWT_REFRESH_COOKIE_PATH'] = '/v1/auth/token/refresh'
    app.config['JWT_SESSION_COOKIE'] = False
    app.config['JWT_BLACKLIST_ENABLED'] = True
    app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['refresh']
    _jwt = JWTManager(app)
    _jwt.user_loader_callback_loader(jwt_user_loader)
    _jwt.token_in_blacklist_loader(is_token_blacklisted)
    # Celery settings piggyback on Redis for both broker and result backend.
    app.config['broker_url'] = app.config['REDIS_URL']
    app.config['result_backend'] = app.config['broker_url']
    app.config['accept_content'] = ['json', 'application/text']
    app.config['MAIL_RECORDER'] = MailRecorder(use_env=True)
    CORS(app, resources={'/*': {'origins': '*'}})
    AuthManager.init_login(app)
    if (app.config['TESTING'] and app.config['PROFILE']):
        # Profile the 30 slowest calls per request when testing with PROFILE.
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
    with app.app_context():
        # Blueprints are imported inside the app context (and inside this
        # function) to avoid circular imports at module load time.
        from app.api.admin_statistics_api.events import event_statistics
        from app.api.auth import auth_routes
        from app.api.custom.attendees import attendee_blueprint
        from app.api.bootstrap import api_v1
        from app.api.celery_tasks import celery_routes
        from app.api.event_copy import event_copy
        from app.api.exports import export_routes
        from app.api.imports import import_routes
        from app.api.uploads import upload_routes
        from app.api.users import user_misc_routes
        from app.api.orders import order_misc_routes
        from app.api.role_invites import role_invites_misc_routes
        from app.api.speaker_invites import speaker_invites_misc_routes
        from app.api.auth import authorised_blueprint
        from app.api.admin_translations import admin_blueprint
        from app.api.orders import alipay_blueprint, stripe_blueprint
        from app.api.sessions import sessions_blueprint
        from app.api.settings import admin_misc_routes
        from app.api.server_version import info_route
        from app.api.custom.orders import ticket_blueprint
        from app.api.custom.orders import order_blueprint
        from app.api.custom.invoices import event_blueprint
        from app.api.custom.calendars import calendar_routes
        from app.api.tickets import tickets_routes
        from app.api.custom.role_invites import role_invites_routes
        from app.api.custom.users_groups_roles import users_groups_roles_routes
        from app.api.custom.events import events_routes
        from app.api.custom.groups import groups_routes
        from app.api.custom.group_role_invite import group_role_invites_routes
        from app.api.video_stream import streams_routes
        from app.api.events import events_blueprint
        from app.api.custom.badge_forms import badge_forms_routes
        from app.api.custom.tickets import ticket_routes
        from app.api.custom.users import users_routes
        from app.api.custom.users_check_in import users_check_in_routes
        app.register_blueprint(api_v1)
        app.register_blueprint(event_copy)
        app.register_blueprint(upload_routes)
        app.register_blueprint(export_routes)
        app.register_blueprint(import_routes)
        app.register_blueprint(celery_routes)
        app.register_blueprint(auth_routes)
        app.register_blueprint(event_statistics)
        app.register_blueprint(user_misc_routes)
        app.register_blueprint(attendee_blueprint)
        app.register_blueprint(order_misc_routes)
        app.register_blueprint(role_invites_misc_routes)
        app.register_blueprint(speaker_invites_misc_routes)
        app.register_blueprint(authorised_blueprint)
        app.register_blueprint(admin_blueprint)
        app.register_blueprint(alipay_blueprint)
        app.register_blueprint(stripe_blueprint)
        app.register_blueprint(admin_misc_routes)
        app.register_blueprint(info_route)
        app.register_blueprint(ticket_blueprint)
        app.register_blueprint(order_blueprint)
        app.register_blueprint(event_blueprint)
        app.register_blueprint(sessions_blueprint)
        app.register_blueprint(calendar_routes)
        app.register_blueprint(streams_routes)
        app.register_blueprint(role_invites_routes)
        app.register_blueprint(users_groups_roles_routes)
        app.register_blueprint(events_routes)
        app.register_blueprint(groups_routes)
        app.register_blueprint(events_blueprint)
        app.register_blueprint(tickets_routes)
        app.register_blueprint(group_role_invites_routes)
        app.register_blueprint(badge_forms_routes)
        app.register_blueprint(ticket_routes)
        app.register_blueprint(users_routes)
        # NOTE(review): check_in_stats_routes is registered but not imported
        # in this block — presumably imported at module level; verify.
        app.register_blueprint(check_in_stats_routes)
        app.register_blueprint(users_check_in_routes)
    add_engine_pidguard(db.engine)
    if app.config['SQLALCHEMY_DATABASE_URI'].startswith('sqlite://'):
        # SQLite lacks proper datetime handling; apply the compatibility fix.
        sqlite_datetime_fix()
    sa.orm.configure_mappers()
    if app.config['SERVE_STATIC']:
        app.add_url_rule('/static/<path:filename>', endpoint='static', view_func=app.send_static_file)
    if ((not app_created) and ('SENTRY_DSN' in app.config)):
        # Initialize Sentry exactly once per process.
        sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration(), RedisIntegration(), CeleryIntegration(), SqlalchemyIntegration()], release=app.config['SENTRY_RELEASE_NAME'], traces_sample_rate=app.config['SENTRY_TRACES_SAMPLE_RATE'])
    redis_store.init_app(app)
    shell.init_app(app)
    limiter.init_app(app)
    app_created = True
    return app
def h_coord_from_dipeptide(pdb1, pdb2):
    """Locate the coordinate of a specific added hydrogen in a dipeptide.

    The two PDB blocks are concatenated, parsed with RDKit, hydrogens are
    added with coordinates, and the atom matching index 5 of the SMILES
    template is returned as a 3D position. Raises RuntimeError (after
    printing both blocks for debugging) when the PDB cannot be parsed.
    """
    combined = Chem.MolFromPDBBlock(pdb1 + pdb2)
    if combined is None:
        print(pdb1)
        print(pdb2)
        raise RuntimeError
    with_hydrogens = Chem.AddHs(combined, addCoords=True)
    parser_params = Chem.SmilesParserParams()
    parser_params.removeHs = False
    template = Chem.MolFromSmiles('C(=O)C([H])N([H])C(=O)C([H])N', parser_params)
    # Index of the backbone [H] of interest within the template SMILES.
    h_idx = 5
    atom_map = mapping_by_mcs(template, with_hydrogens)
    return with_hydrogens.GetConformer().GetAtomPosition(atom_map[h_idx])
class OptionSeriesVectorDataEvents(Options):
    """Per-point event-handler options for vector series data.

    FIX: each option appeared twice as plain methods (getter shadowed by
    setter) — residue of stripped decorators. Restored the
    `@property`/`@<name>.setter` pairs so both accessors work.
    """

    @property
    def click(self):
        """Configured `click` handler (None when unset)."""
        return self._config_get(None)

    @click.setter
    def click(self, value: Any):
        self._config(value, js_type=False)

    @property
    def drag(self):
        """Configured `drag` handler (None when unset)."""
        return self._config_get(None)

    @drag.setter
    def drag(self, value: Any):
        self._config(value, js_type=False)

    @property
    def dragStart(self):
        """Configured `dragStart` handler (None when unset)."""
        return self._config_get(None)

    @dragStart.setter
    def dragStart(self, value: Any):
        self._config(value, js_type=False)

    @property
    def drop(self):
        """Configured `drop` handler (None when unset)."""
        return self._config_get(None)

    @drop.setter
    def drop(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mouseOut(self):
        """Configured `mouseOut` handler (None when unset)."""
        return self._config_get(None)

    @mouseOut.setter
    def mouseOut(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mouseOver(self):
        """Configured `mouseOver` handler (None when unset)."""
        return self._config_get(None)

    @mouseOver.setter
    def mouseOver(self, value: Any):
        self._config(value, js_type=False)

    @property
    def remove(self):
        """Configured `remove` handler (None when unset)."""
        return self._config_get(None)

    @remove.setter
    def remove(self, value: Any):
        self._config(value, js_type=False)

    @property
    def select(self):
        """Configured `select` handler (None when unset)."""
        return self._config_get(None)

    @select.setter
    def select(self, value: Any):
        self._config(value, js_type=False)

    @property
    def unselect(self):
        """Configured `unselect` handler (None when unset)."""
        return self._config_get(None)

    @unselect.setter
    def unselect(self, value: Any):
        self._config(value, js_type=False)

    @property
    def update(self):
        """Configured `update` handler (None when unset)."""
        return self._config_get(None)

    @update.setter
    def update(self, value: Any):
        self._config(value, js_type=False)
class Train_ms_config():
    """Configuration for the `train_ms` training stage."""

    def __init__(self, config_path: str, env: Dict[str, any], base: Dict[str, any], model: str, num_workers: int, spec_cache: bool, keep_ckpts: int):
        self.env = env                    # environment settings
        self.base = base                  # base-model settings
        self.model = model                # model output directory/name
        self.config_path = config_path    # path to the stage config file
        self.num_workers = num_workers    # dataloader worker count
        self.spec_cache = spec_cache      # whether to cache spectrograms
        self.keep_ckpts = keep_ckpts      # number of checkpoints to retain

    # FIX: `from_dict` takes `cls` and constructs via `cls(**data)` but was
    # missing the @classmethod decorator, so Train_ms_config.from_dict(...)
    # could not work as written.
    @classmethod
    def from_dict(cls, dataset_path: str, data: Dict[str, any]):
        """Build a config from `data`, resolving config_path under dataset_path.

        Note: mutates `data['config_path']` in place (original behavior).
        """
        data['config_path'] = os.path.join(dataset_path, data['config_path'])
        return cls(**data)
def _get_block_by_number(chain, block_number):
    """Resolve an RPC-style block identifier to a block object.

    'latest'/'safe'/'finalized' map to the newest canonical block (one
    behind the pending head), 'earliest' to block 0, 'pending' to the
    in-progress head block, and an integer to that canonical block if it
    is below the head. Anything else raises BlockNotFound.
    """
    if block_number in ('latest', 'safe', 'finalized'):
        head = chain.get_block()
        return chain.get_canonical_block_by_number(max(0, head.number - 1))
    if block_number == 'earliest':
        return chain.get_canonical_block_by_number(0)
    if block_number == 'pending':
        return chain.get_block()
    if is_integer(block_number):
        head = chain.get_block()
        if block_number < head.number:
            return chain.get_canonical_block_by_number(block_number)
    raise BlockNotFound(f'No block found for block number: {block_number}')
def _valid_alignment(op: Operator, slice_dim: int, slice_output_tensor: Tensor, slice_input_shape: List[IntVar], start_indices: List[int], end_indices: List[int]) -> bool:
    """Decide whether `op` can consume a strided-slice view directly.

    Returns True when the consumer either tolerates arbitrary strides
    (elementwise/concat/permute021/layernorm families) or, for gemm_rcr and
    bmm consumers, when the slice's start offset keeps the relevant leading
    dimension alignment valid for the tensor's dtype.
    """
    op_type = op._attrs['op']
    # These op families handle strided inputs without alignment constraints.
    if ((op_type in ('fused_elementwise', 'concatenate', 'permute021')) or op_type.startswith('layernorm') or op_type.startswith('group_layernorm')):
        return True
    dtype = slice_output_tensor.dtype()
    # Static stride of the sliced dimension; None would mean a dynamic stride.
    stride = shape_utils.get_static_stride(slice_input_shape, slice_dim)
    assert (stride is not None), f'expected non-None stride for slice_input_shape={slice_input_shape!r} at slice_dim={slice_dim!r}'
    # Element offset of the slice start within the underlying buffer.
    start_offset = (start_indices[slice_dim] * stride)
    if op_type.startswith('gemm_rcr'):
        slice_output_rank = op._attrs['outputs'][0]._rank()
        if (slice_output_rank > 2):
            # For >2D gemm inputs, every non-K dimension must be sliced
            # full-range, otherwise the view is not a contiguous gemm operand.
            for (dim, s_i, e_i) in zip(slice_input_shape[:(- 1)], start_indices[:(- 1)], end_indices[:(- 1)]):
                if (not _is_slice_full_range(dim, s_i, e_i)):
                    return False
        # K (innermost) dim must be static to reason about alignment.
        k_dim = slice_input_shape[(- 1)]
        if (not isinstance(k_dim, IntImm)):
            return False
        # Effective alignment is limited by both K and the start offset.
        alignment = math.gcd(k_dim.value(), start_offset)
        return utils_alignment.valid_alignment(alignment, dtype)
    if op_type.startswith('bmm'):
        bmm_inputs = op._attrs['inputs']
        # Pick the leading dim of whichever bmm operand the slice feeds.
        if (bmm_inputs[0] is slice_output_tensor):
            leading_dim = op._get_a_leading_dim(slice_input_shape[op._get_m_idx_in_a(slice_input_shape)], slice_input_shape[op._get_k_idx_in_a(slice_input_shape)])
        elif (bmm_inputs[1] is slice_output_tensor):
            leading_dim = op._get_b_leading_dim(slice_input_shape[op._get_n_idx_in_b(slice_input_shape)], slice_input_shape[op._get_k_idx_in_b(slice_input_shape)])
        else:
            # Slice feeds neither A nor B (e.g. bias) — not supported here.
            return False
        if (not isinstance(leading_dim, IntImm)):
            return False
        alignment = math.gcd(leading_dim.value(), start_offset)
        return utils_alignment.valid_alignment(alignment, dtype)
    # Any other consumer: be conservative.
    return False
def set_ev_handler(ev_cls, dispatchers=None):
    """Return a decorator that registers `handler` for the given event class(es).

    Each event class in `ev_cls` (scalar or list) gets its own _Caller entry
    in the handler's `callers` mapping, created lazily on first use.
    """
    def _set_ev_cls_dec(handler):
        if 'callers' not in dir(handler):
            handler.callers = {}
        for ev in _listify(ev_cls):
            handler.callers[ev] = _Caller(_listify(dispatchers), None)
        return handler
    return _set_ev_cls_dec
def test_correct_response_without_geo_filters(client, monkeypatch, elasticsearch_award_index, awards_and_transactions):
    """Run every per-scope 'no geo filters' sub-check against one ES setup."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    sub_checks = (
        _test_correct_response_for_pop_county_without_geo_filters,
        _test_correct_response_for_pop_district_without_geo_filters,
        _test_correct_response_for_pop_state_without_geo_filters,
        _test_correct_response_for_recipient_location_county_without_geo_filters,
        _test_correct_response_for_recipient_location_district_without_geo_filters,
        _test_correct_response_for_recipient_location_state_without_geo_filters,
    )
    for check in sub_checks:
        check(client)
class Trip(object):
    """Swagger-generated model for a vehicle trip.

    FIX: the `@property`/`@<name>.setter` decorators had been stripped
    (leaving residue such as a bare `_at.setter` line, i.e. the tail of
    `@created_at.setter`), so every accessor pair shadowed itself.
    Restored the standard swagger-codegen property pattern.
    """

    # Maps attribute name -> swagger type string.
    swagger_types = {'created_at': 'datetime', 'links': 'TripLinks', 'avg_consumption': 'list[TripAvgConsumption]', 'distance': 'float', 'done': 'bool', 'duration': 'str', 'faults': 'list[str]', 'id': 'str', 'odometer': 'float', 'start_position': 'Position', 'started_at': 'datetime', 'stop_position': 'Position', 'stopped_at': 'datetime', 'zero_emission_ratio': 'float'}
    # Maps attribute name -> JSON key.
    attribute_map = {'created_at': 'createdAt', 'links': '_links', 'avg_consumption': 'avgConsumption', 'distance': 'distance', 'done': 'done', 'duration': 'duration', 'faults': 'faults', 'id': 'id', 'odometer': 'odometer', 'start_position': 'startPosition', 'started_at': 'startedAt', 'stop_position': 'stopPosition', 'stopped_at': 'stoppedAt', 'zero_emission_ratio': 'zeroEmissionRatio'}

    def __init__(self, created_at=None, links=None, avg_consumption=None, distance=None, done=None, duration=None, faults=None, id=None, odometer=None, start_position=None, started_at=None, stop_position=None, stopped_at=None, zero_emission_ratio=None):
        """Initialize a Trip; only explicitly-passed fields are set."""
        self._created_at = None
        self._links = None
        self._avg_consumption = None
        self._distance = None
        self._done = None
        self._duration = None
        self._faults = None
        self._id = None
        self._odometer = None
        self._start_position = None
        self._started_at = None
        self._stop_position = None
        self._stopped_at = None
        self._zero_emission_ratio = None
        self.discriminator = None
        if created_at is not None:
            self.created_at = created_at
        if links is not None:
            self.links = links
        if avg_consumption is not None:
            self.avg_consumption = avg_consumption
        if distance is not None:
            self.distance = distance
        if done is not None:
            self.done = done
        if duration is not None:
            self.duration = duration
        if faults is not None:
            self.faults = faults
        if id is not None:
            self.id = id
        if odometer is not None:
            self.odometer = odometer
        if start_position is not None:
            self.start_position = start_position
        if started_at is not None:
            self.started_at = started_at
        if stop_position is not None:
            self.stop_position = stop_position
        if stopped_at is not None:
            self.stopped_at = stopped_at
        if zero_emission_ratio is not None:
            self.zero_emission_ratio = zero_emission_ratio

    @property
    def created_at(self):
        """Gets the created_at of this Trip."""
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this Trip."""
        self._created_at = created_at

    @property
    def links(self):
        """Gets the links of this Trip."""
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this Trip."""
        self._links = links

    @property
    def avg_consumption(self):
        """Gets the avg_consumption of this Trip."""
        return self._avg_consumption

    @avg_consumption.setter
    def avg_consumption(self, avg_consumption):
        """Sets the avg_consumption of this Trip."""
        self._avg_consumption = avg_consumption

    @property
    def distance(self):
        """Gets the distance of this Trip."""
        return self._distance

    @distance.setter
    def distance(self, distance):
        """Sets the distance of this Trip."""
        self._distance = distance

    @property
    def done(self):
        """Gets the done flag of this Trip."""
        return self._done

    @done.setter
    def done(self, done):
        """Sets the done flag of this Trip."""
        self._done = done

    @property
    def duration(self):
        """Gets the duration of this Trip."""
        return self._duration

    @duration.setter
    def duration(self, duration):
        """Sets the duration of this Trip."""
        self._duration = duration

    @property
    def faults(self):
        """Gets the faults of this Trip."""
        return self._faults

    @faults.setter
    def faults(self, faults):
        """Sets the faults of this Trip; values must be from the allowed set."""
        allowed_values = ['Unstarted', 'DataLacking', 'Unfinished']
        if not set(faults).issubset(set(allowed_values)):
            raise ValueError('Invalid values for `faults` [{0}], must be a subset of [{1}]'.format(', '.join(map(str, (set(faults) - set(allowed_values)))), ', '.join(map(str, allowed_values))))
        self._faults = faults

    @property
    def id(self):
        """Gets the id of this Trip."""
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Trip."""
        self._id = id

    @property
    def odometer(self):
        """Gets the odometer of this Trip."""
        return self._odometer

    @odometer.setter
    def odometer(self, odometer):
        """Sets the odometer of this Trip."""
        self._odometer = odometer

    @property
    def start_position(self):
        """Gets the start_position of this Trip."""
        return self._start_position

    @start_position.setter
    def start_position(self, start_position):
        """Sets the start_position of this Trip."""
        self._start_position = start_position

    @property
    def started_at(self):
        """Gets the started_at of this Trip."""
        return self._started_at

    @started_at.setter
    def started_at(self, started_at):
        """Sets the started_at of this Trip."""
        self._started_at = started_at

    @property
    def stop_position(self):
        """Gets the stop_position of this Trip."""
        return self._stop_position

    @stop_position.setter
    def stop_position(self, stop_position):
        """Sets the stop_position of this Trip."""
        self._stop_position = stop_position

    @property
    def stopped_at(self):
        """Gets the stopped_at of this Trip."""
        return self._stopped_at

    @stopped_at.setter
    def stopped_at(self, stopped_at):
        """Sets the stopped_at of this Trip."""
        self._stopped_at = stopped_at

    @property
    def zero_emission_ratio(self):
        """Gets the zero_emission_ratio of this Trip."""
        return self._zero_emission_ratio

    @zero_emission_ratio.setter
    def zero_emission_ratio(self, zero_emission_ratio):
        """Sets the zero_emission_ratio of this Trip."""
        self._zero_emission_ratio = zero_emission_ratio

    def to_dict(self):
        """Return the model's properties as a dict, recursing into sub-models."""
        result = {}
        for (attr, _) in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
            else:
                result[attr] = value
        if issubclass(Trip, dict):
            for (key, value) in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        """Trips are equal when all their attributes are equal."""
        if not isinstance(other, Trip):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class OptionPlotoptionsPolygonSonificationContexttracksActivewhen(Options):
    """Activation-condition options for a sonification context track.

    FIX: each option appeared twice as plain methods (getter shadowed by
    setter) — residue of stripped decorators. Restored the
    `@property`/`@<name>.setter` pairs.
    """

    @property
    def crossingDown(self):
        """Configured `crossingDown` threshold (None when unset)."""
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        """Configured `crossingUp` threshold (None when unset)."""
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        """Configured `max` bound (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Configured `min` bound (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Configured `prop` (point property name; None when unset)."""
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
def lazy_import():
    """Populate module globals with model classes on first use.

    Deferring these imports avoids circular-import problems at module
    load time; after the first call the names resolve like normal imports.
    """
    from fastly.model.config_store_item import ConfigStoreItem
    from fastly.model.config_store_item_response_all_of import ConfigStoreItemResponseAllOf
    from fastly.model.timestamps import Timestamps
    module_globals = globals()
    module_globals['ConfigStoreItem'] = ConfigStoreItem
    module_globals['ConfigStoreItemResponseAllOf'] = ConfigStoreItemResponseAllOf
    module_globals['Timestamps'] = Timestamps
class RoughVizBar(OptionsWithTemplates):
    """Options wrapper for a roughViz bar chart.

    FIX: every option appeared twice as plain methods (getter shadowed by
    setter) — residue of stripped decorators. Restored the
    `@property`/`@<name>.setter` pairs. Defaults shown are the values
    passed to `_config_get`.
    """

    component_properties = ('width',)

    @property
    def data(self):
        """Chart data sub-configuration."""
        return self._config_sub_data('data', OptionData)

    @property
    def element(self):
        """Target element html code."""
        return self._config_get()

    @element.setter
    def element(self, html_code):
        self._config(html_code)

    @property
    def axisFontSize(self):
        """Axis font size (default '1rem')."""
        return self._config_get('1rem')

    @axisFontSize.setter
    def axisFontSize(self, value):
        self._config(value)

    @property
    def axisRoughness(self):
        """Axis roughness (default 0.5)."""
        return self._config_get(0.5)

    @axisRoughness.setter
    def axisRoughness(self, num):
        self._config(num)

    @property
    def fillWeight(self):
        """Fill weight (default 0.5)."""
        return self._config_get(0.5)

    @fillWeight.setter
    def fillWeight(self, num):
        self._config(num)

    @property
    def font(self):
        """Chart font."""
        return self._config_get()

    @font.setter
    def font(self, text):
        self._config(text)

    @property
    def highlight(self):
        """Highlight color."""
        return self._config_get()

    @highlight.setter
    def highlight(self, color):
        self._config(color)

    @property
    def height(self):
        """Chart height."""
        return self._config_get()

    @height.setter
    def height(self, num):
        self._config(num)

    @property
    def innerStrokeWidth(self):
        """Inner stroke width (default 1)."""
        return self._config_get(1)

    @innerStrokeWidth.setter
    def innerStrokeWidth(self, num):
        self._config(num)

    @property
    def interactive(self):
        """Interactivity flag (default True)."""
        return self._config_get(True)

    @interactive.setter
    def interactive(self, num):
        self._config(num)

    @property
    def labelFontSize(self):
        """Label font size (default '1rem')."""
        return self._config_get('1rem')

    @labelFontSize.setter
    def labelFontSize(self, text):
        self._config(text)

    @property
    def margin(self):
        """Chart margins (default top/right/bottom/left dict)."""
        return self._config_get({'top': 50, 'right': 20, 'bottom': 70, 'left': 100})

    @margin.setter
    def margin(self, text):
        self._config(text)

    @property
    def padding(self):
        """Bar padding (default 0.1)."""
        return self._config_get(0.1)

    @padding.setter
    def padding(self, value):
        self._config(value)

    @property
    def roughness(self):
        """Drawing roughness (default 1)."""
        return self._config_get(1)

    @roughness.setter
    def roughness(self, value):
        self._config(value)

    @property
    def simplification(self):
        """Path simplification (default 0.2)."""
        return self._config_get(0.2)

    @simplification.setter
    def simplification(self, value):
        self._config(value)

    @property
    def color(self):
        """Bar color."""
        return self._config_get()

    @color.setter
    def color(self, value):
        self._config(value)

    @property
    def stroke(self):
        """Stroke color."""
        return self._config_get()

    @stroke.setter
    def stroke(self, value):
        self._config(value)

    @property
    def strokeWidth(self):
        """Stroke width."""
        return self._config_get()

    @strokeWidth.setter
    def strokeWidth(self, num):
        self._config(num)

    @property
    def title(self):
        """Chart title."""
        return self._config_get()

    @title.setter
    def title(self, text):
        self._config(text)

    @property
    def titleFontSize(self):
        """Title font size (default '1rem')."""
        return self._config_get('1rem')

    @titleFontSize.setter
    def titleFontSize(self, text):
        self._config(text)

    @property
    def tooltipFontSize(self):
        """Tooltip font size (default '0.95rem')."""
        return self._config_get('0.95rem')

    @tooltipFontSize.setter
    def tooltipFontSize(self, text):
        self._config(text)

    @property
    def xLabel(self):
        """X-axis label."""
        return self._config_get()

    @xLabel.setter
    def xLabel(self, text):
        self._config(text)

    @property
    def yLabel(self):
        """Y-axis label."""
        return self._config_get()

    @yLabel.setter
    def yLabel(self, text):
        self._config(text)

    @property
    def width(self):
        """Chart width (default the JS expression 'window.innerWidth')."""
        return self._config_get('window.innerWidth')

    @width.setter
    def width(self, num):
        # js_type=True: the value is emitted as a raw JavaScript expression.
        self._config(num, js_type=True)
class RMTTestSCCTests(object):
    """Tests for strongly_connected_components / check_for_... on small digraphs.

    Each test builds a Digraph from an adjacency dict, computes SCCs, and
    compares the SCCs (as sets of node names) or the boolean existence check.
    """

    def rmttest_scc_001(self):
        # Two-node cycle: one SCC containing both nodes.
        dg = Digraph({'A': ['B'], 'B': ['A']})
        sccs = strongly_connected_components(dg)
        sccsnames = node_sl_to_node_name_sl(sccs)
        assert (sccsnames == [set(['A', 'B'])]), 'incorrect'

    def rmttest_scc_002(self):
        # Acyclic A->B: two singleton SCCs, reverse-topological order.
        dg = Digraph({'A': ['B'], 'B': []})
        sccs = strongly_connected_components(dg)
        sccsnames = node_sl_to_node_name_sl(sccs)
        assert (sccsnames == [set(['B']), set(['A'])]), 'incorrect'

    def rmttest_scc_003(self):
        # Cycle A<->B plus sink C: SCCs {C} then {A, B}.
        dg = Digraph({'A': ['B', 'C'], 'B': ['A'], 'C': []})
        sccs = strongly_connected_components(dg)
        sccsnames = node_sl_to_node_name_sl(sccs)
        assert (sccsnames == [set(['C']), set(['A', 'B'])]), 'incorrect'

    def rmttest_scc_004(self):
        # All three nodes mutually reachable: a single SCC.
        dg = Digraph({'A': ['B', 'C'], 'B': ['A'], 'C': ['A']})
        sccs = strongly_connected_components(dg)
        sccsnames = node_sl_to_node_name_sl(sccs)
        assert (sccsnames == [set(['A', 'C', 'B'])]), 'incorrect'

    def rmttest_scc_005(self):
        # A->C->B->A cycle: again one SCC of all three nodes.
        dg = Digraph({'A': ['B', 'C'], 'B': ['A'], 'C': ['B']})
        sccs = strongly_connected_components(dg)
        sccsnames = node_sl_to_node_name_sl(sccs)
        assert (sccsnames == [set(['A', 'C', 'B'])]), 'incorrect'

    def rmttest_scc_006(self):
        # Same graph as 005: the existence check must report an SCC.
        dg = Digraph({'A': ['B', 'C'], 'B': ['A'], 'C': ['B']})
        sccs = strongly_connected_components(dg)
        scc_exists = check_for_strongly_connected_components(sccs)
        assert (scc_exists is True), 'incorrect'

    def rmttest_scc_007(self):
        # Pure chain A->B->C: no non-trivial SCC.
        dg = Digraph({'A': ['B'], 'B': ['C'], 'C': []})
        sccs = strongly_connected_components(dg)
        scc_exists = check_for_strongly_connected_components(sccs)
        assert (scc_exists is False), 'incorrect'

    def rmttest_scc_008(self):
        # Cycle A<->B with isolated C: existence check still True.
        dg = Digraph({'A': ['B'], 'B': ['A'], 'C': []})
        sccs = strongly_connected_components(dg)
        scc_exists = check_for_strongly_connected_components(sccs)
        assert (scc_exists is True), 'incorrect'
def split_complex(input_arr_t):
    """Return a Transformation that splits a complex-typed array into its
    real and imaginary parts (two real outputs of the same shape)."""
    # Both outputs share the real counterpart of the input's complex dtype.
    part_t = Type(dtypes.real_for(input_arr_t.dtype), shape=input_arr_t.shape)
    parameters = [
        Parameter('real', Annotation(part_t, 'o')),
        Parameter('imag', Annotation(part_t, 'o')),
        Parameter('input', Annotation(input_arr_t, 'i')),
    ]
    return Transformation(parameters, '\n ${real.store_same}(${input.load_same}.x);\n ${imag.store_same}(${input.load_same}.y);\n ')
class VRRPCommon(app_manager.RyuApp):
    """Shared driver for VRRP integration tests.

    Runs the whole version x priority matrix over a pair of interfaces
    supplied by a concrete subclass via _IFNAME0/_IFNAME1.  The subclass is
    also expected to provide _configure_vrrp_router(); vrrp_api, _VRID and
    _PRIMARY_IP_ADDRESS0/1 are presumably module-level names -- confirm.
    """
    # Interface names; must be overridden by the concrete test subclass.
    _IFNAME0 = None
    _IFNAME1 = None

    def __init__(self, *args, **kwargs):
        super(VRRPCommon, self).__init__(*args, **kwargs)

    def _main(self):
        # Exercise VRRP v3 first, then v2.
        self._main_version(vrrp.VRRP_VERSION_V3)
        self._main_version(vrrp.VRRP_VERSION_V2)
        print('done!')

    def _main_version(self, vrrp_version):
        # Cover the special address-owner priority plus the backup extremes
        # and the default.
        self._main_version_priority(vrrp_version, vrrp.VRRP_PRIORITY_ADDRESS_OWNER)
        self._main_version_priority(vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_MAX)
        self._main_version_priority(vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_DEFAULT)
        self._main_version_priority(vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_MIN)

    def _main_version_priority(self, vrrp_version, priority):
        # Each combination is run without and with sleep-based verification.
        self._main_version_priority_sleep(vrrp_version, priority, False)
        self._main_version_priority_sleep(vrrp_version, priority, True)

    def _check(self, vrrp_api, instances):
        """Poll vrrp_list until every VR pair has elected exactly one master.

        *instances* maps vrid -> {0: rep0, 1: rep1}; each vrid contributes
        two router instances, hence the ``* 2`` below.
        """
        while True:
            # Inner loop: wait until all instances exist and none is still
            # in the INITIALIZE state.
            while True:
                rep = vrrp_api.vrrp_list(self)
                if (len(rep.instance_list) >= (len(instances) * 2)):
                    if any(((i.state == vrrp_event.VRRP_STATE_INITIALIZE) for i in rep.instance_list)):
                        continue
                    break
                print(('%s / %s' % (len(rep.instance_list), (len(instances) * 2))))
                time.sleep(1)
            assert (len(rep.instance_list) == (len(instances) * 2))
            num_of_master = 0
            # Index the reported instances by name for the diagnostics below.
            d = dict(((i.instance_name, i) for i in rep.instance_list))
            bad = 0
            for i in rep.instance_list:
                assert (i.state in (vrrp_event.VRRP_STATE_MASTER, vrrp_event.VRRP_STATE_BACKUP))
                if (i.state == vrrp_event.VRRP_STATE_MASTER):
                    num_of_master += 1
                vr = instances[i.config.vrid]
                # The lower-priority member of the pair must not be master.
                if (((vr[0].config.priority > vr[1].config.priority) and (i.instance_name == vr[1].instance_name)) or ((vr[0].config.priority < vr[1].config.priority) and (i.instance_name == vr[0].instance_name))):
                    if (i.state == vrrp_event.VRRP_STATE_MASTER):
                        print('bad master:')
                        print(('%s %s' % (d[vr[0].instance_name].state, d[vr[0].instance_name].config.priority)))
                        print(('%s %s' % (d[vr[1].instance_name].state, d[vr[1].instance_name].config.priority)))
                        bad += 1
            if (bad > 0):
                # Election not settled yet: wait and re-poll.
                print(('%s bad masters' % bad))
                time.sleep(1)
                continue
            if (num_of_master >= len(instances)):
                # Exactly one master per vrid: done.
                assert (num_of_master == len(instances))
                break
            print(('%s / %s' % (num_of_master, len(instances))))
            time.sleep(1)
            continue

    def _main_version_priority_sleep(self, vrrp_version, priority, do_sleep):
        """Spin up ~51 VR pairs, shuffle priorities, then tear them down.

        When *do_sleep* is true, each phase is verified via _check()/polling;
        otherwise configuration calls are just fired in sequence.
        """
        app_mgr = app_manager.AppManager.get_instance()
        self.logger.debug('%s', app_mgr.applications)
        vrrp_mgr = app_mgr.applications['VRRPManager']
        step = 5
        instances = {}
        # Create a pair of routers for every fifth vrid except _VRID, with
        # mirrored priorities (vrid on one side, 256 - vrid on the other).
        for vrid in range(1, 256, step):
            if (vrid == _VRID):
                continue
            print(('vrid %s' % vrid))
            l = {}
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN, min(vrrp.VRRP_PRIORITY_BACKUP_MAX, vrid))
            rep0 = self._configure_vrrp_router(vrrp_version, prio, _PRIMARY_IP_ADDRESS0, self._IFNAME0, vrid)
            assert (rep0.instance_name is not None)
            l[0] = rep0
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN, min(vrrp.VRRP_PRIORITY_BACKUP_MAX, (256 - vrid)))
            rep1 = self._configure_vrrp_router(vrrp_version, prio, _PRIMARY_IP_ADDRESS1, self._IFNAME1, vrid)
            assert (rep1.instance_name is not None)
            l[1] = rep1
            instances[vrid] = l
        # The _VRID pair carries the priority under test.
        print(('vrid %s' % _VRID))
        l = {}
        rep0 = self._configure_vrrp_router(vrrp_version, priority, _PRIMARY_IP_ADDRESS0, self._IFNAME0, _VRID)
        assert (rep0.instance_name is not None)
        l[0] = rep0
        rep1 = self._configure_vrrp_router(vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_DEFAULT, _PRIMARY_IP_ADDRESS1, self._IFNAME1, _VRID)
        assert (rep1.instance_name is not None)
        l[1] = rep1
        instances[_VRID] = l
        self.logger.debug('%s', vrrp_mgr._instances)
        if do_sleep:
            print(('priority %s' % priority))
            print('waiting for instances starting')
            self._check(vrrp_api, instances)
        # Randomize the priority of one member of each pair (except _VRID).
        for vrid in instances.keys():
            if (vrid == _VRID):
                continue
            which = (vrid & 1)
            new_priority = int(random.uniform(vrrp.VRRP_PRIORITY_BACKUP_MIN, vrrp.VRRP_PRIORITY_BACKUP_MAX))
            i = instances[vrid][which]
            vrrp_api.vrrp_config_change(self, i.instance_name, priority=new_priority)
            i.config.priority = new_priority
        if do_sleep:
            print('priority shuffled')
            self._check(vrrp_api, instances)
        # Shut down one member of each pair, then instance 0 of _VRID.
        for vrid in instances.keys():
            if (vrid == _VRID):
                continue
            which = (vrid & 1)
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
        vrrp_api.vrrp_shutdown(self, instances[_VRID][0].instance_name)
        if do_sleep:
            print('shutting down instances')
            # Wait until only one instance per vrid remains.
            while True:
                rep = vrrp_api.vrrp_list(self)
                if (len(rep.instance_list) <= len(instances)):
                    break
                print(('left %s' % len(rep.instance_list)))
                time.sleep(1)
            assert (len(rep.instance_list) == len(instances))
        print('waiting for the rest becoming master')
        # The surviving member of each pair must take over as master.
        while True:
            rep = vrrp_api.vrrp_list(self)
            if all(((i.state == vrrp_event.VRRP_STATE_MASTER) for i in rep.instance_list)):
                break
            time.sleep(1)
        # Tear down the remaining members.
        vrrp_api.vrrp_shutdown(self, instances[_VRID][1].instance_name)
        for vrid in instances.keys():
            if (vrid == _VRID):
                continue
            which = (1 - (vrid & 1))
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
        print('waiting for instances shutting down')
        while True:
            rep = vrrp_api.vrrp_list(self)
            if (not rep.instance_list):
                break
            print(('left %s' % len(rep.instance_list)))
            time.sleep(1)
class VulnDBCliParserTestSuite(unittest.TestCase):
    """Unit tests for VulnCLIParser argument verification.

    verify_args() returns 0 on success and a distinct positive status code
    per rejection reason; each test below pins one code.  generate_args()
    builds a namespace from positional flags:
    (init, init_status, cve, cve_info, bid, bid_info, exploit_db,
     exploit_db_info, product, product_version, rhba, rhba_info, rhsa,
     rhsa_info) -- inferred from the tests, confirm against the helper.
    """
    # --- verify_args() status codes -------------------------------------
    def test_empty_args(self):
        empty_args = generate_args(False, False, None, None, None, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(empty_args)
        self.assertEqual(status, 1)
    def test_not_only_init(self):
        args = generate_args(True, False, None, None, 12345, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 2)
    def test_not_only_init_status(self):
        args = generate_args(False, True, None, None, 12345, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 3)
    def test_not_only_cve(self):
        args = generate_args(False, False, 'CVE-2002-1562', None, 12345, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 4)
    def test_bad_cve(self):
        # Malformed CVE identifier.
        args = generate_args(False, False, 'CVE-62', None, None, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 5)
    def test_not_only_cveinfo(self):
        args = generate_args(False, False, None, 'CVE-2002-1562', None, 12345, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 6)
    def test_bad_cveinfo(self):
        args = generate_args(False, False, None, 'CVE-62', None, None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 7)
    def test_not_only_bid(self):
        args = generate_args(False, False, None, None, 12345, None, 'openldap', None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 8)
    def test_bad_bid(self):
        # Negative BugTraq id is rejected.
        args = generate_args(False, False, None, None, (- 12345), None, None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 9)
    def test_not_only_bid_info(self):
        args = generate_args(False, False, None, None, None, 12345, 'openldap', None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 10)
    def test_bad_bid_info(self):
        args = generate_args(False, False, None, None, None, (- 12345), None, None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 11)
    def test_not_only_exploit_db(self):
        args = generate_args(False, False, None, None, None, None, 12345, None, 'openldap', None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 12)
    def test_bad_exploit_db(self):
        args = generate_args(False, False, None, None, None, None, (- 12345), None, None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 13)
    def test_not_only_exploit_db_info(self):
        args = generate_args(False, False, None, None, None, None, None, 12345, 'openldap', None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 14)
    def test_bad_exploit_db_info(self):
        args = generate_args(False, False, None, None, None, None, None, (- 12345), None, None, None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 15)
    def test_only_product_version(self):
        # A product_version without its product is rejected.
        args = generate_args(False, False, None, None, None, None, None, None, None, '2.30', None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 16)
    def test_not_only_rhba(self):
        args = generate_args(False, False, None, None, None, None, None, None, 'openldap', None, 'RHBA-2012:002', None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 17)
    def test_bad_rhba(self):
        args = generate_args(False, False, None, None, None, None, None, None, None, None, 'RHBA--2012:002', None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 18)
    def test_not_only_rhba_info(self):
        args = generate_args(False, False, None, None, None, None, None, None, 'openldap', None, None, 'RHBA-2012:002', None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 19)
    def test_bad_rhba_info(self):
        args = generate_args(False, False, None, None, None, None, None, None, None, None, None, 'RHBA--2012:002', None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 20)
    def test_not_only_rhsa(self):
        args = generate_args(False, False, None, None, None, None, None, None, 'openldap', None, None, None, 'RHSA-2012:002', None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 21)
    def test_bad_rhsa(self):
        args = generate_args(False, False, None, None, None, None, None, None, None, None, None, None, 'RHSA-20122:002', None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 22)
    def test_not_only_rhsa_info(self):
        args = generate_args(False, False, None, None, None, None, None, None, 'openldap', None, None, None, None, 'RHSA-2012:002')
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 23)
    def test_bad_rhsa_info(self):
        args = generate_args(False, False, None, None, None, None, None, None, None, None, None, None, None, 'RHSA-201222:002')
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 24)
    def test_ok(self):
        # product + product_version is the valid combination.
        args = generate_args(False, False, None, None, None, None, None, None, 'openldap', '2.2.20', None, None, None, None)
        status = VulnCLIParser.verify_args(args)
        self.assertEqual(status, 0)
    # --- accessor tests: parser is driven via a patched sys.argv --------
    def test_get_cve(self):
        sys.argv = ['dagda.py', 'vuln', '--cve', 'CVE-2002-2002']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_cve(), 'CVE-2002-2002')
    def test_get_cve_info(self):
        sys.argv = ['dagda.py', 'vuln', '--cve_info', 'CVE-2002-2002']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_cve_info(), 'CVE-2002-2002')
    def test_get_bid(self):
        sys.argv = ['dagda.py', 'vuln', '--bid', '15']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_bid(), 15)
    def test_get_bid_info(self):
        sys.argv = ['dagda.py', 'vuln', '--bid_info', '15']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_bid_info(), 15)
    def test_get_exploit_db_id(self):
        sys.argv = ['dagda.py', 'vuln', '--exploit_db', '15']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_exploit_db_id(), 15)
    def test_get_exploit_db_info_id(self):
        sys.argv = ['dagda.py', 'vuln', '--exploit_db_info', '15']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_exploit_db_info_id(), 15)
    def test_check_full_happy_path(self):
        sys.argv = ['dagda.py', 'vuln', '--product', 'openldap', '--product_version', '2.2.20']
        parsed_args = VulnCLIParser()
        self.assertEqual(parsed_args.get_product(), 'openldap')
        self.assertEqual(parsed_args.get_product_version(), '2.2.20')
    # --- exit-code and help-text behavior -------------------------------
    def test_check_exit_1(self):
        sys.argv = ['dagda.py', 'vuln']
        with self.assertRaises(SystemExit) as cm:
            VulnCLIParser()
        self.assertEqual(cm.exception.code, 1)
    def test_DagdaVulnParser_exit_2(self):
        with self.assertRaises(SystemExit) as cm:
            DagdaVulnParser().error('fail')
        self.assertEqual(cm.exception.code, 2)
    def test_DagdaVulnParser_format_help(self):
        self.assertEqual(DagdaVulnParser().format_help(), vuln_parser_text)
class PasswordHelper():
    """Small facade over passlib for password hashing, verification and
    random-password generation."""
    def __init__(self) -> None:
        # argon2 is the preferred scheme; bcrypt is still accepted so
        # existing hashes keep verifying ('auto' marks non-default schemes
        # as deprecated, triggering rehash on verify).
        self.context = CryptContext(schemes=['argon2', 'bcrypt'], deprecated='auto')
    def verify_and_update(self, plain_password: str, hashed_password: str) -> tuple[(bool, str)]:
        # Delegates to passlib: returns (verified, new_hash), where per
        # passlib's contract new_hash is a replacement hash when the stored
        # one used a deprecated scheme (else None) -- note the declared
        # return type says str; confirm callers handle None.
        return self.context.verify_and_update(plain_password, hashed_password)
    def hash(self, password: str) -> str:
        """Hash *password* with the preferred (argon2) scheme."""
        return self.context.hash(password)
    def generate(self) -> str:
        """Generate a random password with 128 bits of entropy."""
        return pwd.genword(entropy=128)
def _plotHistogram(axes: 'Axes', plot_config: 'PlotConfig', data: pd.DataFrame, label: str, bin_count, use_log_scale=False, minimum=None, maximum=None):
    """Draw a histogram of *data* on *axes* and register a legend entry.

    :param bin_count: number of bins; passed straight to hist() when no
        explicit range is given.
    :param minimum/maximum: optional data range; when both are provided the
        bin edges are computed explicitly (log-spaced if *use_log_scale*).
    """
    axes.set_xlabel(plot_config.xLabel())
    axes.set_ylabel(plot_config.yLabel())
    style = plot_config.histogramStyle()
    if (minimum is not None) and (maximum is not None):
        if use_log_scale:
            bins = _histogramLogBins(bin_count, minimum, maximum)
        else:
            bins = numpy.linspace(minimum, maximum, bin_count)
    else:
        # No explicit range: let matplotlib derive the bin edges.
        bins = bin_count
    axes.hist(data.values, alpha=style.alpha, bins=bins, color=style.color)
    if (minimum is not None) and (maximum is not None):
        # Fix: this block used to run unconditionally, so calls with the
        # default minimum=maximum=None crashed on `None -= 0.5`
        # (None == None is true).  Widen a degenerate range so the single
        # bar stays visible, then clamp the x-axis.
        if minimum == maximum:
            minimum -= 0.5
            maximum += 0.5
        axes.set_xlim(minimum, maximum)
    rectangle = Rectangle((0, 0), 1, 1, color=style.color)
    plot_config.addLegendItem(label, rectangle)
class PrometheusChannel():
    """Handles add-metric / update-metric requests and serves the metrics
    through an aioprometheus.Service HTTP endpoint."""

    def __init__(self, address: Address, host: str, port: int, logger: Union[(logging.Logger, logging.LoggerAdapter)]):
        self.address = address
        self.metrics = {}  # metric title -> aioprometheus metric object
        self.logger = logger
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._queue: Optional[asyncio.Queue] = None
        self._dialogues = PrometheusDialogues()
        self._host = host
        self._port = port
        self._service = aioprometheus.Service()

    def _get_message_and_dialogue(self, envelope: Envelope) -> Tuple[(PrometheusMessage, Optional[PrometheusDialogue])]:
        """Extract the message from *envelope* and its (possibly new) dialogue."""
        message = cast(PrometheusMessage, envelope.message)
        dialogue = cast(Optional[PrometheusDialogue], self._dialogues.update(message))
        return (message, dialogue)

    @property
    def queue(self) -> asyncio.Queue:
        """The outgoing-envelope queue; raises ValueError before connect().

        Fix: restored the @property decorator -- _send() and get() access
        ``self.queue.put`` / ``self.queue.get`` as an attribute, which
        raises AttributeError when ``queue`` is a plain method.
        """
        if (self._queue is None):
            raise ValueError('Channel is not connected')
        return self._queue

    async def connect(self) -> None:
        """Create the queue and start the metrics HTTP service (idempotent)."""
        if self._queue:
            return None
        self._loop = asyncio.get_event_loop()
        self._queue = asyncio.Queue()
        (await self._service.start(addr=self._host, port=self._port))

    async def send(self, envelope: Envelope) -> None:
        """Validate the envelope's protocol and process it."""
        sender = envelope.sender
        self.logger.debug('Processing message from {}: {}'.format(sender, envelope))
        if (envelope.protocol_specification_id != PrometheusMessage.protocol_specification_id):
            raise ValueError(f'Protocol {envelope.protocol_specification_id} is not valid for prometheus.')
        (await self._handle_prometheus_message(envelope))

    async def _handle_prometheus_message(self, envelope: Envelope) -> None:
        """Dispatch on the message performative and enqueue a RESPONSE."""
        enforce(isinstance(envelope.message, PrometheusMessage), 'Message not of type PrometheusMessage')
        (message, dialogue) = self._get_message_and_dialogue(envelope)
        if (dialogue is None):
            self.logger.warning('Could not create dialogue from message={}'.format(message))
            return
        if (message.performative == PrometheusMessage.Performative.ADD_METRIC):
            response = (await self._handle_add_metric(message))
        elif (message.performative == PrometheusMessage.Performative.UPDATE_METRIC):
            response = (await self._handle_update_metric(message))
        else:
            self.logger.warning('Unrecognized performative for PrometheusMessage')
            return
        (response_code, response_msg) = cast(Tuple[(int, str)], response)
        msg = dialogue.reply(performative=PrometheusMessage.Performative.RESPONSE, target_message=message, code=response_code, message=response_msg)
        envelope = Envelope(to=msg.to, sender=msg.sender, message=msg)
        (await self._send(envelope))

    async def _handle_add_metric(self, message: PrometheusMessage) -> Tuple[(int, str)]:
        """Register a new metric; returns an HTTP-style (code, message)."""
        if (message.title in self.metrics):
            response_code = 409
            response_msg = 'Metric already exists.'
        else:
            # Resolve the metric class (Counter/Gauge/...) from aioprometheus,
            # restricted to the whitelisted types.
            metric_type = getattr(aioprometheus, message.type, None)
            if ((metric_type is None) or (message.type not in VALID_METRIC_TYPES)):
                response_code = 404
                response_msg = f'{message.type} is not a recognized prometheus metric.'
            else:
                self.metrics[message.title] = metric_type(message.title, message.description, message.labels)
                self._service.register(self.metrics[message.title])
                response_code = 200
                response_msg = f'New {message.type} successfully added: {message.title}.'
        return (response_code, response_msg)

    async def _handle_update_metric(self, message: PrometheusMessage) -> Tuple[(int, str)]:
        """Apply a whitelisted update callable to an existing metric."""
        metric = message.title
        if (metric not in self.metrics):
            response_code = 404
            response_msg = f'Metric {metric} not found.'
        else:
            update_func = getattr(self.metrics[metric], message.callable, None)
            if (update_func is None):
                response_code = 400
                response_msg = f'Update function {message.callable} not found for metric {metric}.'
            elif (message.callable in VALID_UPDATE_FUNCS):
                # inc/dec take only the label set; set/observe etc. also take a value.
                if (message.callable in {'inc', 'dec'}):
                    update_func(message.labels)
                else:
                    update_func(message.labels, message.value)
                response_code = 200
                response_msg = f'Metric {metric} successfully updated.'
            else:
                response_code = 400
                response_msg = f'Failed to update metric {metric}: {message.callable} is not a valid update function.'
        return (response_code, response_msg)

    async def _send(self, envelope: Envelope) -> None:
        (await self.queue.put(envelope))

    async def disconnect(self) -> None:
        """Wake any consumer with a sentinel, drop the queue, stop the service."""
        if (self._queue is not None):
            (await self._queue.put(None))
            self._queue = None
        (await self._service.stop())

    async def get(self) -> Optional[Envelope]:
        return (await self.queue.get())
class OptionSeriesPieSonificationContexttracksMappingHighpassResonance(Options):
    """Generated Highcharts option proxy for the highpass-filter resonance
    mapping of a pie series' sonification context tracks.

    NOTE(review): each option has a getter and a same-named setter; as plain
    methods the setter shadows the getter, which suggests @property /
    @<name>.setter decorators were stripped -- confirm against the generator.
    """
    def mapFunction(self):
        # Getter: mapping function name; no default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: data property the mapping reads from; no default.
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter: upper bound of the mapped range; no default.
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        # Getter: lower bound of the mapped range; no default.
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        # Getter: scope the min/max are computed within; no default.
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class GreetingActivitiesImpl():
    """Activity implementation whose greeting is completed asynchronously
    by a worker thread rather than by the activity's return value."""

    def __init__(self):
        self.invocation_count = 0
        self.details = []

    def compose_greeting(self, greeting: str, name: str):
        # Tell the framework not to complete this activity on return; the
        # worker thread will complete it later via the captured task token.
        Activity.do_not_complete_on_return()
        task_token = Activity.get_task_token()
        worker = threading.Thread(
            target=greeting_activities_thread_func,
            args=(greeting, name, task_token),
        )
        worker.start()
class RCareWorldBaseObject(ABC):
    """Base class for every object living in an RCareWorld scene.

    Wraps the Unity-side object identified by *id*: all operations are sent
    as actions over env.asset_channel / env.instance_channel, followed by a
    simulation step (env._step()).
    """

    def __init__(self, env, id: int, name: str, is_in_scene: bool=False):
        self.env = env
        self.id = id
        self.is_in_scene = is_in_scene
        self.object_type = 'base_object'
        self.name = name
        self.copy_ids = []  # ids of copies spawned via copy()

    def load(self, position=None, rotation=None) -> None:
        """Instantiate the object in the scene at *position*/*rotation*
        (both default to the origin / zero rotation)."""
        assert (self.is_in_scene is False), 'The object is already in the scene, no need to load again.'
        self.env.asset_channel.set_action('InstanceObject', id=self.id, name=self.name)
        self.env._step()
        # Fix: rotation previously used a mutable default argument ([0,0,0]);
        # substitute the defaults here instead.
        if position is None:
            position = [0, 0, 0]
        if rotation is None:
            rotation = [0, 0, 0]
        self.env.instance_channel.set_action('SetTransform', id=self.id, position=position, rotation=rotation)
        self.env._step()
        self.is_in_scene = True
        self.env._step()

    def destroy(self):
        """Remove the object from the scene."""
        self.env.instance_channel.set_action('Destroy', id=self.id)
        self.is_in_scene = False
        self.env._step()

    def copy(self, copy_id: int):
        """Duplicate this object under *copy_id* and return a wrapper for it."""
        self.copy_ids.append(copy_id)
        self.env.instance_channel.set_action('Copy', id=self.id, copy_id=copy_id)
        self.env._step()
        new_object = RCareWorldBaseObject(self.env, copy_id, (self.name + '_copy'), is_in_scene=True)
        return new_object

    def setTransform(self, position: list=None, rotation: list=None, scale: list=None, is_world: bool=True):
        """Set any subset of position/rotation/scale (world or local frame).

        Only the components actually supplied are sent.  If none are
        supplied no SetTransform action is issued (matching the original
        branch-per-combination logic), but the simulation still steps.
        """
        components = {}
        if position is not None:
            components['position'] = position
        if rotation is not None:
            components['rotation'] = rotation
        if scale is not None:
            components['scale'] = scale
        if components:
            self.env.instance_channel.set_action('SetTransform', id=self.id, is_world=is_world, **components)
        self.env._step()

    def getThisObjectState(self):
        """Raw per-object state dict as reported by the instance channel."""
        return self.env.instance_channel.data[int(self.id)]

    def getPosition(self):
        """World-space position (steps the sim first to refresh the data)."""
        self.env._step()
        info = self.env.instance_channel.data[self.id]
        position = info['position']
        return position

    def getLocalPosition(self):
        # NOTE(review): unlike getPosition(), the getters below read cached
        # data without stepping first -- confirm that is intentional.
        info = self.env.instance_channel.data[self.id]
        local_position = info['local_position']
        return local_position

    def getRotation(self):
        """World-space euler rotation."""
        info = self.env.instance_channel.data[self.id]
        rotation = info['rotation']
        return rotation

    def getLocalRotation(self):
        """Euler rotation relative to the parent."""
        info = self.env.instance_channel.data[self.id]
        local_rotation = info['local_rotation']
        return local_rotation

    def getQuaternion(self):
        """World-space rotation as a quaternion."""
        info = self.env.instance_channel.data[self.id]
        quaternion = info['quaternion']
        return quaternion

    def getLocalQuaternion(self):
        """Rotation relative to the parent, as a quaternion."""
        info = self.env.instance_channel.data[self.id]
        local_quaternion = info['local_quaternion']
        return local_quaternion

    def getLocalPositionFromWorld(self, position: list) -> list:
        """Convert a world-space point into this object's local frame."""
        # NOTE(review): 'GetLoaclPointFromWorld' is misspelled but is a wire
        # protocol string -- it presumably matches the Unity side; do not fix
        # here without changing the simulator too.
        local_pose = self.env.instance_channel.set_action('GetLoaclPointFromWorld', id=self.id, point=position)
        return local_pose

    def getWorldPositionFromLocal(self, position: list) -> list:
        """Convert a local-frame point into world space."""
        world_pose = self.env.instance_channel.set_action('getWorldPositionFromLocal', id=self.id, point=position)
        return world_pose

    def setActive(self, active: bool):
        """Enable/disable the GameObject."""
        self.env.instance_channel.set_action('SetActive', id=self.id, active=active)
        self.env._step()

    def setParent(self, parent):
        """Re-parent this object under another wrapped object."""
        parent_id = int(parent.id)
        parent_name = str(parent.name)
        self.env.instance_channel.set_action('SetParent', id=self.id, parent_id=parent_id, parent_name=parent_name)
        self.env._step()

    def setParentByID(self, parent_id):
        """Re-parent by raw id (placeholder name 'Parent' is sent)."""
        self.env.instance_channel.set_action('SetParent', id=self.id, parent_id=parent_id, parent_name='Parent')
        self.env._step()

    def unsetParent(self):
        """Detach from the current parent (-1 signals 'no parent')."""
        self.env.instance_channel.set_action('SetParent', id=self.id, parent_id=(- 1), parent_name='')
        self.env._step()

    def setLayer(self, layer: int):
        """Move the object to the given Unity layer."""
        self.env.instance_channel.set_action('SetLayer', id=self.id, layer=layer)
        self.env._step()

    def setRotationQuaternion(self, quaternion, is_world: bool=True) -> None:
        """Set the rotation directly from a quaternion."""
        self.env.instance_channel.set_action('SetRotationQuaternion', id=self.id, quaternion=quaternion, is_world=is_world)
        self.env._step()
def test_plan_created(session):
    """Parse a 'lightweight_event_create' admin-message delta into a
    PlanCreated event and compare it field-by-field.

    The fixture mirrors the raw Iris payload: the guest list is embedded as
    minimal JSON under untypedData['guest_state_list'], and the expected
    event carries the plan's author, thread, guests and timestamps.
    """
    guest_list = [{'guest_list_state': 'INVITED', 'node': {'id': '3456'}}, {'guest_list_state': 'INVITED', 'node': {'id': '2345'}}, {'guest_list_state': 'GOING', 'node': {'id': '1234'}}]
    data = {'irisSeqId': '1111111', 'irisTags': ['DeltaAdminTextMessage', 'is_from_iris_fanout'], 'messageMetadata': {'actorFbId': '1234', 'adminText': 'You created a plan.', 'folderId': {'systemFolderId': 'INBOX'}, 'messageId': 'mid.$XYZ', 'offlineThreadingId': '', 'skipBumpThread': False, 'tags': ['source:titan:web', 'no_push'], 'threadKey': {'threadFbId': '4321'}, 'threadReadStateEffect': 'MARK_UNREAD', 'timestamp': '', 'unsendType': 'deny_log_message'}, 'participants': ['1234', '2345', '3456'], 'requestContext': {'apiArgs': {}}, 'tqSeqId': '1111', 'type': 'lightweight_event_create', 'untypedData': {'event_timezone': '', 'event_creator_id': '1234', 'event_id': '112233', 'event_type': 'EVENT', 'event_track_rsvp': '1', 'event_title': 'A plan', 'event_time': '', 'event_seconds_to_notify_before': '3600', 'guest_state_list': _util.json_minimal(guest_list)}, 'class': 'AdminTextMessage'}
    # NOTE(review): expected datetimes are hard-coded UTC instants that the
    # parser presumably derives from the (empty) timestamp fields -- confirm.
    assert (PlanCreated(author=User(session=session, id='1234'), thread=Group(session=session, id='4321'), plan=PlanData(session=session, id='112233', time=datetime.datetime(2020, 9, 13, 12, 26, 40, tzinfo=datetime.timezone.utc), title='A plan', author_id='1234', guests={'1234': GuestStatus.GOING, '2345': GuestStatus.INVITED, '3456': GuestStatus.INVITED}), at=datetime.datetime(2017, 7, 14, 2, 40, tzinfo=datetime.timezone.utc)) == parse_admin_message(session, data))
class Test(unittest.TestCase):
    """Exercises comtypes' IEnumVARIANT support against the Windows
    Firewall COM object (HNetCfg.FwMgr); Windows-only."""
    def test_IEnumVARIANT(self):
        fwmgr = CreateObject('HNetCfg.FwMgr')
        services = fwmgr.LocalPolicy.CurrentProfile.Services
        # The COM Count property must agree with Python len() support.
        self.assertEqual(services.Count, len(services))
        cv = iter(services)
        names = [p.Name for p in cv]
        self.assertEqual(len(services), len(names))
        # The enumerator is exhausted after one full pass...
        self.assertEqual([p.Name for p in cv], [])
        # ...until Reset() rewinds it.
        cv.Reset()
        self.assertEqual([p.Name for p in cv], names)
        cv.Reset()
        cv.Skip(3)
        self.assertEqual([p.Name for p in cv], names[3:])
        cv.Reset()
        # Skipping past the end leaves an empty remainder.
        cv.Skip(300)
        self.assertEqual([p.Name for p in cv], names[300:])
        # Indexing is repeatable (does not consume the enumerator).
        self.assertEqual(cv[0].Name, names[0])
        self.assertEqual(cv[0].Name, names[0])
        self.assertEqual(cv[0].Name, names[0])
        if (len(names) > 1):
            self.assertEqual(cv[1].Name, names[1])
            self.assertEqual(cv[1].Name, names[1])
            self.assertEqual(cv[1].Name, names[1])
        cv.Reset()
        self.assertEqual(names[:3], [p.Name for p in cv.Next(3)])
        self.assertEqual(cv.Next(0), [])
        cv.Reset()
        # Requesting more items than exist returns only what is available.
        self.assertEqual(len(cv.Next((len(names) * 2))), len(names))
        cv.Reset()
        # Slicing an enumerator is not supported.
        self.assertRaises(ArgumentError, (lambda : cv[:]))
    # NOTE(review): the bare string statements below are no-op expressions;
    # they look like the arguments of stripped @unittest.skip(...)
    # decorators, so these tests likely run unconditionally now -- confirm.
    ('This test takes a long time. Do we need it? Can it be rewritten?')
    def test_leaks_1(self):
        fwmgr = CreateObject('HNetCfg.FwMgr')
        apps = fwmgr.LocalPolicy.CurrentProfile.AuthorizedApplications
        def doit():
            # Touch a property on every enumerated item.
            for item in iter(apps):
                item.ProcessImageFileName
        bytes = find_memleak(doit, (20, 20))
        self.assertFalse(bytes, ('Leaks %d bytes' % bytes))
    ('This test takes a long time. Do we need it? Can it be rewritten?')
    def test_leaks_2(self):
        fwmgr = CreateObject('HNetCfg.FwMgr')
        apps = fwmgr.LocalPolicy.CurrentProfile.AuthorizedApplications
        def doit():
            # Bulk-fetch through Next() and discard.
            iter(apps).Next(99)
        bytes = find_memleak(doit, (20, 20))
        self.assertFalse(bytes, ('Leaks %d bytes' % bytes))
    ('This test takes a long time. Do we need it? Can it be rewritten?')
    def test_leaks_3(self):
        fwmgr = CreateObject('HNetCfg.FwMgr')
        apps = fwmgr.LocalPolicy.CurrentProfile.AuthorizedApplications
        def doit():
            # Create and fully iterate fresh enumerators.
            for i in range(2):
                for what in iter(apps):
                    pass
        bytes = find_memleak(doit, (20, 20))
        self.assertFalse(bytes, ('Leaks %d bytes' % bytes))
def get_access_token(hostname: str, client_id: str, client_secret: str) -> str:
    """Fetch an OAuth2 access token via the client-credentials grant.

    Exits the process (SystemExit) if the token endpoint does not
    return HTTP 200.
    """
    # NOTE(review): the original URL literal was lost (line read `url = f'`,
    # a syntax error); assumed the conventional token path -- TODO confirm
    # the exact endpoint for this provider.
    url = f'https://{hostname}/oauth/token'
    # Fix: pass the form fields as a dict so requests URL-encodes them --
    # the hand-built string broke for secrets containing '&', '=' or '%'.
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'client_credentials',
    }
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response = requests.post(url, headers=headers, data=payload, verify=True)
    if (response.status_code != 200):
        print(f'Not able to fetch the access token for client id: {client_id}, error: {response}')
        # quit() is only guaranteed in interactive sessions (site module);
        # raise SystemExit for the same effect, reliably.
        raise SystemExit
    return response.json()['access_token']
class LogisticRegression(OLS):
    """One-vs-rest logistic-style classifier built from OLS sub-models.

    fit() trains one OLS model per unique label on the binary indicator
    (y == label); predict_proba() squashes each model's score through a
    sigmoid; predict() picks the label whose model scored highest.
    """

    def fit(self, X, y):
        """Fit one OLS sub-model per class label; returns self."""
        (X, y) = check_X_y(X, y, accept_sparse=False)
        self.labels_ = np.unique(y)
        # One binary (one-vs-rest) OLS fit per class.
        self._models_ = [OLS().fit(X, (y == label)) for label in self.labels_]
        self.coef_ = np.vstack([m.coef_ for m in self._models_])
        return self

    def predict_proba(self, X):
        """Return an (n_samples, n_classes) array of sigmoid scores.

        Note: columns are independent sigmoids, not normalized to sum to 1.
        """
        if (not hasattr(self, '_models_')):
            raise NotFittedError("Estimator not fitted. Call 'fit' first.")
        X = check_array(X, accept_sparse=False)
        scores = np.column_stack([m.predict(X) for m in self._models_])
        return (1.0 / (1.0 + np.exp((- scores))))

    def predict(self, X):
        """Return the highest-scoring label for each row of X."""
        if (not hasattr(self, '_models_')):
            raise NotFittedError("Estimator not fitted. Call 'fit' first.")
        # predict_proba() already validates X via check_array (the original
        # validated twice).  Vectorized argmax replaces the per-row Python
        # loop and also fixes the latent bug where labels were written into
        # a float zeros() array, which breaks for non-numeric labels.
        preds = self.predict_proba(X)
        return self.labels_[np.argmax(preds, axis=1)]
def visit_potential_constraint_def(node: ClassDef, scope: PromptScope):
    """Record constraint names declared via @LMQLOp(...) decorators on
    *node* into scope.defined_constraints.

    Accepts either a single constant argument or a list of constants."""
    for dec in node.decorator_list:
        # Only decorator *calls* whose callee is the bare name LMQLOp,
        # invoked with exactly one argument, are of interest.
        if (type(dec) is not ast.Call) or (type(dec.func) is not ast.Name):
            continue
        if (dec.func.id != 'LMQLOp') or (len(dec.args) != 1):
            continue
        arg = dec.args[0]
        if type(arg) is ast.Constant:
            scope.defined_constraints.add(arg.value)
        elif type(arg) is ast.List:
            for element in arg.elts:
                if type(element) is ast.Constant:
                    scope.defined_constraints.add(element.value)
class FedAvgWithLROptimizer(IServerOptimizer, torch.optim.SGD):
    """Server-side FedAvg optimizer that applies the aggregated update with
    plain SGD (configurable learning rate and momentum).

    Inherits both the server-optimizer interface and torch's SGD; each base
    is initialized explicitly (not via super()) because their __init__
    signatures differ.
    """
    def __init__(self, *, model: nn.Module, **kwargs) -> None:
        # Populate self.cfg from FedAvgWithLROptimizerConfig first, since
        # the SGD initializer below reads cfg.lr / cfg.momentum.
        init_self_cfg(self, component_class=__class__, config_class=FedAvgWithLROptimizerConfig, **kwargs)
        # NOTE(review): IServerOptimizer.__init__ is assumed to bind
        # self.model used on the next line -- confirm.
        IServerOptimizer.__init__(self, model=model, **kwargs)
        torch.optim.SGD.__init__(self, params=self.model.parameters(), lr=self.cfg.lr, momentum=self.cfg.momentum)
    def step(self, closure=None):
        # Delegate directly to SGD's implementation.
        return torch.optim.SGD.step(self, closure)
    def zero_grad(self, set_to_none: bool=False):
        return torch.optim.SGD.zero_grad(self, set_to_none)
class ExtraSettingsValidatorsTestCase(TestCase):
    """Tests that Setting values are run through their dotted-path
    validators on full_clean(), and that a broken validator path errors."""
    def setUp(self):
        # Four fixtures: int with a validator, int without, string with an
        # alphanumeric validator, and one pointing at a missing validator.
        Setting.objects.bulk_create([Setting(name='TEST_VALIDATE_POSITIVE_INTEGER', value_type=Setting.TYPE_INT, value=10, validator='tests.test_validators.positive_int_validator'), Setting(name='TEST_NO_VALIDATORS_INTEGER', value_type=Setting.TYPE_INT, value=10), Setting(name='TEST_VALIDATE_STRINGS', value_type=Setting.TYPE_STRING, value='This is a correct String', validator='tests.test_validators.alphanumeric_strings_validator'), Setting(name='TEST_VALIDATE_WITH_INVALID_VALIDATOR', value_type=Setting.TYPE_STRING, value='This is a correct String', validator='tests.test_validators.invalid_validator')])
    def test_validators(self):
        # A validated setting rejects a value its validator refuses...
        positive_integer = Setting.objects.get(name='TEST_VALIDATE_POSITIVE_INTEGER')
        positive_integer.value = (- 10)
        with self.assertRaises(ValidationError):
            positive_integer.full_clean()
        # ...while the same value passes when no validator is attached.
        normal_integer = Setting.objects.get(name='TEST_NO_VALIDATORS_INTEGER')
        normal_integer.value = (- 10)
        normal_integer.full_clean()
        alnum_string = Setting.objects.get(name='TEST_VALIDATE_STRINGS')
        alnum_string.value = '!-10'
        with self.assertRaises(ValidationError):
            alnum_string.full_clean()
    def test_invalid_validator(self):
        # A dotted path that cannot be imported surfaces as ValueError.
        setting_obj = Setting.objects.get(name='TEST_VALIDATE_WITH_INVALID_VALIDATOR')
        with self.assertRaises(ValueError):
            setting_obj.full_clean()
class API():
    """Namespace aggregating the chain-builder helper functions.

    Wraps the module-level builder functions as staticmethods so callers can
    use them as ``API.<name>`` without instantiating anything.
    """
    # Construction / identity helpers.
    build = staticmethod(build)
    fork_at = staticmethod(fork_at)
    name = staticmethod(name)
    chain_id = staticmethod(chain_id)
    # Mainnet fork activation helpers, in historical fork order.
    frontier_at = staticmethod(frontier_at)
    homestead_at = staticmethod(homestead_at)
    tangerine_whistle_at = staticmethod(tangerine_whistle_at)
    spurious_dragon_at = staticmethod(spurious_dragon_at)
    byzantium_at = staticmethod(byzantium_at)
    constantinople_at = staticmethod(constantinople_at)
    istanbul_at = staticmethod(istanbul_at)
    muir_glacier_at = staticmethod(muir_glacier_at)
    berlin_at = staticmethod(berlin_at)
    london_at = staticmethod(london_at)
    arrow_glacier_at = staticmethod(arrow_glacier_at)
    gray_glacier_at = staticmethod(gray_glacier_at)
    paris_at = staticmethod(paris_at)
    shanghai_at = staticmethod(shanghai_at)
    # Pre-built collections of the fork_at helpers (plain attributes, not
    # callables, so no staticmethod wrapper).
    mainnet_fork_at_fns = mainnet_fork_at_fns
    mining_mainnet_fork_at_fns = mining_mainnet_fork_at_fns
    # DAO fork and mining configuration helpers.
    dao_fork_at = staticmethod(dao_fork_at)
    disable_dao_fork = staticmethod(disable_dao_fork)
    enable_pow_mining = staticmethod(enable_pow_mining)
    disable_pow_check = staticmethod(disable_pow_check)
    # Chain state / block manipulation helpers.
    genesis = staticmethod(genesis)
    copy = staticmethod(copy)
    import_block = staticmethod(import_block)
    import_blocks = staticmethod(import_blocks)
    mine_block = staticmethod(mine_block)
    mine_blocks = staticmethod(mine_blocks)
    chain_split = staticmethod(chain_split)
    at_block_number = staticmethod(at_block_number)
def forum_category(request, category):
    """Render one forum category page with its paginated thread list.

    ``category`` is the Forum_plate primary key.  Local variable names are
    deliberately preserved (including the builtin-shadowing ``type``) because
    the template receives them via ``locals()``.
    """
    cate_list = Forum.objects.filter(category_id=category, hidden=False)
    plate = Forum_plate.objects.all()
    job = Forum.objects.filter(hidden=False, category__name='')
    # NOTE: shadows the builtin `type`, but the template depends on this
    # variable name through locals(), so it is kept.
    type = get_object_or_404(Forum_plate, pk=category)
    page = request.GET.get('page', 1)
    if (page == ''):
        page = 1
    p = Paginator(cate_list, 20, request=request)
    try:
        # BUG FIX: PageNotAnInteger is raised by Paginator.page(), not by
        # request.GET.get(), so the pagination call itself must live inside
        # the try block for the except clause to ever trigger.
        people = p.page(page)
    except PageNotAnInteger:
        people = p.page(1)
    return render(request, 'pc/forum_category.html', locals())
class TestRenderBodyPrecedence():
    """render_body() must prefer `text` over `data` over `media`."""

    def _captured_response(self, client):
        """Issue a GET and return the response object the resource captured."""
        client.simulate_get('/')
        return client.resource.captured_resp

    def test_text(self, client):
        response = self._captured_response(client)
        response.text = 'body'
        response.data = b'data'
        response.media = ['media']
        # text wins even when data and media are also set
        assert response.render_body() == b'body'

    def test_data(self, client):
        response = self._captured_response(client)
        response.data = b'data'
        response.media = ['media']
        # with no text, data wins over media
        assert response.render_body() == b'data'

    def test_media(self, client):
        response = self._captured_response(client)
        response.media = ['media']
        # media alone is serialized (JSON by default)
        assert json.loads(response.render_body().decode('utf-8')) == ['media']
(IWindow)
class Window(MWindow, Widget):
    """Concrete toolkit window widget.

    NOTE(review): the bare ``(IWindow)`` expression on the line above this
    class looks like a stripped ``@provides(IWindow)`` decorator — confirm
    against the original source.
    """
    # Window position (x, y), exposed as a Property backed by _position.
    position = Property(Tuple)
    # Window size (width, height), exposed as a Property backed by _size.
    size = Property(Tuple)
    title = Str()
    # Lifecycle events; presumably fired by the MWindow machinery — the
    # visible code only declares them.
    activated = Event()
    closed = Event()
    closing = Event()
    deactivated = Event()
    key_pressed = Event(KeyPressedEvent)
    opened = Event()
    opening = Event()
    # Shadow traits holding the real values; (-1, -1) means "not yet set".
    _position = Tuple(((- 1), (- 1)))
    _size = Tuple(((- 1), (- 1)))

    def show(self, visible):
        """Show or hide the window; no-op in this base implementation."""
        pass

    def _get_position(self):
        """Property getter for `position`."""
        return self._position

    def _set_position(self, position):
        """Property setter for `position`: store and notify manually."""
        old = self._position
        self._position = position
        # Property traits do not auto-notify; fire the change event by hand.
        self.trait_property_changed('position', old, position)

    def _get_size(self):
        """Property getter for `size`."""
        return self._size

    def _set_size(self, size):
        """Property setter for `size`: store and notify manually."""
        old = self._size
        self._size = size
        self.trait_property_changed('size', old, size)
class Scheduler():
    """Async job scheduler: runs one Job per realization through a Driver and
    optionally forwards lifecycle events to an evaluator websocket."""

    def __init__(self, driver: Optional[Driver]=None, realizations: Optional[Sequence[Realization]]=None, *, max_submit: int=1, max_running: int=1, ens_id: Optional[str]=None, ee_uri: Optional[str]=None, ee_cert: Optional[str]=None, ee_token: Optional[str]=None) -> None:
        if (driver is None):
            driver = LocalDriver()
        self.driver = driver
        # One asyncio task per realization index (iens); filled by execute().
        self._tasks: MutableMapping[(int, asyncio.Task[None])] = {}
        self._jobs: MutableMapping[(int, Job)] = {real.iens: Job(self, real) for real in (realizations or [])}
        # Outbound event queue, drained by _publisher().
        self._events: asyncio.Queue[Any] = asyncio.Queue()
        self._cancelled = False
        self._max_submit = max_submit
        self._max_running = max_running
        # Evaluator ("ee") connection settings; publishing is skipped when
        # no URI is configured.
        self._ee_uri = ee_uri
        self._ens_id = ens_id
        self._ee_cert = ee_cert
        self._ee_token = ee_token

    def kill_all_jobs(self) -> None:
        """Cancel every job task and mark the whole run as cancelled."""
        self._cancelled = True
        for task in self._tasks.values():
            task.cancel()

    def stop_long_running_jobs(self, minimum_required_realizations: int) -> None:
        """Intentionally a no-op in this scheduler; kept for interface parity."""
        pass

    def set_realization(self, realization: Realization) -> None:
        """Register (or replace) the Job for the given realization."""
        self._jobs[realization.iens] = Job(self, realization)

    def is_active(self) -> bool:
        """Return True while any job task has not finished."""
        return any(((not task.done()) for task in self._tasks.values()))

    def count_states(self) -> Dict[(JobState, int)]:
        """Return a histogram of current job states."""
        counts: Dict[(JobState, int)] = defaultdict(int)
        for job in self._jobs.values():
            counts[job.state] += 1
        return counts

    async def _publisher(self) -> None:
        """Forward queued events to the evaluator websocket, if configured."""
        if (not self._ee_uri):
            return
        tls: Optional[ssl.SSLContext] = None
        if self._ee_cert:
            # Trust only the evaluator's own certificate material.
            tls = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            tls.load_verify_locations(cadata=self._ee_cert)
        headers = Headers()
        if self._ee_token:
            headers['token'] = self._ee_token
        async with connect(self._ee_uri, ssl=tls, extra_headers=headers, open_timeout=60, ping_timeout=60, ping_interval=60, close_timeout=60) as conn:
            # Runs until cancelled by execute()'s background_tasks context.
            while True:
                event = (await self._events.get())
                (await conn.send(event))

    def add_dispatch_information_to_jobs_file(self) -> None:
        """Inject evaluator connection info into every realization's jobs.json."""
        for job in self._jobs.values():
            self._update_jobs_json(job.iens, job.real.run_arg.runpath)

    async def execute(self, minimum_required_realizations: int=0) -> str:
        """Run all jobs to completion.

        Returns the ensemble end-state event type (cancelled or stopped).
        """
        async with background_tasks() as cancel_when_execute_is_done:
            # Helpers are cancelled automatically when this block exits.
            cancel_when_execute_is_done(self._publisher())
            cancel_when_execute_is_done(self._process_event_queue())
            cancel_when_execute_is_done(self.driver.poll())
            # All job tasks wait on `start`; the semaphore bounds concurrency
            # to max_running.
            start = asyncio.Event()
            sem = asyncio.BoundedSemaphore(self._max_running)
            for (iens, job) in self._jobs.items():
                self._tasks[iens] = asyncio.create_task(job(start, sem, self._max_submit))
            start.set()
            for task in self._tasks.values():
                (await task)
            (await self.driver.finish())
        if self._cancelled:
            return EVTYPE_ENSEMBLE_CANCELLED
        return EVTYPE_ENSEMBLE_STOPPED

    async def _process_event_queue(self) -> None:
        """Translate driver events into per-job state transitions."""
        while True:
            (iens, event) = (await self.driver.event_queue.get())
            if (event == JobEvent.STARTED):
                self._jobs[iens].started.set()
            elif (event == JobEvent.COMPLETED):
                self._jobs[iens].returncode.set_result(0)
            elif (event == JobEvent.FAILED):
                # Non-zero returncode signals failure to the waiting Job.
                self._jobs[iens].returncode.set_result(1)
            elif (event == JobEvent.ABORTED):
                self._jobs[iens].aborted.set()

    def _update_jobs_json(self, iens: int, runpath: str) -> None:
        """Merge dispatch info into <runpath>/jobs.json (read, update, rewrite)."""
        cert_path = f'{runpath}/{CERT_FILE}'
        if (self._ee_cert is not None):
            Path(cert_path).write_text(self._ee_cert, encoding='utf-8')
        jobs = _JobsJson(experiment_id=None, ens_id=self._ens_id, real_id=iens, dispatch_url=self._ee_uri, ee_token=self._ee_token, ee_cert_path=(cert_path if (self._ee_cert is not None) else None))
        jobs_path = os.path.join(runpath, 'jobs.json')
        with open(jobs_path, 'r') as fp:
            data = json.load(fp)
        # Rewrite in place; the file is truncated before the merged dict is
        # dumped, so existing keys are preserved unless overwritten.
        with open(jobs_path, 'w') as fp:
            data.update(asdict(jobs))
            json.dump(data, fp, indent=4)
def test():
    """Validate the learner's solution text for the percentage-matching exercise."""
    assert ('if token.like_num' in __solution__), "Are you checking the token's like_num attribute?"
    assert ('next_token.text == "%"' in __solution__), "Are you checking whether the next token's text is a percent sign?"
    # BUG FIX: removed `assert (next_token.text == '%')` — `next_token` is not
    # defined in this scope (it only exists inside the learner's solution
    # code), so that assertion raised NameError instead of validating anything.
    assert (('token.i + 1' in __solution__) or ('1 + token.i' in __solution__)), "Are you using the token's index attribute?"
    __msg__.good('Well done! As you can see, you can do a lot of very powerful analyses using the tokens and their attributes.')
def create_item_from_template(doc):
    """Create a sales/service Item from a billable template doc and link it back."""
    # Guard clause: non-billable templates get no Item at all.
    if not doc.is_billable:
        return
    # Prefer the 'Unit' UOM when it exists; otherwise fall back to the
    # default stock UOM from Stock Settings.
    uom = (frappe.db.exists('UOM', 'Unit') or frappe.db.get_single_value('Stock Settings', 'stock_uom'))
    item = frappe.get_doc({'doctype': 'Item', 'item_code': doc.item_code, 'item_name': doc.name, 'item_group': doc.item_group, 'description': doc.name, 'is_sales_item': 1, 'is_service_item': 1, 'is_purchase_item': 0, 'is_stock_item': 0, 'include_item_in_manufacturing': 0, 'show_in_website': 0, 'is_pro_applicable': 0, 'disabled': 0, 'stock_uom': uom}).insert(ignore_permissions=True, ignore_mandatory=True)
    # Always record a price so a price-list entry exists, even at zero.
    make_item_price(item.name, doc.rate if doc.rate else 0.0)
    frappe.db.set_value('Observation Template', doc.name, 'item', item.name)
    doc.reload()
def get_binaries(file_path: Path) -> list[Path]:
    """Return executable regular files at or below *file_path*.

    A file argument is returned as a one-element list; a directory is walked
    recursively.  Symlinks are skipped entirely (avoids cycles/duplicates).
    """
    if file_path.is_file():
        return [file_path]
    executables: list[Path] = []
    for entry in file_path.iterdir():
        if entry.is_symlink():
            continue
        if entry.is_dir():
            executables.extend(get_binaries(entry))
        elif os.access(entry, os.X_OK):
            executables.append(entry)
    return executables
class TestIndexListFilterShards(TestCase):
    """filter_by_shards: argument validation and shard-count filtering."""

    # Both fixture indices (fixture key '2') have 5 shards each.
    BOTH_INDICES = ['index-2016.03.03', 'index-2016.03.04']

    def builder(self, key='2'):
        """Wire a mocked ES client around canned fixtures and build the IndexList."""
        self.client = Mock()
        self.client.info.return_value = get_es_ver()
        self.client.cat.indices.return_value = get_testvals(key, 'state')
        self.client.indices.get_settings.return_value = get_testvals(key, 'settings')
        self.client.indices.stats.return_value = get_testvals(key, 'stats')
        self.client.indices.exists_alias.return_value = False
        self.ilo = IndexList(self.client)

    def _assert_remaining(self, expected):
        """Assert the filtered index list equals `expected` (order-insensitive)."""
        self.assertEqual(sorted(expected), sorted(self.ilo.indices))

    def test_filter_shards_raise(self):
        self.builder()
        self.assertRaises(MissingArgument, self.ilo.filter_by_shards)

    def test_bad_shard_count_raise_1(self):
        self.builder()
        self.assertRaises(MissingArgument, self.ilo.filter_by_shards, number_of_shards=0)

    def test_bad_shard_count_raise_2(self):
        self.builder()
        self.assertRaises(ValueError, self.ilo.filter_by_shards, number_of_shards=1, shard_filter_behavior='less_than')

    def test_bad_shard_count_raise_3(self):
        self.builder()
        self.assertRaises(ValueError, self.ilo.filter_by_shards, number_of_shards=(- 1), shard_filter_behavior='greater_than')

    def test_greater_than_or_equal(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='greater_than_or_equal')
        self._assert_remaining(self.BOTH_INDICES)

    def test_greater_than_or_equal_exclude(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='greater_than_or_equal', exclude=True)
        self._assert_remaining([])

    def test_greater_than(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5)
        self._assert_remaining([])

    def test_greater_than_exclude(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, exclude=True)
        self._assert_remaining(self.BOTH_INDICES)

    def test_less_than_or_equal(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='less_than_or_equal')
        self._assert_remaining(self.BOTH_INDICES)

    def test_less_than_or_equal_exclude(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='less_than_or_equal', exclude=True)
        self._assert_remaining([])

    def test_less_than(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='less_than')
        self._assert_remaining([])

    def test_less_than_exclude(self):
        self.builder()
        self.ilo.filter_by_shards(number_of_shards=5, shard_filter_behavior='less_than', exclude=True)
        self._assert_remaining(self.BOTH_INDICES)
class ModuleManager(Base):
    """Pipeline node ("Colors and legends") that manages visualization modules
    and the shared scalar/vector lookup-table managers for one data source."""
    # Upstream pipeline object whose outputs feed the modules.
    source = Instance(Base)
    # Child modules managed (started/stopped/colored) by this node.
    children = List(Module, record=True)
    # Which attribute set drives the LUTs: auto, point data, or cell data.
    lut_data_mode = PrefixList(LUT_DATA_MODE_TYPES, default_value='auto', desc='specify the data type used by the lookup tables')
    scalar_lut_manager = Instance(LUTManager, args=(), record=True)
    vector_lut_manager = Instance(LUTManager, args=(), record=True)
    name = Str('Colors and legends')
    icon = Str('modulemanager.ico')
    type = Str(' colors and legends')
    input_info = PipelineInfo(datasets=['any'])
    output_info = PipelineInfo(datasets=['any'])

    def __get_pure_state__(self):
        """Persistence hook: drop `source` — it is restored by the parent."""
        d = super(ModuleManager, self).__get_pure_state__()
        d.pop('source', None)
        return d

    def __set_pure_state__(self, state):
        """Restore persisted state; children first need their own state pushed."""
        # First set everything except children, then rebuild the children
        # list, then set ONLY the children (ignore='*' skips the rest).
        set_state(self, state, ignore=['children'])
        handle_children_state(self.children, state.children)
        set_state(self, state, first=['children'], ignore=['*'])
        self.update()

    def update(self):
        """Recompute scalar/vector data ranges from the source's first output."""
        if (len(self.source.outputs) == 0):
            return
        input = self.source.outputs[0]
        helper = DataSetHelper(input)
        self._setup_scalar_data(helper)
        self._setup_vector_data(helper)

    def start(self):
        """Start children and LUT managers; idempotent while running."""
        if self.running:
            return
        self._setup_event_handlers()
        for obj in self.children:
            obj.start()
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.start()
        super(ModuleManager, self).start()

    def stop(self):
        """Stop children and LUT managers; no-op when not running."""
        if (not self.running):
            return
        self._teardown_event_handlers()
        for obj in self.children:
            obj.stop()
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.stop()
        super(ModuleManager, self).stop()

    def add_child(self, child):
        """Accept Modules here; forward anything else to the source node."""
        if isinstance(child, Module):
            self.children.append(child)
        else:
            self.source.add_child(child)

    def remove_child(self, child):
        self.children.remove(child)

    def tno_can_add(self, node, add_object):
        """Tree-node hook: only Module classes/instances may be dropped here."""
        try:
            if issubclass(add_object, Module):
                return True
        except TypeError:
            # add_object was an instance, not a class.
            if isinstance(add_object, Module):
                return True
        return False

    def tno_drop_object(self, node, dropped_object):
        """Tree-node hook: accept a dropped Module instance."""
        if isinstance(dropped_object, Module):
            return dropped_object

    def _children_changed(self, old, new):
        self._handle_children(old, new)

    def _children_items_changed(self, list_event):
        self._handle_children(list_event.removed, list_event.added)

    def _handle_children(self, removed, added):
        """Stop removed modules; wire up and (if running) start added ones."""
        for obj in removed:
            obj.stop()
        for obj in added:
            obj.trait_set(module_manager=self, scene=self.scene, parent=self)
            if self.running:
                try:
                    obj.start()
                except:
                    # Log the traceback but keep processing other children.
                    exception()

    def _source_changed(self):
        self.output_info.copy_traits(self.source.output_info)
        self.update()

    def _setup_event_handlers(self):
        src = self.source
        src.on_trait_event(self.update, 'pipeline_changed')
        src.on_trait_event(self.update, 'data_changed')

    def _teardown_event_handlers(self):
        src = self.source
        src.on_trait_event(self.update, 'pipeline_changed', remove=True)
        src.on_trait_event(self.update, 'data_changed', remove=True)

    def _scene_changed(self, value):
        # Propagate the new scene to children and both LUT managers.
        for obj in self.children:
            obj.scene = value
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.scene = value

    def _lut_data_mode_changed(self, value):
        self.update()

    def _setup_scalar_data(self, helper):
        """Choose the active scalar attributes and configure the scalar LUT."""
        data_attr = DataAttributes(name='No scalars')
        point_data_attr = DataAttributes(name='No scalars')
        point_data_attr.compute_scalar(helper, 'point')
        cell_data_attr = DataAttributes(name='No scalars')
        cell_data_attr.compute_scalar(helper, 'cell')
        if (self.lut_data_mode == 'auto'):
            # Prefer point data when present, else fall back to cell data.
            if (len(point_data_attr.range) > 0):
                data_attr.copy_traits(point_data_attr)
            elif (len(cell_data_attr.range) > 0):
                data_attr.copy_traits(cell_data_attr)
        elif (self.lut_data_mode == 'point data'):
            data_attr.copy_traits(point_data_attr)
        elif (self.lut_data_mode == 'cell data'):
            data_attr.copy_traits(cell_data_attr)
        data_attr.config_lut(self.scalar_lut_manager)

    def _setup_vector_data(self, helper):
        """Choose the active vector attributes and configure the vector LUT."""
        data_attr = DataAttributes(name='No vectors')
        point_data_attr = DataAttributes(name='No vectors')
        point_data_attr.compute_vector(helper, 'point')
        cell_data_attr = DataAttributes(name='No vectors')
        cell_data_attr.compute_vector(helper, 'cell')
        if (self.lut_data_mode == 'auto'):
            if (len(point_data_attr.range) > 0):
                data_attr.copy_traits(point_data_attr)
            elif (len(cell_data_attr.range) > 0):
                data_attr.copy_traits(cell_data_attr)
        elif (self.lut_data_mode == 'point data'):
            data_attr.copy_traits(point_data_attr)
        elif (self.lut_data_mode == 'cell data'):
            data_attr.copy_traits(cell_data_attr)
        data_attr.config_lut(self.vector_lut_manager)

    def _visible_changed(self, value):
        for c in self.children:
            c.visible = value
        self.scalar_lut_manager.visible = value
        self.vector_lut_manager.visible = value
        super(ModuleManager, self)._visible_changed(value)

    def _menu_helper_default(self):
        # Imported lazily to avoid a circular import at module load time.
        from mayavi.core.traits_menu import ModuleMenuHelper
        return ModuleMenuHelper(object=self)
class OptionSeriesCylinderSonificationTracksPointgrouping(Options):
    """Point-grouping options for cylinder-series sonification tracks.

    BUG FIX: each accessor pair was two plain ``def``s with the same name, so
    the setter definition silently replaced the getter.  Restored the
    ``@property`` / ``@<name>.setter`` pattern the getter/setter bodies imply.
    """

    @property
    def algorithm(self):
        """Grouping algorithm; defaults to 'minmax'."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is enabled; defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """Timespan (ms) of each group; defaults to 15."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property used for grouping; defaults to 'y'."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class HRNet(nn.Module):
    """High-Resolution Network backbone driven by a stage-config dict.

    The stem downsamples 4x (two stride-2 convs); stages 2-4 are optional and
    built only when present in `cfg`.  The forward pass fuses all branch
    outputs at the highest resolution and runs them through `last_layer`.
    """

    def __init__(self, cfg, **kwargs):
        super(HRNet, self).__init__()
        self.cfg = cfg
        blocks_dict = {'Basic': BasicBlock, 'Bottleneck': Bottleneck}
        self.blocks_dict = blocks_dict
        self.inplanes = 64
        # Stem: two stride-2 4x4 convs -> overall 4x spatial downsample.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1, bias=False)
        self.bn2 = BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        # Stages 2-4 are optional; each adds a transition layer and a set of
        # parallel-branch HighResolutionModules.
        if ('STAGE2' in cfg):
            self.stage2_cfg = cfg['STAGE2']
            num_channels = self.stage2_cfg['NUM_CHANNELS']
            block = blocks_dict[self.stage2_cfg['BLOCK']]
            # Scale channel counts by the block's expansion factor.
            num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
            self.transition1 = self._make_transition_layer([256], num_channels)
            (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        if ('STAGE3' in cfg):
            self.stage3_cfg = cfg['STAGE3']
            num_channels = self.stage3_cfg['NUM_CHANNELS']
            block = blocks_dict[self.stage3_cfg['BLOCK']]
            num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
            self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
            (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        if ('STAGE4' in cfg):
            self.stage4_cfg = cfg['STAGE4']
            num_channels = self.stage4_cfg['NUM_CHANNELS']
            block = blocks_dict[self.stage4_cfg['BLOCK']]
            num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
            self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
            (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        # Optional decode head: concat of all branches -> two 2x upsamples ->
        # a 32-channel output map.
        if (('last_layer' in self.cfg) and (self.cfg['last_layer'] == True)):
            last_inp_channels = int(sum(pre_stage_channels))
            self.last_layer = nn.Sequential(nn.Conv2d(in_channels=last_inp_channels, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(256), nn.ReLU(inplace=True), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(128), nn.ReLU(inplace=True), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(64), nn.ReLU(inplace=True), nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1))

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when channel counts differ
        (else None = identity); each new branch is built by chained stride-2
        convs from the previous stage's last branch.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), BatchNorm2d(num_channels_cur_layer[i]), nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    # Only the final conv in the chain switches to the target
                    # channel count; earlier ones just downsample.
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(Conv2d(inchannels, outchannels, 4, 2, 1, bias=False), BatchNorm2d(outchannels), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first may change stride/width."""
        layers = []
        layers.append(block(inplanes, planes, stride))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
        """Build a stage as a sequence of HighResolutionModules.

        Returns (stage, out_channels).  Only the last module may collapse to
        a single-scale output when multi_scale_output is False.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = self.blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            if ((not multi_scale_output) and (i == (num_modules - 1))):
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def forward(self, x):
        # Stem + stage 1.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        y_list = [x]
        if ('STAGE2' in self.cfg):
            x_list = []
            for i in range(self.stage2_cfg['NUM_BRANCHES']):
                if (self.transition1[i] is not None):
                    x_list.append(self.transition1[i](x))
                else:
                    x_list.append(x)
            y_list = self.stage2(x_list)
        if ('STAGE3' in self.cfg):
            x_list = []
            for i in range(self.stage3_cfg['NUM_BRANCHES']):
                if (self.transition3 [i] is not None) if False else (self.transition2[i] is not None):
                    x_list.append(self.transition2[i](y_list[(- 1)]))
                else:
                    x_list.append(y_list[i])
            y_list = self.stage3(x_list)
        if ('STAGE4' in self.cfg):
            x_list = []
            for i in range(self.stage4_cfg['NUM_BRANCHES']):
                if (self.transition3[i] is not None):
                    x_list.append(self.transition3[i](y_list[(- 1)]))
                else:
                    x_list.append(y_list[i])
            y_list = self.stage4(x_list)
        # Fuse: upsample every lower-resolution branch to branch 0's size and
        # concatenate along channels.
        # NOTE(review): this assumes 4 branches (STAGE4 configured) and that
        # last_layer was enabled in cfg — confirm callers always use such cfgs.
        (y0_h, y0_w) = (y_list[0].size(2), y_list[0].size(3))
        y1 = F.interpolate(y_list[1], size=(y0_h, y0_w), mode='bilinear', align_corners=False)
        y2 = F.interpolate(y_list[2], size=(y0_h, y0_w), mode='bilinear', align_corners=False)
        y3 = F.interpolate(y_list[3], size=(y0_h, y0_w), mode='bilinear', align_corners=False)
        y = torch.cat([y_list[0], y1, y2, y3], 1)
        y = self.last_layer(y)
        return y

    def init_weights(self):
        """Kaiming-init convs; unit-gamma/zero-beta for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def fix_bn(self):
        """Freeze all BatchNorm layers (eval mode: running stats, no updates)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def get_loss(self, data):
        """Masked L1 loss between predicted and target coefficient maps.

        Input images are scaled from [0, 255] to [-1, 1] and masked before
        the forward pass.
        """
        pr = self.forward((((data['img'] / 127.5) - 1) * data['mask']))
        loss = F.l1_loss(torch.masked_select(pr, data['mask']), torch.masked_select(data['ceof'], data['mask']))
        return (loss, {'loss': loss.detach()})
class SideChannelManager():
    """Routes raw side-channel bytes between Unity and registered SideChannels.

    Fixes applied:
    - BUG FIX: ``__init__``'s default was the typing expression
      ``Optional[List[SideChannel]]`` itself (an annotation mistakenly written
      as a default value); it is now a proper keyword default of ``None``.
    - BUG FIX: ``_get_side_channels_dict`` took no ``self`` yet was called as
      an instance method (TypeError at runtime); restored ``@staticmethod``.
    - Removed leftover debug ``print`` calls, which also crashed with a
      TypeError when ``side_channels`` was ``None`` (they iterated it before
      the None-check).
    (Type annotations referencing project classes are quoted so this module
    imports without them.)
    """

    def __init__(self, side_channels: "Optional[List[SideChannel]]" = None) -> None:
        self._side_channels_dict = self._get_side_channels_dict(side_channels)

    def process_side_channel_message(self, data: bytes) -> None:
        """Split a concatenated message blob and dispatch each to its channel.

        Wire format per message: 16-byte little-endian UUID, 4-byte little-
        endian int length, then `length` payload bytes.
        """
        offset = 0
        while offset < len(data):
            try:
                channel_id = uuid.UUID(bytes_le=bytes(data[offset:(offset + 16)]))
                offset += 16
                (message_len,) = struct.unpack_from('<i', data, offset)
                offset += 4
                message_data = data[offset:(offset + message_len)]
                offset += message_len
            except (struct.error, ValueError, IndexError):
                raise UnityEnvironmentException('There was a problem reading a message in a SideChannel. Please make sure the version of MLAgents in Unity is compatible with the Python version.')
            if len(message_data) != message_len:
                raise UnityEnvironmentException('The message received by the side channel {} was unexpectedly short. Make sure your Unity Environment sending side channel data properly.'.format(channel_id))
            if channel_id in self._side_channels_dict:
                incoming_message = IncomingMessage(message_data)
                self._side_channels_dict[channel_id].on_message_received(incoming_message)
            else:
                # Unknown channel: log and drop rather than fail the step.
                get_logger(__name__).warning(f'Unknown side channel data received. Channel type: {channel_id}.')

    def generate_side_channel_messages(self) -> bytearray:
        """Drain every channel's outgoing queue into one wire-format blob."""
        result = bytearray()
        for (channel_id, channel) in self._side_channels_dict.items():
            for message in channel.message_queue:
                result += channel_id.bytes_le
                result += struct.pack('<i', len(message))
                result += message
            channel.message_queue = []
        return result

    @staticmethod
    def _get_side_channels_dict(side_channels: "Optional[List[SideChannel]]") -> "Dict[uuid.UUID, SideChannel]":
        """Build a channel_id -> channel mapping, rejecting duplicate ids."""
        side_channels_dict: "Dict[uuid.UUID, SideChannel]" = {}
        if side_channels is not None:
            for _sc in side_channels:
                if _sc.channel_id in side_channels_dict:
                    raise UnityEnvironmentException(f'There cannot be two side channels with the same channel id {_sc.channel_id}.')
                side_channels_dict[_sc.channel_id] = _sc
        return side_channels_dict
class DirectionSector(Tagged):
    """A direction indicator drawn as a gradient-filled circular sector,
    derived from OSM `direction` / `camera:direction` tags."""

    def __init__(self, tags: dict[(str, str)], point: np.ndarray) -> None:
        super().__init__(tags)
        # Anchor point of the sector on the canvas.
        self.point: np.ndarray = point

    def draw(self, svg: Drawing, scheme) -> None:
        """Draw gradient-filled sector(s) for the feature's direction tag(s)."""
        angle: Optional[float] = None
        # When True the gradient fades toward the anchor point instead of away.
        is_revert_gradient: bool = False
        direction: str
        direction_radius: float
        direction_color: Color
        if (self.get_tag('man_made') == 'surveillance'):
            # Surveillance cameras: direction and an optional cone angle.
            direction = self.get_tag('camera:direction')
            if ('camera:angle' in self.tags):
                angle = float(self.get_tag('camera:angle'))
            # A generic 'angle' tag overrides 'camera:angle' if both exist.
            if ('angle' in self.tags):
                angle = float(self.get_tag('angle'))
            direction_radius = 50.0
            direction_color = scheme.get_color('direction_camera_color')
        elif (self.get_tag('traffic_sign') == 'stop'):
            # Stop signs: smaller red sector.
            direction = self.get_tag('direction')
            direction_radius = 25.0
            direction_color = Color('red')
        else:
            # Generic viewpoints and similar features.
            direction = self.get_tag('direction')
            direction_radius = 50.0
            direction_color = scheme.get_color('direction_view_color')
            is_revert_gradient = True
        if (not direction):
            # Nothing to draw without a direction value.
            return
        # Snap to integer pixels, then back to float for SVG coordinates.
        point: np.ndarray = self.point.astype(int).astype(float)
        paths: Iterator[PathCommands]
        if (angle is not None):
            # An explicit cone angle yields a single sector.
            paths = [Sector(direction, angle).draw(point, direction_radius)]
        else:
            paths = DirectionSet(direction).draw(point, direction_radius)
        for path in paths:
            # Each sector gets its own radial gradient centered on the anchor.
            radial_gradient: RadialGradient = svg.radialGradient(center=point, r=direction_radius, gradientUnits='userSpaceOnUse')
            gradient: RadialGradient = svg.defs.add(radial_gradient)
            if is_revert_gradient:
                gradient.add_stop_color(0.0, direction_color.hex, opacity=0.0).add_stop_color(1.0, direction_color.hex, opacity=0.7)
            else:
                gradient.add_stop_color(0.0, direction_color.hex, opacity=0.4).add_stop_color(1.0, direction_color.hex, opacity=0.0)
            # Close the wedge back to the anchor point.
            path_element: Path = svg.path(d=((['M', point] + path) + ['L', point, 'Z']), fill=gradient.get_funciri())
            svg.add(path_element)
def main(args=None):
    """CLI entry point: convert an SVG outline to a GLIF glyph and print or save it."""
    # NOTE: `from io import open` (a Python-2 compatibility shim) was removed;
    # the builtin open already supports `encoding` on Python 3.
    options = parse_args(args)
    svg_file = options.infile
    # The glyph name defaults to the SVG file's basename without extension.
    if options.name:
        name = options.name
    else:
        import os
        name = os.path.splitext(os.path.basename(svg_file))[0]
    with open(svg_file, 'r', encoding='utf-8') as f:
        svg = f.read()
    glif = svg2glif(svg, name, width=options.width, height=options.height, unicodes=options.unicodes, transform=options.transform, version=options.format)
    # Without an explicit output file, write the GLIF to stdout.
    if (options.outfile is None):
        print(glif)
    else:
        with open(options.outfile, 'w', encoding='utf-8') as f:
            f.write(glif)
class ImInfo():
    """Compute per-image resize info (height, width, scale) for a JSONL dataset
    and write it out in the format the detector's input pipeline expects."""

    def __init__(self, args):
        self.args = args

    def run(self):
        """Read the dataset file, batch it, validate sizes, and write im_info rows."""
        with open(self.args.dataset_file, 'r') as f:
            images = [json.loads(line) for line in f.readlines()]
        # batch_size <= 0 means "one batch containing everything".
        batch_size = self.args.batch_size if self.args.batch_size > 0 else len(images)
        num_batches = len(images) // batch_size
        assert len(images) == (num_batches * batch_size)
        all_infos = []
        for batch_idx in range(num_batches):
            batch_infos = []
            for img_idx in range(batch_idx * batch_size, (batch_idx + 1) * batch_size):
                record = images[img_idx]
                scale = self.getScale(record['height'], record['width'])
                height = int(np.round(record['height'] * scale))
                width = int(np.round(record['width'] * scale))
                assert height <= self.args.max_size, 'height {} is more than the max_size {}'.format(height, self.args.max_size)
                assert width <= self.args.max_size, 'width {} is more than the max_size {}'.format(width, self.args.max_size)
                # Exactly one side must land on a target size: max_size when
                # the image was capped, min_size otherwise.
                if height < self.args.min_size or width < self.args.min_size:
                    assert height == self.args.max_size or width == self.args.max_size
                else:
                    assert height == self.args.min_size or width == self.args.min_size
                batch_infos.append([height, width, scale])
            all_infos.append(batch_infos)
        with open(self.args.output_file, 'w') as f:
            # Header: total row count and the number of columns (3).
            f.write('{}, {}\n'.format(num_batches * batch_size, 3))
            for batch in all_infos:
                for info in batch:
                    f.write('{}\n'.format(', '.join(str(v) for v in info)))

    def getScale(self, height, width):
        """Scale so the short side reaches min_size, capped so the long side
        never exceeds max_size after rounding."""
        shorter = min(height, width)
        longer = max(height, width)
        scale = float(self.args.min_size) / float(shorter)
        if np.round(scale * longer) > self.args.max_size:
            scale = float(self.args.max_size) / float(longer)
        return scale
def test_scalar_area(f):
    """Integrating a constant-1 field over each facet measure must give the
    known facet areas of the test mesh."""
    f.assign(1)
    tolerance = 1e-07
    expectations = [
        (f * ds_t, 1.0),
        (f * ds_b, 1.0),
        (f * ds_tb, 2.0),
        (f * ds_v, 2.0),
        (f('+') * dS_h, 3.0),
        (f('-') * dS_h, 3.0),
        (f('+') * dS_v, 3.0),
        (f('-') * dS_v, 3.0),
    ]
    for form, expected in expectations:
        assert abs(assemble(form) - expected) < tolerance
class Hdf5DataLoader(object):
    """Load the Values/Keys/Inputs/Features datasets from an HDF5 file."""

    def __init__(self):
        # All attributes stay None until load() is called.
        self.values = None
        self.keys = None
        self.inputs = None
        self.features = None

    @staticmethod
    def _decode_strings(dataset):
        """Materialize a dataset of UTF-8 byte strings as a list of str."""
        return [raw.decode('utf-8') for raw in dataset[:]]

    def load(self, h5_file):
        """Read all four datasets from *h5_file* into memory."""
        with h5py.File(h5_file, 'r') as f:
            self.values = f['Values'][:]
            self.keys = self._decode_strings(f['Keys'])
            self.inputs = self._decode_strings(f['Inputs'])
            self.features = self._decode_strings(f['Features'])
def extractAlucardtranslationsCom(item):
    """Parse a release item from alucardtranslations.com into a release message.

    Returns None for previews / items with no chapter or volume number, a
    release message for recognized tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and anything without a chapter or volume number.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def display_musicvideos_type(params, view):
    """Add a Kodi directory entry for an Emby music-videos library view.

    NOTE(review): the incoming `params` argument is never read — the query is
    rebuilt from scratch, matching the original behavior; confirm callers do
    not rely on values passed in.
    """
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')
    view_name = view.get('Name')
    # Query for the non-recursive item listing of this library view.
    query = {
        'ParentId': view.get('Id'),
        'Recursive': False,
        'ImageTypeLimit': 1,
        'IsMissing': False,
        'Fields': '{field_filters}',
    }
    path = get_emby_url('{server}/emby/Users/{userid}/Items', query)
    url = sys.argv[0] + '?url=' + urllib.parse.quote(path) + '&mode=GET_CONTENT&media_type=musicvideos'
    add_menu_directory_item(view_name + string_load(30405), url)
    xbmcplugin.endOfDirectory(handle)
def seekURL(dic, trail=None):
    """Recursively yield (key_path, url) pairs from a nested dict structure.

    A value is treated as a URL when its key ends with 'url' (case-insensitive),
    except 'PageURL' and empty values, which are skipped.  'AudioLibrary' keys
    hold a list of records whose 'Item1' field is the URL.  Any '{...}'
    template segment is stripped from yielded URLs.

    Raises NotImplementedError when an AudioLibrary record lacks 'Item1'.
    """
    # BUG FIX (idiom): the default was a mutable list (`trail=[]`); replaced
    # with the None-sentinel pattern.  Behavior is unchanged because trail was
    # never mutated, but the shared-default hazard is gone.
    if trail is None:
        trail = []
    for k, v in dic.items():
        newtrail = trail + [k]
        if k == 'AudioLibrary':
            # AudioLibrary entries are a list of {'Item1': url} records.
            for elem in v:
                try:
                    yield (newtrail, elem['Item1'])
                except KeyError:
                    raise NotImplementedError('AudioLibrary has unexpected structure: {}'.format(v))
        elif isinstance(v, dict):
            yield from seekURL(v, newtrail)
        elif isinstance(v, list):
            # Recurse into dict elements only; scalars in lists are ignored.
            for elem in v:
                if not isinstance(elem, dict):
                    continue
                yield from seekURL(elem, newtrail)
        elif k.lower().endswith('url'):
            if k == 'PageURL':
                continue
            if not v:
                continue
            # Strip any '{...}' template segment from the URL.
            v = re.sub('{.*}', '', v)
            yield (newtrail, v)
_tlv_types(CFM_DATA_TLV)
class data_tlv(tlv):
    """CFM Data TLV: a length-prefixed blob of opaque octets.

    Wire format: 1-byte type, 2-byte big-endian length, then `length` value
    octets.
    """
    _PACK_STR = '!BH'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, length=0, data_value=b''):
        super(data_tlv, self).__init__(length)
        self._type = CFM_DATA_TLV
        self.data_value = data_value

    @classmethod
    def parser(cls, buf):
        """Parse a Data TLV from *buf* and return a new instance.

        BUG FIX: restored the @classmethod decorator — the method takes `cls`
        and calls `cls(...)`, but without the decorator calling it on the
        class would bind the buffer to `cls`.
        """
        (type_, length) = struct.unpack_from(cls._PACK_STR, buf)
        form = ('%ds' % length)
        (data_value,) = struct.unpack_from(form, buf, cls._MIN_LEN)
        return cls(length, data_value)

    def serialize(self):
        """Encode to a bytearray; length defaults to len(data_value) when unset."""
        if (self.length == 0):
            # Auto-compute the value length on first serialize.
            self.length = len(self.data_value)
        buf = struct.pack(self._PACK_STR, self._type, self.length)
        buf = bytearray(buf)
        form = ('%ds' % self.length)
        buf.extend(struct.pack(form, self.data_value))
        return buf
class OptionPlotoptionsPyramid3dSonificationTracksMappingLowpassFrequency(Options):
    """Accessors for the Highcharts pyramid3d sonification lowpass-frequency
    mapping options (mapFunction, mapTo, max, min, within).

    NOTE(review): every name below is defined twice (getter-style then
    setter-style); in plain Python the second ``def`` silently replaces the
    first, so these look like @property/@x.setter pairs whose decorators
    were stripped during extraction -- confirm against the original source.
    """
    def mapFunction(self):
        # Getter: no explicit default configured.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stored as a plain (non-JS) value.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class switch_config_failed_error_msg(error_msg):
    """OpenFlow (version 6) error message with err_type 10
    (switch-config failed).

    NOTE(review): this looks like generated loxi code targeting Python 2 --
    ``pack`` joins ``struct.pack`` output with ``''.join``, which only works
    where str is bytes; confirm before running under Python 3.
    """
    version = 6   # wire protocol version
    type = 1      # OFPT_ERROR
    err_type = 10 # switch-config-failed error class
    def __init__(self, xid=None, code=None, data=None):
        # Defaults: xid stays None (unset), code falls back to 0, data to ''.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (code != None):
            self.code = code
        else:
            self.code = 0
        if (data != None):
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize to wire format; the length field is backpatched last."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # placeholder for total length
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.err_type))
        packed.append(struct.pack('!H', self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)  # backpatch length slot
        return ''.join(packed)
    def unpack(reader):
        """Parse a message from *reader*; asserts the fixed header fields.

        NOTE(review): takes no self/cls -- presumably a stripped
        @staticmethod, as in upstream loxi-generated code.
        """
        obj = switch_config_failed_error_msg()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 1)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's extent
        # (4 header bytes already consumed).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _err_type = reader.read('!H')[0]
        assert (_err_type == 10)
        obj.code = reader.read('!H')[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        # Field-wise equality on xid, code and data (same concrete type only).
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.code != other.code):
            return False
        if (self.data != other.data):
            return False
        return True
    def pretty_print(self, q):
        """Write a human-readable dump of this message to pretty-printer *q*."""
        q.text('switch_config_failed_error_msg {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('code = ')
                # NOTE(review): entries 3/4 carry OFPRRFC_* (role-request)
                # names under a switch-config map -- looks generated as-is;
                # confirm against the protocol headers.
                value_name_map = {0: 'OFPSCFC_BAD_FLAGS', 1: 'OFPSCFC_BAD_LEN', 2: 'OFPSCFC_EPERM', 3: 'OFPRRFC_ID_UNSUP', 4: 'OFPRRFC_ID_IN_USE'}
                if (self.code in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.code], self.code)))
                else:
                    q.text(('%#x' % self.code))
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
            q.breakable()
        q.text('}')
class HydraConf():
    """Top-level Hydra configuration schema.

    NOTE(review): uses ``dataclasses.field`` throughout, so this was
    presumably decorated with @dataclass before extraction stripped it --
    confirm against the original source.
    """
    # Defaults list: which config group option each Hydra subsystem uses.
    defaults: List[Any] = field(default_factory=(lambda : [{'output': 'default'}, {'launcher': 'basic'}, {'sweeper': 'basic'}, {'help': 'default'}, {'hydra_help': 'default'}, {'hydra_logging': 'default'}, {'job_logging': 'default'}, {'callbacks': None}, {'env': 'default'}]))
    # Run mode (RUN vs MULTIRUN); None means "decide from the command line".
    mode: Optional[RunMode] = None
    # Additional config search-path entries.
    searchpath: List[str] = field(default_factory=list)
    # Output directory settings for single runs and sweeps.
    run: RunDir = field(default_factory=RunDir)
    sweep: SweepDir = field(default_factory=SweepDir)
    # Logging configs; MISSING means they must be provided by a config group.
    hydra_logging: Dict[(str, Any)] = MISSING
    job_logging: Dict[(str, Any)] = MISSING
    # Sweeper / launcher plugin configs (populated from the defaults list).
    sweeper: Any = MISSING
    launcher: Any = MISSING
    # Callback configurations keyed by name.
    callbacks: Dict[(str, Any)] = field(default_factory=dict)
    # --help and --hydra-help text configuration.
    help: HelpConf = field(default_factory=HelpConf)
    hydra_help: HydraHelpConf = field(default_factory=HydraHelpConf)
    # Subdirectory (inside the run dir) for Hydra's own output; None disables it.
    output_subdir: Optional[str] = '.hydra'
    # Record of the command-line/task overrides for this run.
    overrides: OverridesConf = field(default_factory=OverridesConf)
    job: JobConf = field(default_factory=JobConf)
    runtime: RuntimeConf = field(default_factory=RuntimeConf)
    # Verbose logging: bool, a logger name, or a list of logger names.
    verbose: Any = False
class OptionPlotoptionsSolidgaugeSonificationContexttracksMappingTremoloDepth(Options):
    """Accessors for the Highcharts solidgauge sonification tremolo-depth
    mapping options (mapFunction, mapTo, max, min, within).

    NOTE(review): each name is defined twice (getter then setter); the second
    ``def`` replaces the first in plain Python, so these look like
    @property/@x.setter pairs with stripped decorators -- confirm against
    the original source.
    """
    def mapFunction(self):
        # Getter: no explicit default configured.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stored as a plain (non-JS) value.
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def visual_angle2position(visual_angle, viewing_distance, monitor_size):
    """Convert a visual angle to a screen position in pixels.

    Args:
        visual_angle: (x, y) angle in degrees.
        viewing_distance: eye-to-screen distance (same unit as monitor_size).
        monitor_size: physical (width, height) of the monitor.

    Returns:
        (x, y) position in pixels, scaled by the active screen resolution.
    """
    pixels = _internals.active_exp.screen.surface.get_size()
    # Half-angle in radians: deg/2 * pi/180 == deg * pi/360.
    half_rad_x = visual_angle[0] * _math.pi / 360
    half_rad_y = visual_angle[1] * _math.pi / 360
    # Physical extent subtended on the screen: 2 * d * tan(angle/2).
    extent_x = _math.tan(half_rad_x) * viewing_distance * 2
    extent_y = _math.tan(half_rad_y) * viewing_distance * 2
    return (extent_x * pixels[0] / monitor_size[0],
            extent_y * pixels[1] / monitor_size[1])
def get_database_dsn_string(db_alias: str=DEFAULT_DB_ALIAS):
    """Return the DSN string for the database configured under *db_alias*.

    Raises:
        ValueError: if *db_alias* is empty or None.
        Exception: if no database is configured under that alias.
    """
    if not db_alias:
        raise ValueError("Parameter 'db_alias' must have a value, but was None or empty")
    # Guard clause instead of if/else: fail fast on an unknown alias.
    if db_alias not in settings.DATABASES:
        raise Exception(f'No valid database connection is configured with alias "{db_alias}"')
    return build_dsn_string(settings.DATABASES[db_alias])
def extractGlyphbooksCom(item):
    """Parse a Glyphbooks feed item into a release message.

    Returns None for previews or unparseable titles, False when no known
    group tag matches, otherwise the built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    known_groups = [('PRC', 'PRC', 'translated'),
                    ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in known_groups:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
def index(search_service: SearchService=Provide[Container.search_service], default_query: str=Provide[Container.config.default.query], default_limit: int=Provide[Container.config.default.limit.as_int()]):
    """Flask view: search repositories and render the index page.

    Dependencies are injected via dependency-injector ``Provide`` markers;
    the ``query``/``limit`` request args override the configured defaults
    (``limit`` is coerced to int by ``request.args.get``).
    """
    query = request.args.get('query', default_query)
    limit = request.args.get('limit', default_limit, int)
    repositories = search_service.search_repositories(query, limit)
    return render_template('index.html', query=query, limit=limit, repositories=repositories)
class PersonLandmarks(BaseModel):
    """2D body-landmark coordinates for one detected person.

    Each field holds a coordinate sequence for the named keypoint --
    presumably ``[x, y]`` or ``[x, y, confidence]``; confirm against the
    pose-estimation producer.  Absent landmarks default to empty lists.
    """
    # Face keypoints.
    eye_left: Sequence[float] = Field(default_factory=list)
    eye_right: Sequence[float] = Field(default_factory=list)
    nose: Sequence[float] = Field(default_factory=list)
    ear_left: Sequence[float] = Field(default_factory=list)
    ear_right: Sequence[float] = Field(default_factory=list)
    # Upper-body keypoints.
    shoulder_left: Sequence[float] = Field(default_factory=list)
    shoulder_right: Sequence[float] = Field(default_factory=list)
    elbow_left: Sequence[float] = Field(default_factory=list)
    elbow_right: Sequence[float] = Field(default_factory=list)
    wrist_left: Sequence[float] = Field(default_factory=list)
    wrist_right: Sequence[float] = Field(default_factory=list)
    # Lower-body keypoints.
    hip_left: Sequence[float] = Field(default_factory=list)
    hip_right: Sequence[float] = Field(default_factory=list)
    knee_left: Sequence[float] = Field(default_factory=list)
    knee_right: Sequence[float] = Field(default_factory=list)
    ankle_left: Sequence[float] = Field(default_factory=list)
    ankle_right: Sequence[float] = Field(default_factory=list)
    # Mouth corners.
    mouth_left: Sequence[float] = Field(default_factory=list)
    mouth_right: Sequence[float] = Field(default_factory=list)
def test_textcat_teach(dataset, spacy_model, source, labels, patterns):
    """Smoke-test the textcat teach recipe: view id, dataset and stream shape."""
    recipe = textcat_teach(dataset, spacy_model, source, labels, patterns)
    stream = list(recipe['stream'])
    assert recipe['view_id'] == 'classification'
    assert recipe['dataset'] == dataset
    assert len(stream) >= 2
    first_task = stream[0]
    assert 'label' in first_task
    assert 'meta' in first_task
    assert 'score' in first_task['meta']
class LocalRegistry(object):
    """A small persistent key/value store backed by a ``dbm.dumb`` database.

    Keys are strings (encoded to bytes for storage); values are stored via
    ``str()``.
    """

    def __init__(self, file_path):
        """Open (or create) the registry database at *file_path*.

        Raises:
            AIFlowException: if the parent directory of *file_path* does not
                exist.
        """
        self._db = None
        if not os.path.isdir(os.path.dirname(os.path.abspath(file_path))):
            raise AIFlowException('Parent directory of local registry not exists.')
        self._db = dumb.open(file_path, 'c')

    def set(self, key, value):
        """Store ``str(value)`` under *key*; returns self for chaining."""
        self._db[key.encode()] = str(value)
        return self

    def get(self, key):
        """Return the stored value for *key*, or None when absent."""
        # EAFP: single lookup instead of the previous `in self._db.keys()`
        # membership test, which materialized the full key list per call.
        try:
            return self._db[key.encode()]
        except KeyError:
            return None

    def sync(self):
        """Flush pending writes to disk."""
        self._db.sync()

    def remove(self, key):
        """Delete *key* if present; a missing key is silently ignored."""
        try:
            del self._db[key.encode()]
        except KeyError:
            pass

    def __del__(self):
        # Must be an identity check: an *empty* dbm database is falsy, so the
        # previous `if self._db:` leaked the handle on empty registries.
        if self._db is not None:
            self._db.close()
def intervalListToIntervalTree(interval_list):
    """Group intervals by chromosome into per-chromosome IntervalTrees.

    Each entry of *interval_list* is a (chrom, start, end, ...) sequence;
    the entry's index in the input list is stored as the Interval's data.

    Returns:
        dict mapping chromosome name -> IntervalTree of its intervals.
    """
    trees = {}
    for idx, entry in enumerate(interval_list):
        chrom, start, end = entry[:3]
        if chrom not in trees:
            trees[chrom] = IntervalTree()
        trees[chrom].add(Interval(start, end, idx))
    return trees
class AbstractTestODSOutputDiff(unittest.TestCase):
    """Base class for golden-file diff tests of RP2 ODS output plugins.

    Subclasses generate ODS reports via the rp2 CLI and compare them against
    golden copies.  NOTE(review): ``__get_time_interval`` (no self) and
    ``_generate(cls, ...)`` look like @staticmethod / @classmethod
    definitions whose decorators were stripped -- confirm against the
    original source.
    """
    # Accounting methods to exercise; subclasses may extend/override.
    METHODS: List[str] = ['fifo']
    def setUp(self) -> None:
        # Show full diffs on assertion failure.
        self.maxDiff = None
    def __get_time_interval(from_date: date=MIN_DATE, to_date: date=MAX_DATE) -> str:
        """Return the 'from_to_' filename fragment encoding the date filter.

        Empty when no filtering is applied; 'infinity' / '0' stand in for an
        open upper / lower bound.
        """
        time_interval: str = ''
        if ((from_date > MIN_DATE) and (to_date < MAX_DATE)):
            time_interval = f'{from_date}_{to_date}_'
        elif ((from_date > MIN_DATE) and (to_date >= MAX_DATE)):
            time_interval = f'{from_date}_infinity_'
        elif ((from_date <= MIN_DATE) and (to_date < MAX_DATE)):
            time_interval = f'0_{to_date}_'
        return time_interval
    def _generate(cls, output_dir: Path, test_name: str, config: str, method: str, input_path: Path=INPUT_PATH, from_date: date=MIN_DATE, to_date: date=MAX_DATE, generation_language: Optional[str]=None, country: str='us') -> None:
        """Run the rp2 CLI to generate report files for one test scenario.

        The output filename prefix encodes country, language and date filter
        so parallel scenarios do not collide.
        """
        # Config defaults to the test name when not given explicitly.
        config = (test_name if (config is None) else config)
        time_interval: str = cls.__get_time_interval(from_date, to_date)
        arguments: List[str] = [f'rp2_{country}', '-o', str(output_dir), '-p', f"{test_name}_{(f'{country}_' if (country != 'us') else '')}{(f'{generation_language}_' if generation_language else '')}{time_interval}"]
        # 'mixed' means: let rp2 pick per-asset methods, so no -m flag.
        if (method != 'mixed'):
            arguments.extend(['-m', method])
        if generation_language:
            arguments.extend(['-g', generation_language])
        if from_date:
            arguments.extend(['-f', str(from_date)])
        if to_date:
            arguments.extend(['-t', str(to_date)])
        arguments.extend([str((CONFIG_PATH / Path(f'{config}.ini'))), str((input_path / Path(f'{test_name}.ods')))])
        run(arguments, check=True)
    def _compare(self, output_dir: Path, test_name: str, method: str, output_plugin: OutputPlugins, from_date: date=MIN_DATE, to_date: date=MAX_DATE, generation_language: Optional[str]=None, country: str='us') -> None:
        """Diff one generated ODS file against its golden copy; fail on any diff."""
        time_interval: str = self.__get_time_interval(from_date, to_date)
        diff: str
        # Must mirror the prefix scheme used by _generate above.
        output_file_name: Path = Path(f"{test_name}_{(f'{country}_' if (country != 'us') else '')}{(f'{generation_language}_' if generation_language else '')}{time_interval}{method}_{output_plugin.value}.ods")
        full_output_file_name: Path = (output_dir / output_file_name)
        full_golden_file_name: Path = (GOLDEN_PATH / output_file_name)
        diff = ods_diff(full_golden_file_name, full_output_file_name, generate_ascii_representation=True)
        self.assertFalse(diff, msg=diff)
def test_generate_envoy_image_name_from_tag():
    """Non-release tags map to the dev image; release tags to envoyproxy/envoy."""
    dev_image = image_builder.generate_envoy_image_name_from_tag('definitely_not_a_tag')
    assert dev_image == 'envoyproxy/envoy-dev:definitely_not_a_tag'
    release_image = image_builder.generate_envoy_image_name_from_tag('v1.1.1')
    assert release_image == 'envoyproxy/envoy:v1.1.1'
class VersionUpdaterTester(unittest.TestCase):
    """UI tests for version_updater.MainDialog.

    setUp builds a full in-memory Stalker project (repository, templates,
    tasks/assets/shots and 48 versions with publish/input links), wires a
    TestEnvironment whose current version is version15, and opens the dialog
    against a precomputed reference_resolution.  The tests then inspect the
    dialog's versions_treeView model and button behavior.
    """
    def show_dialog(self, dialog):
        """Show *dialog* and block in the Qt event loop until it closes."""
        dialog.show()
        self.app.exec_()
        # NOTE(review): connect() runs only after exec_() returns, so this
        # hookup affects later event loops at best -- confirm intent.
        self.app.connect(self.app, QtCore.SIGNAL('lastWindowClosed()'), self.app, QtCore.SLOT('quit()'))
    def setUp(self):
        """Create the database fixture, version graph and dialog under test."""
        db.setup()
        db.init()
        self.temp_repo_path = tempfile.mkdtemp()
        self.user1 = User(name='User 1', login='user1', email='', password='12345')
        DBSession.add(self.user1)
        DBSession.commit()
        # Repository paths all point at the same temp dir (platform-agnostic).
        self.repo1 = Repository(name='Test Project Repository', linux_path=self.temp_repo_path, windows_path=self.temp_repo_path, osx_path=self.temp_repo_path)
        self.status_new = Status.query.filter_by(code='NEW').first()
        self.status_wip = Status.query.filter_by(code='WIP').first()
        self.status_comp = Status.query.filter_by(code='CMPL').first()
        # One filename template per entity type; all share the same layout.
        self.task_template = FilenameTemplate(name='Task Template', target_entity_type='Task', path='{{project.code}}/{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}/{%- endfor -%}', filename='{{version.nice_name}}_v{{"%03d"|format(version.version_number)}}')
        self.asset_template = FilenameTemplate(name='Asset Template', target_entity_type='Asset', path='{{project.code}}/{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}/{%- endfor -%}', filename='{{version.nice_name}}_v{{"%03d"|format(version.version_number)}}')
        self.shot_template = FilenameTemplate(name='Shot Template', target_entity_type='Shot', path='{{project.code}}/{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}/{%- endfor -%}', filename='{{version.nice_name}}_v{{"%03d"|format(version.version_number)}}')
        self.sequence_template = FilenameTemplate(name='Sequence Template', target_entity_type='Sequence', path='{{project.code}}/{%- for parent_task in parent_tasks -%}{{parent_task.nice_name}}/{%- endfor -%}', filename='{{version.nice_name}}_v{{"%03d"|format(version.version_number)}}')
        self.structure = Structure(name='Project Struture', templates=[self.task_template, self.asset_template, self.shot_template, self.sequence_template])
        self.project_status_list = StatusList.query.filter_by(target_entity_type='Project').first()
        self.image_format = ImageFormat(name='HD 1080', width=1920, height=1080, pixel_aspect=1.0)
        self.project = Project(name='Test Project', code='TP', repository=self.repo1, status_list=self.project_status_list, structure=self.structure, image_format=self.image_format)
        self.task_status_list = StatusList.query.filter_by(target_entity_type='Task').first()
        self.asset_status_list = StatusList.query.filter_by(target_entity_type='Asset').first()
        self.shot_status_list = StatusList.query.filter_by(target_entity_type='Shot').first()
        self.sequence_status_list = StatusList.query.filter_by(target_entity_type='Sequence').first()
        self.character_type = Type(name='Character', code='CHAR', target_entity_type='Asset')
        # Task/asset/shot hierarchy the versions below hang off of.
        self.task1 = Task(name='Test Task 1', project=self.project)
        self.task2 = Task(name='Test Task 2', project=self.project)
        self.task3 = Task(name='Test Task 3', project=self.project)
        self.task4 = Task(name='Test Task 4', parent=self.task1)
        self.task5 = Task(name='Test Task 5', parent=self.task1)
        self.task6 = Task(name='Test Task 6', parent=self.task1)
        self.asset1 = Asset(name='Asset 1', code='asset1', type=self.character_type, project=self.project)
        self.asset2 = Asset(name='Asset 2', code='asset2', type=self.character_type, parent=self.task4)
        self.sequence1 = Sequence(name='Sequence1', code='SEQ1', project=self.project)
        self.sequence2 = Sequence(name='Sequence2', code='SEQ2', parent=self.task2)
        self.shot1 = Shot(name='SH001', code='SH001', project=self.project)
        self.shot2 = Shot(name='SH002', code='SH002', parent=self.sequence1)
        self.shot3 = Shot(name='SH003', code='SH003', parent=self.sequence2)
        DBSession.add_all([self.repo1, self.status_new, self.status_wip, self.status_comp, self.project_status_list, self.project, self.task_status_list, self.asset_status_list, self.shot_status_list, self.sequence_status_list, self.task1, self.task2, self.task3, self.task4, self.task5, self.task6, self.asset1, self.asset2, self.shot1, self.shot2, self.shot3, self.sequence1, self.sequence2, self.task_template, self.asset_template, self.shot_template, self.sequence_template])
        DBSession.commit()
        def create_version(task, take_name):
            """Create, path-resolve and persist a new Version for *task*."""
            v = Version(task=task, take_name=take_name)
            v.update_paths()
            DBSession.add(v)
            DBSession.commit()
            return v
        # Three versions per take, two takes per container (48 total).
        self.version1 = create_version(self.asset2, 'Main')
        self.version2 = create_version(self.asset2, 'Main')
        self.version3 = create_version(self.asset2, 'Main')
        self.version3.description = 'Test Description'
        self.version4 = create_version(self.asset2, 'Take1')
        self.version5 = create_version(self.asset2, 'Take1')
        self.version6 = create_version(self.asset2, 'Take1')
        self.version7 = create_version(self.task5, 'Main')
        self.version8 = create_version(self.task5, 'Main')
        self.version9 = create_version(self.task5, 'Main')
        self.version10 = create_version(self.task5, 'Take1')
        self.version11 = create_version(self.task5, 'Take1')
        self.version12 = create_version(self.task5, 'Take1')
        self.version13 = create_version(self.task6, 'Main')
        self.version14 = create_version(self.task6, 'Main')
        self.version15 = create_version(self.task6, 'Main')
        self.version16 = create_version(self.task6, 'Take1')
        self.version17 = create_version(self.task6, 'Take1')
        self.version18 = create_version(self.task6, 'Take1')
        self.version19 = create_version(self.shot3, 'Main')
        self.version20 = create_version(self.shot3, 'Main')
        self.version21 = create_version(self.shot3, 'Main')
        self.version22 = create_version(self.shot3, 'Take1')
        self.version23 = create_version(self.shot3, 'Take1')
        self.version24 = create_version(self.shot3, 'Take1')
        self.version25 = create_version(self.task3, 'Main')
        self.version26 = create_version(self.task3, 'Main')
        self.version27 = create_version(self.task3, 'Main')
        self.version28 = create_version(self.task3, 'Take1')
        self.version29 = create_version(self.task3, 'Take1')
        self.version30 = create_version(self.task3, 'Take1')
        self.version31 = create_version(self.asset1, 'Main')
        self.version32 = create_version(self.asset1, 'Main')
        self.version33 = create_version(self.asset1, 'Main')
        self.version34 = create_version(self.asset1, 'Take1')
        self.version35 = create_version(self.asset1, 'Take1')
        self.version36 = create_version(self.asset1, 'Take1')
        self.version37 = create_version(self.shot2, 'Main')
        self.version38 = create_version(self.shot2, 'Main')
        self.version39 = create_version(self.shot2, 'Main')
        self.version40 = create_version(self.shot2, 'Take1')
        self.version41 = create_version(self.shot2, 'Take1')
        self.version42 = create_version(self.shot2, 'Take1')
        self.version43 = create_version(self.shot1, 'Main')
        self.version44 = create_version(self.shot1, 'Main')
        self.version45 = create_version(self.shot1, 'Main')
        self.version46 = create_version(self.shot1, 'Take1')
        self.version47 = create_version(self.shot1, 'Take1')
        self.version48 = create_version(self.shot1, 'Take1')
        # Publish flags and input links forming the reference graph:
        # version15 -> {version12 -> version5 -> version2, version45 -> version48}.
        self.version2.is_published = True
        self.version3.is_published = True
        self.version5.inputs.append(self.version2)
        self.version5.is_published = True
        self.version12.inputs.append(self.version5)
        self.version12.is_published = True
        self.version45.is_published = True
        self.version48.is_published = True
        self.version45.inputs.append(self.version48)
        self.version15.inputs.append(self.version12)
        self.version15.inputs.append(self.version45)
        # Expected resolution fed to the dialog; tests assert against this.
        self.reference_resolution = {'root': [self.version12, self.version45], 'leave': [self.version48, self.version45], 'update': [self.version2], 'create': [self.version5, self.version12]}
        # Files/dirs queued here are deleted in tearDown.
        self.remove_these_files_buffer = []
        self.test_environment = TestEnvironment(name='Test Environment')
        self.test_environment._version = self.version15
        # Reuse a running QApplication if one exists (Qt allows only one).
        if (not QtGui.QApplication.instance()):
            logger.debug('creating a new QApplication')
            self.app = QtGui.QApplication(sys.argv)
        else:
            logger.debug(('using the present QApplication: %s' % QtGui.qApp))
            self.app = QtGui.QApplication.instance()
        self.dialog = version_updater.MainDialog(environment=self.test_environment, reference_resolution=self.reference_resolution)
    def tearDown(self):
        """Drop the DB session and remove every temp file/dir created."""
        DBSession.remove()
        shutil.rmtree(self.temp_repo_path, ignore_errors=True)
        for f in self.remove_these_files_buffer:
            if os.path.isfile(f):
                os.remove(f)
            elif os.path.isdir(f):
                shutil.rmtree(f, True)
    def test_test_setup(self):
        """Sanity-check the fixture: version15's hierarchy walk order."""
        visited_versions = []
        for v in self.version15.walk_hierarchy():
            visited_versions.append(v)
        expected_visited_versions = [self.version15, self.version12, self.version5, self.version2, self.version45, self.version48]
        self.assertEqual(expected_visited_versions, visited_versions)
    def test_versions_treeView_displays_the_root_versions_correctly(self):
        """Tree root rows are exactly the 'root' versions (12 and 45)."""
        version_tree_model = self.dialog.versions_treeView.model()
        row_count = version_tree_model.rowCount()
        self.assertEqual(2, row_count)
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        self.assertEqual(version12_item.version, self.version12)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.assertEqual(version45_item.version, self.version45)
    def test_versions_treeView_displays_the_version_hierarchy_correctly(self):
        """Children nest per the inputs graph: 12->5->2 and 45->48."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.dialog.versions_treeView.expand(version12_item.index())
        version5_item = version12_item.child(0, 0)
        self.assertEqual(version5_item.version, self.version5)
        self.dialog.versions_treeView.expand(version5_item.index())
        version2_item = version5_item.child(0, 0)
        self.assertEqual(version2_item.version, self.version2)
        self.dialog.versions_treeView.expand(version45_item.index())
        version48_item = version45_item.child(0, 0)
        self.assertEqual(version48_item.version, self.version48)
    def test_versions_treeView_displays_the_version_hierarchy_colors_correctly(self):
        """Items needing action are red (192,0,0); up-to-date ones green (0,192,0)."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.dialog.versions_treeView.expand(version12_item.index())
        version5_item = version12_item.child(0, 0)
        self.dialog.versions_treeView.expand(version5_item.index())
        version2_item = version5_item.child(0, 0)
        self.dialog.versions_treeView.expand(version45_item.index())
        version48_item = version45_item.child(0, 0)
        fg = version12_item.foreground()
        color = fg.color()
        self.assertEqual(color, QtGui.QColor(192, 0, 0))
        fg = version5_item.foreground()
        color = fg.color()
        self.assertEqual(color, QtGui.QColor(192, 0, 0))
        fg = version2_item.foreground()
        color = fg.color()
        self.assertEqual(color, QtGui.QColor(192, 0, 0))
        fg = version45_item.foreground()
        color = fg.color()
        self.assertEqual(color, QtGui.QColor(0, 192, 0))
        fg = version48_item.foreground()
        color = fg.color()
        self.assertEqual(color, QtGui.QColor(0, 192, 0))
    def test_versions_treeView_displays_the_version_hierarchy_labels_correctly(self):
        """Columns 2-7 (nice name, take, current, latest, action, description)
        show the expected text for each row of the tree."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.dialog.versions_treeView.expand(version12_item.index())
        version5_item = version12_item.child(0, 0)
        self.dialog.versions_treeView.expand(version5_item.index())
        version2_item = version5_item.child(0, 0)
        self.dialog.versions_treeView.expand(version45_item.index())
        version48_item = version45_item.child(0, 0)
        # Row 0 (version12).
        nice_name_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 2))
        take_column_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 3))
        current_version_column_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 4))
        latest_version_column_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 5))
        action_column_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 6))
        description_column_item = version_tree_model.itemFromIndex(version_tree_model.index(0, 7))
        self.assertEqual(nice_name_item.text(), 'Test_Task_1_Test_Task_5_Take1_v003')
        self.assertEqual(take_column_item.text(), 'Take1')
        self.assertEqual(current_version_column_item.text(), '3')
        self.assertEqual(latest_version_column_item.text(), '3')
        self.assertEqual(action_column_item.text(), 'create')
        self.assertEqual(description_column_item.text(), '')
        # Child of version12 (version5).
        nice_name_item = version12_item.child(0, 2)
        take_column_item = version12_item.child(0, 3)
        current_version_column_item = version12_item.child(0, 4)
        latest_version_column_item = version12_item.child(0, 5)
        action_column_item = version12_item.child(0, 6)
        description_column_item = version12_item.child(0, 7)
        self.assertEqual(nice_name_item.text(), 'Asset_2_Take1_v002')
        self.assertEqual(take_column_item.text(), 'Take1')
        self.assertEqual(current_version_column_item.text(), '2')
        self.assertEqual(latest_version_column_item.text(), '2')
        self.assertEqual(action_column_item.text(), 'create')
        self.assertEqual(description_column_item.text(), '')
        # Child of version5 (version2): needs update, shows its description.
        nice_name_item = version5_item.child(0, 2)
        take_column_item = version5_item.child(0, 3)
        current_version_column_item = version5_item.child(0, 4)
        latest_version_column_item = version5_item.child(0, 5)
        action_column_item = version5_item.child(0, 6)
        description_column_item = version5_item.child(0, 7)
        self.assertEqual(nice_name_item.text(), 'Asset_2_Main_v002')
        self.assertEqual(take_column_item.text(), 'Main')
        self.assertEqual(current_version_column_item.text(), '2')
        self.assertEqual(latest_version_column_item.text(), '3')
        self.assertEqual(action_column_item.text(), 'update')
        self.assertEqual(description_column_item.text(), 'Test Description')
        # Row 1 (version45): up to date, no action.
        nice_name_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 2))
        take_column_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 3))
        current_version_column_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 4))
        latest_version_column_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 5))
        action_column_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 6))
        description_column_item = version_tree_model.itemFromIndex(version_tree_model.index(1, 7))
        self.assertEqual(nice_name_item.text(), 'SH001_Main_v003')
        self.assertEqual(take_column_item.text(), 'Main')
        self.assertEqual(current_version_column_item.text(), '3')
        self.assertEqual(latest_version_column_item.text(), '3')
        self.assertEqual(action_column_item.text(), '')
        self.assertEqual(description_column_item.text(), '')
        # Child of version45 (version48).
        nice_name_item = version45_item.child(0, 2)
        take_column_item = version45_item.child(0, 3)
        current_version_column_item = version45_item.child(0, 4)
        latest_version_column_item = version45_item.child(0, 5)
        action_column_item = version45_item.child(0, 6)
        description_column_item = version45_item.child(0, 7)
        self.assertEqual(nice_name_item.text(), 'SH001_Take1_v003')
        self.assertEqual(take_column_item.text(), 'Take1')
        self.assertEqual(current_version_column_item.text(), '3')
        self.assertEqual(latest_version_column_item.text(), '3')
        self.assertEqual(action_column_item.text(), '')
        self.assertEqual(description_column_item.text(), '')
    def test_not_all_of_the_root_version_items_check_state_is_True_by_default(self):
        """Only actionable roots start checked (12 checked, 45 unchecked)."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.assertEqual(QtCore.Qt.CheckState.Checked, version12_item.checkState())
        self.assertEqual(QtCore.Qt.CheckState.Unchecked, version45_item.checkState())
    def test_only_update_items_have_check_boxes(self):
        """Only items with a pending action are checkable."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.assertTrue(version12_item.isCheckable())
        self.assertFalse(version45_item.isCheckable())
    def test_only_root_items_have_check_boxes(self):
        """Nested (non-root) items are never checkable."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        self.assertTrue(version12_item.isCheckable())
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.assertFalse(version45_item.isCheckable())
        self.dialog.versions_treeView.expand(version12_item.index())
        version5_item = version12_item.child(0, 0)
        self.assertFalse(version5_item.isCheckable())
        self.dialog.versions_treeView.expand(version5_item.index())
        version2_item = version5_item.child(0, 0)
        self.assertFalse(version2_item.isCheckable())
        self.dialog.versions_treeView.expand(version45_item.index())
        version48_item = version45_item.child(0, 0)
        self.assertFalse(version48_item.isCheckable())
    def test_there_is_an_open_button_on_deeper_update_items(self):
        """Deeper items expose no check boxes (open-button behavior elsewhere)."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        self.assertTrue(version12_item.isCheckable())
        self.dialog.versions_treeView.expand(version12_item.index())
        version5_item = version12_item.child(0, 0)
        self.assertFalse(version5_item.isCheckable())
        self.dialog.versions_treeView.expand(version5_item.index())
        version2_item = version5_item.child(0, 0)
        self.assertFalse(version2_item.isCheckable())
        index = version_tree_model.index(1, 0)
        version45_item = version_tree_model.itemFromIndex(index)
        self.assertFalse(version45_item.isCheckable())
        self.dialog.versions_treeView.expand(version45_item.index())
        version48_item = version45_item.child(0, 0)
        self.assertFalse(version48_item.isCheckable())
    def test_generate_reference_resolution_generate_a_new_reference_resolution_correctly(self):
        """Resolution only includes roots whose check box is checked."""
        reference_resolution = self.dialog.generate_reference_resolution()
        self.assertEqual({'root': [], 'leave': [], 'update': [self.version12], 'create': []}, reference_resolution)
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version12_item = version_tree_model.itemFromIndex(index)
        version12_item.setCheckState(QtCore.Qt.CheckState.Unchecked)
        reference_resolution = self.dialog.generate_reference_resolution()
        self.assertEqual({'root': [], 'leave': [], 'update': [], 'create': []}, reference_resolution)
    def test_update_pushButton_will_call_environment_update_versions_method(self):
        """Clicking Update delegates exactly once to environment.update_versions."""
        self.assertRaises(KeyError, self.test_environment.test_data.__getitem__, 'update_versions')
        QTest.mouseClick(self.dialog.update_pushButton, Qt.LeftButton)
        self.assertEqual(1, self.test_environment.test_data['update_versions']['call_count'])
    def test_select_none_pushButton_will_deselect_all_check_boxes_when_clicked(self):
        """Select None unchecks every root item."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version_item1 = version_tree_model.itemFromIndex(index)
        version_item1.setCheckState(QtCore.Qt.Checked)
        index = version_tree_model.index(1, 0)
        version_item2 = version_tree_model.itemFromIndex(index)
        version_item2.setCheckState(QtCore.Qt.Checked)
        QTest.mouseClick(self.dialog.selectNone_pushButton, Qt.LeftButton)
        self.assertEqual(version_item1.checkState(), QtCore.Qt.Unchecked)
        self.assertEqual(version_item2.checkState(), QtCore.Qt.Unchecked)
    def test_select_all_pushButton_will_select_all_check_boxes_when_clicked(self):
        """Select All checks every root item."""
        version_tree_model = self.dialog.versions_treeView.model()
        index = version_tree_model.index(0, 0)
        version_item1 = version_tree_model.itemFromIndex(index)
        version_item1.setCheckState(QtCore.Qt.Unchecked)
        index = version_tree_model.index(1, 0)
        version_item2 = version_tree_model.itemFromIndex(index)
        version_item2.setCheckState(QtCore.Qt.CheckState.Unchecked)
        QTest.mouseClick(self.dialog.selectAll_pushButton, Qt.LeftButton)
        self.assertEqual(version_item1.checkState(), QtCore.Qt.Checked)
        self.assertEqual(version_item2.checkState(), QtCore.Qt.Checked)
    def test_init_will_fill_reference_resolution_if_it_is_empty_and_there_is_an_environment(self):
        """Without an explicit resolution the dialog asks the environment."""
        self.version1.inputs.append(self.version2)
        self.version1.inputs.append(self.version3)
        DBSession.commit()
        self.test_environment._version = self.version1
        new_dialog = version_updater.MainDialog(environment=self.test_environment)
        self.assertEqual(new_dialog.reference_resolution, self.test_environment.check_referenced_versions())
    def test_init_will_raise_a_RuntimeError_if_the_current_version_is_None(self):
        """A missing current version aborts construction with RuntimeError."""
        self.test_environment._version = None
        def patched(*args, **kwargs):
            # Swallow the modal critical dialog so the test does not block.
            pass
        original = QtGui.QMessageBox.critical
        QtGui.QMessageBox.critical = patched
        self.assertRaises(RuntimeError, version_updater.MainDialog, environment=self.test_environment)
        QtGui.QMessageBox.critical = original
def is_port_in_use(port):
    """Return True if TCP *port* on localhost is already bound by someone.

    Probes by attempting to bind the port: EADDRINUSE means it is taken.
    Any other socket error is printed and treated as "not in use", which
    preserves the original best-effort behavior.

    Bug fix: the probe socket is now closed via a ``with`` block, so the
    descriptor can no longer leak if an unexpected exception escapes.
    """
    import socket
    import errno
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(('localhost', port))
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                return True
            # Unexpected failure (permissions, etc.) -- report, assume free.
            print(e)
        return False
class NodeCreator(NodeCreatorBase):
    """Creates Maya shading/render nodes according to ``self.specs``."""

    def create(self):
        """Create and return the node described by the specs.

        Returns the shader node for 'shader'/'utility'/'texture' secondary
        types, the light *shape* for 'light', and ``None`` for anything else.
        """
        node_type = self.specs.get('node_type')
        secondary_type = self.specs.get('secondary_type')
        if secondary_type == 'shader':
            # createSurfaceShader also returns the shading engine; discard it.
            shader, shading_engine = pm.createSurfaceShader(node_type)
            return shader
        if secondary_type == 'utility':
            return pm.shadingNode(node_type, asUtility=1)
        if secondary_type == 'texture':
            return pm.shadingNode(node_type, asTexture=1)
        if secondary_type == 'light':
            # Lights come back as transforms; callers expect the shape node.
            return pm.shadingNode(node_type, asLight=1).getShape()
def test_raises_unknown_ancestor_error(paragon_chain):
    """Snapshot retrieval for a header with an unknown parent must fail."""
    canonical_head = paragon_chain.get_canonical_head()
    orphan_header = make_next_header(paragon_chain, canonical_head, ALICE_PK, RON, NONCE_AUTH)
    clique_instance = get_clique(paragon_chain, canonical_head)
    with pytest.raises(ValidationError, match='Unknown ancestor'):
        clique_instance.get_snapshot(orphan_header)
def activate_nightmode(shortcuts: List[Tuple], editor: Editor):
    """Propagate night-mode CSS custom properties in the editor webview.

    When the webview body carries a night-mode class, the injected JS
    collects every custom property named ``--c-*`` (skipping the ``-night``
    variants) across all stylesheets and overwrites each with the value of
    its ``-night`` twin.  Stylesheet access is wrapped in try/catch because
    cross-origin sheets raise on ``cssRules``.

    ``shortcuts`` is unused here; the signature matches the hook this
    function is registered with -- presumably an Anki editor hook (confirm
    against the registration site).
    """
    editor.web.eval("\n    if (document.body.classList.contains('nightMode') || document.body.classList.contains('night-mode')) {\n        var props = [];\n        for (var i = 0; i < document.styleSheets.length; i++){\n            try {\n                for (var j = 0; j < document.styleSheets[i].cssRules.length; j++){\n                    try{\n                        for (var k = 0; k < document.styleSheets[i].cssRules[j].style.length; k++){\n                            let name = document.styleSheets[i].cssRules[j].style[k];\n                            if (name.startsWith('--c-') && !name.endsWith('-night') && props.indexOf(name) == -1) {\n                                props.push(name);\n                            }\n                        }\n                    } catch (error) {}\n                }\n            } catch (error) {}\n        }\n        for (const v of props) {\n            document.documentElement.style.setProperty(v, getComputedStyle(document.documentElement).getPropertyValue(v + '-night'));\n        }\n    }\n    ")
def analyze_read_segments(primary, supplementaries, bam, options):
    """Scan the split (primary + supplementary) alignments of one read for
    structural-variant signatures.

    The read's alignment segments are ordered along the read, and each pair
    of consecutive segments is classified by chromosome, orientation, and the
    gap/overlap on the read vs. the reference into insertions, deletions,
    tandem duplications, inversions, interspersed duplications, or generic
    breakends.  Returns the list of candidate objects.

    Parameters
    ----------
    primary : pysam aligned segment (the primary alignment; also supplies
        the read sequence for insertion alleles)
    supplementaries : list of supplementary aligned segments of the same read
    bam : pysam alignment file handle (used for reference-name lookup and
        passed into the candidate constructors)
    options : namespace with tolerances -- query_overlap_tolerance,
        query_gap_tolerance, reference_overlap_tolerance,
        reference_gap_tolerance, min_sv_size, max_sv_size
    """
    read_name = primary.query_name
    alignments = ([primary] + supplementaries)
    alignment_list = []
    # Normalize each alignment into read coordinates: for reverse-strand
    # alignments the query interval is mirrored so q_start/q_end always run
    # in the original read's direction.
    for alignment in alignments:
        if alignment.is_reverse:
            q_start = (alignment.infer_read_length() - alignment.query_alignment_end)
            q_end = (alignment.infer_read_length() - alignment.query_alignment_start)
        else:
            q_start = alignment.query_alignment_start
            q_end = alignment.query_alignment_end
        new_alignment_dict = {'q_start': q_start, 'q_end': q_end, 'ref_id': alignment.reference_id, 'ref_start': alignment.reference_start, 'ref_end': alignment.reference_end, 'is_reverse': alignment.is_reverse}
        alignment_list.append(new_alignment_dict)
    # Order segments by their position on the read, then walk adjacent pairs.
    sorted_alignment_list = sorted(alignment_list, key=(lambda aln: (aln['q_start'], aln['q_end'])))
    sv_candidates = []
    tandem_duplications = []
    translocations = []
    inversions = []
    for index in range((len(sorted_alignment_list) - 1)):
        alignment_current = sorted_alignment_list[index]
        alignment_next = sorted_alignment_list[(index + 1)]
        # Gap (positive) or overlap (negative) between the pair on the read.
        distance_on_read = (alignment_next['q_start'] - alignment_current['q_end'])
        if (alignment_current['ref_id'] == alignment_next['ref_id']):
            # --- Same chromosome ---
            ref_chr = bam.get_reference_name(alignment_current['ref_id'])
            if (alignment_current['is_reverse'] == alignment_next['is_reverse']):
                # Same orientation: INS / DEL / tandem DUP / breakend.
                if alignment_current['is_reverse']:
                    distance_on_reference = (alignment_current['ref_start'] - alignment_next['ref_end'])
                else:
                    distance_on_reference = (alignment_next['ref_start'] - alignment_current['ref_end'])
                if (distance_on_read >= (- options.query_overlap_tolerance)):
                    if (distance_on_reference >= (- options.reference_overlap_tolerance)):
                        # Segments are (nearly) collinear; the difference of
                        # the two gaps is the putative SV length.
                        deviation = (distance_on_read - distance_on_reference)
                        if (deviation >= options.min_sv_size):
                            # Extra read sequence => insertion.
                            if (distance_on_reference <= options.reference_gap_tolerance):
                                if (not alignment_current['is_reverse']):
                                    insertion_seq = primary.query_sequence[alignment_current['q_end']:(alignment_current['q_end'] + deviation)]
                                    sv_candidates.append(CandidateInsertion(ref_chr, alignment_current['ref_end'], (alignment_current['ref_end'] + deviation), [read_name], insertion_seq, bam))
                                else:
                                    # Reverse strand: map read coordinates back
                                    # through the read-length mirror.
                                    insertion_seq = primary.query_sequence[(primary.infer_read_length() - alignment_next['q_start']):((primary.infer_read_length() - alignment_next['q_start']) + deviation)]
                                    sv_candidates.append(CandidateInsertion(ref_chr, alignment_current['ref_start'], (alignment_current['ref_start'] + deviation), [read_name], insertion_seq, bam))
                        elif ((- options.max_sv_size) <= deviation <= (- options.min_sv_size)):
                            # Extra reference sequence => deletion.
                            if (distance_on_read <= options.query_gap_tolerance):
                                if (not alignment_current['is_reverse']):
                                    sv_candidates.append(CandidateDeletion(ref_chr, alignment_current['ref_end'], (alignment_current['ref_end'] - deviation), [read_name], bam))
                                else:
                                    sv_candidates.append(CandidateDeletion(ref_chr, alignment_next['ref_end'], (alignment_next['ref_end'] - deviation), [read_name], bam))
                        elif (deviation < (- options.max_sv_size)):
                            # Reference jump too large for a deletion call:
                            # record a same-chromosome breakend pair.
                            if (distance_on_read <= options.query_gap_tolerance):
                                if (not alignment_current['is_reverse']):
                                    sv_candidates.append(CandidateBreakend(ref_chr, (alignment_current['ref_end'] - 1), 'fwd', ref_chr, alignment_next['ref_start'], 'fwd', [read_name], bam))
                                    translocations.append(('fwd', 'fwd', ref_chr, (alignment_current['ref_end'] - 1), ref_chr, alignment_next['ref_start']))
                                else:
                                    sv_candidates.append(CandidateBreakend(ref_chr, alignment_current['ref_start'], 'rev', ref_chr, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                                    translocations.append(('rev', 'rev', ref_chr, alignment_current['ref_start'], ref_chr, (alignment_next['ref_end'] - 1)))
                    elif (distance_on_read <= options.query_gap_tolerance):
                        # Segments overlap on the reference beyond tolerance
                        # => the read traversed a region twice: tandem DUP.
                        deviation = (distance_on_read - distance_on_reference)
                        if (deviation >= options.min_sv_size):
                            if (not alignment_current['is_reverse']):
                                if (alignment_next['ref_end'] > alignment_current['ref_start']):
                                    # (chrom, start, end, fully_covered, forward)
                                    tandem_duplications.append((ref_chr, alignment_next['ref_start'], (alignment_next['ref_start'] + deviation), True, True))
                                elif (distance_on_reference >= (- options.max_sv_size)):
                                    tandem_duplications.append((ref_chr, alignment_next['ref_start'], (alignment_next['ref_start'] + deviation), False, True))
                                else:
                                    sv_candidates.append(CandidateBreakend(ref_chr, (alignment_current['ref_end'] - 1), 'fwd', ref_chr, alignment_next['ref_start'], 'fwd', [read_name], bam))
                                    translocations.append(('fwd', 'fwd', ref_chr, (alignment_current['ref_end'] - 1), ref_chr, alignment_next['ref_start']))
                            elif (alignment_next['ref_start'] < alignment_current['ref_end']):
                                # Reverse-strand duplication variants.
                                tandem_duplications.append((ref_chr, alignment_current['ref_start'], (alignment_current['ref_start'] + deviation), True, False))
                            elif (distance_on_reference >= (- options.max_sv_size)):
                                tandem_duplications.append((ref_chr, alignment_current['ref_start'], (alignment_current['ref_start'] + deviation), False, False))
                            else:
                                sv_candidates.append(CandidateBreakend(ref_chr, alignment_current['ref_start'], 'rev', ref_chr, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                                translocations.append(('rev', 'rev', ref_chr, alignment_current['ref_start'], ref_chr, (alignment_next['ref_end'] - 1)))
            else:
                # Opposite orientations on the same chromosome: inversion
                # breakpoints (collected for later merging) or breakends.
                if ((not alignment_current['is_reverse']) and alignment_next['is_reverse']):
                    distance_on_reference = (alignment_next['ref_end'] - alignment_current['ref_end'])
                    deviation = (distance_on_read - distance_on_reference)
                    if ((- options.query_overlap_tolerance) <= distance_on_read <= options.query_gap_tolerance):
                        if ((alignment_next['ref_start'] - alignment_current['ref_end']) >= (- options.reference_overlap_tolerance)):
                            if (options.min_sv_size <= (- deviation) <= options.max_sv_size):
                                inversions.append((ref_chr, alignment_current['ref_end'], (alignment_current['ref_end'] - deviation), 'left_fwd'))
                            else:
                                sv_candidates.append(CandidateBreakend(ref_chr, (alignment_current['ref_end'] - 1), 'fwd', ref_chr, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                                translocations.append(('fwd', 'rev', ref_chr, (alignment_current['ref_end'] - 1), ref_chr, (alignment_next['ref_end'] - 1)))
                        elif ((alignment_current['ref_start'] - alignment_next['ref_end']) >= (- options.reference_overlap_tolerance)):
                            if (options.min_sv_size <= deviation <= options.max_sv_size):
                                inversions.append((ref_chr, alignment_next['ref_end'], (alignment_next['ref_end'] + deviation), 'left_rev'))
                            else:
                                sv_candidates.append(CandidateBreakend(ref_chr, (alignment_current['ref_end'] - 1), 'fwd', ref_chr, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                                translocations.append(('fwd', 'rev', ref_chr, (alignment_current['ref_end'] - 1), ref_chr, (alignment_next['ref_end'] - 1)))
                        else:
                            # Segments overlap too much on the reference.
                            pass
                if (alignment_current['is_reverse'] and (not alignment_next['is_reverse'])):
                    distance_on_reference = (alignment_next['ref_start'] - alignment_current['ref_start'])
                    deviation = (distance_on_read - distance_on_reference)
                    if ((- options.query_overlap_tolerance) <= distance_on_read <= options.query_gap_tolerance):
                        if ((alignment_next['ref_start'] - alignment_current['ref_end']) >= (- options.reference_overlap_tolerance)):
                            if (options.min_sv_size <= (- deviation) <= options.max_sv_size):
                                inversions.append((ref_chr, alignment_current['ref_start'], (alignment_current['ref_start'] - deviation), 'right_fwd'))
                            else:
                                sv_candidates.append(CandidateBreakend(ref_chr, alignment_current['ref_start'], 'rev', ref_chr, alignment_next['ref_start'], 'fwd', [read_name], bam))
                                translocations.append(('rev', 'fwd', ref_chr, alignment_current['ref_start'], ref_chr, alignment_next['ref_start']))
                        elif ((alignment_current['ref_start'] - alignment_next['ref_end']) >= (- options.reference_overlap_tolerance)):
                            if (options.min_sv_size <= deviation <= options.max_sv_size):
                                inversions.append((ref_chr, alignment_next['ref_start'], (alignment_next['ref_start'] + deviation), 'right_rev'))
                            else:
                                sv_candidates.append(CandidateBreakend(ref_chr, alignment_current['ref_start'], 'rev', ref_chr, alignment_next['ref_start'], 'fwd', [read_name], bam))
                                translocations.append(('rev', 'fwd', ref_chr, alignment_current['ref_start'], ref_chr, alignment_next['ref_start']))
                        else:
                            pass
        else:
            # --- Different chromosomes: inter-chromosomal breakends ---
            ref_chr_current = bam.getrname(alignment_current['ref_id'])
            ref_chr_next = bam.getrname(alignment_next['ref_id'])
            if (alignment_current['is_reverse'] == alignment_next['is_reverse']):
                if (distance_on_read >= (- options.query_overlap_tolerance)):
                    if (distance_on_read <= options.query_gap_tolerance):
                        if (not alignment_current['is_reverse']):
                            sv_candidates.append(CandidateBreakend(ref_chr_current, (alignment_current['ref_end'] - 1), 'fwd', ref_chr_next, alignment_next['ref_start'], 'fwd', [read_name], bam))
                            translocations.append(('fwd', 'fwd', ref_chr_current, (alignment_current['ref_end'] - 1), ref_chr_next, alignment_next['ref_start']))
                        else:
                            sv_candidates.append(CandidateBreakend(ref_chr_current, alignment_current['ref_start'], 'rev', ref_chr_next, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                            translocations.append(('rev', 'rev', ref_chr_current, alignment_current['ref_start'], ref_chr_next, (alignment_next['ref_end'] - 1)))
                    else:
                        pass
            elif (distance_on_read >= (- options.query_overlap_tolerance)):
                # Opposite orientations across chromosomes.
                if (distance_on_read <= options.query_gap_tolerance):
                    if (not alignment_current['is_reverse']):
                        sv_candidates.append(CandidateBreakend(ref_chr_current, (alignment_current['ref_end'] - 1), 'fwd', ref_chr_next, (alignment_next['ref_end'] - 1), 'rev', [read_name], bam))
                        translocations.append(('fwd', 'rev', ref_chr_current, (alignment_current['ref_end'] - 1), ref_chr_next, (alignment_next['ref_end'] - 1)))
                    else:
                        sv_candidates.append(CandidateBreakend(ref_chr_current, alignment_current['ref_start'], 'rev', ref_chr_next, alignment_next['ref_start'], 'fwd', [read_name], bam))
                        translocations.append(('rev', 'fwd', ref_chr_current, alignment_current['ref_start'], ref_chr_next, alignment_next['ref_start']))
                else:
                    pass
    # --- Merge consecutive, similar tandem duplication evidence into single
    # candidates with a copy-number count. ---
    current_chromosome = None
    current_starts = []
    current_ends = []
    current_copy_number = 0
    current_fully_covered = []
    for tandem_duplication in tandem_duplications:
        if (current_chromosome == None):
            current_chromosome = tandem_duplication[0]
            current_starts.append(tandem_duplication[1])
            current_ends.append(tandem_duplication[2])
            current_copy_number = 1
            current_fully_covered.append(tandem_duplication[3])
            current_direction = tandem_duplication[4]
        elif (is_similar(current_chromosome, mean(current_starts), mean(current_ends), tandem_duplication[0], tandem_duplication[1], tandem_duplication[2]) and (current_direction == tandem_duplication[4])):
            current_starts.append(tandem_duplication[1])
            current_ends.append(tandem_duplication[2])
            current_copy_number += 1
            current_fully_covered.append(tandem_duplication[3])
        else:
            # Flush the finished cluster and start a new one.
            # NOTE(review): current_direction is not updated here, so the new
            # cluster keeps comparing against the previous cluster's
            # direction -- looks like an oversight; confirm upstream.
            fully_covered = (True if sum(current_fully_covered) else False)
            sv_candidates.append(CandidateDuplicationTandem(current_chromosome, int(mean(current_starts)), int(mean(current_ends)), current_copy_number, fully_covered, [read_name], bam))
            current_chromosome = tandem_duplication[0]
            current_starts = [tandem_duplication[1]]
            current_ends = [tandem_duplication[2]]
            current_copy_number = 1
            current_fully_covered = [tandem_duplication[3]]
    if (current_chromosome != None):
        # Flush the trailing cluster.
        fully_covered = (True if sum(current_fully_covered) else False)
        sv_candidates.append(CandidateDuplicationTandem(current_chromosome, int(mean(current_starts)), int(mean(current_ends)), current_copy_number, fully_covered, [read_name], bam))
    # --- Pair up complementary breakends from earlier in the same read:
    # a matching fwd/fwd or rev/rev pair back to (near) the origin indicates
    # an interspersed duplication rather than a translocation. ---
    for this_index in range(len(translocations)):
        this_dir1 = translocations[this_index][0]
        this_dir2 = translocations[this_index][1]
        this_chr1 = translocations[this_index][2]
        this_pos1 = translocations[this_index][3]
        this_chr2 = translocations[this_index][4]
        this_pos2 = translocations[this_index][5]
        for (before_dir1, before_dir2, before_chr1, before_pos1, before_chr2, before_pos2) in translocations[:this_index]:
            if ((before_dir1 == this_dir2) and (before_dir2 == this_dir1)):
                if is_similar(before_chr1, before_pos1, 0, this_chr2, this_pos2, 0):
                    if (before_chr2 == this_chr1):
                        if (before_dir2 == before_dir1):
                            if (before_dir1 == 'fwd'):
                                length = ((this_pos1 + 1) - before_pos2)
                                if (options.min_sv_size <= length <= options.max_sv_size):
                                    sv_candidates.append(CandidateDuplicationInterspersed(before_chr2, before_pos2, (this_pos1 + 1), before_chr1, int(mean([(before_pos1 + 1), this_pos2])), (int(mean([(before_pos1 + 1), this_pos2])) + length), [read_name], bam))
                            elif (before_dir1 == 'rev'):
                                length = ((before_pos2 + 1) - this_pos1)
                                if (options.min_sv_size <= length <= options.max_sv_size):
                                    sv_candidates.append(CandidateDuplicationInterspersed(before_chr2, this_pos1, (before_pos2 + 1), before_chr1, int(mean([before_pos1, (this_pos2 + 1)])), (int(mean([before_pos1, (this_pos2 + 1)])) + length), [read_name], bam))
                            else:
                                pass
    # --- Cluster overlapping inversion breakpoints and emit candidates. ---
    sorted_inversions = sorted(inversions, key=(lambda inversion: (inversion[0], inversion[1], inversion[2])))
    active_inversions = []
    for inversion in sorted_inversions:
        (chrom, start, end, direction) = inversion
        if (len(active_inversions) == 0):
            active_inversions.append(inversion)
        elif ((chrom == active_inversions[(- 1)][0]) and (start < max([i[2] for i in active_inversions]))):
            # Overlaps the current cluster: extend it.
            active_inversions.append(inversion)
        else:
            # NOTE(review): the inversion that triggered this flush is never
            # added to a cluster, so it is silently dropped -- suspicious;
            # confirm against the reference implementation.
            sv_candidates.extend(process_overlapping_inversions(active_inversions, read_name, bam))
            active_inversions = []
    if (len(active_inversions) > 0):
        sv_candidates.extend(process_overlapping_inversions(active_inversions, read_name, bam))
    return sv_candidates
def extractBrassboltsBlogspotCom(item):
    """Parse a brassbolts.blogspot.com feed item into a release message.

    Returns ``None`` for previews or posts without a chapter/volume, a
    release message when a known tag matches, and ``False`` otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class SecondaryEclipseLightCurve():
    """Light curve model for a system with both transits and secondary eclipses.

    Combines two limb-darkened light curves -- one for the primary being
    occulted and one for the secondary -- weighted by a surface brightness
    ratio, so the returned flux includes both eclipse events.
    """

    def __init__(self, u_primary, u_secondary, surface_brightness_ratio, model=None):
        """Build the two limb-darkening models.

        u_primary / u_secondary: two-element quadratic limb darkening
        coefficient sequences for each body.
        surface_brightness_ratio: secondary-to-primary surface brightness.
        """
        self.primary = LimbDarkLightCurve(u_primary[0], u_primary[1], model=model)
        self.secondary = LimbDarkLightCurve(u_secondary[0], u_secondary[1], model=model)
        self.surface_brightness_ratio = as_tensor_variable(surface_brightness_ratio)

    def get_light_curve(self, orbit=None, r=None, t=None, texp=None, oversample=7, order=0, use_in_transit=None, light_delay=False):
        """Return the combined, normalized light curve at times ``t``.

        The secondary eclipse is computed on the flipped orbit (secondary as
        the occulted body, radius ``orbit.r_star``), and the two curves are
        mixed by the flux ratio ``surface_brightness_ratio * (r/r_star)**2``.
        """
        r = as_tensor_variable(r)
        # _flip swaps the roles of the two bodies for the secondary eclipse.
        orbit2 = orbit._flip(r)
        lc1 = self.primary.get_light_curve(orbit=orbit, r=r, t=t, texp=texp, oversample=oversample, order=order, use_in_transit=use_in_transit, light_delay=light_delay)
        lc2 = self.secondary.get_light_curve(orbit=orbit2, r=orbit.r_star, t=t, texp=texp, oversample=oversample, order=order, use_in_transit=use_in_transit, light_delay=light_delay)
        k = (r / orbit.r_star)
        flux_ratio = (self.surface_brightness_ratio * (k ** 2))
        # Normalize so the out-of-eclipse flux is unity.
        return ((lc1 + (flux_ratio * lc2)) / (1 + flux_ratio))
class ModelHostedFetcher(ErsiliaBase):
    """Fetcher for models that are served at an external (hosted) URL.

    Instead of downloading the model, this registers it locally, records the
    hosted URL, and writes the API schema, information, and status files the
    rest of the tooling expects under the model's destination directory.
    """

    def __init__(self, url, config_json=None):
        # ``url`` may be None; availability is then resolved via AirTable.
        ErsiliaBase.__init__(self, config_json=config_json, credentials_json=None)
        self.logger.debug('Initialized with URL: {0}'.format(url))
        self.url = url

    def _is_available_known_url(self):
        """Return True when the configured URL answers an HTTP GET with 2xx."""
        self.logger.debug('Checking if url {0} is reachable'.format(self.url))
        try:
            response = requests.get(self.url, timeout=5)
            response.raise_for_status()
            return True
        except requests.exceptions.RequestException as err:
            self.logger.debug('URL {0} is not reachable. Error: {1}'.format(self.url, err))
            return False

    def _is_available_unknown_url(self, model_id):
        """Look the model up in AirTable and check it has a valid Host URL."""
        self.logger.debug('Trying to find an available URL where the model is hosted')
        url_field = 'Host URL'
        identifier_field = 'Identifier'
        ai = AirtableInterface(config_json=self.config_json)
        for record in ai.items_all():
            fields = record['fields']
            if (fields[identifier_field] == model_id):
                if (url_field not in fields):
                    self.logger.debug('No hosted URL found for this model')
                    return False
                url = fields[url_field]
                # Only syntactic validation here; reachability is not probed.
                if validators.url(url):
                    self.logger.debug('This model has an associated URL: {0}'.format(url))
                    return True
                else:
                    self.logger.debug("This doesn't seem to be a valid URL: {0}".format(url))
        self.logger.debug('Model was not found in AirTable')
        return False

    def is_available(self, model_id):
        """Return True if the model can be served from a hosted URL."""
        if (self.url is None):
            return self._is_available_unknown_url(model_id=model_id)
        else:
            return self._is_available_known_url()

    def _update_url(self, model_id):
        """Populate self.url from the registered from-hosted file if unset."""
        if (self.url is None):
            from_hosted_file = os.path.join(self._model_path(model_id), IS_FETCHED_FROM_HOSTED_FILE)
            with open(from_hosted_file, 'r') as f:
                data = json.load(f)
            self.url = data['url']

    def write_apis(self, model_id):
        """Serve the hosted model once so its API files get written."""
        self.logger.debug('Writing APIs')
        di = HostedService(model_id=model_id, config_json=self.config_json, url=self.url)
        di.serve()
        di.close()

    def get_information(self, model_id):
        """POST to the service's /info endpoint and persist the response."""
        self.logger.debug('Getting information for model identifier: {0}'.format(model_id))
        headers = {'accept': '*/*', 'Content-Type': 'application/json'}
        data = {}
        response = requests.post((self.url + '/info'), headers=headers, data=json.dumps(data))
        info = response.json()
        info_file = os.path.join(EOS, 'dest', model_id, INFORMATION_FILE)
        with open(info_file, 'w') as f:
            json.dump(info, f, indent=4)

    def get_metadata(self, model_id):
        """Extract the api_schema from the stored info file to its own file."""
        self.logger.debug('Getting api_schema for model identifier: {0}'.format(model_id))
        info_file = os.path.join(EOS, 'dest', model_id, INFORMATION_FILE)
        with open(info_file, 'r') as f:
            info = json.load(f)
        api_schema = info['api_schema']
        api_schema_file = os.path.join(EOS, 'dest', model_id, API_SCHEMA_FILE)
        with open(api_schema_file, 'w') as f:
            json.dump(api_schema, f, indent=4)

    def write_status(self, model_id):
        """Mark the fetch as completed in the model's status file."""
        status = {'done': True}
        status_file = os.path.join(EOS, 'dest', model_id, STATUS_FILE)
        with open(status_file, 'w') as f:
            json.dump(status, f, indent=4)

    def fetch(self, model_id):
        """Full hosted-fetch pipeline: register, resolve URL, write files."""
        self.logger.debug('Fetching from hosted, model identifier: {0}'.format(model_id))
        mr = ModelRegisterer(model_id=model_id, config_json=self.config_json)
        self.logger.debug('Registering model')
        mr.register(is_from_hosted=True)
        self.logger.debug('Writing APIs')
        # Registration writes the from-hosted file _update_url reads.
        self._update_url(model_id)
        self.write_apis(model_id)
        self.get_information(model_id)
        self.get_metadata(model_id)
        self.write_status(model_id)
def arg_range_analysis(proc, arg, fast=True):
    """Estimate (lower, upper) bounds for an indexable procedure argument.

    In fast mode no solver queries are issued: Size arguments are known to
    be positive (lower bound 1), everything else is unbounded.  Otherwise
    both bounds are located by binary search over solver queries in
    [-2**15, 2**15] (or [1, 2**15] for Size arguments); an unprovable bound
    is reported as None.
    """
    assert arg.type.is_indexable()
    if fast:
        if isinstance(arg.type, LoopIR.Size):
            return (1, None)
        return (None, None)

    arg_read = LoopIR.Read(name=arg.name, idx=[], type=T.size, srcinfo=proc.srcinfo)

    def holds(bound, option):
        # Ask the solver whether `arg <option> bound` is provable.
        return Check_ExprBound(proc, [proc.body[0]], arg_read, bound, option, exception=False)

    def search_greatest_lower(lo, hi):
        # Largest value v with arg >= v provable, or None.
        best = None
        while lo <= hi:
            mid = (lo + hi) // 2
            if holds(mid, Check_ExprBound_Options.GEQ):
                best = mid
                lo = mid + 1
            else:
                hi = mid - 1
        return best

    def search_least_upper(lo, hi):
        # Smallest value v with arg <= v provable, or None.
        best = None
        while lo <= hi:
            mid = (lo + hi) // 2
            if holds(mid, Check_ExprBound_Options.LEQ):
                best = mid
                hi = mid - 1
            else:
                lo = mid + 1
        return best

    limit = 2 ** 15
    floor = 1 if isinstance(arg.type, LoopIR.Size) else -limit
    return (search_greatest_lower(floor, limit), search_least_upper(floor, limit))
@pytest.mark.usefixtures('use_tmpdir')
def test_forward_model_arglist_with_weird_characters():
    """ARGLIST entries containing slashes/pipes/escapes must survive parsing.

    Fix: the decorator line had been truncated to a bare
    ``.usefixtures('use_tmpdir')``, which is a syntax error; restored the
    full ``@pytest.mark.usefixtures`` form.
    """
    # A dummy executable so EXECUTABLE validation passes.
    with open('exec', 'w', encoding='utf-8') as f:
        pass
    os.chmod('exec', ((stat.S_IXUSR | stat.S_IXGRP) | stat.S_IXOTH))
    with open('CONFIG', 'w', encoding='utf-8') as f:
        f.write(dedent('\nSTDERR insert_nosim.stderr\nSTDOUT insert_nosim.stdout\nEXECUTABLE sed\nARGLIST -i s/^RUNSPEC.*/|RUNSPEC\\nNOSIM/ <ECLBASE>.DATA\nMIN_ARG 3\nMAX_ARG 3\nARG_TYPE 0 STRING\nARG_TYPE 0 STRING\nARG_TYPE 0 STRING\n '))
    forward_model = ForwardModel.from_config_file('CONFIG')
    assert (forward_model.environment == forward_model.default_env)
    assert (forward_model.arglist == ['-i', 's/^RUNSPEC.*/|RUNSPEC\nNOSIM/', '<ECLBASE>.DATA'])
class CircuitBreakingTCPTest(AmbassadorTest):
    """Checks that circuit breakers on a TCPMapping actually shed load.

    Two TCP mappings are configured: one with default circuit-breaker limits
    (port 6789) and one with very low limits (port 6790).  200 slow requests
    are fired at each; the default mapping should see no errors while the
    constrained one should fail a substantial fraction.
    """

    extra_ports = [6789, 6790]
    # Backends for the two mappings.
    target1: ServiceType
    target2: ServiceType

    def init(self):
        self.target1 = HTTP(name='target1')
        self.target2 = HTTP(name='target2')

    def config(self) -> Generator[(Union[(str, Tuple[(Node, str)])], None, None)]:
        # Mapping 1: no circuit breakers (defaults).
        (yield (self.target1, self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: TCPMapping\nname: {self.name}-1\nport: 6789\nservice: {self.target1.path.fqdn}:80\n')))
        # Mapping 2: aggressively low connection/pending limits.
        (yield (self.target2, self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: TCPMapping\nname: {self.name}-2\nport: 6790\nservice: {self.target2.path.fqdn}:80\ncircuit_breakers:\n- priority: default\n  max_pending_requests: 1\n  max_connections: 1\n')))

    def queries(self):
        # 200 deliberately slow (1s) requests against each mapping so that
        # connections pile up and the breaker on port 6790 trips.
        for i in range(200):
            (yield Query(self.url(self.name, port=6789), headers={'Kat-Req-Http-Requested-Backend-Delay': '1000'}, ignore_result=True, phase=1))
        for i in range(200):
            (yield Query(self.url(self.name, port=6790), headers={'Kat-Req-Http-Requested-Backend-Delay': '1000'}, ignore_result=True, phase=1))

    def check(self):
        failures = []
        if (len(self.results) != 400):
            failures.append(f'wanted 400 results, got {len(self.results)}')
        else:
            # First 200 results hit the unconstrained mapping, last 200 the
            # circuit-broken one.
            default_limit_result = self.results[0:200]
            low_limit_results = self.results[200:400]
            default_limit_failure = 0
            for result in default_limit_result:
                if result.error:
                    default_limit_failure += 1
            if (default_limit_failure != 0):
                failures.append(f'expected no failure with default limit, got {default_limit_failure}')
            low_limit_failure = 0
            for result in low_limit_results:
                if result.error:
                    low_limit_failure += 1
            # The breaker should reject a large-but-not-total share.
            if (not (100 < low_limit_failure < 200)):
                failures.append(f'expected 100-200 failure with low limit, got {low_limit_failure}')
        if failures:
            print(('%s FAILED:\n  %s' % (self.name, '\n  '.join(failures))))
            pytest.xfail(f'FFS {self.name}')
class OptionPlotoptionsItemStatesSelect(Options):
    """Option wrapper for Highcharts ``plotOptions.item.states.select``.

    Fix: the paired accessors (``enabled``, ``lineWidth``, ``lineWidthPlus``)
    were plain methods with duplicate names, so each setter silently shadowed
    its getter.  These are property getter/setter pairs whose decorators had
    been stripped; the ``@property`` / ``@x.setter`` decorators are restored,
    matching the attribute-style access this generated option API uses.
    """

    @property
    def animation(self) -> 'OptionPlotoptionsItemStatesSelectAnimation':
        """Animation settings for the select state."""
        return self._config_sub_data('animation', OptionPlotoptionsItemStatesSelectAnimation)

    @property
    def enabled(self):
        """Whether the select state is enabled (default True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def halo(self) -> 'OptionPlotoptionsItemStatesSelectHalo':
        """Halo settings for the select state."""
        return self._config_sub_data('halo', OptionPlotoptionsItemStatesSelectHalo)

    @property
    def lineWidth(self):
        """Line width in the select state (default None)."""
        return self._config_get(None)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineWidthPlus(self):
        """Line width increment in the select state (default 1)."""
        return self._config_get(1)

    @lineWidthPlus.setter
    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)

    @property
    def marker(self) -> 'OptionPlotoptionsItemStatesSelectMarker':
        """Marker settings for the select state."""
        return self._config_sub_data('marker', OptionPlotoptionsItemStatesSelectMarker)
def es_get_latest_daily_user_statistic(user_id, main_chain='eos', security_id='cryptocurrency_contract_RAM-EOS'):
    """Return the most recent daily statistic document for a user, or None."""
    index = get_cryptocurrency_daily_user_statistic_index(main_chain=main_chain)
    search = (
        Search(using=es_client, index=index, doc_type='doc')
        .filter('term', userId=user_id)
        .filter('term', securityId=security_id)
        .sort({'timestamp': {'order': 'desc'}})
    )
    # Only the newest document is needed.
    response = search[0:1].execute()
    hits = response['hits']['hits']
    if not hits:
        return None
    return hits[0]['_source'].to_dict()
def test_matcher_any_token_operator(en_vocab):
    """An anchored pattern followed by {'OP': '*'} should yield exactly one
    match covering the whole document."""
    matcher = Matcher(en_vocab)
    matcher.add('TEST', [[{'ORTH': 'test'}, {'OP': '*'}]])
    doc = Doc(en_vocab, words=['test', 'hello', 'world'])
    spans = [doc[start:end].text for (_, start, end) in matcher(doc)]
    assert spans == ['test hello world']
class TestAesXtsFlashEncryption(TestFlashEncryption):
    """Round-trip tests for AES-XTS flash encryption in espsecure."""

    def test_encrypt_decrypt_bootloader(self):
        # 256-bit key, bootloader image at offset 0x1000.
        self._test_encrypt_decrypt('bootloader.bin', 'bootloader-encrypted-aes-xts.bin', '256bit_key.bin', 4096, aes_xts=True)

    def test_encrypt_decrypt_app(self):
        # App image at offset 0x20000.
        self._test_encrypt_decrypt('hello-world-signed.bin', 'hello-world-signed-encrypted-aes-xts.bin', 'ef-flashencryption-key.bin', 131072, aes_xts=True)

    def test_encrypt_decrypt_app_512_bit_key(self):
        # 512-bit key variant at offset 0x10000.
        self._test_encrypt_decrypt('hello-world-signed.bin', 'hello-world-signed-encrypted-aes-xts-256.bin', '512bit_key.bin', 65536, aes_xts=True)

    def test_padding(self):
        """Encrypting in smaller aligned chunks must equal a single-shot
        encryption of the whole buffer (XTS tweak derived from address)."""
        # Arbitrary non-block-multiple plaintext to exercise padding.
        plaintext = binascii.unhexlify('c33b7c49f12a969a9bb45af5f660b73f3bda570df1cf99d1a82eabbfdf6aa16b9675bd8a2f95e871513e1753bc89f57986ecfb2707a3d3b59a469685e6609d2e9c21d4be6e3de2656ee22243f557b925ef39ff782ab56f821e6859ee852000daae7c03a7c77ce58744f15fbdf0ad4ae6e964aedd6316acf0e36935eef895cd14a60fe682fb971eb239eae38b770bdf969017c9decfd91b7c60329fb0c896684f0e7415f99dec1da0572fac360a3e6d7219973a7de07e533b5abfdf5917ed5bfe54d660a6f504732fdb8d07259bfcdc67dac11427b2bae5f00da4a4b2b00b588ff51094c41f07f02f680f8826841b43da3f25b')
        plaintext_file = io.BytesIO(plaintext)
        ciphertext_full_block = io.BytesIO()
        keyfile = self._open('256bit_key.bin')
        address = 4096
        # Reference: encrypt the whole plaintext in one call.
        encrypt_args_padded = self.EncryptArgs(keyfile, ciphertext_full_block, address, None, 'aes_xts', plaintext_file)
        espsecure.encrypt_flash_data(encrypt_args_padded)
        # Re-encrypt in chunks of several sizes; output must be identical.
        bytes_per_encrypt = [16, 32, 64, 128]
        for b in bytes_per_encrypt:
            ciphertext = io.BytesIO()
            num_enc_calls = (len(plaintext) // b)
            for i in range(0, num_enc_calls):
                # Rewind the key stream before each call.
                keyfile.seek(0)
                offset = (b * i)
                plaintext_sub = io.BytesIO(plaintext[offset:(offset + b)])
                encrypt_args = self.EncryptArgs(keyfile, ciphertext, (address + offset), None, 'aes_xts', plaintext_sub)
                espsecure.encrypt_flash_data(encrypt_args)
            assert (ciphertext_full_block.getvalue() == ciphertext.getvalue())
class Track(GObject.Object):
    """Thin wrapper around an RhythmDB entry exposing common metadata.

    Emits 'modified'/'deleted' GObject signals so views can react.

    Fix: the file had two plain methods both named ``rating`` (a getter and a
    setter), so the setter silently shadowed the getter and neither was
    callable sensibly.  This can only have been a property/setter pair whose
    decorators were stripped; they are restored below.
    NOTE(review): the sibling zero-argument accessors (title, artist, ...)
    may also have been properties upstream -- confirm against callers before
    converting them.
    """

    __gsignals__ = {'modified': (GObject.SIGNAL_RUN_LAST, None, ()), 'deleted': (GObject.SIGNAL_RUN_LAST, None, ())}
    __hash__ = GObject.__hash__

    def __init__(self, entry, db=None):
        super(Track, self).__init__()
        self.entry = entry
        # RhythmDB handle, required by the rating setter.
        self._db = db

    def __eq__(self, other):
        return rb.entry_equal(self.entry, other.entry)

    def title(self):
        return self.entry.get_string(RB.RhythmDBPropType.TITLE)

    def artist(self):
        return self.entry.get_string(RB.RhythmDBPropType.ARTIST)

    def album(self):
        return self.entry.get_string(RB.RhythmDBPropType.ALBUM)

    def album_artist(self):
        return self.entry.get_string(RB.RhythmDBPropType.ALBUM_ARTIST)

    def genre(self):
        return self.entry.get_string(RB.RhythmDBPropType.GENRE)

    def year(self):
        return self.entry.get_ulong(RB.RhythmDBPropType.DATE)

    @property
    def rating(self):
        """The entry's rating (double)."""
        return self.entry.get_double(RB.RhythmDBPropType.RATING)

    @rating.setter
    def rating(self, new_rating):
        # Writes through to the database, not just the in-memory entry.
        self._db.entry_set(self.entry, RB.RhythmDBPropType.RATING, new_rating)

    def duration(self):
        return self.entry.get_ulong(RB.RhythmDBPropType.DURATION)

    def location(self):
        return self.entry.get_string(RB.RhythmDBPropType.LOCATION)

    def composer(self):
        return self.entry.get_string(RB.RhythmDBPropType.COMPOSER)

    def track_number(self):
        return self.entry.get_ulong(RB.RhythmDBPropType.TRACK_NUMBER)

    def disc_number(self):
        return self.entry.get_ulong(RB.RhythmDBPropType.DISC_NUMBER)

    def album_artist_sort(self):
        # Fall back through folded sort-name variants for natural ordering.
        sort = (self.entry.get_string(RB.RhythmDBPropType.ALBUM_ARTIST_SORTNAME_FOLDED) or self.entry.get_string(RB.RhythmDBPropType.ALBUM_ARTIST_FOLDED) or self.entry.get_string(RB.RhythmDBPropType.ARTIST_FOLDED))
        return NaturalString(sort)

    def album_sort(self):
        sort = (self.entry.get_string(RB.RhythmDBPropType.ALBUM_SORTNAME_FOLDED) or self.entry.get_string(RB.RhythmDBPropType.ALBUM_FOLDED))
        return NaturalString(sort)

    def is_saveable(self):
        # Whether the entry's type persists metadata back to disk.
        return self.entry.get_entry_type().props.save_to_disk

    def create_ext_db_key(self):
        # Key for external metadata stores (e.g. album art), keyed by album.
        return self.entry.create_ext_db_key(RB.RhythmDBPropType.ALBUM)
class hyperparameters(dict):
    """Dict subclass exposing its entries as attributes.

    Seeded from the module-level ``hparams`` mapping; overrides can be
    applied from ``key=value`` text lines via :meth:`update_params`.
    """

    def __init__(self):
        # Copy the module-level defaults into this instance.
        for (k, v) in hparams.items():
            self.__setattr__(k, v)

    def __setattr__(self, name, value):
        # Attribute writes become dict entries (single storage).
        self[name] = value

    def update_params(self, read_arr):
        """Apply ``key=value`` override lines (values stay strings).

        Bug fix: split on the *first* '=' only, so values containing '='
        (e.g. paths or expressions) are preserved intact.  Previously
        ``line.split('=')[1]`` truncated such values at the second '='.
        """
        for line in read_arr:
            # Keep only the content before any embedded newline, as before.
            line = line.split('\n', 1)[0]
            key, _, val = line.partition('=')
            self.__setattr__(key, val)

    def __getattr__(self, name):
        # Only called for names not found normally; map to dict lookup.
        if name in self:
            return self[name]
        raise AttributeError('No such attribute: ' + name)
def ui_wizard(ui, parent):
    """Create and run a wxPython wizard UI for the given traits ``ui``.

    Each top-level group of the view becomes one wizard page.  Edits are made
    on clones of the context objects; only if the user finishes the wizard
    are the changes copied back onto the originals.  ``ui.result`` is set to
    True/False accordingly.
    """
    # Work on clones so Cancel leaves the original objects untouched.
    ui._context = context = ui.context
    new_context = {name: (None if (value is None) else value.clone_traits()) for (name, value) in context.items()}
    ui.context = new_context
    ui.info.bind_context()
    title = ui.view.title
    if (title == ''):
        title = DefaultTitle
    ui.control = wizard = wz.Wizard(parent, (- 1), title)
    pages = []
    editor_pages = []
    info = ui.info
    shadow_group = ui.view.content.get_shadow(ui)
    min_dx = min_dy = 0
    group_fields_mapping = {}
    for group in shadow_group.get_content():
        # Temporarily blank id/enabled_when so fill_panel_for_group does not
        # process them; they are handled per-page below.
        group_fields_mapping[group] = (group.id, group.enabled_when)
        (group.id, group.enabled_when) = ('', '')
        page = UIWizardPage(wizard, editor_pages)
        pages.append(page)
        fill_panel_for_group(page, group, ui)
        sizer = page.GetSizer()
        sizer.Fit(page)
        size = sizer.CalcMin()
        # Track the largest page so every page gets a uniform size.
        min_dx = max(min_dx, size.GetWidth())
        min_dy = max(min_dy, size.GetHeight())
        (group.id, group.enabled_when) = group_fields_mapping[group]
        if (group.id or group.enabled_when):
            # Page-level editor so the page can be bound/enabled by name.
            page.editor = editor = PageGroupEditor(control=page)
            if group.id:
                page.id = group.id
                editor_pages.append(page)
                info.bind(page.id, editor)
            if group.enabled_when:
                ui.add_enabled(group.enabled_when, editor)
    wizard.SetPageSize(wx.Size(min_dx, min_dy))
    wizard.Bind(wz.EVT_WIZARD_PAGE_CHANGING, page_changing)
    # Chain the pages together in order.
    prev_page = pages[0]
    wizard.FitToPage(prev_page)
    for page in pages[1:]:
        page.SetPrev(prev_page)
        prev_page.SetNext(page)
        prev_page = page
    try:
        ui.prepare_ui()
    except:
        # Tear the half-built wizard down before re-raising.
        ui.control.Destroy()
        ui.control.ui = None
        ui.control = None
        ui.result = False
        raise
    ui.handler.position(ui.info)
    restore_window(ui)
    if wizard.RunWizard(pages[0]):
        # Finished: copy edits from the clones back to the originals.
        original = ui._context
        for (name, value) in ui.context.items():
            if (value is not None):
                original[name].copy_traits(value)
            else:
                original[name] = None
        ui.result = True
    else:
        ui.result = False
    wizard.Unbind(wz.EVT_WIZARD_PAGE_CHANGING, handler=page_changing)
    save_window(ui)
    ui.finish()
    # Restore the original (non-clone) context.
    ui.context = ui._context
    ui._context = {}
def make_gate_polynomials(group_order, eqs):
    """Build the gate selector polynomials from the flattened equations.

    Returns the tuple ``(L, R, M, O, C)`` of coefficient lists, each of
    length *group_order*, where row ``i`` encodes the left, right,
    multiplication, output, and constant selectors of equation ``i``.
    Unused rows stay at ``f_inner(0)``.
    """
    # Five independent zero-filled rows of selectors.
    left, right, mul, out, const = (
        [f_inner(0) for _ in range(group_order)] for _ in range(5)
    )
    for row, (variables, coeffs) in enumerate(eqs):
        left[row] = f_inner(-coeffs.get(variables[0], 0))
        # Only set the right selector when the two wires differ; a squared
        # variable is handled entirely by the multiplication selector.
        if variables[0] != variables[1]:
            right[row] = f_inner(-coeffs.get(variables[1], 0))
        const[row] = f_inner(-coeffs.get('', 0))
        # Output coefficient defaults to 1 (and is not negated).
        out[row] = f_inner(coeffs.get('$output_coeff', 1))
        # The product term only applies when both wires are present.
        if None not in variables:
            mul[row] = f_inner(-coeffs.get(get_product_key(*variables[:2]), 0))
    return (left, right, mul, out, const)
def test_get_bus_bus_switch_indices_from_csv():
    """Bus-bus switch index extraction must reject unknown and duplicate
    connections, and return the correct indices once the tables are clean."""
    node_table = pd.DataFrame(
        [['Bus 1', 'Type 4'],
         ['Bus 3', 'Type 1'],
         ['Bus 4', 'auxiliary'],
         ['Bus 5', 'auxiliary']],
        columns=['id', 'type'])
    switch_table = pd.DataFrame(
        [['Sw 2', 'Bus 1', 'Bus 3'],
         ['Sw 3', 'Bus 1', 'Bus 4'],
         ['Sw 7', 'Bus 1', 'Bus 5'],
         ['Sw 8', 'Bus 4', 'Bus 3'],
         ['Sw 4', 'Bus 4', 'Bus 1'],
         ['Sw 9', 'Bus 3', 'Bus 1'],
         ['Sw 5', 'Bus 5', 'Bus 4'],
         ['Sw 1', 'Bus 1', 'Bus 2']],
        columns=['id', 'nodeA', 'nodeB'])

    def _raises_value_error():
        # True iff the extraction rejects the current tables.
        try:
            sb.get_bus_bus_switch_indices_from_csv(switch_table, node_table)
        except ValueError:
            return True
        return False

    # 'Bus 2' is not in the node table -> must raise.
    assert _raises_value_error()
    switch_table.drop(switch_table.index[-1], inplace=True)
    # Remaining tables still contain an invalid switch -> must raise.
    assert _raises_value_error()
    switch_table.drop(switch_table.index[-1], inplace=True)
    # Clean tables: the expected bus-bus switch indices are returned.
    assert sb.get_bus_bus_switch_indices_from_csv(switch_table, node_table) == [0, 5]
class CerjanMiller(AnaPotBase):
    """Analytical Cerjan-Miller 2D test potential.

    V(x, y) = (a - b*y^2) * x^2 * exp(-x^2) + (c/2) * y^2
    """

    def __init__(self, a=1, b=1, c=1):
        # Render the potential as a sympy-parseable expression string and
        # hand it to the analytical-potential base class with fixed plot
        # limits for the region of interest.
        potential = f'({a}-{b}*y**2)*x**2*exp(-x**2)+{c}/2*y**2'
        limits_x = (-1.3, 1.3)
        limits_y = (-0.7, 1.9)
        super().__init__(V_str=potential, xlim=limits_x, ylim=limits_y)

    def __str__(self):
        return 'CerjanMiller calculator'
class OptionSeriesAreaMarkerStates(Options):
    """Per-state marker option groups (hover/normal/select) of an area series."""

    def hover(self) -> 'OptionSeriesAreaMarkerStatesHover':
        """Marker options applied while a point is hovered."""
        cfg = self._config_sub_data('hover', OptionSeriesAreaMarkerStatesHover)
        return cfg

    def normal(self) -> 'OptionSeriesAreaMarkerStatesNormal':
        """Marker options applied in the normal (idle) state."""
        cfg = self._config_sub_data('normal', OptionSeriesAreaMarkerStatesNormal)
        return cfg

    def select(self) -> 'OptionSeriesAreaMarkerStatesSelect':
        """Marker options applied while a point is selected."""
        cfg = self._config_sub_data('select', OptionSeriesAreaMarkerStatesSelect)
        return cfg
class _FlowSpecL2VPNComponent(_FlowSpecComponentBase):
    """Base class for BGP Flow Specification components used in L2VPN rules.

    The constants below are the numeric component type codes for each
    layer-2 match criterion (presumably per the IETF L2VPN flow-spec
    draft — TODO confirm the exact draft/RFC revision).
    """
    # Ethernet frame matching.
    TYPE_ETHER_TYPE = 14
    TYPE_SOURCE_MAC = 15
    TYPE_DESTINATION_MAC = 16
    # 802.2 LLC header matching.
    TYPE_LLC_DSAP = 17
    TYPE_LLC_SSAP = 18
    TYPE_LLC_CONTROL = 19
    TYPE_SNAP = 20
    # VLAN tag matching (outer and inner tags).
    TYPE_VLAN_ID = 21
    TYPE_VLAN_COS = 22
    TYPE_INNER_VLAN_ID = 23
    TYPE_INNER_VLAN_COS = 24
class Deck():
    """A deck of playing cards supporting shuffled draws, ordered draws,
    and targeted removal of individual cards."""

    def __init__(self, include_suits: List[str]=default_include_suits, include_ranks: List[int]=default_include_ranks):
        # Remember the suit/rank filters so reset() can rebuild the deck.
        self._include_suits = include_suits
        self._include_ranks = include_ranks
        self.reset()

    def __len__(self) -> int:
        # Total size of the deck: cards still held plus cards already dealt.
        return len(self._dealt_cards) + len(self._cards_in_deck)

    def reset(self):
        """Rebuild the full deck, forget all dealt cards, and shuffle."""
        fresh: List[Card] = []
        for suit in self._include_suits:
            for rank in self._include_ranks:
                fresh.append(Card(rank, suit))
        self._cards_in_deck: List[Card] = fresh
        self._dealt_cards: List[Card] = []
        random.shuffle(self._cards_in_deck)

    def pick(self, random: bool=True) -> Card:
        """Deal one card — at a random position by default, otherwise from
        the end of the deck.

        Raises ValueError when no cards remain.
        """
        remaining = len(self._cards_in_deck)
        if not remaining:
            raise ValueError('Deck is empty - please use Deck.reset()')
        # NOTE: the parameter shadows the ``random`` module here; the random
        # draw goes through numpy instead.
        index: int = np.random.randint(remaining, size=None) if random else remaining - 1
        card: Card = self._cards_in_deck.pop(index)
        self._dealt_cards.append(card)
        return card

    def remove(self, card):
        """Move *card* from the deck to the dealt pile if it is still held."""
        if card in self._cards_in_deck:
            self._cards_in_deck.remove(card)
            self._dealt_cards.append(card)
_validator
def validate_enums(request, **kwargs):
    """Convert string-valued enum parameters in ``request.validated`` into
    their corresponding enum members, in place.

    Handles both a single string value and a list of strings per parameter;
    absent parameters are skipped.
    """
    enum_params = (
        ('request', UpdateRequest),
        ('severity', UpdateSeverity),
        ('status', UpdateStatus),
        ('suggest', UpdateSuggestion),
        ('type', UpdateType),
        ('content_type', ContentType),
        ('state', ReleaseState),
        ('package_manager', PackageManager),
        ('gating', TestGatingStatus),
    )
    for param, enum in enum_params:
        value = request.validated.get(param)
        if value is None:
            # Parameter not supplied: nothing to convert.
            continue
        if isinstance(value, str):
            request.validated[param] = enum.from_string(value)
        else:
            # A list of strings: convert every element in place so any other
            # references to the list see the converted members too.
            value[:] = [enum.from_string(item) for item in value]
            request.validated[param] = value
def test():
    """Verify the student's two-token matcher pattern for the exercise."""
    assert len(pattern) == 2, 'El patron deberia describir dos tokens (dos diccionarios).'
    assert all(isinstance(entry, dict) for entry in pattern), 'Cada entrada en el patron deberia ser un diccionario.'
    assert all(len(entry) == 1 for entry in pattern), 'Cada entrada en el patron deberia tener solo un key.'
    # Either capitalization of the attribute name is accepted.
    lemma_keys = ['lemma', 'LEMMA']
    assert any(pattern[0].get(key) == 'descargar' for key in lemma_keys), 'Estas encontrando usando el lemma del primer token?'
    pos_keys = ['pos', 'POS']
    assert any(pattern[1].get(key) == 'PROPN' for key in pos_keys), 'Estas encontrando usando el part-of-speech tag del segundo token y usando el label correcto para un nombre propio?'
    __msg__.good('Buen trabajo!')
def test_execute_benchmark_missing_nighthawk_binary_image():
    """Benchmark execution must fail when the NightHawk binary image is blank
    and no source is available to build one."""
    control = proto_control.JobControl(remote=False, scavenging_benchmark=True)
    image_config = generate_test_objects.generate_images(control)
    # Blank out the binary image so the benchmark has nothing to run.
    image_config.nighthawk_binary_image = ''
    generate_test_objects.generate_envoy_source(control)
    benchmark = full_docker.Benchmark(control, 'test_benchmark')
    with pytest.raises(Exception) as validation_exception:
        benchmark.execute_benchmark()
    expected_message = 'No source specified to build NightHawk image'
    assert str(validation_exception.value) == expected_message
class Extensions(Options):
    """Facade over a wrapped Options instance exposing its toolbar extension
    groups: editors, formatters, mutators, and validators."""

    def __init__(self, options: Options, attrs: dict):
        super(Extensions, self).__init__(options.component, attrs)
        # Keep the wrapped options so each accessor can scope its group on it.
        self.__wrapped = options

    def editors(self) -> TbEditors.ExtsEditors:
        """Editor extension options."""
        return TbEditors.ExtsEditors(self.__wrapped, 'editor')

    def formatters(self) -> TbFormatters.ExtsFormattors:
        """Formatter extension options."""
        return TbFormatters.ExtsFormattors(self.__wrapped, 'formatter')

    def mutators(self) -> TbMutators.ExtsMutators:
        """Mutator extension options."""
        return TbMutators.ExtsMutators(self.__wrapped, 'mutator')

    def validators(self) -> TbValidators.ExtsValidators:
        """Validator extension options."""
        return TbValidators.ExtsValidators(self.__wrapped, 'validator')
class PlotExample4(PlotExample3):
    # Variant of PlotExample3: the right-hand plot is drawn vertically and
    # both plots get LineInspector overlays.
    def _container_default(self):
        """Return the default container: reuse PlotExample3's layout, rotate
        the right plot into a vertical orientation (remapping its grids and
        axes to match), and attach LineInspector overlays to both plots."""
        container = super(PlotExample4, self)._container_default()
        (rplot, lplot) = (self.right_plot, self.left_plot)
        # In vertical orientation the index runs along y and the value along
        # x, so the grid and axis mappers must be swapped accordingly.
        rplot.orientation = 'v'
        rplot.hgrid.mapper = rplot.index_mapper
        rplot.vgrid.mapper = rplot.value_mapper
        rplot.y_axis.mapper = rplot.index_mapper
        rplot.x_axis.mapper = rplot.value_mapper
        # NOTE(review): each plot receives two *identical* overlays below
        # (same axis='value' and settings); one of each pair may have been
        # intended as axis='index' — confirm against the upstream chaco demo.
        lplot.overlays.append(LineInspector(component=lplot, axis='value', write_metadata=True, is_listener=True, color='blue'))
        lplot.overlays.append(LineInspector(component=lplot, axis='value', write_metadata=True, is_listener=True, color='blue'))
        rplot.overlays.append(LineInspector(component=rplot, axis='value', write_metadata=True, is_listener=True, color='blue'))
        rplot.overlays.append(LineInspector(component=rplot, axis='value', write_metadata=True, is_listener=True, color='blue'))
        return container
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.