def Unet(backbone_name='vgg16', input_shape=(None, None, 3), input_tensor=None, encoder_weights='imagenet', freeze_encoder=False, skip_connections='default', decoder_block_type='upsampling', decoder_filters=(256, 128, 64, 32, 16), decoder_use_batchnorm=False, n_upsample_blocks=5, upsample_rates=(2, 2, 2, 2, 2), classes=1, activation='sigmoid'):
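# Assemble a U-Net: the chosen backbone is the encoder, build_unet attaches the decoder; returns the model together with build_unet's hyper-parameter list.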
backbone = get_backbone(backbone_name, input_shape=input_shape, input_tensor=input_tensor, weights=encoder_weights, include_top=False)
if (skip_connections == 'default'):
skip_connections = DEFAULT_SKIP_CONNECTIONS[backbone_name]
(model, hyper_list) = build_unet(backbone, classes, skip_connections, decoder_filters=decoder_filters, block_type=decoder_block_type, activation=activation, n_upsample_blocks=n_upsample_blocks, upsample_rates=upsample_rates, use_batchnorm=decoder_use_batchnorm)
if freeze_encoder:
freeze_model(backbone)
return (model, hyper_list)

def scrap_ids_from_html(html, filter_root_el=None):
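# Scrape audio ids from page HTML (VK-style markup, judging by the class names): playlist snippet blocks are removed, disabled items are skipped, and id/hash tuples are parsed from each item's JSON 'data-audio' attribute.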
if (filter_root_el is None):
filter_root_el = {'id': 'au_search_items'}
soup = BeautifulSoup(html, 'html.parser')
ids = []
root_el = soup.find(**filter_root_el)
if (root_el is None):
raise ValueError('Could not find root el for audio')
playlist_snippets = soup.find_all('div', {'class': 'audioPlaylistSnippet__list'})
for playlist in playlist_snippets:
playlist.decompose()
for audio in root_el.find_all('div', {'class': 'audio_item'}):
if ('audio_item_disabled' in audio['class']):
continue
data_audio = json.loads(audio['data-audio'])
audio_hashes = data_audio[13].split('/')
full_id = (str(data_audio[1]), str(data_audio[0]), audio_hashes[2], audio_hashes[5])
if all(full_id):
ids.append(full_id)
return ids

def run_posix(args):
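# Benchmark plain POSIX I/O: time one write of args.nbytes bytes and one cupy.fromfile read of the same file; returns (read_time, write_time).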
file_path = (args.dir / 'posix-single-file')
data = create_data(args.nbytes)
f = open(file_path, 'wb')
t0 = clock()
res = f.write(data.tobytes())
f.close()
write_time = (clock() - t0)
assert (res == args.nbytes)
f = open(file_path, 'rb')
t0 = clock()
a = cupy.fromfile(f, dtype='uint8', count=len(data))
f.close()
read_time = (clock() - t0)
assert (a.nbytes == args.nbytes), f'IO mismatch, expected {args.nbytes} got {a.nbytes}'
return (read_time, write_time)

class TestDynamicDiscriminatorModel():
def test_serialize_parent(self):
m = DynamicMapDiscriminatorTestModel()
m.hash_key = 'foo'
m.value.string_attr = 'foostr'
m.value.bar_attribute = 3
assert (m.serialize() == {'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': {'M': {'string_attr': {'S': 'foostr'}, 'bar_attribute': {'N': '3'}}}})
def test_deserialize_parent(self):
item = {'hash_key': {'S': 'foo'}, 'type': {'S': 'Parent'}, 'value': {'M': {'string_attr': {'S': 'foostr'}, 'bar_attribute': {'N': '3'}}}}
m = DynamicMapDiscriminatorTestModel.from_raw_data(item)
assert (m.hash_key == 'foo')
assert m.value
assert (m.value.string_attr == 'foostr')
assert (m.value.bar_attribute == 3)
def test_serialize_child(self):
m = DynamicMapDiscriminatorChildTestModel()
m.hash_key = 'foo'
m.value = 'string val'
assert (m.serialize() == {'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'string val'}})
def test_deserialize_child(self):
item = {'hash_key': {'S': 'foo'}, 'type': {'S': 'Child'}, 'value': {'S': 'string val'}}
m = DynamicMapDiscriminatorChildTestModel.from_raw_data(item)
assert (m.hash_key == 'foo')
assert (m.value == 'string val')

class MpiCommunicationManager(BaseCommunicationManager):
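# Queue-based messaging over MPI: a send thread drains q_sender while a receive thread fills q_receiver; handle_receive_message fans incoming messages out to registered observers.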
def __init__(self, comm, rank, size, node_type='client'):
self.comm = comm
self.rank = rank
self.size = size
self._observers: List[Observer] = []
if (node_type == 'client'):
(self.q_sender, self.q_receiver) = self.init_client_communication()
elif (node_type == 'server'):
(self.q_sender, self.q_receiver) = self.init_server_communication()
self.server_send_thread = None
self.server_receive_thread = None
self.server_collective_thread = None
self.client_send_thread = None
self.client_receive_thread = None
self.client_collective_thread = None
self.is_running = True
def init_server_communication(self):
server_send_queue = queue.Queue(0)
self.server_send_thread = MPISendThread(self.comm, self.rank, self.size, 'ServerSendThread', server_send_queue)
self.server_send_thread.start()
server_receive_queue = queue.Queue(0)
self.server_receive_thread = MPIReceiveThread(self.comm, self.rank, self.size, 'ServerReceiveThread', server_receive_queue)
self.server_receive_thread.start()
return (server_send_queue, server_receive_queue)
def init_client_communication(self):
client_send_queue = queue.Queue(0)
self.client_send_thread = MPISendThread(self.comm, self.rank, self.size, 'ClientSendThread', client_send_queue)
self.client_send_thread.start()
client_receive_queue = queue.Queue(0)
self.client_receive_thread = MPIReceiveThread(self.comm, self.rank, self.size, 'ClientReceiveThread', client_receive_queue)
self.client_receive_thread.start()
return (client_send_queue, client_receive_queue)
def send_message(self, msg: Message):
self.q_sender.put(msg)
def add_observer(self, observer: Observer):
self._observers.append(observer)
def remove_observer(self, observer: Observer):
self._observers.remove(observer)
def handle_receive_message(self):
self.is_running = True
while self.is_running:
if (self.q_receiver.qsize() > 0):
msg_params = self.q_receiver.get()
self.notify(msg_params)
time.sleep(0.03)
logging.info('!!!!!!handle_receive_message stopped!!!')
def stop_receive_message(self):
self.is_running = False
self.__stop_thread(self.server_send_thread)
self.__stop_thread(self.server_receive_thread)
self.__stop_thread(self.server_collective_thread)
self.__stop_thread(self.client_send_thread)
self.__stop_thread(self.client_receive_thread)
self.__stop_thread(self.client_collective_thread)
def notify(self, msg_params):
msg_type = msg_params.get_type()
for observer in self._observers:
observer.receive_message(msg_type, msg_params)
def __stop_thread(self, thread):
if thread:
thread.raise_exception()
thread.join()

def run_build(sources: list[BuildSource], options: Options, fscache: FileSystemCache, t0: float, stdout: TextIO, stderr: TextIO) -> tuple[((build.BuildResult | None), list[str], bool)]:
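# Drive a single mypy build: collect (and optionally pretty-print) error messages per file, warn about unused config sections, and write JUnit XML when configured; returns (build result, messages, blockers flag).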
formatter = util.FancyFormatter(stdout, stderr, options.hide_error_codes)
messages = []
messages_by_file = defaultdict(list)
def flush_errors(filename: (str | None), new_messages: list[str], serious: bool) -> None:
if options.pretty:
new_messages = formatter.fit_in_terminal(new_messages)
messages.extend(new_messages)
if new_messages:
messages_by_file[filename].extend(new_messages)
if options.non_interactive:
return
f = (stderr if serious else stdout)
show_messages(new_messages, f, formatter, options)
serious = False
blockers = False
res = None
try:
res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)
except CompileError as e:
blockers = True
if (not e.use_stdout):
serious = True
if (options.warn_unused_configs and options.unused_configs and (not options.incremental) and (not options.non_interactive)):
print('Warning: unused section(s) in {}: {}'.format(options.config_file, get_config_module_names(options.config_file, [glob for glob in options.per_module_options.keys() if (glob in options.unused_configs)])), file=stderr)
maybe_write_junit_xml((time.time() - t0), serious, messages, messages_by_file, options)
return (res, messages, blockers)

def _create_datapipe_queue_loop(source_datapipe, req_queue, res_queue, process_name, loop_id, worker_info, custom_reset_fn=None, blocking_request_get=True, request_counter=None):
if isinstance(source_datapipe, IterDataPipe):
pipe_type = communication.iter
protocol_type = communication.protocol.IterDataPipeQueueProtocolServer
elif isinstance(source_datapipe, MapDataPipe):
pipe_type = communication.map
protocol_type = communication.protocol.MapDataPipeQueueProtocolServer
else:
raise Exception('Only supports IterDataPipe or MapDataPipe, got', source_datapipe)
return pipe_type.DataPipeBehindQueues(source_datapipe, protocol_type(req_queue, res_queue), process_name=process_name, loop_id=loop_id, worker_info=worker_info, custom_reset_fn=custom_reset_fn, blocking_request_get=blocking_request_get, request_counter=request_counter)

def train_epoch(epoch, model, loader, optimizer, args, lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None, neptune=None):
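# One training epoch: forward pass under the AMP autocast context, backward via loss_scaler when given (plain backward plus optional gradient clipping otherwise), with periodic logging, recovery checkpoints, EMA and LR-scheduler updates.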
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = (len(loader) - 1)
num_updates = (epoch * len(loader))
for (batch_idx, (input, target)) in enumerate(loader):
last_batch = (batch_idx == last_idx)
data_time_m.update((time.time() - end))
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input, target)
loss = output['loss']
if args.neptune:
neptune.log_metric('train/loss', loss.item())
if (not args.distributed):
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if (loss_scaler is not None):
loss_scaler(loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters())
else:
loss.backward()
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if (model_ema is not None):
model_ema.update(model)
num_updates += 1
batch_time_m.update((time.time() - end))
if (last_batch or ((batch_idx % args.log_interval) == 0)):
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = (sum(lrl) / len(lrl))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if (args.local_rank == 0):
logging.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader), ((100.0 * batch_idx) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((input.size(0) * args.world_size) / batch_time_m.val), rate_avg=((input.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
if (args.save_images and output_dir):
torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
saver.save_recovery(epoch, batch_idx=batch_idx)
if (lr_scheduler is not None):
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])

class TestValidateWebhookResponse():
def test_spec_not_detected(self, spec_invalid):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
with pytest.raises(SpecError):
validate_webhook_response(request, response, spec=spec_invalid)
def test_spec_not_supported(self, spec_v20):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
with pytest.raises(SpecError):
validate_webhook_response(request, response, spec=spec_v20)
def test_request_type_invalid(self, spec_v31):
request = mock.sentinel.request
response = mock.Mock(spec=Response)
with pytest.raises(TypeError):
validate_webhook_response(request, response, spec=spec_v31)
def test_response_type_invalid(self, spec_v31):
request = mock.Mock(spec=WebhookRequest)
response = mock.sentinel.response
with pytest.raises(TypeError):
validate_webhook_response(request, response, spec=spec_v31)
def test_spec_type_invalid(self):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
spec = mock.sentinel.spec
with pytest.raises(TypeError):
validate_webhook_response(request, response, spec=spec)
def test_cls_type_invalid(self, spec_v31):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
with pytest.raises(TypeError):
validate_webhook_response(request, response, spec=spec_v31, cls=Exception)
def test_spec_oas30_validator_not_found(self, spec_v30):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
with pytest.raises(SpecError):
validate_webhook_response(request, response, spec=spec_v30)
@mock.patch('openapi_core.validation.response.validators.WebhookResponseValidator.validate')
def test_request_response(self, mock_validate, spec_v31):
request = mock.Mock(spec=WebhookRequest)
response = mock.Mock(spec=Response)
result = validate_webhook_response(request, response, spec=spec_v31)
assert (result is None)
mock_validate.assert_called_once_with(request, response)

def test_scenariooutline_example_column_width():
scenario_outline = ScenarioOutline(1, 'Scenario Outline', 'Examples', 'I am a Scenario Outline', 'foo.feature', 1, parent=None, tags=None, preconditions=None, background=None)
scenario_outline.examples_header = ['foo', 'bar']
scenario_outline.examples = [ScenarioOutline.Example(['Spiderman', 'Batman'], 'foo.feature', 1), ScenarioOutline.Example(['Peter', 'Bruce Wayne'], 'foo.feature', 2)]
index_0_width = scenario_outline.get_column_width(0)
assert (index_0_width == len('Spiderman'))
index_1_width = scenario_outline.get_column_width(1)
assert (index_1_width == len('Bruce Wayne'))

def _prep_cmd(python, script, opts, runid, on_set_envvar=None):
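# Assemble the benchmark child-process command line and environment; PYPERFORMANCE_RUNID is exported so the child can tag its results with the run id.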
env = dict(os.environ)
def set_envvar(name, value):
env[name] = value
if (on_set_envvar is not None):
on_set_envvar(name)
set_envvar('PYPERFORMANCE_RUNID', str(runid))
argv = [python, '-u', script, *(opts or ())]
return (argv, env)

class KITTIRAWDataset(KITTIDataset):
def __init__(self, *args, **kwargs):
super(KITTIRAWDataset, self).__init__(*args, **kwargs)
if (self.dataset_usage == DataSetUsage.TEST):
return
self.resize_seg = transforms.Resize((self.height, self.width), interpolation=Image.BILINEAR)
def get_image_path(self, folder, frame_index, side, seg=False):
f_str = '{:010d}{}'.format(frame_index, ('.png' if seg else self.img_ext))
assert (side is not None)
if seg:
image_path = os.path.join(self.data_path, folder, 'image_0{}'.format(self.side_map[side]), f_str)
else:
image_path = os.path.join(self.data_path, folder, 'image_0{}/data'.format(self.side_map[side]), f_str)
return image_path
def get_item_custom(self, inputs, folder, frame_index, side, do_flip):
if (self.dataset_usage == DataSetUsage.TEST):
return
raw_seg = self.get_seg_map(folder, frame_index, side, do_flip)
seg = self.resize_seg(raw_seg)
inputs[('seg', 0, 0)] = torch.tensor(np.array(seg)).float().unsqueeze(0)
def get_seg_map(self, folder, frame_index, side, do_flip):
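# Load the palettized segmentation map, remap raw label ids to train ids via the labels table, and mirror it when the RGB input was flipped.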
path = self.get_image_path(folder, frame_index, side, True)
path = path.replace('KITTI-RAW', 'KITTI-RAW/segmentation')
seg = self.loader(path, mode='P')
seg_copy = np.array(seg.copy())
for k in np.unique(seg):
seg_copy[(seg_copy == k)] = labels[k].trainId
seg = Image.fromarray(seg_copy, mode='P')
if do_flip:
seg = seg.transpose(pil.FLIP_LEFT_RIGHT)
return seg
def get_depth(self, folder, frame_index, side, do_flip):
calib_path = os.path.join(self.data_path, folder.split('/')[0])
velo_filename = os.path.join(self.data_path, folder, 'velodyne_points/data/{:010d}.bin'.format(int(frame_index)))
depth_gt = generate_depth_map(calib_path, velo_filename, self.side_map[side])
depth_gt = skimage.transform.resize(depth_gt, self.full_res_shape[::(- 1)], order=0, preserve_range=True, mode='constant')
if do_flip:
depth_gt = np.fliplr(depth_gt)
return depth_gt

class SingleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if (not mid_channels):
mid_channels = out_channels
self.single_conv = nn.Sequential(nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True))
def forward(self, x):
return self.single_conv(x)

class TextIndexParams(BaseModel, extra='forbid'):
type: 'TextIndexType' = Field(..., description='')
tokenizer: Optional['TokenizerType'] = Field(default=None, description='')
min_token_len: Optional[int] = Field(default=None, description='')
max_token_len: Optional[int] = Field(default=None, description='')
lowercase: Optional[bool] = Field(default=None, description='If true, lowercase all tokens. Default: true')

class ReferenceTests(TestCase):
def test_read(self):
ref = Reference('initial')
self.assertEqual(ref.read(), Effect(ReadReference(ref=ref)))
def test_modify(self):
ref = Reference(0)
transformer = (lambda x: (x + 1))
eff = ref.modify(transformer)
self.assertEqual(eff, Effect(ModifyReference(ref=ref, transformer=transformer)))
def test_perform_read(self):
ref = Reference('initial')
result = sync_perform(reference_dispatcher, ref.read())
self.assertEqual(result, 'initial')
def test_perform_modify(self):
ref = Reference(0)
transformer = (lambda x: (x + 1))
result = sync_perform(reference_dispatcher, ref.modify(transformer))
self.assertEqual(result, 1)
self.assertEqual(sync_perform(reference_dispatcher, ref.read()), 1)

class PyrogramClient(BridgedClient):
def __init__(self, cache_duration: int, client: Client):
self._app: Client = client
if (VersionManager.version_tuple(pyrogram.__version__) > VersionManager.version_tuple('2.0.0')):
self._app.send = self._app.invoke
self._cache: ClientCache = ClientCache(cache_duration, self)
@self._app.on_raw_update()
async def on_update(_, update, __, data2):
if isinstance(update, UpdateGroupCallParticipants):
participants = update.participants
for participant in participants:
result = self._cache.set_participants_cache(update.call.id, self.chat_id(participant.peer), participant.muted, participant.volume, participant.can_self_unmute, ((participant.video is not None) or (participant.presentation is not None)), (participant.presentation is not None), (participant.video is not None), participant.raise_hand_rating, participant.left)
if (result is not None):
if ('PARTICIPANTS_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('PARTICIPANTS_HANDLER', self._cache.get_chat_id(update.call.id), result, participant.just_joined, participant.left))
if isinstance(update, UpdateGroupCall):
chat_id = self.chat_id(data2[update.chat_id])
if isinstance(update.call, GroupCall):
if (update.call.schedule_date is None):
self._cache.set_cache(chat_id, InputGroupCall(access_hash=update.call.access_hash, id=update.call.id))
if isinstance(update.call, GroupCallDiscarded):
self._cache.drop_cache(chat_id)
if ('CLOSED_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('CLOSED_HANDLER', chat_id))
if isinstance(update, UpdateChannel):
chat_id = self.chat_id(update)
if (len(data2) > 0):
if isinstance(data2[update.channel_id], ChannelForbidden):
self._cache.drop_cache(chat_id)
if ('KICK_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('KICK_HANDLER', chat_id))
if (isinstance(update, UpdateNewChannelMessage) or isinstance(update, UpdateNewMessage)):
if isinstance(update.message, MessageService):
if isinstance(update.message.action, MessageActionInviteToGroupCall):
if ('INVITE_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('INVITE_HANDLER', update.message.action))
if isinstance(update.message.action, MessageActionChatDeleteUser):
if isinstance(update.message.peer_id, PeerChat):
chat_id = self.chat_id(update.message.peer_id)
if isinstance(data2[update.message.peer_id.chat_id], ChatForbidden):
self._cache.drop_cache(chat_id)
if ('KICK_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('KICK_HANDLER', chat_id))
if isinstance(data2, Dict):
for group_id in data2:
if (isinstance(update, UpdateNewChannelMessage) or isinstance(update, UpdateNewMessage)):
if isinstance(update.message, MessageService):
if (isinstance(data2[group_id], Channel) or isinstance(data2[group_id], Chat)):
chat_id = self.chat_id(data2[group_id])
if data2[group_id].left:
self._cache.drop_cache(chat_id)
if ('LEFT_HANDLER' in self.HANDLERS_LIST):
(await self._propagate('LEFT_HANDLER', chat_id))
raise ContinuePropagation()
async def _propagate(self, event_name: str, *args, **kwargs):
for event in self.HANDLERS_LIST[event_name]:
asyncio.ensure_future(event(*args, **kwargs))
def on_closed_voice_chat(self) -> Callable:
def decorator(func: Callable) -> Callable:
if (self is not None):
self.HANDLERS_LIST['CLOSED_HANDLER'].append(func)
return func
return decorator
def on_kicked(self) -> Callable:
def decorator(func: Callable) -> Callable:
if (self is not None):
self.HANDLERS_LIST['KICK_HANDLER'].append(func)
return func
return decorator
def on_receive_invite(self) -> Callable:
def decorator(func: Callable) -> Callable:
if (self is not None):
self.HANDLERS_LIST['INVITE_HANDLER'].append(func)
return func
return decorator
def on_left_group(self) -> Callable:
def decorator(func: Callable) -> Callable:
if (self is not None):
self.HANDLERS_LIST['LEFT_HANDLER'].append(func)
return func
return decorator
def on_participants_change(self) -> Callable:
def decorator(func: Callable) -> Callable:
if (self is not None):
self.HANDLERS_LIST['PARTICIPANTS_HANDLER'].append(func)
return func
return decorator
async def get_call(self, chat_id: int) -> Optional[InputGroupCall]:
chat = (await self._app.resolve_peer(chat_id))
if isinstance(chat, InputPeerChannel):
input_call = (await self._app.send(GetFullChannel(channel=InputChannel(channel_id=chat.channel_id, access_hash=chat.access_hash)))).full_chat.call
else:
input_call = (await self._app.send(GetFullChat(chat_id=chat.chat_id))).full_chat.call
if (input_call is not None):
call: GroupCall = (await self._app.send(GetGroupCall(call=input_call, limit=(- 1)))).call
if (call.schedule_date is not None):
return None
return input_call
async def get_group_call_participants(self, chat_id: int):
return (await self._cache.get_participant_list(chat_id))
async def get_participants(self, input_call: InputGroupCall):
return [{'user_id': self.chat_id(participant.peer), 'muted': participant.muted, 'volume': participant.volume, 'can_self_unmute': participant.can_self_unmute, 'video': participant.video, 'presentation': participant.presentation, 'raise_hand_rating': participant.raise_hand_rating, 'left': participant.left} for participant in (await self._app.send(GetGroupParticipants(call=input_call, ids=[], sources=[], offset='', limit=500))).participants]
async def join_group_call(self, chat_id: int, json_join: str, invite_hash: str, have_video: bool, join_as: InputPeer) -> str:
chat_call = (await self._cache.get_full_chat(chat_id))
if (chat_call is not None):
result: Updates = (await self._app.send(JoinGroupCall(call=chat_call, params=DataJSON(data=json_join), muted=False, join_as=join_as, video_stopped=have_video, invite_hash=invite_hash)))
for update in result.updates:
if isinstance(update, UpdateGroupCallParticipants):
participants = update.participants
for participant in participants:
self._cache.set_participants_cache(update.call.id, self.chat_id(participant.peer), participant.muted, participant.volume, participant.can_self_unmute, ((participant.video is not None) or (participant.presentation is not None)), (participant.presentation is not None), (participant.video is not None), participant.raise_hand_rating, participant.left)
if isinstance(update, UpdateGroupCallConnection):
return update.params.data
return json.dumps({'transport': None})
async def create_group_call(self, chat_id: int):
result: Updates = (await self._app.send(CreateGroupCall(peer=(await self.resolve_peer(chat_id)), random_id=self.rnd_id())))
for update in result.updates:
if isinstance(update, UpdateGroupCall):
if isinstance(update.call, GroupCall):
if (update.call.schedule_date is None):
self._cache.set_cache(chat_id, InputGroupCall(access_hash=update.call.access_hash, id=update.call.id))
async def leave_group_call(self, chat_id: int):
chat_call = (await self._cache.get_full_chat(chat_id))
if (chat_call is not None):
(await self._app.send(LeaveGroupCall(call=chat_call, source=0)))
async def change_volume(self, chat_id: int, volume: int, participant: InputPeer):
chat_call = (await self._cache.get_full_chat(chat_id))
if (chat_call is not None):
(await self._app.send(EditGroupCallParticipant(call=chat_call, participant=participant, muted=False, volume=(volume * 100))))
async def set_call_status(self, chat_id: int, muted_status: Optional[bool], paused_status: Optional[bool], stopped_status: Optional[bool], participant: InputPeer):
chat_call = (await self._cache.get_full_chat(chat_id))
if (chat_call is not None):
(await self._app.send(EditGroupCallParticipant(call=chat_call, participant=participant, muted=muted_status, video_stopped=stopped_status, video_paused=paused_status)))
async def get_full_chat(self, chat_id: int):
return (await self._cache.get_full_chat(chat_id))
async def resolve_peer(self, user_id: Union[(int, str)]) -> InputPeer:
return (await self._app.resolve_peer(user_id))
async def get_id(self) -> int:
return (await self._app.get_me()).id
def is_connected(self) -> bool:
return self._app.is_connected
async def start(self):
(await self._app.start())

class Label():
def __init__(self) -> None:
self.start: Optional[int] = None
self.end: Optional[int] = None
self.tag: Optional[str] = None
def __eq__(self, other) -> bool:
return ((self.start == other.start) and (self.end == other.end) and (self.tag == other.tag))
def __str__(self) -> str:
return ((((str(self.start) + ',') + str(self.end)) + ' ') + self.tag)

class CassandraMigration(DatabaseMigration):
def __init__(self) -> None:
self._db_config = CassandraConfig()
super(CassandraMigration, self).__init__(CassandraClient(host=[self._db_config.host], keyspace=self._db_config.keyspace, user=self._db_config.username, password=self._db_config.password))
@staticmethod
def _get_parsed_columns(columns: List[Diff]) -> List[str]:
parsed_columns = []
for col in columns:
parsed_columns.append(f'{col.column} {col.value}')
parsed_columns = ', '.join(parsed_columns)
return parsed_columns
def _get_alter_table_add_query(self, columns: List[Diff], table_name: str) -> str:
parsed_columns = self._get_parsed_columns(columns)
return f'ALTER TABLE {table_name} ADD ({parsed_columns});'
def _get_alter_column_type_query(self, column: Diff, table_name: str) -> str:
parsed_columns = self._get_parsed_columns([column])
return f"ALTER TABLE {table_name} ALTER {parsed_columns.replace(' ', ' TYPE ')};"
@staticmethod
def _get_create_table_query(columns: List[Dict[(str, Any)]], table_name: str) -> str:
parsed_columns = []
primary_keys = []
for col in columns:
col_str = f"{col['column_name']} {col['type']}"
if col['primary_key']:
primary_keys.append(col['column_name'])
parsed_columns.append(col_str)
joined_parsed_columns = ', '.join(parsed_columns)
if (len(primary_keys) > 0):
joined_primary_keys = ', '.join(primary_keys)
columns_str = f'{joined_parsed_columns}, PRIMARY KEY ({joined_primary_keys})'
else:
columns_str = joined_parsed_columns
keyspace = CassandraConfig().keyspace
return f'CREATE TABLE {keyspace}.{table_name} ({columns_str});'
def _get_alter_table_drop_query(self, columns: List[Diff], table_name: str) -> str:
parsed_columns = self._get_parsed_columns(columns)
return f'ALTER TABLE {table_name} DROP ({parsed_columns});'

class TestMPM(TestCase):
def test_basic_processing(self):
options = {'thermal': 'isothermal'}
model = pybamm.lithium_ion.MPM(options)
param = pybamm.ParameterValues('Ecker2015')
param = pybamm.get_size_distribution_parameters(param)
modeltest = tests.StandardModelTest(model)
modeltest.test_all()
def test_optimisations(self):
options = {'thermal': 'isothermal'}
model = pybamm.lithium_ion.MPM(options)
optimtest = tests.OptimisationsTest(model)
original = optimtest.evaluate_model()
to_python = optimtest.evaluate_model(to_python=True)
np.testing.assert_array_almost_equal(original, to_python)
if pybamm.have_jax():
to_jax = optimtest.evaluate_model(to_jax=True)
np.testing.assert_array_almost_equal(original, to_jax)
def test_set_up(self):
model = pybamm.lithium_ion.MPM()
optimtest = tests.OptimisationsTest(model)
optimtest.set_up_model(to_python=True)
optimtest.set_up_model(to_python=False)
def test_particle_uniform(self):
options = {'particle': 'uniform profile'}
model = pybamm.lithium_ion.MPM(options)
modeltest = tests.StandardModelTest(model)
modeltest.test_all()
def test_differential_surface_form(self):
options = {'surface form': 'differential'}
model = pybamm.lithium_ion.MPM(options)
modeltest = tests.StandardModelTest(model)
modeltest.test_all()
def test_current_sigmoid_ocp(self):
options = {'open-circuit potential': ('current sigmoid', 'single')}
model = pybamm.lithium_ion.MPM(options)
parameter_values = pybamm.ParameterValues('Chen2020')
parameter_values = pybamm.get_size_distribution_parameters(parameter_values)
parameter_values.update({'Negative electrode lithiation OCP [V]': parameter_values['Negative electrode OCP [V]'], 'Negative electrode delithiation OCP [V]': parameter_values['Negative electrode OCP [V]']}, check_already_exists=False)
modeltest = tests.StandardModelTest(model, parameter_values=parameter_values)
modeltest.test_all(skip_output_tests=True)
def test_voltage_control(self):
options = {'operating mode': 'voltage'}
model = pybamm.lithium_ion.MPM(options)
param = model.default_parameter_values
param.update({'Voltage function [V]': 3.8}, check_already_exists=False)
modeltest = tests.StandardModelTest(model, parameter_values=param)
modeltest.test_all(skip_output_tests=True)
def test_conservation_each_electrode(self):
models = [pybamm.lithium_ion.SPM(), pybamm.lithium_ion.MPM()]
var_pts = {'R_n': 3, 'R_p': 3}
solver = pybamm.CasadiSolver(mode='fast')
neg_Li = []
pos_Li = []
for model in models:
sim = pybamm.Simulation(model, solver=solver)
sim.var_pts.update(var_pts)
solution = sim.solve([0, 3500])
neg = solution['Total lithium in negative electrode [mol]'].entries[(- 1)]
pos = solution['Total lithium in positive electrode [mol]'].entries[(- 1)]
neg_Li.append(neg)
pos_Li.append(pos)
np.testing.assert_array_almost_equal(neg_Li[0], neg_Li[1], decimal=13)
np.testing.assert_array_almost_equal(pos_Li[0], pos_Li[1], decimal=13)

class ControlledAsymmetricLowRankTrotterStep(LowRankTrotterStep):
def trotter_step(self, qubits: Sequence[cirq.Qid], time: float, control_qubit: Optional[cirq.Qid]=None) -> cirq.OP_TREE:
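# Controlled asymmetric Trotter step: rotate into each low-rank eigenbasis with bogoliubov_transform, apply controlled phase rotations for the one- and two-body terms (the latter through a swap network), then undo the final basis change.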
if (not isinstance(control_qubit, cirq.Qid)):
raise TypeError('Control qudit must be specified.')
n_qubits = len(qubits)
(yield bogoliubov_transform(qubits, self.one_body_basis_change_matrix.T.conj()))
for p in range(n_qubits):
(yield rot11(rads=((- self.one_body_energies[p]) * time)).on(control_qubit, qubits[p]))
prior_basis_matrix = self.one_body_basis_change_matrix
for j in range(len(self.eigenvalues)):
two_body_coefficients = self.scaled_density_density_matrices[j]
basis_change_matrix = self.basis_change_matrices[j]
merged_basis_change_matrix = numpy.dot(prior_basis_matrix, basis_change_matrix.T.conj())
(yield bogoliubov_transform(qubits, merged_basis_change_matrix))
(yield swap_network(qubits, (lambda p, q, a, b: rot111((((- 2) * two_body_coefficients[(p, q)]) * time)).on(cast(cirq.Qid, control_qubit), a, b))))
qubits = qubits[::(- 1)]
(yield (rot11(rads=((- two_body_coefficients[(k, k)]) * time)).on(control_qubit, qubits[k]) for k in range(n_qubits)))
prior_basis_matrix = basis_change_matrix
(yield bogoliubov_transform(qubits, prior_basis_matrix))
(yield cirq.rz(rads=((- self.hamiltonian.constant) * time)).on(control_qubit))
def step_qubit_permutation(self, qubits: Sequence[cirq.Qid], control_qubit: Optional[cirq.Qid]=None) -> Tuple[(Sequence[cirq.Qid], Optional[cirq.Qid])]:
if (len(self.eigenvalues) & 1):
return (qubits[::(- 1)], control_qubit)
else:
return (qubits, control_qubit)
def finish(self, qubits: Sequence[cirq.Qid], n_steps: int, control_qubit: Optional[cirq.Qid]=None, omit_final_swaps: bool=False) -> cirq.OP_TREE:
if (not omit_final_swaps):
if ((n_steps & 1) and (len(self.eigenvalues) & 1)):
(yield swap_network(qubits))

class InputOutputOracle():
def __init__(self, gr: TritonGrammar, inputs: List[Input], f_name: Union[(Path, str)]=''):
self._name = Path(f_name)
self.grammar = gr
self._bitsize = self.grammar.size
self.expr_cache = {}
self.lookup_count = 0
self.lookup_found = 0
self.cache_hit = 0
self._ectx = None
self.watchdog = None
self.max_mem = 0
self.stop = False
self.inputs = inputs
@property
def size(self) -> int:
raise NotImplementedError('Should be implemented by child class')
def _get_item(self, h: Hash) -> Optional[str]:
raise NotImplementedError('Should be implemented by child class')
def is_expr_compatible(self, expr: TritonAst) -> bool:
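# Compatible means: for every variable bit-width the expression uses, the grammar offers at least that many variables of the same width.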
e_vars = Counter((x.getBitSize() for x in expr.symvars))
e_table = Counter(self.grammar.vars_dict.values())
for (sz, count) in e_vars.items():
if (sz in e_table):
if (count > e_table[sz]):
return False
else:
return False
return True
def lookup(self, outputs: List[Output], *args, use_cache: bool=True) -> Optional[TritonAst]:
self.lookup_count += 1
h = self.hash(outputs)
if ((h in self.expr_cache) and use_cache):
self.cache_hit += 1
return self.expr_cache[h]
else:
v = self._get_item(h)
if v:
self.lookup_found += 1
try:
e = self.grammar.str_to_expr(v, *args)
self.expr_cache[h] = e
return e
except NameError:
return None
except TypeError:
return None
else:
return None
def lookup_hash(self, h: Hash) -> Optional[str]:
return self._get_item(h)
@property
def is_writable(self) -> bool:
return False
@property
def name(self) -> str:
return str(self._name)
@property
def bitsize(self) -> BitSize:
return self._bitsize
@property
def var_number(self) -> int:
return len(self.grammar.vars)
@property
def operator_number(self) -> int:
return len(self.grammar.ops)
@property
def input_number(self) -> int:
return len(self.inputs)
def hash(self, outs: List[Output]) -> Hash:
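# Fingerprint an output vector: pack the outputs as unsigned 64-bit integers and hash them with MD5.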
a = array.array('Q', outs)
h = hashlib.md5(a.tobytes())
return h.digest()
def __iter__(self) -> Iterable[Tuple[(Hash, str)]]:
raise NotImplementedError('Should be implemented by child class')
def _get_expr(self, expr: str) -> AstNode:
if (self._ectx is None):
self._ectx = _EvalCtx(self.grammar)
return self._ectx.eval_str(expr)
def _set_input_lcontext(self, i: Union[(int, Input)]) -> None:
if (self._ectx is None):
self._ectx = _EvalCtx(self.grammar)
self._ectx.set_symvar_values((self.inputs[i] if isinstance(i, int) else i))
def _eval_expr_inputs(self, expr: AstNode) -> List[Output]:
outs = []
for i in range(len(self.inputs)):
self._set_input_lcontext(i)
outs.append(expr.evaluate())
return outs
def _watchdog_worker(self, threshold: Union[(float, int)]) -> None:
while (not self.stop):
sleep(2)
mem = psutil.virtual_memory()
self.max_mem = max(mem.used, self.max_mem)
if (mem.percent >= threshold):
logger.warning(f'Threshold reached: {mem.percent}%')
self.stop = True
@staticmethod
def _try_linearize(s: str, symbols: Dict[(str, object)]) -> str:
import sympy
try:
lin = eval(s, symbols)
if isinstance(lin, sympy.boolalg.BooleanFalse):
logger.error(f'[linearization] expression {s} False')
logger.debug(f'[linearization] expression linearized {s} => {lin}')
return str(lin).replace(' ', '')
except TypeError:
return s
except AttributeError as _:
return s
@staticmethod
def _to_signed(value: int) -> int:
return ctypes.c_longlong(value).value
@staticmethod
def _to_unsigned(value: int) -> int:
return ctypes.c_ulonglong(value).value
@staticmethod
def _is_constant(v1: str) -> bool:
try:
int(v1)
return True
except ValueError:
return False
@staticmethod
def _custom_permutations(l: List[Any]) -> Generator[(Tuple[(bool, Any, Any)], None, None)]:
for i in range(len(l)):
for j in range(0, i):
(yield (False, l[i], l[j]))
(yield (False, l[j], l[i]))
(yield (True, l[i], l[i]))
def generate(self, bitsize: int, constants: List[int]=[], do_watch: bool=False, watchdog_threshold: Union[(int, float)]=90, linearize: bool=False, do_use_blacklist: bool=False, limit: int=0) -> None:
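# Bottom-up enumeration: seed the worklist with variables and constants evaluated on all inputs, then combine entries with every operator, keeping only expressions whose input/output hash is unseen (optionally sympy-linearized, with commutative duplicates blacklisted).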
if do_watch:
self.watchdog = threading.Thread(target=self._watchdog_worker, args=[watchdog_threshold], daemon=True)
logger.debug('Start watchdog')
self.watchdog.start()
if linearize:
logger.info('Linearization enabled')
import sympy
symbols = {x: sympy.symbols(x) for x in self.grammar.vars}
t0 = time()
from qsynthesis.grammar import jitting
CU = jitting.make_compilation_unit(bitsize)
N = self.input_number
ArTy = jitting.get_native_array_type(bitsize, N)
worklist = [(ArTy(), k) for k in self.grammar.vars]
for (i, inp) in enumerate(self.inputs):
for (v, k) in worklist:
v[i] = inp[k]
csts = [(ArTy(), str(c)) for c in constants]
for (ar, c) in csts:
jitting.init_array_cst(ar, int(c), N, bitsize)
worklist.extend(csts)
hash_set = set((self.hash(x[0]) for x in worklist))
ops = sorted(self.grammar.non_terminal_operators, key=(lambda x: (x.arity == 1)))
cur_depth = 2
blacklist = set()
item_count = len(worklist)
try:
while (cur_depth > 0):
n_items = len(worklist)
t = (time() - t0)
print(f'Depth {cur_depth} (size:{n_items}) (Time:{int((t / 60))}m{(t % 60):.5f}s)')
c = 0
for (i, (same, (vals1, name1), (vals2, name2))) in enumerate(self._custom_permutations(worklist)):
if same:
c += 1
print(f'process: {((c * 100) / n_items):.2f}%\r', end='')
if (0 < limit <= item_count):
self.stop = True
if self.stop:
logger.warning('Threshold reached, generation interrupted')
raise KeyboardInterrupt()
(name1_cst, name2_cst) = (self._is_constant(name1), self._is_constant(name2))
is_both_constant = (name1_cst & name2_cst)
for (op, op_eval) in zip(ops, [jitting.get_op_eval_array(CU, x) for x in ops]):
if (op.arity == 1):
new_vals = ArTy()
op_eval(new_vals, vals1, N)
h = self.hash(new_vals)
if (h not in hash_set):
if name1_cst:
fmt = str(self._to_signed(new_vals[0]))
else:
fmt = (f'{op.symbol}({name1})' if (len(name1) > 1) else f'{op.symbol}{name1}')
fmt = (self._try_linearize(fmt, symbols) if linearize else fmt)
logger.debug(f'[add] {fmt: <20} {h}')
hash_set.add(h)
item_count += 1
worklist.append((new_vals, fmt))
else:
logger.debug(f'[drop] {op.symbol}{name1} ')
else:
if (same and (op.id_eq or op.id_zero)):
continue
sn1 = (f'{name1}' if (len(name1) == 1) else f'({name1})')
sn2 = (f'{name2}' if (len(name2) == 1) else f'({name2})')
fmt = (f'{op.symbol}({name1},{name2})' if op.is_prefix else f'{sn1}{op.symbol}{sn2}')
if (not linearize):
if (fmt in blacklist):
continue
new_vals = ArTy()
op_eval(new_vals, vals1, vals2, N)
if is_both_constant:
fmt = str(self._to_signed(new_vals[0]))
h = self.hash(new_vals)
if (h not in hash_set):
if linearize:
fmt = (self._try_linearize(fmt, symbols) if linearize else fmt)
if (fmt in blacklist):
continue
logger.debug(f'[add] {fmt: <20} {h}')
hash_set.add(h)
item_count += 1
worklist.append((new_vals, fmt))
if (op.commutative and do_use_blacklist and (not is_both_constant)):
fmt = (f'{op.symbol}({name2},{name1})' if op.is_prefix else f'{sn2}{op.symbol}{sn1}')
fmt = (self._try_linearize(fmt, symbols) if linearize else fmt)
blacklist.add(fmt)
logger.debug(f'[blacklist] {fmt}')
else:
logger.debug((f'[drop] {op.symbol}({name1},{name2})' if op.is_prefix else f'[drop] ({name1}){op.symbol}({name2})'))
cur_depth += 1
except KeyboardInterrupt:
logger.info('Stop required')
self.stop = True
t = (time() - t0)
print(f'Depth {cur_depth} (size:{len(worklist)}) (Time:{int((t / 60))}m{(t % 60):.5f}s) [RAM:{self.__size_to_str(self.max_mem)}]')
self.add_entries(worklist)
if do_watch:
self.watchdog.join()
def add_entry(self, hash: Hash, value: str) -> None:
raise NotImplementedError('Should be implemented by child class')
def add_entries(self, worklist: List[Tuple[(Hash, str)]]) -> None:
raise NotImplementedError('Should be implemented by child class')
@staticmethod
def create(filename: Union[(str, Path)], grammar: TritonGrammar, inputs: List[Input], constants: List[int]=[]) -> 'InputOutputOracle':
raise NotImplementedError('Should be implemented by child class')
@staticmethod
def load(file: Union[(Path, str)]) -> 'InputOutputOracle':
raise NotImplementedError('Should be implemented by child class')
@staticmethod
def __size_to_str(value: int) -> str:
units = [(float(1024), 'Kb'), (float((1024 ** 2)), 'Mb'), (float((1024 ** 3)), 'Gb')]
for (unit, s) in units[::(- 1)]:
if ((value / unit) < 1):
continue
else:
return f'{(value / unit):.2f}{s}'
return f'{value}B'

@node_rewriter(inc_subtensor_ops)
def incsubtensor_rv_replace(fgraph, node):
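# Graph rewrite: when an inc/set_subtensor of a measurable random variable carries a value variable, push the observed data into the value variable with set_subtensor and re-map the base RV accordingly.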
rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, 'preserve_rv_mappings', None)
if (rv_map_feature is None):
return None
rv_var = node.outputs[0]
if (rv_var not in rv_map_feature.rv_values):
return None
base_rv_var = node.inputs[0]
if (not rv_map_feature.request_measurable([base_rv_var])):
return None
data = node.inputs[1]
idx = indices_from_subtensor(getattr(node.op, 'idx_list', None), node.inputs[2:])
value_var = rv_map_feature.rv_values[rv_var]
new_value_var = pt.set_subtensor(value_var[idx], data)
rv_map_feature.update_rv_maps(rv_var, new_value_var, base_rv_var)
return [base_rv_var]

class BaseDataLoader(DataLoader):
def __init__(self, dataset, batch_size, shuffle, num_workers, val_split=0.0):
self.shuffle = shuffle
self.dataset = dataset
self.nbr_examples = len(dataset)
if val_split:
(self.train_sampler, self.val_sampler) = self._split_sampler(val_split)
else:
(self.train_sampler, self.val_sampler) = (None, None)
self.init_kwargs = {'dataset': self.dataset, 'batch_size': batch_size, 'shuffle': self.shuffle, 'num_workers': num_workers, 'pin_memory': True}
super(BaseDataLoader, self).__init__(sampler=self.train_sampler, **self.init_kwargs)
def _split_sampler(self, split):
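# Deterministically (seed 0) shuffle the example indices and carve off the first 'split' fraction for validation; returns SubsetRandomSamplers for the train and validation subsets.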
if (split == 0.0):
return (None, None)
self.shuffle = False
split_indx = int((self.nbr_examples * split))
np.random.seed(0)
indxs = np.arange(self.nbr_examples)
np.random.shuffle(indxs)
train_indxs = indxs[split_indx:]
val_indxs = indxs[:split_indx]
self.nbr_examples = len(train_indxs)
train_sampler = SubsetRandomSampler(train_indxs)
val_sampler = SubsetRandomSampler(val_indxs)
return (train_sampler, val_sampler)
def get_val_loader(self):
if (self.val_sampler is None):
return None
return DataLoader(sampler=self.val_sampler, **self.init_kwargs)

class CmdTestMenu(Command):
key = 'testmenu'
def func(self):
pretext = '|cSend a delayed message to another player |n'
posttext = "|c|n|/Syntax: type |c<field> = <new value>|n to change the values of the form. Given|/player must be currently logged in, delay is given in seconds. When you are|/finished, type '|csend|n' to send the message.|/"
init_fill_field(SAMPLE_FORM, self.caller, init_delayed_message, pretext=pretext, posttext=posttext, submitcmd='send', borderstyle='none')

@pytest.mark.parametrize('username_field', ['email', 'identifier'])
@pytest.mark.django_project(extra_settings="\n AUTH_USER_MODEL = 'app.MyCustomUser'\n INSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'tpkg.app',\n ]\n ROOT_URLCONF = 'tpkg.app.urls'\n ")
def test_custom_user_model(django_pytester: DjangoPytester, username_field: str) -> None:
django_pytester.create_app_file(f'''
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
class MyCustomUserManager(BaseUserManager):
def create_user(self, {username_field}, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
user = self.model({username_field}={username_field}, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, {username_field}, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
return self.create_user(
{username_field}={username_field},
password=password,
**extra_fields
)
class MyCustomUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=100, unique=True)
identifier = models.CharField(unique=True, max_length=100)
is_staff = models.BooleanField(
'staff status',
default=False,
help_text='Designates whether the user can log into this admin site.'
)
objects = MyCustomUserManager()
USERNAME_FIELD = '{username_field}'
''', 'models.py')
django_pytester.create_app_file("\n from django.urls import path\n\n from tpkg.app import views\n\n urlpatterns = [path('admin-required/', views.admin_required_view)]\n ", 'urls.py')
django_pytester.create_app_file("\n from django.http import HttpResponse\n from django.template import Template\n from django.template.context import Context\n\n\n def admin_required_view(request):\n assert request.user.is_staff\n return HttpResponse(Template('You are an admin').render(Context()))\n ", 'views.py')
django_pytester.makepyfile("\n from django.utils.encoding import force_str\n from tpkg.app.models import MyCustomUser\n\n def test_custom_user_model(admin_client):\n resp = admin_client.get('/admin-required/')\n assert force_str(resp.content) == 'You are an admin'\n ")
django_pytester.create_app_file('', 'migrations/__init__.py')
django_pytester.create_app_file("\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MyCustomUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('email', models.EmailField(error_messages={'unique': 'A user with that email address already exists.'}, max_length=100, unique=True, verbose_name='email address')),\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n ('identifier', models.CharField(unique=True, max_length=100)),\n ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),\n ],\n options={\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n bases=None,\n ),\n ]\n ", 'migrations/0002_custom_user_model.py')
result = django_pytester.runpytest_subprocess('-s')
result.stdout.fnmatch_lines(['* 1 passed*'])
assert (result.ret == 0)

class TFileTypeLoad(TestCase):
filename = os.path.join(DATA_DIR, 'empty.ogg')
def test_old_argument_handling(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
f = MyFileType()
self.assertFalse(hasattr(f, 'a'))
f = MyFileType(self.filename)
self.assertEquals(f.arg, 1)
f = MyFileType(self.filename, 42)
self.assertEquals(f.arg, 42)
self.assertEquals(f.filename, self.filename)
f = MyFileType(self.filename, arg=42)
self.assertEquals(f.arg, 42)
f = MyFileType(filename=self.filename, arg=42)
self.assertEquals(f.arg, 42)
self.assertRaises(TypeError, MyFileType, self.filename, nope=42)
self.assertRaises(TypeError, MyFileType, nope=42)
self.assertRaises(TypeError, MyFileType, self.filename, 42, 24)
def test_both_args(self):
x = BytesIO()
f = MyFileType(filename='foo', fileobj=x)
self.assertTrue((f.fileobj is x))
self.assertEquals(f.filename, 'foo')
def test_fileobj(self):
x = BytesIO()
f = MyFileType(fileobj=x)
self.assertTrue((f.fileobj is x))
self.assertTrue((f.filename is None))
def test_magic(self):
x = BytesIO()
f = MyFileType(x)
self.assertTrue((f.fileobj is x))
self.assertTrue((f.filename is None))
def test_filething(self):
x = BytesIO()
self.assertRaises(TypeError, MyFileType, filething=x)
def test_filename_explicit(self):
x = BytesIO()
self.assertRaises(ValueError, MyFileType, filename=x)

def make_clean_directories(beta, root_folder, iteration):
image_dir = ((((root_folder + '/images_generation_') + str(beta)) + '_') + str(iteration))
if (not os.path.exists(image_dir)):
os.makedirs(image_dir)
elif (len(os.listdir(image_dir)) > 0):
os.system(('rm -r %s/*' % image_dir))
models_dir = ((((root_folder + '/saved_models_') + str(beta)) + '_') + str(iteration))
if (not os.path.exists(models_dir)):
os.makedirs(models_dir)
elif (len(os.listdir(models_dir)) > 0):
os.system(('rm -r %s/*' % models_dir))
data_dir = ((((root_folder + '/results_') + str(beta)) + '_') + str(iteration))
if (not os.path.exists(data_dir)):
os.makedirs(data_dir)
elif (len(os.listdir(data_dir)) > 0):
os.system(('rm -r %s/*' % data_dir))
return (image_dir, models_dir, data_dir)

@pytest.mark.parametrize('aoi_model', ['sapm', 'ashrae', 'physical', 'martin_ruiz'])
def test_aoi_models(sapm_dc_snl_ac_system, location, aoi_model, weather, mocker):
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model=aoi_model, spectral_model='no_loss')
m = mocker.spy(sapm_dc_snl_ac_system, 'get_iam')
mc.run_model(weather=weather)
assert (m.call_count == 1)
assert isinstance(mc.results.ac, pd.Series)
assert (not mc.results.ac.empty)
assert ((mc.results.ac.iloc[0] > 150) and (mc.results.ac.iloc[0] < 200))
assert (mc.results.ac.iloc[1] < 1)

def record_operative_gin_configurations(checkpoint_dir):
gin_log_file = operative_config_path(checkpoint_dir)
if tf.io.gfile.exists(gin_log_file):
tf.io.gfile.rename(gin_log_file, (gin_log_file + '.old'), overwrite=True)
with tf.io.gfile.GFile(gin_log_file, 'w') as f:
f.write(gin.operative_config_str())

class Stash():
__slots__ = ('_storage',)
def __init__(self) -> None:
self._storage: Dict[(StashKey[Any], object)] = {}
def __setitem__(self, key: StashKey[T], value: T) -> None:
self._storage[key] = value
def __getitem__(self, key: StashKey[T]) -> T:
return cast(T, self._storage[key])
def get(self, key: StashKey[T], default: D) -> Union[(T, D)]:
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: StashKey[T], default: T) -> T:
try:
return self[key]
except KeyError:
self[key] = default
return default
def __delitem__(self, key: StashKey[T]) -> None:
del self._storage[key]
def __contains__(self, key: StashKey[T]) -> bool:
return (key in self._storage)
def __len__(self) -> int:
return len(self._storage)

class Block(nn.Module):
def __init__(self, length, dim, adj, drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(length)
self.GCN_Block1 = GCN(dim, dim, adj)
self.GCN_Block2 = GCN(dim, dim, adj)
self.adj = adj
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
self.norm_att1 = norm_layer(dim)
self.num_heads = 8
qkv_bias = True
qk_scale = None
attn_drop = 0.2
proj_drop = 0.25
self.attn = Attention(dim, num_heads=self.num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop)
self.norm2 = norm_layer(dim)
self.uMLP = uMLP(in_features=dim, hidden_features=256, act_layer=act_layer, drop=0.2)
gcn2attn_p = 0.15
Attn2gcn_p = 0.15
self.gcn2Attn_drop = nn.Dropout(p=gcn2attn_p)
self.Attn2gcn_drop = nn.Dropout(p=Attn2gcn_p)
self.s_gcn2attn = nn.Parameter(torch.tensor([0.5], dtype=torch.float32), requires_grad=False)
self.s_attn2gcn = nn.Parameter(torch.tensor([0.8], dtype=torch.float32), requires_grad=False)
def forward(self, x):
res1 = x
x_atten = x.clone()
x_gcn_1 = x.clone()
x_gcn_1 = rearrange(x_gcn_1, 'b j c -> b c j').contiguous()
x_gcn_1 = self.norm1(x_gcn_1)
x_gcn_1 = rearrange(x_gcn_1, 'b j c -> b c j').contiguous()
x_gcn_1 = self.GCN_Block1(x_gcn_1)
x_atten = self.norm_att1(x_atten)
(x_atten, attn2gcn) = self.attn(x_atten, f=self.gcn2Attn_drop((x_gcn_1 * self.s_gcn2attn)))
x_gcn_2 = self.GCN_Block2((x_gcn_1 + self.Attn2gcn_drop((attn2gcn * self.s_attn2gcn))))
x = (res1 + self.drop_path((x_gcn_2 + x_atten)))
res2 = x
x = self.norm2(x)
x = (res2 + self.drop_path(self.uMLP(x)))
return x

def test_cirq_circuit_to_cbloq():
qubits = cirq.LineQubit.range(6)
circuit = cirq.testing.random_circuit(qubits, n_moments=7, op_density=1.0, random_state=52)
circuit.append(cirq.global_phase_operation((- 1j)))
cbloq = cirq_optree_to_cbloq(circuit)
bloq_unitary = cbloq.tensor_contract()
cirq_unitary = circuit.unitary(qubits)
np.testing.assert_allclose(cirq_unitary, bloq_unitary, atol=1e-08)

class TestCloseableApp(unittest.TestCase):
@classmethod
def setUpClass(cls):
import closeable_app
cls.AppClass = closeable_app.MyApp
def setUp(self):
self.AppClass.log_request = (lambda x, y: None)
def tearDown(self):
del self.AppClass.log_request
self.app.on_close()
def test_main(self):
self.app = self.AppClass(MockRequest(), ('0.0.0.0', 8888), MockServer())
root_widget = self.app.main()
html = root_widget.repr()
assertValidHTML(html)

def cnn_get_confidence(model, loader, device='cpu'):
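# Run the model over the loader and record, per example, the softmax confidence of its prediction and whether that prediction was correct; returns (correct ids, wrong ids, confidence map).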
model.eval()
correct = set()
wrong = set()
instance_confidence = {}
correct_cnt = 0
with torch.no_grad():
for (cur_batch_id, batch) in enumerate(loader):
b_x = batch[0].to(device)
b_y = batch[1].to(device)
output = model(b_x)
output = nn.functional.softmax(output, dim=1)
model_pred = output.max(1, keepdim=True)
pred = model_pred[1].to(device)
pred_prob = model_pred[0].to(device)
is_correct = pred.eq(b_y.view_as(pred))
correct_cnt += pred.eq(b_y.view_as(pred)).sum().item()
for (test_id, cur_correct) in enumerate(is_correct):
cur_instance_id = (test_id + (cur_batch_id * loader.batch_size))
instance_confidence[cur_instance_id] = pred_prob[test_id].cpu().numpy()[0]
if (cur_correct == 1):
correct.add(cur_instance_id)
else:
wrong.add(cur_instance_id)
return (correct, wrong, instance_confidence)

class SPoint(SCoordinate):
def __init__(self, lon, lat):
lon = np.asarray(lon)
lat = np.asarray(lat)
if ((lon.size > 1) or (lat.size > 1)):
raise ValueError('Use SMultiPoint to define multiple points.')
super().__init__(lon, lat)
@classmethod
def from_degrees(cls, lon, lat):
return cls(np.deg2rad(lon), np.deg2rad(lat))
def __str__(self):
return str((float(self.lon), float(self.lat)))
def __repr__(self):
return str((float(self.lon), float(self.lat)))
def to_shapely(self):
from shapely.geometry import Point
point = Point(*self.vertices_in_degrees[0])
return point

class QuadTreeStructureSingleRing():
def __init__(self, ring):
self.ring = ring
self.root_cell = Cell(0, ring.bounding_box.left, ring.bounding_box.lower, ring.bounding_box.width, ring.bounding_box.height, [ring.vertices], 'maybe')
cells_for_processing = [self.root_cell]
total_cell_count = 1
for _i in range(0, 8):
result_cell_list = []
while (len(cells_for_processing) > 0):
cell = cells_for_processing.pop()
cell.split()
total_cell_count += 4
children_cells = [cell.children_l_b, cell.children_l_t, cell.children_r_b, cell.children_r_t]
for child in children_cells:
if ((child.status == 'out') or (child.status == 'in')):
continue
if ((child.level >= 5) and ((len(child.rings) == 1) and (child.rings[0].len <= 5))):
continue
result_cell_list.append(child)
cells_for_processing = result_cell_list
def contains_point(self, point):
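# Walk the quadtree from the root down to the leaf cell covering the point, then delegate the exact in/out test to that cell.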
if ((point[0] < self.min_x) or (point[0] > (self.min_x + self.region_width)) or (point[1] < self.min_y) or (point[1] > (self.min_y + self.region_height))):
return False
cell_to_check = self.root_cell
while True:
if (cell_to_check.children_l_b is None):
break
middle_x = (cell_to_check.min_x + (cell_to_check.length_x / 2))
middle_y = (cell_to_check.min_y + (cell_to_check.length_y / 2))
if ((point[0] <= middle_x) and (point[1] <= middle_y)):
cell_to_check = cell_to_check.children_l_b
elif ((point[0] <= middle_x) and (point[1] > middle_y)):
cell_to_check = cell_to_check.children_l_t
elif ((point[0] > middle_x) and (point[1] <= middle_y)):
cell_to_check = cell_to_check.children_r_b
else:
cell_to_check = cell_to_check.children_r_t
return cell_to_check.contains_point(point)
@property
def region_width(self):
return self.ring.bounding_box.width
@property
def region_height(self):
return self.ring.bounding_box.height
@property
def min_x(self):
return self.ring.bounding_box.left
@property
def min_y(self):
return self.ring.bounding_box.lower

@pytest.mark.qt_no_exception_capture
@no_capture_pyside6
def test_capture_exceptions_context_manager(qapp):
from pytestqt.qt_compat import qt_api
from pytestqt.exceptions import capture_exceptions
class Receiver(qt_api.QtCore.QObject):
def event(self, ev):
raise ValueError('mistakes were made')
r = Receiver()
with capture_exceptions() as exceptions:
qapp.sendEvent(r, qt_api.QtCore.QEvent(qt_api.QtCore.QEvent.Type.User))
qapp.processEvents()
assert ([str(e) for (t, e, tb) in exceptions] == ['mistakes were made'])

def add_railing_to_balcony(bm, top, balcony_normal, prop):
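# Duplicate the balcony's top face, inset it by the railing offset, extrude every edge but the first one along the balcony normal (the open side) up by the corner-post height, and build the railing on the resulting faces.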
ret = bmesh.ops.duplicate(bm, geom=[top])
dup_top = filter_geom(ret['geom'], BMFace)[0]
max_offset = (min([*calc_face_dimensions(dup_top)]) / 2)
prop.rail.offset = clamp(prop.rail.offset, 0.0, (max_offset - 0.001))
ret = bmesh.ops.inset_individual(bm, faces=[dup_top], thickness=prop.rail.offset, use_even_offset=True)
bmesh.ops.delete(bm, geom=ret['faces'], context='FACES')
edges = sort_edges(dup_top.edges, balcony_normal)[1:]
railing_geom = bmesh.ops.extrude_edge_only(bm, edges=edges)['geom']
bmesh.ops.translate(bm, verts=filter_geom(railing_geom, BMVert), vec=(0.0, 0.0, prop.rail.corner_post_height))
bmesh.ops.delete(bm, geom=[dup_top], context='FACES')
railing_faces = filter_geom(railing_geom, BMFace)
create_railing(bm, railing_faces, prop.rail, balcony_normal) |
def test_check_private(mocker: MockerFixture, tester: CommandTester, fixture_dir: FixtureDirGetter) -> None:
mocker.patch('poetry.poetry.Poetry.file', return_value=TOMLFile((fixture_dir('private_pyproject') / 'pyproject.toml')), new_callable=mocker.PropertyMock)
tester.execute()
expected = 'All set!\n'
assert (tester.io.fetch_output() == expected) |
class TestAllowEvents(EndianTest):
def setUp(self):
        self.req_args_0 = {'mode': 1, 'time': 726098765}  # time value recovered from req_bin_0, assuming little-endian packing
self.req_bin_0 = b'#\x01\x02\x00McG+'
def testPackRequest0(self):
bin = request.AllowEvents._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.AllowEvents._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0) |
def get_refresh_params(endpoint) -> Dict:
v = 0
while True:
try:
bucket = f'{endpoint}?v={v}'
REFRESH_LIMITER.try_acquire(bucket)
break
except BucketFullException as e:
seconds = int(e.meta_info['remaining_time'])
logger.debug(f'{bucket} cannot be refreshed again for {seconds} seconds')
v += 1
return ({'refresh': True, 'v': v} if (v > 0) else {'refresh': True}) |
class Effect996(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Hybrid Turret')), 'trackingSpeed', ship.getModifiedItemAttr('eliteBonusGunship2'), skill='Assault Frigates', **kwargs) |
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {logging.DEBUG: '\x1b[00;32m', logging.INFO: '\x1b[00;36m', logging.AUDIT: '\x1b[01;36m', logging.WARN: '\x1b[01;33m', logging.ERROR: '\x1b[01;31m', logging.CRITICAL: '\x1b[01;31m'}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record) |
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
self.parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
self.parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/aicha/', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
self.initialized = True
def parse(self):
if (not self.initialized):
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
self.opt.gpu_ids.append(id)
if (len(self.opt.gpu_ids) > 0):
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
        print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print(('%s: %s' % (str(k), str(v))))
        print('-------------- End ----------------')
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write(('%s: %s\n' % (str(k), str(v))))
            opt_file.write('-------------- End ----------------\n')
return self.opt |
def print_args(args):
print('CONFIG', flush=True)
print('', flush=True)
max_tab = 0
for (k, v) in vars(args).items():
max_tab = max(max_tab, math.floor((len(k) / 4)))
for (k, v) in vars(args).items():
cur_tab = math.floor((len(k) / 4))
print(((k + ('\t' * ((max_tab - cur_tab) + 1))) + str(v)))
print('', flush=True) |
def get_log_path(timestamp, opts):
inputs = ((('z' + str(opts.use_z)) + '_alpha') + str(opts.use_alpha))
return (((opts.log_dir % opts.dataset) + '/%s/') + (opts.run_dir % (timestamp, opts.folder_to_save, opts.lr, opts.batch_size, opts.model_type, opts.splatter, opts.noise, opts.norm_G, opts.refine_model_type, opts.depth_predictor_type, ((str(opts.use_camera) + '|') + str(opts.use_xys)), opts.init, opts.image_type, opts.seed, str(opts.use_multi_hypothesis), ''.join(opts.losses).replace('_', '|'), inputs, opts.suffix, opts.discriminator_losses))) |
def parse_input():
description = 'This script allows you to evaluate the ActivityNet detection task which is intended to evaluate the ability of algorithms to temporally localize activities in untrimmed video sequences.'
p = argparse.ArgumentParser(description=description)
p.add_argument('ground_truth_filename', help='Full path to json file containing the ground truth.')
p.add_argument('prediction_filename', help='Full path to json file containing the predictions.')
p.add_argument('--subset', default='validation', help='String indicating subset to evaluate: (training, validation)')
p.add_argument('--tiou_thresholds', type=float, default=np.linspace(0.5, 0.95, 10), help='Temporal intersection over union threshold.')
p.add_argument('--verbose', type=bool, default=True)
p.add_argument('--check_status', type=bool, default=True)
return p.parse_args() |
def parse2dict(bp, meter, line_id='ID'):
dx = {}
dx[parse_hdr] = html_parse(bp, use_caps=True, use_html=True, between_sylls='.', between_words=' ', viols=True, line_id=line_id)
dx[meter_hdr] = (bp.str_meter() if bp else '')
dx['# viol'] = (bp.totalCount if bp else '')
dx['# syll'] = (bp.num_sylls if bp else '')
for c in sorted(meter.constraints, key=(lambda _c: _c.name)):
if (bp and (c in bp.constraintScores)):
val = bp.constraintScores[c]
if (int(val) == val):
val = int(val)
else:
val = ''
dx[rename_constraint(c)] = val
return dx |
def test_moc_state_and_colours(patched_moc):
patched_moc.poll()
assert (patched_moc.layout.colour == patched_moc.play_color)
patched_moc.play()
patched_moc.poll()
assert (patched_moc.layout.colour == patched_moc.noplay_color)
patched_moc.play()
patched_moc.poll()
assert (patched_moc.layout.colour == patched_moc.play_color) |
class Migration(migrations.Migration):
dependencies = [('core', '0007_remove_playlistentry_song')]
operations = [migrations.AlterField(model_name='archivedplaylist', name='list_id', field=models.CharField(max_length=2000)), migrations.AlterField(model_name='archivedsong', name='url', field=models.CharField(max_length=2000, unique=True)), migrations.AlterField(model_name='currentsong', name='external_url', field=models.CharField(blank=True, max_length=2000)), migrations.AlterField(model_name='currentsong', name='internal_url', field=models.CharField(max_length=2000)), migrations.AlterField(model_name='playlistentry', name='url', field=models.CharField(max_length=2000)), migrations.AlterField(model_name='queuedsong', name='external_url', field=models.CharField(blank=True, max_length=2000)), migrations.AlterField(model_name='queuedsong', name='internal_url', field=models.CharField(max_length=2000))] |
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = ("\n power< it='itertools'\n trailer<\n dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >\n |\n power< func=%(it_funcs)s trailer< '(' [any] ')' > >\n " % locals())
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if (('it' in results) and (func.value not in ('ifilterfalse', 'izip_longest'))):
(dot, it) = (results['dot'], results['it'])
prefix = it.prefix
it.remove()
dot.remove()
func.parent.replace(func)
prefix = (prefix or func.prefix)
func.replace(Name(func.value[1:], prefix=prefix)) |
def get_highlighted_subtable(table, cell_indices, with_heuristic_headers=False):
highlighted_table = []
adjusted_table = _add_adjusted_col_offsets(table)
for (row_index, col_index) in cell_indices:
cell = table[row_index][col_index]
if with_heuristic_headers:
row_headers = _get_heuristic_row_headers(adjusted_table, row_index, col_index)
col_headers = _get_heuristic_col_headers(adjusted_table, row_index, col_index)
else:
row_headers = []
col_headers = []
highlighted_cell = {'cell': cell, 'row_headers': row_headers, 'col_headers': col_headers}
highlighted_table.append(highlighted_cell)
return highlighted_table |
def main():
(header, dataSets) = gpstk.readSEM(gpstk.data.full_path('sem_data.txt'), strict=True)
gpstk.writeSEM(gpstk.data.full_path('sem_data.txt.new'), header, dataSets)
orbit = dataSets[0].toAlmOrbit()
austin = gpstk.Position(30, 97, 0, gpstk.Position.Geodetic)
starttime = gpstk.CommonTime()
starttime.setTimeSystem(gpstk.TimeSystem('GPS'))
endtime = gpstk.CommonTime()
endtime.setTimeSystem(gpstk.TimeSystem('GPS'))
endtime.addDays(1)
X = []
Y = []
for t in gpstk.times(starttime, endtime, seconds=1000):
xvt = orbit.svXvt(t)
location = gpstk.Position(xvt.x)
elevation = austin.elevation(location)
X.append(t.getDays())
Y.append(elevation)
fig = plt.figure()
fig.suptitle('Elevation of a GPS satellite throughout the day', fontsize=14, fontweight='bold')
ax = fig.add_subplot(111)
ax.set_xlabel('Time (days)')
ax.set_ylabel('Elevation (degrees)')
plt.plot(X, Y, 'r')
plt.show() |
class Vocabulary(object):
word2count: dict
word2index: dict
index2word: dict
num_words: int
def __init__(self):
self.trimmed = False
self.reset()
def reset(self):
self.word2count = {}
self.word2index = {'PAD': 0, 'GO': 1, 'EOS': 2}
self.index2word = {0: 'PAD', 1: 'GO', 2: 'EOS'}
self.num_words = 3
def index_word(self, word):
if (word not in self.word2count):
self.word2index[word] = self.num_words
self.index2word[self.num_words] = word
self.word2count[word] = 1
self.num_words += 1
else:
self.word2count[word] += 1
def trim(self, min_count):
if self.trimmed:
return
self.trimmed = True
valid_words = []
for (word, frequency) in self.word2count.items():
if (frequency >= min_count):
valid_words.append(word)
num_src_words = len(self.word2index)
num_valid_words = len(valid_words)
        print(('Trimmed from %d words to %d, %.4f of total' % (num_src_words, num_valid_words, (float(num_valid_words) / num_src_words))))
self.reset()
for word in valid_words:
self.index_word(word) |
@unique_values
def find_external_links(url, page):
for match in REL.finditer(page):
(tag, rel) = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if (('homepage' in rels) or ('download' in rels)):
for match in HREF.finditer(tag):
(yield urllib.parse.urljoin(url, htmldecode(match.group(1))))
for tag in ('<th>Home Page', '<th>Download URL'):
pos = page.find(tag)
if (pos != (- 1)):
match = HREF.search(page, pos)
if match:
(yield urllib.parse.urljoin(url, htmldecode(match.group(1)))) |
def main():
global best_acc
global start_epoch
criterion_cls = nn.CrossEntropyLoss()
if args.evaluate:
checkpoint = torch.load((('./checkpoint/' + model.__name__) + '_best.pth.tar'), map_location=torch.device('cpu'))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = (checkpoint['epoch'] + 1)
test(start_epoch, criterion_cls)
else:
trainable_list = nn.ModuleList([])
trainable_list.append(net)
optimizer = optim.SGD(trainable_list.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)
criterion_list = nn.ModuleList([])
criterion_list.append(criterion_cls)
criterion_list.cuda()
if args.resume:
checkpoint = torch.load((('./checkpoint/' + model.__name__) + '.pth.tar'), map_location=torch.device('cpu'))
net.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
for epoch in range(start_epoch, args.epochs):
train(epoch, criterion_list, optimizer)
acc = test(epoch, criterion_cls)
state = {'net': net.state_dict(), 'acc': acc, 'epoch': epoch, 'optimizer': optimizer.state_dict()}
if (not os.path.isdir('checkpoint')):
os.mkdir('checkpoint')
torch.save(state, (('./checkpoint/' + str(model.__name__)) + '.pth.tar'))
is_best = False
if (best_acc < acc):
best_acc = acc
is_best = True
if is_best:
shutil.copyfile((('./checkpoint/' + str(model.__name__)) + '.pth.tar'), (('./checkpoint/' + str(model.__name__)) + '_best.pth.tar')) |
class SendBase():
    @staticmethod
    def make_td(ones):
        raise NotImplementedError
    @classmethod
    def client(cls, pseudo_rand):
torch.distributed.init_process_group('gloo', rank=1, world_size=2, init_method='tcp://localhost:10017')
td = cls.make_td(ones=True)
td.send(0, pseudo_rand=pseudo_rand)
    @classmethod
    def server(cls, queue, pseudo_rand):
torch.distributed.init_process_group('gloo', rank=0, world_size=2, init_method='tcp://localhost:10017')
td = cls.make_td(ones=False)
td.recv(1, pseudo_rand=pseudo_rand)
assert (td == 1).all()
queue.put('yuppie')
    @pytest.mark.flaky(reruns=5, reruns_delay=5)
def test_send(self, pseudo_rand, set_context):
queue = mp.Queue(1)
main_worker = mp.Process(target=type(self).server, args=(queue, pseudo_rand))
secondary_worker = mp.Process(target=type(self).client, args=(pseudo_rand,))
main_worker.start()
secondary_worker.start()
try:
out = queue.get(timeout=TIMEOUT)
assert (out == 'yuppie')
finally:
main_worker.join()
secondary_worker.join() |
class TestConvert(UnaryOpMixin):
def op_numpy(self, mat):
return mat
specialisations = [pytest.param(data.dense.from_csr, data.CSR, data.Dense), pytest.param(data.dense.from_dia, data.Dia, data.Dense), pytest.param(data.csr.from_dense, data.Dense, data.CSR), pytest.param(data.csr.from_dia, data.Dia, data.CSR), pytest.param(data.dia.from_dense, data.Dense, data.Dia), pytest.param(data.dia.from_csr, data.CSR, data.Dia)] |
def _update_component_model_state(old_model_state: _ModelState, new_parent: _ModelState, new_index: int, new_component: ComponentType, schedule_render: Callable[([_LifeCycleStateId], None)]) -> _ModelState:
return _ModelState(parent=new_parent, index=new_index, key=old_model_state.key, model=Ref(), patch_path=f'{new_parent.patch_path}/children/{new_index}', children_by_key={}, targets_by_event={}, life_cycle_state=(_update_life_cycle_state(old_model_state.life_cycle_state, new_component) if old_model_state.is_component_state else _make_life_cycle_state(new_component, schedule_render))) |
class DragWidget(QWidget):
def __init__(self, parent=None):
super(DragWidget, self).__init__(parent)
dictionaryFile = QFile(':/dictionary/words.txt')
dictionaryFile.open(QIODevice.ReadOnly)
x = 5
y = 5
for word in QTextStream(dictionaryFile).readAll().split():
wordLabel = DragLabel(word, self)
wordLabel.move(x, y)
wordLabel.show()
x += (wordLabel.width() + 2)
if (x >= 195):
x = 5
y += (wordLabel.height() + 2)
newPalette = self.palette()
newPalette.setColor(QPalette.Window, Qt.white)
self.setPalette(newPalette)
self.setAcceptDrops(True)
self.setMinimumSize(400, max(200, y))
self.setWindowTitle('Draggable Text')
def dragEnterEvent(self, event):
if event.mimeData().hasText():
if (event.source() in self.children()):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasText():
mime = event.mimeData()
pieces = mime.text().split()
position = event.pos()
hotSpot = QPoint()
hotSpotPos = mime.data('application/x-hotspot').split(' ')
if (len(hotSpotPos) == 2):
hotSpot.setX(hotSpotPos[0].toInt()[0])
hotSpot.setY(hotSpotPos[1].toInt()[0])
for piece in pieces:
newLabel = DragLabel(piece, self)
newLabel.move((position - hotSpot))
newLabel.show()
position += QPoint(newLabel.width(), 0)
if (event.source() in self.children()):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
else:
event.ignore() |
class ClassyModelHeadExecutorWrapper(ClassyModelWrapper):
def forward(self, *args, **kwargs):
out = self.classy_model(*args, **kwargs)
if (len(self._heads) == 0):
return out
head_outputs = self.execute_heads()
if (len(head_outputs) == 1):
return list(head_outputs.values())[0]
else:
return head_outputs |
class MaxPool2dStaticSamePadding(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.pool = nn.MaxPool2d(*args, **kwargs)
self.stride = self.pool.stride
self.kernel_size = self.pool.kernel_size
if isinstance(self.stride, int):
self.stride = ([self.stride] * 2)
elif (len(self.stride) == 1):
self.stride = ([self.stride[0]] * 2)
if isinstance(self.kernel_size, int):
self.kernel_size = ([self.kernel_size] * 2)
elif (len(self.kernel_size) == 1):
self.kernel_size = ([self.kernel_size[0]] * 2)
def forward(self, x):
(h, w) = x.shape[(- 2):]
extra_h = ((((math.ceil((w / self.stride[1])) - 1) * self.stride[1]) - w) + self.kernel_size[1])
extra_v = ((((math.ceil((h / self.stride[0])) - 1) * self.stride[0]) - h) + self.kernel_size[0])
left = (extra_h // 2)
right = (extra_h - left)
top = (extra_v // 2)
bottom = (extra_v - top)
x = F.pad(x, [left, right, top, bottom])
x = self.pool(x)
return x |
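# Usage sketch, assuming MaxPool2dStaticSamePadding above and its torch/math
# imports are in scope: with kernel 3 and stride 2, a 5x5 input gets 2 extra
# pixels of padding per axis, yielding a TF-style "same" 3x3 output.
import torch
pool = MaxPool2dStaticSamePadding(kernel_size=3, stride=2)
y = pool(torch.randn(1, 1, 5, 5))
assert y.shape[-2:] == (3, 3)  # ceil(5 / 2) output positions per axis |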
@register_infer_shape
@register_canonicalize('fast_compile', 'local_cut_useless_reduce')
@register_useless('local_cut_useless_reduce')
@node_rewriter(ALL_REDUCE)
def local_useless_reduce(fgraph, node):
if isinstance(node.op, CAReduce):
(summed,) = node.inputs
if (summed.type == node.outputs[0].type):
return [summed] |
def main():
cv2.setNumThreads(1)
p = create_config(args.config_env, args.config_exp)
sys.stdout = Logger(os.path.join(p['retrieval_dir'], 'log_file.txt'))
print('Python script is {}'.format(os.path.abspath(__file__)))
print(colored(p, 'red'))
print(colored('Retrieve model', 'blue'))
model = get_model(p)
print(model)
model = model.cuda()
state_dict = torch.load(p['pretraining'], map_location='cpu')
if ('model' in state_dict.keys()):
state_dict = state_dict['model']
new_state = {}
for (k, v) in state_dict.items():
if k.startswith('module.model_q'):
new_state[k.rsplit('module.model_q.')[1]] = v
msg = model.load_state_dict(new_state, strict=False)
print(msg)
print(colored('Set CuDNN benchmark', 'blue'))
torch.backends.cudnn.benchmark = True
print(colored('Retrieve dataset', 'blue'))
from data.dataloaders.pascal_voc import VOC12
val_transforms = get_val_transformations()
print(val_transforms)
train_dataset = VOC12(split='train', transform=val_transforms, ignore_classes=p['retrieval_kwargs']['ignore_classes'])
val_dataset = VOC12(split='val', transform=val_transforms, ignore_classes=p['retrieval_kwargs']['ignore_classes'])
train_dataloader = get_val_dataloader(p, train_dataset)
val_dataloader = get_val_dataloader(p, val_dataset)
print('Train dataset {} - Val dataset {}'.format(str(train_dataset), str(val_dataset)))
print('Train samples {} - Val samples {}'.format(len(train_dataset), len(val_dataset)))
print(colored('Perform retrieval ...', 'blue'))
memory_bank = build_memory_bank(p, train_dataset, train_dataloader, model)
results = retrieval(p, memory_bank, val_dataset, val_dataloader, model) |
@pytest.mark.end_to_end()
def test_if_skipif_decorator_is_applied_any_condition_matches(tmp_path):
    source = '\n import pytask\n\n @pytask.mark.skipif(condition=False, reason="I am fine")\n @pytask.mark.skipif(condition=True, reason="No, I am not.")\n @pytask.mark.produces("out.txt")\n def task_first():\n assert False\n\n @pytask.mark.depends_on("out.txt")\n def task_second():\n assert False\n '
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
session = build(paths=tmp_path)
node = session.collection_reports[0].node
assert (len(node.markers) == 2)
assert (node.markers[0].name == 'skipif')
assert (node.markers[0].args == ())
assert (node.markers[0].kwargs == {'condition': True, 'reason': 'No, I am not.'})
assert (node.markers[1].name == 'skipif')
assert (node.markers[1].args == ())
assert (node.markers[1].kwargs == {'condition': False, 'reason': 'I am fine'})
assert (session.execution_reports[0].outcome == TaskOutcome.SKIP)
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert (session.execution_reports[1].outcome == TaskOutcome.SKIP)
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
assert (session.execution_reports[0].exc_info[1].args[0] == 'No, I am not.') |
class IPCServer():
def __init__(self):
self.logger = logbook.Logger('botogram IPC server')
self.commands = {}
self.auth_key = hashlib.sha1(os.urandom(64)).hexdigest()
self.stop_key = hashlib.sha1(os.urandom(64)).hexdigest()
self.stop = False
(self.port, self.conn) = self._get_connection()
def _get_connection(self):
count = 0
tried = []
while (count < MAX_CONNECT_TRIES):
port = random.randint(*PORTS_RANGE)
if (port in tried):
continue
tried.append(port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(('localhost', port))
except socket.error:
sock.close()
count += 1
continue
return (port, sock)
raise RuntimeError("Can't find an open port to bind the IPC socket")
def register_command(self, name, func):
if (not callable(func)):
raise RuntimeError('Commands must be callable!')
self.commands[name] = func
def run(self):
read_from = [self.conn]
needs_authentication = []
self.conn.listen(5)
while (not self.stop):
try:
(readable, *_) = select.select(read_from, [], [])
except InterruptedError:
continue
for conn in readable:
if (conn is self.conn):
(new_conn, addr) = conn.accept()
needs_authentication.append(new_conn)
read_from.append(new_conn)
self.logger.debug(('New IPC connection from %s:%s' % addr))
else:
try:
request = read_packet(conn)
except EOFError:
read_from.remove(conn)
try:
conn.shutdown(socket.SHUT_RDWR)
except OSError:
pass
conn.close()
continue
if (conn in needs_authentication):
if (request['command'] != '__authenticate__'):
write_packet(conn, {'ok': False, 'data': 'Please authenticate'})
continue
if (request['data'] != self.auth_key):
write_packet(conn, {'ok': False, 'data': 'Authentication failed'})
continue
write_packet(conn, {'ok': True, 'data': 'Welcome!'})
needs_authentication.remove(conn)
continue
if (request['command'] == '__stop__'):
if (request['data'] != self.stop_key):
write_packet(conn, {'ok': False, 'data': 'Wrong stop key'})
continue
self.stop = True
write_packet(conn, {'ok': True, 'data': 'Bye!'})
break
self.process(conn, request)
for conn in read_from:
try:
conn.shutdown(socket.SHUT_RDWR)
except OSError:
pass
conn.close()
def process(self, conn, request):
command = request['command']
request_data = request['data']
self.logger.debug(('Received IPC command %s' % command))
def reply(data, ok=True):
response = {'ok': ok, 'data': data}
write_packet(conn, response)
if (command not in self.commands):
reply('Command not supported!', False)
return
self.commands[command](request_data, reply)
def stop(self):
self.stop = True |
@pytest.fixture()
def database() -> ResourceDatabase:
return ResourceDatabase(game_enum=RandovaniaGame.METROID_PRIME_ECHOES, item=[ItemResourceInfo(i, letter, letter, 1) for (i, letter) in enumerate('ABCDEF')], event=[], trick=[], damage=[], version=[], misc=[SimpleResourceInfo(13, 'Trivial', 'Trivial', ResourceType.MISC), SimpleResourceInfo(14, 'Impossible', 'Impossible', ResourceType.MISC)], requirement_template={}, damage_reductions={}, energy_tank_item=ItemResourceInfo(4, 'E', 'E', 1)) |
class PyEnvCfg():
def __init__(self, content, path) -> None:
self.content = content
self.path = path
    @classmethod
    def from_folder(cls, folder):
return cls.from_file((folder / 'pyvenv.cfg'))
    @classmethod
    def from_file(cls, path):
content = (cls._read_values(path) if path.exists() else OrderedDict())
return PyEnvCfg(content, path)
    @staticmethod
    def _read_values(path):
content = OrderedDict()
for line in path.read_text(encoding='utf-8').splitlines():
equals_at = line.index('=')
key = line[:equals_at].strip()
value = line[(equals_at + 1):].strip()
content[key] = value
return content
def write(self):
logging.debug('write %s', self.path)
text = ''
for (key, value) in self.content.items():
line = f'{key} = {value}'
logging.debug('\t%s', line)
text += line
text += '\n'
self.path.write_text(text, encoding='utf-8')
def refresh(self):
self.content = self._read_values(self.path)
return self.content
def __setitem__(self, key, value) -> None:
self.content[key] = value
def __getitem__(self, key):
return self.content[key]
def __contains__(self, item) -> bool:
return (item in self.content)
def update(self, other):
self.content.update(other)
return self
def __repr__(self) -> str:
return f'{self.__class__.__name__}(path={self.path})' |
def loss_fn(teacher_logits, student_logits, teacher_temp, student_temp, centers, eps=1e-20):
teacher_logits = teacher_logits.detach()
student_probs = (student_logits / student_temp).softmax(dim=(- 1))
teacher_probs = ((teacher_logits - centers) / teacher_temp).softmax(dim=(- 1))
return (- (teacher_probs * torch.log((student_probs + eps))).sum(dim=(- 1)).mean()) |
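# Smoke test for the distillation loss above, assuming loss_fn and torch are
# in scope; random logits with zero centers should give a finite scalar.
import torch
teacher = torch.randn(8, 32)  # (batch, prototypes)
student = torch.randn(8, 32)
loss = loss_fn(teacher, student, teacher_temp=0.04, student_temp=0.1, centers=torch.zeros(32))
assert loss.ndim == 0 and torch.isfinite(loss) |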
class SuperResTransforms_320(TransformsConfig):
def __init__(self, opts):
super(SuperResTransforms_320, self).__init__(opts)
def get_transforms(self):
if (self.opts.resize_factors is None):
self.opts.resize_factors = '1,2,4,8,16,32'
factors = [int(f) for f in self.opts.resize_factors.split(',')]
print('Performing down-sampling with factors: {}'.format(factors))
transforms_dict = {'transform_gt_train': transforms.Compose([transforms.Resize((320, 320)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), 'transform_source': transforms.Compose([transforms.Resize((320, 320)), augmentations.BilinearResize(factors=factors), transforms.Resize((320, 320)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), 'transform_test': transforms.Compose([transforms.Resize((320, 320)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), 'transform_inference': transforms.Compose([transforms.Resize((320, 320)), augmentations.BilinearResize(factors=factors), transforms.Resize((320, 320)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}
return transforms_dict |
def integers_to_string(numbers):
if (len(numbers) == 0):
return ''
numbers = sorted(numbers)
t = list()
prev = numbers[0]
for curr in numbers:
if (curr != (prev + 1)):
t.append([curr])
elif (len(t[(- 1)]) > 1):
t[(- 1)][(- 1)] = curr
else:
t[(- 1)].append(curr)
prev = curr
return ' '.join(((str(i[0]) if (len(i) == 1) else ((str(i[0]) + ('..' if (i[0] != (i[1] - 1)) else ' ')) + str(i[1]))) for i in t)) |
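# Example calls for integers_to_string above: runs of two stay space-separated,
# longer runs collapse to 'a..b', and input order does not matter.
assert integers_to_string([]) == ''
assert integers_to_string([1, 2]) == '1 2'
assert integers_to_string([5, 1, 2, 3]) == '1..3 5' |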
class Activation(nn.Module):
def __init__(self, name, **params):
super().__init__()
if ((name is None) or (name == 'identity')):
self.activation = nn.Identity(**params)
elif (name == 'sigmoid'):
self.activation = nn.Sigmoid()
elif (name == 'softmax2d'):
self.activation = nn.Softmax(dim=1, **params)
elif (name == 'softmax'):
self.activation = nn.Softmax(**params)
elif (name == 'logsoftmax'):
self.activation = nn.LogSoftmax(**params)
elif (name == 'tanh'):
self.activation = nn.Tanh()
elif (name == 'argmax'):
self.activation = ArgMax(**params)
elif (name == 'argmax2d'):
self.activation = ArgMax(dim=1, **params)
elif (name == 'clamp'):
self.activation = Clamp(**params)
elif callable(name):
self.activation = name(**params)
else:
raise ValueError(f'Activation should be callable/sigmoid/softmax/logsoftmax/tanh/argmax/argmax2d/clamp/None; got {name}')
def forward(self, x):
return self.activation(x) |
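# Usage sketch, assuming the Activation class above and torch are importable;
# extra kwargs such as dim are forwarded to the underlying module.
import torch
act = Activation('softmax', dim=1)
probs = act(torch.randn(2, 5))
assert torch.allclose(probs.sum(dim=1), torch.ones(2)) |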
def data_parallelism(devices, fn, *args, **kwargs):
num_worker = len(devices)
if args:
new_args = [_maybe_repeat(arg, num_worker) for arg in args]
new_args = [list(x) for x in zip(*new_args)]
else:
new_args = [[] for _ in range(num_worker)]
new_kwargs = [{} for _ in range(num_worker)]
    for (k, v) in kwargs.items():
vals = _maybe_repeat(v, num_worker)
for i in range(num_worker):
new_kwargs[i][k] = vals[i]
fns = _maybe_repeat(fn, num_worker)
outputs = []
for i in range(num_worker):
worker = ('/gpu:%d' % i)
device_setter = _create_device_setter(False, worker, len(devices))
with tf.variable_scope(tf.get_variable_scope(), reuse=(i != 0)):
with tf.name_scope(('parallel_%d' % i)):
with tf.device(device_setter):
outputs.append(fns[i](*new_args[i], **new_kwargs[i]))
if isinstance(outputs[0], tuple):
outputs = list(zip(*outputs))
outputs = tuple([list(o) for o in outputs])
return outputs |
def get_shape(x: Union[(Tuple, List, Dict)]) -> Union[(Tuple, List, Dict)]:
if isinstance(x, (list, tuple)):
assert (len(x) > 0), 'x of tuple/list type must have at least one element'
return [get_shape(xi) for xi in x]
elif isinstance(x, dict):
return {k: get_shape(v) for (k, v) in x.items()}
else:
assert isinstance(x, torch.Tensor), 'x is expected to be a torch tensor'
return x.size() |
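# Usage sketch for get_shape above, assuming torch is installed; nested
# containers map to nested shape structures (torch.Size compares equal to tuple).
import torch
x = {'img': torch.zeros(2, 3, 4), 'meta': [torch.zeros(5), torch.zeros(1, 2)]}
assert get_shape(x) == {'img': (2, 3, 4), 'meta': [(5,), (1, 2)]} |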
def reduce_as_tree(queries_to_reduce):
mid = (len(queries_to_reduce) // 2)
left = queries_to_reduce[:mid]
right = queries_to_reduce[mid:]
to_reduce_right = right[0]
if (len(right) > 1):
to_reduce_right = reduce_as_tree(right)
if (len(left) > 1):
to_reduce_left = reduce_as_tree(left)
elif (len(left) == 1):
to_reduce_left = left[0]
else:
return to_reduce_right
return to_reduce_left.union_all(to_reduce_right) |
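# reduce_as_tree above only relies on a union_all method, so its balanced
# tree shape can be shown without SQLAlchemy; this stand-in class is a
# hypothetical, for illustration only.
class _Q:
    def __init__(self, name):
        self.name = name
    def union_all(self, other):
        return _Q(f'({self.name}+{other.name})')
# pairwise unions instead of a left-deep chain:
assert reduce_as_tree([_Q('a'), _Q('b'), _Q('c'), _Q('d')]).name == '((a+b)+(c+d))' |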
def prepare_dirs_and_logger(config):
formatter = logging.Formatter('%(asctime)s:%(levelname)s::%(message)s')
logger = logging.getLogger('tensorflow')
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(tf.logging.INFO)
if config.load_path:
if config.load_path.startswith(config.task):
config.model_name = config.load_path
else:
config.model_name = '{}_{}'.format(config.task, config.load_path)
else:
config.model_name = '{}_{}'.format(config.task, get_time())
config.model_dir = os.path.join(config.log_dir, config.model_name)
for path in [config.log_dir, config.data_dir, config.model_dir]:
if (not os.path.exists(path)):
os.makedirs(path) |
class EventSubmitTests(TestCase):
event_submit_url = reverse_lazy('events:event_submit')
    @classmethod
    def setUpTestData(cls):
cls.user = UserFactory(password='password')
        cls.post_data = {'event_name': 'PyConES17', 'event_type': 'conference', 'python_focus': 'Country-wide conference', 'expected_attendees': '500', 'location': 'Complejo San Francisco, Caceres, Spain', 'date_from': '2017-9-22', 'date_to': '2017-9-24', 'recurrence': 'None', 'link': 'https://example.com/pycones17', 'description': 'A conference no one can afford to miss'}  # link is a placeholder; the original URL was stripped from the corpus
def user_login(self):
self.client.login(username=self.user.username, password='password')
def test_submit_not_logged_in_is_redirected(self):
response = self.client.post(self.event_submit_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/events/submit/')
def test_submit_without_data_is_rejected(self):
self.user_login()
response = self.client.post(self.event_submit_url, {})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 0)
def test_submit_success_sends_email(self):
self.user_login()
response = self.client.post(self.event_submit_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('events:event_thanks'))
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'New event submission: "{}"'.format(self.post_data['event_name']))
def test_badheadererror(self):
self.user_login()
post_data = self.post_data.copy()
post_data['event_name'] = 'invalid\ntitle'
response = self.client.post(self.event_submit_url, post_data, follow=True)
self.assertEqual(response.status_code, 200)
messages = list(response.context['messages'])
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].message, 'Invalid header found.') |
class NoisySimulationTest(unittest.TestCase):
sim = QasmSimulator()
num_qubits = 4
readout_errors = []
for i in range(num_qubits):
p_error1 = ((i + 1) * 0.002)
p_error0 = (2 * p_error1)
ro_error = noise.ReadoutError([[(1 - p_error0), p_error0], [p_error1, (1 - p_error1)]])
readout_errors.append(ro_error)
noise_model = noise.NoiseModel()
for i in range(num_qubits):
noise_model.add_readout_error(readout_errors[i], [i])
seed_simulator = 100
shots = 10000
tolerance = 0.05
def execute_circs(self, qc_list: List[QuantumCircuit], noise_model=None) -> Result:
backend = self.sim
qobj = assemble(transpile(qc_list, backend=backend), backend=backend, shots=self.shots, seed_simulator=self.seed_simulator, noise_model=(None if (noise_model is None) else self.noise_model), method='density_matrix')
return backend.run(qobj).result() |
class ArgParserWithDebugger():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument('-d', '--debug', action='store_true')
def add_argument(self, *args, **kwargs):
self.parser.add_argument(*args, **kwargs)
def parse_args(self, args=None):
args = self.parser.parse_args(args)
if args.debug:
Debugger()
return args |
def reconstruct_switch(ori_tokens: np.ndarray, switch_preds: np.ndarray, wopad: bool=False):
post_tokens = []
(batch_size, seq_len) = ori_tokens.shape
for lidx in range(batch_size):
ori_token = ori_tokens[lidx]
post_token = [101]
switch_pred = switch_preds[lidx]
sw_pidx = switch_pred[0]
while (sw_pidx not in [0, (- 1)]):
post_token.append(ori_token[sw_pidx])
sw_pidx = switch_pred[sw_pidx]
if (ori_token[sw_pidx] == 102):
switch_pred[sw_pidx] = 0
assert (len(post_token) == np.sum((ori_token > 0)))
post_tokens.append(post_token)
if (wopad is not True):
return padding(post_tokens, seq_len, 0)
else:
return (post_tokens, padding(post_tokens, seq_len, 0)) |
class F39Handler(BaseHandler):
version = F39
commandMap = {'auth': commands.authconfig.F35_Authconfig, 'authconfig': commands.authconfig.F35_Authconfig, 'authselect': commands.authselect.F28_Authselect, 'autopart': commands.autopart.F38_AutoPart, 'autostep': commands.autostep.F34_AutoStep, 'bootloader': commands.bootloader.F39_Bootloader, 'btrfs': commands.btrfs.F23_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F28_ClearPart, 'cmdline': commands.displaymode.F26_DisplayMode, 'device': commands.device.F34_Device, 'deviceprobe': commands.deviceprobe.F34_DeviceProbe, 'dmraid': commands.dmraid.F34_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'module': commands.module.F31_Module, 'eula': commands.eula.F20_Eula, 'fcoe': commands.fcoe.F28_Fcoe, 'firewall': commands.firewall.F28_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.F26_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F23_Reboot, 'harddrive': commands.harddrive.F33_HardDrive, 'hmc': commands.hmc.F28_Hmc, 'ignoredisk': commands.ignoredisk.F34_IgnoreDisk, 'install': commands.install.F34_Install, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.F34_Logging, 'logvol': commands.logvol.F29_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F34_Method, 'mount': commands.mount.F27_Mount, 'multipath': commands.multipath.F34_MultiPath, 'network': commands.network.F39_Network, 'nfs': commands.nfs.FC6_NFS, 'nvdimm': commands.nvdimm.F28_Nvdimm, 'timesource': commands.timesource.F33_Timesource, 'ostreecontainer': commands.ostreecontainer.F38_OSTreeContainer, 'ostreesetup': commands.ostreesetup.F38_OSTreeSetup, 'part': commands.partition.F34_Partition, 'partition': commands.partition.F34_Partition, 'poweroff': commands.reboot.F23_Reboot, 'raid': commands.raid.F29_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F23_Reboot, 'repo': commands.repo.F33_Repo, 'reqpart': commands.reqpart.F23_ReqPart, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F37_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F23_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'snapshot': commands.snapshot.F26_Snapshot, 'sshpw': commands.sshpw.F24_SshPw, 'sshkey': commands.sshkey.F22_SshKey, 'text': commands.displaymode.F26_DisplayMode, 'timezone': commands.timezone.F33_Timezone, 'updates': commands.updates.F34_Updates, 'url': commands.url.F30_Url, 'user': commands.user.F24_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F21_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F37_ZFCP, 'zipl': commands.zipl.F32_Zipl}
dataMap = {'BTRFSData': commands.btrfs.F23_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'ModuleData': commands.module.F31_ModuleData, 'TimesourceData': commands.timesource.F33_TimesourceData, 'FcoeData': commands.fcoe.F28_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F29_LogVolData, 'MountData': commands.mount.F27_MountData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F39_NetworkData, 'NvdimmData': commands.nvdimm.F28_NvdimmData, 'PartData': commands.partition.F29_PartData, 'RaidData': commands.raid.F29_RaidData, 'RepoData': commands.repo.F30_RepoData, 'SnapshotData': commands.snapshot.F26_SnapshotData, 'SshPwData': commands.sshpw.F24_SshPwData, 'SshKeyData': commands.sshkey.F38_SshKeyData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F21_VolGroupData, 'ZFCPData': commands.zfcp.F37_ZFCPData} |
class struct_s_pxe_hw_undi(ctypes.Structure):
_pack_ = True
_functions_ = []
_fields_ = [('Signature', ctypes.c_uint32), ('Len', ctypes.c_ubyte), ('Fudge', ctypes.c_ubyte), ('Rev', ctypes.c_ubyte), ('IFcnt', ctypes.c_ubyte), ('MajorVer', ctypes.c_ubyte), ('MinorVer', ctypes.c_ubyte), ('IFcntExt', ctypes.c_ubyte), ('reserved', ctypes.c_ubyte), ('Implementation', ctypes.c_uint32)] |
def test_create_questionset_page(db):
questionset = QuestionSet.objects.exclude(pages=None).first()
page = questionset.pages.first()
page.locked = True
page.save()
with pytest.raises(ValidationError):
QuestionLockedValidator()({'questionsets': [questionset], 'locked': False}) |
def run_command(args: List[str], *, stdin: BinaryIO, retries: int, timeout: int) -> 'subprocess.CompletedProcess[bytes]':
remaining_retries = retries
while True:
try:
return _run_command(args, stdin=stdin, timeout=timeout)
except subprocess.TimeoutExpired as err:
if (remaining_retries == 0):
raise err
remaining_retries -= 1
logging.warning('(%s/%s) Retrying because command failed with: %r', (retries - remaining_retries), retries, err)
time.sleep(1) |
class _AbstractBox():
owner = None
def __init__(self, ascent, descent, advance, length):
self.ascent = ascent
self.descent = descent
self.advance = advance
self.length = length
def place(self, layout, i, x, y, z, line_x, line_y, rotation, visible, anchor_x, anchor_y, context):
raise NotImplementedError('abstract')
def delete(self, layout):
raise NotImplementedError('abstract')
def get_position_in_box(self, x):
raise NotImplementedError('abstract')
def get_point_in_box(self, position):
raise NotImplementedError('abstract') |
def _convolve1d3o_gpu(inp, out, ker, mode):
d_inp = cp.asarray(inp)
d_kernel = cp.asarray(ker)
(threadsperblock, blockspergrid) = _get_tpb_bpg()
k_type = 'convolve1D3O'
_populate_kernel_cache(out.dtype, k_type)
kernel = _get_backend_kernel(out.dtype, blockspergrid, threadsperblock, k_type)
kernel(d_inp, d_kernel, mode, out)
_print_atts(kernel)
return out |
class TFResNetConvLayer(tf.keras.layers.Layer):
def __init__(self, out_channels: int, kernel_size: int=3, stride: int=1, activation: str='relu', **kwargs) -> None:
super().__init__(**kwargs)
self.pad_value = (kernel_size // 2)
self.conv = tf.keras.layers.Conv2D(out_channels, kernel_size=kernel_size, strides=stride, padding='valid', use_bias=False, name='convolution')
self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, name='normalization')
self.activation = (ACT2FN[activation] if (activation is not None) else tf.keras.layers.Activation('linear'))
def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor:
height_pad = width_pad = (self.pad_value, self.pad_value)
hidden_state = tf.pad(hidden_state, [(0, 0), height_pad, width_pad, (0, 0)])
hidden_state = self.conv(hidden_state)
return hidden_state
def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
hidden_state = self.convolution(hidden_state)
hidden_state = self.normalization(hidden_state, training=training)
hidden_state = self.activation(hidden_state)
return hidden_state |
class ChangeModuleSpool(ContextMenuSingle):
visibilitySetting = 'spoolup'
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.resetId = None
def display(self, callingWindow, srcContext, mainItem):
if ((srcContext not in ('fittingModule', 'projectedModule')) or (self.mainFrame.getActiveFit() is None)):
return False
if ((mainItem is None) or mainItem.isEmpty):
return False
self.mod = mainItem
self.context = srcContext
return (self.mod.item.group.name in ('Precursor Weapon', 'Mutadaptive Remote Armor Repairer'))
def getText(self, callingWindow, itmContext, mainItem):
return _t('Spoolup Cycles')
def getSubMenu(self, callingWindow, context, mainItem, rootMenu, i, pitem):
m = wx.Menu()
if ('wxMSW' in wx.PlatformInfo):
bindmenu = rootMenu
else:
bindmenu = m
isNotDefault = ((self.mod.spoolType is not None) and (self.mod.spoolAmount is not None))
cycleDefault = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SPOOL_SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], True))[0]
cycleCurrent = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SPOOL_SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], False))[0]
cycleMin = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SPOOL_SCALE, 0, True))[0]
cycleMax = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SPOOL_SCALE, 1, True))[0]
cycleTotalMin = min(cycleDefault, cycleCurrent, cycleMin)
cycleTotalMax = max(cycleDefault, cycleCurrent, cycleMax)
def findCycles(val1, val2):
maxSteps = 20
valDiff = (val2 - val1)
valScale = (valDiff / maxSteps)
minStep = math.ceil(round(valScale, 9))
maxStep = math.floor(round((valDiff / 4), 9))
for currentStep in range(minStep, (maxStep + 1)):
if ((valDiff % currentStep) == 0):
return set(range(val1, (val2 + currentStep), currentStep))
else:
cycles = set()
while (val2 >= val1):
cycles.add(val1)
cycles.add(val2)
val1 += minStep
val2 -= minStep
return cycles
self.cycleMap = {}
cyclesToShow = findCycles(cycleMin, cycleMax)
for cycle in range(cycleTotalMin, (cycleTotalMax + 1)):
menuId = ContextMenuSingle.nextID()
if ((not isNotDefault) and (cycle == cycleDefault)):
text = _t('{} (default)').format(cycle)
elif ((cycle == cycleCurrent) or (cycle in cyclesToShow)):
text = '{}'.format(cycle)
else:
continue
item = wx.MenuItem(m, menuId, text, kind=wx.ITEM_CHECK)
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
item.Check((isNotDefault and (cycle == cycleCurrent)))
self.cycleMap[menuId] = cycle
self.resetId = ContextMenuSingle.nextID()
item = wx.MenuItem(m, self.resetId, _t('Reset'))
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
return m
def handleSpoolChange(self, event):
if (event.Id == self.resetId):
spoolType = None
spoolAmount = None
elif (event.Id in self.cycleMap):
spoolType = SpoolType.CYCLES
spoolAmount = self.cycleMap[event.Id]
else:
return
fitID = self.mainFrame.getActiveFit()
fit = Fit.getInstance().getFit(fitID)
if (self.context == 'fittingModule'):
if (self.mod in fit.modules):
position = fit.modules.index(self.mod)
self.mainFrame.command.Submit(cmd.GuiChangeLocalModuleSpoolCommand(fitID=fitID, position=position, spoolType=spoolType, spoolAmount=spoolAmount))
elif (self.context == 'projectedModule'):
if (self.mod in fit.projectedModules):
position = fit.projectedModules.index(self.mod)
self.mainFrame.command.Submit(cmd.GuiChangeProjectedModuleSpoolCommand(fitID=fitID, position=position, spoolType=spoolType, spoolAmount=spoolAmount)) |
class LayoutLMTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = LayoutLMTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case
    @property
    def vocab_size(self):
        return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
if (token in self.basic_tokenizer.never_split):
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if (token_ids_1 is not None):
return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
return (([1] + ([0] * len(token_ids_0))) + [1])
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
else:
vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write((token + '\n'))
index += 1
return (vocab_file,) |
def set_cf_time_info(data_arr: xr.DataArray, epoch: (str | None)) -> xr.DataArray:
if (epoch is None):
epoch = EPOCH
data_arr['time'].encoding['units'] = epoch
data_arr['time'].attrs['standard_name'] = 'time'
data_arr['time'].attrs.pop('bounds', None)
if (('time' not in data_arr.dims) and (data_arr['time'].size not in data_arr.shape)):
data_arr = data_arr.expand_dims('time')
return data_arr |
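# Usage sketch for set_cf_time_info above, assuming xarray and numpy are
# installed; a scalar time coordinate is promoted to a real dimension.
import numpy as np
import xarray as xr
arr = xr.DataArray(np.zeros((4, 4)), dims=('y', 'x'), coords={'time': np.datetime64('2024-01-01')})
arr = set_cf_time_info(arr, epoch='seconds since 1970-01-01')
assert arr.dims[0] == 'time' |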
def test_command_with_error(qtbot, py_proc, runner, caplog):
(cmd, args) = py_proc("\n import sys, os, json\n\n with open(os.environ['QUTE_FIFO'], 'w') as f:\n json.dump(os.environ['QUTE_TEXT'], f)\n f.write('\\n')\n\n sys.exit(1)\n ")
with caplog.at_level(logging.ERROR):
with qtbot.wait_signal(runner.finished, timeout=10000):
with qtbot.wait_signal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_text('Hello World')
runner.store_html('')
data = json.loads(blocker.args[0])
assert (not pathlib.Path(data).exists()) |
def _create_tag(repo, manifest, start=None):
name = ('tag-%s' % str(uuid.uuid4()))
now_ms = (int((time.time() * 1000)) if (start is None) else start)
created = Tag.create(name=name, repository=repo, lifetime_start_ms=now_ms, lifetime_end_ms=None, reversion=False, manifest=manifest, tag_kind=Tag.tag_kind.get_id('tag'))
return created |
def test_history_output_file():
app = cmd2.Cmd(multiline_commands=['alias'])
run_cmd(app, 'help')
run_cmd(app, 'shortcuts')
run_cmd(app, 'help history')
run_cmd(app, 'alias create my_alias history;')
(fd, fname) = tempfile.mkstemp(prefix='', suffix='.txt')
os.close(fd)
run_cmd(app, 'history -o "{}"'.format(fname))
assert (app.last_result is True)
expected = normalize('\n'.join(['help', 'shortcuts', 'help history', 'alias create my_alias history;']))
with open(fname) as f:
content = normalize(f.read())
assert (content == expected) |
class VQLoss(nn.Module):
def __init__(self, codebook_weight=1.0):
super().__init__()
self.codebook_weight = codebook_weight
def forward(self, codebook_loss, inputs, reconstructions, split='train'):
rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
nll_loss = rec_loss
nll_loss = torch.mean(nll_loss)
loss = (nll_loss + (self.codebook_weight * codebook_loss.mean()))
log = {'loss_total': loss.clone().detach().mean(), 'loss_codebook': codebook_loss.detach().mean(), 'loss_nll': nll_loss.detach().mean(), 'loss_rec': rec_loss.detach().mean()}
return (loss, log) |
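# Smoke test for VQLoss above, assuming torch is in scope; with perfect
# reconstructions only the weighted codebook term remains.
import torch
vq = VQLoss(codebook_weight=0.5)
x = torch.randn(2, 3, 8, 8)
loss, log = vq(torch.tensor(0.2), x, x)
assert torch.isclose(loss, torch.tensor(0.1))  # 0.5 * 0.2, zero rec loss |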
def test_register_optics(mocker):
from solcore import registries
mock_gr = mocker.patch('solcore.registries.generic_register')
name = 'custom_optics'
overwrite = False
reason_to_exclude = None
_optics(name, overwrite=overwrite, reason_to_exclude=reason_to_exclude)
def solver(*args, **kwargs):
pass
mock_gr.assert_called_once_with(name=name, registrator_name='Optics solver', registry=registries.OPTICS_METHOD_REGISTRY, signature=registries.OPTICS_METHOD_SIGNATURE, overwrite=overwrite, reason_to_exclude=reason_to_exclude) |
def get_benchmarks(task):
if (task == 'wo'):
bench = {'C7H8N2O2': isomers_c7h8n2o2(), 'CNS MPO': cns_mpo(), 'Pioglitazone MPO': pioglitazone_mpo(), 'QED': qed_benchmark()}
elif (task == 'wa'):
bench = {'SA_isomer': isomers_c7h8n2o2(), 'SA_CNS': cns_mpo(), 'SA_pioglitazone': pioglitazone_mpo(), 'SA_QED': qed_benchmark()}
elif (task == 'wc'):
bench = {'SC_isomer': isomers_c7h8n2o2(), 'SC_CNS': cns_mpo(), 'SC_pioglitazone': pioglitazone_mpo(), 'SC_QED': qed_benchmark()}
elif (task == 'v3'):
bench = {'SA_logP_target': logP_benchmark(target=8), 'SA_osimertinib': hard_osimertinib(), 'SA_fexofenadine': hard_fexofenadine(), 'SA_Ranolazine': ranolazine_mpo(), 'SA_perindopril': perindopril_rings(), 'SA_amlodipine': amlodipine_rings(), 'SA_sitagliptin': sitagliptin_replacement(), 'SA_zaleplon': zaleplon_with_other_formula(), 'SA_valsartan': valsartan_smarts(), 'SA_decoration_hop': scaffold_hop(), 'SA_scaffold_hop': decoration_hop()}
elif (task == 'v4'):
bench = {'Osimertinib MPO': hard_osimertinib(), 'Fexofenadine MPO': hard_fexofenadine(), 'Ranolazine MPO': ranolazine_mpo(), 'Perindopril MPO': perindopril_rings(), 'Amlodipine MPO': amlodipine_rings(), 'Sitagliptin MPO': sitagliptin_replacement(), 'Zaleplon MPO': zaleplon_with_other_formula(), 'Valsartan SMARTS': valsartan_smarts(), 'Deco Hop': scaffold_hop(), 'Scaffold Hop': decoration_hop()}
elif (task == 'v5'):
bench = {'SC_logP_target': logP_benchmark(target=8), 'SC_osimertinib': hard_osimertinib(), 'SC_fexofenadine': hard_fexofenadine(), 'SC_Ranolazine': ranolazine_mpo(), 'SC_ranolazine': ranolazine_mpo(), 'SC_perindopril': perindopril_rings(), 'SC_amlodipine': amlodipine_rings(), 'SC_sitagliptin': sitagliptin_replacement(), 'SC_zaleplon': zaleplon_with_other_formula(), 'SC_valsartan': valsartan_smarts(), 'SC_decoration_hop': scaffold_hop(), 'SC_scaffold_hop': decoration_hop()}
return bench |
def test_no_parse_when_parse_args_false(mock_pipe):
out = run('arb pipe', args_in=['one', 'two'], parse_args=False)
assert (type(out) is Context)
assert (out == {})
assert (not out.is_in_pipeline_scope)
mock_pipe.assert_called_once_with(name='arb pipe', context_args=['one', 'two'], parse_input=False, groups=None, success_group=None, failure_group=None, loader=None, py_dir=None)
mock_pipe.return_value.run.assert_called_once_with({}) |
class Migration(migrations.Migration):
dependencies = [('questions', '0054_meta')]
operations = [migrations.AddField(model_name='catalog', name='locked', field=models.BooleanField(default=False, help_text="Designates whether this catalog (and it's sections, question sets and questions) can be changed.", verbose_name='Locked')), migrations.AddField(model_name='question', name='locked', field=models.BooleanField(default=False, help_text='Designates whether this question can be changed.', verbose_name='Locked')), migrations.AddField(model_name='questionset', name='locked', field=models.BooleanField(default=False, help_text="Designates whether this questionset (and it's questions) can be changed.", verbose_name='Locked')), migrations.AddField(model_name='section', name='locked', field=models.BooleanField(default=False, help_text="Designates whether this section (and it's question sets and questions) can be changed.", verbose_name='Locked'))] |
def s3_request(func: Callable):
    @wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if (int(exc.response['Error']['Code']) == 404):
raise FileNotFoundError('file {} not found'.format(url))
else:
raise
return wrapper |
@pytest.mark.parametrize('M, p, size', [(np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), None), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), []), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), [2, 3]), (np.full((1, 2), 10, dtype=np.int64), np.array(0.5, dtype=config.floatX), None)])
def test_binomial_samples(M, p, size):
compare_sample_values(binomial, M, p, size=size) |