code stringlengths 281 23.7M |
|---|
class VideoChunkIteratorProvider():
    """Factory that creates VideoChunkIterator objects with fixed chunking settings."""

    def __init__(self, chunk_frames: int, num_border_frames: int) -> None:
        # Remember the chunk geometry so every produced iterator shares it.
        self._chunk_frames = chunk_frames
        self._num_border_frames = num_border_frames

    def provide(self, video_features: np.ndarray) -> 'VideoChunkIterator':
        """Return an iterator over *video_features* using the stored chunk sizes."""
        return VideoChunkIterator(video_features, self._chunk_frames, self._num_border_frames)
def send_unlock(channel_state: NettingChannelState, message_identifier: MessageID, payment_identifier: PaymentID, secret: Secret, secrethash: SecretHash, block_number: BlockNumber, recipient_metadata: AddressMetadata=None) -> SendUnlock:
    """Create a SendUnlock event for a revealed secret and update our channel end.

    Mutation order matters: the new balance proof and pending-lock set are
    installed on ``channel_state.our_state`` before the unlocked lock is
    deleted, keeping the channel end consistent with the emitted message.
    """
    lock = get_lock(channel_state.our_state, secrethash)
    # Caller contract: the lock for this secrethash must still be pending.
    assert lock, 'caller must ensure the lock exists'
    (unlock, pending_locks) = create_unlock(channel_state, message_identifier, payment_identifier, secret, lock, block_number, recipient_metadata)
    channel_state.our_state.balance_proof = unlock.balance_proof
    channel_state.our_state.pending_locks = pending_locks
    # Drop the lock from our end's bookkeeping now that it is unlocked.
    _del_lock(channel_state.our_state, lock.secrethash)
    return unlock
def iter_by_batch(sequence: Union[(Sized, Iterable, Dataset)], batch_size: int, log_progress: bool=True):
    """Yield items from *sequence* as lists of at most *batch_size* items.

    Random-access sequences (supporting ``__getitem__`` and ``len``) are
    sliced by index; anything else is consumed as a plain iterable.  When
    *log_progress* is true, progress is shown with tqdm.
    """
    # Probe for random access up front. The original wrapped the whole yield
    # loop in try/except, so an IndexError raised by ``sequence[index]`` in
    # the middle of iteration fell into the fallback branch and re-yielded
    # already-emitted items as duplicates.
    try:
        sequence.__getitem__(0)
        size = len(sequence)
        random_access = True
    except (AttributeError, NotImplementedError, IndexError):
        random_access = False
    if random_access:
        step = (batch_size if (batch_size < size) else size)
        starts = range(0, size, step)
        if log_progress:
            # BUG FIX: ``total`` must be the number of batches (integer
            # ceiling), not the float ``size / step``.
            starts = tqdm.tqdm(starts, total=((size + step - 1) // step))
        for slice_start_index in starts:
            slice_end_index = min(slice_start_index + step, size)
            yield [sequence[index] for index in range(slice_start_index, slice_end_index)]
    else:
        iterator = tqdm.tqdm(sequence) if log_progress else sequence
        batch = []
        for item in iterator:
            batch.append(item)
            if len(batch) == batch_size:
                yield batch
                batch = []
        # Flush the trailing partial batch, if any.
        if batch:
            yield batch
def fragment_on_atom_pairs(mol, atom_pairs):
    """Fragment *mol* on the bonds between each atom pair, preserving cis/trans bond directions.

    Each cut bond is replaced by a pair of dummy atoms carrying matched
    isotope labels (label, label+1); the labels are later cleared and used to
    build ``other_atom_table`` mapping each dummy atom to its partner across
    the cut.  Raises ValueError if any requested bond is in a ring.
    """
    bonds = []
    # (neighbor_atom_idx, dummy_label) -> bond direction to restore after fragmenting.
    bond_dirs = {}
    dummy_labels = []
    # Labels start at 2 and advance by 2 so label // 2 - 1 indexes atom_pairs.
    label = 2
    for (a1, a2) in atom_pairs:
        bond = mol.GetBondBetweenAtoms(a1, a2)
        if bond.IsInRing():
            raise ValueError(('Cannot fragment a ring bond (between %d and %d)' % (a1, a2)))
        bonds.append(bond.GetIdx())
        bond_dir = bond.GetBondDir()
        # The dummy-label order and the recorded directions depend on whether
        # the RDKit bond begins at a1 or a2, so handle both orientations.
        if (bond.GetBeginAtomIdx() == a1):
            dummy_labels.append((label, (label + 1)))
            if (bond_dir == Chem.BondDir.ENDDOWNRIGHT):
                bond_dirs[(a1, (label + 1))] = Chem.BondDir.ENDDOWNRIGHT
                bond_dirs[(a2, label)] = Chem.BondDir.ENDUPRIGHT
            elif (bond_dir == Chem.BondDir.ENDUPRIGHT):
                bond_dirs[(a1, (label + 1))] = Chem.BondDir.ENDUPRIGHT
                bond_dirs[(a2, label)] = Chem.BondDir.ENDDOWNRIGHT
        else:
            dummy_labels.append(((label + 1), label))
            # Reversed bond orientation: the stored directions flip accordingly.
            if (bond_dir == Chem.BondDir.ENDUPRIGHT):
                bond_dirs[(a1, (label + 1))] = Chem.BondDir.ENDDOWNRIGHT
                bond_dirs[(a2, label)] = Chem.BondDir.ENDUPRIGHT
            elif (bond_dir == Chem.BondDir.ENDDOWNRIGHT):
                bond_dirs[(a1, (label + 1))] = Chem.BondDir.ENDUPRIGHT
                bond_dirs[(a2, label)] = Chem.BondDir.ENDDOWNRIGHT
        label += 2
    fragmented_mol = Chem.FragmentOnBonds(mol, bonds, dummyLabels=dummy_labels)
    # Collect the two dummy atoms created for each cut, keyed by pair index.
    dummy_pairs = [[] for _ in atom_pairs]
    for atom in fragmented_mol.GetAtoms():
        if (atom.GetAtomicNum() == 0):
            # Dummy atoms carry the label as their isotope (set by FragmentOnBonds).
            label = atom.GetIsotope()
            i = ((label // 2) - 1)
            dummy_pairs[i].append(atom.GetIdx())
            atom.SetIsotope(0)
            # Restore the recorded bond direction on the (single) bond to the
            # dummy's real neighbor; a dummy with no matching entry keeps its
            # direction unchanged, and one with no bonds at all is a bug.
            for bond in atom.GetBonds():
                other_atom_id = bond.GetOtherAtomIdx(atom.GetIdx())
                bond_dir = bond_dirs.get((other_atom_id, label), None)
                if (bond_dir is not None):
                    bond.SetBondDir(bond_dir)
                break
            else:
                raise AssertionError
    # Symmetric lookup: each dummy atom maps to its partner across the cut.
    other_atom_table = {}
    for (a1, a2) in dummy_pairs:
        other_atom_table[a1] = a2
        other_atom_table[a2] = a1
    return (fragmented_mol, other_atom_table)
# NOTE(review): the two bare expressions below look like stripped decorator
# arguments (an API route registration and a path-parameter declaration) --
# confirm against the original source before relying on this snippet.
('/v1/user/robots/<robot_shortname>/regenerate')
_param('robot_shortname', 'The short name for the robot, without any user or organization prefix')
class RegenerateUserRobot(ApiResource):
    """API resource that regenerates the token of one of the caller's robot accounts."""
    # NOTE(review): these two expressions also appear to be decorator remnants
    # (a user-admin permission requirement and an operation nickname).
    _user_admin(disallow_for_restricted_users=True)
    ('regenerateUserRobotToken')
    def post(self, robot_shortname):
        """Regenerate and return the robot's token for the authenticated user."""
        parent = get_authenticated_user()
        robot = model.regenerate_user_robot_token(robot_shortname, parent)
        # Audit-log the regeneration under the owning user's name.
        log_action('regenerate_robot_token', parent.username, {'robot': robot_shortname})
        return robot.to_dict(include_token=True)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Write a minimal LLaMA tokenizer directory at *tokenizer_path*.

    Emits an empty special-tokens map, a tokenizer config, and a copy of the
    sentencepiece model from *input_tokenizer_path*.
    """
    os.makedirs(tokenizer_path, exist_ok=True)
    write_json({}, os.path.join(tokenizer_path, 'special_tokens_map.json'))
    tokenizer_config = {
        'bos_token': '',
        'eos_token': '',
        'model_max_length': int(1e+30),
        'tokenizer_class': 'LLaMATokenizer',
        'unk_token': '',
    }
    write_json(tokenizer_config, os.path.join(tokenizer_path, 'tokenizer_config.json'))
    shutil.copyfile(input_tokenizer_path, os.path.join(tokenizer_path, 'tokenizer.model'))
class TestRFC8441(object):
    """Tests for RFC 8441 extended CONNECT (bootstrapping WebSockets over HTTP/2)."""

    def test_can_send_headers(self, frame_factory):
        # BUG FIX: the snippet's header list was corrupted mid-literal (the
        # :scheme value was broken, making the file a SyntaxError); restored
        # to b'https' as in the upstream h2 test suite.
        headers = [(b':authority', b'example.com'), (b':path', b'/'), (b':scheme', b'https'), (b':method', b'CONNECT'), (b':protocol', b'websocket'), (b'user-agent', b'someua/0.0.1')]
        client = h2.connection.H2Connection()
        client.initiate_connection()
        client.send_headers(stream_id=1, headers=headers)
        # Feed the client's bytes into a server-side connection and inspect events.
        server = h2.connection.H2Connection(config=h2.config.H2Configuration(client_side=False))
        events = server.receive_data(client.data_to_send())
        # events[0] is presumably the settings event from the preamble; the
        # request itself arrives second.
        event = events[1]
        assert isinstance(event, h2.events.RequestReceived)
        assert (event.stream_id == 1)
        assert (event.headers == headers)
class CmapDropdown(QtWidgets.QComboBox):
    """Combo box listing the available colormaps with preview icons."""

    def __init__(self, *args, startcmap='viridis', **kwargs):
        super().__init__(*args, **kwargs)
        self.setIconSize(QSize(100, 15))
        self.view().setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        # Populate one entry per available colormap preview.
        for (name, pixmap) in get_cmap_pixmaps():
            self.addItem(name, pixmap)
        # Force a plain (non-native) popup so setMaxVisibleItems takes effect.
        self.setStyleSheet('combobox-popup: 0;')
        self.setMaxVisibleItems(10)
        start_index = self.findText(startcmap)
        if start_index != -1:
            self.setCurrentIndex(start_index)

    def enterEvent(self, e):
        # Show contextual help only when the main window has help mode enabled.
        if self.window().showhelp is True:
            QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Colormap</h3>Set the colormap that should be used when plotting the data.')

    def wheelEvent(self, e):
        # Swallow wheel events so scrolling over the widget never changes the selection.
        pass
class CrossEntropyLoss(torch.nn.Module):
    """Token-level cross entropy with padded positions masked out by sequence length.

    ``forward`` expects ``output`` to contain ``logit`` (class scores with the
    class dimension last), ``tgt`` (target indices) and ``tgt_len`` (per-sample
    lengths); positions beyond each length are zeroed via ``generate_length_mask``.
    """

    def __init__(self, reduction='mean'):
        super().__init__()
        # Keep the inner loss unreduced -- masking and reduction happen manually.
        self.loss_fn = torch.nn.CrossEntropyLoss(reduction='none')
        self.reduction = reduction

    def forward(self, output: Dict):
        logit = output['logit']
        tgt = output['tgt']
        tgt_len = output['tgt_len']
        c = logit.size(-1)
        # Flatten to (N, C) / (N,) for the elementwise loss, then restore shape.
        loss = self.loss_fn(logit.reshape(-1, c), tgt.reshape(-1))
        loss = loss.reshape(*tgt.shape)
        mask = generate_length_mask(tgt_len).to(logit.device)
        loss *= mask
        if self.reduction == 'none':
            return loss
        elif self.reduction == 'mean':
            # Average over valid (unmasked) positions only.
            return loss.sum() / mask.sum()
        elif self.reduction == 'sum':
            return loss.sum()
        # BUG FIX: an unrecognized reduction previously fell through and
        # silently returned None; fail loudly instead.
        raise ValueError(f'Unsupported reduction: {self.reduction!r}')
def parse_mul_tree(root):
    """Recursively decompose *root* into [negated?, subtree-or-list-of-subtrees].

    Multiplication nodes expand into a list of parsed factors; negation nodes
    flip the sign flag of their operand; anything else is returned as a leaf.
    """
    factors = is_mul(root)
    if factors is not None:
        # A product: parse every factor, no sign flip at this level.
        return [False, [parse_mul_tree(factor) for factor in factors]]
    negated_operand = is_neg(root)
    if negated_operand is None:
        # Plain leaf node.
        return [False, root]
    # Negation: parse the operand and invert its sign flag.
    (inner_sign, inner_tree) = parse_mul_tree(negated_operand)
    return [(not inner_sign), inner_tree]
def test_list_json(pipx_temp_env, capsys):
    """End-to-end check that ``pipx list --json`` reports installed and injected packages."""
    pipx_venvs_dir = (constants.PIPX_HOME / 'venvs')
    # Platform-dependent script directory inside a venv.
    venv_bin_dir = ('Scripts' if constants.WINDOWS else 'bin')
    # run_pipx_cli returns a falsy exit status on success.
    assert (not run_pipx_cli(['install', PKG['pycowsay']['spec']]))
    assert (not run_pipx_cli(['install', PKG['pylint']['spec']]))
    assert (not run_pipx_cli(['inject', 'pylint', PKG['isort']['spec']]))
    # Discard output from the install/inject commands before capturing list output.
    captured = capsys.readouterr()
    assert (not run_pipx_cli(['list', '--json']))
    captured = capsys.readouterr()
    # stderr must be empty (no warnings) for clean JSON output.
    assert (not re.search('\\S', captured.err))
    json_parsed = json.loads(captured.out, object_hook=_json_decoder_object_hook)
    assert (sorted(json_parsed['venvs'].keys()) == ['pycowsay', 'pylint'])
    # Main package metadata for pycowsay matches the reference record.
    pycowsay_package_ref = create_package_info_ref('pycowsay', 'pycowsay', pipx_venvs_dir)
    assert_package_metadata(PackageInfo(**json_parsed['venvs']['pycowsay']['metadata']['main_package']), pycowsay_package_ref)
    assert (json_parsed['venvs']['pycowsay']['metadata']['injected_packages'] == {})
    # pylint's metadata includes apps contributed by its isort dependency.
    pylint_package_ref = create_package_info_ref('pylint', 'pylint', pipx_venvs_dir, **{'app_paths_of_dependencies': {'isort': [(((pipx_venvs_dir / 'pylint') / venv_bin_dir) / app_name('isort'))]}})
    assert_package_metadata(PackageInfo(**json_parsed['venvs']['pylint']['metadata']['main_package']), pylint_package_ref)
    assert (sorted(json_parsed['venvs']['pylint']['metadata']['injected_packages'].keys()) == ['isort'])
    # isort was injected into the pylint venv, without its apps exposed.
    isort_package_ref = create_package_info_ref('pylint', 'isort', pipx_venvs_dir, include_apps=False)
    print(isort_package_ref)
    print(PackageInfo(**json_parsed['venvs']['pylint']['metadata']['injected_packages']['isort']))
    assert_package_metadata(PackageInfo(**json_parsed['venvs']['pylint']['metadata']['injected_packages']['isort']), isort_package_ref)
class _DefaultCmdLineCallback(object):
    """Accumulates per-iteration training metrics and flushes averages at validation time."""

    def __init__(self):
        # Metric name -> list of raw values gathered since the last flush.
        self.train_vals = {}

    def __call__(self, viz, mode, it, k, v):
        if mode == 'train':
            # Just accumulate; averaging happens when a 'val' call arrives.
            history = self.train_vals.get(k, [])
            history.append(v)
            self.train_vals[k] = history
        elif mode == 'val':
            # Report the mean of the accumulated training values and of the
            # validation values, then reset the training accumulator.
            viz.append_element(k, it, np.mean(np.array(self.train_vals[k])), 'train')
            viz.append_element(k, it, np.mean(np.array(v)), 'val')
            self.train_vals[k] = []
# NOTE(review): the original snippet had its decorators stripped down to the
# bare expressions "()" and "('circle-config-file', type=..., default=...)",
# which is a SyntaxError. They are reconstructed below as click decorators --
# confirm the exact decorator names against the original source.
@click.command()
@click.argument('circle-config-file', type=click.File('rt'), default='.circleci/config.yml')
def main(circle_config_file):
    """Validate that the workflows in a CircleCI config file are aligned.

    Exits with status 1 on invalid YAML or misaligned workflows.
    """
    try:
        config = yaml.safe_load(circle_config_file)
    except ParserError as ex:
        click.secho(f'Invalid yaml file: {ex}', fg='red')
        sys.exit(1)
    (result, message) = _check_workflows_align(config)
    if (result is False):
        click.echo(message)
        sys.exit(1)
# NOTE(review): the original line was a bare "(params=...)" expression -- a
# stripped decorator and a SyntaxError. Reconstructed as a parametrized
# pytest fixture; confirm the decorator against the original source.
@pytest.fixture(params=xdist_sort_hack(['scipy.stats.qmc: centered-discrepancy optimization of a Latin hypercube', 'inverse missing in idstn, idctn (#14479)', 'Merge pull request #14447 from AnirudhDagar/rename_ndimage_modules', 'Add tests for args kwarg in quad_vec', 'badge with version of the doc in the navbar (#14132)', 'Bump scipy from 1.7.0 to 1.7.1 (#28)', 'avoid nan if angle=0 in RotvecRotation']))
def subject(request):
    """Yield each sample commit-message string in turn to the requesting test."""
    return request.param
class SecurityGenerateAuthorization(rq.ReplyRequest):
    """SECURITY extension SecurityGenerateAuthorization request/reply wire format.

    Field order in each rq.Struct defines the on-the-wire layout and must not
    be changed.  LengthOf fields are 2-byte counts for the variable-length
    auth_proto / auth_data payloads; 'values' is masked by 'value_mask'.
    """
    _request = rq.Struct(rq.Card8('opcode'), rq.Opcode(1), rq.RequestLength(), rq.LengthOf('auth_proto', 2), rq.LengthOf('auth_data', 2), rq.Card32('value_mask'), rq.String8('auth_proto'), rq.Binary('auth_data'), rq.List('values', rq.Card32Obj))
    _reply = rq.Struct(rq.ReplyCode(), rq.Pad(1), rq.Card16('sequence_number'), rq.ReplyLength(), AUTHID('authid'), rq.LengthOf('auth_data_return', 2), rq.Pad(18), rq.Binary('auth_data_return'))
class TestPerChannelQuantizationKeras(unittest.TestCase):
    """Checks that per-channel quantization encodings are learned during training."""

    def test_per_channel_range_learning(self):
        """Train a quantsim model and assert param/output encodings actually move."""
        if (version.parse(tf.version.VERSION) >= version.parse('2.00')):
            tf.keras.backend.clear_session()
            inputs = tf.keras.layers.Input(shape=(32, 32, 4))
            conv_op = tf.keras.layers.Conv2D(2, (3, 3), kernel_initializer=tf.random_uniform_initializer((- 1), 2), bias_initializer='random_uniform', padding='SAME')(inputs)
            relu_op = tf.keras.layers.ReLU()(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            dense = tf.keras.layers.Dense(10, bias_initializer='random_uniform')(reshape)
            model = tf.keras.Model(inputs=inputs, outputs=dense, name='conv_functional')
            save_config_file_bias_quantized_for_per_channel_quantization()
            qsim = QuantizationSimModel(model, quant_scheme=QuantScheme.training_range_learning_with_tf_init, default_param_bw=8, default_output_bw=8, config_file='./quantsim_config.json')
            # Input quantizers are not under test here; disable them.
            for wrapper in qsim.quant_wrappers():
                wrapper.input_quantizers[0].disable()
            input_shape = inputs.shape.as_list()
            batches = 32
            input_data = np.random.rand(batches, input_shape[1], input_shape[2], input_shape[3])
            labels = np.random.randint(10, size=batches)
            one_hot_labels = np.eye(10)[labels]
            qsim.compute_encodings((lambda m, _: m.predict(input_data)), None)
            qsim.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.MeanSquaredError())
            _get_value = tf.keras.backend.get_value
            encoding_min_before_train = _get_value(qsim.model.layers[1].param_quantizers[0].encoding_min)
            encoding_max_before_train = _get_value(qsim.model.layers[1].param_quantizers[0].encoding_max)
            conv2d_output_encoding_min_before_train = _get_value(qsim.model.layers[1].output_quantizers[0]._encoding_min)
            # BUG FIX: the four *_max_* snapshots below previously read
            # ``_encoding_min``, so the "max moved" assertions re-tested min.
            conv2d_output_encoding_max_before_train = _get_value(qsim.model.layers[1].output_quantizers[0]._encoding_max)
            dense_bias_encoding_min_before_train = _get_value(qsim.model.layers[4].output_quantizers[0]._encoding_min)
            dense_bias_encoding_max_before_train = _get_value(qsim.model.layers[4].output_quantizers[0]._encoding_max)
            for _ in range(10):
                qsim.fit(input_data, one_hot_labels)
            encoding_min_after_train = _get_value(qsim.model.layers[1].param_quantizers[0].encoding_min)
            encoding_max_after_train = _get_value(qsim.model.layers[1].param_quantizers[0].encoding_max)
            conv2d_output_encoding_min_after_train = _get_value(qsim.model.layers[1].output_quantizers[0]._encoding_min)
            conv2d_output_encoding_max_after_train = _get_value(qsim.model.layers[1].output_quantizers[0]._encoding_max)
            dense_bias_encoding_min_after_train = _get_value(qsim.model.layers[4].output_quantizers[0]._encoding_min)
            dense_bias_encoding_max_after_train = _get_value(qsim.model.layers[4].output_quantizers[0]._encoding_max)
            # Range learning should have adjusted every tracked encoding.
            assert (not np.array_equal(encoding_min_before_train, encoding_min_after_train))
            assert (not np.array_equal(encoding_max_before_train, encoding_max_after_train))
            assert (not np.array_equal(conv2d_output_encoding_min_before_train, conv2d_output_encoding_min_after_train))
            assert (not np.array_equal(conv2d_output_encoding_max_before_train, conv2d_output_encoding_max_after_train))
            assert (not np.array_equal(dense_bias_encoding_min_before_train, dense_bias_encoding_min_after_train))
            assert (not np.array_equal(dense_bias_encoding_max_before_train, dense_bias_encoding_max_after_train))
def convert(data_dir):
    """Convert every *.gnt file under *data_dir* into per-character .npy arrays.

    Each value accumulated in ``data_dict`` is reshaped into square
    single-channel images of side ``FLAGS.output_size`` and saved under
    ``FLAGS.target_dir`` keyed by the (encoded) character.
    """
    data_dict = {}
    for gnt in os.listdir(data_dir):
        if gnt.endswith('gnt'):
            # os.path.join instead of bare concatenation, so data_dir does
            # not need a trailing separator.
            load_one_file(os.path.join(data_dir, gnt), data_dict)
    for (k, v) in data_dict.items():
        # BUG FIX: use integer division; a float image count makes
        # np.reshape raise a TypeError under Python 3.
        num = v.shape[0] // FLAGS.output_size
        v = v.reshape([num, FLAGS.output_size, FLAGS.output_size, 1])
        np.save('{0}/{1}.npy'.format(FLAGS.target_dir, k.encode('utf-8')), v)
def create_structure_set_roi(roi_data: ROIData) -> Dataset:
    """Build a StructureSetROISequence item Dataset populated from *roi_data*."""
    structure_set_roi = Dataset()
    # Map the ROIData fields onto the DICOM attributes of the sequence item.
    attribute_values = (
        ('ROINumber', roi_data.number),
        ('ReferencedFrameOfReferenceUID', roi_data.frame_of_reference_uid),
        ('ROIName', roi_data.name),
        ('ROIDescription', roi_data.description),
        ('ROIGenerationAlgorithm', roi_data.roi_generation_algorithm),
    )
    for (attribute, value) in attribute_values:
        setattr(structure_set_roi, attribute, value)
    return structure_set_roi
class SingleRealsense(mp.Process):
    """Worker process that streams one RealSense camera into shared-memory ring buffers.

    Frames are captured in ``run`` (the child process), optionally transformed,
    and published to a main ring buffer, a visualization ring buffer, and an
    optional video recorder.  Control messages (exposure, recording, restart)
    are delivered through a shared-memory command queue.
    """
    MAX_PATH_LENGTH = 4096

    def __init__(self, shm_manager: SharedMemoryManager, serial_number, resolution=(1280, 720), capture_fps=30, put_fps=None, put_downsample=True, record_fps=None, enable_color=True, enable_depth=False, enable_infrared=False, get_max_k=30, advanced_mode_config=None, transform: Optional[Callable[([Dict], Dict)]]=None, vis_transform: Optional[Callable[([Dict], Dict)]]=None, recording_transform: Optional[Callable[([Dict], Dict)]]=None, video_recorder: Optional[VideoRecorder]=None, verbose=False):
        super().__init__()
        # put/record rates default to the capture rate.
        if (put_fps is None):
            put_fps = capture_fps
        if (record_fps is None):
            record_fps = capture_fps
        resolution = tuple(resolution)
        # Image arrays are (height, width), i.e. resolution reversed.
        shape = resolution[::(- 1)]
        # Build example payloads so the ring buffers can pre-allocate storage.
        examples = dict()
        if enable_color:
            examples['color'] = np.empty(shape=(shape + (3,)), dtype=np.uint8)
        if enable_depth:
            examples['depth'] = np.empty(shape=shape, dtype=np.uint16)
        if enable_infrared:
            examples['infrared'] = np.empty(shape=shape, dtype=np.uint8)
        examples['camera_capture_timestamp'] = 0.0
        examples['camera_receive_timestamp'] = 0.0
        examples['timestamp'] = 0.0
        examples['step_idx'] = 0
        vis_ring_buffer = SharedMemoryRingBuffer.create_from_examples(shm_manager=shm_manager, examples=(examples if (vis_transform is None) else vis_transform(dict(examples))), get_max_k=1, get_time_budget=0.2, put_desired_frequency=capture_fps)
        ring_buffer = SharedMemoryRingBuffer.create_from_examples(shm_manager=shm_manager, examples=(examples if (transform is None) else transform(dict(examples))), get_max_k=get_max_k, get_time_budget=0.2, put_desired_frequency=put_fps)
        # Command queue example: a superset of all command payload fields.
        examples = {'cmd': Command.SET_COLOR_OPTION.value, 'option_enum': rs.option.exposure.value, 'option_value': 0.0, 'video_path': np.array(('a' * self.MAX_PATH_LENGTH)), 'recording_start_time': 0.0, 'put_start_time': 0.0}
        command_queue = SharedMemoryQueue.create_from_examples(shm_manager=shm_manager, examples=examples, buffer_size=128)
        # fx, fy, ppx, ppy, height, width, depth_scale (filled in by run()).
        intrinsics_array = SharedNDArray.create_from_shape(mem_mgr=shm_manager, shape=(7,), dtype=np.float64)
        intrinsics_array.get()[:] = 0
        if (video_recorder is None):
            video_recorder = VideoRecorder.create_h264(fps=record_fps, codec='h264', input_pix_fmt='bgr24', crf=18, thread_type='FRAME', thread_count=1)
        self.serial_number = serial_number
        self.resolution = resolution
        self.capture_fps = capture_fps
        self.put_fps = put_fps
        self.put_downsample = put_downsample
        self.record_fps = record_fps
        self.enable_color = enable_color
        self.enable_depth = enable_depth
        self.enable_infrared = enable_infrared
        self.advanced_mode_config = advanced_mode_config
        self.transform = transform
        self.vis_transform = vis_transform
        self.recording_transform = recording_transform
        self.video_recorder = video_recorder
        self.verbose = verbose
        self.put_start_time = None
        self.stop_event = mp.Event()
        self.ready_event = mp.Event()
        self.ring_buffer = ring_buffer
        self.vis_ring_buffer = vis_ring_buffer
        self.command_queue = command_queue
        self.intrinsics_array = intrinsics_array

    @staticmethod
    def get_connected_devices_serial():
        """Return the sorted serial numbers of all connected D400-series cameras.

        BUG FIX: this method takes no ``self``, but previously lacked
        ``@staticmethod``, so calling it on an instance raised a TypeError.
        """
        serials = list()
        for d in rs.context().devices:
            if (d.get_info(rs.camera_info.name).lower() != 'platform camera'):
                serial = d.get_info(rs.camera_info.serial_number)
                product_line = d.get_info(rs.camera_info.product_line)
                if (product_line == 'D400'):
                    serials.append(serial)
        serials = sorted(serials)
        return serials

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def start(self, wait=True, put_start_time=None):
        """Launch the worker process; optionally block until it is ready."""
        self.put_start_time = put_start_time
        super().start()
        if wait:
            self.start_wait()

    def stop(self, wait=True):
        """Signal the worker to exit; optionally block until it has joined."""
        self.stop_event.set()
        if wait:
            self.end_wait()

    def start_wait(self):
        self.ready_event.wait()

    def end_wait(self):
        self.join()

    def is_ready(self):
        return self.ready_event.is_set()

    def get(self, k=None, out=None):
        """Return the latest sample, or the last *k* samples when *k* is given."""
        if (k is None):
            return self.ring_buffer.get(out=out)
        else:
            return self.ring_buffer.get_last_k(k, out=out)

    def get_vis(self, out=None):
        return self.vis_ring_buffer.get(out=out)

    def set_color_option(self, option: rs.option, value: float):
        self.command_queue.put({'cmd': Command.SET_COLOR_OPTION.value, 'option_enum': option.value, 'option_value': value})

    def set_exposure(self, exposure=None, gain=None):
        """Set manual exposure/gain; with both None, re-enable auto exposure."""
        if ((exposure is None) and (gain is None)):
            self.set_color_option(rs.option.enable_auto_exposure, 1.0)
        else:
            self.set_color_option(rs.option.enable_auto_exposure, 0.0)
            if (exposure is not None):
                self.set_color_option(rs.option.exposure, exposure)
            if (gain is not None):
                self.set_color_option(rs.option.gain, gain)

    def set_white_balance(self, white_balance=None):
        """Set manual white balance; with None, re-enable auto white balance."""
        if (white_balance is None):
            self.set_color_option(rs.option.enable_auto_white_balance, 1.0)
        else:
            self.set_color_option(rs.option.enable_auto_white_balance, 0.0)
            self.set_color_option(rs.option.white_balance, white_balance)

    def get_intrinsics(self):
        """Return the 3x3 camera intrinsics matrix captured by the worker."""
        assert self.ready_event.is_set()
        (fx, fy, ppx, ppy) = self.intrinsics_array.get()[:4]
        mat = np.eye(3)
        mat[(0, 0)] = fx
        mat[(1, 1)] = fy
        mat[(0, 2)] = ppx
        mat[(1, 2)] = ppy
        return mat

    def get_depth_scale(self):
        assert self.ready_event.is_set()
        # Depth scale is stored in the last slot of the intrinsics array.
        scale = self.intrinsics_array.get()[(- 1)]
        return scale

    def start_recording(self, video_path: str, start_time: float=(- 1)):
        assert self.enable_color
        # The command queue carries fixed-size strings; enforce the limit.
        path_len = len(video_path.encode('utf-8'))
        if (path_len > self.MAX_PATH_LENGTH):
            raise RuntimeError('video_path too long.')
        self.command_queue.put({'cmd': Command.START_RECORDING.value, 'video_path': video_path, 'recording_start_time': start_time})

    def stop_recording(self):
        self.command_queue.put({'cmd': Command.STOP_RECORDING.value})

    def restart_put(self, start_time):
        self.command_queue.put({'cmd': Command.RESTART_PUT.value, 'put_start_time': start_time})

    def run(self):
        """Child-process main loop: capture, transform, publish, handle commands."""
        # Limit per-process threading; many camera workers may run in parallel.
        threadpool_limits(1)
        cv2.setNumThreads(1)
        (w, h) = self.resolution
        fps = self.capture_fps
        align = rs.align(rs.stream.color)
        rs_config = rs.config()
        if self.enable_color:
            rs_config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, fps)
        if self.enable_depth:
            rs_config.enable_stream(rs.stream.depth, w, h, rs.format.z16, fps)
        if self.enable_infrared:
            rs_config.enable_stream(rs.stream.infrared, w, h, rs.format.y8, fps)
        try:
            rs_config.enable_device(self.serial_number)
            pipeline = rs.pipeline()
            pipeline_profile = pipeline.start(rs_config)
            # Use the device's global time base for frame timestamps.
            d = pipeline_profile.get_device().first_color_sensor()
            d.set_option(rs.option.global_time_enabled, 1)
            if (self.advanced_mode_config is not None):
                json_text = json.dumps(self.advanced_mode_config)
                device = pipeline_profile.get_device()
                advanced_mode = rs.rs400_advanced_mode(device)
                advanced_mode.load_json(json_text)
            # Publish intrinsics (and depth scale) for the parent process.
            color_stream = pipeline_profile.get_stream(rs.stream.color)
            intr = color_stream.as_video_stream_profile().get_intrinsics()
            order = ['fx', 'fy', 'ppx', 'ppy', 'height', 'width']
            for (i, name) in enumerate(order):
                self.intrinsics_array.get()[i] = getattr(intr, name)
            if self.enable_depth:
                depth_sensor = pipeline_profile.get_device().first_depth_sensor()
                depth_scale = depth_sensor.get_depth_scale()
                self.intrinsics_array.get()[(- 1)] = depth_scale
            if self.verbose:
                print(f'[SingleRealsense {self.serial_number}] Main loop started.')
            put_idx = None
            put_start_time = self.put_start_time
            if (put_start_time is None):
                put_start_time = time.time()
            iter_idx = 0
            t_start = time.time()
            while (not self.stop_event.is_set()):
                # Grab a frameset and align all streams to the color frame.
                frameset = pipeline.wait_for_frames()
                receive_time = time.time()
                frameset = align.process(frameset)
                data = dict()
                data['camera_receive_timestamp'] = receive_time
                data['camera_capture_timestamp'] = (frameset.get_timestamp() / 1000)
                if self.enable_color:
                    color_frame = frameset.get_color_frame()
                    data['color'] = np.asarray(color_frame.get_data())
                    # Prefer the color frame's own timestamp when available.
                    t = (color_frame.get_timestamp() / 1000)
                    data['camera_capture_timestamp'] = t
                if self.enable_depth:
                    data['depth'] = np.asarray(frameset.get_depth_frame().get_data())
                if self.enable_infrared:
                    data['infrared'] = np.asarray(frameset.get_infrared_frame().get_data())
                put_data = data
                if (self.transform is not None):
                    put_data = self.transform(dict(data))
                if self.put_downsample:
                    # Downsample to put_fps by assigning frames to global steps.
                    (local_idxs, global_idxs, put_idx) = get_accumulate_timestamp_idxs(timestamps=[receive_time], start_time=put_start_time, dt=(1 / self.put_fps), next_global_idx=put_idx, allow_negative=True)
                    for step_idx in global_idxs:
                        put_data['step_idx'] = step_idx
                        put_data['timestamp'] = receive_time
                        self.ring_buffer.put(put_data, wait=False)
                else:
                    step_idx = int(((receive_time - put_start_time) * self.put_fps))
                    put_data['step_idx'] = step_idx
                    put_data['timestamp'] = receive_time
                    self.ring_buffer.put(put_data, wait=False)
                # Signal readiness once the first frame has been published.
                if (iter_idx == 0):
                    self.ready_event.set()
                # Publish to the visualization buffer, reusing put_data when
                # the transforms are identical.
                vis_data = data
                if (self.vis_transform == self.transform):
                    vis_data = put_data
                elif (self.vis_transform is not None):
                    vis_data = self.vis_transform(dict(data))
                self.vis_ring_buffer.put(vis_data, wait=False)
                rec_data = data
                if (self.recording_transform == self.transform):
                    rec_data = put_data
                elif (self.recording_transform is not None):
                    rec_data = self.recording_transform(dict(data))
                if self.video_recorder.is_ready():
                    self.video_recorder.write_frame(rec_data['color'], frame_time=receive_time)
                t_end = time.time()
                duration = (t_end - t_start)
                frequency = np.round((1 / duration), 1)
                t_start = t_end
                if self.verbose:
                    print(f'[SingleRealsense {self.serial_number}] FPS {frequency}')
                # Drain and execute any pending commands from the parent.
                try:
                    commands = self.command_queue.get_all()
                    n_cmd = len(commands['cmd'])
                except Empty:
                    n_cmd = 0
                for i in range(n_cmd):
                    command = dict()
                    for (key, value) in commands.items():
                        command[key] = value[i]
                    cmd = command['cmd']
                    if (cmd == Command.SET_COLOR_OPTION.value):
                        sensor = pipeline_profile.get_device().first_color_sensor()
                        option = rs.option(command['option_enum'])
                        value = float(command['option_value'])
                        sensor.set_option(option, value)
                    elif (cmd == Command.SET_DEPTH_OPTION.value):
                        sensor = pipeline_profile.get_device().first_depth_sensor()
                        option = rs.option(command['option_enum'])
                        value = float(command['option_value'])
                        sensor.set_option(option, value)
                    elif (cmd == Command.START_RECORDING.value):
                        video_path = str(command['video_path'])
                        start_time = command['recording_start_time']
                        if (start_time < 0):
                            # Negative start time means "start immediately".
                            start_time = None
                        self.video_recorder.start(video_path, start_time=start_time)
                    elif (cmd == Command.STOP_RECORDING.value):
                        self.video_recorder.stop()
                        put_idx = None
                    elif (cmd == Command.RESTART_PUT.value):
                        put_idx = None
                        put_start_time = command['put_start_time']
                iter_idx += 1
        finally:
            self.video_recorder.stop()
            rs_config.disable_all_streams()
            # Set ready even on failure so start_wait() never blocks forever.
            self.ready_event.set()
            if self.verbose:
                print(f'[SingleRealsense {self.serial_number}] Exiting worker process.')
class TestBinaryPrecision(unittest.TestCase):
    """Cross-checks binary_precision against sklearn's precision_score."""

    def _test_binary_precision_with_input(self, input: torch.Tensor, target: torch.Tensor, threshold: float=0.5) -> None:
        # Binarize predictions the same way binary_precision is expected to.
        binarized = np.where((input.numpy() < threshold), 0, 1)
        expected = torch.tensor(precision_score(target.squeeze().numpy(), binarized)).to(torch.float32)
        actual = binary_precision(input, target, threshold=threshold)
        torch.testing.assert_close(actual, expected, equal_nan=True, atol=1e-08, rtol=1e-05)

    def test_binary_precision(self) -> None:
        # Already-binary integer predictions.
        input = torch.randint(high=2, size=(BATCH_SIZE,))
        target = torch.randint(high=2, size=(BATCH_SIZE,))
        self._test_binary_precision_with_input(input, target)

    def test_binary_precision_threshold(self) -> None:
        # Float scores thresholded at boundary and out-of-range values.
        input = torch.rand(size=(BATCH_SIZE,))
        target = torch.randint(high=2, size=(BATCH_SIZE,))
        for threshold in (0, 0.2, 0.8, 1, 2):
            self._test_binary_precision_with_input(input, target, threshold=threshold)

    def test_binary_precision_with_rounding(self) -> None:
        # Float scores with the default 0.5 threshold.
        input = torch.rand(size=(BATCH_SIZE,))
        target = torch.randint(high=2, size=(BATCH_SIZE,))
        self._test_binary_precision_with_input(input, target)

    def test_binary_precision_invalid_input(self) -> None:
        with self.assertRaisesRegex(ValueError, 'The `input` and `target` should have the same dimensions, got shapes torch.Size\\(\\[4, 2\\]\\) and torch.Size\\(\\[3\\]\\).'):
            binary_precision(torch.rand(4, 2), torch.rand(3))
def _create_basis_sweeps(H_params: List[sympy.Symbol], S_params: List[sympy.Symbol], n_shots: int, rand_state: np.random.RandomState) -> Tuple[(List[Dict[(str, int)]], np.ndarray)]:
    """Draw a random basis choice (0, 1 or 2) per parameter for each shot.

    Returns one parameter sweep per shot plus the raw (n_shots, n_params)
    basis array that produced them.
    """
    assert (len(H_params) == len(S_params))
    # Basis choice -> (H value, S value) assigned to the matching parameters.
    basis_values = {0: (0.0, 0.0), 1: (0.5, 0.5), 2: ((- 1.0), 0.5)}
    all_bases = rand_state.randint(0, 3, size=(n_shots, len(H_params)))
    all_sweeps = []
    for basis_row in all_bases:
        sweep = dict()
        for (h_param, s_param, choice) in zip(H_params, S_params, basis_row):
            (h_value, s_value) = basis_values[int(choice)]
            sweep[h_param] = h_value
            sweep[s_param] = s_value
        all_sweeps.append(sweep)
    return (all_sweeps, all_bases)
class CompilationDatabase(ClangObject):
    """Python wrapper around libclang's CompilationDatabase.

    NOTE(review): ``from_result`` follows the ctypes ``errcheck`` convention
    and is presumably registered as the result converter for the underlying C
    functions elsewhere in this module -- confirm before reusing standalone.
    """
    def __del__(self):
        conf.lib.clang_CompilationDatabase_dispose(self)
    def from_result(res, fn, args):
        # errcheck hook: a NULL pointer result means the database failed to load.
        if (not res):
            raise CompilationDatabaseError(0, 'CompilationDatabaseError loading failed' if False else 'CompilationDatabase loading failed')
        return CompilationDatabase(res)
    def fromDirectory(buildDir):
        """Load a compilation database (compile_commands.json) from *buildDir*."""
        errorCode = c_uint()
        try:
            cdb = conf.lib.clang_CompilationDatabase_fromDirectory(buildDir, byref(errorCode))
        except CompilationDatabaseError as e:
            # Re-raise with the specific error code libclang wrote into errorCode.
            raise CompilationDatabaseError(int(errorCode.value), 'CompilationDatabase loading failed')
        return cdb
    def getCompileCommands(self, filename):
        """Return the compile commands recorded for *filename*."""
        return conf.lib.clang_CompilationDatabase_getCompileCommands(self, filename)
    def getAllCompileCommands(self):
        """Return every compile command in the database."""
        return conf.lib.clang_CompilationDatabase_getAllCompileCommands(self)
def generate_distances_network_part5():
    """Collect the per-layer alias-method J tables from disk and save the merged dict."""
    alias_tables = {}
    layer = 0
    # Layers are stored as consecutively numbered pickles; stop at the first gap.
    while isPickle(('alias_method_j-layer-' + str(layer))):
        logging.info('Executing layer {}...'.format(layer))
        alias_tables[layer] = restoreVariableFromDisk(('alias_method_j-layer-' + str(layer)))
        logging.info('Layer {} executed.'.format(layer))
        layer += 1
    logging.info('Saving nets_weights_alias_method_j on disk...')
    saveVariableOnDisk(alias_tables, 'nets_weights_alias_method_j')
class LayerDefinitionCreateView(ResourceMixin, ResourceBaseCreateView):
    """Upload/create view for layer-definition resources."""
    form_class = UploadForm
    # Uploads go through the custom license-agreement flow.
    is_custom_license_agreement = True
    def form_valid(self, form):
        """Attach creator and file-derived metadata, save, and notify reviewers."""
        obj = form.save(commit=False)
        obj.creator = self.request.user
        # NOTE(review): both helpers read the same uploaded file object; if
        # they consume the stream, call order matters -- confirm.
        obj.url_datasource = get_url_datasource(obj.file.file)
        obj.provider = get_provider(obj.file.file)
        obj.save()
        resource_notify(obj, resource_type=self.resource_name)
        msg = _(self.success_message)
        messages.success(self.request, msg, 'success', fail_silently=True)
        # Deliberately skips ResourceBaseCreateView.form_valid by supering past it.
        return super(ResourceBaseCreateView, self).form_valid(form)
def test_template_basics():
    """Check gettext/ngettext/trans template helpers under the de_DE locale."""
    app = flask.Flask(__name__)
    babel.Babel(app, default_locale='de_DE')
    def t(x):
        # Render a single template expression string.
        return flask.render_template_string(('{{ %s }}' % x))
    with app.test_request_context():
        assert (t("gettext('Hello %(name)s!', name='Peter')") == u'Hallo Peter!')
        # Plural selection via ngettext (expected values come from the test catalog).
        assert (t("ngettext('%(num)s Apple', '%(num)s Apples', 3)") == u'3 Apfel')
        assert (t("ngettext('%(num)s Apple', '%(num)s Apples', 1)") == u'1 Apfel')
        # {% trans %} blocks, with and without pluralization.
        assert (flask.render_template_string('\n {% trans %}Hello {{ name }}!{% endtrans %}\n ', name='Peter').strip() == 'Hallo Peter!')
        assert (flask.render_template_string('\n {% trans num=3 %}{{ num }} Apple\n {%- pluralize %}{{ num }} Apples{% endtrans %}\n ', name='Peter').strip() == u'3 Apfel')
class VocParser(Parser):
DEFAULT_CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
def __init__(self, cfg: VocParserCfg):
super().__init__(bbox_yxyx=cfg.bbox_yxyx, has_labels=cfg.has_labels, include_masks=False, include_bboxes_ignore=False, ignore_empty_gt=(cfg.has_labels and cfg.ignore_empty_gt), min_img_size=cfg.min_img_size)
self.correct_bbox = 1
self.keep_difficult = cfg.keep_difficult
self.anns = None
self.img_id_to_idx = {}
self._load_annotations(split_filename=cfg.split_filename, img_filename=cfg.img_filename, ann_filename=cfg.ann_filename, classes=cfg.classes)
def _load_annotations(self, split_filename: str, img_filename: str, ann_filename: str, classes=None):
classes = (classes or self.DEFAULT_CLASSES)
self.cat_names = list(classes)
self.cat_ids = self.cat_names
self.cat_id_to_label = {cat: (i + self.label_offset) for (i, cat) in enumerate(self.cat_ids)}
self.anns = []
with open(split_filename) as f:
ids = f.readlines()
for img_id in ids:
img_id = img_id.strip('\n')
filename = (img_filename % img_id)
xml_path = (ann_filename % img_id)
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
if (min(width, height) < self.min_img_size):
continue
anns = []
for (obj_idx, obj) in enumerate(root.findall('object')):
name = obj.find('name').text
label = self.cat_id_to_label[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [int(bnd_box.find('xmin').text), int(bnd_box.find('ymin').text), int(bnd_box.find('xmax').text), int(bnd_box.find('ymax').text)]
anns.append(dict(label=label, bbox=bbox, difficult=difficult))
if ((not self.ignore_empty_gt) or len(anns)):
self.anns.append(anns)
self.img_infos.append(dict(id=img_id, file_name=filename, width=width, height=height))
self.img_ids.append(img_id)
else:
self.img_ids_invalid.append(img_id)
def merge(self, other):
assert (len(self.cat_ids) == len(other.cat_ids))
self.img_ids.extend(other.img_ids)
self.img_infos.extend(other.img_infos)
self.anns.extend(other.anns)
def get_ann_info(self, idx):
return self._parse_ann_info(self.anns[idx])
    def _parse_ann_info(self, ann_info):
        """Convert raw per-image annotation dicts to numpy bbox/label arrays.

        Degenerate boxes (width or height < 1) and 'difficult' objects
        (unless keep_difficult is set) are routed to the ignore lists.
        Returns a dict with 'bbox' (float32, Nx4) and 'cls' (int64), plus
        'bbox_ignore'/'cls_ignore' when include_bboxes_ignore is enabled.
        """
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for ann in ann_info:
            ignore = False
            (x1, y1, x2, y2) = ann['bbox']
            label = ann['label']
            w = (x2 - x1)
            h = (y2 - y1)
            # Degenerate boxes are ignored rather than silently dropped.
            if ((w < 1) or (h < 1)):
                ignore = True
            if self.yxyx:
                # Reorder to (y1, x1, y2, x2) when the dataset uses yxyx layout.
                bbox = [y1, x1, y2, x2]
            else:
                bbox = ann['bbox']
            if (ignore or (ann['difficult'] and (not self.keep_difficult))):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if (not bboxes):
            bboxes = np.zeros((0, 4), dtype=np.float32)
            labels = np.zeros((0,), dtype=np.float32)
        else:
            # correct_bbox (1 here) is subtracted -- presumably shifting VOC's
            # 1-based pixel coordinates to 0-based; confirm against callers.
            bboxes = (np.array(bboxes, ndmin=2, dtype=np.float32) - self.correct_bbox)
            labels = np.array(labels, dtype=np.float32)
        if self.include_bboxes_ignore:
            if (not bboxes_ignore):
                bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
                labels_ignore = np.zeros((0,), dtype=np.float32)
            else:
                bboxes_ignore = (np.array(bboxes_ignore, ndmin=2, dtype=np.float32) - self.correct_bbox)
                labels_ignore = np.array(labels_ignore, dtype=np.float32)
        ann = dict(bbox=bboxes.astype(np.float32), cls=labels.astype(np.int64))
        if self.include_bboxes_ignore:
            ann.update(dict(bbox_ignore=bboxes_ignore.astype(np.float32), cls_ignore=labels_ignore.astype(np.int64)))
        return ann
class TestNotificationApp(unittest.TestCase):
    """Smoke test: instantiate the notification app and validate its HTML.

    NOTE(review): decorators appear to have been stripped from this file;
    ``setUpClass`` lacks ``@classmethod`` here -- confirm against the
    original source before treating that as a bug.
    """
    def setUpClass(cls):
        import notification_app
        cls.AppClass = notification_app.MyApp
    def setUp(self):
        # Silence per-request logging for the duration of each test.
        self.AppClass.log_request = (lambda x, y: None)
    def tearDown(self):
        del self.AppClass.log_request
        # self.app is created inside test_main; tearDown assumes it ran.
        self.app.on_close()
    def test_main(self):
        self.app = self.AppClass(MockRequest(), ('0.0.0.0', 8888), MockServer())
        root_widget = self.app.main()
        html = root_widget.repr()
        assertValidHTML(html)
_module
class MSELoss(nn.Module):
    """Weighted wrapper around the project-level ``mse_loss`` helper.

    Args:
        reduction: reduction mode forwarded to ``mse_loss`` ('mean' by default).
        loss_weight: scalar multiplier applied to the computed loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None):
        """Compute the (optionally weighted) MSE, scaled by loss_weight."""
        raw_loss = mse_loss(pred, target, weight, reduction=self.reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
def infer(model, sentences, inputs):
    """Run grammar-acceptability inference over pre-tokenized sentences.

    Args:
        model: callable taking (input_ids, attention_mask) and returning an
            object with a ``logits`` attribute (HF-style model output).
        sentences: original sentence strings, parallel to ``inputs``.
        inputs: per-sentence dicts with 'input_ids' and 'attention_mask'.

    Returns:
        dict mapping each sentence to its predicted label
        (0=unacceptable, 1=acceptable). The original returned None; the
        return value is new and backward compatible.
    """
    num_sentences = len(sentences)
    total_infer_time = 0
    results = {}
    for i in range(num_sentences):
        input_ids = inputs[i]['input_ids']
        attention_masks = inputs[i]['attention_mask']
        with torch.no_grad():
            if (i == 0):
                # Warm-up pass: the very first call pays one-time setup cost
                # and is excluded from the timing below.
                model(input_ids, attention_masks)
            t0 = time.time()
            outputs = model(input_ids, attention_masks)
            total_infer_time += (time.time() - t0)
        logits = outputs.logits.detach().cpu().numpy()
        pred_flat = np.argmax(logits, axis=1).flatten()
        results[sentences[i]] = pred_flat[0]
    print('\n Number of sentences: {}'.format(num_sentences))
    if (num_sentences > 20):
        print(' First 20 results:')
    print('\t Grammar correctness label (0=unacceptable, 1=acceptable)\n')
    for (count, (k, v)) in enumerate(results.items()):
        # Stop after 20 entries (the original's post-print check printed 21).
        if (count == 20):
            break
        print('\t{!r} : {!r}'.format(k, v))
    # Guard against empty input to avoid ZeroDivisionError.
    avg_ms = (((total_infer_time / num_sentences) * 1000) if num_sentences else 0.0)
    print('\n Average inference time: {:.4f}ms'.format(avg_ms))
    print(' Total Inference time: {:.4f}ms'.format((total_infer_time * 1000)))
    return results
def parse_changelog(tag_name):
    """Extract the changelog section for release ``tag_name``.

    Scans doc/en/changelog.rst for the title line matching the given version
    and returns that release's lines (title included), stopping at the next
    release title. Returns an empty string when the version is not found.
    """
    p = (Path(__file__).parent.parent / 'doc/en/changelog.rst')
    changelog_lines = p.read_text(encoding='UTF-8').splitlines()
    # Fixed: the major version needs '\d+' -- the original '\d' silently
    # failed to match releases with a multi-digit major (e.g. 10.0.0).
    title_regex = re.compile('pytest (\\d+\\.\\d+\\.\\d+) \\(\\d{4}-\\d{2}-\\d{2}\\)')
    consuming_version = False
    version_lines = []
    for line in changelog_lines:
        m = title_regex.match(line)
        if m:
            if (m.group(1) == tag_name):
                consuming_version = True
            elif consuming_version:
                # Reached the next release's title: stop collecting.
                break
        if consuming_version:
            version_lines.append(line)
    return '\n'.join(version_lines)
((sys.version_info[:2] >= (3, 11)), 'asyncio.coroutine has been removed in Python 3.11')
class YieldFromTests(ClientServerTestsMixin, AsyncioTestCase):
    """Tests for the legacy generator-based (``yield from``) coroutine API.

    NOTE(review): decorators appear stripped from this file; ``_server()``
    below is likely the residue of a class decorator, and the tuple above the
    class a skip condition for Python >= 3.11 (where ``asyncio.coroutine``
    was removed) -- confirm against the original source.
    """
    _server()
    def test_client(self):
        # Generator-based coroutines emit DeprecationWarning; suppress it.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            def run_client():
                # Connect, verify the connection opened, then close and
                # verify the CLOSED state transition.
                client = (yield from connect(get_server_uri(self.server)))
                self.assertEqual(client.state, State.OPEN)
                (yield from client.close())
                self.assertEqual(client.state, State.CLOSED)
            self.loop.run_until_complete(run_client())
    def test_server(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            def run_server():
                # Bind an ephemeral port, then shut down and verify the
                # listening sockets were released.
                server = (yield from serve(default_handler, 'localhost', 0))
                self.assertTrue(server.sockets)
                server.close()
                (yield from server.wait_closed())
                self.assertFalse(server.sockets)
            self.loop.run_until_complete(run_server())
def create_model(args, model_name, output_dim, pretrained=False, device=None, **kwargs):
    """Build and return the model requested by ``model_name``.

    Dispatches over (model_name, args.dataset) combinations to the matching
    architecture builder. Unknown combinations raise NotImplementedError.

    Args:
        args: experiment config namespace (dataset, VHL options, model knobs).
        model_name: architecture identifier (e.g. 'resnet18', 'vgg-9').
        output_dim: number of output classes (before any VHL adjustment).
        pretrained: load pretrained weights where the builder supports it.
        device: target device, forwarded to builders that accept it.
        **kwargs: extra builder args (e.g. vocab_size for the LSTM models).

    Returns:
        The constructed nn.Module.

    Raises:
        NotImplementedError: for unknown model names or when args.gate_layer
            is set (gated variants are not handled here).
    """
    logging.info(('create_model. model_name = %s, output_dim = %s' % (model_name, output_dim)))
    model = None
    logging.info(f'model name: {model_name}')
    if args.VHL:
        # Virtual homogeneous learning: the 'extra' label style appends
        # VHL_num virtual classes to the output layer.
        if (args.VHL_label_style == 'extra'):
            output_dim = (output_dim + args.VHL_num)
            logging.info(f'Model output dim is changed into {output_dim}, original is {(output_dim - args.VHL_num)}')
        else:
            pass
    if (model_name in RNN_MODEL_LIST):
        # Text models have no notion of image size.
        pass
    else:
        image_size = get_dataset_image_size(args.dataset)
    if (not args.gate_layer):
        if ((model_name == 'lr') and (args.dataset == 'mnist')):
            logging.info('LogisticRegression + MNIST')
            model = LogisticRegression((28 * 28), output_dim)
        elif ((model_name == 'simple-cnn') and (args.dataset in ['cifar10'])):
            logging.info('simplecnn + CIFAR10')
            model = SimpleCNN(input_dim=((16 * 5) * 5), hidden_dims=[120, 84], output_dim=output_dim, input_channels=args.model_input_channels)
        elif ((model_name == 'simple-cnn-mnist') and (args.dataset in ['mnist', 'fmnist'])):
            logging.info('simplecnn_mnist + MNIST or FMNIST')
            model = SimpleCNNMNIST(input_dim=((16 * 4) * 4), hidden_dims=[120, 84], output_dim=output_dim, input_channels=args.model_input_channels)
        elif ((model_name == 'mnistflnet') and (args.dataset in ['mnist', 'fmnist', 'femnist'])):
            logging.info('MnistFLNet + MNIST or FMNIST')
            if args.model_out_feature:
                # Variant that also exposes intermediate features.
                model = MnistFLNet_feat_out(input_channels=args.model_input_channels, output_dim=output_dim)
            else:
                model = MnistFLNet(input_channels=args.model_input_channels, output_dim=output_dim)
        elif ((model_name == 'cifar10flnet') and (args.dataset == 'cifar10')):
            logging.info('Cifar10FLNet + CIFAR-10')
            model = Cifar10FLNet()
        elif ((model_name == 'SVCCAConvNet') and (args.dataset == 'cifar10')):
            logging.info('SVCCAConvNet + CIFAR-10')
            model = SVCCAConvNet()
        elif ((model_name == 'cnn') and (args.dataset == 'femnist')):
            logging.info('CNN + FederatedEMNIST')
            model = CNN_DropOut(False)
        elif (model_name == 'vgg-9'):
            if (args.dataset in ('mnist', 'femnist', 'fmnist')):
                model = ModerateCNNMNIST(output_dim=output_dim, input_channels=args.model_input_channels)
            elif (args.dataset in ('cifar10', 'cifar100', 'cinic10', 'svhn')):
                model = ModerateCNN(output_dim=output_dim)
            elif (args.dataset == 'celeba'):
                # CelebA is treated as a binary classification task here.
                model = ModerateCNN(output_dim=2)
        elif ('vgg' in model_name):
            logging.info(f'{model_name}')
            model = vgg_dict[model_name](input_channels=args.model_input_channels, output_dim=output_dim)
        elif ((model_name == 'resnet18_gn') or (model_name == 'resnet18')):
            logging.info('ResNet18_GN or resnet18')
            model = resnet18(pretrained=pretrained, num_classes=output_dim, group_norm=args.group_norm_num)
        elif (model_name == 'resnet18_v2'):
            logging.info('ResNet18_v2')
            model = ResNet18(args=args, num_classes=output_dim, image_size=image_size, model_input_channels=args.model_input_channels, device=device)
        elif (model_name == 'resnet34_v2'):
            logging.info('ResNet34_v2')
            model = ResNet34(args=args, num_classes=output_dim, image_size=image_size, model_input_channels=args.model_input_channels, device=device)
        elif (model_name == 'resnet50_v2'):
            model = ResNet50(args=args, num_classes=output_dim, image_size=image_size, model_input_channels=args.model_input_channels, device=device)
        elif (model_name == 'resnet10_v2'):
            logging.info('ResNet10_v2')
            model = ResNet10(args=args, num_classes=output_dim, image_size=image_size, model_input_channels=args.model_input_channels, device=device)
        elif ('swdcnn' in model_name):
            logging.info(f'{model_name}')
            model = build_SWD_CNN(model_name=model_name, output_dim=output_dim, input_channels=3)
        elif (model_name in ['resnet8_cifar', 'resnet20_cifar', 'resnet32_cifar', 'resnet54_cifar']):
            # Parse the depth out of names like 'resnet20_cifar'.
            resnet_size = int(model_name.split('_')[0].split('resnet')[1])
            logging.info(f'{model_name}')
            model = ResNet_cifar(num_classes=output_dim, args=args, image_size=image_size, model_input_channels=args.model_input_channels, resnet_size=resnet_size)
        elif (model_name == 'resnet18_torch'):
            logging.info('ResNet18_torch')
            model = resnet_torch.resnet18(pretrained=pretrained, num_classes=output_dim, args=args, model_input_channels=args.model_input_channels)
        elif (model_name == 'resnet50_torch'):
            logging.info('ResNet50_torch')
            model = resnet_torch.resnet50(pretrained=pretrained, num_classes=output_dim, args=args, model_input_channels=args.model_input_channels)
        elif ((model_name == 'rnn') and (args.dataset == 'shakespeare')):
            logging.info('RNN + shakespeare')
            model = RNN_OriginalFedAvg(embedding_dim=args.lstm_embedding_dim, hidden_size=args.lstm_hidden_size)
        elif ((model_name == 'rnn') and (args.dataset == 'fed_shakespeare')):
            logging.info('RNN + fed_shakespeare')
            model = RNN_OriginalFedAvg(embedding_dim=args.lstm_embedding_dim, hidden_size=args.lstm_hidden_size)
        elif ((model_name == 'lr') and (args.dataset == 'stackoverflow_lr')):
            logging.info('lr + stackoverflow_lr')
            model = LogisticRegression(10004, output_dim)
        elif ((model_name == 'rnn') and (args.dataset == 'stackoverflow_nwp')):
            logging.info('CNN + stackoverflow_nwp')
            model = RNN_StackOverFlow()
        elif (model_name == 'mobilenet'):
            model = mobilenet(num_classes=output_dim)
        elif (model_name == 'wideres40-2'):
            model = WideResNet(args=args, depth=40, num_classes=output_dim, widen_factor=2, dropRate=0.0)
        elif (model_name == 'inceptionresnetv2'):
            model = inceptionresnetv2(args=args, num_classes=output_dim, image_size=image_size, model_input_channels=args.model_input_channels, pretrained=pretrained)
        elif (model_name == 'mobilenet_v3'):
            model = MobileNetV3(model_mode='LARGE', num_classes=output_dim)
        elif (model_name == 'efficientnet'):
            # Removed an unused 'efficientnet_dict' of scaling coefficients
            # that was built here but never read; only b0 is instantiated.
            model = EfficientNet.from_name(model_name='efficientnet-b0', num_classes=output_dim)
        elif (model_name == 'lstman4'):
            model = LSTMAN4(datapath=args.an4_audio_path)
        elif (model_name == 'lstm'):
            model = lstmpy.lstm(vocab_size=kwargs['vocab_size'], embedding_dim=args.lstm_embedding_dim, batch_size=args.batch_size, num_steps=args.lstm_num_steps, dp_keep_prob=0.3)
        elif (model_name == 'lstmwt2'):
            model = lstmpy.lstmwt2(vocab_size=kwargs['vocab_size'], batch_size=args.batch_size, dp_keep_prob=0.5)
        elif (model_name == 'unet'):
            model = UNet(3)
        else:
            logging.info(f'model name is {model_name}')
            raise NotImplementedError
    else:
        raise NotImplementedError
    return model
class TestLatexify(TestCase):
    """Tests for pybamm model latexification (equation pretty-printing)."""
    def test_latexify(self):
        # DFN and SPMe cover DAE (0 = ...) and ODE (d/dt) equation styles.
        model_dfn = pybamm.lithium_ion.DFN()
        func_dfn = str(model_dfn.latexify())
        model_spme = pybamm.lithium_ion.SPMe()
        func_spme = str(model_spme.latexify())
        self.assertIn('Single Particle Model with electrolyte Equations', func_spme)
        # newline=False should join equations with LaTeX line breaks.
        self.assertIn('\\\\', str(model_spme.latexify(newline=False)))
        self.assertIn('Voltage [V]', func_spme)
        self.assertIn('\\nabla', func_spme)
        self.assertIn('r =', func_spme)
        self.assertIn('frac{d}{d t}', func_spme)
        self.assertIn('0 < r < ', func_spme)
        self.assertIn('; t=0', func_spme)
        self.assertIn('0 =', func_dfn)
        try:
            # Concatenations should render as LaTeX case environments ...
            self.assertIn('begin{cases}', func_spme)
            self.assertIn('end{cases}', func_spme)
        except AssertionError:
            # ... but if none appear at the top level, check that at least one
            # rhs equation produces a cases display on its own.
            for eqn in model_spme.rhs.values():
                concat_displays = model_spme._get_concat_displays(eqn)
                if concat_displays:
                    self.assertIn('begin{cases}', str(concat_displays))
                    self.assertIn('end{cases}', str(concat_displays))
                    break
        self.assertIn('Parameters and Variables', func_spme)
        self.assertIn('coefficient', func_spme)
        self.assertIn('diffusivity', func_spme)
    def test_latexify_other_variables(self):
        # Explicit output_variables should appear; defaults should not.
        model_spme = pybamm.lithium_ion.SPMe()
        func_spme = str(model_spme.latexify(output_variables=['Electrolyte concentration [mol.m-3]']))
        self.assertIn('Electrolyte concentration [mol.m-3]', func_spme)
        model = pybamm.BaseModel()
        var = pybamm.Variable('var')
        model.rhs = {var: 0}
        model.initial_conditions = {var: 0}
        func = str(model.latexify())
        self.assertNotIn('Voltage [V]', func)
    # NOTE(review): the tuple below looks like a stripped @unittest.skipIf
    # decorator (skip on Windows/macOS) -- confirm against the original.
    ((platform.system() in ['Windows', 'Darwin']), 'Only run for Linux')
    def test_sympy_preview(self):
        # Render to each supported file format, then clean up.
        model_spme = pybamm.lithium_ion.SPMe()
        for ext in ['png', 'jpg', 'pdf']:
            filename = f'{uuid.uuid4()}.{ext}'
            model_spme.latexify(filename)
            os.remove(filename)
(params=['single', 'list'])
def hdf5_file_path_or_paths(tmp_path, test_objects, request) -> Union[(os.PathLike, list[os.PathLike])]:
    """Pytest fixture body: write test objects to one or two HDF5 files.

    Parametrized (the stripped decorator above supplies 'single'/'list'):
    returns a single file path for 'single' and a two-element list of paths
    for 'list'.
    """
    if (request.param == 'single'):
        return _write_h5_file((tmp_path / 'test.h5'), test_objects)
    elif (request.param == 'list'):
        return [_write_h5_file((tmp_path / 'test1.h5'), test_objects), _write_h5_file((tmp_path / 'test2.h5'), test_objects)]
    else:
        # Fixed typo in the error message: the attribute is request.param,
        # not requests.param.
        raise ValueError("Invalid request.param, must be one of 'single' or 'list'")
class BatchNorm2d(nn.BatchNorm2d, Module):
    """BatchNorm2d with optional per-episode statistics and an adaptive
    ('alpha') blend of running and per-sample statistics at eval time.

    episodic: keeps an independent (running_mean, running_var,
        num_batches_tracked) buffer triplet per episode, selected via the
        ``episode`` argument of forward().
    alpha: records the training batch size and, at eval time, mixes running
        statistics with the current input's spatial statistics through a
        learned sigmoid gate.
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, episodic=False, n_episode=4, alpha=False):
        super(BatchNorm2d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
        self.episodic = episodic
        self.n_episode = n_episode
        self.alpha = alpha
        if self.track_running_stats:
            if self.episodic:
                # One independent set of running statistics per episode.
                for ep in range(n_episode):
                    self.register_buffer(('running_mean_%d' % ep), torch.zeros(num_features))
                    self.register_buffer(('running_var_%d' % ep), torch.ones(num_features))
                    self.register_buffer(('num_batches_tracked_%d' % ep), torch.tensor(0, dtype=torch.int))
            if self.alpha:
                # Training batch size, recorded for the eval-time gate below.
                self.register_buffer('batch_size', torch.tensor(0, dtype=torch.int))
                self.alpha_scale = nn.Parameter(torch.tensor(0.0))
                self.alpha_offset = nn.Parameter(torch.tensor(0.0))
    def is_episodic(self):
        return self.episodic
    def _batch_norm(self, x, mean, var, weight=None, bias=None):
        # Manual batch-norm so blended (non-buffer) statistics can be used.
        if self.affine:
            assert ((weight is not None) and (bias is not None))
            weight = weight.view(1, (- 1), 1, 1)
            bias = bias.view(1, (- 1), 1, 1)
            x = (((weight * (x - mean)) / ((var + self.eps) ** 0.5)) + bias)
        else:
            x = ((x - mean) / ((var + self.eps) ** 0.5))
        return x
    def reset_episodic_running_stats(self, episode):
        """Reset one episode's buffers to the BN identity initialization."""
        if self.episodic:
            getattr(self, ('running_mean_%d' % episode)).zero_()
            getattr(self, ('running_var_%d' % episode)).fill_(1.0)
            getattr(self, ('num_batches_tracked_%d' % episode)).zero_()
    def forward(self, x, params=None, episode=None):
        self._check_input_dim(x)
        if (params is not None):
            # Externally supplied (e.g. meta-learned) affine parameters take
            # precedence; fall back to the module's own where missing.
            (weight, bias) = (params.get('weight'), params.get('bias'))
            if (weight is None):
                weight = self.weight
            if (bias is None):
                bias = self.bias
        else:
            (weight, bias) = (self.weight, self.bias)
        if self.track_running_stats:
            if self.episodic:
                assert ((episode is not None) and (episode < self.n_episode))
                running_mean = getattr(self, ('running_mean_%d' % episode))
                running_var = getattr(self, ('running_var_%d' % episode))
                num_batches_tracked = getattr(self, ('num_batches_tracked_%d' % episode))
            else:
                (running_mean, running_var) = (self.running_mean, self.running_var)
                num_batches_tracked = self.num_batches_tracked
            if self.training:
                exp_avg_factor = 0.0
                # NOTE(review): self.first_pass is not defined in this class;
                # it presumably comes from the project's Module mixin --
                # confirm. Stats are only updated on the first pass.
                if self.first_pass:
                    if self.alpha:
                        self.batch_size = x.size(0)
                    num_batches_tracked += 1
                    if (self.momentum is None):
                        # momentum=None means cumulative moving average.
                        exp_avg_factor = (1.0 / float(num_batches_tracked))
                    else:
                        exp_avg_factor = self.momentum
                return F.batch_norm(x, running_mean, running_var, weight, bias, True, exp_avg_factor, self.eps)
            elif self.alpha:
                assert (self.batch_size > 0)
                # Gate in (0, 1), conditioned on the recorded batch size.
                alpha = torch.sigmoid(((self.alpha_scale * self.batch_size) + self.alpha_offset))
                running_mean = running_mean.view(1, (- 1), 1, 1)
                running_var = running_var.view(1, (- 1), 1, 1)
                # Per-sample spatial statistics of the current input.
                sample_mean = torch.mean(x, dim=(2, 3), keepdim=True)
                sample_var = torch.var(x, dim=(2, 3), unbiased=False, keepdim=True)
                mean = ((alpha * running_mean) + ((1 - alpha) * sample_mean))
                # Blended variance includes the between-means correction term.
                var = (((alpha * running_var) + ((1 - alpha) * sample_var)) + ((alpha * (1 - alpha)) * ((sample_mean - running_mean) ** 2)))
                return self._batch_norm(x, mean, var, weight, bias)
            else:
                return F.batch_norm(x, running_mean, running_var, weight, bias, False, 0.0, self.eps)
        else:
            # No running stats at all: always normalize with batch statistics.
            return F.batch_norm(x, None, None, weight, bias, True, 0.0, self.eps)
_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Longformer (slow and Rust/fast implementations).

    NOTE(review): the 'G' characters in the vocab/expected tokens below are
    likely a mangled BPE space marker ('Ġ') from extraction -- confirm
    against the original file; the literals are left untouched here.
    """
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Minimal BPE vocab + merges written to a temp dir for from_pretrained.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)
    def test_full_tokenizer(self):
        # BPE tokenization against the toy vocab defined in setUp.
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        # Not auto-collected (no 'test_' prefix); integration ids check.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(tokenizer.encode('Hello world! cece herlolip 418', add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2])
    def test_sequence_builders(self):
        # build_inputs_with_special_tokens must match encode(add_special_tokens=True).
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode('sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode('sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == encoded_text_from_decode)
        assert (encoded_pair == encoded_pair_from_decode)
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        # Byte-level encoding of the leading-space marker.
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # The mask token strips the space to its left (lstrip=True), so the
        # token following it keeps (or loses) the space marker accordingly.
        mask = '<mask>'
        tokenizer.add_special_tokens({'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[(mask_loc + 1)])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[(mask_loc + 1)])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        # Intentionally skipped for this tokenizer.
        pass
    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on ids, masks and token strings.
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                self.assertEqual((sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask'])), (sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask'])))
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_p_str, ['<s>', 'A', ',', '<mask>', 'GAllen', 'N', 'LP', 'Gsentence', '.', '</s>'])
                self.assertSequenceEqual(tokens_r_str, ['<s>', 'A', ',', '<mask>', 'GAllen', 'N', 'LP', 'Gsentence', '.', '</s>'])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        # Constructor kwargs must propagate into the serialized backend state.
        for (trim_offsets, add_prefix_space) in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Offset mappings for 'hello hello' under each combination of
        # add_prefix_space / trim_offsets; trim_offsets=False keeps the
        # leading space inside the second token's span.
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(encoding.offset_mapping[1], ((len(text_of_1_token) + 1), ((len(text_of_1_token) + 1) + len(text_of_1_token))))
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(encoding.offset_mapping[1], ((len(text_of_1_token) + 1), ((len(text_of_1_token) + 1) + len(text_of_1_token))))
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(encoding.offset_mapping[1], (len(text_of_1_token), ((len(text_of_1_token) + 1) + len(text_of_1_token))))
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(encoding.offset_mapping[1], (len(text_of_1_token), ((len(text_of_1_token) + 1) + len(text_of_1_token))))
                # Same checks with an explicit leading space in the text.
                text = f' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, (1 + len(text_of_1_token))))
                self.assertEqual(encoding.offset_mapping[1], (((1 + len(text_of_1_token)) + 1), (((1 + len(text_of_1_token)) + 1) + len(text_of_1_token))))
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, (1 + len(text_of_1_token))))
                self.assertEqual(encoding.offset_mapping[1], ((1 + len(text_of_1_token)), (((1 + len(text_of_1_token)) + 1) + len(text_of_1_token))))
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, (1 + len(text_of_1_token))))
                self.assertEqual(encoding.offset_mapping[1], ((1 + len(text_of_1_token)), (((1 + len(text_of_1_token)) + 1) + len(text_of_1_token))))
class ProgressCallback(TrainerCallback):
    """Trainer callback rendering tqdm progress bars on the main process.

    A training bar tracks global optimization steps; a secondary bar tracks
    prediction/evaluation batches and is closed when that phase ends.
    """

    def __init__(self):
        self.training_bar = None
        self.prediction_bar = None

    def on_train_begin(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.training_bar = tqdm(total=state.max_steps)
            self.current_step = 0

    def on_step_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            # Advance by the number of steps since the last update (can be
            # more than 1, e.g. on resumed runs).
            self.training_bar.update((state.global_step - self.current_step))
            self.current_step = state.global_step

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if (state.is_local_process_zero and has_length(eval_dataloader)):
            if (self.prediction_bar is None):
                # Keep the bar after completion only when not training.
                self.prediction_bar = tqdm(total=len(eval_dataloader), leave=(self.training_bar is None))
            self.prediction_bar.update(1)

    def _close_prediction_bar(self, state):
        # Shared teardown for evaluate/predict (was duplicated verbatim).
        if state.is_local_process_zero:
            if (self.prediction_bar is not None):
                self.prediction_bar.close()
            self.prediction_bar = None

    def on_evaluate(self, args, state, control, **kwargs):
        self._close_prediction_bar(state)

    def on_predict(self, args, state, control, **kwargs):
        self._close_prediction_bar(state)

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Guard logs against None: the parameter defaults to None but the
        # original unconditionally called logs.pop(...).
        if (state.is_local_process_zero and (self.training_bar is not None) and (logs is not None)):
            _ = logs.pop('total_flos', None)  # too noisy to print per line
            self.training_bar.write(str(logs))

    def on_train_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.training_bar.close()
            self.training_bar = None
def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size, shuffle):
    """Batch (image, label) pairs through a TF1 input queue.

    Args:
        image: 3-D tensor holding one preprocessed image.
        label: tensor holding its label.
        min_queue_examples: minimum number of samples retained in the queue
            (provides adequate mixing for the shuffle case).
        batch_size: number of examples per output batch.
        shuffle: use a 16-thread shuffling queue instead of a FIFO batcher.

    Returns:
        (images, labels): a batched image tensor and a [batch_size] label
        tensor.
    """
    num_preprocess_threads = 16
    if shuffle:
        (images, label_batch) = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples)
    else:
        # NOTE(review): the non-shuffle path uses a single thread and a
        # smaller capacity than the shuffle path -- confirm this asymmetry
        # is intentional.
        (images, label_batch) = tf.train.batch([image, label], batch_size=batch_size, num_threads=1, capacity=min_queue_examples)
    return (images, tf.reshape(label_batch, [batch_size]))
class CycleBatchNormList(nn.ModuleList):
    """A list of ``length`` batch-norm layers applied in round-robin order.

    Each forward call dispatches to the next layer in the cycle, so every
    layer accumulates statistics from its own input stream. When affine is
    enabled, a single shared scale/shift pair is applied after every layer
    (the per-layer norms themselves are built with ``affine=False``).
    """

    def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):
        # Pop 'affine' so the shared parameters own it, not the BN layers.
        self._affine = kwargs.pop('affine', True)
        layers = [bn_class(**kwargs, affine=False) for _ in range(length)]
        super().__init__(layers)
        if self._affine:
            num_channels = self[0].num_features
            self.weight = nn.Parameter(torch.ones(num_channels))
            self.bias = nn.Parameter(torch.zeros(num_channels))
        self._pos = 0

    def forward(self, x):
        out = self[self._pos](x)
        # Advance the cycle for the next call.
        self._pos = (self._pos + 1) % len(self)
        if not self._affine:
            return out
        scale = self.weight.reshape(1, -1, 1, 1)
        shift = self.bias.reshape(1, -1, 1, 1)
        return out * scale + shift

    def extra_repr(self):
        return f'affine={self._affine}'
def test_same_as_the_reference_implementation() -> None:
    """Compare pc_relate kinship (phi) against the GENESIS reference output.

    Loads the HapMap JPT+CHB test dataset, attaches precomputed principal
    components from pcs.csv, runs pc_relate, and checks the strict upper
    triangle of the kinship matrix against values exported from GENESIS
    (kinbtwe.csv).
    """
    d = Path(__file__).parent
    ds = read_plink(path='hapmap_JPT_CHB_r23a_filtered')
    # Columns 1-2 of pcs.csv hold the two PC coordinates per sample.
    pcs = da.from_array(pd.read_csv(d.joinpath('pcs.csv').as_posix(), usecols=[1, 2]).to_numpy())
    ds[sample_pca_projection] = (('samples', 'components'), pcs)
    phi = pc_relate(ds).pc_relate_phi.compute()
    n_samples = 90
    assert isinstance(phi, xr.DataArray)
    assert (phi.shape == (n_samples, n_samples))
    genesis_phi = pd.read_csv(d.joinpath('kinbtwe.csv'))
    genesis_phi = genesis_phi[['kin']].to_numpy()
    # Compare only the strict upper triangle: pairwise kinship, no self-pairs.
    phi_s = phi.data[np.triu_indices_from(phi.data, 1)]
    assert (phi_s.size == genesis_phi.size)
    assert np.allclose(phi_s, genesis_phi.T)
.parametrize('username, expect_success', [('devtable', True), ('public', True), ('buynlarge', False), ('devtable+dtrobot', False), ('unverified', False)])
def test_common_login(username, expect_success, app):
    """Verify common_login succeeds only for valid, verified namespaces.

    NOTE(review): the '.parametrize(...)' line above this function appears to
    be a stripped @pytest.mark.parametrize decorator supplying
    username/expect_success -- confirm against the original source.
    """
    uuid = model.get_namespace_uuid(username)
    with app.app_context():
        (success, headers) = common_login(uuid)
        assert (success == expect_success)
        if success:
            # A successful login must refresh the CSRF token header.
            assert (QUAY_CSRF_UPDATED_HEADER_NAME in headers)
class BatchNormalization(tf.layers.BatchNormalization):
    """Batch normalization that averages moments across all TPU replicas.

    Standard BN computes per-replica (per-shard) statistics; this variant
    replaces them with cross-replica means so normalization behaves as if
    computed over the global batch.
    """
    def __init__(self, fused=False, **kwargs):
        # The fused kernel bypasses _moments(), so it cannot take part in the
        # cross-replica averaging below.
        if (fused in (True, None)):
            raise ValueError('The TPU version of BatchNormalization does not support fused=True.')
        super(BatchNormalization, self).__init__(fused=fused, **kwargs)
    def _cross_replica_average(self, t):
        """Return the mean of tensor ``t`` across all TPU shards."""
        num_shards = tpu_function.get_tpu_context().number_of_shards
        return (tf.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype))
    def _moments(self, inputs, reduction_axes, keep_dims):
        """Compute group mean/variance across replicas.

        Uses the law of total variance: group variance is the average shard
        variance plus the cross-replica variance of the shard means.
        """
        (shard_mean, shard_variance) = super(BatchNormalization, self)._moments(inputs, reduction_axes, keep_dims=keep_dims)
        num_shards = tpu_function.get_tpu_context().number_of_shards
        if (num_shards and (num_shards > 1)):
            group_mean = self._cross_replica_average(shard_mean)
            group_variance = self._cross_replica_average(shard_variance)
            mean_distance = tf.square((group_mean - shard_mean))
            group_variance += self._cross_replica_average(mean_distance)
            return (group_mean, group_variance)
        else:
            # Single shard (or non-TPU): fall back to per-batch moments.
            return (shard_mean, shard_variance)
class TestNCCL(unittest.TestCase):
    """Smoke test for the NCCL bindings (no-op when caffe lacks NCCL)."""

    def test_newuid(self):
        if not caffe.has_nccl():
            return
        uid = caffe.NCCL.new_uid()
        # The uid is bytes on Python 3, str on Python 2.
        expected_type = bytes if (sys.version_info.major >= 3) else str
        self.assertTrue(isinstance(uid, expected_type))
class TFDecoderLayer(nn.Module):
    """Transformer decoder layer: self-attention, encoder-decoder attention,
    then a position-wise feed-forward network.

    Supports exactly two operation orders: pre-norm (norm before each
    sub-layer, the default) and post-norm (norm after each residual add).
    """
    def __init__(self, d_model=512, d_inner=256, n_head=8, d_k=64, d_v=64, dropout=0.1, qkv_bias=False, act_cfg=dict(type='mmcv.GELU'), operation_order=None):
        super().__init__()
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.self_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout, qkv_bias=qkv_bias)
        self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout, qkv_bias=qkv_bias)
        self.mlp = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, act_cfg=act_cfg)
        self.operation_order = operation_order
        if (self.operation_order is None):
            # Default to the pre-norm ordering.
            self.operation_order = ('norm', 'self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn')
        assert (self.operation_order in [('norm', 'self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn'), ('self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn', 'norm')])
    def forward(self, dec_input, enc_output, self_attn_mask=None, dec_enc_attn_mask=None):
        """Run one decoder layer.

        Args:
            dec_input: decoder-side features (attention queries).
            enc_output: encoder memory (keys/values for enc-dec attention).
            self_attn_mask: mask for decoder self-attention (e.g. causal).
            dec_enc_attn_mask: mask for encoder-decoder attention.
        """
        if (self.operation_order == ('self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn', 'norm')):
            # Post-norm: sub-layer -> residual add -> LayerNorm.
            dec_attn_out = self.self_attn(dec_input, dec_input, dec_input, self_attn_mask)
            dec_attn_out += dec_input
            dec_attn_out = self.norm1(dec_attn_out)
            enc_dec_attn_out = self.enc_attn(dec_attn_out, enc_output, enc_output, dec_enc_attn_mask)
            enc_dec_attn_out += dec_attn_out
            enc_dec_attn_out = self.norm2(enc_dec_attn_out)
            mlp_out = self.mlp(enc_dec_attn_out)
            mlp_out += enc_dec_attn_out
            mlp_out = self.norm3(mlp_out)
        elif (self.operation_order == ('norm', 'self_attn', 'norm', 'enc_dec_attn', 'norm', 'ffn')):
            # Pre-norm: LayerNorm -> sub-layer -> residual add.
            dec_input_norm = self.norm1(dec_input)
            dec_attn_out = self.self_attn(dec_input_norm, dec_input_norm, dec_input_norm, self_attn_mask)
            dec_attn_out += dec_input
            enc_dec_attn_in = self.norm2(dec_attn_out)
            enc_dec_attn_out = self.enc_attn(enc_dec_attn_in, enc_output, enc_output, dec_enc_attn_mask)
            enc_dec_attn_out += dec_attn_out
            mlp_out = self.mlp(self.norm3(enc_dec_attn_out))
            mlp_out += enc_dec_attn_out
        return mlp_out
@_moment.register(TruncatedRV)
def truncated_moment(op, rv, *inputs):
    """Moment dispatch for truncated random variables.

    Uses the untruncated base RV's moment when it falls inside [lower, upper];
    otherwise falls back to the interval midpoint-style heuristic (or an offset
    from the finite bound when one side is unbounded).

    Fix: the ``@`` of the singledispatch registration decorator was missing, so
    the function was never registered and the bare call discarded its result.
    """
    *rv_inputs, lower, upper, rng = inputs
    # Rebuild the untruncated base variable to query its moment.
    untruncated_rv = op.base_rv_op.make_node(rng, *rv_inputs).default_output()
    untruncated_moment = moment(untruncated_rv)
    # Both bounds finite -> midpoint-style value; one-sided -> unit offset
    # inward from the finite bound.
    fallback_moment = pt.switch(pt.and_(pt.bitwise_not(pt.isinf(lower)), pt.bitwise_not(pt.isinf(upper))), ((upper - lower) / 2), pt.switch(pt.isinf(upper), (lower + 1), (upper - 1)))
    return pt.switch(pt.and_(pt.ge(untruncated_moment, lower), pt.le(untruncated_moment, upper)), untruncated_moment, fallback_moment)
def run_hook_for_layers(model: torch.nn.Module, input_shapes: Union[(Tuple, List[Tuple])], hook, module_type_for_attaching_hook=None, leaf_node_only=True):
    """Attach ``hook`` as a forward hook on selected modules, run one dummy
    forward pass, then remove all hooks.

    :param model: model whose modules are instrumented
    :param input_shapes: shape(s) of the random dummy input tensors
    :param hook: forward-hook callable
    :param module_type_for_attaching_hook: optional type filter for modules
    :param leaf_node_only: when True, only leaf modules are instrumented
    """
    if leaf_node_only:
        targets = [m for m in model.modules() if is_leaf_module(m)]
    else:
        targets = list(model.modules())
    if module_type_for_attaching_hook:
        targets = [m for m in targets if isinstance(m, module_type_for_attaching_hook)]
    handles = [m.register_forward_hook(hook) for m in targets]
    # One inference pass in eval mode triggers every registered hook.
    dummy_tensors = create_rand_tensors_given_shapes(input_shapes, get_device(model))
    with in_eval_mode(model), torch.no_grad():
        model(*dummy_tensors)
    for handle in handles:
        handle.remove()
class TestChangeGC(EndianTest):
    """Round-trip pack/unpack tests for the X11 ChangeGC request."""

    def setUp(self):
        # NOTE(review): several fixture values are missing (e.g. "'plane_mask': ,",
        # "'tile': ,", "'gc': ") — the literals appear corrupted/truncated in this
        # copy and the dict below is not valid Python as written. Restore the
        # values from the original generated test source before running.
        self.req_args_0 = {'attrs': {'function': 8, 'plane_mask': , 'foreground': , 'background': , 'line_width': 36097, 'line_style': 0, 'cap_style': 3, 'join_style': 1, 'fill_style': 0, 'fill_rule': 0, 'tile': , 'stipple': , 'tile_stipple_x_origin': (- 24195), 'tile_stipple_y_origin': (- 15601), 'font': , 'subwindow_mode': 1, 'graphics_exposures': 1, 'clip_x_origin': (- 32135), 'clip_y_origin': (- 25437), 'clip_mask': , 'dash_offset': 42536, 'dashes': 137, 'arc_mode': 1}, 'gc': }
        # Known-good wire encoding of the request above.
        self.req_bin_0 = b'8\x00\x1a\x00h\xac\x90J\xff\xff\x7f\x00\x08\x00\x00\x00x>\\x89>\xb8\xbd% \x01\x8d\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\xfb\xb2*\xe6\x08\xb5b}\xa1\x00\x00\x0f\xc3\x00\x00\xdf\xb7\xaf\x14\x01\x00\x00\x00\x01\x00\x00\x00y\x82\x00\x00\xa3\x9c\x00\x000\x97\xa2\t(\xa6\x00\x00\x89\x00\x00\x00\x01\x00\x00\x00'

    def testPackRequest0(self):
        # Packing the args must reproduce the reference binary exactly.
        bin = request.ChangeGC._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference binary must yield the args with no bytes left over.
        (args, remain) = request.ChangeGC._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class NetOutBlock(nn.Module):
    """Decoder output block: upsample, fuse with the skip ("bridge") features,
    apply a residual conv block, then project to per-class maps."""

    def __init__(self, in_channels, br_channels, out_channels, classes, layers=1):
        super(NetOutBlock, self).__init__()
        # Learned 2x upsampling followed by BN + PReLU.
        self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        self.bn_up = nn.BatchNorm2d(out_channels)
        self.af_up = nn.PReLU(out_channels)
        # Conv block over the concatenated (upsampled + bridge) channels.
        self.convb = NetConvBlock(out_channels + br_channels, out_channels, layers=layers)
        # 1x1 projection to class maps.
        self.conv = nn.Conv2d(out_channels, classes, kernel_size=1)
        self.bn_out = nn.BatchNorm2d(classes)
        self.af_out = nn.PReLU(classes)

    def forward(self, x, bridge):
        upsampled = self.af_up(self.bn_up(self.up(x)))
        fused = self.convb(torch.cat([upsampled, bridge], 1))
        # Residual connection around the conv block.
        fused = fused + upsampled
        return self.af_out(self.bn_out(self.conv(fused)))
def conv_bn_relu(data, cfg, num_filters, kernel=(3, 3), stride=(1, 1), pad=(1, 1), group=1, workspace=512, bn_mom=0.9, name=''):
    """Convolution -> BatchNorm -> ReLU symbol block, with optional dropout.

    Layer names are derived from ``name`` with '_conv'/'_bn'/'_relu'/'_dp'
    suffixes; dropout is appended only when cfg.network.dropout > 0.
    """
    out = mx.sym.Convolution(data=data, num_filter=num_filters, kernel=kernel, stride=stride, pad=pad, num_group=group, no_bias=True, workspace=workspace, name=name + '_conv')
    out = mx.sym.BatchNorm(data=out, fix_gamma=False, eps=2e-05, momentum=bn_mom, name=name + '_bn')
    out = mx.sym.Activation(data=out, act_type='relu', name=name + '_relu')
    if cfg.network.dropout > 0:
        out = mx.symbol.Dropout(data=out, p=cfg.network.dropout, name=name + '_dp')
    return out
class Trailer(object):
    """Lightweight wrapper around a trailer id that builds its request path.

    Fix: ``file`` is read as an attribute by ``is_valid`` (``self.file[-1]``)
    but was defined as a plain method, so indexing it raised TypeError; it is
    now a ``@property``.
    """

    def set_defaults(self):
        # Baseline state; __init__ may override it.
        self.id = None

    def __init__(self, id):
        self.set_defaults()
        if id:
            self.id = id

    def is_valid(self):
        """Return True when the computed file path does not end with '/'."""
        return (self.file[(- 1)] != '/')

    @property
    def file(self):
        """Relative URL used to fetch the trailer for this id."""
        trailer_file = 'gettrailer.php?quality=hd&trailer_id={}'.format(self.id)
        return trailer_file
class Input():
    """Value-object describing a user-facing input field.

    Equality and hashing consider every attribute, so instances can live in
    sets and serve as dict keys.
    """

    def __init__(self, handle, unit, label, iconID, defaultValue, defaultRange, mainTooltip=None, secondaryTooltip=None, conditions=()):
        self.handle = handle
        self.unit = unit
        self.label = label
        self.iconID = iconID
        self.defaultValue = defaultValue
        self.defaultRange = defaultRange
        self.mainTooltip = mainTooltip
        self.secondaryTooltip = secondaryTooltip
        # Normalize to a tuple so the instance stays hashable.
        self.conditions = tuple(conditions)

    def _key(self):
        # Single source of truth for the fields participating in eq/hash.
        return (self.handle, self.unit, self.label, self.iconID, self.defaultValue, self.defaultRange, self.mainTooltip, self.secondaryTooltip, self.conditions)

    def __hash__(self):
        return hash(self._key())

    def __eq__(self, other):
        if not isinstance(other, Input):
            return False
        return all(mine == theirs for mine, theirs in zip(self._key(), other._key()))
def test_elevationprofile():
    """ElevationProfile: XML generation, equality, user-data inequality and
    schema validation."""
    profile = xodr.ElevationProfile()
    prettyprint(profile.get_element())
    profile.add_elevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0))
    prettyprint(profile.get_element())
    # Same elevation entry -> profiles compare equal.
    twin = xodr.ElevationProfile()
    twin.add_elevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0))
    # Extra user data -> profiles compare unequal.
    with_userdata = xodr.ElevationProfile()
    with_userdata.add_elevation(xodr.elevation._Poly3Profile(0, 0, 0, 0, 0))
    with_userdata.add_userdata(xodr.UserData('stuffs', 'morestuffs'))
    prettyprint(with_userdata)
    assert profile == twin
    assert profile != with_userdata
    assert version_validation('t_road_elevationProfile', profile, wanted_schema='xodr') == ValidationResponse.OK
def attach(parser):
    """Attach the merge/join CLI arguments to ``parser``.

    Adds: positional ``inputs`` (PDF paths), ``--pages`` (per-input page
    selections, '_' meaning all pages), ``--passwords`` (per-input passwords,
    placeholder allowed for unencrypted files), and required ``--output``.

    Fix: corrected the "definig" typo in the --pages help text.
    """
    parser.add_argument('inputs', nargs='+', help='Sequence of PDF files.')
    parser.add_argument('--pages', nargs='+', default=[], help="Sequence of page texts, defining the pages to include from each PDF. Use '_' as placeholder for all pages.")
    parser.add_argument('--passwords', nargs='+', default=[], help='Passwords to unlock encrypted PDFs. Any placeholder may be used for non-encrypted documents.')
    parser.add_argument('--output', '-o', required=True, help='Target path for the output document')
class MockClient(BaseClient):
    """Test double exercising BaseClient's request plumbing against a fake API."""

    def api_version(self):
        # Fixed API version string consumed by BaseClient.
        return 'v2018-08-09'

    def list_resources(self, **options):
        # Listing endpoint returns a Pager over /resources.
        return Pager(self, '/resources', **options)

    def get_resource(self, resource_id, **options):
        return self._make_request('GET', self._interpolate_path('/resources/%s', resource_id), None, **options)

    def update_resource(self, resource_id, body):
        return self._make_request('PUT', self._interpolate_path('/resources/%s', resource_id), body)

    def delete_resource(self, resource_id):
        return self._make_request('DELETE', self._interpolate_path('/resources/%s', resource_id), None)
class FurthestPointSampling(Function):
    """autograd.Function wrapper around the CUDA furthest-point-sampling kernel.

    Fix: ``torch.autograd.Function`` requires ``forward``/``backward`` to be
    static methods (they are invoked via ``.apply`` without an instance); the
    ``@staticmethod`` decorators were missing.
    """

    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """Sample ``npoint`` indices from ``xyz`` (B, N, 3) via FPS on the GPU."""
        assert xyz.is_contiguous()
        (B, N, _) = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # Per-point running min-squared-distance buffer used by the kernel.
        # NOTE(review): comparable implementations initialise this to a large
        # sentinel (e.g. 1e10); confirm `.0` is what the kernel expects.
        temp = torch.cuda.FloatTensor(B, N).fill_(.0)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Index sampling is non-differentiable; no gradients flow back.
        return (None, None)
def add_nitsot_outputs(fgraph: FunctionGraph, old_scan_node: Apply, old_scan_args: ScanArgs, new_outputs_inner) -> tuple[(Apply, dict[(Variable, Variable)])]:
    """Rebuild ``old_scan_node`` as a new Scan with extra nit-sot outputs.

    The inner graphs in ``new_outputs_inner`` are appended as
    no-input/sequence-output ("nit-sot") taps, the old node is replaced in
    ``fgraph`` by the new one, and the new node is returned together with an
    (empty) replacement mapping.
    """
    assert isinstance(old_scan_node.op, Scan)
    nb_new_outs = len(new_outputs_inner)
    # Nit-sot outer inputs only carry the number of steps; reuse the scan's
    # n_steps input (outer input 0) once per new output.
    new_nitsots_initial_value = [old_scan_node.inputs[0] for i in range(nb_new_outs)]
    # Shallow-copy the arg bundle, then extend both the inner outputs and the
    # matching outer inputs with the new nit-sot entries.
    new_scan_args = copy.copy(old_scan_args)
    new_scan_args.inner_out_nit_sot.extend(new_outputs_inner)
    new_scan_args.outer_in_nit_sot.extend(new_nitsots_initial_value)
    assert isinstance(old_scan_node.op, Scan)
    # Recreate the Scan op with the extended signature, preserving the old
    # op's mode/profile/gradient-truncation/gc settings.
    new_scan_op = Scan(new_scan_args.inner_inputs, new_scan_args.inner_outputs, new_scan_args.info, mode=old_scan_node.op.mode, profile=old_scan_node.op.profile, truncate_gradient=old_scan_node.op.truncate_gradient, name=old_scan_node.op.name, allow_gc=old_scan_node.op.allow_gc)
    new_scan_outs = new_scan_op(*new_scan_args.outer_inputs, return_list=True)
    assert isinstance(new_scan_outs, list)
    new_scan_node = new_scan_outs[0].owner
    assert (new_scan_node is not None)
    # The appended nit-sot outputs sit just before the shared-variable outputs;
    # splice them out to recover the outputs matching the old node's.
    new_node_new_outputs_idx = (len(old_scan_args.outer_outputs) - len(old_scan_args.outer_out_shared))
    new_node_old_outputs = (new_scan_node.outputs[:new_node_new_outputs_idx] + new_scan_node.outputs[(new_node_new_outputs_idx + nb_new_outs):])
    # Swap the old node for the new one in the function graph.
    fgraph.replace_all_validate_remove(list(zip(old_scan_node.outputs, new_node_old_outputs)), remove=[old_scan_node], reason='scan_pushout_add')
    return (new_scan_node, {})
def main(context_switch=0, thread=(- 1)):
    """Monitor perf events on all CPUs and print each sample forever.

    :param context_switch: passed to the evsel; non-zero enables
        context-switch records.
    :param thread: thread id to monitor; -1 means all threads.
    """
    cpus = perf.cpu_map()
    threads = perf.thread_map(thread)
    # Software "dummy" counter: carries sideband records (task/comm/switch)
    # without counting anything itself.
    evsel = perf.evsel(type=perf.TYPE_SOFTWARE, config=perf.COUNT_SW_DUMMY, task=1, comm=1, mmap=0, freq=0, wakeup_events=1, watermark=1, sample_id_all=1, context_switch=context_switch, sample_type=((perf.SAMPLE_PERIOD | perf.SAMPLE_TID) | perf.SAMPLE_CPU))
    evsel.open(cpus=cpus, threads=threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Event loop: block until events arrive, then drain each CPU's ring buffer.
    while True:
        evlist.poll(timeout=(- 1))
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if (not event):
                continue
            print('cpu: {0}, pid: {1}, tid: {2} {3}'.format(event.sample_cpu, event.sample_pid, event.sample_tid, event))
def test_custom_python_executable(monkeypatch, tmpdir):
    """The hook caller must invoke the configured python executable, not sys.executable."""
    monkeypatch.setenv('PYTHONPATH', BUILDSYS_PKGS)
    runner = Mock(autospec=default_subprocess_runner)
    hooks = get_hooks('pkg1', runner=runner, python_executable='some-python')
    # 'some-python' does not exist, so launching the hook subprocess must fail.
    with hooks.subprocess_runner(runner), pytest.raises(FileNotFoundError):
        hooks.get_requires_for_build_wheel()
    runner.assert_called_once()
    # argv[0] of the attempted subprocess must be the custom executable.
    assert runner.call_args[0][0][0] == 'some-python'
class QueryClientResources(rq.ReplyRequest):
    """X-Resource extension ResQueryClientResources request/reply wire format."""
    # Request: extension major opcode, minor opcode, length, target client XID.
    _request = rq.Struct(rq.Card8('opcode'), rq.Opcode(ResQueryClientResources), rq.RequestLength(), rq.Card32('client'))
    # Reply: standard reply header followed by a counted list of Type entries.
    _reply = rq.Struct(rq.ReplyCode(), rq.Pad(1), rq.Card16('sequence_number'), rq.ReplyLength(), rq.LengthOf('types', 4), rq.Pad(20), rq.List('types', Type))
def se_conv_unit(x):
    """Squeeze-and-excitation style channel gating over feature map ``x``."""
    with tf.variable_scope(None, 'se_conv_unit'):
        dims = x.get_shape().as_list()
        # Squeeze: global average pool over the spatial dimensions.
        gate = slim.avg_pool2d(x, (dims[1], dims[2]), stride=1)
        # Excite: 1x1 conv back to the channel count, sigmoid applied via
        # (non-fused) batch norm activation.
        gate = slim.conv2d(gate, dims[(- 1)], 1, 1, activation_fn=None)
        gate = slim.batch_norm(gate, activation_fn=tf.nn.sigmoid, fused=False)
        # Scale the input features channel-wise by the learned gate.
        return tf.multiply(x, gate)
def test_assert_raises_on_assertthis_not_equals():
    """A mismatched assert['this'] vs assert['equals'] must raise with the exact message."""
    context = Context({'assert': {'this': 'boom', 'equals': 'BOOM'}})
    expected = "assert assert['this'] is of type str and does not equal assert['equals'] of type str."
    with pytest.raises(AssertionError) as excinfo:
        assert_step.run_step(context)
    assert str(excinfo.value) == expected
class Pizza(ABC):
    """Abstract pizza with overridable ingredients and a printable description.

    Subclasses set the ingredient attributes and implement ``prepare``.
    Fix: replaced non-idiomatic ``!= None`` comparisons with ``is not None``
    and simplified the redundant ``(veggies != None) and veggies`` guard.
    """
    name: str = None
    dough: Dough = None
    sauce: Sauce = None
    veggies: List[Veggies] = None
    cheese: Cheese = None
    pepperoni: Pepperoni = None
    clam: Clams = None

    def prepare(self) -> None:
        """Assemble the ingredients; must be provided by subclasses."""
        raise NotImplementedError

    def bake(self) -> None:
        print('Bake for 25 minutes at 350')

    def cut(self) -> None:
        print('Cutting the pizza into diagonal slices')

    def box(self) -> None:
        print('Place pizza in official PizzaStore box')

    def setName(self, name: str) -> None:
        self.name = name

    def getName(self) -> str:
        return self.name

    def toString(self) -> str:
        """Render a multi-line description listing every set ingredient."""
        result: StringBuffer = StringBuffer()
        result.append(f'---- {self.name} ----\n')
        if self.dough is not None:
            result.append(self.dough.toString())
            result.append('\n')
        if self.sauce is not None:
            result.append(self.sauce.toString())
            result.append('\n')
        if self.cheese is not None:
            result.append(self.cheese.toString())
            result.append('\n')
        # Truthiness covers both None and an empty list.
        if self.veggies:
            for i, veggie in enumerate(self.veggies):
                result.append(veggie.toString())
                if i < (len(self.veggies) - 1):
                    result.append(', ')
            result.append('\n')
        if self.clam is not None:
            result.append(self.clam.toString())
            result.append('\n')
        if self.pepperoni is not None:
            result.append(self.pepperoni.toString())
            result.append('\n')
        return result.toString()
def annotate_file(path: Union[(str, 'os.PathLike[str]')], *, visitor_cls: Type[NameCheckVisitor]=NameCheckVisitor, verbose: bool=False, dump: bool=False, show_errors: bool=False) -> ast.AST:
    """Parse the file at ``path``, attach inferred annotations to its AST and
    return the annotated tree.

    Importing the module is best-effort: on failure the traceback is printed
    (when ``verbose``) and annotation proceeds with ``mod=None``.
    """
    filename = os.fspath(path)
    mod = None
    try:
        mod, _ = load_module_from_file(filename, verbose=verbose)
    except Exception:
        if verbose:
            traceback.print_exc()
    with open(filename, encoding='utf-8') as fh:
        source_text = fh.read()
    tree = ast.parse(source_text)
    _annotate_module(filename, mod, tree, source_text, visitor_cls, show_errors=show_errors)
    if dump:
        dump_annotated_code(tree)
    return tree
class OnnxConfigWithPast(OnnxConfig, ABC):
    """ONNX export configuration for models supporting past key/values caching.

    Fix: restored the stripped ``@classmethod``/``@property`` decorators —
    ``with_past`` takes ``cls``, and ``outputs`` itself reads ``super().outputs``
    without calling it, so these members are clearly meant to be a classmethod
    and properties.
    """

    def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False):
        super().__init__(config, task=task, patching_specs=patching_specs)
        self.use_past = use_past

    @classmethod
    def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':
        """Instantiate the config with past-key-values support enabled."""
        return cls(config, task=task, use_past=True)

    @property
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Base outputs, extended with 'present' entries when using the cache."""
        common_outputs = super().outputs
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction='outputs')
        return common_outputs

    @property
    def values_override(self) -> Optional[Mapping[(str, Any)]]:
        """Force the model config's `use_cache` flag to match `use_past`."""
        if hasattr(self._config, 'use_cache'):
            return {'use_cache': self.use_past}
        return None

    @property
    def num_layers(self) -> int:
        if (not hasattr(self._config, 'num_layers')):
            raise AttributeError('could not find the number of layers attribute in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
        return self._config.num_layers

    @property
    def num_attention_heads(self) -> int:
        if (not hasattr(self._config, 'num_attention_heads')):
            raise AttributeError('could not find the number of attention heads attribute in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
        return self._config.num_attention_heads

    def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Base dummy inputs, plus zero past_key_values (and an extended
        attention mask) when using the cache. Requires PyTorch."""
        common_inputs = super().generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                (batch, seqlen) = common_inputs['input_ids'].shape
                # Past length is padded slightly beyond the prompt length.
                past_key_values_length = (seqlen + 2)
                shape = (batch, self.num_attention_heads, past_key_values_length, (self._config.hidden_size // self.num_attention_heads))
                if ('attention_mask' in common_inputs):
                    # Extend the mask to cover the fake past positions.
                    mask_dtype = common_inputs['attention_mask'].dtype
                    common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
                common_inputs['past_key_values'] = []
                for _ in range(self.num_layers):
                    common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[(str, Mapping[(int, str)])], direction: str, inverted_values_shape: bool=False):
        """Add per-layer dynamic-axis entries for past/present key and value tensors."""
        if (direction not in ['inputs', 'outputs']):
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = ('past_key_values' if (direction == 'inputs') else 'present')
        for i in range(self.num_layers):
            inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
            # Some architectures store values with the sequence axis at dim 1.
            if inverted_values_shape:
                inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 1: 'past_sequence + sequence'}
            else:
                inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Each layer entry is a (key, value) pair.
        flattened_output[f'{name}.{idx}.key'] = t[0]
        flattened_output[f'{name}.{idx}.value'] = t[1]

    def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[(str, Any)]:
        """Flatten nested past/present structures into dot-named entries."""
        flattened_output = {}
        if (name in ['present', 'past_key_values']):
            for (idx, t) in enumerate(field):
                self._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super().flatten_output_collection_property(name, field)
        return flattened_output
class UserSetNewPWDHandler(BaseHandler):
    """Admin-driven password reset: GET renders the form, POST verifies the
    admin's credentials and applies the new password to the target user."""
    # NOTE(review): the bare '.authenticated' lines below look like mangled
    # decorators (presumably '@tornado.web.authenticated'); as written they are
    # syntax errors — confirm against the original file.
    .authenticated
    async def get(self, userid):
        # Look up the target user's email to pre-fill the form.
        email = (await self.db.user.get(userid, fields=('email',)))['email']
        (await self.render('user_setnewpwd.html', userid=userid, usermail=email))
        return
    .authenticated
    async def post(self, userid):
        try:
            log = u''
            envs = {}
            # Collect all posted form fields (admin credentials + new password).
            for (k, _) in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            async with self.db.transaction() as sql_session:
                adminuser = (await self.db.user.get(email=envs['adminmail'], fields=('role', 'email'), sql_session=sql_session))
                newPWD = envs['newpwd']
                # Require a successful admin MD5 challenge AND the admin role.
                if ((await self.db.user.challenge_MD5(envs['adminmail'], envs['adminpwd'], sql_session=sql_session)) and (adminuser['role'] == 'admin')):
                    if (len(newPWD) >= 6):
                        (await self.db.user.mod(userid, password=newPWD, sql_session=sql_session))
                        user = (await self.db.user.get(userid, fields=('email', 'password', 'password_md5'), sql_session=sql_session))
                        # Keep the stored password_md5 digest consistent with
                        # the freshly set password.
                        hash = MD5.new()
                        hash.update(newPWD.encode('utf-8'))
                        tmp = crypto.password_hash(hash.hexdigest(), (await self.db.user.decrypt(userid, user['password'], sql_session=sql_session)))
                        if (user['password_md5'] != tmp):
                            (await self.db.user.mod(userid, password_md5=tmp, sql_session=sql_session))
                        # Sanity check: the new password must now authenticate.
                        if (not (await self.db.user.challenge(envs['usermail'], newPWD, sql_session=sql_session))):
                            raise Exception(u'')
                    else:
                        # NOTE(review): exception messages look stripped (likely
                        # non-ASCII text lost in extraction); '6' hints at a
                        # minimum-length message.
                        raise Exception(u'6')
                else:
                    raise Exception(u'/')
        except Exception as e:
            if config.traceback_print:
                traceback.print_exc()
            (await self.render('utils_run_result.html', log=str(e), title=u'', flg='danger'))
            logger_Web_Handler.error('UserID: %s set New_Password failed! Reason: %s', (userid or '-1'), str(e))
            return
        (await self.render('utils_run_result.html', log=log, title=u'', flg='success'))
        return
class LiveServerExecutor(object):
    """Registry of callables that can be invoked remotely on a live test server.

    Registered functions are exposed as POST endpoints under ``/__test/<name>``;
    ``on``/``on_session`` return invokers that call them over HTTP.

    Fix: the bare ``(path, methods=['POST'], endpoint=fn_name)`` expression was
    a stripped ``@testbp.route(...)`` decorator — without it no route was ever
    registered; restored it.
    """

    def __init__(self):
        self.funcs = {}

    def register(self, fn_name, fn):
        """Expose ``fn`` under the ``/__test/<fn_name>`` endpoint."""
        self.funcs[fn_name] = fn

    def apply_blueprint_to_app(self, app):
        """Mount one POST route per registered function on ``app`` under /__test."""
        testbp = Blueprint('testbp', __name__)

        def build_invoker(fn_name, fn):
            # Bind fn_name/fn per call to avoid the late-binding closure pitfall.
            path = ('/' + fn_name)

            @testbp.route(path, methods=['POST'], endpoint=fn_name)
            def _(**kwargs):
                # The invoker posts {'args': [...]} as JSON.
                arg_values = request.get_json()['args']
                return fn(*arg_values)

        for (fn_name, fn) in self.funcs.items():
            build_invoker(fn_name, fn)
        app.register_blueprint(testbp, url_prefix='/__test')

    def on(self, server):
        return liveServerExecutorInvoker(self.funcs, server)

    def on_session(self, server_session):
        return liveServerExecutorInvoker(self.funcs, server_session)
def find_head(arg_start, arg_end, doc):
    """Follow dependency-head links from ``arg_start`` to find the span head.

    Walks ``doc[i].head`` pointers while they remain inside
    [arg_start, arg_end], stopping at a self-headed (root) token or when the
    head escapes the span. Returns the head index twice, as (start, end).
    """
    node = arg_start
    while arg_start <= doc[node].head.i <= arg_end:
        if doc[node].head.i == node:
            # Self-headed token: this is the root of the span.
            break
        node = doc[node].head.i
    return (node, node)
def hf_from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], custom_process_state_fn: Callable=None, dtype: jnp.dtype=jnp.float32, *model_args, **kwargs):
    """Load a Flax pretrained model, optionally post-processing the flattened
    checkpoint state with ``custom_process_state_fn``.

    Mirrors the standard Flax ``from_pretrained`` flow (resolve config,
    locate/ download the weights file, deserialize, reconcile keys against the
    model's expected parameters) with one addition: after the state dict is
    flattened, ``custom_process_state_fn(state)`` may rewrite it before
    missing/unexpected-key handling.

    Fix: ``custom_process_state_fn`` defaults to ``None`` but was previously
    called unconditionally, raising TypeError when not supplied; it is now
    applied only when provided.
    """
    # --- extract and validate keyword arguments ---------------------------
    config = kwargs.pop('config', None)
    cache_dir = kwargs.pop('cache_dir', None)
    from_pt = kwargs.pop('from_pt', False)
    ignore_mismatched_sizes = kwargs.pop('ignore_mismatched_sizes', False)
    force_download = kwargs.pop('force_download', False)
    resume_download = kwargs.pop('resume_download', False)
    proxies = kwargs.pop('proxies', None)
    local_files_only = kwargs.pop('local_files_only', False)
    use_auth_token = kwargs.pop('use_auth_token', None)
    revision = kwargs.pop('revision', None)
    trust_remote_code = kwargs.pop('trust_remote_code', None)
    from_pipeline = kwargs.pop('_from_pipeline', None)
    from_auto_class = kwargs.pop('_from_auto', False)
    _do_init = kwargs.pop('_do_init', True)
    subfolder = kwargs.pop('subfolder', '')
    commit_hash = kwargs.pop('_commit_hash', None)
    if (trust_remote_code is True):
        logger.warning('The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.')
    user_agent = {'file_type': 'model', 'framework': 'flax', 'from_auto_class': from_auto_class}
    if (from_pipeline is not None):
        user_agent['using_pipeline'] = from_pipeline
    if (is_offline_mode() and (not local_files_only)):
        logger.info('Offline mode: forcing local_files_only=True')
        local_files_only = True
    # --- resolve the model config -----------------------------------------
    if (not isinstance(config, PretrainedConfig)):
        config_path = (config if (config is not None) else pretrained_model_name_or_path)
        (config, model_kwargs) = cls.config_class.from_pretrained(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs)
    else:
        model_kwargs = kwargs
    if (commit_hash is None):
        commit_hash = getattr(config, '_commit_hash', None)
    model_kwargs['dtype'] = dtype
    # --- locate the weights file (local dir, explicit file, URL, or hub) ---
    is_sharded = False
    if (pretrained_model_name_or_path is not None):
        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isdir(pretrained_model_name_or_path):
            if (from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME))):
                archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)
            elif (from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME))):
                archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)
                is_sharded = True
            elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)):
                archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
            elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)):
                archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)
                is_sharded = True
            elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
                raise EnvironmentError(f'Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those weights.')
            else:
                raise EnvironmentError(f'Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}.')
        elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
            archive_file = pretrained_model_name_or_path
            is_local = True
        elif is_remote_url(pretrained_model_name_or_path):
            filename = pretrained_model_name_or_path
            resolved_archive_file = download_url(pretrained_model_name_or_path)
        else:
            filename = (WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME)
            try:
                cached_file_kwargs = dict(cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, _commit_hash=commit_hash)
                resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
                # Fall back to sharded index files when the single-file weight
                # is absent.
                if ((resolved_archive_file is None) and (filename == FLAX_WEIGHTS_NAME)):
                    resolved_archive_file = cached_file(pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs)
                    if (resolved_archive_file is not None):
                        is_sharded = True
                elif ((resolved_archive_file is None) and from_pt):
                    resolved_archive_file = cached_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs)
                    if (resolved_archive_file is not None):
                        is_sharded = True
                if (resolved_archive_file is None):
                    # Probe for alternative weight formats to give a precise error.
                    has_file_kwargs = {'revision': revision, 'proxies': proxies, 'use_auth_token': use_auth_token}
                    if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
                        raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those weights.')
                    elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs):
                        raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use `from_pt=True` to load this model from those weights.')
                    else:
                        raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.')
            except EnvironmentError:
                raise
            except Exception:
                raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from ' make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.")
        if is_local:
            logger.info(f'loading weights file {archive_file}')
            resolved_archive_file = archive_file
        else:
            logger.info(f'loading weights file (unknown) from cache at {resolved_archive_file}')
    else:
        resolved_archive_file = None
    if is_sharded:
        (resolved_archive_file, _) = get_checkpoint_shard_files(pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash)
    # --- instantiate the model and deserialize the checkpoint -------------
    model = cls(config, *model_args, _do_init=_do_init, **model_kwargs)
    if from_pt:
        state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded)
    else:
        if is_sharded:
            state = cls.load_flax_sharded_weights(resolved_archive_file)
        else:
            try:
                with open(resolved_archive_file, 'rb') as state_f:
                    state = from_bytes(cls, state_f.read())
            except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
                try:
                    # A msgpack failure on a text file usually means a git-lfs
                    # pointer file was cloned instead of the real weights.
                    with open(resolved_archive_file) as f:
                        if f.read().startswith('version'):
                            raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')
                        else:
                            raise ValueError from e
                except (UnicodeDecodeError, ValueError):
                    raise EnvironmentError(f'Unable to convert {archive_file} to Flax deserializable object. ')
        if _do_init:
            state = jax.tree_util.tree_map(jnp.array, state)
        else:
            # Keep params on CPU when deferring initialization.
            state = jax.tree_util.tree_map((lambda x: jax.device_put(x, jax.devices('cpu')[0])), state)
    # --- reconcile checkpoint keys against the model's expected params ----
    if ((cls.base_model_prefix not in dict(model.params_shape_tree)) and (cls.base_model_prefix in state)):
        state = state[cls.base_model_prefix]
    if ((cls.base_model_prefix in dict(model.params_shape_tree)) and (cls.base_model_prefix not in state)):
        state = {cls.base_model_prefix: state}
    state = flatten_dict(state)
    # Fix: the optional hook was previously called unconditionally, which
    # raised TypeError when custom_process_state_fn was left as None.
    if custom_process_state_fn is not None:
        state = custom_process_state_fn(state)
    random_state = flatten_dict(unfreeze((model.params if _do_init else model.params_shape_tree)))
    missing_keys = (model.required_params - set(state.keys()))
    unexpected_keys = (set(state.keys()) - model.required_params)
    missing_keys_wo_params_axes = [k for k in missing_keys if (k[0] != 'params_axes')]
    if (missing_keys and (not _do_init)):
        logger.warning(f"The checkpoint {pretrained_model_name_or_path} is missing required keys (ignored missing 'params_axes'): {missing_keys_wo_params_axes}. Make sure to call model.init_weights to initialize the missing weights.")
        cls._missing_keys = missing_keys
    mismatched_keys = []
    for key in state.keys():
        if ((key in random_state) and (state[key].shape != random_state[key].shape)):
            if ignore_mismatched_sizes:
                mismatched_keys.append((key, state[key].shape, random_state[key].shape))
                state[key] = random_state[key]
            else:
                raise ValueError(f'Trying to load the pretrained weight for {key} failed: checkpoint has shape {state[key].shape} which is incompatible with the model shape {random_state[key].shape}. Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this model.')
    if (missing_keys and _do_init):
        for missing_key in missing_keys:
            state[missing_key] = random_state[missing_key]
    for unexpected_key in unexpected_keys:
        del state[unexpected_key]
    # --- report what was loaded / initialized -----------------------------
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).''')
    else:
        logger.info(f'''All model checkpoint weights were used when initializing {model.__class__.__name__}.
''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized (ignored missing 'params_axes'): {missing_keys_wo_params_axes}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    elif (len(mismatched_keys) == 0):
        logger.info(f'''All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.''')
    if (len(mismatched_keys) > 0):
        mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys])
        logger.warning(f'''Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:
{mismatched_warning}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    # Warn about reduced-precision parameters in the checkpoint.
    param_dtypes = jax.tree_util.tree_map((lambda x: x.dtype), state)
    fp16_params = [k for k in param_dtypes if (param_dtypes[k] == jnp.float16)]
    bf16_params = [k for k in param_dtypes if (param_dtypes[k] == jnp.bfloat16)]
    if (len(fp16_params) > 0):
        logger.warning(f'''Some of the weights of {model.__class__.__name__} were initialized in float16 precision from the model checkpoint at {pretrained_model_name_or_path}:
{fp16_params}
You should probably UPCAST the model weights to float32 if this was not intended. See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this.''')
    if (len(bf16_params) > 0):
        logger.warning(f'''Some of the weights of {model.__class__.__name__} were initialized in bfloat16 precision from the model checkpoint at {pretrained_model_name_or_path}:
{bf16_params}
You should probably UPCAST the model weights to float32 if this was not intended. See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this.''')
    # Best-effort: load a generation config when the model supports generate().
    if model.can_generate():
        try:
            model.generation_config = GenerationConfig.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs)
        except OSError:
            logger.info('Generation config file not found, using a generation config created from the model config.')
            pass
    if _do_init:
        model.params = unflatten_dict(state)
        return model
    else:
        return (model, unflatten_dict(state))
def test_requirement_source_fix_roundtrip(req_file):
    """Round-trip check: collect a pinned dependency from a requirements
    file, apply a fix version, and verify the file is rewritten in place."""
    req_path = req_file()
    with open(req_path, 'w') as handle:
        handle.write('flask==0.5')
    source = requirement.RequirementSource([req_path])
    specs = list(source.collect())
    # Locate the flask dependency among the collected specs.
    flask_dep: (ResolvedDependency | None) = next(
        (spec for spec in specs
         if isinstance(spec, ResolvedDependency) and spec.canonical_name == 'flask'),
        None,
    )
    assert flask_dep is not None
    assert flask_dep == ResolvedDependency(name='Flask', version=Version('0.5'))
    # Applying the fix must rewrite the pinned version on disk.
    source.fix(ResolvedFixVersion(dep=flask_dep, version=Version('1.0')))
    with open(req_path) as handle:
        assert handle.read().strip() == 'flask==1.0'
class ModelConfigs(BaseModelConfigs):
    """Training/model configuration for the image-to-word OCR model."""

    def __init__(self):
        super().__init__()
        # Timestamped output directory so successive runs never collide.
        run_stamp = datetime.now().strftime('%Y%m%d%H%M')
        self.model_path = os.path.join('Models/1_image_to_word', run_stamp)
        # Characters the model can emit.
        self.vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        # Input image geometry and label-length cap.
        self.height = 32
        self.width = 128
        self.max_text_length = 23
        # Optimisation hyper-parameters.
        self.batch_size = 1024
        self.learning_rate = 0.0001
        self.train_epochs = 100
        self.train_workers = 20
def test_face_COFW_dataset():
    """Smoke-test FaceCOFWDataset: construction in test/train mode, item
    access, and NME evaluation on ground-truth-derived outputs."""
    dataset = 'FaceCOFWDataset'
    dataset_info = Config.fromfile('configs/_base_/datasets/cofw.py').dataset_info
    dataset_class = DATASETS.get(dataset)
    # Replace annotation loading and the COCO backend with mocks on the class.
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    # 29 keypoint channels, all used for both training and inference.
    channel_cfg = dict(num_output_channels=29, dataset_joints=29, dataset_channel=[list(range(29))], inference_channel=list(range(29)))
    data_cfg = dict(image_size=[256, 256], heatmap_size=[64, 64], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'])
    data_cfg_copy = copy.deepcopy(data_cfg)
    # Construct once in test mode (result unused), then in train mode.
    _ = dataset_class(ann_file='tests/data/cofw/test_cofw.json', img_prefix='tests/data/cofw/', data_cfg=data_cfg_copy, pipeline=[], dataset_info=dataset_info, test_mode=True)
    custom_dataset = dataset_class(ann_file='tests/data/cofw/test_cofw.json', img_prefix='tests/data/cofw/', data_cfg=data_cfg_copy, pipeline=[], dataset_info=dataset_info, test_mode=False)
    assert (custom_dataset.dataset_name == 'cofw')
    assert (custom_dataset.test_mode is False)
    assert (custom_dataset.num_images == 2)
    _ = custom_dataset[0]
    # Feeding ground truth back through evaluation must give zero NME error.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['NME'])
        assert_almost_equal(infos['NME'], 0.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def compute_numerator_denominator(lm, h):
    """Return the (numerator, denominator) pair for history ``h``: each is
    1 minus the probability mass of the words observed after ``h`` (resp.
    after the shortened history ``h[1:]``), accumulated in log space."""
    base = lm.base
    seen_mass_log = (- math.inf)   # log-sum of p(w | h) over seen w
    lower_mass_log = (- math.inf)  # log-sum of p(w | h[1:]) over the same w
    for (word, log_prob) in lm._ngrams[len(h)][h].items():
        seen_mass_log = add_log_p(seen_mass_log, log_prob, base)
        shortened = (h + (word,))[1:]
        lower_mass_log = add_log_p(lower_mass_log, lm.log_p_raw(shortened), base)
    return ((1.0 - (base ** seen_mass_log)), (1.0 - (base ** lower_mass_log)))
def main():
    """Simulated streaming (real-time) detection over annotated video sequences.

    Frames of each sequence "arrive" at opts.fps; detections come either from
    a results cache or from a detector whose latency is drawn from an
    empirical runtime distribution.  Per-sequence outputs and global timing
    statistics are pickled into opts.out_dir.
    """
    opts = parse_args()
    mkdir2(opts.out_dir)
    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = (None if opts.no_class_mapping else db.dataset.get('coco_mapping', None))
    if (coco_mapping is not None):
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']
    if opts.cached_res:
        # "_ccf" caches are read incrementally with a moving end index.
        cache_in_ccf = ('_ccf' in basename(opts.cached_res))
        if cache_in_ccf:
            cache_end_idx = 0
        cached_res = pickle.load(open(opts.cached_res, 'rb'))
    else:
        assert (torch.cuda.device_count() == 1)
        model = init_detector(opts)
    np.random.seed(opts.seed)
    # Empirical distribution of per-frame detector latency (seconds).
    runtime = pickle.load(open(opts.runtime, 'rb'))
    runtime_dist = dist_from_dict(runtime, opts.perf_factor)
    runtime_all = []
    n_processed = 0
    n_total = 0
    for (sid, seq) in enumerate(tqdm(seqs)):
        frame_list = [img for img in db.imgs.values() if (img['sid'] == sid)]
        n_frame = len(frame_list)
        n_total += n_frame
        if (not opts.cached_res):
            # Preload all frames so disk I/O does not skew the simulation.
            frames = []
            for img in frame_list:
                img_path = join(opts.data_root, seq_dirs[sid], img['name'])
                frames.append(imread(img_path))
        timestamps = []
        results_parsed = []
        input_fidx = []
        runtime = []
        last_fidx = None
        if (opts.cached_res and cache_in_ccf):
            results_raw = None
        else:
            results_raw = []
        t_total = (n_frame / opts.fps)
        t_elapsed = 0
        if opts.dynamic_schedule:
            # Mean runtime expressed in frame intervals ("real-time factor").
            mean_rtf = (runtime_dist.mean() * opts.fps)
        else:
            stride_cnt = 0
        while 1:
            if (t_elapsed >= t_total):
                break
            # Index of the latest frame available at the simulated time.
            fidx_continous = (t_elapsed * opts.fps)
            fidx = int(np.floor(fidx_continous))
            if (fidx == last_fidx):
                # Same frame as last iteration: wait for the next arrival.
                fidx += 1
                if (fidx == n_frame):
                    break
                t_elapsed = (fidx / opts.fps)
            last_fidx = fidx
            if opts.dynamic_schedule:
                if (mean_rtf > 1):
                    # Slower than real time: possibly skip this frame to stay aligned.
                    fidx_remainder = (fidx_continous - fidx)
                    if (mean_rtf < np.floor((fidx_remainder + mean_rtf))):
                        continue
            elif ((stride_cnt % opts.det_stride) == 0):
                stride_cnt = 1
            else:
                stride_cnt += 1
                continue
            if opts.cached_res:
                img = frame_list[fidx]
                if cache_in_ccf:
                    (cache_end_idx, bboxes, scores, labels, masks) = result_from_ccf(cached_res, img['id'], cache_end_idx)
                    ltwh2ltrb_(bboxes)
                else:
                    result = cached_res[img['id']]
                    (bboxes, scores, labels, masks) = parse_det_result(result, coco_mapping, n_class)
            else:
                frame = frames[fidx]
                result = inference_detector(model, frame)
                (bboxes, scores, labels, masks) = parse_det_result(result, coco_mapping, n_class)
            # Advance simulated time by the sampled detection latency.
            rt_this = runtime_dist.draw()
            t_elapsed += rt_this
            if (t_elapsed >= t_total):
                break
            timestamps.append(t_elapsed)
            if (results_raw is not None):
                results_raw.append(result)
            results_parsed.append((bboxes, scores, labels, masks))
            input_fidx.append(fidx)
            runtime.append(rt_this)
        out_path = join(opts.out_dir, (seq + '.pkl'))
        if (opts.overwrite or (not isfile(out_path))):
            out_dict = {'results_parsed': results_parsed, 'timestamps': timestamps, 'input_fidx': input_fidx, 'runtime': runtime}
            if (results_raw is not None):
                out_dict['results_raw'] = results_raw
            pickle.dump(out_dict, open(out_path, 'wb'))
        runtime_all += runtime
        n_processed += len(results_parsed)
    runtime_all_np = np.array(runtime_all)
    # How many detections finished within one frame interval.
    n_small_runtime = (runtime_all_np < (1.0 / opts.fps)).sum()
    out_path = join(opts.out_dir, 'time_info.pkl')
    if (opts.overwrite or (not isfile(out_path))):
        pickle.dump({'runtime_all': runtime_all, 'n_processed': n_processed, 'n_total': n_total, 'n_small_runtime': n_small_runtime}, open(out_path, 'wb'))
    s2ms = (lambda x: (1000.0 * x))
    print(f'{n_processed}/{n_total} frames processed')
    print_stats(runtime_all_np, 'Runtime (ms)', cvt=s2ms)
    print(f'Runtime smaller than unit time interval: {n_small_runtime}/{n_processed} ({((100.0 * n_small_runtime) / n_processed):.4g}%)')
class ReaderNode(Node):
    """Graph node that records the name of the reader that produced it."""

    def __init__(self, unique_id, reader_name):
        super().__init__(unique_id, data=dict(reader_name=reader_name))

    def _copy_name_and_data(self, node_cache):
        """Return a fresh ReaderNode carrying the same name and reader name."""
        return ReaderNode(self.name, self.data['reader_name'])

    def reader_name(self):
        """Name of the reader that created this node."""
        return self.data['reader_name']
def calc_fees_for_commitment_tx(*, num_htlcs: int, feerate: int, is_local_initiator: bool, round_to_sat: bool=True) -> Dict[('HTLCOwner', int)]:
    """Fee each side pays for a commitment tx carrying ``num_htlcs`` HTLCs.

    Only the channel initiator pays the fee; the other side pays zero.
    """
    weight = COMMITMENT_TX_WEIGHT + num_htlcs * HTLC_OUTPUT_WEIGHT
    fee = feerate * weight
    if round_to_sat:
        # Truncate to a whole number of satoshis.
        fee -= fee % 1000
    fee_local = fee if is_local_initiator else 0
    return {LOCAL: fee_local, REMOTE: fee - fee_local}
class ModelFormTagFieldTest(TagTestManager, TestCase):
    """Tests for a ModelForm whose model uses a tagulous TagField."""
    manage_models = [test_models.TagFieldModel]

    def setUpExtra(self):
        """Cache the form, model and tag model under test."""
        self.form = test_forms.TagFieldModelForm
        self.model = test_models.TagFieldModel
        self.tag_model = self.model.tags.tag_model

    def test_formfield(self):
        """Both the meta field and the descriptor yield a tagulous TagField."""
        tag1_field = self.model._meta.get_field('tags').formfield()
        self.assertIsInstance(tag1_field, tag_forms.TagField)
        self.assertIsInstance(tag1_field.tag_options, tag_models.TagOptions)
        tag2_field = self.model.tags.formfield()
        self.assertIsInstance(tag2_field, tag_forms.TagField)
        self.assertIsInstance(tag2_field.tag_options, tag_models.TagOptions)

    def test_media(self):
        """Form media include the configured autocomplete JS and CSS assets."""
        media = self.form().media
        for js in tag_settings.AUTOCOMPLETE_JS:
            self.assertTrue((js in media._js))
        for (grp, files) in tag_settings.AUTOCOMPLETE_CSS.items():
            self.assertTrue((grp in media._css))
            for css in files:
                self.assertTrue((css in media._css[grp]))

    def test_model_form_save(self):
        """Saving the form persists the instance and its tags immediately."""
        form = test_forms.TagFieldModelForm(data={'name': 'Test 1', 'tags': 'blue, red'})
        self.assertTrue(form.is_valid())
        t1 = form.save()
        self.assertEqual(t1.name, 'Test 1')
        self.assertEqual(t1.tags, 'blue, red')
        self.assertInstanceEqual(t1, name='Test 1', tags='blue, red')
        self.assertTagModel(self.tag_model, {'blue': 1, 'red': 1})

    def test_model_form_save_commit_false(self):
        """With commit=False tags are deferred until save_m2m() is called."""
        form = test_forms.TagFieldModelForm(data={'name': 'Test 1', 'tags': 'blue, red'})
        self.assertTrue(form.is_valid())
        t1 = form.save(commit=False)
        t1.save()
        self.assertEqual(t1.name, 'Test 1')
        # Tags are not yet stored — save_m2m() has not run.
        self.assertEqual(t1.tags, '')
        self.assertTagModel(self.tag_model, {})
        self.assertInstanceEqual(t1, name='Test 1', tags='')
        form.save_m2m()
        self.assertEqual(t1.name, 'Test 1')
        self.assertEqual(t1.tags, 'blue, red')
        self.assertInstanceEqual(t1, name='Test 1', tags='blue, red')
        self.assertTagModel(self.tag_model, {'blue': 1, 'red': 1})

    def test_override_option_dict_formfield(self):
        """tag_options can be overridden with a plain dict."""
        field1 = self.form().fields['tags']
        self.assertEqual(field1.tag_options.case_sensitive, False)
        field2 = self.model.tags.formfield(tag_options={'case_sensitive': True})
        self.assertEqual(field2.tag_options.case_sensitive, True)

    def test_override_option_cls_formfield(self):
        """tag_options can be overridden with a TagOptions instance."""
        field1 = self.form().fields['tags']
        self.assertEqual(field1.tag_options.case_sensitive, False)
        field2 = self.model.tags.formfield(tag_options=tag_models.TagOptions(case_sensitive=True))
        self.assertEqual(field2.tag_options.case_sensitive, True)

    def test_override_autocomplete_tags_formfield(self):
        """autocomplete_tags can be replaced with an explicit list."""
        self.tag_model.objects.create(name='red')
        self.tag_model.objects.create(name='blue')
        self.tag_model.objects.create(name='green')
        field1 = self.form().fields['tags']
        self.assertSequenceEqual([t.name for t in field1.autocomplete_tags], [t.name for t in self.tag_model.objects.all()])
        field2 = self.model.tags.formfield(autocomplete_tags=['pink', 'lime'])
        self.assertSequenceEqual(field2.autocomplete_tags, ['pink', 'lime'])

    def test_render_tag_list(self):
        """Rendering embeds the full tag list as a data attribute."""
        self.tag_model.objects.create(name='red')
        self.tag_model.objects.create(name='blue')
        self.tag_model.objects.create(name='yellow')
        self.assertTagModel(self.tag_model, {'red': 0, 'blue': 0, 'yellow': 0})
        form = self.form(data={'name': 'Test 1', 'tags': 'red, blue'})
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{"required": true}" data-tagulous="true" data-tag-list="["blue", "red", "yellow"]" id="id_tags" name="tags" {{required}}type="text" value="red, blue" />')

    def test_initial_string(self):
        """An initial tag string is rendered verbatim as the value."""
        form = test_forms.TagFieldForm(initial={'tags': 'red, blue'})
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{"required": true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" value="red, blue" />')

    def test_initial_tag_list(self):
        """An initial list of tag instances renders sorted tag names."""
        t1 = self.tag_model.objects.create(name='red')
        t2 = self.tag_model.objects.create(name='blue')
        form = test_forms.TagFieldForm(initial={'tags': [t1, t2]})
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{"required": true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" value="blue, red" />')

    def test_initial_tag_queryset(self):
        """An initial tag queryset renders sorted tag names."""
        self.tag_model.objects.create(name='red')
        self.tag_model.objects.create(name='blue')
        tags = self.tag_model.objects.all()
        form = test_forms.TagFieldForm(initial={'tags': tags})
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{"required": true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" value="blue, red" />')

    def test_tagged_edit(self):
        """Editing a tagged instance pre-fills the value and the tag list."""
        t1 = self.model.objects.create(name='Test 1', tags='blue, red')
        form = self.form(instance=t1)
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-list="["blue", "red"]" data-tag-options="{"required": true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" value="blue, red" />')

    def test_tagmeta_without_autocomplete_settings(self):
        """Widget-level default autocomplete settings surface in tag options."""
        class TagMetaUserForm(forms.ModelForm):
            class Meta():
                model = test_models.TagMetaUser
                exclude = []
        form = TagMetaUserForm()
        form['two'].field.widget.default_autocomplete_settings = {'bees': 'buzz'}
        self.assertHTMLEqual(str(form['two']), '<input autocomplete="off" data-tag-list="[]" data-tag-options="{"autocomplete_settings": {"bees": "buzz"}, "case_sensitive": true, "force_lowercase": true, "max_count": 10, "required": false}" data-tagulous="true" id="id_two" name="two" type="text" />')
def test__getting_started__custom_plotting():
    """Smoke test: the custom-plotting example OCP builds without error."""
    from bioptim.examples.getting_started import custom_plotting as ocp_module
    model_path = os.path.dirname(ocp_module.__file__) + '/models/pendulum.bioMod'
    ocp_module.prepare_ocp(
        biorbd_model_path=model_path,
        final_time=2,
        n_shooting=50,
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
def test_solver_synchronize_single(package: ProjectPackage, pool: RepositoryPool, io: NullIO) -> None:
    """An installed package absent from the requirements is removed when synchronizing."""
    installed = get_package('a', '1.0')
    solver = Solver(package, pool, [installed], [], io)
    expected = [{'job': 'remove', 'package': installed}]
    check_solver_result(solver.solve(), expected, synchronize=True)
def save_as_playlist(request: WSGIRequest) -> HttpResponse:
    """Persist the songs played within a requested time window as a playlist."""
    try:
        (start, end) = _parse_datetimes(request)
    except ValueError as error:
        return HttpResponseBadRequest(error.args[0])
    name = request.POST.get('name')
    if not name:
        return HttpResponseBadRequest('Name required')
    played = PlayLog.objects.all().filter(created__gte=start).filter(created__lt=end)
    # The list id encodes the window so the same range is never saved twice.
    list_id = f"playlog {str(start).replace(' ', 'T')} {str(end).replace(' ', 'T')}"
    (playlist, created) = ArchivedPlaylist.objects.get_or_create(list_id=list_id, title=name, counter=0)
    if not created:
        return HttpResponseBadRequest('Playlist already exists')
    # Entries are indexed consecutively; logs without a song are skipped.
    song_index = 0
    for log in played:
        if log.song:
            PlaylistEntry.objects.create(playlist=playlist, index=song_index, url=log.song.url)
            song_index += 1
    return HttpResponse()
def test_arrays():
    """singleaxis() must accept numpy-array inputs and return a dict of results."""
    zenith = np.array([10])
    azimuth = np.array([180])
    tracker_data = tracking.singleaxis(zenith, azimuth, axis_tilt=0, axis_azimuth=0, max_angle=90, backtrack=True, gcr=(2.0 / 7.0))
    assert isinstance(tracker_data, dict)
    expected = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90, 'surface_tilt': 0}
    for key, value in expected.items():
        assert_allclose(tracker_data[key], value, atol=1e-07)
# NOTE(review): stray '_db' below — likely the remnant of a truncated marker
# such as 'pytestmark = pytest.mark.django_db'; confirm against the original
# source before relying on this line.
_db

def test_query_events_map(graphql_client, conference_factory, event_factory):
    """Events with coordinates expose a non-null `map` in the conference query."""
    now = timezone.now()
    conference = conference_factory(start=now, end=(now + timezone.timedelta(days=3)))
    event_factory(conference=conference, latitude=1, longitude=1)
    resp = graphql_client.query('query($code: String!) {\n conference(code: $code) {\n events {\n map {\n latitude\n longitude\n link\n image\n }\n }\n }\n }', variables={'code': conference.code})
    assert (not resp.get('errors'))
    assert (len(resp['data']['conference']['events']) == 1)
    event = resp['data']['conference']['events'][0]
    assert (event['map'] is not None)
def test_local_det_chol():
    """When a matrix's Cholesky factor is also requested, its det should be
    rewritten away: no Det node may remain in the compiled graph."""
    X = matrix('X')
    chol = pt.linalg.cholesky(X)
    det_X = pt.linalg.det(X)

    def has_det(fn):
        return any(isinstance(node, Det) for node in fn.maker.fgraph.toposort())

    assert not has_det(function([X], [chol, det_X]))
    # The rewrite must also fire when X itself is among the outputs.
    assert not has_det(function([X], [chol, det_X, X]))
class FSNERTokenizerUtils(object):
    """Tokenization helpers for FSNER: batch-encode queries/supports and
    decode entity spans from start/end probability scores."""

    def __init__(self, pretrained_model_name_or_path):
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)

    def tokenize(self, x):
        """Encode `x` with padding/truncation to length 384.

        Accepts a list of strings (query batch) or a list of lists of strings
        (supports); supports additionally get per-group 'sizes' and the
        [E]/[/E] entity-marker token ids.
        """
        if (isinstance(x, list) and all([isinstance(_x, list) for _x in x])):
            d = None
            for l in x:
                t = self.tokenizer(l, padding='max_length', max_length=384, truncation=True, return_tensors='pt')
                t['sizes'] = torch.tensor([len(l)])
                if (d is not None):
                    # Concatenate each field onto the running batch.
                    for k in d.keys():
                        d[k] = torch.cat((d[k], t[k]), 0)
                else:
                    d = t
            d['start_token_id'] = torch.tensor(self.tokenizer.convert_tokens_to_ids('[E]'))
            d['end_token_id'] = torch.tensor(self.tokenizer.convert_tokens_to_ids('[/E]'))
        elif (isinstance(x, list) and all([isinstance(_x, str) for _x in x])):
            d = self.tokenizer(x, padding='max_length', max_length=384, truncation=True, return_tensors='pt')
        else:
            raise Exception('Type of parameter x was not recognized! Only `list of strings` for query or `list of lists of strings` for supports are supported.')
        return d

    def extract_entity_from_scores(self, query, W_query, p_start, p_end, thresh=0.7):
        """Decode entity strings whose joint start*end score is >= `thresh`.

        Returns one list of (decoded_text, score) tuples per query example.
        """
        final_outputs = []
        for idx in range(len(W_query['input_ids'])):
            start_indexes = end_indexes = range(p_start.shape[1])
            output = []
            # Enumerate all candidate (start, end) spans with start < end.
            for start_id in start_indexes:
                for end_id in end_indexes:
                    if (start_id < end_id):
                        output.append((start_id, end_id, p_start[idx][start_id].item(), p_end[idx][end_id].item()))
            # Rank candidate spans by joint probability, best first.
            output.sort(key=(lambda tup: (tup[2] * tup[3])), reverse=True)
            temp = []
            for k in range(len(output)):
                if ((output[k][2] * output[k][3]) >= thresh):
                    (c_start_pos, c_end_pos) = (output[k][0], output[k][1])
                    decoded = self.tokenizer.decode(W_query['input_ids'][idx][c_start_pos:c_end_pos])
                    temp.append((decoded, (output[k][2] * output[k][3])))
            final_outputs.append(temp)
        return final_outputs
class TestCaseTransfer(TestCase):
    """Interop test: several random files are transferred over one connection."""

    # NOTE(review): name/abbreviation/desc take no `self` — presumably
    # @staticmethod in the original project, with the decorators stripped from
    # this copy; confirm before calling them on instances.
    def name():
        return 'transfer'

    def abbreviation():
        return 'DC'

    def desc():
        return 'Stream data is being sent and received correctly. Connection close completes with a zero error code.'

    def get_paths(self):
        """Generate the three random payload files used for the transfer."""
        self._files = [self._generate_random_file((2 * MB)), self._generate_random_file((3 * MB)), self._generate_random_file((5 * MB))]
        return self._files

    def check(self) -> TestResult:
        """Succeed iff exactly one handshake occurred and all files match."""
        num_handshakes = self._count_handshakes()
        if (num_handshakes != 1):
            logging.info('Expected exactly 1 handshake. Got: %d', num_handshakes)
            return TestResult.FAILED
        if (not self._check_version_and_files()):
            return TestResult.FAILED
        return TestResult.SUCCEEDED
def SM1_policy():
    """Build the SM1 (selfish-mining baseline) policy table.

    Returns:
        An int array of shape (grid, n_states); entry (i, j) is the action id
        chosen for environment state j (per SM_env's state indexing).
    """
    grid = 1
    cur_env = SM_env(max_hidden_block=HIDDEN_BLOCK, attacker_fraction=0.4, follower_fraction=GAMMA, dev=0)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    sm1_policy = np.zeros((grid, cur_env._state_space_n), dtype=int)
    for i in range(grid):
        for j in range(cur_env._state_space_n):
            (h1, h2, status) = cur_env._index_to_name(j)
            if h1 < h2:
                a = 0
            elif (h1 == h2) and (h1 == 1) and (status == 'normal'):
                a = 0
            elif (h1 == (h2 + 1)) and (h2 > 0):
                a = 1
            elif h1 == (HIDDEN_BLOCK + 1):
                a = 1
            else:
                a = 2
            sm1_policy[(i, j)] = a
    return sm1_policy
class SIDGuesser(OracleDatabase):
    """Discover valid Oracle SIDs via a wordlist, brute force, or listener aliases."""

    def __init__(self, args, SIDFile, timeSleep=0):
        """
        Args:
            args: shared option dict (server, port, print helper, ...).
            SIDFile: path to a file of candidate SIDs, one per line.
            timeSleep: seconds to pause between connection attempts.
        """
        logging.debug('SIDGuesser object created')
        OracleDatabase.__init__(self, args)
        self.SIDFile = SIDFile
        self.sids = []
        self.valideSIDS = []
        # SID guessing must not attempt privileged connections.
        self.args['SYSDBA'] = False
        self.args['SYSOPER'] = False
        self.timeSleep = timeSleep
        # Server error fragments meaning the tried SID is NOT valid.
        self.NO_GOOD_SID_STRING_LIST = ['listener does not currently know of service requested', 'listener does not currently know of SID', 'connection to server failed', 'destination host unreachable']

    def getValidSIDs(self):
        """Return the SIDs confirmed valid so far."""
        return self.valideSIDS

    def appendValideSID(self, sid):
        """Record `sid` as valid (deduplicated)."""
        if (sid not in self.valideSIDS):
            self.valideSIDS.append(sid)

    def __loadSIDsFromFile__(self):
        """Load candidate SIDs from self.SIDFile, stripped and sorted."""
        sids = []
        logging.info('Load SIDS stored in the {0} file'.format(self.SIDFile))
        f = open(self.SIDFile)
        for l in f:
            sids.append(l.replace('\n', '').replace('\t', ''))
        f.close()
        return sorted(sids)

    def __testIfAGoodSID__(self):
        """Try connecting with self.args['sid'] and random credentials; any
        server error NOT in NO_GOOD_SID_STRING_LIST means the SID exists."""
        no_good_sid_found = False
        self.args['serviceName'] = None
        self.__generateConnectionString__(username=self.__generateRandomString__(nb=15), password=self.__generateRandomString__(nb=5))
        logging.debug('Try to connect with the {0} SID ({1})'.format(self.args['sid'], self.args['connectionStr']))
        status = self.connection()
        if (self.__needRetryConnection__(status) == True):
            status = self.__retryConnect__(nbTry=4)
        if (status != None):
            for aNoGoodString in self.NO_GOOD_SID_STRING_LIST:
                if (aNoGoodString in str(status)):
                    no_good_sid_found = True
                    break
            if (no_good_sid_found == False):
                self.appendValideSID(self.args['sid'])
                logging.info("'{0}' is a valid SID (Server message: {1})".format(self.args['sid'], str(status)))
                self.args['print'].goodNews(stringToLinePadded("'{0}' is a valid SID. Continue... ".format(self.args['sid'])))
        self.close()
        return status

    def searchKnownSIDs(self):
        """Test every SID from the wordlist file; returns True when done."""
        self.args['print'].subtitle('Searching valid SIDs thanks to a well known SID list on the {0}:{1} server'.format(self.args['server'], self.args['port']))
        self.sids += self.__loadSIDsFromFile__()
        (pbar, nb) = (self.getStandardBarStarted(len(self.sids)), 0)
        logging.info('Start the research')
        for aSID in self.sids:
            nb += 1
            pbar.update(nb)
            self.args['sid'] = aSID
            connectionStatus = self.__testIfAGoodSID__()
            sleep(self.timeSleep)
        pbar.finish()
        return True

    def bruteforceSIDs(self, size=4, charset=string.ascii_uppercase):
        """Exhaustively try every `size`-character SID over `charset`."""
        self.args['print'].subtitle('Searching valid SIDs thanks to a brute-force attack on {2} chars now ({0}:{1})'.format(self.args['server'], self.args['port'], size))
        (pbar, nb) = (self.getStandardBarStarted((len(charset) ** size)), 0)
        logging.info('Start the research')
        for aSID in product(list(charset), repeat=size):
            nb += 1
            pbar.update(nb)
            self.args['sid'] = ''.join(aSID)
            self.__testIfAGoodSID__()
            sleep(self.timeSleep)
        pbar.finish()
        return True

    def loadSidsFromListenerAlias(self):
        """Add listener ALIASes to the candidate list (aliases often work as SIDs)."""
        logging.info('Put listener ALIAS into the SID list to try ALIAS like SID')
        tnscmd = Tnscmd(self.args)
        tnscmd.getInformation()
        self.sids += tnscmd.getAlias()
def test_expansion_penalty():
    """Smoke/perf check of the CUDA expansion-penalty module on a random cloud."""
    points = torch.rand(20, 8192, 3).cuda()
    print('Input_size: ', points.shape)
    expansion = expansionPenaltyModule()
    tic = time.perf_counter()
    (dis, ass, mean_length) = expansion(points, 512, 1.5)
    print('Runtime: %lfs' % (time.perf_counter() - tic))
class AllTests(unittest.TestSuite):
    """Aggregate suite collecting every pyfakefs test module."""

    def suite(self):
        """Load and add the tests from each module; return the populated suite."""
        loader = unittest.defaultTestLoader
        modules = [
            fake_filesystem_test,
            fake_filesystem_glob_test,
            fake_filesystem_shutil_test,
            fake_os_test,
            fake_stat_time_test,
            fake_open_test,
            fake_tempfile_test,
            fake_filesystem_vs_real_test,
            fake_filesystem_unittest_test,
            example_test,
            mox3_stubout_test,
            dynamic_patch_test,
            fake_pathlib_test,
            patched_packages_test,
        ]
        self.addTests([loader.loadTestsFromModule(module) for module in modules])
        return self
class TestGetSynsetsFromIds(tf.test.TestCase):
    """Tests for imagenet_spec.get_synsets_from_ids on a small synthetic graph."""

    def test_on_toy_graph(self):
        (toy_graph, _, _) = create_toy_graph()
        requested = [5, 0, 6]
        mapping = imagenet_spec.get_synsets_from_ids(requested, toy_graph)
        # Every requested id is present, and each maps to its own synset.
        self.assertEqual(set(mapping.keys()), set(requested))
        for (wn_id, synset) in mapping.items():
            self.assertEqual(wn_id, synset.wn_id)
def verify_pretrain_params(args):
    """Validate that CLI args are compatible with the pretrained char-CNN weights.

    Raises:
        AssertionError: describing the first incompatible setting found.
    """
    assert args.embed_bytes, 'To use pretrained weights, embed_bytes must be set to True.'
    assert (args.char_cnn_nonlinear_fn == model_constants.PRETRAINED_CHAR_CNN_NONLINEAR_FN), 'To use pretrained weights, the non linearity used should be relu.'
    assert (args.char_embed_dim == model_constants.PRETRAINED_CHAR_EMBED_DIM), 'To use pretrained weights char_embed_dim should be set to 16.'
    assert (args.char_cnn_output_dim == model_constants.PRETRAINED_CHAR_CNN_OUTPUT_DIM), 'To use pretrained weights, the output dim of the CNN layer should be 512.'
    # char_cnn_params arrives as a string literal; parse it safely for comparison.
    assert (literal_eval(args.char_cnn_params) == model_constants.PRETRAINED_CHAR_CNN_PARAMS), "CNN Params don't match with the ones needed for loading pretrained weights"
class Array(PymiereBaseCollection):
    """Python proxy for a JavaScript/ExtendScript Array living in the host app."""

    def __init__(self, pymiere_id):
        # 'length' is the ES property the base collection uses for len().
        super(Array, self).__init__(pymiere_id, 'length')

    def __getitem__(self, index):
        """Read item `index` from the remote ES array."""
        return _format_object_to_py(_eval_script_returning_object("$._pymiere['{}'][{}]".format(self._pymiere_id, index)))

    def __setitem__(self, key, value):
        """Assign item `key` of the remote ES array."""
        eval_script("$._pymiere['{}'][{}] = {}".format(self._pymiere_id, key, value))

    def append(self, item):
        # Pythonic alias for the JavaScript push().
        self.push(item)

    def push(self, item):
        """Append `item` to the remote ES array."""
        self._eval_on_this_object('push({})'.format(_format_object_to_es(item)))

    # NOTE(review): takes no `self`/`cls` — likely a @staticmethod whose
    # decorator was stripped from this copy; confirm against the original.
    def python_list_to_es_declaration(python_list):
        """Render a Python list as an ES array literal string."""
        return '[{}]'.format(', '.join([_format_object_to_es(item) for item in python_list]))

    # NOTE(review): first parameter is `cls` — likely a @classmethod whose
    # decorator was stripped from this copy; confirm against the original.
    def from_python_list(cls, python_list):
        """Create a remote ES array from `python_list` and wrap it."""
        return cls(**_eval_script_returning_object(cls.python_list_to_es_declaration(python_list), as_kwargs=True))
def get_cell_html(cell, highlight):
    """Render a table-cell dict as an HTML <td>/<th> fragment.

    Args:
        cell: dict with keys 'is_header' (bool), 'column_span' (int),
            'row_span' (int) and 'value' (str). The value is inserted
            verbatim — no HTML escaping is performed.
        highlight: when truthy, the cell gets class="highlighted".

    Returns:
        The HTML string for the cell.
    """
    if highlight:
        color_str = ' class="highlighted" '
    else:
        color_str = ''
    cell_symbol = 'th' if cell['is_header'] else 'td'
    # The original computed a span-less start marker first and immediately
    # overwrote it; only the span-bearing form was ever used.
    start_marker = ('<%s%s colspan=%d rowspan=%d >' % (cell_symbol, color_str, cell['column_span'], cell['row_span']))
    end_marker = ('</%s>' % cell_symbol)
    return ((((start_marker + ' ') + cell['value']) + ' ') + end_marker)
class CmdLookDark(Command):
    """'look' (and friends) while in the dark room: mostly fumbling around,
    until a light source is eventually found."""
    key = 'look'
    aliases = ['l', 'feel', 'search', 'feel around', 'fiddle']
    locks = 'cmd:all()'
    help_category = 'TutorialWorld'

    def func(self):
        """Perform one search attempt; after enough tries, yield a splinter."""
        caller = self.caller
        # Number of searches so far, kept on the non-persistent ndb store.
        nr_searches = caller.ndb.dark_searches
        if (nr_searches is None):
            nr_searches = 0
            caller.ndb.dark_searches = nr_searches
        # During the first four searches there is a 90% chance of flavor text only.
        if ((nr_searches < 4) and (random.random() < 0.9)):
            caller.msg(random.choice(DARK_MESSAGES))
            caller.ndb.dark_searches += 1
        elif any((obj for obj in caller.contents if utils.inherits_from(obj, LightSource))):
            # Already carrying a light source: no second splinter is created.
            caller.msg(ALREADY_LIGHTSOURCE)
        else:
            create_object(LightSource, key='splinter', location=caller)
            caller.msg(FOUND_LIGHTSOURCE)
class Word(gym.spaces.MultiDiscrete):
    """Space for sequences of at most `max_length` word ids over a vocabulary.

    The vocabulary is prefixed with the PAD/UNK/BOS/EOS/SEP special tokens
    that are not already present in `vocab`.
    """

    def __init__(self, max_length, vocab):
        """
        Args:
            max_length: maximum number of word ids per sequence.
            vocab: iterable of words; must contain no duplicates.

        Raises:
            VocabularyHasDuplicateTokens: if `vocab` has repeated entries.
        """
        if (len(vocab) != len(set(vocab))):
            raise VocabularyHasDuplicateTokens()
        self.max_length = max_length
        self.PAD = '<PAD>'
        self.UNK = '<UNK>'
        self.BOS = '<S>'
        self.EOS = '</S>'
        self.SEP = '<|>'
        special_tokens = [self.PAD, self.UNK, self.EOS, self.BOS, self.SEP]
        self.vocab = [w for w in special_tokens if (w not in vocab)]
        self.vocab += list(vocab)
        self.vocab_set = set(self.vocab)
        self.vocab_size = len(self.vocab)
        self.id2w = {i: w for (i, w) in enumerate(self.vocab)}
        self.w2id = {w: i for (i, w) in self.id2w.items()}
        self.PAD_id = self.w2id[self.PAD]
        self.UNK_id = self.w2id[self.UNK]
        self.BOS_id = self.w2id[self.BOS]
        self.EOS_id = self.w2id[self.EOS]
        self.SEP_id = self.w2id[self.SEP]
        super().__init__(([(len(self.vocab) - 1)] * self.max_length))
        self.dtype = np.int64

    def tokenize(self, text, padding=False):
        """Convert `text` to an array of word ids, optionally PAD-padded.

        Raises:
            AssertionError: if `padding` is set and the text exceeds max_length.
        """
        text = text.lower()
        # BUG FIX: the pattern was '.', which matches ANY character and
        # obliterated the whole text; the intent is to mark sentence
        # boundaries at literal periods only.
        text = re.sub(r'\.', ' </S> <S> ', text)
        text = (('<S> ' + text) + ' </S>')
        text = re.sub("'", '', text)
        # NOTE(review): this filter also strips the '<S>'/'</S>' markers
        # inserted above (uppercase letters and punctuation are removed) —
        # confirm whether the markers are meant to survive tokenization.
        text = re.sub('[^a-z0-9 ]', ' ', text)
        words = text.split()
        ids = [self.w2id.get(w, self.UNK_id) for w in words]
        if padding:
            nb_pads = (self.max_length - len(ids))
            msg = 'Provided `max_length` was not large enough ({} words).'.format(len(ids))
            assert (nb_pads >= 0), msg
            ids += ([self.PAD_id] * nb_pads)
        return np.array(ids)

    def __repr__(self):
        return 'Word(L={}, V={})'.format(self.max_length, self.vocab_size)
def MirrorTest(source_local, dest_local, list_of_dirnames, compare_hardlinks=1, dest_dirname=abs_output_dir):
    """Mirror each directory in `list_of_dirnames` into `dest_dirname` and
    verify the copy matches the source (optionally comparing hardlinks)."""
    Globals.set('preserve_hardlinks', compare_hardlinks)
    Globals.set('no_compression_regexp_string', os.fsencode(actions.DEFAULT_NOT_COMPRESSED_REGEXP))
    dest_rp = rpath.RPath(Globals.local_connection, dest_dirname)
    Myrm(dest_dirname)
    for src_dirname in list_of_dirnames:
        src_rp = rpath.RPath(Globals.local_connection, src_dirname)
        reset_hardlink_dicts()
        _reset_connections(src_rp, dest_rp)
        InternalMirror(source_local, dest_local, src_dirname, dest_dirname, force=True)
        _reset_connections(src_rp, dest_rp)
        # Deep comparison of mirrored tree against the source.
        assert compare_recursive(src_rp, dest_rp, compare_hardlinks)
def save_output(browser: Chrome, filename=None, process_func=None):
    """Grab the rendered markdown body from the browser, normalize away
    run-specific PyWebIO ids/attributes, optionally post-process, and
    optionally save the normalized HTML under ./output.

    Args:
        browser: Selenium Chrome driver with the page loaded.
        filename: when given, the normalized HTML is written to
            <here_dir>/output/<filename>.
        process_func: optional callable applied to the normalized HTML.

    Returns:
        (raw_html, html): the untouched innerHTML and the normalized form.
    """
    raw_html = browser.find_element_by_id('markdown-body').get_attribute('innerHTML')
    # Strip volatile, per-run identifiers so successive outputs are comparable.
    html = re.sub('"pywebio-scope-.*?"', '', raw_html)
    html = re.sub('id="pywebio-.*?"', '', html)
    html = re.sub("\\('pywebio-.*?'\\)", '', html)
    html = re.sub('WebIO.pushData\\(.*?\\)', '', html)
    html = re.sub('</(.*?)>', '</\\g<1>>\\n', html)  # one closing tag per line
    html = html.replace('"opacity: 1;"', '""').replace(' open=""', '')
    html = html.strip()
    if process_func:
        html = process_func(html)
    if filename:
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(path.join(here_dir, 'output', filename), 'w') as fp:
            fp.write(html)
    return (raw_html, html)
def get_access_code(code: str, flag: str):
    """Exchange a WeChat OAuth `code` for an access token / openid payload.

    Args:
        code: the authorization code returned by the WeChat client.
        flag: 'web' or 'app' — selects which appid/secret pair to use.

    Returns:
        The decoded token payload dict when it contains 'openid'; otherwise
        None (unknown flag, network error, or an error response).
    """
    if (flag == 'web'):
        app_id = current_app.config.get('WEB_ID')
        secret = current_app.config.get('WEB_SECRET')
    elif (flag == 'app'):
        app_id = current_app.config.get('APP_ID')
        secret = current_app.config.get('APP_SECRET')
    else:
        return None
    try:
        fields = parse.urlencode({'appid': app_id, 'secret': secret, 'code': code, 'grant_type': 'authorization_code'})
        # NOTE(review): the endpoint URL was truncated in this source copy;
        # the parameters match WeChat's OAuth2 access-token API, so this is
        # the presumed endpoint — confirm against the original source.
        url = 'https://api.weixin.qq.com/sns/oauth2/access_token?' + fields
        print(url)
        req = request.Request(url=url, method='GET')
        res = request.urlopen(req, timeout=10)
        access_data = json.loads(res.read().decode())
        print(access_data)
    except Exception as e:
        print(e)
        return None
    if ('openid' in access_data):
        return access_data
    else:
        return None
class OTRMessage(object):
    """Base class for OTR protocol messages; equality compares all slots."""
    __slots__ = ['payload']
    version = 2
    msgtype = 0

    def __eq__(self, other):
        """Two messages are equal iff they share a class and every slot matches."""
        if (not isinstance(other, self.__class__)):
            return False
        for slot in getslots(self.__class__, OTRMessage):
            if (getattr(self, slot) != getattr(other, slot)):
                return False
        return True

    def __ne__(self, other):
        # BUG FIX: this was named __neq__, which is not a special method name
        # and is never invoked by the `!=` operator (it only works implicitly
        # on Python 3, where `!=` falls back to negating __eq__; on Python 2
        # it silently did nothing).
        return (not self.__eq__(other))

    # Backward-compatible alias for any caller invoking __neq__ explicitly.
    __neq__ = __ne__
class InteropQubitManager(cirq.ops.SimpleQubitManager):
    """Qubit manager that lets externally-supplied ("managed") qubits bypass
    the base class's free bookkeeping."""

    def __init__(self):
        super().__init__()
        self._managed_qubits = set()

    def manage_qubits(self, qubits: Iterable[cirq.Qid]):
        """Register qubits whose lifetime is controlled by the caller."""
        self._managed_qubits.update(qubits)

    def qfree(self, qubits: Iterable[cirq.Qid]):
        """Free `qubits`: managed ones are simply forgotten, the remainder
        are handed to the base class."""
        requested = set(qubits)
        managed = requested & self._managed_qubits
        self._managed_qubits -= managed
        super().qfree(requested - managed)
# NOTE(review): '_for_td(torch.gather)' looks like the remnant of a truncated
# decorator line (e.g. '@implements_for_td(torch.gather)') applied to the
# function below — confirm against the original source.
_for_td(torch.gather)

def _gather(input: T, dim: int, index: Tensor, *, sparse_grad: bool=False, out: (T | None)=None) -> T:
    """torch.gather overload for TensorDicts: gathers every entry along batch
    dim `dim` using a shared `index`, returning a TensorDict whose batch size
    matches `index.shape` (or writing into `out` when given)."""
    if sparse_grad:
        raise NotImplementedError('sparse_grad=True not implemented for torch.gather(tensordict, ...)')
    if (not len(index)):
        raise RuntimeError('Cannot use torch.gather with an empty index')
    dim_orig = dim
    if (dim < 0):
        # Normalize negative dims against the batch dimensions.
        dim = (input.batch_dims + dim)
    if ((dim > (input.batch_dims - 1)) or (dim < 0)):
        raise RuntimeError(f'Cannot gather tensordict with shape {input.shape} along dim {dim_orig}.')

    def _gather_tensor(tensor, dest=None):
        # Expand the index over the trailing (non-batch) dims of `tensor`.
        index_expand = index
        while (index_expand.ndim < tensor.ndim):
            index_expand = index_expand.unsqueeze((- 1))
        target_shape = list(tensor.shape)
        target_shape[dim] = index_expand.shape[dim]
        index_expand = index_expand.expand(target_shape)
        out = torch.gather(tensor, dim, index_expand, out=dest)
        return out
    if (out is None):
        names = (input.names if input._has_names() else None)
        return TensorDict({key: _gather_tensor(value) for (key, value) in input.items()}, batch_size=index.shape, names=names)
    # With `out` given, gather writes into out's tensors in place; the
    # TensorDict built here is intentionally discarded.
    TensorDict({key: _gather_tensor(value, out[key]) for (key, value) in input.items()}, batch_size=index.shape)
    return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.