code stringlengths 281 23.7M |
|---|
class TemporalModulation(nn.Module):
    """Temporal rate modulation: a grouped temporal conv followed by
    temporal max-pooling that downsamples only the time axis."""

    def __init__(self, in_channels, out_channels, downsample_scale=8):
        super().__init__()
        # Grouped 3x1x1 temporal convolution, no bias and no activation.
        self.conv = ConvModule(in_channels, out_channels, (3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False, groups=32, conv_cfg=dict(type='Conv3d'), act_cfg=None)
        # Max-pool over time only; ceil_mode keeps a partial trailing window.
        self.pool = nn.MaxPool3d((downsample_scale, 1, 1), (downsample_scale, 1, 1), (0, 0, 0), ceil_mode=True)

    def forward(self, x):
        """Apply the temporal conv, then the temporal pooling."""
        return self.pool(self.conv(x))
class Loss(ABC):
    """Abstract base class for loss functions.

    Subclasses implement :meth:`evaluate` (and optionally :meth:`gradient`);
    calling the instance delegates to :meth:`evaluate`.
    """

    def __call__(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Shorthand for :meth:`evaluate`."""
        return self.evaluate(predict, target)

    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Compute the loss for ``predict`` vs ``target``; subclass must implement."""
        raise NotImplementedError

    @staticmethod
    def _validate_shapes(predict: np.ndarray, target: np.ndarray) -> None:
        """Raise ``QiskitMachineLearningError`` when the shapes differ.

        Fix: the original definition was missing ``@staticmethod``, so an
        instance call ``self._validate_shapes(p, t)`` bound ``self`` to
        ``predict`` and silently compared the wrong objects.
        """
        if predict.shape != target.shape:
            raise QiskitMachineLearningError(f"Shapes don't match, predict: {predict.shape}, target: {target.shape}!")

    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Gradient of the loss w.r.t. ``predict``; subclass must implement."""
        raise NotImplementedError
def get_sorted_s_r_embed_limit(s_hist, s, r, ent_embeds, limit):
    """Sort subject histories by length (descending), cap each history at
    *limit* steps, and gather entity embeddings for the flattened neighbors.

    Returns (s_idx, s_len_non_zero, s_tem, r_tem, embeds, len_s, embeds_split).
    """
    # Per-subject history length; sorting descending — presumably so the
    # caller can pack padded sequences for an RNN (TODO confirm).
    s_hist_len = to_device(torch.LongTensor(list(map(len, s_hist))))
    (s_len, s_idx) = s_hist_len.sort(0, descending=True)
    # Drop subjects with empty histories.
    num_non_zero = len(torch.nonzero(s_len))
    s_len_non_zero = s_len[:num_non_zero]
    # Cap every history length at `limit`.
    s_len_non_zero = torch.where((s_len_non_zero > limit), to_device(torch.tensor(limit)), s_len_non_zero)
    s_hist_sorted = []
    for idx in s_idx[:num_non_zero]:
        s_hist_sorted.append(s_hist[idx.item()])
    flat_s = []
    len_s = []
    # Flatten the last `limit` timesteps of each kept history; neigh[1] is
    # assumed to be the neighbor entity id — TODO confirm the tuple layout.
    for hist in s_hist_sorted:
        for neighs in hist[(- limit):]:
            len_s.append(len(neighs))
            for neigh in neighs:
                flat_s.append(neigh[1])
    # Reorder subjects/relations to match the sorted-by-length order.
    s_tem = s[s_idx]
    r_tem = r[s_idx]
    embeds = ent_embeds[to_device(torch.LongTensor(flat_s))]
    # Split the flat embedding matrix back into per-timestep chunks.
    embeds_split = torch.split(embeds, len_s)
    return (s_idx, s_len_non_zero, s_tem, r_tem, embeds, len_s, embeds_split)
def test_read_pinned_buffer(tmpdir):
    """Reading a binary file into a pre-allocated pinned buffer must round-trip."""
    data_path = tmpdir.join('test_read.sigmf-data')
    reference = cp.random.rand(100).astype(cp.complex64)
    reference.tofile(data_path)
    # First read sizes the pinned buffer, second read fills it.
    raw = cusignal.read_bin(str(data_path), dtype=cp.complex64)
    pinned = cusignal.get_pinned_mem(raw.shape, cp.complex64)
    result = cusignal.read_bin(str(data_path), pinned, dtype=cp.complex64)
    cp.testing.assert_array_equal(reference, result)
def get_train_val_data(data_path, tokenizer, val_data_path=None, val_set_size=1, augment_times=1, load_pre_prompt_dataset=False, vqa=False, add_input_prompt=False, eval_only=False, eval_items=None):
    """Load tokenized training/validation datasets from a pickle file.

    Returns ``(train_data, val_data)``; ``train_data`` is None when
    ``eval_only`` and ``val_data`` is None when ``val_set_size <= 0``.
    """
    if eval_only:
        # Evaluation-only mode: skip training data entirely.
        return (None, get_val_data(val_data_path, tokenizer, val_set_size, eval_items=eval_items))
    assert data_path.endswith('.pkl'), 'Only support pkl data format'
    # Pick the loader by data flavor: VQA, pre-prompted, or plain vector pkl.
    if vqa:
        train_dataset = _load_vqa_train_dataset(data_path, add_input_prompt=add_input_prompt)
    elif load_pre_prompt_dataset:
        train_dataset = _load_pre_prompt_dataset(data_path, augment_times)
    else:
        train_dataset = _load_vector_pkl_dataset(data_path, augment_times)
    if (val_set_size > 0):
        if (val_data_path is not None):
            val_dataset = _load_val_dataset(val_data_path, val_set_size, eval_items=eval_items)
        else:
            # Carve the validation split out of the training set.
            train_val = train_dataset['train'].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
            # NOTE(review): both names are bound to the same split dict;
            # presumably downstream code selects 'train'/'test' — confirm.
            train_dataset = train_val
            val_dataset = train_val
        train_data = train_dataset['train'].shuffle(seed=42)
        train_data = train_data.map(partial(generate_and_tokenize_prompt, tokenizer), remove_columns=[], num_proc=8)
        val_data = _val_data_from_val_dataset(val_dataset=val_dataset, tokenizer=tokenizer)
    else:
        train_data = train_dataset['train'].shuffle(seed=42)
        # NOTE(review): unlike the branch above, the tokenizer is not bound
        # via partial here — verify this asymmetry is intentional.
        train_data = train_data.map(generate_and_tokenize_prompt, remove_columns=[], num_proc=8)
        val_data = None
    return (train_data, val_data)
# NOTE(review): '.parametrize(...)' below looks like the residue of a stripped
# '@pytest.mark.parametrize' decorator — restore the '@pytest.mark' prefix
# before running; as written this line is a syntax error.
.parametrize('exc', [ValueError, SystemExit])
def test_wrapper_exception(exc: 'Type[BaseException]') -> None:
    """A wrapper hook must see and re-raise exceptions from later hooks,
    and its finally-block must still run."""
    out = []
    # NOTE(review): '(wrapper=True)' looks like a stripped decorator call
    # (e.g. '@hookimpl(wrapper=True)') — confirm against the original source.
    (wrapper=True)
    def m1():
        out.append('m1 init')
        try:
            # Generator-style wrapper: yields to let the remaining hooks run.
            result = (yield)
        except BaseException as e:
            # The propagated exception must be the parametrized one.
            assert isinstance(e, exc)
            raise
        finally:
            out.append('m1 finish')
        return result

    def m2():
        out.append('m2 init')
        raise exc
    with pytest.raises(exc):
        MC([m2, m1], {})
    # Wrapper init ran first, inner hook raised, wrapper cleanup still ran.
    assert (out == ['m1 init', 'm2 init', 'm1 finish'])
def create_palette_from_dict(conf):
    """Build a QPalette from a {'Group:Role': color-args} mapping.

    Colors set for the 'Active' group are mirrored to 'Inactive'.
    """
    palette = QtGui.QPalette()
    for key, value in conf.items():
        group, role = key.split(':')
        if not hasattr(QtGui.QPalette.ColorGroup, group):
            continue
        color_group = getattr(QtGui.QPalette.ColorGroup, group)
        color_role = getattr(QtGui.QPalette.ColorRole, role)
        color = QtGui.QColor(*value)
        palette.setColor(color_group, color_role, color)
        if group == 'Active':
            # Keep inactive windows visually identical to active ones.
            palette.setColor(QtGui.QPalette.ColorGroup.Inactive, color_role, QtGui.QColor(*value))
    return palette
class _CanAssignBasedContext():
    """Context object that records can-assign errors instead of raising.

    NOTE(review): the ``field(default_factory=list)`` default suggests this
    was a ``@dataclass`` whose decorator was stripped — confirm against the
    original source.
    """
    can_assign_ctx: CanAssignContext
    visitor: Optional['NameCheckVisitor'] = None
    errors: List[str] = field(default_factory=list)

    def on_error(self, message: str, *, code: ErrorCode=ErrorCode.incompatible_call, node: Optional[ast.AST]=None, detail: Optional[str]=..., replacement: Optional[Replacement]=...) -> object:
        """Append *message* to the error list; the extra keyword details are
        accepted for interface compatibility but ignored here."""
        self.errors.append(message)
        return None
def test_stream_create(stream):
    """A freshly created stream is unconnected, and stays so after delete()."""
    expected_flags = {
        'is_unconnected': True,
        'is_creating': False,
        'is_ready': False,
        'is_failed': False,
        'is_terminated': False,
    }
    for attr, value in expected_flags.items():
        assert getattr(stream, attr) == value
    with stream.mainloop.lock:
        stream.delete()
    # Deleting an unconnected stream must not change any state flag.
    for attr, value in expected_flags.items():
        assert getattr(stream, attr) == value
class UnbroadcastLayer(Layer):
    """Inverse of a BroadcastLayer: restores the axes that were folded into
    the batch dimension by the paired ``broadcast_layer``."""

    def __init__(self, incoming, broadcast_layer, **kwargs):
        self.broadcast_layer = broadcast_layer
        # Both layers must agree on rank for the axis bookkeeping below.
        assert (len(incoming.output_shape) == len(self.broadcast_layer.output_shape))
        # Ensure the broadcast layer's forward pass runs first, so its
        # symbolic_input_shape is populated when we need it.
        incoming = AwaitLayer(incoming, layer_to_await=broadcast_layer)
        super(UnbroadcastLayer, self).__init__(incoming, **kwargs)

    def get_output_for(self, input, **kwargs):
        """Reshape the flattened batch back into the broadcasted axes and
        restore the original axis order."""
        if (not hasattr(self.broadcast_layer, 'symbolic_input_shape')):
            raise ValueError('UnbroadcastLayer.get_output_for must be called after respective BroadcastLayer.get_output_for')
        pre_broadcast_shape = self.broadcast_layer.symbolic_input_shape
        # Sizes of the axes that were merged into the batch dimension.
        broadcasted_axes_shapes = tuple((pre_broadcast_shape[ax] for ax in self.broadcast_layer.broadcasted_axes))
        # Unroll batch -> (broadcasted axes..., remaining dims...).
        unrolled_shape = (broadcasted_axes_shapes + tuple(input.shape)[1:])
        input = input.reshape(unrolled_shape)
        # Compute the permutation that undoes the broadcast's axis reorder.
        current_dim_order = (self.broadcast_layer.broadcasted_axes + self.broadcast_layer.non_broadcasted_axes)
        dimshuffle_order = [current_dim_order.index(i) for i in range(len(current_dim_order))]
        return input.dimshuffle(dimshuffle_order)

    def get_output_shape_for(self, input_shape, **kwargs):
        """Original pre-broadcast shape, with non-broadcast axes replaced by
        their (possibly changed) sizes from *input_shape*."""
        new_non_broadcast_shapes = input_shape[1:]
        original_shape = list(self.broadcast_layer.input_shape)
        for (ax, new_ax_shape) in zip(self.broadcast_layer.non_broadcasted_axes, new_non_broadcast_shapes):
            original_shape[ax] = new_ax_shape
        return tuple(original_shape)
class GeoBUGSTextIO(fileio.FileIO):
    """Read and write spatial weights in the GeoBUGS text format.

    The file is an R-style ``list(num=c(...), adj=c(...), [weights=c(...),]
    sumNumNeigh=N)`` structure; parsing locates each named section by its
    keyword and extracts the comma-separated numbers between them.
    """
    FORMATS = ['geobugs_text']
    MODES = ['r', 'w']

    def __init__(self, *args, **kwargs):
        # Only the first two positional args (dataPath, mode) are forwarded.
        args = args[:2]
        fileio.FileIO.__init__(self, *args, **kwargs)
        self.file = open(self.dataPath, self.mode)

    def read(self, n=(- 1)):
        """Parse the whole file and return a W object; *n* is ignored."""
        self._complain_ifclosed(self.closed)
        w = self._read()
        return w

    def seek(self, pos) -> int:
        # Only rewinding to the start is supported; other positions are ignored.
        if (pos == 0):
            self.file.seek(0)
            self.pos = 0

    def _read(self):
        """Parse neighbors and weights from the GeoBUGS body and build a W.

        NOTE(review): raises StopIteration after the first read — presumably
        the FileIO base drives this as an iterator protocol; confirm.
        """
        if (self.pos > 0):
            raise StopIteration
        fbody = self.file.read()
        body_structure = {}
        # Locate each known section keyword in the raw text.
        for i in ['num', 'adj', 'weights', 'sumNumNeigh']:
            i_loc = fbody.find(i)
            if (i_loc != (- 1)):
                body_structure[i] = (i_loc, i)
        body_sequence = sorted(body_structure.values())
        # Sentinel so the last real section has an end offset.
        # NOTE(review): end offset -1 drops the final character — presumably
        # the closing ')' of the list; confirm on files without one.
        body_sequence.append(((- 1), 'eof'))
        for i in range((len(body_sequence) - 1)):
            (part, next_part) = (body_sequence[i], body_sequence[(i + 1)])
            (start, end) = (part[0], next_part[0])
            part_text = fbody[start:end]
            # Trim the section text to the first and last digit it contains.
            (part_length, start, end) = (len(part_text), 0, (- 1))
            for c in range(part_length):
                if part_text[c].isdigit():
                    start = c
                    break
            for c in range((part_length - 1), 0, (- 1)):
                if part_text[c].isdigit():
                    end = (c + 1)
                    break
            part_text = part_text[start:end]
            part_text = part_text.replace('\n', '')
            # Only the weights section holds floats; everything else is ints.
            value_type = int
            if (part[1] == 'weights'):
                value_type = float
            body_structure[part[1]] = [value_type(v) for v in part_text.split(',')]
        cardinalities = body_structure['num']
        adjacency = body_structure['adj']
        # Default to binary weights when no weights section is present.
        raw_weights = ([1.0] * int(sum(cardinalities)))
        if (('weights' in body_structure) and isinstance(body_structure['weights'], list)):
            raw_weights = body_structure['weights']
        no_obs = len(cardinalities)
        neighbors = {}
        weights = {}
        pos = 0
        # Slice the flat adjacency/weights arrays per observation, 1-indexed ids.
        for i in range(no_obs):
            neighbors[(i + 1)] = []
            weights[(i + 1)] = []
            no_nghs = cardinalities[i]
            if (no_nghs > 0):
                neighbors[(i + 1)] = adjacency[pos:(pos + no_nghs)]
                weights[(i + 1)] = raw_weights[pos:(pos + no_nghs)]
            pos += no_nghs
        self.pos += 1
        w = W(neighbors, weights)
        return w

    def write(self, obj):
        """Serialize a W object to GeoBUGS text.

        NOTE(review): the weights themselves are not written, only num/adj/
        sumNumNeigh — confirm whether non-binary weights should round-trip.
        """
        self._complain_ifclosed(self.closed)
        if issubclass(type(obj), W):
            (cardinalities, neighbors, weights) = ([], [], [])
            for i in obj.id_order:
                cardinalities.append(obj.cardinalities[i])
                neighbors.extend(obj.neighbors[i])
                weights.extend(obj.weights[i])
            self.file.write('list(')
            self.file.write(('num=c(%s),' % ','.join(map(str, cardinalities))))
            self.file.write(('adj=c(%s),' % ','.join(map(str, neighbors))))
            self.file.write(('sumNumNeigh=%i)' % sum(cardinalities)))
            self.pos += 1
        else:
            raise TypeError(('Expected a PySAL weights object, got: %s.' % type(obj)))

    def close(self):
        self.file.close()
        fileio.FileIO.close(self)
def test_hetero_tuple_validation():
    """Detailed validation reports the failing element and its index note."""
    converter = Converter(detailed_validation=True)
    with pytest.raises(IterableValidationError) as exc_info:
        converter.structure(['1', 2, 'a'], Tuple[(int, int, int)])
    first_error = exc_info.value.exceptions[0]
    expected = ValueError("invalid literal for int() with base 10: 'a'")
    assert repr(first_error) == repr(expected)
    assert first_error.__notes__ == ['Structuring typing.Tuple[int, int, int] index 2']
def get_files(**kwargs):
    """Build the expected wheel file listing for a build-backend test:
    template files plus WHEEL/METADATA/RECORD metadata entries."""
    metadata_directory = kwargs.get('metadata_directory', '')
    files = []
    for f in get_template_files(**kwargs):
        # The license file moves under <metadata>/licenses in the wheel.
        if (str(f.path) == 'LICENSE.txt'):
            files.append(File(Path(metadata_directory, 'licenses', f.path), f.contents))
        # Only files under the package directory itself are included.
        if (f.path.parts[0] != kwargs['package_name']):
            continue
        files.append(f)
    # Add a fake extension module plus the wheel metadata files.
    files.extend((File(Path(kwargs['package_name'], 'lib.so'), 'custom'), File(Path(metadata_directory, 'WHEEL'), f'''Wheel-Version: 1.0
Generator: hatchling {__version__}
Root-Is-Purelib: false
Tag: {kwargs.get('tag', '')}
'''), File(Path(metadata_directory, 'METADATA'), f'''Metadata-Version: {DEFAULT_METADATA_VERSION}
Name: {kwargs['project_name']}
Version: 0.0.1
License-File: LICENSE.txt
Requires-Python: >3
''')))
    # RECORD is created empty, then filled from the final file list.
    record_file = File(Path(metadata_directory, 'RECORD'), '')
    update_record_file_contents(record_file, files)
    files.append(record_file)
    return files
class MovieGenres(Enum):
    """Movie genres mapped to their numeric ids (kept as strings).

    The values match TMDb genre ids — presumably; confirm against the API
    this enum is used with.
    """
    Action = '28'
    Adventure = '12'
    Animation = '16'
    Comedy = '35'
    Crime = '80'
    Documentary = '99'
    Drama = '18'
    Family = '10751'
    Fantasy = '14'
    History = '36'
    Horror = '27'
    Music = '10402'
    Mystery = '9648'
    Romance = '10749'
    Science = '878'
    Thriller = '53'
    Western = '37'
def commands_spotting_challenge_validated(specific_features_dir: str, feature_name: str, run_name: str=RUN_NAME, results_dir: str=RESULTS_DIR, models_dir: str=MODELS_DIR, labels_dir: str=LABELS_DIR, splits_dir: str=SPLITS_DIR, base_config_dir: str=BASE_CONFIG_DIR, memory_setup: str=MEMORY_SETUP) -> List[Command]:
    """Build the command sequence for the validated spotting-challenge
    protocol: training commands first, then test commands for both the
    validation and unlabeled splits."""
    dataset_type = DATASET_TYPE_SOCCERNET_V2_CHALLENGE_VALIDATION
    protocol_name = SPOTTING_CHALLENGE_VALIDATED
    # Confidence + delta training on the challenge-validation dataset.
    confidence_and_delta_validated_train_commands = _commands_confidence_and_delta_validated_train(memory_setup, dataset_type, protocol_name, specific_features_dir, feature_name, run_name, results_dir, models_dir, labels_dir, splits_dir, base_config_dir)
    last_test_commands = []
    # Run inference (tuned soft-NMS) on both evaluation splits.
    for split_key in [SPLIT_KEY_VALIDATION, SPLIT_KEY_UNLABELED]:
        confidence_and_delta_test_commands = _commands_spotting_confidence_and_delta_test(split_key, NMS_TYPE_SOFT_TUNED, dataset_type, protocol_name, specific_features_dir, feature_name, run_name, results_dir, models_dir, labels_dir, splits_dir, base_config_dir)
        last_test_commands.extend(confidence_and_delta_test_commands)
    return (confidence_and_delta_validated_train_commands + last_test_commands)
class InceptionV3Aux(InceptionV3):
    """InceptionV3 variant whose forward pass also returns the auxiliary
    classifier output (None outside of training)."""

    def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg', aux_logits=True):
        super().__init__(num_classes, in_chans, drop_rate, global_pool, aux_logits)

    def forward_features(self, x):
        """Run the stem, branch into the aux head while training, then
        finish the feature trunk."""
        x = self.forward_preaux(x)
        if self.training:
            aux = self.AuxLogits(x)
        else:
            aux = None
        return (self.forward_postaux(x), aux)

    def forward(self, x):
        features, aux = self.forward_features(x)
        return (self.forward_head(features), aux)
# NOTE(review): '.skipif(...)' below looks like the residue of a stripped
# '@pytest.mark.skipif' decorator — restore it; as written this is a syntax
# error.
.skipif((not is_py310_plus), reason='3.10+ union syntax')
def test_roundtrip_generic_with_union() -> None:
    """Round-trip a generic container through unstructure/structure using a
    PEP 604 union as the member type.

    NOTE(review): A and B are constructed positionally (A(1)) but define no
    __init__ here — presumably attrs '@define' decorators were stripped;
    confirm against the original source.
    """
    c = Converter()

    class A():
        a: int

    class B():
        b: int

    class Outer(Generic[T]):
        member: T
    raw = c.unstructure(Outer(A(1)), unstructure_as=Outer[(A | B)])
    assert (c.structure(raw, Outer[(A | B)]) == Outer(A(1)))
class UDPEndpoint():
    """A UDP endpoint: an IP address plus a port.

    NOTE(review): the annotation-only fields with no __init__ suggest an
    attrs/dataclass decorator was stripped from this class — the
    ``cls(ip=..., port=...)`` call below requires one; confirm against the
    original source.
    """
    ip: IPAddress
    port: int

    def as_python_sockaddr(self) -> (tuple[(str, int)] | tuple[(str, int, int, int)]):
        """Return the (host, port[, flowinfo, scope_id]) tuple expected by
        the Python socket API."""
        sockaddr: (tuple[(str, int)] | tuple[(str, int, int, int)]) = (self.ip.compressed, self.port)
        if isinstance(self.ip, ipaddress.IPv6Address):
            # IPv6 sockaddrs are 4-tuples: append zero flowinfo and scope_id.
            sockaddr += (0, 0)
        return sockaddr

    @classmethod
    def from_python_sockaddr(cls: type[T_UDPEndpoint], sockaddr: (tuple[(str, int)] | tuple[(str, int, int, int)])) -> T_UDPEndpoint:
        """Build an endpoint from a socket-API address tuple.

        Fix: added the missing ``@classmethod`` — the body calls ``cls(...)``
        but the original definition would have bound an instance to ``cls``.
        """
        (ip, port) = sockaddr[:2]
        return cls(ip=ipaddress.ip_address(ip), port=port)
class ModbusSlaveContext(ModbusBaseSlaveContext):
    """Datastore context for a single Modbus slave.

    Four data blocks are keyed by single-letter codes: 'd' discrete inputs,
    'c' coils, 'i' input registers, 'h' holding registers.  ``zero_mode``
    controls whether addresses are 0-based (True) or shifted by one.
    """

    def __init__(self, *_args, **kwargs):
        self.store = {}
        self.store['d'] = kwargs.get('di', ModbusSequentialDataBlock.create())
        self.store['c'] = kwargs.get('co', ModbusSequentialDataBlock.create())
        self.store['i'] = kwargs.get('ir', ModbusSequentialDataBlock.create())
        self.store['h'] = kwargs.get('hr', ModbusSequentialDataBlock.create())
        self.zero_mode = kwargs.get('zero_mode', False)

    def __str__(self):
        return 'Modbus Slave Context'

    def reset(self):
        """Reset every data block to its default values."""
        for datastore in iter(self.store.values()):
            datastore.reset()

    def validate(self, fc_as_hex, address, count=1):
        """Check that [address, address+count) is valid for the function code."""
        if (not self.zero_mode):
            # Classic Modbus addressing is 1-based.
            address += 1
        Log.debug('validate: fc-[{}] address-{}: count-{}', fc_as_hex, address, count)
        return self.store[self.decode(fc_as_hex)].validate(address, count)

    def getValues(self, fc_as_hex, address, count=1):
        """Read *count* values starting at *address* for the function code."""
        if (not self.zero_mode):
            address += 1
        Log.debug('getValues: fc-[{}] address-{}: count-{}', fc_as_hex, address, count)
        return self.store[self.decode(fc_as_hex)].getValues(address, count)

    def setValues(self, fc_as_hex, address, values):
        """Write *values* starting at *address* for the function code."""
        if (not self.zero_mode):
            address += 1
        Log.debug('setValues[{}] address-{}: count-{}', fc_as_hex, address, len(values))
        self.store[self.decode(fc_as_hex)].setValues(address, values)

    def register(self, function_code, fc_as_hex, datablock=None):
        """Register a custom datablock under a store key and map the
        function code to it.

        NOTE(review): ``self._fx_mapper`` is never initialized in __init__ —
        presumably the base class provides it; confirm.
        """
        self.store[fc_as_hex] = (datablock or ModbusSequentialDataBlock.create())
        self._fx_mapper[function_code] = fc_as_hex
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """Mean cross-entropy averaged over the start, end, and pooled heads."""

    def _mean_cross_entropy(logits, labels):
        # One-hot encode the labels against the logits' vocabulary dimension.
        vocab_size = logits.shape[-1]
        one_hot = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('f4')
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        per_example = -jnp.sum(one_hot * log_probs, axis=-1)
        return jnp.mean(per_example)

    start_loss = _mean_cross_entropy(start_logits, start_labels)
    end_loss = _mean_cross_entropy(end_logits, end_labels)
    pooled_loss = _mean_cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
class TestJit(object):
    """Tests for the mmcv JIT wrapper against plain-Python reference functions.

    NOTE(review): bare '_no_parrots' / '_no_elena' lines and parenthesized
    keyword expressions like '(derivate=True)' below look like residue of
    stripped decorators (skip markers and '@mmcv.jit(...)' calls) — the
    '._cache' assertions only make sense on jit-wrapped functions.  Restore
    the decorators before running; as written several lines are invalid.
    """

    def test_add_dict(self):
        # Jit-compiled vs pure-Python addition over a dict of tensors.
        def add_dict(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}

        def add_dict_pyfunc(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        oper = {'x': a, 'y': b}
        rets_t = add_dict(oper)
        rets = add_dict_pyfunc(oper)
        assert ('result' in rets)
        assert (rets_t['result'] == rets['result']).all()

    def test_add_list(self):
        # Same comparison over a list of tensor-pair dicts plus kwargs.
        def add_list(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets

        def add_list_pyfunc(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets
        pair_num = 3
        oper = []
        for _ in range(pair_num):
            oper.append({'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))})
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        rets = add_list_pyfunc(oper, x=a, y=b)
        rets_t = add_list(oper, x=a, y=b)
        for idx in range((pair_num + 1)):
            assert (f'k{idx}' in rets_t)
            assert (rets[f'k{idx}'] == rets_t[f'k{idx}']).all()
    _no_parrots

    def test_jit_cache(self):
        # Each distinct traced branch should add one cache entry.
        def func(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])

        def pyfunc(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])
        assert (len(func._cache._cache) == 0)
        oper = {'const': 2, 'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))}
        rets_plus = pyfunc(oper)
        rets_plus_t = func(oper)
        assert (rets_plus == rets_plus_t).all()
        assert (len(func._cache._cache) == 1)
        oper['const'] = 0.5
        rets_minus = pyfunc(oper)
        rets_minus_t = func(oper)
        assert (rets_minus == rets_minus_t).all()
        assert (len(func._cache._cache) == 2)
        # ((2x - y) + (2x + y)) / 4 == x.
        rets_a = ((rets_minus_t + rets_plus_t) / 4)
        assert torch.allclose(oper['x'], rets_a)
    _no_parrots

    def test_jit_shape(self):
        # A new input shape triggers a new trace/cache entry.
        def func(a):
            return (a + 1)
        assert (len(func._cache._cache) == 0)
        a = torch.ones((3, 4))
        r = func(a)
        assert (r.shape == (3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 1)
        a = torch.ones((2, 3, 4))
        r = func(a)
        assert (r.shape == (2, 3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 2)
    _no_parrots

    def test_jit_kwargs(self):
        # Positional vs keyword call styles must share one cache entry.
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))
        assert (len(func._cache._cache) == 0)
        x = torch.rand((16, 32))
        y = torch.rand((16, 32))
        func(x, y)
        assert (len(func._cache._cache) == 1)
        func(x, b=y)
        assert (len(func._cache._cache) == 1)
        func(b=y, a=x)
        assert (len(func._cache._cache) == 1)

    def test_jit_derivate(self):
        # NOTE(review): '(derivate=True)' is stripped-decorator residue.
        (derivate=True)
        def func(x, y):
            return ((x + 2) * (y - 2))
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        a.requires_grad = True
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(1.0)
        c.backward(d)
        # d/dx[(x+2)(y-2)] = (y - 2); b has no grad (requires_grad False).
        assert torch.allclose(a.grad, (b - 2))
        assert (b.grad is None)
        a.grad = None
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(2.7)
        c.backward(d)
        assert torch.allclose(a.grad, (2.7 * (b - 2)))
        assert (b.grad is None)

    def test_jit_optimize(self):
        # NOTE(review): '(optimize=True)' is stripped-decorator residue.
        (optimize=True)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))
        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)
    _no_elena

    def test_jit_coderize(self):
        # CUDA-only: coderized kernel must match the eager result.
        if (not torch.cuda.is_available()):
            return
        # NOTE(review): '(coderize=True)' is stripped-decorator residue.
        (coderize=True)
        def func(a, b):
            return ((a + b) * (a - b))

        def pyfunc(a, b):
            return ((a + b) * (a - b))
        a = torch.rand((16, 32), device='cuda')
        b = torch.rand((16, 32), device='cuda')
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    def test_jit_value_dependent(self):
        # torch.nonzero makes the graph value-dependent; results must still match.
        def func(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))
        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)
    _no_parrots

    def test_jit_check_input(self):
        # Non-deterministic ops should be rejected by check_input tracing.
        def func(x):
            y = torch.rand_like(x)
            return (x + y)
        a = torch.ones((3, 4))
        with pytest.raises(AssertionError):
            func = mmcv.jit(func, check_input=(a,))
    _no_parrots

    def test_jit_partial_shape(self):
        # NOTE(review): '(full_shape=False)' is stripped-decorator residue.
        # With partial shapes, only the rank (not exact sizes) keys the cache.
        (full_shape=False)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        a = torch.rand((6, 5))
        b = torch.rand((6, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        a = torch.rand((3, 4, 5))
        b = torch.rand((3, 4, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)
        a = torch.rand((1, 9, 8))
        b = torch.rand((1, 9, 8))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)

    def test_instance_method(self):
        # Jit on a bound method must track per-instance state correctly.
        class T(object):
            def __init__(self, shape):
                self._c = torch.rand(shape)

            def test_method(self, x, y):
                return ((x * self._c) + y)
        shape = (16, 32)
        t = T(shape)
        a = torch.rand(shape)
        b = torch.rand(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
        # A fresh instance (new self._c) must not reuse the old trace's constant.
        t = T(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
def test_RankInvariantChecker_remove_one_alternative():
    """A removed alternative is reported missing and ranked last."""
    decision_matrix = skc.datasets.load_simple_stock_selection()
    remover = RemoveAlternativeDMaker(TOPSIS(), ['AA'], 1)
    checker = RankInvariantChecker(remover, random_state=42, allow_missing_alternatives=True)
    evaluation = checker.evaluate(decision_matrix)
    _, rank = evaluation.ranks[1]
    np.testing.assert_array_equal(rank.e_.rrt1.missing_alternatives, ['AA'])
    assert rank.to_series()['AA'] == 6
def init_test_environ():
    """Prepare an isolated environment for the test suite.

    Points HOME and the XDG/GSettings/GTK machinery at temp directories or
    in-memory backends, optionally starts a virtual display and a private
    D-Bus session, then initializes the application.
    """
    global _TEMP_DIR, _BUS_INFO, _VDISPLAY
    _TEMP_DIR = tempfile.mkdtemp(prefix=fsnative('QL-TEST-'))
    os.environ['XDG_CACHE_HOME'] = xdg_get_cache_home()
    os.environ['GST_REGISTRY_UPDATE'] = fsnative('no')
    if util.is_flatpak():
        # NOTE(review): under Flatpak the override is removed again right
        # away — presumably the sandboxed GStreamer manages its own
        # registry; confirm.
        del os.environ['GST_REGISTRY_UPDATE']
    # Give the tests a throwaway HOME so nothing touches the real config.
    home_dir = tempfile.mkdtemp(prefix=fsnative('HOME-'), dir=_TEMP_DIR)
    os.environ['HOME'] = home_dir
    os.environ.pop('XDG_DATA_HOME', None)
    os.environ.pop('XDG_CONFIG_HOME', None)
    # Fix: this assignment was duplicated on two consecutive lines.
    os.environ['GSETTINGS_BACKEND'] = 'memory'
    os.environ['GTK_THEME'] = 'Adwaita'
    if (pyvirtualdisplay is not None):
        _VDISPLAY = pyvirtualdisplay.Display()
        _VDISPLAY.start()
    _BUS_INFO = None
    # A private D-Bus session is only possible on non-Windows, non-macOS.
    if ((os.name != 'nt') and (sys.platform != 'darwin')):
        _BUS_INFO = dbus_launch_user()
        os.environ.update(_BUS_INFO)
    quodlibet.init(no_translations=True, no_excepthook=True)
    quodlibet.app.name = 'QL Tests'
    try:
        # Force a known locale so string formatting in tests is stable;
        # ignore systems where the locale is unavailable.
        if (os.name != 'nt'):
            os.environ['LANG'] = locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        else:
            os.environ['LANG'] = 'en_US.utf8'
            locale.setlocale(locale.LC_ALL, 'english')
    except locale.Error:
        pass
class TestBitFlags(unittest.TestCase):
    """Tests for the OLCI reader's BitFlags helper."""

    def test_bitflags(self):
        """Each named flag selects exactly its own bit from a one-hot array."""
        from functools import reduce

        import numpy as np

        from satpy.readers.olci_nc import BitFlags
        flag_names = ['SEA_ICE', 'MEGLINT', 'HIGHGLINT', 'CASE2_S', 'CASE2_ANOM', 'HAZE_OVER_WATER', 'WHITECAPS', 'AC_FAIL', 'BPAC_ON', 'WHITE_SCATT', 'LOWRW', 'HIGHRW', 'OUT_OF_RANGE_AAC', 'OUT_OF_SCOPE_AAC', 'OUT_OF_RANGE_OC_NN', 'OUT_OF_SCOPE_OC_NN', 'OUT_OF_RANGE_CHL_OC4ME_INPUT', 'OUT_OF_RANGE_CHL_OC4ME']
        # One value per flag, each with exactly one bit set.
        bit_values = np.array([(1 << x) for x in range(len(flag_names))])
        flags = BitFlags(bit_values, flag_list=flag_names)
        selected = ['SEA_ICE', 'MEGLINT', 'HIGHGLINT', 'HAZE_OVER_WATER', 'WHITECAPS', 'AC_FAIL', 'WHITE_SCATT', 'LOWRW', 'HIGHRW', 'OUT_OF_RANGE_AAC', 'OUT_OF_SCOPE_AAC', 'OUT_OF_RANGE_OC_NN', 'OUT_OF_SCOPE_OC_NN', 'OUT_OF_RANGE_CHL_OC4ME_INPUT', 'OUT_OF_RANGE_CHL_OC4ME']
        combined_mask = reduce(np.logical_or, [flags[name] for name in selected])
        expected = np.array([True, True, True, False, False, True, True, True, False, True, True, True, True, True, True, True, True, True])
        assert all((combined_mask == expected))
def get_flat_faces(faces, visited):
    """Recursively collect all faces reachable from *faces* across edges whose
    face angle is zero (coplanar neighbors).  *visited* is mutated to mark
    already-processed faces."""
    coplanar_edges = {
        edge
        for face in faces
        for edge in face.edges
        if len(edge.link_faces) > 1 and equal(edge.calc_face_angle(), 0)
    }
    gathered = []
    for edge in coplanar_edges:
        for neighbor in edge.link_faces:
            if visited.get(neighbor, False):
                continue
            # Mark before recursing so cycles terminate.
            visited[neighbor] = True
            gathered += get_flat_faces([neighbor], visited)
    return list(set(faces + gathered))
def cnn():
    """Train a small 3-block convolutional binary classifier on directory
    image data and save its weights and full model.

    Relies on module-level globals: train_data_dir, validation_data_dir,
    img_width/img_height, batch_size, epochs, train_samples,
    validation_samples, exp_url — TODO confirm where they are defined.
    """
    # Pixel values scaled to [0, 1]; same generator for train and validation.
    datagen = ImageDataGenerator(rescale=(1.0 / 255))
    train_generator = datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
    validation_generator = datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
    from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
    # Three conv/relu/pool blocks, then a dense head with dropout.
    model = Sequential()
    model.add(Convolution2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # Single sigmoid output for binary classification.
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    model.fit_generator(train_generator, steps_per_epoch=(train_samples // batch_size), epochs=epochs, validation_data=validation_generator, validation_steps=(validation_samples // batch_size))
    model.save_weights((str(exp_url) + 'models/basic_cnn_30_epochs_weights.h5'))
    model.save((str(exp_url) + 'models/basic_cnn_30_epochs_full_model.h5'))
class configuration():
    """Persistent GUI state for the labeling tool: current files, paths,
    view options — serialized to/from a JSON file."""

    def __init__(self):
        self.currentFile = ''
        self.currentLabelFile = ''
        self.currentCorrectionFile = ''
        self.csPath = ''
        self.city = ''
        self.cityName = ''
        self.gtType = ''
        self.split = ''
        self.labelPath = ''
        self.correctionPath = ''
        self.transp = 0.5            # overlay transparency
        self.zoom = False
        self.zoomFactor = 1.0
        self.zoomSize = 400
        self.highlight = False
        self.highlightLabelSelection = ''
        self.screenshotFilename = '%i'
        self.correctionMode = False
        self.showSaveWarning = True

    def load(self, filename):
        """Load state from a JSON file; unknown keys are ignored and the
        result is sanity-checked afterwards."""
        if os.path.isfile(filename):
            with open(filename, 'r') as f:
                jsonText = f.read()
                jsonDict = json.loads(jsonText)
                for key in jsonDict:
                    # Only accept keys that are real attributes.
                    if (key in self.__dict__):
                        self.__dict__[key] = jsonDict[key]
        self.fixConsistency()

    def fixConsistency(self):
        """Normalize stored paths and drop any that no longer exist."""
        if self.currentFile:
            self.currentFile = os.path.normpath(self.currentFile)
        if self.currentLabelFile:
            self.currentLabelFile = os.path.normpath(self.currentLabelFile)
        if self.currentCorrectionFile:
            self.currentCorrectionFile = os.path.normpath(self.currentCorrectionFile)
        if self.csPath:
            self.csPath = os.path.normpath(self.csPath)
            if (not os.path.isdir(self.csPath)):
                self.csPath = ''
        if self.city:
            self.city = os.path.normpath(self.city)
            if (not os.path.isdir(self.city)):
                self.city = ''
        if self.labelPath:
            self.labelPath = os.path.normpath(self.labelPath)
        if self.correctionPath:
            self.correctionPath = os.path.normpath(self.correctionPath)
        if self.city:
            # Fix: this was a no-op comparison ('==') in the original, so
            # cityName was never derived from the city path.
            self.cityName = os.path.basename(self.city)
        if ((not os.path.isfile(self.currentFile)) or (os.path.dirname(self.currentFile) != self.city)):
            self.currentFile = ''
        if ((not os.path.isfile(self.currentLabelFile)) or (not os.path.isdir(os.path.join(self.labelPath, self.cityName))) or (os.path.dirname(self.currentLabelFile) != os.path.join(self.labelPath, self.cityName))):
            self.currentLabelFile = ''
        if ((not os.path.isfile(self.currentCorrectionFile)) or (not os.path.isdir(os.path.join(self.correctionPath, self.cityName))) or (os.path.dirname(self.currentCorrectionFile) != os.path.join(self.correctionPath, self.cityName))):
            self.currentCorrectionFile = ''

    def save(self, filename):
        """Write the full state as pretty-printed JSON."""
        with open(filename, 'w') as f:
            f.write(json.dumps(self.__dict__, default=(lambda o: o.__dict__), sort_keys=True, indent=4))
class Backforward(textbase.TextBase):
    """Status-bar widget showing whether the current tab can navigate
    back ('<') and/or forward ('>')."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.enabled = False

    def on_tab_cur_url_changed(self, tabs):
        """Refresh the indicator for the currently selected tab."""
        current = tabs.widget.currentWidget()
        if current is None:
            # No tab left: clear and hide the indicator.
            self.setText('')
            self.hide()
            return
        self.on_tab_changed(current)

    def on_tab_changed(self, tab):
        """Rebuild the '[<>]' indicator from the tab's history state."""
        symbols = ''
        if tab.history.can_go_back():
            symbols += '<'
        if tab.history.can_go_forward():
            symbols += '>'
        text = ('[' + symbols) + ']' if symbols else symbols
        self.setText(text)
        # Only show when there is something to display and we are enabled.
        self.setVisible(bool(text) and self.enabled)
class DistortionDataset(data.Dataset):
    """Paired dataset of distorted images and their corrected counterparts,
    matched by sorted filename order.  The transform is applied to the
    distorted image only."""

    def __init__(self, distorted_image_dir, corrected_image_dir, transform):
        self.distorted_image_paths = sorted(
            os.path.join(distorted_image_dir, name) for name in os.listdir(distorted_image_dir)
        )
        self.corrected_image_paths = sorted(
            os.path.join(corrected_image_dir, name) for name in os.listdir(corrected_image_dir)
        )
        self.transform = transform

    def _load_as_tensor(self, path):
        # HWC uint8 image -> CHW float tensor in [0, 1].
        image = skimage.io.imread(path)
        image = image.astype(np.float32) / 255.0
        return torch.Tensor(image).permute(2, 0, 1)

    def __getitem__(self, index):
        distorted = self._load_as_tensor(self.distorted_image_paths[index])
        corrected = self._load_as_tensor(self.corrected_image_paths[index])
        return (self.transform(distorted), corrected)

    def __len__(self):
        return len(self.distorted_image_paths)
class TransactionMined():
    """Value object describing a transaction that has been mined on-chain.

    NOTE(review): annotation-only fields with no __init__ suggest a
    @dataclass/attrs decorator was stripped from this class — confirm
    against the original source.
    """
    from_address: Address                                   # sender account
    data: Union[(SmartContractCall, ByteCode, EthTransfer)] # payload variant
    eth_node: Optional[EthClient]                           # client that sent it, if known
    extra_log_details: Dict[(str, Any)]                     # context for structured logging
    startgas: int                                           # gas limit supplied
    gas_price: int
    nonce: Nonce
    transaction_hash: TransactionHash
    receipt: TxReceipt                                      # mined receipt
    chain_id: ChainID
def KD_loss(args, teacher_g, noise, fake_img, fake_img_list, percept_loss):
    """Knowledge-distillation losses between a student's images and a frozen
    teacher generator: an L1 term and a perceptual term.

    Returns (kd_l1_loss, kd_percept_loss).

    NOTE(review): both return values are conditionally assigned — an
    unrecognized kd_l1_mode / kd_percept_mode raises UnboundLocalError at the
    return; confirm the valid mode sets are enforced upstream.
    """
    fake_img_teacher_list = teacher_g(noise, return_rgb_list=True)
    fake_img_teacher = fake_img_teacher_list[(- 1)]
    # Teacher outputs need grad enabled so gradients flow through the loss.
    fake_img_teacher.requires_grad = True
    if (args.kd_l1_mode == 'Output_Only'):
        kd_l1_loss = (args.kd_l1_lambda * torch.mean(torch.abs((fake_img_teacher - fake_img))))
    elif (args.kd_l1_mode == 'Intermediate'):
        # NOTE(review): this loop rebinds the outer `fake_img_teacher`; after
        # it, the name refers to the last list element — the same tensor it
        # held before, but the shadowing is fragile. Same for the zip() names
        # in the comprehension (they are comprehension-scoped in Python 3).
        for fake_img_teacher in fake_img_teacher_list:
            fake_img_teacher.requires_grad = True
        loss_list = [torch.mean(torch.abs((fake_img_teacher - fake_img))) for (fake_img_teacher, fake_img) in zip(fake_img_teacher_list, fake_img_list)]
        kd_l1_loss = (args.kd_l1_lambda * sum(loss_list))
    # Downsample large images before the perceptual loss to bound its cost.
    if (args.size > train_sparsity_hyperparams.PERCEPT_LOSS_IMAGE_SIZE):
        pooled_kernel_size = (args.size // train_sparsity_hyperparams.PERCEPT_LOSS_IMAGE_SIZE)
        fake_img = F.avg_pool2d(fake_img, kernel_size=pooled_kernel_size, stride=pooled_kernel_size)
        fake_img_teacher = F.avg_pool2d(fake_img_teacher, kernel_size=pooled_kernel_size, stride=pooled_kernel_size)
    if (args.kd_percept_mode == 'LPIPS'):
        kd_percept_loss = (args.kd_percept_lambda * torch.mean(percept_loss(fake_img, fake_img_teacher)))
    elif (args.kd_percept_mode == 'VGG'):
        student_output_vgg_features = percept_loss(fake_img)
        teacher_output_vgg_features = percept_loss(fake_img_teacher)
        kd_percept_loss = (args.kd_percept_lambda * perceptual_loss(student_output_vgg_features, teacher_output_vgg_features)[0])
    return (kd_l1_loss, kd_percept_loss)
def main(args):
    """Run the PIXIE body-reconstruction demo over every image in args.inputpath.

    For each detected body the script encodes/decodes PIXIE parameters, optionally
    refines the face with DECA, and writes visualizations / meshes / parameters
    under args.savefolder according to the save* flags.
    """
    savefolder = args.savefolder
    device = args.device
    os.makedirs(savefolder, exist_ok=True)
    if (not torch.cuda.is_available()):
        print('CUDA is not available! use CPU instead')
    else:
        # cudnn autotuning; determinism is explicitly disabled for speed.
        cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.enabled = True
    # Test-time dataset with an R-CNN body detector.
    testdata = TestData(args.inputpath, iscrop=args.iscrop, body_detector='rcnn')
    pixie_cfg.model.use_tex = args.useTex
    pixie = PIXIE(config=pixie_cfg, device=device)
    visualizer = Visualizer(render_size=args.render_size, config=pixie_cfg, device=device, rasterizer_type=args.rasterizer_type)
    if args.deca_path:
        # DECA is an optional dependency loaded from a user-supplied checkout.
        sys.path.insert(0, args.deca_path)
        from decalib.deca import DECA
        deca = DECA(device=device)
        use_deca = True
    else:
        use_deca = False
    for (i, batch) in enumerate(tqdm(testdata, dynamic_ncols=True)):
        util.move_dict_to_device(batch, device)
        # Add a batch dimension of 1 to the single image.
        batch['image'] = batch['image'].unsqueeze(0)
        batch['image_hd'] = batch['image_hd'].unsqueeze(0)
        name = batch['name']
        data = {'body': batch}
        # NOTE: 'threthold' is the (misspelled) keyword expected by the PIXIE API.
        param_dict = pixie.encode(data, threthold=True, keep_local=True, copy_and_paste=False)
        moderator_weight = param_dict['moderator_weight']
        codedict = param_dict['body']
        opdict = pixie.decode(codedict, param_type='body')
        # Convert the FLAME-space albedo to SMPL-X UV space.
        opdict['albedo'] = visualizer.tex_flame2smplx(opdict['albedo'])
        if (args.saveObj or args.saveParam or args.savePred or args.saveImages or (args.deca_path is not None)):
            os.makedirs(os.path.join(savefolder, name), exist_ok=True)
        # Only run DECA when the head moderator is confident enough (> 0.6).
        if ((args.deca_path is not None) and (param_dict['moderator_weight']['head'][(0, 1)].item() > 0.6)):
            cropped_face_savepath = os.path.join(savefolder, name, f'{name}_facecrop.jpg')
            cv2.imwrite(cropped_face_savepath, util.tensor2image(data['body']['head_image'][0]))
            (_, deca_opdict, _) = deca.run(cropped_face_savepath)
            flame_displacement_map = deca_opdict['displacement_map']
            opdict['displacement_map'] = visualizer.tex_flame2smplx(flame_displacement_map)
        if args.lightTex:
            visualizer.light_albedo(opdict)
        if args.extractTex:
            visualizer.extract_texture(opdict, data['body']['image_hd'])
        if (args.reproject_mesh and (args.rasterizer_type == 'standard')):
            # Undo the crop transform so the mesh is rendered on the original image.
            tform = batch['tform'][(None, ...)]
            tform = torch.inverse(tform).transpose(1, 2)
            original_image = batch['original_image'][(None, ...)]
            visualizer.recover_position(opdict, batch, tform, original_image)
        if args.saveVis:
            if (args.showWeight is False):
                moderator_weight = None
            visdict = visualizer.render_results(opdict, data['body']['image_hd'], overlay=True, moderator_weight=moderator_weight, use_deca=use_deca)
            if args.showParts:
                visdict['head'] = data['body']['head_image']
                visdict['left_hand'] = data['body']['left_hand_image']
                visdict['right_hand'] = data['body']['right_hand_image']
            cv2.imwrite(os.path.join(savefolder, f'{name}_vis.jpg'), visualizer.visualize_grid(visdict, size=args.render_size))
        if args.saveGif:
            # NOTE(review): uses visdict from the saveVis branch; presumably
            # saveGif is only used together with saveVis -- confirm.
            visualizer.rotate_results(opdict, visdict=visdict, savepath=os.path.join(savefolder, f'{name}_vis.gif'))
        if args.saveObj:
            visualizer.save_obj(os.path.join(savefolder, name, f'{name}.obj'), opdict)
        if args.saveParam:
            codedict['bbox'] = batch['bbox']
            util.save_pkl(os.path.join(savefolder, name, f'{name}_param.pkl'), codedict)
            np.savetxt(os.path.join(savefolder, name, f'{name}_bbox.txt'), batch['bbox'].squeeze())
        if args.savePred:
            util.save_pkl(os.path.join(savefolder, name, f'{name}_prediction.pkl'), opdict)
        if args.saveImages:
            for vis_name in visdict.keys():
                cv2.imwrite(os.path.join(savefolder, name, f'{name}_{vis_name}.jpg'), util.tensor2image(visdict[vis_name][0]))
    print(f'-- please check the results in {savefolder}')
class Obelisk(TutorialObject):
    """Scenery object whose description is re-rolled on every look.

    The index of the description shown is stored on the looker so the
    puzzle can later check which scene that player actually saw.
    """

    def at_object_creation(self):
        """Set tutorial metadata, the description pool and the get-lock."""
        super().at_object_creation()
        self.db.tutorial_info = 'This object changes its desc randomly, and makes sure to remember which one you saw.'
        self.db.puzzle_descs = ['You see a normal stone slab']
        # Pure scenery -- nobody may pick the obelisk up.
        self.locks.add('get:false()')

    def return_appearance(self, caller):
        """Pick a random clue description, remember it on *caller*, render."""
        pool = self.db.puzzle_descs
        chosen = random.randrange(len(pool))
        preamble = 'The surface of the obelisk seem to waver, shift and writhe under your gaze, with different scenes and structures appearing whenever you look at it. '
        self.db.desc = preamble + pool[chosen]
        # Store the clue index on the looker for later puzzle checks.
        caller.db.puzzle_clue = chosen
        return super().return_appearance(caller)
def main():
    """CLI entry point: queue, hot-start, or pool-run SWMM models.

    Exactly one of -r / -rhs / -sp is honoured (first match wins);
    paths are resolved relative to the current working directory.
    """
    parser = argparse.ArgumentParser(description='Process some stuff')
    parser.add_argument('-r', '--run', dest='model_to_run', nargs='+')
    parser.add_argument('-rhs', '--run_hotstart', dest='hotstart_model_to_run', nargs='+')
    parser.add_argument('-sp', '--start_pool', dest='start_pool', nargs='+')
    parser.add_argument('-cores_left', '--cores_left', dest='cores_left', default=4, type=int)
    parser.add_argument('-pp', '--post_process', dest='post_process', nargs='+')
    args = parser.parse_args()
    cwd = os.getcwd()
    if args.model_to_run is not None:
        paths = [os.path.join(cwd, name) for name in args.model_to_run]
        print('Adding models to queue:\n\t{}'.format('\n\t'.join(paths)))
        for path in paths:
            run_simple(path)
    elif args.hotstart_model_to_run is not None:
        paths = [os.path.join(cwd, name) for name in args.hotstart_model_to_run]
        print('hotstart_model_to_run the model: {}'.format(args.hotstart_model_to_run))
        for path in paths:
            run_hot_start_sequence(path)
    elif args.start_pool is not None:
        search_dirs = [os.path.join(cwd, name) for name in args.start_pool]
        print('Searching for models in:\n\t{}'.format('\n\t'.join(search_dirs)))
        # Collect every .inp file, skipping backup ('bk') directories.
        inp_paths = []
        for root, dirs, files in chain.from_iterable(os.walk(d) for d in search_dirs):
            inp_paths.extend(
                os.path.join(root, fname)
                for fname in files
                if fname.endswith('.inp') and 'bk' not in root
            )
        start_pool.main(inp_paths, args.cores_left)
        print('swmmio has completed running {} models'.format(len(inp_paths)))
    else:
        print('you need to pass in some args')
    return 0
def test_fix_cmd_darwin():
    """fix_cmd must translate a GNU-ld version-script flag into Darwin
    -exported_symbol flags covering all relevant mangling prefixes."""
    # One wildcard pattern and one plain class name.
    export_file_content = '\nC++ geobase5::details::lookup_impl::*\nC++ geobase5::hardcoded_service\n'
    filename = write_temp_file(export_file_content)
    args = ['-Wl,--version-script={}'.format(filename)]
    # Each symbol expands to _ZN/_ZTI/_ZTS/_ZTT/_ZTV/_ZNK mangled variants.
    assert (fix_cmd('DARWIN', args) == ['-Wl,-exported_symbol,__ZN8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZTIN8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZTSN8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZTTN8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZTVN8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZNK8geobase57details11lookup_impl*', '-Wl,-exported_symbol,__ZN8geobase517hardcoded_serviceE*', '-Wl,-exported_symbol,__ZTIN8geobase517hardcoded_serviceE*', '-Wl,-exported_symbol,__ZTSN8geobase517hardcoded_serviceE*', '-Wl,-exported_symbol,__ZTTN8geobase517hardcoded_serviceE*', '-Wl,-exported_symbol,__ZTVN8geobase517hardcoded_serviceE*', '-Wl,-exported_symbol,__ZNK8geobase517hardcoded_serviceE*'])
# NOTE(review): the line below looks like a stripped @register_model_architecture
# decorator -- confirm against the original fairseq source.
_model_architecture('transformer_lm', 'transformer_lm_gpt3_large')
def transformer_lm_gpt3_large(args):
    """GPT-3 'large' preset: fill decoder hyper-parameters only if the caller
    has not already set them, then apply the shared GPT-3 defaults."""
    args.decoder_layers = getattr(args, 'decoder_layers', 24)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1536)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    base_gpt3_architecture(args)
def test_method_ports():
    """Smoke-test IncrMethodPorts: apply the default pass group, dump the
    update schedule and line-trace headers, then simulate six cycles."""
    incr = IncrMethodPorts()
    incr.apply(DefaultPassGroup())
    print('\n==== Schedule ====')
    for blk in incr._sched.update_schedule:
        # Skip internally generated blocks whose names start with 's'.
        if (not blk.__name__.startswith('s')):
            print(blk.__name__)
    print('\n==== Line trace ====')
    print(' buf1 buf2')
    incr.sim_reset()
    for i in range(6):
        incr.sim_tick()
# NOTE(review): the two lines below look like stripped @route/@wrapper_json
# decorators -- confirm against the original source.
('/list/view_domain', methods=['POST'])
_wrapper_json
def list_domain_name():
    """Search view records by domain name, filtered by server rooms, ISPs
    and (optionally) CDN records; request body is JSON."""
    json_data = request.get_json(force=True)
    # All fields are optional; sensible defaults keep the search broad.
    domain_name = json_data.get('domain_name', '').strip()
    server_rooms = json_data.get('server_rooms', [])
    isps = json_data.get('isps', [])
    select_cdn = json_data.get('select_cdn', True)
    return ViewRecordDal.search_view_domain(domain_name, server_rooms, isps, select_cdn)
class SmtPrinter(TreeWalker):
    """TreeWalker that serializes pySMT formulas to SMT-LIB 2 text.

    Output is written incrementally to *stream*; most operators use the
    generic prefix form emitted by walk_nary.  Methods containing `yield`
    hand sub-formulas back to the TreeWalker driver for recursive printing.
    NOTE(review): the bare `_annotations` / `(op.BV_ROR, op.BV_ROL)` lines
    look like stripped decorator metadata -- confirm against the original
    pySMT source.
    """

    def __init__(self, stream, annotations=None):
        TreeWalker.__init__(self)
        self.stream = stream
        # Bind write once -- it is called on every token.
        self.write = self.stream.write
        self.mgr = get_env().formula_manager
        self.annotations = annotations

    def printer(self, f):
        """Public entry point: print formula *f* to the stream."""
        self.walk(f)

    def walk_threshold(self, formula):
        raise NotImplementedError

    _annotations
    def walk_nary(self, formula, operator):
        # Generic "(op arg1 arg2 ...)" form; yields each argument back to
        # the TreeWalker for recursive printing.
        self.write(('(%s' % operator))
        for s in formula.args():
            self.write(' ')
            (yield s)
        self.write(')')

    # --- Boolean / arithmetic operators, all via the generic n-ary form ---
    def walk_and(self, formula):
        return self.walk_nary(formula, 'and')

    def walk_or(self, formula):
        return self.walk_nary(formula, 'or')

    def walk_not(self, formula):
        return self.walk_nary(formula, 'not')

    def walk_implies(self, formula):
        return self.walk_nary(formula, '=>')

    def walk_iff(self, formula):
        return self.walk_nary(formula, '=')

    def walk_plus(self, formula):
        return self.walk_nary(formula, '+')

    def walk_minus(self, formula):
        return self.walk_nary(formula, '-')

    def walk_times(self, formula):
        return self.walk_nary(formula, '*')

    def walk_equals(self, formula):
        return self.walk_nary(formula, '=')

    def walk_le(self, formula):
        return self.walk_nary(formula, '<=')

    def walk_lt(self, formula):
        return self.walk_nary(formula, '<')

    def walk_ite(self, formula):
        return self.walk_nary(formula, 'ite')

    def walk_toreal(self, formula):
        return self.walk_nary(formula, 'to_real')

    def walk_div(self, formula):
        return self.walk_nary(formula, '/')

    def walk_pow(self, formula):
        return self.walk_nary(formula, 'pow')

    # --- Bit-vector operators ---
    def walk_bv_and(self, formula):
        return self.walk_nary(formula, 'bvand')

    def walk_bv_or(self, formula):
        return self.walk_nary(formula, 'bvor')

    def walk_bv_not(self, formula):
        return self.walk_nary(formula, 'bvnot')

    def walk_bv_xor(self, formula):
        return self.walk_nary(formula, 'bvxor')

    def walk_bv_add(self, formula):
        return self.walk_nary(formula, 'bvadd')

    def walk_bv_sub(self, formula):
        return self.walk_nary(formula, 'bvsub')

    def walk_bv_neg(self, formula):
        return self.walk_nary(formula, 'bvneg')

    def walk_bv_mul(self, formula):
        return self.walk_nary(formula, 'bvmul')

    def walk_bv_udiv(self, formula):
        return self.walk_nary(formula, 'bvudiv')

    def walk_bv_urem(self, formula):
        return self.walk_nary(formula, 'bvurem')

    def walk_bv_lshl(self, formula):
        return self.walk_nary(formula, 'bvshl')

    def walk_bv_lshr(self, formula):
        return self.walk_nary(formula, 'bvlshr')

    def walk_bv_ult(self, formula):
        return self.walk_nary(formula, 'bvult')

    def walk_bv_ule(self, formula):
        return self.walk_nary(formula, 'bvule')

    def walk_bv_slt(self, formula):
        return self.walk_nary(formula, 'bvslt')

    def walk_bv_sle(self, formula):
        return self.walk_nary(formula, 'bvsle')

    def walk_bv_concat(self, formula):
        return self.walk_nary(formula, 'concat')

    def walk_bv_comp(self, formula):
        return self.walk_nary(formula, 'bvcomp')

    def walk_bv_ashr(self, formula):
        return self.walk_nary(formula, 'bvashr')

    def walk_bv_sdiv(self, formula):
        return self.walk_nary(formula, 'bvsdiv')

    def walk_bv_srem(self, formula):
        return self.walk_nary(formula, 'bvsrem')

    def walk_bv_tonatural(self, formula):
        return self.walk_nary(formula, 'bv2nat')

    # --- Arrays ---
    def walk_array_select(self, formula):
        return self.walk_nary(formula, 'select')

    def walk_array_store(self, formula):
        return self.walk_nary(formula, 'store')

    # --- Leaves: symbols, functions and constants ---
    _annotations
    def walk_symbol(self, formula):
        self.write(quote(formula.symbol_name()))

    def walk_function(self, formula):
        return self.walk_nary(formula, quote(formula.function_name().symbol_name()))

    _annotations
    def walk_int_constant(self, formula):
        # SMT-LIB has no negative literals: print "(- n)" instead.
        if (formula.constant_value() < 0):
            self.write((('(- ' + str((- formula.constant_value()))) + ')'))
        else:
            self.write(str(formula.constant_value()))

    _annotations
    def walk_real_constant(self, formula):
        # Rationals print as "(/ n d)"; integral reals get a ".0" suffix.
        if (formula.constant_value() < 0):
            template = '(- %s)'
        else:
            template = '%s'
        (n, d) = (abs(formula.constant_value().numerator), formula.constant_value().denominator)
        if (d != 1):
            res = (template % (((('(/ ' + str(n)) + ' ') + str(d)) + ')'))
        else:
            res = (template % (str(n) + '.0'))
        self.write(res)

    _annotations
    def walk_bool_constant(self, formula):
        if formula.constant_value():
            self.write('true')
        else:
            self.write('false')

    _annotations
    def walk_bv_constant(self, formula):
        # Binary literal form, e.g. #b0101.
        self.write(('#b' + formula.bv_bin_str()))

    _annotations
    def walk_str_constant(self, formula):
        # SMT-LIB escapes a double quote by doubling it.
        self.write((('"' + formula.constant_value().replace('"', '""')) + '"'))

    # --- Quantifiers ---
    def walk_forall(self, formula):
        return self._walk_quantifier('forall', formula)

    def walk_exists(self, formula):
        return self._walk_quantifier('exists', formula)

    _annotations
    def _walk_quantifier(self, operator, formula):
        # "(forall ((v T) ...) body)"; each bound var prints with its sort.
        assert (len(formula.quantifier_vars()) > 0)
        self.write(('(%s (' % operator))
        for s in formula.quantifier_vars():
            self.write('(')
            (yield s)
            self.write((' %s)' % s.symbol_type().as_smtlib(False)))
        self.write(') ')
        (yield formula.arg(0))
        self.write(')')

    # --- Parameterized bit-vector operators ("(_ op k)") ---
    _annotations
    def walk_bv_extract(self, formula):
        self.write(('((_ extract %d %d) ' % (formula.bv_extract_end(), formula.bv_extract_start())))
        (yield formula.arg(0))
        self.write(')')

    (op.BV_ROR, op.BV_ROL)
    _annotations
    def walk_bv_rotate(self, formula):
        if formula.is_bv_ror():
            rotate_type = 'rotate_right'
        else:
            assert formula.is_bv_rol()
            rotate_type = 'rotate_left'
        self.write(('((_ %s %d) ' % (rotate_type, formula.bv_rotation_step())))
        (yield formula.arg(0))
        self.write(')')

    (op.BV_ZEXT, op.BV_SEXT)
    _annotations
    def walk_bv_extend(self, formula):
        if formula.is_bv_zext():
            extend_type = 'zero_extend'
        else:
            assert formula.is_bv_sext()
            extend_type = 'sign_extend'
        self.write(('((_ %s %d) ' % (extend_type, formula.bv_extend_step())))
        (yield formula.arg(0))
        self.write(')')

    # --- String theory operators (print arguments via self.walk directly) ---
    _annotations
    def walk_str_length(self, formula):
        self.write('(str.len ')
        self.walk(formula.arg(0))
        self.write(')')

    _annotations
    def walk_str_charat(self, formula, **kwargs):
        self.write('( str.at ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(')')

    _annotations
    def walk_str_concat(self, formula, **kwargs):
        self.write('( str.++ ')
        for arg in formula.args():
            self.walk(arg)
            self.write(' ')
        self.write(')')

    _annotations
    def walk_str_contains(self, formula, **kwargs):
        self.write('( str.contains ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(')')

    _annotations
    def walk_str_indexof(self, formula, **kwargs):
        self.write('( str.indexof ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(' ')
        self.walk(formula.arg(2))
        self.write(')')

    _annotations
    def walk_str_replace(self, formula, **kwargs):
        self.write('( str.replace ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(' ')
        self.walk(formula.arg(2))
        self.write(')')

    _annotations
    def walk_str_substr(self, formula, **kwargs):
        self.write('( str.substr ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(' ')
        self.walk(formula.arg(2))
        self.write(')')

    _annotations
    def walk_str_prefixof(self, formula, **kwargs):
        self.write('( str.prefixof ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(')')

    _annotations
    def walk_str_suffixof(self, formula, **kwargs):
        self.write('( str.suffixof ')
        self.walk(formula.arg(0))
        self.write(' ')
        self.walk(formula.arg(1))
        self.write(')')

    _annotations
    def walk_str_to_int(self, formula, **kwargs):
        self.write('( str.to.int ')
        self.walk(formula.arg(0))
        self.write(')')

    _annotations
    def walk_int_to_str(self, formula, **kwargs):
        self.write('( int.to.str ')
        self.walk(formula.arg(0))
        self.write(')')

    _annotations
    def walk_array_value(self, formula):
        # Nested "(store ... )" chain over a "((as const T) default)" base;
        # keys are sorted by string form for deterministic output.
        assign = formula.array_value_assigned_values_map()
        for _ in range(len(assign)):
            self.write('(store ')
        self.write(('((as const %s) ' % formula.get_type().as_smtlib(False)))
        (yield formula.array_value_default())
        self.write(')')
        for k in sorted(assign, key=str):
            self.write(' ')
            (yield k)
            self.write(' ')
            (yield assign[k])
            self.write(')')
def compute_dense_reward(self, action, obs):
    """Dense reward: negative weighted sum of hand-to-handle distance,
    handle-to-goal distance, and an action-magnitude penalty.

    obs[:3] is read as the hand position and obs[4:7] as the handle
    position; the goal comes from the wrapped env.
    """
    # Weights for the three penalty terms.
    dist_w, goal_w, act_w = 1.0, 1.0, 0.1
    hand_to_handle = np.linalg.norm(obs[:3] - obs[4:7])
    handle_to_goal = np.linalg.norm(obs[4:7] - self.env._get_pos_goal())
    effort = np.linalg.norm(action)
    return -(dist_w * hand_to_handle + goal_w * handle_to_goal + act_w * effort)
def get_array_date(scn_data, utc_date=None):
    """Return *utc_date* if given, otherwise the scene's acquisition time.

    Looks up ``scn_data.attrs['start_time']`` first and falls back to
    ``scn_data.attrs['scheduled_time']``.

    Raises:
        KeyError: if neither attribute is present.
    """
    if utc_date is not None:
        return utc_date
    # Flattened from a nested try/except ladder: prefer the actual start
    # time, fall back to the scheduled time.
    for attr in ('start_time', 'scheduled_time'):
        try:
            return scn_data.attrs[attr]
        except KeyError:
            continue
    raise KeyError('Scene has no start_time or scheduled_time attribute.')
def chat_member_administrator():
    """Build a ChatMemberAdministrator test fixture populated entirely from
    CMDefaults (positional order must match the constructor signature)."""
    return ChatMemberAdministrator(CMDefaults.user, CMDefaults.can_be_edited, CMDefaults.is_anonymous, CMDefaults.can_manage_chat, CMDefaults.can_delete_messages, CMDefaults.can_manage_video_chats, CMDefaults.can_restrict_members, CMDefaults.can_promote_members, CMDefaults.can_change_info, CMDefaults.can_invite_users, CMDefaults.can_post_messages, CMDefaults.can_edit_messages, CMDefaults.can_pin_messages, CMDefaults.can_manage_topics, CMDefaults.custom_title, CMDefaults.can_post_stories, CMDefaults.can_edit_stories, CMDefaults.can_delete_stories)
def run(config):
    """Train a BigGAN-style G/D pair plus encoder (E1) and alignment (A1)
    modules, with optional EMA of G, fp16 casting, resumption, and periodic
    checkpoint/sample saving.  *config* is mutated in place with derived
    settings before training starts.
    """
    # Derive dataset-dependent settings.
    config['resolution'] = utils.imsize_dict[config['dataset']]
    config['n_classes'] = utils.nclass_dict[config['dataset']]
    config['G_activation'] = utils.activation_dict[config['G_nl']]
    config['D_activation'] = utils.activation_dict[config['D_nl']]
    if config['resume']:
        print('Skipping initialization for training resumption...')
        config['skip_init'] = True
    config = utils.update_config_roots(config)
    device = 'cuda'
    if config['training_scratch']:
        # Per-resolution feature-matching loss weights used when training
        # from scratch.
        config['E1_fea_w'] = {4: 1, 8: 1, 16: 1, 32: 0.1}
        config['D_fea_w'] = {4: 0.1, 8: 0.1, 16: 0.1, 32: 0.01}
    utils.seed_rng(config['seed'])
    utils.prepare_root(config)
    torch.backends.cudnn.benchmark = True
    # The model module is chosen by name at runtime.
    model = __import__(config['model'])
    experiment_name = (config['experiment_name'] if config['experiment_name'] else utils.name_from_config(config))
    print(('Experiment name is %s' % experiment_name))
    G = model.Generator(**config).to(device)
    D = model.Discriminator(**config).to(device)
    E1 = model.Encoder(**config).to(device)
    A1 = model.Alignment(**config).to(device)
    if config['ema']:
        # EMA copy of G keeps no optimizer and skips init.
        print('Preparing EMA for G with decay of {}'.format(config['ema_decay']))
        G_ema = model.Generator(**{**config, 'skip_init': True, 'no_optim': True}).to(device)
        ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
    else:
        (G_ema, ema) = (None, None)
    if config['G_fp16']:
        print('Casting G to float16...')
        G = G.half()
        if config['ema']:
            G_ema = G_ema.half()
    if config['D_fp16']:
        print('Casting D to fp16...')
        D = D.half()
    # Combined wrapper module used for parallel forward passes.
    GD = model.G_D(G, D, E1, A1)
    print(G)
    print(D)
    print(E1)
    print(A1)
    print('Number of params in G: {} D: {} E1: {} A1: {}'.format(*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D, E1, A1]]))
    # Mutable training state shared with the train function and savers.
    state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0, 'best_IS': 0, 'best_FID': 999999, 'config': config}
    if config['resume']:
        print('Loading weights...')
        utils.load_weights(G, D, E1, A1, state_dict, config['weights_root'], experiment_name, (config['load_weights'] if config['load_weights'] else None), (G_ema if config['ema'] else None), resume_BigGAN=config['resume_BigGAN'])
    if config['parallel']:
        GD = nn.DataParallel(GD)
        if config['cross_replica']:
            patch_replication_callback(GD)
    train_metrics_fname = ('%s/%s' % (config['logs_root'], experiment_name))
    print('Training Metrics will be saved to {}'.format(train_metrics_fname))
    train_log = utils.MyLogger(train_metrics_fname, reinitialize=(not config['resume']), logstyle=config['logstyle'])
    utils.write_metadata(config['logs_root'], experiment_name, config, state_dict)
    # D sees batch_size * steps * accumulations samples per G update.
    D_batch_size = ((config['batch_size'] * config['num_D_steps']) * config['num_D_accumulations'])
    loaders = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size, 'start_itr': state_dict['itr'], 'target_domain': None})
    # Second loader provides target-domain batches (t_x, t_y).
    loaders_t = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size, 'start_itr': state_dict['itr'], 'target_domain': None})
    G_batch_size = max(config['G_batch_size'], config['batch_size'])
    (z_, y_) = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'], N_target_cate=config['N_target_cate'])
    # Fixed noise/labels for reproducible sample grids.
    (fixed_z, fixed_y) = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
    fixed_z.sample_()
    fixed_y.sample_()
    if (config['which_train_fn'] == 'GAN'):
        train = train_fns.GAN_training_function(G, D, E1, A1, GD, z_, y_, ema, state_dict, config)
    else:
        train = train_fns.dummy_training_function()
    t_data = iter(loaders_t[0])
    (fixed_t_x, fixed_t_y) = (None, None)
    print(('Beginning training at epoch %d...' % state_dict['epoch']))
    for epoch in range(state_dict['epoch'], config['num_epochs']):
        if (config['pbar'] == 'mine'):
            pbar = utils.progress(loaders[0], displaytype=('s1k' if config['use_multiepoch_sampler'] else 'eta'))
        else:
            pbar = tqdm(loaders[0])
        for (i, (x, y)) in enumerate(pbar):
            state_dict['itr'] += 1
            t_batch = next(t_data)
            (t_x, t_y) = t_batch
            # Drop the trailing partial source batch of the epoch.
            if (len(x) != (config['num_D_accumulations'] * config['batch_size'])):
                break
            # Restart the target loader when it yields a partial batch.
            if (len(t_x) != (config['num_D_accumulations'] * config['batch_size'])):
                t_data = iter(loaders_t[0])
                t_batch = next(t_data)
                (t_x, t_y) = t_batch
            if (fixed_t_x is None):
                # Capture fixed target/source samples for visualization once.
                (fixed_t_x, fixed_t_y) = (t_x[:G_batch_size].detach().clone(), t_y[:G_batch_size].detach().clone())
                fixed_x_v2 = x[:G_batch_size].detach().clone()
                (fixed_t_x, fixed_t_y) = (fixed_t_x.to(device), fixed_t_y.to(device))
            G.train()
            D.train()
            E1.train()
            A1.train()
            if config['ema']:
                G_ema.train()
            if config['D_fp16']:
                (x, y) = (x.to(device).half(), y.to(device))
                (t_x, t_y) = (t_x.to(device).half(), t_y.to(device))
            else:
                (x, y) = (x.to(device), y.to(device))
                (t_x, t_y) = (t_x.to(device), t_y.to(device))
            # Stage 2 kicks in after a hard-coded warm-up iteration count,
            # or immediately when training from scratch.
            if ((state_dict['itr'] > 138500) or config['training_scratch']):
                stage = 2
            else:
                stage = 1
            metrics = train(x, y, t_x, t_y, stage=stage, training_scratch=config['training_scratch'])
            train_log.log(itr=int(state_dict['itr']), **metrics)
            # Optionally log singular values of G and D.
            if ((config['sv_log_interval'] > 0) and (not (state_dict['itr'] % config['sv_log_interval']))):
                train_log.log(itr=int(state_dict['itr']), **{**utils.get_SVs(G, 'G'), **utils.get_SVs(D, 'D')})
            if (config['pbar'] == 'mine'):
                print(', '.join(([('itr: %d' % state_dict['itr'])] + [('%s : %+4.3f' % (key, metrics[key])) for key in metrics])), end=' ')
            # Periodic checkpoint + sample dump.
            if (not (state_dict['itr'] % config['save_every'])):
                if config['G_eval_mode']:
                    print('Switchin G to eval mode...')
                    E1.eval()
                    A1.eval()
                    G.eval()
                    if config['ema']:
                        G_ema.eval()
                train_fns.save_and_sample(G, D, E1, A1, G_ema, z_, y_, fixed_z, fixed_y, state_dict, config, experiment_name, fixed_t_x, fixed_t_y, fixed_x_v2)
        state_dict['epoch'] += 1
# NOTE(review): the line below looks like a stripped @mock.patch('time.sleep')
# decorator -- confirm against the original source.
('time.sleep')
def test_wait_until_true_invoke_inline(mock_time_sleep):
    """wait_until_true should poll until the wrapped predicate is truthy:
    here the 4th call matches, so 4 calls and 3 sleeps are expected."""
    mock = MagicMock()
    mock.side_effect = ['test string 1', 'test string 2', 'test string 3', 'expected value', 'test string 5']
    def decorate_me(arg1, arg2):
        # Arguments must be forwarded unchanged on every attempt.
        assert (arg1 == 'v1')
        assert (arg2 == 'v2')
        return (mock(arg1) == 'expected value')
    assert poll.wait_until_true(interval=0.01, max_attempts=10)(decorate_me)('v1', 'v2')
    assert (mock.call_count == 4)
    mock.assert_called_with('v1')
    # One sleep between each pair of attempts.
    assert (mock_time_sleep.call_count == 3)
    mock_time_sleep.assert_called_with(0.01)
class DropBertModel():
    """Training/eval wrapper around a BERT-based network for the DROP task.

    Handles optimizer setup with per-parameter-group learning rates and
    weight decay, gradient accumulation, metric averaging, evaluation and
    checkpointing.  NOTE(review): the bare `_grad()` line before evaluate()
    looks like a stripped @torch.no_grad() decorator -- confirm.
    """

    def __init__(self, args, network, state_dict=None, num_train_step=(- 1)):
        self.args = args
        self.train_loss = AverageMeter()
        self.step = 0       # micro-steps (one per update() call)
        self.updates = 0    # optimizer steps (after accumulation)
        self.network = network
        if (state_dict is not None):
            print('Load Model!')
            self.network.load_state_dict(state_dict['state'])
        # Multi-GPU: wrap in DataParallel but keep self.network as the
        # underlying module for state_dict / parameter access.
        self.mnetwork = (nn.DataParallel(self.network) if (args.gpu_num > 1) else self.network)
        self.total_param = sum([p.nelement() for p in self.network.parameters() if p.requires_grad])
        # BERT parameters get their own LR; bias/LayerNorm get no weight decay.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_parameters = [{'params': [p for (n, p) in self.network.bert.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.bert_weight_decay, 'lr': args.bert_learning_rate}, {'params': [p for (n, p) in self.network.bert.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0, 'lr': args.bert_learning_rate}, {'params': [p for (n, p) in self.network.named_parameters() if (not n.startswith('bert.'))], 'weight_decay': args.weight_decay, 'lr': args.learning_rate}]
        self.optimizer = Adam(optimizer_parameters, lr=args.learning_rate, warmup=args.warmup, t_total=num_train_step, max_grad_norm=args.grad_clipping, schedule=args.warmup_schedule)
        if (self.args.gpu_num > 0):
            self.network.cuda()
        self.em_avg = AverageMeter()
        self.f1_avg = AverageMeter()

    def avg_reset(self):
        """Reset the running loss/EM/F1 averages."""
        self.train_loss.reset()
        self.em_avg.reset()
        self.f1_avg.reset()

    def update(self, tasks):
        """One training micro-step with gradient accumulation."""
        self.network.train()
        output_dict = self.mnetwork(**tasks)
        loss = output_dict['loss']
        # NOTE(review): get_metrics is called on mnetwork, which is a
        # DataParallel wrapper when gpu_num > 1 -- verify it forwards
        # attribute access to the underlying module.
        metrics = self.mnetwork.get_metrics(True)
        self.em_avg.update(metrics['em'], 1)
        self.f1_avg.update(metrics['f1'], 1)
        self.train_loss.update(loss.item(), 1)
        if (self.args.gradient_accumulation_steps > 1):
            loss /= self.args.gradient_accumulation_steps
        loss.backward()
        # Step the optimizer only every gradient_accumulation_steps calls.
        if (((self.step + 1) % self.args.gradient_accumulation_steps) == 0):
            self.optimizer.step()
            self.optimizer.zero_grad()
            self.updates += 1
        self.step += 1

    _grad()
    def evaluate(self, dev_data_list):
        """Evaluate on a dev set; returns (num_examples, avg_loss, EM, F1)."""
        dev_data_list.reset()
        self.network.eval()
        loss_sum = 0
        total_batch = 0
        total_num = 0
        for batch in dev_data_list:
            total_num += batch['input_ids'].size(0)
            output_dict = self.network(**batch)
            loss_sum += output_dict['loss'].item()
            total_batch += 1
        metrics = self.network.get_metrics(True)
        # Restore training mode before returning.
        self.network.train()
        return (total_num, (loss_sum / total_batch), metrics['em'], metrics['f1'])

    def save(self, prefix, epoch):
        """Save model weights to <prefix>.pt and optimizer/config to <prefix>.ot."""
        # Move tensors to CPU so the checkpoint loads on any device.
        network_state = dict([(k, v.cpu()) for (k, v) in self.network.state_dict().items()])
        other_params = {'optimizer': self.optimizer.state_dict(), 'config': self.args, 'epoch': epoch}
        state_path = (prefix + '.pt')
        other_path = (prefix + '.ot')
        torch.save(other_params, other_path)
        torch.save(network_state, state_path)
        print('model saved to {}'.format(prefix))
class SawyerButtonPressTopdownWallEnvV2(SawyerXYZEnv):
    """Metaworld V2 task: press a button from above, with a wall near the box.

    NOTE(review): `model_name` / `_target_site_config` read like stripped
    @property decorators, and `_assert_task_is_set` like a stripped
    decorator on evaluate_state -- confirm against the metaworld source.
    """

    def __init__(self):
        # Workspace limits for the hand and randomization range for the box.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, 0.115)
        obj_high = (0.1, 0.9, 0.115)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.8, 0.115], dtype=np.float32), 'hand_init_pos': np.array([0, 0.4, 0.2], dtype=np.float32)}
        self.goal = np.array([0, 0.88, 0.1])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_button_press_topdown_wall.xml')

    _assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute reward and the standard metaworld info dict for one step."""
        (reward, tcp_to_obj, tcp_open, obj_to_target, near_button, button_pressed) = self.compute_reward(action, obs)
        info = {'success': float((obj_to_target <= 0.02)), 'near_object': float((tcp_to_obj <= 0.05)), 'grasp_success': float((tcp_open > 0)), 'grasp_reward': near_button, 'in_place_reward': button_pressed, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
        return (reward, info)

    def _target_site_config(self):
        return []

    def _get_id_main_object(self):
        return self.unwrapped.model.geom_name2id('btnGeom')

    def _get_pos_objects(self):
        # Button position is offset upward from the body frame.
        return (self.get_body_com('button') + np.array([0.0, 0.0, 0.193]))

    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('button')

    def _set_obj_xyz(self, pos):
        # qpos[9]/qvel[9] address the button joint in this model.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand and (optionally randomized) box; cache target metrics."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        self._target_pos = self._get_site_pos('hole')
        # Initial button travel distance, used to normalize the press reward.
        self._obj_to_target_init = abs((self._target_pos[2] - self._get_site_pos('buttonStart')[2]))
        return self._get_obs()

    def compute_reward(self, action, obs):
        """Reward = reach term (hamacher of closed-gripper and proximity),
        plus a press term once the TCP is within 3 cm of the button."""
        del action
        obj = obs[4:7]
        tcp = self.tcp_center
        tcp_to_obj = np.linalg.norm((obj - tcp))
        tcp_to_obj_init = np.linalg.norm((obj - self.init_tcp))
        # Press progress is measured along z only.
        obj_to_target = abs((self._target_pos[2] - obj[2]))
        tcp_closed = (1 - obs[3])
        near_button = reward_utils.tolerance(tcp_to_obj, bounds=(0, 0.01), margin=tcp_to_obj_init, sigmoid='long_tail')
        button_pressed = reward_utils.tolerance(obj_to_target, bounds=(0, 0.005), margin=self._obj_to_target_init, sigmoid='long_tail')
        reward = (5 * reward_utils.hamacher_product(tcp_closed, near_button))
        if (tcp_to_obj <= 0.03):
            reward += (5 * button_pressed)
        return (reward, tcp_to_obj, obs[3], obj_to_target, near_button, button_pressed)
class Solution(object):
    def largestDivisibleSubset(self, nums):
        """Return the largest subset of *nums* in which every pair (a, b)
        satisfies a % b == 0 or b % a == 0 (LeetCode 368).

        DP over sorted values: subsets[x] is the best divisible subset whose
        largest element is x.  The sentinel key -1 divides every number
        (n % -1 == 0 in Python), so every number has at least one candidate.
        """
        subsets = {(- 1): set()}
        for num in sorted(nums):
            # Every subset whose largest element divides num can be extended.
            candidates = [subsets[key] for key in subsets if (num % key) == 0]
            subsets[num] = (max(candidates, key=len) | {num})
        # Bug fix: the original ended with a bare `return` (always None);
        # return the largest subset found (empty list for empty input).
        return list(max(subsets.values(), key=len))
class Vgg19(vgg19.Vgg19):
    """VGG-19 feature extractor fed with a single-channel ('gray') input
    that is replicated into three mean-subtracted channels."""

    def build(self, gray, train=False):
        """Build the VGG-19 graph on *gray*.

        Args:
            gray: input tensor; values are mapped from [-0.5, 0.5] back to
                [0, 255] before mean subtraction (channel axis is 3).
            train: unused; kept for interface compatibility.

        Returns:
            dict mapping layer name ('conv1_1', 'pool1', ...) to its
            activation tensor.

        Raises:
            ValueError: if a layer name is neither conv* nor pool*.
        """
        start_time = time.time()
        print('build model started')
        net = dict()
        with tf.variable_scope('vgg19', reuse=tf.AUTO_REUSE):
            # Map gray input back to [0, 255] and replicate it into three
            # mean-subtracted channels, matching VGG's BGR preprocessing.
            gray_scaled = ((gray + 0.5) * 255.0)
            bgr = tf.concat([(gray_scaled - VGG_MEAN[0]), (gray_scaled - VGG_MEAN[1]), (gray_scaled - VGG_MEAN[2])], axis=3)
            layer_names = ['conv1_1', 'conv1_2', 'pool1', 'conv2_1', 'conv2_2', 'pool2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'pool3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'pool4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4', 'pool5']
            activation = bgr
            for layer in layer_names:
                if layer.startswith('conv'):
                    net[layer] = self.conv_layer(activation, layer)
                elif layer.startswith('pool'):
                    net[layer] = self.avg_pool(activation, layer)
                else:
                    # Bug fix: `raise 'Error!'` raises a string, which is a
                    # TypeError in Python 3 -- raise a real exception.
                    raise ValueError('Unknown layer name: {}'.format(layer))
                activation = net[layer]
        # Bug fix: this print was placed after the return and was unreachable.
        print('build model finished: {}s'.format(time.time() - start_time))
        return net
def test_simple():
    """get_callable_shape must capture positional-only, pos-or-kw, kw-only
    params, defaults, required-ness, **kwargs typing and the return slot."""
    def foo(a: int, /, b: int, c: str='', *, d: int=0, e: str, **kwargs: int) -> int:
        pass
    # ANY matches the 'original' parameter objects; output is None because
    # foo's return is not introspected into fields here.
    assert (get_callable_shape(foo) == Shape(input=InputShape(fields=(InputField(id='a', type=int, default=NoDefault(), metadata=MappingProxyType({}), original=ANY, is_required=True), InputField(id='b', type=int, default=NoDefault(), metadata=MappingProxyType({}), original=ANY, is_required=True), InputField(id='c', type=str, default=DefaultValue(value=''), metadata=MappingProxyType({}), original=ANY, is_required=False), InputField(id='d', type=int, default=DefaultValue(value=0), metadata=MappingProxyType({}), original=ANY, is_required=False), InputField(id='e', type=str, default=NoDefault(), metadata=MappingProxyType({}), original=ANY, is_required=True)), overriden_types=frozenset({'a', 'b', 'c', 'd', 'e'}), params=(Param(field_id='a', name='a', kind=ParamKind.POS_ONLY), Param(field_id='b', name='b', kind=ParamKind.POS_OR_KW), Param(field_id='c', name='c', kind=ParamKind.POS_OR_KW), Param(field_id='d', name='d', kind=ParamKind.KW_ONLY), Param(field_id='e', name='e', kind=ParamKind.KW_ONLY)), kwargs=ParamKwargs(type=int), constructor=foo), output=None))
class Light_estimation(nn.Module):
    """Predict light colour (5 values) and direction (8 values) from an image.

    A pretrained DenseNet-121 backbone feeds two parallel MLP heads:
    1024 -> 512 -> 128 -> 5 for colour and 1024 -> 512 -> 128 -> 8 for
    direction.
    """

    def __init__(self):
        super(Light_estimation, self).__init__()
        # Pretrained DenseNet-121, feature extractor only.
        self.dense = models.densenet121(pretrained=True).features
        self.pool = nn.AvgPool2d(8)
        # Colour head.
        self.color1 = nn.Linear(1024, 512, bias=False)
        self.relu1 = nn.ReLU()
        self.color2 = nn.Linear(512, 128, bias=False)
        self.relu2 = nn.ReLU()
        self.color3 = nn.Linear(128, 5, bias=True)
        # Direction head.
        self.dir1 = nn.Linear(1024, 512, bias=False)
        self.relu3 = nn.ReLU()
        self.dir2 = nn.Linear(512, 128, bias=False)
        self.relu4 = nn.ReLU()
        self.dir3 = nn.Linear(128, 8, bias=True)

    def forward(self, x):
        """Return (colour, direction) predictions for input batch *x*."""
        feats = self.pool(self.dense(x)).squeeze()
        colour = self.color3(self.relu2(self.color2(self.relu1(self.color1(feats)))))
        direction = self.dir3(self.relu4(self.dir2(self.relu3(self.dir1(feats)))))
        return (colour, direction)
def test_poetry_with_explicit_pypi_and_other(fixture_dir: FixtureDirGetter, with_simple_keyring: None) -> None:
    """A project with an explicit PyPI source plus one other source should
    expose 1 default repository and 2 in total, without emitting errors."""
    io = BufferedIO()
    poetry = Factory().create_poetry(fixture_dir('with_explicit_pypi_and_other'), io=io)
    # Explicit sources are excluded from the default pool but still reachable.
    assert (len(poetry.pool.repositories) == 1)
    assert (len(poetry.pool.all_repositories) == 2)
    error = io.fetch_error()
    assert (error == '')
class CustomErrorCodePlugin(Plugin):
    """Test mypy plugin that reports CUSTOM_ERROR on any function named main."""

    def get_function_hook(self, fullname: str) -> (Callable[([FunctionContext], Type)] | None):
        # Hook only fully-qualified names ending in '.main'.
        if fullname.endswith('.main'):
            return self.emit_error
        return None

    def emit_error(self, ctx: FunctionContext) -> Type:
        """Report the custom error and return Any so analysis continues."""
        ctx.api.fail('Custom error', ctx.context, code=CUSTOM_ERROR)
        return AnyType(TypeOfAny.from_error)
def prune_completely_outside_window(boxlist, window, scope=None):
    """Remove boxes that lie entirely outside *window*.

    Args:
        boxlist: BoxList whose coordinates are [y_min, x_min, y_max, x_max].
        window: tensor [win_y_min, win_x_min, win_y_max, win_x_max].
        scope: optional name scope.

    Returns:
        (pruned_boxlist, valid_indices) -- the surviving boxes and their
        indices into the original boxlist.
    """
    with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
        (y_min, x_min, y_max, x_max) = tf.split(value=boxlist.get(), num_or_size_splits=4, axis=1)
        (win_y_min, win_x_min, win_y_max, win_x_max) = tf.unstack(window)
        # A box is fully outside iff it starts past the window's far edge or
        # ends before its near edge, on either axis.
        coordinate_violations = tf.concat([tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)], 1)
        valid_indices = tf.reshape(tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [(- 1)])
        return (gather(boxlist, valid_indices), valid_indices)
class FireUnit(nn.Module):
    """SqueezeNet Fire block: a 1x1 squeeze conv feeding parallel 1x1 and
    3x3 expand convs whose outputs are concatenated channel-wise.

    When ``residual`` is true the block input is added back to the
    concatenated output (shapes must match for this to work).
    """

    def __init__(self, in_channels, squeeze_channels, expand1x1_channels, expand3x3_channels, residual):
        super(FireUnit, self).__init__()
        self.residual = residual
        self.squeeze = FireConv(in_channels=in_channels, out_channels=squeeze_channels, kernel_size=1, padding=0)
        self.expand1x1 = FireConv(in_channels=squeeze_channels, out_channels=expand1x1_channels, kernel_size=1, padding=0)
        self.expand3x3 = FireConv(in_channels=squeeze_channels, out_channels=expand3x3_channels, kernel_size=3, padding=1)

    def forward(self, x):
        shortcut = x if self.residual else None
        squeezed = self.squeeze(x)
        # Concatenate the two expand branches along the channel axis.
        merged = torch.cat((self.expand1x1(squeezed), self.expand3x3(squeezed)), dim=1)
        if shortcut is not None:
            merged = merged + shortcut
        return merged
def test_L1_bit_selection():
    """A 1-bit selection on the input port should lower to a PartSelection
    of in_[0:1] connected to out."""
    dut = CaseConnectBitSelToOutComp.DUT()
    dut.elaborate()
    dut.apply(StructuralRTLIRGenL1Pass(gen_connections(dut)))
    conns = dut.get_metadata(StructuralRTLIRGenL1Pass.connections)
    comp = sexp.CurComp(dut, 's')
    expected = [
        (sexp.PartSelection(sexp.CurCompAttr(comp, 'in_'), 0, 1),
         sexp.CurCompAttr(comp, 'out')),
    ]
    assert conns == expected
def tensor4(name: Optional[str]=None, *, dtype: Optional['DTypeLike']=None, shape: Optional[tuple[(ST, ST, ST, ST)]]=(None, None, None, None)) -> 'TensorVariable':
    """Create a 4-dimensional tensor variable.

    When ``dtype`` is not supplied, ``config.floatX`` is used; the static
    ``shape`` is validated to have exactly four entries.
    """
    chosen_dtype = config.floatX if dtype is None else dtype
    static_shape = _validate_static_shape(shape, ndim=4)
    # Build the type and immediately instantiate a variable of it.
    return TensorType(chosen_dtype, shape=static_shape)(name)
class TransformerEncoderLayer(nn.Module):
    """Transformer encoder layer (fairseq-style).

    Depending on ``args.encoder_lstm_san`` the token-mixing sublayer is
    either a bidirectional LSTM projected back to ``embed_dim`` or
    standard multi-head self-attention.  The second sublayer is the usual
    position-wise feed-forward network.  Layer norm runs before or after
    each sublayer according to ``args.encoder_normalize_before``.
    """

    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        # When set, replace self-attention with a BiLSTM variant.
        self.lstm_san = args.encoder_lstm_san
        if self.lstm_san:
            self.self_attn = nn.LSTM(input_size=self.embed_dim, hidden_size=self.embed_dim, bidirectional=True, dropout=args.attention_dropout)
            # The bidirectional LSTM emits 2*embed_dim; project back down.
            self.lstm_fc = Linear((self.embed_dim * 2), self.embed_dim)
            self.lstm_activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
        else:
            self.self_attn = MultiheadAttention(self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if (self.activation_dropout == 0):
            # Backwards compatibility: older configs named this relu_dropout.
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy ``layer_norms.{0,1}`` checkpoint keys in place to
        the current ``self_attn_layer_norm``/``final_layer_norm`` names."""
        layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
        for (old, new) in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if (k in state_dict):
                    state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
                    del state_dict[k]

    def forward(self, x, encoder_padding_mask, attn_mask=None):
        """Run one encoder layer; output has the same shape as ``x``.

        Args:
            x: input features — presumably (seq_len, batch, embed_dim)
                given the LSTM/attention usage; TODO confirm with callers.
            encoder_padding_mask: padding mask passed to self-attention
                (unused on the LSTM path).
            attn_mask: optional additive attention mask.
        """
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        if (attn_mask is not None):
            # NOTE(review): this fills masked positions with -0.0, which is
            # a numerical no-op for an additive mask (comparable fairseq
            # code uses -1e8 here).  Also `attn_mask` is never passed to
            # self_attn below, so it currently has no effect — confirm.
            attn_mask = attn_mask.masked_fill(attn_mask.bool(), (- .0))
        if self.lstm_san:
            (x, _) = self.self_attn(x)
            x = self.lstm_activation_fn(self.lstm_fc(x))
        else:
            (x, _) = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        """Apply ``layer_norm`` pre- or post-sublayer depending on
        ``self.normalize_before``; exactly one of before/after must be set."""
        assert (before ^ after)
        if (after ^ self.normalize_before):
            return layer_norm(x)
        else:
            return x
class TestChangeProperty(EndianTest):
    """Round-trip pack/unpack tests for the X11 ChangeProperty request.

    Each ``req_args_N``/``req_bin_N`` pair holds the keyword arguments for
    one request and its expected wire encoding; testPackRequestN checks
    args -> bytes and testUnpackRequestN checks bytes -> args.

    NOTE(review): several dict values below ('property', 'type', 'window')
    are missing, leaving syntactically invalid literals — they appear to
    have been stripped from this copy of the file (likely generated
    atom/window ids).  Restore them from the test generator before running.
    """

    def setUp(self):
        # Fixtures cover 8-, 16- and 32-bit formats and modes 0/1/2.
        self.req_args_0 = {'data': (8, b''), 'mode': 0, 'property': , 'type': , 'window': }
        self.req_bin_0 = b'\x12\x00\x00\x06\x1dRr|i1\xd3\xd5\x04\x1c\xdcQ\x08\x00\x00\x00\x00\x00\x00\x00'
        self.req_args_1 = {'data': (8, b'foo'), 'mode': 1, 'property': , 'type': , 'window': }
        self.req_bin_1 = b'\x12\x01\x00\x073Z^-"FUO\x08y\\x08\x00\x00\x00\x00\x00\x00\x03foo\x00'
        self.req_args_2 = {'data': (8, b'zoom'), 'mode': 0, 'property': , 'type': , 'window': }
        self.req_bin_2 = b'\x12\x00\x00\x07t\xe2\xa4\xf0x\xb2C\xf2H\x9a[\x1f\x08\x00\x00\x00\x00\x00\x00\x04zoom'
        self.req_args_3 = {'data': (16, []), 'mode': 2, 'property': , 'type': , 'window': }
        self.req_bin_3 = b'\x12\x02\x00\x06(H]k\x1b8X\xb7S\xe6j\n\x10\x00\x00\x00\x00\x00\x00\x00'
        self.req_args_4 = {'data': (16, [1, 2, 3]), 'mode': 1, 'property': , 'type': , 'window': }
        self.req_bin_4 = b'\x12\x01\x00\x082\xa5\x10\xe8q>L&u\x10\xe52\x10\x00\x00\x00\x00\x00\x00\x03\x00\x01\x00\x02\x00\x03\x00\x00'
        self.req_args_5 = {'data': (16, [1, 2, 3, 4]), 'mode': 2, 'property': , 'type': , 'window': }
        self.req_bin_5 = b'\x12\x02\x00\x08:\xbc\xa8T\x12J\x9d\xc1!\xe8\x97\xef\x10\x00\x00\x00\x00\x00\x00\x04\x00\x01\x00\x02\x00\x03\x00\x04'
        self.req_args_6 = {'data': (32, []), 'mode': 0, 'property': , 'type': , 'window': }
        self.req_bin_6 = b'\x12\x00\x00\x06\x1f\x92}|_\\\xcc\x8cG\xdb\xe9= \x00\x00\x00\x00\x00\x00\x00'
        self.req_args_7 = {'data': (32, [1, 2, 3]), 'mode': 2, 'property': , 'type': , 'window': }
        self.req_bin_7 = b'\x12\x02\x00\t!\xa2\x84^_\x9f&\x03J\xd4\xb6\xbc \x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03'

    # Packing: build the request from args and compare to the fixture bytes.
    def testPackRequest0(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    # Unpacking: parse the fixture bytes and compare to the fixture args.
    def testUnpackRequest0(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

    def testPackRequest1(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_1)
        self.assertBinaryEqual(bin, self.req_bin_1)

    def testUnpackRequest1(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_1, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_1)

    def testPackRequest2(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_2)
        self.assertBinaryEqual(bin, self.req_bin_2)

    def testUnpackRequest2(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_2, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_2)

    def testPackRequest3(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_3)
        self.assertBinaryEqual(bin, self.req_bin_3)

    def testUnpackRequest3(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_3, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_3)

    def testPackRequest4(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_4)
        self.assertBinaryEqual(bin, self.req_bin_4)

    def testUnpackRequest4(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_4, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_4)

    def testPackRequest5(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_5)
        self.assertBinaryEqual(bin, self.req_bin_5)

    def testUnpackRequest5(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_5, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_5)

    def testPackRequest6(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_6)
        self.assertBinaryEqual(bin, self.req_bin_6)

    def testUnpackRequest6(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_6, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_6)

    def testPackRequest7(self):
        bin = request.ChangeProperty._request.to_binary(*(), **self.req_args_7)
        self.assertBinaryEqual(bin, self.req_bin_7)

    def testUnpackRequest7(self):
        (args, remain) = request.ChangeProperty._request.parse_binary(self.req_bin_7, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_7)
def test_base_history(base_app):
    """The history command lists prior commands and filters them by a
    string prefix; the last-result counter reflects the filtered count."""
    run_cmd(base_app, 'help')
    run_cmd(base_app, 'shortcuts')
    # Unfiltered history shows both commands in order.
    output, _ = run_cmd(base_app, 'history')
    assert output == normalize('\n    1  help\n    2  shortcuts\n')
    # Filtering by 'he' keeps only 'help'.
    output, _ = run_cmd(base_app, 'history he')
    assert output == normalize('\n    1  help\n')
    verify_hi_last_result(base_app, 1)
    # Filtering by 'sh' keeps only 'shortcuts'.
    output, _ = run_cmd(base_app, 'history sh')
    assert output == normalize('\n    2  shortcuts\n')
    verify_hi_last_result(base_app, 1)
def convert_urdf(file: Path, urdf_dir: Path, out_dir: Path):
    """Convert every mesh referenced by a URDF from .obj to .glb and write
    an updated URDF under ``out_dir``.

    Args:
        file: path of the URDF file to convert.
        urdf_dir: root directory of the URDF tree (used to compute the
            output-relative path).
        out_dir: destination root for the rewritten URDF.

    Side effects: writes one .glb next to each source .obj, writes the
    updated URDF, and prints a completion line.
    """
    tree = ET.parse(file)
    root = tree.getroot()
    for mesh_node in root.findall('.//mesh'):
        obj_file = mesh_node.attrib['filename']
        obj_file = str(Path(obj_file).with_suffix('.obj'))
        glb_file = str(Path(obj_file).with_suffix('.glb'))
        mesh = trimesh.load(obj_file)
        mesh.export(file_type='glb', file_obj=glb_file)
        assert os.path.isfile(glb_file)
        # Point the URDF at the converted mesh.
        mesh_node.set('filename', glb_file)
    # Fix: os.path.commonprefix is character-based and can return a prefix
    # that is not a directory boundary (e.g. '/data/rob' for '/data/robot1'
    # and '/data/robots'), which would make relative_to() fail or mis-split.
    # commonpath always returns a valid common directory.
    common_root = os.path.commonpath([urdf_dir, file])
    rel_file = file.relative_to(common_root)
    out_file = (out_dir / rel_file)
    tree.write(str(out_file))
    print('Completed', out_file)
def _node_to_pattern(node):
if hasattr(node.op, 'connection_pattern'):
connection_pattern = node.op.connection_pattern(node)
if (not isinstance(connection_pattern, list)):
raise TypeError((('Op.connection_pattern should return ' + f'list of list of bool, but for Op={node.op}') + f'got {connection_pattern} with type {type(connection_pattern)}.'))
if (len(connection_pattern) != len(node.inputs)):
raise ValueError((f'{node.op}.connection_pattern should have {len(node.inputs)}' + f' rows but has {len(connection_pattern)}.'))
for (ii, output_pattern) in enumerate(connection_pattern):
if (not isinstance(output_pattern, list)):
raise TypeError(((f'{node.op}.connection_pattern should return' + f' a list of lists, but element {int(ii)}') + f'is {output_pattern} of type {type(output_pattern)}.'))
else:
connection_pattern = [[True for output in node.outputs] for ipt in node.inputs]
assert isinstance(connection_pattern, list)
assert (len(connection_pattern) == len(node.inputs))
for ii in range(len(node.inputs)):
assert isinstance(connection_pattern[ii], list)
assert (len(connection_pattern[ii]) == len(node.outputs))
return connection_pattern |
def test_repeated_show():
    """show_object() must be idempotent: calling it repeatedly leaves both
    the camera width and the scene's bounding sphere unchanged."""
    sphere = gfx.Mesh(gfx.sphere_geometry(), gfx.MeshPhongMaterial())
    camera = gfx.PerspectiveCamera()
    scene = gfx.Scene()
    scene.add(sphere, camera.add(gfx.DirectionalLight()))
    camera.show_object(scene)
    width_before = camera.width
    radius_before = scene.get_world_bounding_sphere()[3]
    for _ in range(9):
        camera.show_object(scene)
    assert width_before == camera.width
    assert radius_before == scene.get_world_bounding_sphere()[3]
# NOTE(review): the line below looks like a mangled registration decorator —
# presumably ``@HEADS.register_module()`` applied to the class; as written it
# is a bare call whose result is discarded.  Confirm against the original.
_HEADS.register_module()
class ResLayer(nn.Module):
    """A single ResNet stage used as a standalone head backbone.

    Builds ``_ResLayer`` for the chosen ``stage`` of the ResNet variant
    selected by ``depth`` and registers it as ``layer{stage+1}``.
    """

    def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None):
        super(ResLayer, self).__init__()
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.stage = stage
        self.fp16_enabled = False
        # Look up the block type and per-stage block counts for this depth.
        (block, stage_blocks) = ResNet.arch_settings[depth]
        stage_block = stage_blocks[stage]
        # Standard ResNet channel widths: 64 * 2**stage out, scaled by the
        # block expansion for the input of this stage.
        planes = (64 * (2 ** stage))
        inplanes = ((64 * (2 ** (stage - 1))) * block.expansion)
        res_layer = _ResLayer(block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn)
        self.add_module(f'layer{(stage + 1)}', res_layer)

    def init_weights(self, pretrained=None):
        """Load a checkpoint when ``pretrained`` is a path; otherwise apply
        Kaiming init to convs and constant init to batch norms."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    # NOTE(review): the call below looks like a mangled ``@auto_fp16()``
    # decorator that belongs on ``forward``; as written it calls an
    # undefined name and would raise at import time — confirm and restore.
    _fp16()
    def forward(self, x):
        """Run the input through the stage's residual layer."""
        res_layer = getattr(self, f'layer{(self.stage + 1)}')
        out = res_layer(x)
        return out

    def train(self, mode=True):
        """Switch train/eval mode; with ``norm_eval`` set, keep BatchNorm
        layers in eval mode even while training."""
        super(ResLayer, self).train(mode)
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
# NOTE(review): the three lines below look like mangled decorators — the
# leading '@settings', '@given' and '@pytest.mark' tokens appear to have
# been lost in extraction.  Presumably:
#   @settings(suppress_health_check=..., deadline=...)
#   @given(args=st.lists(...))
#   @pytest.mark.filterwarnings(...)
# Restore them before running; as written these lines are invalid syntax.
(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=timedelta(seconds=10))
(args=st.lists(st.integers(min_value=0, max_value=1), max_size=2))
.filterwarnings('ignore:.*:pytest.PytestUnraisableExceptionWarning')
def test_as_completed(ray_context, args):
    """Tasks submitted with descending sleep times should be yielded by
    as_completed in ascending (completion) order."""
    args = sorted(args, reverse=True)
    expected = sorted(args)
    actual = list(parallel.as_completed([remote_sleep.remote(i) for i in args]))
    assert (expected == actual)
# NOTE(review): the line below is missing its '@pytest.mark' prefix —
# presumably ``@pytest.mark.parametrize(...)``; restore before running.
.parametrize('setting,expect_value', [(None, None), ('1', False), ('0', False)])
def test_no_color_env_var(runner, monkeypatch, setting, expect_value, boxed_context, in_tmp_dir, tmp_path):
    """NO_COLOR set to any value disables color (per the no-color.org
    convention, so '0' also disables); unset leaves the decision at None."""
    if (setting is None):
        monkeypatch.delenv('NO_COLOR', raising=False)
    else:
        monkeypatch.setenv('NO_COLOR', setting)
    touch_files(tmp_path, 'foo.json')
    runner.invoke(cli_main, ['--schemafile', 'schema.json', 'foo.json'])
    assert (boxed_context.ref.color == expect_value)
class AstroidIndexError(AstroidError):
    """Raised when an index lookup on a node or instance fails.

    Carries the offending node, the subscript index and the inference
    context so callers can report where the failure occurred.
    """

    def __init__(
        self,
        message: str = '',
        node: ((nodes.NodeNG | bases.Instance) | None) = None,
        index: (nodes.Subscript | None) = None,
        context: (InferenceContext | None) = None,
        **kws: Any,
    ) -> None:
        # Record the failing location before delegating message handling
        # to the base exception class.
        self.node = node
        self.index = index
        self.context = context
        super().__init__(message, **kws)
def run(kubeconfig_path, scenario, pre_action_output=''):
    """Run a scenario command/script and optionally verify its output.

    Args:
        kubeconfig_path: unused in this function; kept for interface
            compatibility with other scenario runners — TODO confirm.
        scenario: path/command to run. ``.yaml``/``.yml`` files are
            rejected (powerfulseal removed); ``.py`` files run under
            python3; any other non-empty string runs as a shell command.
        pre_action_output: when non-empty, the post-action output must
            match it exactly.

    Returns:
        The stripped command output ('' when nothing ran), or False when
        the post-action check fails.
    """
    # Fix: action_output was previously unbound when scenario was '' or a
    # yaml file, so the final return raised UnboundLocalError.
    action_output = ''
    if scenario.endswith('.yaml') or scenario.endswith('.yml'):
        logging.error('Powerfulseal support has recently been removed. Please switch to using plugins instead.')
    elif scenario.endswith('.py'):
        action_output = runcommand.invoke(('python3 ' + scenario)).strip()
        if pre_action_output:
            if (pre_action_output == action_output):
                logging.info((scenario + ' post action checks passed'))
            else:
                logging.info((scenario + ' post action response did not match pre check output'))
                logging.info((('Pre action output: ' + str(pre_action_output)) + '\n'))
                logging.info(('Post action output: ' + str(action_output)))
                return False
    elif (scenario != ''):
        action_output = runcommand.invoke(scenario).strip()
        if pre_action_output:
            if (pre_action_output == action_output):
                logging.info((scenario + ' post action checks passed'))
            else:
                logging.info((scenario + ' post action response did not match pre check output'))
                return False
    return action_output
class CalendarWrapperTests(unittest.TestCase):
    """Tests for pywinauto's CalendarWrapper against the month-calendar
    control in the MFC CmnCtrl1.exe sample application.

    The hit-test cases locate calendar sub-areas by dividing the control
    rectangle with empirically chosen coefficients; the class constants
    name the magic numbers for the calendar background and title areas.
    """

    NO_HOLIDAYS_IN_MONTH = 0
    # Empirical divisors/offsets used to compute hit-test coordinates.
    CALENDARBK_WIDTH_COEFF = 9
    CALENDARBK_HEIGHT_OFFSET = 112
    TITLEBK_WIDTH_COEFF = 2
    TITLEBK_HEIGHT_COEFF = 26.67

    def setUp(self):
        """Launch the sample app, open the calendar tab and cache the
        control's rectangle for coordinate calculations."""
        self.app = Application().start(os.path.join(mfc_samples_folder, u'CmnCtrl1.exe'))
        self.dlg = self.app.Common_Controls_Sample
        self.dlg.TabControl.select(4)
        self.calendar = self.app.Common_Controls_Sample.CalendarWrapper
        rect = self.app['Common Controls Sample']['Calendar'].rectangle()
        self.width = rect.width()
        self.height = rect.height()

    def tearDown(self):
        """Close the sample application."""
        self.app.kill()

    # --- current date / view / border ------------------------------------
    def test_can_get_current_date_from_calendar(self):
        date = self.calendar.get_current_date()
        self.assert_actual_time_is_equal_to_expect_date_time(date, datetime.date.today())

    def test_runtime_error_when_try_to_get_current_date_from_calendar_if_calendar_state_is_multiselect(self):
        self._set_calendar_state_into_multiselect()
        self.assertRaises(RuntimeError, self.calendar.get_current_date)

    def test_can_set_current_date_in_calendar(self):
        self.calendar.set_current_date(2016, 4, 3, 13)
        self.assert_actual_time_is_equal_to_expect_date_time(self.calendar.get_current_date(), datetime.date(2016, 4, 13))

    def test_should_throw_runtime_error_when_try_to_set_invalid_date(self):
        self.assertRaises(RuntimeError, self.calendar.set_current_date, (- 2016), (- 4), (- 3), (- 13))

    def test_can_get_calendar_border(self):
        width = self.calendar.get_border()
        self.assertEqual(width, 4)

    def test_can_set_calendar_border(self):
        self.calendar.set_border(6)
        self.assertEqual(self.calendar.get_border(), 6)

    def test_can_get_calendars_count(self):
        count = self.calendar.count()
        self.assertEqual(count, 1)

    def test_can_get_calendars_view(self):
        view = self.calendar.get_view()
        self.assertEqual(view, 0)

    def test_should_throw_runtime_error_when_try_to_set_invalid_view(self):
        self.assertRaises(RuntimeError, self.calendar.set_view, (- 1))

    def test_can_set_calendars_view_into_month(self):
        self.calendar.set_view(win32defines.MCMV_MONTH)
        self.assertEqual(self.calendar.get_view(), win32defines.MCMV_MONTH)

    def test_can_set_calendars_view_into_years(self):
        self.calendar.set_view(win32defines.MCMV_YEAR)
        self.assertEqual(self.calendar.get_view(), win32defines.MCMV_YEAR)

    def test_can_set_calendars_view_into_decade(self):
        self.calendar.set_view(win32defines.MCMV_DECADE)
        self.assertEqual(self.calendar.get_view(), win32defines.MCMV_DECADE)

    def test_can_set_calendars_view_into_century(self):
        self.calendar.set_view(win32defines.MCMV_CENTURY)
        self.assertEqual(self.calendar.get_view(), win32defines.MCMV_CENTURY)

    # --- day states / rectangle ------------------------------------------
    def test_can_set_day_state(self):
        # Three visible months, none with holidays flagged.
        month_states = [self.NO_HOLIDAYS_IN_MONTH, self.NO_HOLIDAYS_IN_MONTH, self.NO_HOLIDAYS_IN_MONTH]
        self._set_calendar_state_to_display_day_states()
        res = self.calendar.set_day_states(month_states)
        self.assertNotEqual(0, res)

    def test_cant_set_day_state_passing_one_month_state(self):
        month_states = [self.NO_HOLIDAYS_IN_MONTH]
        self._set_calendar_state_to_display_day_states()
        self.assertRaises(RuntimeError, self.calendar.set_day_states, month_states)

    def test_can_minimize_rectangle(self):
        expected_rect = self._get_expected_minimized_rectangle()
        rect = self.calendar.calc_min_rectangle((expected_rect.left + 100), (expected_rect.top + 100), (expected_rect.right + 100), (expected_rect.bottom + 100))
        self.assertEqual(expected_rect, rect)

    def test_can_minimize_rectangle_handle_less_than_zero_values(self):
        expected_rect = self._get_expected_minimized_rectangle()
        rect = self.calendar.calc_min_rectangle((- 1), (- 1), (- 1), (- 1))
        self.assertEqual(expected_rect, rect)

    # --- hit testing (coordinates derived from the control rectangle) ----
    def test_can_determine_calendar_is_hit(self):
        x = int((self.width / self.CALENDARBK_WIDTH_COEFF))
        y = int((self.height - self.CALENDARBK_HEIGHT_OFFSET))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDAR, res)

    def test_can_determine_calendar_background_is_hit(self):
        x = int((self.width / self.CALENDARBK_WIDTH_COEFF))
        y = int((self.height - self.CALENDARBK_HEIGHT_OFFSET))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARBK, res)

    def test_can_determine_date_is_hit(self):
        x = int((self.width / 1.13))
        y = int((self.height / 1.62))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARDATE, res)

    def test_can_determine_next_month_date_is_hit(self):
        x = int((self.width / 1.14))
        y = int((self.height / 1.23))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARDATENEXT, res)

    def test_can_determine_prev_month_date_is_hit(self):
        x = int((self.width / 16))
        y = int((self.height / 2.67))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARDATEPREV, res)

    def test_can_determine_nothing_is_hit(self):
        res = self.calendar.hit_test(0, 0)
        self.assertEqual(win32defines.MCHT_NOWHERE, res)

    def test_can_determine_top_left_title_corner_is_hit(self):
        x = int((self.width / 16))
        y = int((self.height / 16))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_TITLEBTNPREV, res)

    def test_can_determine_title_is_hit(self):
        x = int((self.width / self.TITLEBK_WIDTH_COEFF))
        y = int((self.height / self.TITLEBK_HEIGHT_COEFF))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_TITLE, res)

    def test_can_determine_title_background_is_hit(self):
        x = int((self.width / self.TITLEBK_WIDTH_COEFF))
        y = int((self.height / self.TITLEBK_HEIGHT_COEFF))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_TITLEBK, res)

    def test_can_determine_top_right_title_corner_is_hit(self):
        x = int((self.width / 1.07))
        y = int((self.height / 8))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_TITLEBTNNEXT, res)

    def test_can_determine_day_abbreviation_is_hit(self):
        x = int((self.width / 5.33))
        y = int((self.height / 4))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARDAY, res)

    def test_can_determine_week_number_is_hit(self):
        self._set_calendar_state_to_display_week_numbers()
        x = int((self.width / 13.5))
        y = int((self.height / 1.7))
        res = self.calendar.hit_test(x, y)
        self.assertEqual(win32defines.MCHT_CALENDARWEEKNUM, res)

    # --- id / color / today / week / scroll rate -------------------------
    def test_should_throw_runtime_error_when_try_to_set_invalid_type_of_calendar(self):
        self.assertRaises(ValueError, self.calendar.set_id, 'Aloha!')

    def test_should_get_valid_type_of_calendar(self):
        self.assertEqual(self.calendar.get_id(), 0)

    def test_should_throw_runtime_error_when_try_to_set_invalid_type_of_place_for_color(self):
        self.assertRaises(ValueError, self.calendar.set_color, 'Aloha!', 0, 0, 0)

    def test_return_error_about_color(self):
        self.assertRaises(RuntimeError, self.calendar.set_color, 'background', (- 1), (- 1), (- 1))

    def test_return_error_when_color_hire_then_255(self):
        self.assertRaises(RuntimeError, self.calendar.set_color, 'background', 600, 600, 600)

    def test_can_get_today(self):
        date = self.calendar.get_today()
        self.assert_actual_time_is_equal_to_expect_date_time(date, datetime.date.today())

    def test_can_set_today(self):
        self.calendar.set_today(2016, 5, 1)
        self.assert_actual_time_is_equal_to_expect_date_time(self.calendar.get_today(), datetime.date(2016, 5, 1))

    def test_can_set_and_get_first_day_of_week(self):
        self.calendar.set_first_weekday(4)
        self.assertEqual((True, 4), self.calendar.get_first_weekday())

    def test_can_get_default_scroll_rate(self):
        actual_rate = 1
        self.assertEqual(actual_rate, self.calendar.get_month_delta())

    def test_can_set_scroll_rate(self):
        actual_rate = 4
        self.calendar.set_month_delta(actual_rate)
        self.assertEqual(actual_rate, self.calendar.get_month_delta())

    def test_should_throw_value_error_when_try_to_set_incorrect_scroll_rate(self):
        self.assertRaises(ValueError, self.calendar.set_month_delta, (- 1))

    # --- month ranges -----------------------------------------------------
    def test_can_get_month_range_when_calendars_view_into_month(self):
        self.calendar.set_current_date(2017, 5, 2, 2)
        exp_range = 1
        start_month = datetime.date(2017, 5, 1)
        end_month = datetime.date(2017, 5, 31)
        self._check_month_range(exp_range, start_month, end_month)

    def test_can_get_month_range_when_calendars_view_into_years(self):
        self.calendar.set_current_date(2017, 5, 2, 2)
        self.calendar.set_view(win32defines.MCMV_YEAR)
        exp_range = 12
        start_month = datetime.date(2017, 1, 1)
        end_month = datetime.date(2017, 12, 31)
        self._check_month_range(exp_range, start_month, end_month)

    def test_can_get_month_range_with_include_preceding_and_trailing_months(self):
        self.calendar.set_current_date(2017, 5, 2, 2)
        res = self.calendar.get_month_range(win32defines.GMR_DAYSTATE)
        (range_months, system_time) = res[:2]
        # GMR_DAYSTATE also includes the partially visible adjacent months.
        exp_range = 3
        start_month = datetime.date(2017, 4, 24)
        end_month = datetime.date(2017, 6, 4)
        self.assertEqual(range_months, exp_range)
        self.assertEqual(system_time[0].wYear, start_month.year)
        self.assertEqual(system_time[0].wMonth, start_month.month)
        self.assertEqual(system_time[1].wYear, end_month.year)
        self.assertEqual(system_time[1].wMonth, end_month.month)

    def test_should_throw_value_error_when_try_to_get_month_range_and_scope_of_range_is_incorrect(self):
        self.assertRaises(ValueError, self.calendar.get_month_range, (- 1))

    # --- helpers ----------------------------------------------------------
    def _check_month_range(self, exp_range, start_month, end_month):
        """Assert the visible month range matches the expected count and
        start/end dates."""
        res = self.calendar.get_month_range(win32defines.GMR_VISIBLE)
        (range_months, system_time) = res[:2]
        self.assertEqual(range_months, exp_range)
        self.assert_actual_time_is_equal_to_expect_date_time(system_time[0], start_month)
        self.assert_actual_time_is_equal_to_expect_date_time(system_time[1], end_month)

    def assert_actual_time_is_equal_to_expect_date_time(self, actual_date, expect_date):
        """Compare a Win32 SYSTEMTIME-like value against a datetime.date."""
        self.assertEqual(actual_date.wYear, expect_date.year)
        self.assertEqual(actual_date.wMonth, expect_date.month)
        self.assertEqual(actual_date.wDay, expect_date.day)

    def _get_expected_minimized_rectangle(self):
        """RECT covering exactly the calendar control's size at origin."""
        expected_rect = win32structures.RECT()
        expected_rect.left = 0
        expected_rect.top = 0
        expected_rect.right = self.width
        expected_rect.bottom = self.height
        return expected_rect

    def _set_calendar_state_to_display_day_states(self):
        """Toggle the MCS_DAYSTATE style via the sample dialog checkbox."""
        self.app['Common Controls Sample']['MCS_DAYSTATE'].click()

    def _set_calendar_state_to_display_week_numbers(self):
        """Toggle the MCS_WEEKNUMBERS style via the sample dialog checkbox."""
        self.app['Common Controls Sample']['MCS_WEEKNUMBERS'].click()

    def _set_calendar_state_into_multiselect(self):
        """Toggle the MCS_MULTISELECT style via the sample dialog checkbox."""
        self.app['Common Controls Sample']['MCS_MULTISELECT'].click()
# NOTE(review): the line below is missing its '@then' decorator prefix —
# presumably ``@then('document.tables is a list containing three tables')``
# (a behave step binding); restore before running.
('document.tables is a list containing three tables')
def then_document_tables_is_a_list_containing_three_tables(context):
    """Behave step: the document's tables property is a list of exactly
    three Table instances."""
    document = context.document
    tables = document.tables
    assert isinstance(tables, list)
    assert (len(tables) == 3)
    for table in tables:
        assert isinstance(table, Table)
class VBObject(AObject):
    """AObject specialisation that carries a dictionary of lazily computed
    features (an AFuncDict) alongside the base object data.

    Most methods are thin delegations to ``self.features``; subclasses
    populate ``FEATURE_FUNCS`` with name -> compute-function entries.
    """

    FEATURE_FUNCS = {}

    def __init__(self, path=None):
        AObject.__init__(self, path=path)

    def initializeBlank(self):
        """Initialise a blank object: tag its type and attach an empty
        feature dict wired to this class's FEATURE_FUNCS."""
        AObject.initializeBlank(self)
        self.a_info.update({'VBObjectType': self.VBOBJECT_TYPE()})
        self.features = AFuncDict(owner=self, name='features')
        self.features.functions.update(self.FEATURE_FUNCS)

    # --- persistence (delegate to the feature dict) ----------------------
    def saveFeature(self, name, path):
        return self.features.saveEntry(name=name, path=path)

    def saveFeatures(self, path):
        return self.features.save(path=path)

    def loadFeature(self, name, path):
        return self.features.loadEntry(name=name, path=path)

    def loadFeatures(self, path):
        return self.features.load(path=path)

    # --- feature access ---------------------------------------------------
    def getFeature(self, name, force_recompute=False, **kwargs):
        """Return a feature value, computing it with ``kwargs`` as the
        parameters when needed."""
        # Guard against callers still passing params= instead of kwargs.
        assert (not kwargs.get('params')), 'STILL TRYING TO USE PARAMS INSTEAD OF KWARGS. FIX THIS'
        return self.features.getValue(name=name, params=kwargs, force_recompute=force_recompute)

    def getFeatureEntry(self, name, params=None, force_recompute=False):
        return self.features.getEntry(name=name, params=params, force_recompute=force_recompute)

    def getFeatureParams(self, name):
        return self.features.getParams(name=name)

    def setFeature(self, name, value, params=None):
        """Store a feature value and mark its entry as modified."""
        entry = dict(value=value, params=params)
        rval = self.features.setEntry(name=name, d=entry)
        self.features.setEntryModified(name=name, is_modified=True)
        return rval

    def removeFeature(self, name, assert_if_absent=True, set_modified=True):
        self.features.removeEntry(name=name, assert_if_absent=assert_if_absent, set_modified=set_modified)

    def hasFeature(self, name):
        return self.features.hasEntry(name=name)

    def getFeatureFunction(self, feature_name):
        return self.features.getFunction(name=feature_name)

    def getFeaturesList(self):
        return self.features.getKeyList()

    def getFeatureFunctionsList(self):
        return self.features.getFunctionList()

    def clearFeatureFiles(self, features_to_clear=None, **kwargs):
        """Delete on-disk feature files via the configured hook, warning
        when no hook has been provided."""
        if not self.clear_feature_files_func:
            VBWARN('CLEAR FEATURE FILES FUNCTION HAS NOT BEEN PROVIDED FOR {} INSTANCE'.format(self.VBOBJECT_TYPE()))
            return
        self.clear_feature_files_func(self, features_to_clear=features_to_clear, **kwargs)

    # --- type tags --------------------------------------------------------
    def AOBJECT_TYPE(self):
        return 'VBObject'

    def VBOBJECT_TYPE(self):
        return self.AOBJECT_TYPE()
def test_describe_evaluated_once(testdir):
    """A describe block's body must run exactly once even though it
    contains several tests and a nested describe."""
    source = '\n        count = 0\n        def describe_is_evaluated_only_once():\n            global count\n            count += 1\n            def one():\n                assert count == 1\n            def two():\n                assert count == 1\n            def describe_nested():\n                def three():\n                    assert count == 1\n    '
    testdir.makepyfile(source)
    outcome = testdir.runpytest('-v')
    outcome.assert_outcomes(passed=3)
class TabHistoryItem():
    """One entry in a tab's navigation history."""

    def __init__(self, url, title, *, original_url=None, active=False, user_data=None, last_visited=None):
        """Store the entry's URL, title and metadata.

        When no ``original_url`` (pre-redirect request URL) is given, it
        defaults to the final ``url``.
        """
        self.url = url
        self.original_url = url if original_url is None else original_url
        self.title = title
        self.active = active
        self.user_data = user_data
        self.last_visited = last_visited

    def __repr__(self):
        return utils.get_repr(self, constructor=True, url=self.url, original_url=self.original_url, title=self.title, active=self.active, user_data=self.user_data, last_visited=self.last_visited)
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
    """Tests for box_predictor_builder's convolutional box predictor path.

    Verifies that the builder forwards conv hyperparams through the given
    argscope function and that proto fields (and their defaults) end up on
    the constructed predictor object.
    """

    def test_box_predictor_calls_conv_argscope_fn(self):
        """The builder must pass the parsed conv hyperparams and the
        is_training flag through the supplied argscope function."""
        conv_hyperparams_text_proto = '\n      regularizer {\n        l1_regularizer {\n          weight: 0.0003\n        }\n      }\n      initializer {\n        truncated_normal_initializer {\n          mean: 0.0\n          stddev: 0.3\n        }\n      }\n      activation: RELU_6\n    '
        hyperparams_proto = hyperparams_pb2.Hyperparams()
        text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
        # Identity stand-in: returns its inputs so the test can inspect
        # exactly what the builder passed through.
        def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
            return (conv_hyperparams_arg, is_training)
        box_predictor_proto = box_predictor_pb2.BoxPredictor()
        box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(hyperparams_proto)
        box_predictor = box_predictor_builder.build(argscope_fn=mock_conv_argscope_builder, box_predictor_config=box_predictor_proto, is_training=False, num_classes=10)
        (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
        self.assertAlmostEqual(hyperparams_proto.regularizer.l1_regularizer.weight, conv_hyperparams_actual.regularizer.l1_regularizer.weight)
        self.assertAlmostEqual(hyperparams_proto.initializer.truncated_normal_initializer.stddev, conv_hyperparams_actual.initializer.truncated_normal_initializer.stddev)
        self.assertAlmostEqual(hyperparams_proto.initializer.truncated_normal_initializer.mean, conv_hyperparams_actual.initializer.truncated_normal_initializer.mean)
        self.assertEqual(hyperparams_proto.activation, conv_hyperparams_actual.activation)
        self.assertFalse(is_training)

    def test_construct_non_default_conv_box_predictor(self):
        """Explicit proto fields must override the builder defaults."""
        box_predictor_text_proto = '\n      convolutional_box_predictor {\n        min_depth: 2\n        max_depth: 16\n        num_layers_before_predictor: 2\n        use_dropout: false\n        dropout_keep_probability: 0.4\n        kernel_size: 3\n        box_code_size: 3\n        apply_sigmoid_to_scores: true\n      }\n    '
        conv_hyperparams_text_proto = '\n      regularizer {\n        l1_regularizer {\n        }\n      }\n      initializer {\n        truncated_normal_initializer {\n        }\n      }\n    '
        hyperparams_proto = hyperparams_pb2.Hyperparams()
        text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
        def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
            return (conv_hyperparams_arg, is_training)
        box_predictor_proto = box_predictor_pb2.BoxPredictor()
        text_format.Merge(box_predictor_text_proto, box_predictor_proto)
        box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(hyperparams_proto)
        box_predictor = box_predictor_builder.build(argscope_fn=mock_conv_argscope_builder, box_predictor_config=box_predictor_proto, is_training=False, num_classes=10)
        self.assertEqual(box_predictor._min_depth, 2)
        self.assertEqual(box_predictor._max_depth, 16)
        self.assertEqual(box_predictor._num_layers_before_predictor, 2)
        self.assertFalse(box_predictor._use_dropout)
        self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.4)
        self.assertTrue(box_predictor._apply_sigmoid_to_scores)
        self.assertEqual(box_predictor.num_classes, 10)
        self.assertFalse(box_predictor._is_training)

    def test_construct_default_conv_box_predictor(self):
        """A minimal proto must yield the documented builder defaults."""
        box_predictor_text_proto = '\n      convolutional_box_predictor {\n        conv_hyperparams {\n          regularizer {\n            l1_regularizer {\n            }\n          }\n          initializer {\n            truncated_normal_initializer {\n            }\n          }\n        }\n      }'
        box_predictor_proto = box_predictor_pb2.BoxPredictor()
        text_format.Merge(box_predictor_text_proto, box_predictor_proto)
        box_predictor = box_predictor_builder.build(argscope_fn=hyperparams_builder.build, box_predictor_config=box_predictor_proto, is_training=True, num_classes=90)
        self.assertEqual(box_predictor._min_depth, 0)
        self.assertEqual(box_predictor._max_depth, 0)
        self.assertEqual(box_predictor._num_layers_before_predictor, 0)
        self.assertTrue(box_predictor._use_dropout)
        self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
        self.assertFalse(box_predictor._apply_sigmoid_to_scores)
        self.assertEqual(box_predictor.num_classes, 90)
        self.assertTrue(box_predictor._is_training)
def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):
    """Recursively load experiment results from one or more root directories.

    Args:
        root_dir_or_dirs: a single directory path or an iterable of paths.
        enable_progress: load progress.json / progress.csv into a DataFrame.
        enable_monitor: load monitor files via the `monitor` module.
        verbose: print per-directory status messages.

    Returns:
        list of Result objects, one per directory that yielded monitor or
        progress data.
    """
    import re
    # Hoisted out of the os.walk loop: the original recompiled this pattern
    # once per visited directory.
    monitor_re = re.compile(r'(\d+\.)?(\d+\.)?monitor\.csv')
    if isinstance(root_dir_or_dirs, str):
        rootdirs = [osp.expanduser(root_dir_or_dirs)]
    else:
        rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]
    allresults = []
    for rootdir in rootdirs:
        assert osp.exists(rootdir), ("%s doesn't exist" % rootdir)
        for dirname, dirs, files in os.walk(rootdir):
            if '-proc' in dirname:
                # Scratch directories from per-process workers: prune the walk.
                files[:] = []
                continue
            has_known_file = set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files)
            # Generator instead of a materialized list inside any().
            if has_known_file or any(monitor_re.match(f) for f in files):
                result = {'dirname': dirname}
                if 'metadata.json' in files:
                    with open(osp.join(dirname, 'metadata.json'), 'r') as fh:
                        result['metadata'] = json.load(fh)
                progjson = osp.join(dirname, 'progress.json')
                progcsv = osp.join(dirname, 'progress.csv')
                if enable_progress:
                    if osp.exists(progjson):
                        result['progress'] = pandas.DataFrame(read_json(progjson))
                    elif osp.exists(progcsv):
                        try:
                            result['progress'] = read_csv(progcsv)
                        except pandas.errors.EmptyDataError:
                            print('skipping progress file in ', dirname, 'empty data')
                    elif verbose:
                        print('skipping %s: no progress file' % dirname)
                if enable_monitor:
                    try:
                        result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))
                    except monitor.LoadMonitorResultsError:
                        print('skipping %s: no monitor files' % dirname)
                    except Exception as e:
                        print('exception loading monitor file in %s: %s' % (dirname, e))
                # Keep only directories that produced at least one dataframe.
                if (result.get('monitor') is not None) or (result.get('progress') is not None):
                    allresults.append(Result(**result))
                    if verbose:
                        print('successfully loaded %s' % dirname)
    if verbose:
        print('loaded %i results' % len(allresults))
    return allresults
def test_call_super_creates_temp_invalid(pe_bio_teacher):
    """Teaching three new classes updates both the class list and the wage."""
    new_classes = ['Computer Science', 'Video Production', 'Life Science']
    pe_bio_teacher.teach_new_classes(new_classes)
    class_count = len(pe_bio_teacher.currently_teaching)
    assert class_count == 3
    # Wage is linear in the number of classes currently taught.
    assert pe_bio_teacher.wage == pe_bio_teacher.wage_per_class * class_count
class FrameManager(EventEmitter):
    """Mirrors the browser's frame tree and JS execution contexts via CDP events.

    Frames are kept in an OrderedDict keyed by frame id; execution contexts
    are mapped by context id. State changes reported by the browser are
    re-emitted as the events listed in `FrameManager.Events`.
    """

    # Event names emitted by this manager.
    Events = SimpleNamespace(FrameAttached='frameattached', FrameNavigated='framenavigated', FrameDetached='framedetached', LifecycleEvent='lifecycleevent', FrameNavigatedWithinDocument='framenavigatedwithindocument')

    def __init__(self, client: CDPSession, frameTree: Dict, page: Any) -> None:
        """Subscribe to Page/Runtime CDP events and ingest the initial frame tree."""
        super().__init__()
        self._client = client
        self._page = page
        self._frames: OrderedDict[(str, Frame)] = OrderedDict()
        self._mainFrame: Optional[Frame] = None
        self._contextIdToContext: Dict[(str, ExecutionContext)] = dict()
        client.on('Page.frameAttached', (lambda event: self._onFrameAttached(event.get('frameId', ''), event.get('parentFrameId', ''))))
        client.on('Page.frameNavigated', (lambda event: self._onFrameNavigated(event.get('frame'))))
        client.on('Page.navigatedWithinDocument', (lambda event: self._onFrameNavigatedWithinDocument(event.get('frameId'), event.get('url'))))
        client.on('Page.frameDetached', (lambda event: self._onFrameDetached(event.get('frameId'))))
        client.on('Page.frameStoppedLoading', (lambda event: self._onFrameStoppedLoading(event.get('frameId'))))
        client.on('Runtime.executionContextCreated', (lambda event: self._onExecutionContextCreated(event.get('context'))))
        client.on('Runtime.executionContextDestroyed', (lambda event: self._onExecutionContextDestroyed(event.get('executionContextId'))))
        client.on('Runtime.executionContextsCleared', (lambda event: self._onExecutionContextsCleared()))
        client.on('Page.lifecycleEvent', (lambda event: self._onLifecycleEvent(event)))
        self._handleFrameTree(frameTree)

    def _onLifecycleEvent(self, event: Dict) -> None:
        """Forward a lifecycle event to its frame; ignore unknown frame ids."""
        frame = self._frames.get(event['frameId'])
        if (not frame):
            return
        frame._onLifecycleEvent(event['loaderId'], event['name'])
        self.emit(FrameManager.Events.LifecycleEvent, frame)

    def _onFrameStoppedLoading(self, frameId: str) -> None:
        """Mark a frame as done loading and re-emit a lifecycle event."""
        frame = self._frames.get(frameId)
        if (not frame):
            return
        frame._onLoadingStopped()
        self.emit(FrameManager.Events.LifecycleEvent, frame)

    def _handleFrameTree(self, frameTree: Dict) -> None:
        """Recursively register an initial frame tree snapshot from the browser."""
        frame = frameTree['frame']
        # Only non-root frames have a parentId and need an attach event.
        if ('parentId' in frame):
            self._onFrameAttached(frame['id'], frame['parentId'])
        self._onFrameNavigated(frame)
        if ('childFrames' not in frameTree):
            return
        for child in frameTree['childFrames']:
            self._handleFrameTree(child)

    def mainFrame(self) -> Optional['Frame']:
        """Return the page's top-level frame, if one has been seen yet."""
        return self._mainFrame

    def frames(self) -> List['Frame']:
        """Return all currently tracked frames."""
        return list(self._frames.values())

    def frame(self, frameId: str) -> Optional['Frame']:
        """Look up a frame by id; None if unknown."""
        return self._frames.get(frameId)

    def _onFrameAttached(self, frameId: str, parentFrameId: str) -> None:
        """Create and register a new child frame (no-op if already known)."""
        if (frameId in self._frames):
            return
        parentFrame = self._frames.get(parentFrameId)
        frame = Frame(self._client, parentFrame, frameId)
        self._frames[frameId] = frame
        self.emit(FrameManager.Events.FrameAttached, frame)

    def _onFrameNavigated(self, framePayload: dict) -> None:
        """Handle a cross-document navigation of a frame.

        The main frame keeps its Frame object but may change id; child frames
        must already exist. All child frames of the navigated frame are
        detached, since the new document starts with an empty subtree.
        """
        isMainFrame = (not framePayload.get('parentId'))
        if isMainFrame:
            frame = self._mainFrame
        else:
            frame = self._frames.get(framePayload.get('id', ''))
        if (not (isMainFrame or frame)):
            raise PageError('We either navigate top level or have old version of the navigated frame')
        if frame:
            for child in frame.childFrames:
                self._removeFramesRecursively(child)
        _id = framePayload.get('id', '')
        if isMainFrame:
            if frame:
                # Re-key the existing main frame under its new id.
                self._frames.pop(frame._id, None)
                frame._id = _id
            else:
                frame = Frame(self._client, None, _id)
            self._frames[_id] = frame
            self._mainFrame = frame
        frame._navigated(framePayload)
        self.emit(FrameManager.Events.FrameNavigated, frame)

    def _onFrameNavigatedWithinDocument(self, frameId: str, url: str) -> None:
        """Handle a same-document navigation (e.g. history.pushState)."""
        frame = self._frames.get(frameId)
        if (not frame):
            return
        frame._navigatedWithinDocument(url)
        self.emit(FrameManager.Events.FrameNavigatedWithinDocument, frame)
        self.emit(FrameManager.Events.FrameNavigated, frame)

    def _onFrameDetached(self, frameId: str) -> None:
        """Remove a detached frame and its whole subtree."""
        frame = self._frames.get(frameId)
        if frame:
            self._removeFramesRecursively(frame)

    def _onExecutionContextCreated(self, contextPayload: Dict) -> None:
        """Register a new JS execution context and link it to its frame, if any."""
        if (contextPayload.get('auxData') and contextPayload['auxData'].get('frameId')):
            frameId = contextPayload['auxData']['frameId']
        else:
            frameId = None
        frame = self._frames.get(frameId)

        def _createJSHandle(obj: Dict) -> JSHandle:
            # Factory bound to this context id, passed into the ExecutionContext.
            context = self.executionContextById(contextPayload['id'])
            return self.createJSHandle(context, obj)
        context = ExecutionContext(self._client, contextPayload, _createJSHandle, frame)
        self._contextIdToContext[contextPayload['id']] = context
        if frame:
            frame._addExecutionContext(context)

    def _onExecutionContextDestroyed(self, executionContextId: str) -> None:
        """Drop a destroyed execution context and unlink it from its frame."""
        context = self._contextIdToContext.get(executionContextId)
        if (not context):
            return
        del self._contextIdToContext[executionContextId]
        frame = context.frame
        if frame:
            frame._removeExecutionContext(context)

    def _onExecutionContextsCleared(self) -> None:
        """Drop every execution context (e.g. after a renderer swap)."""
        for context in self._contextIdToContext.values():
            frame = context.frame
            if frame:
                frame._removeExecutionContext(context)
        self._contextIdToContext.clear()

    def executionContextById(self, contextId: str) -> ExecutionContext:
        """Return the context for `contextId`; raise if the id is unknown."""
        context = self._contextIdToContext.get(contextId)
        if (not context):
            raise ElementHandleError(f'INTERNAL ERROR: missing context with id = {contextId}')
        return context

    def createJSHandle(self, context: ExecutionContext, remoteObject: Dict=None) -> JSHandle:
        """Wrap a CDP remote object; DOM nodes get an ElementHandle."""
        if (remoteObject is None):
            remoteObject = dict()
        if (remoteObject.get('subtype') == 'node'):
            return ElementHandle(context, self._client, remoteObject, self._page, self)
        return JSHandle(context, self._client, remoteObject)

    def _removeFramesRecursively(self, frame: 'Frame') -> None:
        """Detach `frame` and all descendants, emitting FrameDetached for each."""
        for child in frame.childFrames:
            self._removeFramesRecursively(child)
        frame._detach()
        self._frames.pop(frame._id, None)
        self.emit(FrameManager.Events.FrameDetached, frame)
class CIFAR_ResNet50_BiFPN(nn.Module):
    """CIFAR ResNet-50 backbone with a BiFPN head producing auxiliary logits."""

    def __init__(self, num_classes=100):
        super(CIFAR_ResNet50_BiFPN, self).__init__()
        self.backbone = CIFAR_ResNet50(num_classes=num_classes)
        self.bifpn = BiFPNc(self.backbone.network_channels, num_classes, repeat=1, depth=[1] * 3, width=1)

    def forward(self, x):
        # Backbone returns its own logits plus intermediate feature maps.
        backbone_logit, feature_maps = self.backbone(x, feature=True)
        # BiFPN consumes those features and yields fused features + logits.
        fpn_features, fpn_logits = self.bifpn(feature_maps, preact=False)
        return backbone_logit, feature_maps, fpn_features, fpn_logits
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Map quantized bin indices back to values inside [min_val, max_val].

    Each index is mapped to the center of its bin: ((arr + 0.5) / levels)
    scaled to the value range and offset by min_val.

    Args:
        arr: array of bin indices.
        min_val: lower bound of the original value range.
        max_val: upper bound of the original value range.
        levels: number of quantization levels; must be an int > 1.
        dtype: output dtype (default np.float64).

    Raises:
        ValueError: on an invalid `levels` or if min_val >= max_val.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})')
    bin_centers = (arr + 0.5).astype(dtype)
    span = max_val - min_val
    return bin_centers * span / levels + min_val
def get_rel_loc_tensor(lat1, lon1, lat, lon, zoom):
    """Pixel offset of (lat, lon) relative to the tile origin containing (lat1, lon1).

    Both points are projected at the given zoom level; the anchor's projected
    coordinate is floored to its tile boundary before converting to pixels.
    Returns an (x_offset, y_offset) pair in pixels.
    """
    scale = 1 << zoom
    anchor_x, anchor_y = project_with_scale_tensor(lat1, lon1, scale)
    # Floor to the tile grid, then convert tile coords to pixel coords.
    origin_px_x = (anchor_x // 1) * TILE_SIZE
    origin_px_y = (anchor_y // 1) * TILE_SIZE
    point_x, point_y = project_with_scale_tensor(lat, lon, scale)
    return point_x * TILE_SIZE - origin_px_x, point_y * TILE_SIZE - origin_px_y
class ContainedInput(PrimitiveInput):
    """An Input nested inside another Input (one choice of a choice field).

    Nearly every operation is delegated to the containing input; this class
    contributes only its own HTML control id and a fixed 'defaulted' status.
    """

    is_contained = True

    def __init__(self, containing_input, choice, refresh_widget=None):
        self.choice = choice
        self.containing_input = containing_input
        super().__init__(containing_input.form, choice.field, registers_with_form=False, refresh_widget=refresh_widget)

    def make_html_control_css_id(self):
        # Ids must be unique per (containing input name, choice value) pair.
        return str(CssId.from_dirty_string(('id-%s-%s' % (self.containing_input.name, self.value))))

    def get_input_status(self):
        return 'defaulted'

    def validation_constraints(self):
        return self.containing_input.validation_constraints

    def validation_error(self):
        return self.containing_input.validation_error

    def validate_input(self, input_values):
        return self.containing_input.validate_input(input_values)

    def format_input(self, input_values):
        return self.containing_input.format_input(input_values)

    def accept_input(self, input_values):
        # BUG FIX: previously delegated to format_input(), which only formats
        # the value; accepting input must delegate to accept_input().
        return self.containing_input.accept_input(input_values)

    def get_ocurred_event(self):
        # (sic) method name kept misspelled for interface compatibility.
        return self.containing_input.get_ocurred_event()

    def get_value_from_input(self, input_values):
        return self.containing_input.get_value_from_input(input_values)

    def prepare_input(self):
        return self.containing_input.prepare_input()

    def persist_input(self, input_values):
        return self.containing_input.persist_input(input_values)

    def enter_value(self, input_value):
        return self.containing_input.enter_value(input_value)
def fixed_padding(inputs, kernel_size, rate):
    """Apply 'SAME'-style symmetric padding for a (possibly dilated) convolution.

    The effective kernel size accounts for dilation `rate`; when the total
    padding is odd, the extra pixel goes to the right/bottom side.
    Pads the last two (spatial) dimensions of `inputs`.
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    pad_lo = total_pad // 2
    pad_hi = total_pad - pad_lo
    return F.pad(inputs, (pad_lo, pad_hi, pad_lo, pad_hi))
def summary_eval(model, loader, dset):
    """Evaluate `model` over `loader`, aggregating loss/accuracy and raw outputs.

    Args:
        model: callable module; model(batch) must return (loss, info) where
            info carries 'top1', 'prob', 'target' and 'file_name'.
        loader: sized iterable of batches; assumed non-empty (the final
            averages divide by its length).
        dset: unused; kept for interface compatibility with callers.

    Returns:
        dict with mean 'loss', mean 'top1', and per-batch 'prob', 'target'
        and 'file_name' lists.
    """
    model.eval()
    with torch.no_grad():
        loss_list = []
        top1_list = []
        prob = []
        target = []
        file_name = []
        # `enumerate` replaces a manually-incremented counter that shadowed
        # the builtin `iter`.
        for step, batch_data in enumerate(loader):
            (loss, info) = model(batch_data)
            loss_list.append(loss.cpu().item())
            top1_list.append(info['top1'])
            prob.append(info['prob'])
            target.append(info['target'])
            file_name.append(info['file_name'])
            if (step % 10) == 0:
                print(('testing %d / %d: loss %.4f: acc %.4f' % (step, len(loader), loss, info['top1'])))
        info = {'loss': (sum(loss_list) / len(loss_list)), 'top1': (sum(top1_list) / len(top1_list)), 'prob': prob, 'target': target, 'file_name': file_name}
    return info
# NOTE(review): the leading '@' of this decorator appears to have been lost;
# restored here so the hook is actually registered — confirm against the
# original source.
@_kernel_api(params={'oidp': POINTER})
def hook__sysctl_register_oid(ql, address, params):
    """Hook for sysctl_register_oid: resolve the parent node's name and
    register the oid's handler (if any) with the event manager as EV_SYSCTL.
    """
    oidp = sysctl_oid_t(ql, params['oidp'])
    oidp = oidp.loadFromMem()
    oid_name = ql.mem.string(oidp.oid_name.value)

    def _parent_name_from(symbols, base):
        # Scan a symbol table for the `sysctl_<parent>_children` node whose
        # (base-adjusted) address equals oid_parent; return the <parent> part,
        # or b'' when no symbol matches.
        for symname, symb in symbols.items():
            if (symb['n_value'] + base) == oidp.oid_parent.value:
                st = symname.find(b'sysctl_') + len('sysctl_')
                en = symname.find(b'_children', st)
                return symname[st:en]
        return b''

    # Kernel external symbols use absolute addresses; kext symbols are
    # relative to the loader's load base.
    oid_parent = _parent_name_from(ql.loader.kernel_extrn_symbols_detail, 0)
    if not oid_parent:
        oid_parent = _parent_name_from(ql.loader.kext_local_symbols, ql.loader.loadbase)
    if not oid_parent:
        oid_parent = _parent_name_from(ql.loader.kext_extern_symbols, ql.loader.loadbase)
    true_name = oid_parent.lstrip(b'_') + b'.' + oid_name.encode() + b'\x00'
    if oidp.oid_handler.value != 0:
        ql.log.debug(('New sysctl callback has been registered: %s' % true_name))
        ql.os.ev_manager.register(oidp.oid_handler.value, true_name, MacOSEventType.EV_SYSCTL, ev_obj=oidp, idx=0)
    return
class Bottleneck2D(nn.Module):
    """Pre-activation bottleneck residual block: three (BN -> ReLU -> Conv) stages.

    Output channels are planes * expansion; `resample` (if given) adapts the
    shortcut to the main path's shape.
    """

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, resample=None):
        super(Bottleneck2D, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck2D.expansion, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.resample = resample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity unless a resampling module was supplied.
        shortcut = self.resample(x) if self.resample is not None else x
        # Main branch: three pre-activation stages.
        out = x
        for bn, conv in ((self.bn1, self.conv1), (self.bn2, self.conv2), (self.bn3, self.conv3)):
            out = conv(self.relu(bn(out)))
        out += shortcut
        return out
.parametrize('rv_op, dist_params, base_size, cdf_name, params_conv', [(ptr.beta, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'beta', (lambda *args: args)), (ptr.cauchy, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'cauchy', (lambda *args: args)), (ptr.exponential, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64))], (2,), 'expon', (lambda *args: (0, args[0]))), (ptr._gamma, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dvector(), np.array([0.5, 3.0], dtype=np.float64))], (2,), 'gamma', (lambda a, b: (a, 0.0, b))), (ptr.gumbel, [set_test_value(pt.lvector(), np.array([1, 2], dtype=np.int64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'gumbel_r', (lambda *args: args)), (ptr.laplace, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'laplace', (lambda *args: args)), (ptr.logistic, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'logistic', (lambda *args: args)), (ptr.lognormal, [set_test_value(pt.lvector(), np.array([0, 0], dtype=np.int64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'lognorm', (lambda mu, sigma: (sigma, 0, np.exp(mu)))), (ptr.normal, [set_test_value(pt.lvector(), np.array([1, 2], dtype=np.int64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 'norm', (lambda *args: args)), (ptr.pareto, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dvector(), np.array([2.0, 10.0], dtype=np.float64))], (2,), 'pareto', (lambda shape, scale: (shape, 0.0, scale))), (ptr.poisson, [set_test_value(pt.dvector(), 
np.array([100000.0, 200000.0], dtype=np.float64))], (2,), 'poisson', (lambda *args: args)), (ptr.randint, [set_test_value(pt.lscalar(), np.array(0, dtype=np.int64)), set_test_value(pt.lscalar(), np.array(1000, dtype=np.int64))], (), 'randint', (lambda *args: args)), (ptr.integers, [set_test_value(pt.lscalar(), np.array(0, dtype=np.int64)), set_test_value(pt.lscalar(), np.array(1000, dtype=np.int64))], (), 'randint', (lambda *args: args)), (ptr.standard_normal, [], (2,), 'norm', (lambda *args: args)), (ptr.t, [set_test_value(pt.dscalar(), np.array(2.0, dtype=np.float64)), set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1.0, dtype=np.float64))], (2,), 't', (lambda *args: args)), (ptr.uniform, [set_test_value(pt.dvector(), np.array([1.0, 2.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1000.0, dtype=np.float64))], (2,), 'uniform', (lambda *args: args)), (ptr.halfnormal, [set_test_value(pt.dvector(), np.array([(- 1.0), 200.0], dtype=np.float64)), set_test_value(pt.dscalar(), np.array(1000.0, dtype=np.float64))], (2,), 'halfnorm', (lambda *args: args)), (ptr.invgamma, [set_test_value(pt.dvector(), np.array([10.4, 2.8], dtype=np.float64)), set_test_value(pt.dvector(), np.array([3.4, 7.3], dtype=np.float64))], (2,), 'invgamma', (lambda a, b: (a, 0, b))), (ptr.chisquare, [set_test_value(pt.dvector(), np.array([2.4, 4.9], dtype=np.float64))], (2,), 'chi2', (lambda *args: args)), (ptr.gengamma, [set_test_value(pt.dvector(), np.array([10.4, 2.8], dtype=np.float64)), set_test_value(pt.dvector(), np.array([3.4, 7.3], dtype=np.float64)), set_test_value(pt.dvector(), np.array([0.9, 2.0], dtype=np.float64))], (2,), 'gengamma', (lambda alpha, p, lambd: ((alpha / p), p, 0, lambd))), (ptr.wald, [set_test_value(pt.dvector(), np.array([10.4, 2.8], dtype=np.float64)), set_test_value(pt.dvector(), np.array([4.5, 2.0], dtype=np.float64))], (2,), 'invgauss', (lambda mean, scale: ((mean / scale), 0, scale))), 
pytest.param(ptr.vonmises, [set_test_value(pt.dvector(), np.array([(- 0.5), 1.3], dtype=np.float64)), set_test_value(pt.dvector(), np.array([5.5, 13.0], dtype=np.float64))], (2,), 'vonmises', (lambda mu, kappa: (kappa, mu)), marks=pytest.mark.skipif((not numpyro_available), reason='VonMises dispatch requires numpyro'))])
def test_random_RandomVariable(rv_op, dist_params, base_size, cdf_name, params_conv):
    """Goodness-of-fit check for a JAX-compiled PyTensor RandomVariable.

    Draws 10000 samples per parameter index and runs a Cramér–von Mises test
    against the matching scipy distribution (`cdf_name`); `params_conv`
    translates PyTensor parameters into scipy's parameter convention.
    """
    if (rv_op is ptr.integers):
        # `integers` only exists on the new Generator API, not RandomState.
        rng = shared(np.random.default_rng(29402))
    else:
        rng = shared(np.random.RandomState(29402))
    g = rv_op(*dist_params, size=((10000,) + base_size), rng=rng)
    g_fn = random_function(dist_params, g, mode=jax_mode)
    # Feed only the explicit inputs; shared RNG state and constants are implicit.
    samples = g_fn(*[i.tag.test_value for i in g_fn.maker.fgraph.inputs if (not isinstance(i, (SharedVariable, Constant)))])
    bcast_dist_args = np.broadcast_arrays(*[i.tag.test_value for i in dist_params])
    for idx in np.ndindex(*base_size):
        cdf_params = params_conv(*tuple((arg[idx] for arg in bcast_dist_args)))
        test_res = stats.cramervonmises(samples[((Ellipsis,) + idx)], cdf_name, args=cdf_params)
        assert (not np.isnan(test_res.statistic))
        # p > 0.01 => cannot reject that the samples follow the target CDF.
        assert (test_res.pvalue > 0.01)
def copy_data_to_device(data: T, device: torch.device, *args: Any, **kwargs: Any) -> T:
    """Recursively copy `data` to `device`, preserving container types.

    Handles namedtuples, lists/tuples, defaultdicts, mappings, dataclass
    instances, and anything matching _CopyableData (i.e. exposing `.to()`);
    any other object is returned unchanged. Extra *args/**kwargs are
    forwarded to `.to()`.

    NOTE: the isinstance chain is order-sensitive — namedtuple must come
    before tuple, and defaultdict before Mapping; do not reorder.
    """
    if (_is_named_tuple(data) and isinstance(data, tuple)):
        # Rebuild the namedtuple from a device-copied dict of its fields.
        return type(data)(**copy_data_to_device(data._asdict(), device, *args, **kwargs))
    elif isinstance(data, (list, tuple)):
        return type(data)((copy_data_to_device(e, device, *args, **kwargs) for e in data))
    elif isinstance(data, defaultdict):
        # Preserve default_factory, which a plain Mapping copy would lose.
        return type(data)(data.default_factory, {k: copy_data_to_device(v, device, *args, **kwargs) for (k, v) in data.items()})
    elif isinstance(data, Mapping):
        return type(data)({k: copy_data_to_device(v, device, *args, **kwargs) for (k, v) in data.items()})
    elif (is_dataclass(data) and (not isinstance(data, type))):
        # Init fields go through the constructor; non-init fields are set afterwards.
        new_data_class = type(data)(**{field.name: copy_data_to_device(getattr(data, field.name), device, *args, **kwargs) for field in fields(data) if field.init})
        for field in fields(data):
            if (not field.init):
                setattr(new_data_class, field.name, copy_data_to_device(getattr(data, field.name), device, *args, **kwargs))
        return new_data_class
    elif isinstance(data, _CopyableData):
        return data.to(device, *args, **kwargs)
    return data
def make_patched_web3_get_block(original_func: Callable[([BlockIdentifier, bool], BlockData)]) -> Callable[([BlockIdentifier, bool], BlockData)]:
    """Wrap web3's get_block with retries for transiently missing blocks.

    Retries up to WEB3_BLOCK_NOT_FOUND_RETRY_COUNT times, sleeping 0.1s after
    each BlockNotFound, then re-raises the last exception.
    """

    def patched_web3_get_block(block_identifier: BlockIdentifier, full_transactions: bool=False) -> BlockData:
        last_ex: Optional[Exception] = None
        remaining = WEB3_BLOCK_NOT_FOUND_RETRY_COUNT
        while remaining > 0:
            try:
                return original_func(block_identifier, full_transactions)
            except BlockNotFound as ex:
                remaining -= 1
                log.warning('Block not found, retrying', remaining_retries=remaining, block_identifier=block_identifier)
                last_ex = ex
                gevent.sleep(0.1)
        assert last_ex, 'Retries can only happen due to exceptions'
        raise last_ex

    return patched_web3_get_block
class Migration(migrations.Migration):
    """Alter helpentry.db_tags: allow blank and point the M2M at typeclasses.Tag."""

    dependencies = [('help', '0001_initial')]

    operations = [migrations.AlterField(model_name='helpentry', name='db_tags', field=models.ManyToManyField(blank=True, help_text='tags on this object. Tags are simple string markers to identify, group and alias objects.', to='typeclasses.Tag'))]
def get_executable(name: str, prefix: PathLike=sys.prefix, include_path=True) -> Optional[str]:
    """Locate an executable by name, preferring PATH, then `prefix`'s subdirs.

    With include_path=True (default), a PATH hit wins outright. Otherwise
    (or when PATH has no hit), search one-level-deep under `prefix` for
    files matching `name*` and re-run `which` restricted to those parents
    (shortest paths first). Returns the full path, or None.
    """
    found_on_path = shutil.which(name)
    if include_path and found_on_path:
        return found_on_path
    matches = list(Path(prefix).resolve().glob(f'*/{name}*'))
    if not matches:
        return None
    # Prefer shallower/shorter locations by sorting on path-string length.
    parent_dirs = (m.parent for m in sorted(matches, key=lambda m: len(str(m))))
    return shutil.which(name, path=os.pathsep.join(str(d) for d in parent_dirs))
_model()
_legacy_interface(weights=('pretrained', ResNet34_Weights.IMAGENET1K_V1))
# NOTE(review): the two calls above look like decorator lines whose leading
# '@' was lost (cf. torchvision's @register_model / @handle_legacy_interface)
# — confirm against the original source; as bare calls they run at import time.
def resnet34(*, weights: Optional[ResNet34_Weights]=None, progress: bool=True, **kwargs: Any) -> ResNet:
    """Build a ResNet-34 (BasicBlock, layers [3, 4, 6, 3]), optionally with
    the given pretrained weights; `progress` controls the download bar.
    """
    weights = ResNet34_Weights.verify(weights)
    return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, **kwargs)
def test_single_dihedral(tmpdir):
    """Ethane's single rotatable bond should yield exactly one torsion scan,
    and the input geometry must not be mutated by the scan."""
    with tmpdir.as_cwd():
        molecule = Ligand.from_file(get_data('ethane.sdf'))
        spec = QCOptions(program='rdkit', method='uff', basis=None)
        resources = LocalResource(cores=1, memory=1)
        driver = TorsionDriver(n_workers=1, grid_spacing=60)
        scan = TorsionScan1D(torsion_driver=driver)
        scan.clear_avoided_torsions()
        result_mol = scan._run(molecule=molecule, qc_spec=spec, local_options=resources)
        assert len(result_mol.qm_scans) == 1
        assert np.allclose(molecule.coordinates, result_mol.coordinates)
# NOTE(review): the '.xfail(...)' line below appears to be a truncated
# '@pytest.mark.xfail' decorator whose prefix was lost — confirm against the
# original source.
.xfail(reason='list-like is converted to list.')
def test_type_index(df_checks_output):
    """pivot_wider should reject non-list-like `index` types (set, tuple)."""
    with pytest.raises(TypeError):
        df_checks_output.pivot_wider(index={'geoid'}, names_from='variable')
    with pytest.raises(TypeError):
        df_checks_output.pivot_wider(index=('geoid', 'name'), names_from='variable')
class TestApp:
    """Integration checks for app lifecycle and account queries."""

    def test_create_and_delete_app(self):
        """Create an app, capture its uri, then delete it."""
        app_name = 'appjust4testcreate'
        created_uri = ''
        create_args = {'name': app_name, 'title': 'whatever', 'region': app_region}
        with Call(acc_client, 'create_app', create_args) as r:
            assert r[0] is not None
            created_uri = r[0]['uri']
        with Call(acc_client, 'delete_app', created_uri) as r:
            assert r[0] == {}

    def test_get_app_keys(self):
        """An existing app should expose at least one key."""
        with Call(acc_client, 'get_app_keys', app_uri) as r:
            assert len(r[0]) > 0

    def test_get_account_info(self):
        """Account info must be retrievable."""
        with Call(acc_client, 'get_account_info') as r:
            assert r[0] is not None
class ImageModel(QAbstractTableModel):
    """Table model exposing an image's pixels: one cell per pixel, gray value data."""

    def __init__(self, parent=None):
        super(ImageModel, self).__init__(parent)
        self.modelImage = QImage()

    def setImage(self, image):
        """Replace the backing image, resetting the model around the swap."""
        self.beginResetModel()
        self.modelImage = QImage(image)
        self.endResetModel()

    def rowCount(self, parent):
        # One row per pixel row.
        return self.modelImage.height()

    def columnCount(self, parent):
        # One column per pixel column.
        return self.modelImage.width()

    def data(self, index, role):
        """Return the gray value at (row, column) for DisplayRole; else None."""
        if index.isValid() and role == Qt.DisplayRole:
            return qGray(self.modelImage.pixel(index.column(), index.row()))
        return None

    def headerData(self, section, orientation, role):
        """Keep headers minimal: a 1x1 size hint, no header text."""
        return QSize(1, 1) if role == Qt.SizeHintRole else None
class RandomForestWrapper(RandomForestClassifier):
    """RandomForestClassifier that optionally resamples (X, y) before fitting.

    `sampler` must expose fit_sample(X, y) (e.g. an imbalanced-learn sampler);
    when None, fitting proceeds on the data unchanged.
    """

    def __init__(self, sampler=None, n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False, class_weight=None):
        super().__init__(n_estimators, criterion, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, min_impurity_decrease, min_impurity_split, bootstrap, oob_score, n_jobs, random_state, verbose, warm_start, class_weight)
        self.sampler = sampler

    def fit(self, X, y, sample_weight=None):
        """Resample (if a sampler is set) and fit the forest.

        BUG FIX: previously forwarded sample_weight=None unconditionally,
        silently discarding any weights the caller supplied.
        """
        if self.sampler:
            (X, y) = self.sampler.fit_sample(X, y)
        return super().fit(X, y, sample_weight=sample_weight)
def _get_socketname(basedir):
    """Derive a per-user (and optionally per-basedir) IPC socket path.

    On Windows this is delegated to _get_socketname_windows; elsewhere the
    filename is an md5 of username (plus basedir, when given) placed in the
    runtime directory, with a shorter prefix on macOS.
    """
    if utils.is_windows:
        return _get_socketname_windows(basedir)
    hash_parts = [getpass.getuser()]
    if basedir is not None:
        hash_parts.append(basedir)
    # Hash keeps the filename short and free of unsafe characters.
    digest = hashlib.md5('-'.join(hash_parts).encode('utf-8')).hexdigest()
    prefix = 'i-' if utils.is_mac else 'ipc-'
    filename = '{}{}'.format(prefix, digest)
    return os.path.join(standarddir.runtime(), filename)
def nested_field_condition_1() -> models.Condition:
    """Build a random NestedCondition: a word match AND a number upper bound
    over elements of 'nested.array'."""
    word = random_real_word()
    upper_bound = random.randint(1, 10)
    word_clause = models.FieldCondition(key='word', match=models.MatchValue(value=word))
    number_clause = models.FieldCondition(key='number', range=models.Range(lt=upper_bound))
    nested_filter = models.Filter(must=[word_clause, number_clause])
    return models.NestedCondition(nested=models.Nested(key='nested.array', filter=nested_filter))
# NOTE(review): the two '.parametrize(...)' lines below appear to be
# '@pytest.mark.parametrize' decorators whose prefix was lost in extraction —
# confirm against the original source.
.parametrize('bitsize', [2, 3, 4, 5])
.parametrize('arctan_bitsize', [5, 6, 7])
def test_phase_oracle(bitsize: int, arctan_bitsize: int):
    """Check ComplexPhaseOracle imparts phase exp(i*pi*approx(-2*arctan(x)/pi))
    on each selection basis state |x>, within fixed-point precision."""
    phase_oracle = ComplexPhaseOracle(ExampleSelect(bitsize), arctan_bitsize)
    g = cq_testing.GateHelper(phase_oracle)
    assert_valid_bloq_decomposition(phase_oracle)
    # Prepare uniform superposition over the selection register, apply oracle.
    circuit = cirq.Circuit(cirq.H.on_each(*g.quregs['selection']))
    circuit += cirq.Circuit(cirq.decompose_once(g.operation))
    qubit_order = cirq.QubitOrder.explicit(g.quregs['selection'], fallback=cirq.QubitOrder.DEFAULT)
    result = cirq.Simulator(dtype=np.complex128).simulate(circuit, qubit_order=qubit_order)
    state_vector = result.final_state_vector
    # Fold out the ancilla axes by summing amplitudes per selection value.
    state_vector = state_vector.reshape((2 ** bitsize), (len(state_vector) // (2 ** bitsize)))
    prepared_state = state_vector.sum(axis=1)
    for x in range((2 ** bitsize)):
        output_val = (((- 2) * np.arctan(x, dtype=np.double)) / np.pi)
        output_bits = [*bit_tools.iter_bits_fixed_point(np.abs(output_val), arctan_bitsize)]
        # Reconstruct the fixed-point approximation of the phase from its bits.
        approx_val = (np.sign(output_val) * math.fsum([(b * (1 / (2 ** (1 + i)))) for (i, b) in enumerate(output_bits)]))
        assert math.isclose(output_val, approx_val, abs_tol=(1 / (2 ** bitsize))), output_bits
        y = (np.exp(((1j * approx_val) * np.pi)) / np.sqrt((2 ** bitsize)))
        assert np.isclose(prepared_state[x], y)
def test_no_monitor_reset_unless_done():
    """Once monitoring starts, reset() is only legal after an episode finishes."""

    def assert_reset_raises(env):
        # reset() must raise error.Error in this state.
        try:
            env.reset()
        except error.Error:
            return
        assert False, "Env allowed a reset when it shouldn't have"

    with helpers.tempdir() as temp:
        env = gym.make('CartPole-v0')
        # Before monitoring, resets are unrestricted.
        env.reset()
        env.step(env.action_space.sample())
        env.step(env.action_space.sample())
        env.reset()
        env.monitor.start(temp, video_callable=False)
        # The first monitored reset is allowed; further mid-episode resets are not.
        env.reset()
        assert_reset_raises(env)
        env.step(env.action_space.sample())
        env.step(env.action_space.sample())
        assert_reset_raises(env)
        # Run the episode to completion; reset becomes legal again.
        done = False
        while not done:
            _, _, done, _ = env.step(env.action_space.sample())
        env.reset()
        env.step(env.action_space.sample())
        assert_reset_raises(env)
        env.monitor.close()
def oncreate_init_py(unit, *args):
    """Touch __init__.py files for every package directory implied by proto paths.

    Keyword args (parsed by sort_by_keywords): DESTINATION (base output dir,
    defaults to $ARCADIA_BUILD_ROOT), INCLUDING_DEST_DIR (also create an
    __init__.py directly in the destination), RESULT (variable name to set
    with the space-joined file list).
    """
    keywords = {'DESTINATION': 1, 'INCLUDING_DEST_DIR': 0, 'RESULT': 1}
    flat_args, spec_args = sort_by_keywords(keywords, args)
    dest_dir = spec_args['DESTINATION'][0] if 'DESTINATION' in spec_args else '$ARCADIA_BUILD_ROOT'
    init_files = []
    if 'INCLUDING_DEST_DIR' in spec_args:
        init_files.append(os.path.join(dest_dir, '__init__.py'))
    for proto_file in flat_args:
        # Every ancestor directory of the proto becomes a package.
        dir_parts = proto_file.split(os.sep)[:-1]
        for depth in range(len(dir_parts)):
            init_files.append(os.path.join(dest_dir, os.path.join(*dir_parts[0:len(dir_parts) - depth]), '__init__.py'))
    init_files = list(set(init_files))
    unit.ontouch(init_files)
    if 'RESULT' in spec_args:
        unit.set([spec_args['RESULT'][0], ' '.join(init_files)])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.