# NOTE(review): two lines of dataset-table residue ("code stringlengths ... |"
# and "|---|") removed here — they were extraction artifacts, not source code.
class ConsoleMessageCollection():
    """Collects console messages and exposes per-type filtered views.

    Each added message is also forwarded to ``logger.log`` under a
    ``console.<type>`` category, colored for warning/error types.
    """

    class Message():
        """A single console message: its type tag and its text."""

        def __init__(self, type, text):
            # The original bare annotations provided no constructor, so
            # ``Message(type=..., text=...)`` in add_message would fail.
            self.type = type
            self.text = text

    class View():
        """A read-only view over the collection, optionally filtered by type."""

        def __init__(self, console, msg_type):
            self.console = console
            self.msg_type = msg_type  # None means "all messages"

        # These are properties: ``lines`` iterates ``self.messages`` and
        # ``text`` joins ``self.lines`` without calling them. The original
        # source had the @property decorators stripped.
        @property
        def messages(self):
            """Messages matching this view's type (all messages if untyped)."""
            if self.msg_type is None:
                return self.console._messages
            return [msg for msg in self.console._messages if msg.type == self.msg_type]

        @property
        def lines(self):
            """The text of each matching message."""
            return [msg.text for msg in self.messages]

        @property
        def text(self):
            """All matching message texts joined with newlines."""
            return '\n'.join(self.lines)

    # Colors used when forwarding messages to the logger, keyed by type.
    _COLORS = {'warning': 'brown', 'error': 'darkred', 'js_error': 'red'}

    def __init__(self, logger):
        self.logger = logger
        self._messages = []
        self.all = self.View(self, None)
        self.log = self.View(self, 'log')
        self.debug = self.View(self, 'debug')
        self.info = self.View(self, 'info')
        self.error = self.View(self, 'error')
        self.warning = self.View(self, 'warning')
        self.js_error = self.View(self, 'js_error')

    def add_message(self, type, text):
        """Record a message and forward it to the logger."""
        msg = self.Message(type=type, text=text)
        category = f'console.{msg.type}'
        color = self._COLORS.get(msg.type)
        self.logger.log(category, msg.text, color=color)
        self._messages.append(msg)
class EvolutionFactory():
    """Factory returning a suitable evolution algorithm for an operator."""

    # ``build`` takes no self/cls, so it must be a static method — otherwise
    # calling it on an instance would swallow ``operator``. The decorator
    # appears to have been stripped from the original source.
    @staticmethod
    def build(operator: OperatorBase = None) -> EvolutionBase:
        """Pick an evolution implementation from the operator's primitives.

        Raises:
            ValueError: if the primitive strings are neither purely Matrix
                nor purely Pauli based.
        """
        primitives = operator.primitive_strings()
        if 'Matrix' in primitives:
            return MatrixEvolution()
        elif 'Pauli' in primitives:
            return PauliTrotterEvolution()
        else:
            raise ValueError('Evolutions of mixed Operators not yet supported.')
def load_checkpoint(model, optimizer, filename, map_location, logger=None):
    """Load a training checkpoint from ``filename``.

    Restores model/optimizer state dicts when both the object and the
    corresponding checkpoint entry are present.

    Args:
        model: module to restore, or None to skip.
        optimizer: optimizer to restore, or None to skip.
        filename: path to the checkpoint file.
        map_location: forwarded to ``torch.load`` for device remapping.
        logger: optional logger; progress is logged only when provided.

    Returns:
        int: the epoch stored in the checkpoint (-1 when absent).

    Raises:
        FileNotFoundError: if ``filename`` does not exist.
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)  # include the path for debugging
    if logger is not None:  # original crashed here when logger was None
        logger.info("==> Loading from checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename, map_location)
    epoch = checkpoint.get('epoch', -1)
    # .get() tolerates checkpoints that omit a state entry entirely
    # (the original raised KeyError in that case).
    if model is not None and checkpoint.get('model_state') is not None:
        model.load_state_dict(checkpoint['model_state'])
    if optimizer is not None and checkpoint.get('optimizer_state') is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state'])
    if logger is not None:
        logger.info('==> Done')
    return epoch
def check_cookie(node: str, pod_template, br_name, cookie, kubecli: KrknKubernetes) -> str:
    """Spin up a helper pod on *node* and dump the OVS flows matching *cookie*.

    The helper pod is always deleted, even when the flow dump fails.
    NOTE(review): annotated ``-> str`` but the function returns a list of
    flow strings — confirm the intended return type.
    """
    pod_manifest = yaml.safe_load(pod_template.render(nodename=node))
    logging.info('Creating pod to query duplicate rules on node %s' % node)
    kubecli.create_pod(pod_manifest, 'default', 300)
    try:
        command = ['chroot', '/host', 'ovs-ofctl', '-O', 'OpenFlow13', 'dump-flows', br_name, f'cookie={cookie}/-1']
        dump = kubecli.exec_cmd_in_pod(command, 'modtools', 'default', base_command='chroot')
        if not dump:
            logging.error(f'Exception occurred while executing command {command} in pod')
            sys.exit(1)
        flows = dump.split('\n')
    finally:
        # Clean up the helper pod regardless of success.
        logging.info('Deleting pod to query interface on node')
        kubecli.delete_pod('modtools', 'default')
    return flows
def test_icdar_dataset():
    """Exercise IcdarDataset loading plus hmean-iou / hmean-ic13 evaluation
    on a dummy two-image annotation file."""
    tmp_dir = tempfile.TemporaryDirectory()
    fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
    _create_dummy_icdar_json(fake_json_file)
    dataset = IcdarDataset(ann_file=fake_json_file, pipeline=[])
    assert (dataset.CLASSES == 'text')
    assert (dataset.img_ids == [0, 1])
    assert (dataset.select_first_k == (- 1))
    # First image's annotation: two kept boxes and two ignored boxes, each
    # with a matching polygon mask.
    ann = dataset.get_ann_info(0)
    assert np.allclose(ann['bboxes'], [[50.0, 60.0, 70.0, 80.0], [100.0, 120.0, 130.0, 150.0]])
    assert np.allclose(ann['labels'], [0, 0])
    assert np.allclose(ann['bboxes_ignore'], [[150.0, 160.0, 190.0, 200.0], [250.0, 260.0, 350.0, 360.0]])
    assert np.allclose(ann['masks'], [[[50, 60, 70, 60, 70, 80, 50, 80]], [[100, 120, 130, 120, 120, 150, 100, 150]]])
    assert np.allclose(ann['masks_ignore'], [[[150, 160, 190, 160, 190, 200, 150, 200]], [[250, 260, 350, 260, 350, 360, 250, 360]]])
    assert (dataset.cat_ids == [0])
    tmp_dir.cleanup()
    # Perfect predictions on image 0, nothing on image 1 -> hmean == 1.
    metrics = ['hmean-iou', 'hmean-ic13']
    results = [{'boundary_result': [[50, 60, 70, 60, 70, 80, 50, 80, 1], [100, 120, 130, 120, 120, 150, 100, 150, 1]]}, {'boundary_result': []}]
    output = dataset.evaluate(results, metrics)
    assert (output['hmean-iou:hmean'] == 1)
    assert (output['hmean-ic13:hmean'] == 1)
    # Score-threshold sweep: at min threshold 0 both boxes (scores 0.5 and 1)
    # still count, so hmean stays 1.
    results = [{'boundary_result': [[50, 60, 70, 60, 70, 80, 50, 80, 0.5], [100, 120, 130, 120, 120, 150, 100, 150, 1]]}, {'boundary_result': []}]
    output = dataset.evaluate(results, metrics, min_score_thr=0, max_score_thr=1, step=0.5)
    assert (output['hmean-iou:hmean'] == 1)
    assert (output['hmean-ic13:hmean'] == 1)
    # Raising the minimum threshold above 0.5 drops one box, lowering hmean.
    output = dataset.evaluate(results, metrics, min_score_thr=0.6, max_score_thr=1, step=0.5)
    assert (output['hmean-iou:hmean'] == (1 / 1.5))
    assert (output['hmean-ic13:hmean'] == (1 / 1.5))
def test_c3d():
    """Smoke-test a C3D recognizer through the shared 3D-model test helper."""
    cfg = get_recognizer_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
    cfg.model['backbone']['pretrained'] = None  # skip weight download
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    shape = (1, 1, 3, 16, 112, 112)
    layer = 'backbone/conv5a/activate'
    _do_test_3D_models(model, layer, shape, 101)
def parse_key_value_pair(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[(Pos, Key, Any)]:
    """Parse one ``key = value`` pair starting at *pos*.

    Returns the position after the value together with the parsed key and
    value; raises via ``suffixed_err`` when the ``=`` separator is missing.
    """
    pos, key = parse_key(src, pos)
    # Peek at the next character; a truncated document yields None.
    char = src[pos] if pos < len(src) else None
    if char != '=':
        raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair')
    pos += 1
    pos = skip_chars(src, pos, TOML_WS)
    pos, value = parse_value(src, pos, parse_float)
    return pos, key, value
class BaseReport():
    """Base class for tabular ad reports over a queryset.

    Subclasses set ``model``/``index``/``order`` and implement ``generate``
    to fill ``self.results`` and ``self.total``.
    """

    model = None
    index = None
    order = None
    DEFAULT_MAX_RESULTS = 65535
    select_related_fields = ('advertisement', 'advertisement__flight')

    def __init__(self, queryset, index=None, order=None, max_results=None, export=False, **kwargs):
        self.queryset = queryset
        if index:
            self.index = index
        if order:
            self.order = order
        # Fall back to the class-wide cap when no explicit limit is given.
        self.max_results = max_results if max_results else self.DEFAULT_MAX_RESULTS
        self.export = export
        self.kwargs = kwargs
        self.total = {}
        self.results = []
        # Guard against passing a queryset of the wrong model.
        if self.queryset.model is not self.model:
            raise RuntimeError(f'Report queryset (type {self.queryset.model}) is not type {self.model}')

    def get_index_display(self, index):
        """Hook for subclasses to prettify the index value."""
        return index

    def generate(self):
        """Build the report data; must be overridden."""
        raise NotImplementedError('Subclasses implement this method')
class SimulationParameters(object):
    """Parameters describing a simulation window (backtest or live).

    Normalizes the start/end sessions against the trading calendar and
    precomputes the first open and last close minutes.

    NOTE(review): the original source had its ``@property`` decorators
    stripped (residue lines ``_frequency.setter`` and ``_last`` remained,
    and ``arena`` was defined twice); they are restored here so that
    ``self.start_session`` etc. return values rather than bound methods.
    """

    def __init__(self, start_session, end_session, trading_calendar,
                 capital_base=DEFAULT_CAPITAL_BASE,
                 emission_rate='daily',
                 data_frequency='daily',
                 arena='backtest'):
        assert type(start_session) == pd.Timestamp
        assert type(end_session) == pd.Timestamp
        assert (trading_calendar is not None), 'Must pass in trading calendar!'
        assert (start_session <= end_session), 'Period start falls after period end.'
        assert (start_session <= trading_calendar.last_trading_session), 'Period start falls after the last known trading day.'
        assert (end_session >= trading_calendar.first_trading_session), 'Period end falls before the first known trading day.'
        self._start_session = normalize_date(start_session)
        self._end_session = normalize_date(end_session)
        self._capital_base = capital_base
        self._emission_rate = emission_rate
        self._data_frequency = data_frequency
        self._arena = arena
        self._trading_calendar = trading_calendar
        # Snap the endpoints onto actual calendar sessions.
        if not trading_calendar.is_session(self._start_session):
            self._start_session = trading_calendar.minute_to_session_label(self._start_session)
        if not trading_calendar.is_session(self._end_session):
            self._end_session = trading_calendar.minute_to_session_label(self._end_session, direction='previous')
        self._first_open = trading_calendar.open_and_close_for_session(self._start_session)[0]
        self._last_close = trading_calendar.open_and_close_for_session(self._end_session)[1]

    @property
    def capital_base(self):
        return self._capital_base

    @property
    def emission_rate(self):
        return self._emission_rate

    @property
    def data_frequency(self):
        return self._data_frequency

    @data_frequency.setter
    def data_frequency(self, val):
        self._data_frequency = val

    @property
    def arena(self):
        return self._arena

    @arena.setter
    def arena(self, val):
        self._arena = val

    @property
    def start_session(self):
        return self._start_session

    @property
    def end_session(self):
        return self._end_session

    @property
    def first_open(self):
        return self._first_open

    @property
    def last_close(self):
        return self._last_close

    @property
    def trading_calendar(self):
        return self._trading_calendar

    @property
    def sessions(self):
        return self._trading_calendar.sessions_in_range(self.start_session, self.end_session)

    def create_new(self, start_session, end_session, data_frequency=None):
        """Clone these parameters with a new session window."""
        if data_frequency is None:
            data_frequency = self.data_frequency
        return SimulationParameters(start_session, end_session, self._trading_calendar,
                                    capital_base=self.capital_base,
                                    emission_rate=self.emission_rate,
                                    data_frequency=data_frequency,
                                    arena=self.arena)

    def __repr__(self):
        return '\n{class_name}(\n start_session={start_session},\n end_session={end_session},\n capital_base={capital_base},\n data_frequency={data_frequency},\n emission_rate={emission_rate},\n first_open={first_open},\n last_close={last_close},\n trading_calendar={trading_calendar}\n)'.format(class_name=self.__class__.__name__, start_session=self.start_session, end_session=self.end_session, capital_base=self.capital_base, data_frequency=self.data_frequency, emission_rate=self.emission_rate, first_open=self.first_open, last_close=self.last_close, trading_calendar=self._trading_calendar)
class CommunicationParameter2(DataElementGroup):
    """FinTS communication-parameter data element group (version 2).

    The ``_d`` strings are the official German element names from the FinTS
    specification and are kept verbatim (they are runtime field metadata).
    """
    # Communication service type (coded via ServiceType2, max 2 digits).
    service_type = IntCodeField(enum=ServiceType2, max_length=2, _d='Kommunikationsdienst')
    # Communication address; interpretation depends on the service type.
    address = DataElementField(type='an', max_length=512, _d='Kommunikationsadresse')
    # Optional address suffix.
    address_adjunct = DataElementField(type='an', max_length=512, required=False, _d='Kommunikationsadresszusatz')
    # Optional filter function identifier (exactly 3 chars).
    filter_function = DataElementField(type='an', length=3, required=False, _d='Filterfunktion')
    # Optional version number of the filter function.
    filter_function_version = DataElementField(type='num', max_length=3, required=False, _d='Version der Filterfunktion')
def eval_with_output_tfms(csv_path, model_config_map, checkpoint_path, labelmap, window_size, num_workers, min_segment_dur, n_timebin_from_onoffset=N_TIMEBINS_FROM_ONOFFSET, split='test', spect_scaler_path=None, device='cuda', spect_key='s', timebins_key='t', logger=None, to_annot=False):
    """Evaluate vak model(s) on one split, once per output-cleanup transform.

    For each model in ``model_config_map``: run prediction over the split,
    then for every cleanup type in ``CLEANUP_TYPES`` convert the network
    output into labeled segments, compute the model's metrics plus a
    boundary-error percentage, and optionally build crowsetta Annotations.

    Returns:
        tuple: ``(eval_df, annots_by_cleanup)`` — a DataFrame of per-cleanup
        mean metrics, and a dict mapping cleanup type to Annotation lists
        (``None`` when ``to_annot`` is False).
    """
    # Deferred imports keep vak/crowsetta out of module import time.
    from crowsetta import Sequence, Annotation
    from vak import io, models, transforms
    from vak.datasets.vocal_dataset import VocalDataset
    import vak.device
    import vak.files
    from vak.labeled_timebins import lbl_tb2segments
    from vak.logging import log_or_print
    if spect_scaler_path:
        log_or_print(f'loading spect scaler from path: {spect_scaler_path}', logger=logger, level='info')
        spect_standardizer = joblib.load(spect_scaler_path)
    else:
        log_or_print(f'not using a spect scaler', logger=logger, level='info')
        spect_standardizer = None
    # Two dataset views over the same split: 'eval' transforms supply padded
    # ground truth, 'predict' transforms feed the network.
    item_transform = transforms.get_defaults('eval', spect_standardizer, window_size=window_size, return_padding_mask=True)
    eval_dataset = VocalDataset.from_csv(csv_path=csv_path, split=split, labelmap=labelmap, spect_key=spect_key, timebins_key=timebins_key, item_transform=item_transform)
    eval_data = torch.utils.data.DataLoader(dataset=eval_dataset, shuffle=False, batch_size=1, num_workers=num_workers)
    df = pd.read_csv(csv_path)
    timebin_dur = io.dataframe.validate_and_get_timebin_dur(df)
    pred_transform = transforms.get_defaults('predict', spect_standardizer, window_size=window_size, return_padding_mask=False)
    df_split = df[(df['split'] == split)]
    pred_dataset = VocalDataset(csv_path=csv_path, spect_paths=df_split['spect_path'].values, annots=None, labelmap=labelmap, spect_key=spect_key, timebins_key=timebins_key, item_transform=pred_transform)
    pred_data = torch.utils.data.DataLoader(dataset=pred_dataset, shuffle=False, batch_size=1, num_workers=num_workers)
    input_shape = pred_dataset.shape
    # Drop the leading dimension when the dataset yields 4-D items.
    if (len(input_shape) == 4):
        input_shape = input_shape[1:]
    if (device is None):
        device = vak.device.get_default_device()
    to_long_tensor = transforms.ToLongTensor()
    records = defaultdict(list)
    if to_annot:
        annots_by_cleanup = {cleanup: [] for cleanup in CLEANUP_TYPES}
    else:
        annots_by_cleanup = None
    models_map = models.from_model_config_map(model_config_map, num_classes=len(labelmap), input_shape=input_shape)
    for (model_name, model) in models_map.items():
        model.load(checkpoint_path)
        metrics = model.metrics
        # One prediction pass per model; each cleanup type below re-processes
        # the same raw outputs.
        pred_dict = model.predict(pred_data=pred_data, device=device)
        progress_bar = tqdm(eval_data)
        for (ind, batch) in enumerate(progress_bar):
            for cleanup_type in CLEANUP_TYPES:
                records['cleanup'].append(cleanup_type)
                (y_true, padding_mask, spect_path) = (batch['annot'], batch['padding_mask'], batch['spect_path'])
                # DataLoader with batch_size=1 wraps the path in a list.
                if (isinstance(spect_path, list) and (len(spect_path) == 1)):
                    spect_path = spect_path[0]
                t = vak.files.spect.load(spect_path)[timebins_key]
                if (len(t) == 1):
                    t = t[0]
                records['spect_path'].append(spect_path)
                y_true = y_true.to(device)
                y_true_np = np.squeeze(y_true.cpu().numpy())
                (y_true_labels, onsets_s, offsets_s) = lbl_tb2segments(y_true_np, labelmap=labelmap, t=t)
                y_true_labels = ''.join(y_true_labels.tolist())
                y_pred = pred_dict[spect_path]
                # argmax over the class dim, flatten windows, then drop the
                # padded timebins using the eval padding mask.
                y_pred = torch.argmax(y_pred, dim=1)
                y_pred = torch.flatten(y_pred)
                y_pred = y_pred.unsqueeze(0)[padding_mask]
                y_pred_np = np.squeeze(y_pred.cpu().numpy())
                # Apply the output cleanup and convert to label segments.
                (y_pred_np, y_pred_labels, pred_onsets_s, pred_offsets_s) = pred2labels(y_pred_np, labelmap, t, timebin_dur, cleanup_type=cleanup_type, min_segment_dur=min_segment_dur)
                y_pred = to_long_tensor(y_pred_np).to(device)
                metric_vals_batch = compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels)
                for (metric_name, metric_val) in metric_vals_batch.items():
                    records[metric_name].append(metric_val)
                bnd_err = boundary_err(y_pred_np, y_true_np, t, onsets_s, offsets_s, timebin_dur, n_timebin_from_onoffset, unlabeled_class=labelmap['unlabeled'])
                records['pct_boundary_err'].append(bnd_err)
                if to_annot:
                    seq = Sequence.from_keyword(labels=y_pred_labels, onsets_s=pred_onsets_s, offsets_s=pred_offsets_s, onsets_Hz=s_to_sample_num(pred_onsets_s, timebin_dur), offsets_Hz=s_to_sample_num(pred_offsets_s, timebin_dur))
                    annot = Annotation(seq=seq, audio_path=df_split.iloc[ind].audio_path, annot_path=df_split.iloc[ind].annot_path)
                    annots_by_cleanup[cleanup_type].append(annot)
    # Average every recorded metric within each cleanup type.
    eval_df = pd.DataFrame.from_records(records)
    gb = eval_df.groupby('cleanup').agg('mean')
    gb = gb.add_prefix('avg_')
    eval_df = gb.reset_index()
    return (eval_df, annots_by_cleanup)
# NOTE(review): the decorator was corrupted in the source (a bare
# ``_performer`` line, which would raise NameError at import time);
# ``@sync_performer`` is the effect-library convention for synchronous
# performers — confirm against the original module.
@sync_performer
def perform_parallel_with_pool(pool, dispatcher, parallel_effects):
    """Perform parallel effects by mapping them over a worker pool.

    Each child effect is performed with ``sync_perform``; the first failure
    is wrapped in ``FirstError`` carrying the failing effect's index.
    """
    def perform_child(index_and_effect):
        # Unpack (index, effect) as produced by enumerate below.
        index, effect = index_and_effect
        try:
            return sync_perform(dispatcher, effect)
        except Exception as e:
            raise FirstError(exception=e, index=index)
    return pool.map(perform_child, enumerate(parallel_effects.effects))
def param2stroke(param, H, W, meta_brushes):
    """Render stroke parameters into brush images on an H x W canvas (Paddle).

    The first five of the eight parameter channels are used here as
    (x0, y0, w, h, theta): normalized center, size, and rotation fraction
    of pi. NOTE(review): the remaining three channels are presumably color
    values consumed by the caller — confirm.
    """
    b = param.shape[0]
    param_list = paddle.split(param, 8, axis=1)
    (x0, y0, w, h, theta) = [item.squeeze((- 1)) for item in param_list[:5]]
    sin_theta = paddle.sin((math.pi * theta))
    cos_theta = paddle.cos((math.pi * theta))
    # Pick one of the two meta brushes per stroke: index 0 when h > w, else 1.
    index = paddle.full((b,), (- 1), dtype='int64').numpy()
    index[(h > w).numpy()] = 0
    index[(h <= w).numpy()] = 1
    meta_brushes_resize = F.interpolate(meta_brushes, (H, W)).numpy()
    brush = paddle.to_tensor(meta_brushes_resize[index])
    # Rows of the 2x3 affine matrix (rotation + per-axis scale + translation)
    # used to warp the brush template onto the canvas via an affine grid.
    warp_00 = (cos_theta / w)
    warp_01 = ((sin_theta * H) / (W * w))
    warp_02 = ((((1 - (2 * x0)) * cos_theta) / w) + ((((1 - (2 * y0)) * sin_theta) * H) / (W * w)))
    warp_10 = (((- sin_theta) * W) / (H * h))
    warp_11 = (cos_theta / h)
    warp_12 = ((((1 - (2 * y0)) * cos_theta) / h) - ((((1 - (2 * x0)) * sin_theta) * W) / (H * h)))
    warp_0 = paddle.stack([warp_00, warp_01, warp_02], axis=1)
    warp_1 = paddle.stack([warp_10, warp_11, warp_12], axis=1)
    warp = paddle.stack([warp_0, warp_1], axis=1)
    grid = nn.functional.affine_grid(warp, [b, 3, H, W])
    brush = nn.functional.grid_sample(brush, grid)
    return brush
class Item(Resource):
    """Client-side wrapper for the Items REST endpoint."""

    def __init__(self, client=None):
        super(Item, self).__init__(client)
        self.base_url = (URL.V1 + URL.ITEM_URL)

    # ``data=None`` replaces the mutable-default ``data={}`` of the original;
    # an empty dict is substituted per call, so callers see no difference.
    def create(self, data=None, **kwargs):
        """Create an item with the given payload."""
        return self.post_url(self.base_url, data if data is not None else {}, **kwargs)

    def fetch(self, item_id, data=None, **kwargs):
        """Fetch a single item by id."""
        return super(Item, self).fetch(item_id, data if data is not None else {}, **kwargs)

    def all(self, data=None, **kwargs):
        """List items."""
        return super(Item, self).all(data if data is not None else {}, **kwargs)

    def edit(self, item_id, data=None, **kwargs):
        """Patch an item by id."""
        url = '{}/{}'.format(self.base_url, item_id)
        return self.patch_url(url, data if data is not None else {}, **kwargs)

    def delete(self, item_id, **kwargs):
        """Delete an item by id."""
        url = '{}/{}'.format(self.base_url, item_id)
        return self.delete_url(url, {}, **kwargs)
# NOTE(review): the decorators were corrupted in the source (bare ``.skipif``,
# ``(slots=True)`` and ``_property`` lines); reconstructed here as
# ``@pytest.mark.skipif``, ``@attr.s(slots=True)`` and
# ``@functools.cached_property`` — confirm against the original test suite.
@pytest.mark.skipif((not PY_3_8_PLUS), reason='cached_property is 3.8+')
def test_slots_getattr_in_superclass__is_called_for_missing_attributes_when_cached_property_present():
    """A slotted superclass's __getattr__ still handles missing attributes
    when the subclass defines a cached_property."""
    @attr.s(slots=True)
    class A():
        x = attr.ib()

        def __getattr__(self, item):
            # Fallback: echo the missing attribute's name.
            return item

    @attr.s(slots=True)
    class B(A):
        @functools.cached_property
        def f(self):
            return self.x

    b = B(1)
    assert (b.f == 1)
    assert (b.z == 'z')
def generate_model_output_multiple_sessions() -> Dict[(str, torch._tensor.Tensor)]:
    """Fixture: fake ranking-model output spanning two sessions, with the
    NDCG values expected for the exponential / non-exponential gain variants."""
    predictions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5, 0.1, 0.2, 0.3]])
    session_ids = torch.tensor([[1, 1, 1, 1, 1, 2, 2, 2]])
    labels = torch.tensor([[0.0, 1.0, 0.0, 0.0, 2.0, 2.0, 1.0, 0.0]])
    weights = torch.tensor([[1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 3.0]])
    return {
        'predictions': predictions,
        'session_ids': session_ids,
        'labels': labels,
        'weights': weights,
        'expected_ndcg_exp': torch.tensor([0.6748]),
        'expected_ndcg_non_exp': torch.tensor([0.6463]),
    }
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count and mean."""

    def __init__(self):
        # Declare the attributes, then zero them via reset().
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def _weighted_calibration_update(input: torch.Tensor, target: torch.Tensor, weight: Union[(float, int, torch.Tensor)], *, num_tasks: int) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Compute the weighted sums of predictions and targets over the last dim.

    A scalar weight scales both sums; a tensor weight must match the input's
    size and is applied element-wise before summing.
    """
    _weighted_calibration_input_check(input, target, weight, num_tasks=num_tasks)
    if isinstance(weight, (float, int)):
        # Scalar weight: sum first, then scale.
        return weight * torch.sum(input, dim=-1), weight * torch.sum(target, dim=-1)
    if isinstance(weight, torch.Tensor) and input.size() == weight.size():
        # Element-wise weight: scale first, then sum.
        return torch.sum(weight * input, dim=-1), torch.sum(weight * target, dim=-1)
    raise ValueError(f'Weight must be either a float value or a tensor that matches the input tensor size. Got {weight} instead.')
def _test_rx(dut, divisor):
    """Simulation testbench (generator coroutine) for a UART receiver.

    Drives ``dut.rx`` one bit per ``divisor`` ticks using 8N1 framing
    (start bit, 8 data bits LSB-first, stop bit) and checks the
    ready/data/ack handshake, ready latency, and a full text round-trip
    with a concurrent reader callback.
    """

    def tick(cb=None):
        # Advance ``divisor`` cycles, running ``cb`` each cycle when given.
        for _ in range(divisor):
            if (cb is not None):
                (yield from cb())
            else:
                (yield)

    def bit(d, cb=None):
        # Hold the rx line at ``d`` for one bit period.
        (yield dut.rx.eq(d))
        (yield from tick(cb))

    def bits(d, cb=None):
        for dd in d:
            (yield from bit(dd, cb))

    def byte(d, cb=None):
        # 8N1 frame: start bit (0), data bits LSB-first, stop bit (1).
        dd = [int(c) for c in '{:08b}'.format(d)[::(- 1)]]
        (yield from bit(0, cb))
        (yield from bits(dd, cb))
        (yield from bit(1, cb))

    def ack():
        # Pulse ack for one cycle to consume the received byte.
        (yield dut.ack.eq(1))
        (yield)
        (yield dut.ack.eq(0))

    # Idle line (all ones): the receiver must not assert ready.
    (yield from bits([1, 1, 1, 1]))
    assert ((yield dut.ready) == 0)
    # 85 == 0b01010101, an alternating bit pattern.
    (yield from byte(85))
    (yield from tick())
    assert ((yield dut.ready) == 1)
    assert ((yield dut.data) == 85)
    (yield from ack())
    (yield from byte(85))
    # ready must assert within 10 cycles of the frame's end.
    for latency in range(10):
        if ((yield dut.ready) == 1):
            break
        (yield)
    else:
        raise Exception('ready latency exceeds 10 clock cycles')
    print('ready latency is {} cycles'.format(latency))
    (yield from ack())
    out = []
    last_ready = [False]

    def getbyte():
        # Per-cycle callback: grab a byte on each rising edge of ready.
        if ((yield dut.ready) == 1):
            if (not last_ready[(- 1)]):
                out.append((yield dut.data))
                (yield from ack())
                last_ready.append(True)
            else:
                (yield)
        else:
            last_ready.append(False)
            (yield)

    (yield from tick())
    text = 'Lorem ipsum dolor sit amet'
    print('Sending "{}" to RX'.format(text))
    for c in text:
        (yield from byte(ord(c), getbyte))
    received = ''.join((chr(c) for c in out))
    print('Received: "{}"'.format(received))
    assert (received == text)
def _get_new_logger(name, filename=None):
    """Return a logger writing to stderr, or to *filename* when given.

    Note: a new handler is attached on every call, so calling twice with
    the same name duplicates output.
    """
    new_logger = logging.getLogger(name)
    handler = logging.StreamHandler() if filename is None else logging.FileHandler(filename)
    handler.setFormatter(LOG_FORMATTER)
    new_logger.addHandler(handler)
    return new_logger
class PointnetFPModule(nn.Module):
    """PointNet++ feature-propagation module.

    Interpolates features from a sparse ('known') point set onto a denser
    ('unknown') one and refines them with a shared MLP.
    """

    def __init__(self, mlp, bn=True):
        # mlp: list of channel sizes for the shared MLP; bn: use batch norm.
        super(PointnetFPModule, self).__init__()
        self.mlp = build_shared_mlp(mlp, bn=bn)

    def forward(self, unknown, known, unknow_feats, known_feats):
        """Propagate ``known_feats`` onto the ``unknown`` points.

        Uses inverse-distance weights over the 3 nearest known neighbours;
        when ``known`` is None the known features are broadcast to every
        unknown point. The unknown points' own features (``unknow_feats``),
        when given, are concatenated channel-wise before the MLP.
        """
        if (known is not None):
            (dist, idx) = pointnet2_utils.three_nn(unknown, known)
            dist_recip = (1.0 / (dist + 1e-08))  # epsilon guards divide-by-zero
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = (dist_recip / norm)  # normalized inverse-distance weights
            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            interpolated_feats = known_feats.expand(*(known_feats.size()[0:2] + [unknown.size(1)]))
        if (unknow_feats is not None):
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)
        else:
            new_features = interpolated_feats
        # The shared MLP expects a trailing singleton spatial dimension.
        new_features = new_features.unsqueeze((- 1))
        new_features = self.mlp(new_features)
        return new_features.squeeze((- 1))
class TestReadFunc(unittest.TestCase):
    """Exercise ``read`` with a file object, a path string and a Path object."""

    def setUp(self):
        # Persist the shared fixture text to a temp file for each test.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            tmp.write(TEXT.encode('utf-8'))
        finally:
            tmp.close()
        self._path_str = tmp.name
        self._path_obj = pathlib.Path(self._path_str)

    def tearDown(self):
        self._path_obj.unlink()

    def test_read_from_file(self):
        with open(self._path_str, mode='rb') as fobj:
            doc = read(fobj)
        test_document(doc, self)

    def test_read_from_path_str(self):
        test_document(read(self._path_str), self)

    def test_read_from_path_obj(self):
        test_document(read(self._path_obj), self)

    def test_read_with_custom_value_converter(self):
        # Both sections must pick up the converter's custom float decoder.
        converter = ValueConverter(float_decoder=Decimal)
        doc = read(self._path_obj, value_converter=converter)
        for section in (doc.sections[0], doc.sections[1]):
            self.assertEqual(Decimal, section.value_converter.float_decoder)
# NOTE(review): the decorator was corrupted in the source (a bare
# ``_settings(...)`` line); reconstructed as ``@override_settings`` based on
# the Django settings-override convention — confirm against the project.
@override_settings(PRETIX_WEBHOOK_SECRET='secret')
def test_pretix_webhook_does_not_allow_method(rest_api_client):
    """The pretix webhook endpoint rejects non-POST methods with 405."""
    rest_api_client.basic_auth('pretix', 'secret')
    for method in ['get', 'delete', 'patch']:
        response = getattr(rest_api_client, method)(reverse('pretix-webhook'))
        assert (response.status_code == 405)
# NOTE(review): the marker was corrupted in the source (a bare
# ``.integration`` line, a SyntaxError); reconstructed as
# ``@pytest.mark.integration``.
@pytest.mark.integration
def test_import_and_delete_records(simple_project):
    """Round-trip: import three records, checking each return_content mode,
    then delete them and confirm the deletion count."""
    new_record_ids = [4, 5, 6]
    test_records = [{'record_id': i} for i in new_record_ids]
    res = simple_project.import_records(test_records)
    assert (res['count'] == len(test_records))
    res = simple_project.import_records(test_records, return_content='ids')
    assert (len(res) == len(test_records))
    res = simple_project.import_records(test_records, return_content='nothing')
    assert (res == [{}])
    res = simple_project.delete_records(new_record_ids)
    assert (res == 3)
class AIFFChunk(IffChunk):
    """IFF chunk subclass implementing the big-endian AIFF header layout."""

    # Both methods below take ``cls`` — the @classmethod decorators were
    # stripped in the original source and are restored here.
    @classmethod
    def parse_header(cls, header):
        """Unpack an 8-byte chunk header into (id, data_size)."""
        return struct.unpack('>4sI', header)

    @classmethod
    def get_class(cls, id):
        # FORM chunks contain sub-chunks and need their own class.
        if (id == 'FORM'):
            return AIFFFormChunk
        else:
            return cls

    def write_new_header(self, id_, size):
        """Write a fresh chunk header at the current file position."""
        self._fileobj.write(pack('>4sI', id_, size))

    def write_size(self):
        """Write only the (updated) 4-byte size field."""
        self._fileobj.write(pack('>I', self.data_size))
def setup_kubernetes(kubeconfig_path):
    """Build a Kubernetes ApiClient from a kubeconfig file.

    Falls back to the default kubeconfig location when *kubeconfig_path* is
    None; raises when the merged configuration is empty.
    """
    path = kubeconfig_path if kubeconfig_path is not None else config.KUBE_CONFIG_DEFAULT_LOCATION
    merged = config.kube_config.KubeConfigMerger(path)
    if merged.config is None:
        raise Exception('Invalid kube-config file: %s. No configuration found.' % path)
    loader = config.kube_config.KubeConfigLoader(config_dict=merged.config)
    client_config = client.Configuration()
    loader.load_and_set(client_config)
    return client.ApiClient(configuration=client_config)
def test_filter(hatch, helpers, temp_dir, config_file):
    """`hatch env run --filter` must select only the matrix environments
    whose overrides match the JSON filter (here: version 42, which gets
    foo-bar-option=true)."""
    # Disable test scaffolding in the project template.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    project = Project(project_path)
    helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
    # Matrix with two versions; only version '42' receives foo-bar-option.
    helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}], 'overrides': {'matrix': {'version': {'foo-bar-option': {'value': True, 'if': ['42']}}}}})
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        result = hatch('env', 'run', '--env', 'test', '--filter', '{"foo-bar-option":true}', '--', 'python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])")
    assert (result.exit_code == 0), result.output
    # Only the test.42 environment should have been created and run.
    assert (result.output == helpers.dedent('\n test.42 \n Creating environment: test.42\n Checking dependencies\n '))
    output_file = (project_path / 'test.txt')
    assert output_file.is_file()
    env_data_path = ((data_path / 'env') / 'virtual')
    assert env_data_path.is_dir()
    project_data_path = (env_data_path / project_path.name)
    assert project_data_path.is_dir()
    storage_dirs = list(project_data_path.iterdir())
    assert (len(storage_dirs) == 1)
    storage_path = storage_dirs[0]
    # 8-character storage key (presumably a hash of the project path — confirm).
    assert (len(storage_path.name) == 8)
    env_dirs = list(storage_path.iterdir())
    assert (len(env_dirs) == 1)
    env_path = env_dirs[0]
    assert (env_path.name == 'test.42')
    # The script wrote the interpreter path, which must live in the new env.
    python_path = str(output_file.read_text()).strip()
    assert (str(env_path) in python_path)
def test_section_descendants(db):
    """``instance.descendants`` must match a manual ordered traversal of
    pages -> page elements -> nested questionsets/questions (two levels)."""
    instances = Section.objects.all()
    for instance in instances:
        descendant_ids = []
        for section_page in instance.section_pages.order_by('order'):
            page = section_page.page
            descendant_ids.append(page.id)
            # Direct children of the page, merged and sorted by order.
            page_elements = sorted([*page.page_questionsets.all(), *page.page_questions.all()], key=(lambda e: e.order))
            for page_element in page_elements:
                element = page_element.element
                descendant_ids.append(element.id)
                try:
                    # Questions have no nested elements -> AttributeError.
                    element_elements = sorted([*element.questionset_questionsets.all(), *element.questionset_questions.all()], key=(lambda e: e.order))
                except AttributeError:
                    element_elements = []
                for element_element in element_elements:
                    element2 = element_element.element
                    descendant_ids.append(element2.id)
                    try:
                        element_elements2 = sorted([*element2.questionset_questionsets.all(), *element2.questionset_questions.all()], key=(lambda e: e.order))
                    except AttributeError:
                        element_elements2 = []
                    for element_element2 in element_elements2:
                        descendant_ids.append(element_element2.element.id)
        assert ([d.id for d in instance.descendants] == descendant_ids)
def SVHN(train=True, batch_size=None, augm_flag=True, val_size=None):
    """Build SVHN DataLoader(s).

    Args:
        train: use the train split (shuffled) instead of the test split.
        batch_size: defaults to the module-level train/test batch size.
        augm_flag: randomly mix in random-crop augmentation (train only).
        val_size: when given for the test split, carve off a validation set.

    Returns:
        A single DataLoader, or ``(val_loader, test_loader)`` when
        ``val_size`` is set on the test split.
    """
    if batch_size is None:  # was ``== None`` — identity check is the idiom
        batch_size = train_batch_size if train else test_batch_size
    split = 'train' if train else 'test'
    transform_base = [transforms.ToTensor()]
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4, padding_mode='edge')] + transform_base)
    transform_test = transforms.Compose(transform_base)
    # Per-sample random choice between augmented and plain transforms.
    transform_train = transforms.RandomChoice([transform_train, transform_test])
    transform = transform_train if (augm_flag and train) else transform_test
    dataset = datasets.SVHN(path, split=split, transform=transform, download=False)
    if train or (val_size is None):
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=train, num_workers=4)
    # Carve a validation subset out of the test split; these loaders are
    # never shuffled (train is False on this branch).
    test_size = len(dataset) - val_size
    (dataset_val, dataset_test) = data_utils.random_split(dataset, (val_size, test_size))
    val_loader = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, shuffle=False, num_workers=4)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size, shuffle=False, num_workers=4)
    return (val_loader, test_loader)
class Effect6764(BaseEffect):
    """Passive skill effect: per skill level, boosts ice-harvesting drones'
    cycle time ('duration' via rofBonus) and max velocity."""
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # NOTE(review): defined without ``self`` — presumably invoked as an
        # unbound/static handler by the effect framework; confirm.
        lvl = src.level
        fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Ice Harvesting Drone Specialization')), 'duration', (src.getModifiedItemAttr('rofBonus') * lvl), **kwargs)
        fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Ice Harvesting Drone Specialization')), 'maxVelocity', (src.getModifiedItemAttr('maxVelocityBonus') * lvl), **kwargs)
# NOTE(review): the decorator was corrupted in the source (a bare ``_tag()``
# line); reconstructed as ``@register.simple_tag()`` per the Django
# template-tag convention — confirm against the original module.
@register.simple_tag()
def vendor(vendor_key):
    """Render the <script>/<link> tags for a configured vendor bundle.

    Serves from the CDN (with SRI integrity attributes) when
    ``settings.VENDOR_CDN`` is truthy, otherwise from local static files.
    """
    vendor_config = settings.VENDOR[vendor_key]
    tags = []
    if ('js' in vendor_config):
        for file in vendor_config['js']:
            if settings.VENDOR_CDN:
                tag = '<script src="{url}/{path}" integrity="{sri}" crossorigin="anonymous"></script>'.format(url=vendor_config['url'].rstrip('/'), path=file['path'], sri=(file['sri'] if ('sri' in file) else ''))
            else:
                tag = '<script src="{static_url}/{vendor_key}/{path}"></script>'.format(static_url=settings.STATIC_URL.rstrip('/'), vendor_key=vendor_key, path=file['path'])
            tags.append(tag)
    if ('css' in vendor_config):
        for file in vendor_config['css']:
            if settings.VENDOR_CDN:
                tag = '<link rel="stylesheet" href="{url}/{path}" integrity="{sri}" crossorigin="anonymous" />'.format(url=vendor_config['url'].rstrip('/'), path=file['path'], sri=(file['sri'] if ('sri' in file) else ''))
            else:
                tag = '<link rel="stylesheet" href="{static_url}/{vendor_key}/{path}" />'.format(static_url=settings.STATIC_URL.rstrip('/'), vendor_key=vendor_key, path=file['path'])
            tags.append(tag)
    # mark_safe is OK here: paths/SRI come from trusted settings, not users.
    return mark_safe(''.join(tags))
class TestDebugging():
    """Ensure a custom --pdbcls debugger is driven correctly when a subtest
    assertion fails under --pdb."""

    class _FakePdb():
        """Minimal Pdb replacement recording its lifecycle calls."""
        quitting: bool = False
        # Class-level on purpose: assertions read it across instances after
        # the inner pytest run.
        calls: list[str] = []

        def __init__(self, *_: object, **__: object) -> None:
            self.calls.append('init')

        def reset(self) -> None:
            self.calls.append('reset')

        def interaction(self, *_: object) -> None:
            self.calls.append('interaction')

    # NOTE(review): decorator corrupted in the source (bare ``(autouse=True)``
    # line); reconstructed as ``@pytest.fixture(autouse=True)``.
    @pytest.fixture(autouse=True)
    def cleanup_calls(self) -> None:
        self._FakePdb.calls.clear()

    def test_pdb_fixture(self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
        # NOTE(review): the makepyfile strings' indentation was collapsed in
        # the source (which would generate invalid Python); reconstructed
        # with standard 4-space indentation — confirm against the original.
        pytester.makepyfile('\n    def test(subtests):\n        with subtests.test():\n            assert 0\n    ')
        self.runpytest_and_check_pdb(pytester, monkeypatch)

    def test_pdb_unittest(self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
        pytester.makepyfile('\n    from unittest import TestCase\n    class Test(TestCase):\n        def test(self):\n            with self.subTest():\n                assert 0\n    ')
        self.runpytest_and_check_pdb(pytester, monkeypatch)

    def runpytest_and_check_pdb(self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
        """Run the generated test with --pdb and our fake debugger class,
        then verify the debugger lifecycle was exercised."""
        import pytest_subtests
        monkeypatch.setattr(pytest_subtests, '_CustomPdb', self._FakePdb, raising=False)
        result = pytester.runpytest('--pdb', '--pdbcls=pytest_subtests:_CustomPdb')
        result.stdout.fnmatch_lines('*entering PDB*')
        assert (self._FakePdb.calls == ['init', 'reset', 'interaction'])
def _convert_xml(in_path: str, out_path: str):
    """Extract the text of every ``<seg ...>...</seg>`` line in *in_path*
    and write one stripped segment per line to *out_path*."""
    with open(in_path) as src, open(out_path, 'w') as dst:
        for raw in src:
            line = raw.strip()
            if line.startswith('<seg'):
                # Drop the closing tag, then split off the opening tag at '">'.
                parts = line.replace('</seg>', '').split('">')
                assert (len(parts) == 2)
                dst.write(parts[1].strip() + '\n')
def main():
    """Train NLL co-regularized relation-extraction models on TACRED.

    Parses hyperparameters, builds an ``args.n_model``-headed NLLModel,
    reads the train/dev/test splits (plus the revised dev/test variants),
    and launches training with evaluation on all benchmarks.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='./data', type=str)
    parser.add_argument('--model_name_or_path', default='bert-base-cased', type=str)
    parser.add_argument('--max_seq_length', default=512, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--learning_rate', default=6e-05, type=float)
    parser.add_argument('--beta1', type=float, default=0.8)
    parser.add_argument('--beta2', type=float, default=0.98)
    parser.add_argument('--eps', type=float, default=1e-06)
    parser.add_argument('--gradient_accumulation_steps', default=1, type=int)
    parser.add_argument('--max_grad_norm', default=1.0, type=float)
    parser.add_argument('--warmup_ratio', default=0.06, type=float)
    parser.add_argument('--num_train_epochs', default=5.0, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_class', type=int, default=42)
    parser.add_argument('--dropout_prob', type=float, default=0.1)
    parser.add_argument('--project_name', type=str, default='NLL-IE-RE')
    parser.add_argument('--n_model', type=int, default=2)
    parser.add_argument('--alpha', type=float, default=5.0)
    parser.add_argument('--alpha_warmup_ratio', default=0.1, type=float)
    args = parser.parse_args()
    wandb.init(project=args.project_name)
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    args.n_gpu = torch.cuda.device_count()
    args.device = device
    set_seed(args)
    config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=args.num_class)
    # Trade compute for memory during backprop.
    config.gradient_checkpointing = True
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    model = NLLModel(args, config)
    train_file = os.path.join(args.data_dir, 'train.json')
    dev_file = os.path.join(args.data_dir, 'dev.json')
    test_file = os.path.join(args.data_dir, 'test.json')
    dev_rev_file = os.path.join(args.data_dir, 'dev_rev.json')
    test_rev_file = os.path.join(args.data_dir, 'test_rev.json')
    processor = TACREDProcessor(args, tokenizer)
    train_features = processor.read(train_file)
    dev_features = processor.read(dev_file)
    test_features = processor.read(test_file)
    dev_rev_features = processor.read(dev_rev_file)
    test_rev_features = processor.read(test_rev_file)
    # New special tokens added by the processor require resized embeddings.
    if (len(processor.new_tokens) > 0):
        model.resize_token_embeddings(len(tokenizer))
    benchmarks = (('dev', dev_features), ('test', test_features), ('dev_rev', dev_rev_features), ('test_rev', test_rev_features))
    train(args, model, train_features, benchmarks)
# NOTE(review): the marker was corrupted in the source (bare ``.parametrize``
# line); reconstructed as ``@pytest.mark.parametrize``.
@pytest.mark.parametrize('given, expected, uncertainty', [(0.0, 1.0, 0.0), (((1.0 / 2.0) * np.pi), 0.0, 0.0), (np.pi, (+ 1.0), 0.0), (((3.0 / 2.0) * np.pi), 0.0, 0.0), ((2.0 * np.pi), (+ 1.0), 0.0)])
def test_figure_eight(given, expected, uncertainty):
    """figure_eight sampled at quarter-turn angles matches the expected
    values within the given tolerance."""
    assert (figure_eight(given) == pytest.approx(expected, uncertainty))
def myConvTranspose(nf, n_dims, prefix=None, suffix=None, ks=3, strides=1, kernel_initializer=None, bias_initializer=None):
    """Build a 2D or 3D transposed-convolution layer with 'same' padding.

    Args:
        nf: number of output filters.
        n_dims: spatial dimensionality, must be 2 or 3.
        prefix, suffix: optional name fragments; empty/None parts are skipped.
        ks: kernel size.
        strides: int or tuple; an int is broadcast to all spatial dims.
        kernel_initializer, bias_initializer: Keras initializers
            (default 'glorot_uniform' / 'zeros').

    Returns:
        A Conv2DTranspose or Conv3DTranspose layer.

    Raises:
        ValueError: if n_dims is not 2 or 3 (previously this silently
            returned None, deferring the failure to the caller).
    """
    if kernel_initializer is None:
        kernel_initializer = 'glorot_uniform'
    if bias_initializer is None:
        bias_initializer = 'zeros'
    if n_dims not in (2, 3):
        raise ValueError('n_dims must be 2 or 3, got {}'.format(n_dims))
    # Broadcast a scalar stride to every spatial dimension.
    if not isinstance(strides, tuple):
        strides = (strides,) * n_dims
    layer_cls = Conv2DTranspose if (n_dims == 2) else Conv3DTranspose
    # Layer name: join non-empty parts, e.g. "enc_conv2Dtrans_0".
    name = '_'.join(str(part) for part in [prefix, 'conv{}Dtrans'.format(n_dims), suffix]
                    if (part is not None) and (len(str(part)) > 0))
    return layer_cls(nf, kernel_size=ks, padding='same', strides=strides,
                     kernel_initializer=kernel_initializer,
                     bias_initializer=bias_initializer, name=name)
def _get_entityv2_instances_meta():
    """Build Detectron2-style metadata for the EntityV2 instance categories.

    Returns a dict with:
      * 'thing_dataset_id_to_contiguous_id': dataset category id -> 0-based index
      * 'thing_classes': category names, in the same order
    """
    id_to_contiguous = {}
    class_names = []
    for contiguous_idx, category in enumerate(EntityV2_instance_CATEGORIES):
        id_to_contiguous[category['id']] = contiguous_idx
        class_names.append(category['name'])
    return {
        'thing_dataset_id_to_contiguous_id': id_to_contiguous,
        'thing_classes': class_names,
    }
# NOTE(review): decorator prefixes lost in extraction -- presumably
# @pytest.mark.parametrize and @pytest.mark.filterwarnings.
.parametrize('examplefun', examplefunctions)
.filterwarnings('ignore:numpy.dtype size changed')
.filterwarnings('ignore:numpy.ufunc size changed')
def test_example(examplefun, capsys, recwarn):
    """Run every example function; fail if it prints nothing, writes to
    stderr, or emits a warning (beyond the filtered numpy ABI-size ones)."""
    examplefun()
    captured = capsys.readouterr()
    # (failure-condition, message-format) pairs; any truthy condition fails the test.
    failconditions = [((not (len(captured.out) > 0)), 'Example {} did not print any results\n'), (captured.err, 'Example {} wrote to stderr\n'), (check_warn(recwarn, examplefun), 'Example {} produced a warning.\n')]
    for (failed, msgfmt) in failconditions:
        if failed:
            # Include everything captured so the failure is diagnosable from the log.
            pytest.fail((msgfmt.format(examplefun.__name__) + 'Captured output:\n{}\nCaptured stderr:\n{}\nCaptured warnings:\n{}\n'.format(captured.out, captured.err, [w.message for w in recwarn])))
# NOTE(review): decorator prefix lost in extraction -- presumably a behave
# @given step matching 'a tab_stops having {count} tab stops'.
('a tab_stops having {count} tab stops')
def given_a_tab_stops_having_count_tab_stops(context, count):
    """Load the fixture paragraph whose format has `count` tab stops and
    stash its TabStops object on the behave context."""
    # In the 'tab-stops' fixture, paragraph 0 has zero tab stops and paragraph 1 has three.
    paragraph_idx = {'0': 0, '3': 1}[count]
    document = Document(test_docx('tab-stops'))
    paragraph_format = document.paragraphs[paragraph_idx].paragraph_format
    context.tab_stops = paragraph_format.tab_stops
def test_thread_cache_deref() -> None:
    """The worker-thread cache must drop its reference to the job once it
    completes, so the job object can be garbage collected."""
    destroyed = [False]

    class Job():
        def __call__(self) -> int:
            return 42

        def __del__(self) -> None:
            # Flipped only when the last reference to the Job is gone.
            destroyed[0] = True

    results: Queue[Outcome[int]] = Queue()

    def on_done(outcome: Outcome[int]) -> None:
        results.put(outcome)

    start_thread_soon(Job(), on_done)
    assert results.get().unwrap() == 42
    # Force collection; the cache must not be keeping the Job alive.
    gc_collect_harder()
    assert destroyed[0]
def assert_applied_techniques(output_model, acc, encoding_path, target_acc, bn_folded_acc, cle_acc, adaround_acc, results_dir):
    """Verify which AutoQuant PTQ techniques were applied and reported.

    Checks, against the per-technique accuracies supplied by the caller:
      * batchnorm folding is always applied;
      * CLE / Adaround are attempted only while the target accuracy is unmet,
        and kept only when they improve on the previous best accuracy;
      * the returned `acc` and `encoding_path` match the selected technique;
      * the diagnostics HTML marks exactly the visited workflow nodes.
    """
    html_path = os.path.join(results_dir, 'diagnostics.html')
    with open(html_path) as f:
        html_parsed = BeautifulSoup(f.read(), features='html.parser')
    # Batchnorm folding is unconditional.
    assert output_model.applied_bn_folding
    assert_html(html_parsed, {'node_batchnorm_folding': _SUCCESS, 'node_test_batchnorm_folding': _VISITED})
    # Early exit: BN folding alone already met the target -- nothing else runs.
    if (bn_folded_acc >= target_acc):
        assert (acc == bn_folded_acc)
        assert encoding_path.endswith('batchnorm_folding.encodings')
        assert (not output_model.applied_cle)
        assert (not output_model.applied_adaround)
        assert_html(html_parsed, {'node_cross_layer_equalization': _NOT_VISITED, 'node_test_cross_layer_equalization': _NOT_VISITED, 'node_adaround': _NOT_VISITED, 'node_test_adaround': _NOT_VISITED, 'node_result_fail': _NOT_VISITED, 'node_result_success': _VISITED})
        return
    # CLE is kept only if it beat BN folding.
    assert (output_model.applied_cle == (bn_folded_acc < cle_acc))
    assert_html(html_parsed, {'node_cross_layer_equalization': (_SUCCESS if output_model.applied_cle else _DISCARDED), 'node_test_cross_layer_equalization': _VISITED})
    if (cle_acc >= target_acc):
        assert (acc == cle_acc)
        assert encoding_path.endswith('cross_layer_equalization.encodings')
        assert output_model.applied_cle
        assert (not output_model.applied_adaround)
        assert_html(html_parsed, {'node_adaround': _NOT_VISITED, 'node_test_adaround': _NOT_VISITED, 'node_result_fail': _NOT_VISITED, 'node_result_success': _VISITED})
        return
    # Adaround is kept only if it matched/beat the best so far.
    assert (output_model.applied_adaround == (adaround_acc >= max(bn_folded_acc, cle_acc)))
    assert_html(html_parsed, {'node_adaround': (_SUCCESS if output_model.applied_adaround else _DISCARDED), 'node_test_adaround': _VISITED})
    if (adaround_acc >= target_acc):
        assert (acc == adaround_acc)
        assert encoding_path.endswith('adaround.encodings')
        assert output_model.applied_adaround
        assert_html(html_parsed, {'node_result_fail': _NOT_VISITED, 'node_result_success': _VISITED})
        return
    # Target never met: the run fails but still reports the best technique.
    assert_html(html_parsed, {'node_result_fail': _VISITED, 'node_result_success': _NOT_VISITED})
    assert (acc == max(bn_folded_acc, cle_acc, adaround_acc))
    if (max(bn_folded_acc, cle_acc, adaround_acc) == bn_folded_acc):
        assert encoding_path.endswith('batchnorm_folding.encodings')
    elif (max(bn_folded_acc, cle_acc, adaround_acc) == cle_acc):
        assert encoding_path.endswith('cross_layer_equalization.encodings')
    else:
        assert encoding_path.endswith('adaround.encodings')
class WeightTensorUtils():
    """Helpers for locating and updating weight tensors of TF (v1 graph) ops.

    NOTE(review): these methods take no `self` -- presumably @staticmethod
    decorators were lost in extraction; confirm before calling on instances.
    """

    def get_tensor_index_in_given_op(input_op: tf.Operation) -> int:
        """Return the input index at which `input_op` receives its weights.

        Raises ValueError for op types not listed in constants.OP_WEIGHT_INDICES.
        """
        if (input_op.type not in constants.OP_WEIGHT_INDICES):
            raise ValueError((('Op type: ' + input_op.type) + ' does not contain weights!'))
        return constants.OP_WEIGHT_INDICES[input_op.type]

    def get_tensor_shape(input_op: tf.Operation) -> List[int]:
        """Return the shape of the op's weight input tensor."""
        weight_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op)
        return input_op.inputs[weight_tensor_index].shape

    def get_read_op(input_op: tf.Operation) -> tf.Operation:
        """Return the op (e.g. ReadVariableOp) that produces the weight input."""
        wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op)
        return input_op.inputs[wt_tensor_index].op

    def get_wt_tensor(op: tf.Operation) -> tf.Tensor:
        """Return the underlying weight variable tensor feeding the read op."""
        wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(op)
        wt_var_read_op = op.inputs[wt_tensor_index].op
        wt_tensor = wt_var_read_op.inputs[constants.OP_VAR_WEIGHT_INDEX]
        return wt_tensor

    def get_wt_as_read_var_tensor(op: tf.Operation) -> tf.Tensor:
        """Return the readable weight tensor, skipping any quantizer wrapper op."""
        wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(op)
        get_wt_as_read_var_tensor = op.inputs[wt_tensor_index]
        # If a quantization op was inserted between variable and consumer,
        # step through it to the original read.
        if (get_wt_as_read_var_tensor.op.type in ['QcQuantize', 'QcQuantizePerChannel']):
            get_wt_as_read_var_tensor = get_wt_as_read_var_tensor.op.inputs[0]
        assert (get_wt_as_read_var_tensor.op.type in ['ReadVariableOp', 'Const'])
        return get_wt_as_read_var_tensor

    def get_tensor_as_numpy_data(sess: tf.compat.v1.Session, op: tf.Operation) -> np.array:
        """Evaluate and return the op's weights as a numpy array."""
        wt_tensor = WeightTensorUtils.get_wt_as_read_var_tensor(op)
        numpy_data = sess.run(wt_tensor)
        return numpy_data

    def update_tensor_for_op(sess: tf.compat.v1.Session, op: tf.Operation, tensor_as_numpy_array):
        """Assign new weight values (same shape) to the variable feeding `op`."""
        assert (WeightTensorUtils.get_tensor_shape(op) == tensor_as_numpy_array.shape)
        with sess.graph.as_default():
            wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(op)
            wt_var_read_op = op.inputs[wt_tensor_index].op
            # Skip an inserted quantizer op, if any, to reach the variable read.
            if (wt_var_read_op.type in ['QcQuantize', 'QcQuantizePerChannel']):
                wt_tensor = wt_var_read_op.inputs[0].op.inputs[constants.OP_VAR_WEIGHT_INDEX]
            else:
                wt_tensor = wt_var_read_op.inputs[constants.OP_VAR_WEIGHT_INDEX]
            assert (wt_tensor is not None), ('Error, no weight tensor found for this op', op.name)
            # Match the tf.Variable by name and assign in-session.
            wt_as_var = [var for var in tf.compat.v1.global_variables() if (var.name == wt_tensor.name)][0]
            sess.run(tf.compat.v1.assign(wt_as_var, tensor_as_numpy_array))
def test_cells():
    """Exercise Table.cells(): argument validation, version limits, timestamps."""
    row_key = b'cell-test'
    col = b'cf1:col1'
    # Write two versions of the same cell: an old timestamped value, then a fresh one.
    table.put(row_key, {col: b'old'}, timestamp=1234)
    table.put(row_key, {col: b'new'})
    # Invalid argument types/values must be rejected up front.
    with assert_raises(TypeError):
        table.cells(row_key, col, versions='invalid')
    with assert_raises(TypeError):
        table.cells(row_key, col, versions=3, timestamp='invalid')
    with assert_raises(ValueError):
        table.cells(row_key, col, versions=0)
    # versions=1 returns only the newest value.
    newest_only = table.cells(row_key, col, versions=1)
    assert len(newest_only) == 1
    assert newest_only[0] == b'new'
    # The default returns all versions, newest first.
    all_versions = table.cells(row_key, col)
    assert len(all_versions) == 2
    assert all_versions[0] == b'new'
    assert all_versions[1] == b'old'
    # A timestamp upper bound selects only the old version, here with its timestamp.
    old_only = table.cells(row_key, col, timestamp=2345, include_timestamp=True)
    assert len(old_only) == 1
    assert old_only[0][0] == b'old'
    assert old_only[0][1] == 1234
class warmupLR(toptim._LRScheduler):
    """LR scheduler: triangular warmup to `lr` over `warmup_steps` (via
    CyclicLR), then exponential decay lr * decay**epoch afterwards."""

    def __init__(self, optimizer, lr, warmup_steps, momentum, decay):
        """
        Args:
            optimizer: wrapped torch optimizer.
            lr: peak learning rate reached at the end of warmup.
            warmup_steps: number of steps to ramp up (clamped to >= 1).
            momentum: fixed momentum used during the cyclic warmup phase.
            decay: per-epoch multiplicative decay factor after warmup.
        """
        self.optimizer = optimizer
        self.lr = lr
        self.warmup_steps = warmup_steps
        self.momentum = momentum
        self.decay = decay
        # CyclicLR requires at least one step per half-cycle.
        if (self.warmup_steps < 1):
            self.warmup_steps = 1
        # Warmup phase: ramp 0 -> lr; momentum held constant.
        self.initial_scheduler = toptim.CyclicLR(self.optimizer, base_lr=0, max_lr=self.lr, step_size_up=self.warmup_steps, step_size_down=self.warmup_steps, cycle_momentum=False, base_momentum=self.momentum, max_momentum=self.momentum)
        self.last_epoch = (- 1)
        # Becomes True once warmup has completed and decay takes over.
        self.finished = False
        super().__init__(optimizer)

    def get_lr(self):
        """Post-warmup LR: exponential decay from the peak `lr`."""
        return [(self.lr * (self.decay ** self.last_epoch)) for lr in self.base_lrs]

    def step(self, epoch=None):
        """Delegate to the warmup scheduler until it completes, then decay."""
        if (self.finished or (self.initial_scheduler.last_epoch >= self.warmup_steps)):
            if (not self.finished):
                # Reset the base LRs so get_lr() decays from the peak.
                self.base_lrs = [self.lr for lr in self.base_lrs]
                self.finished = True
            return super(warmupLR, self).step(epoch)
        else:
            return self.initial_scheduler.step(epoch)
def get_color(colorscale, loc):
    """Interpolate a color from `colorscale` at position(s) `loc` in [0, 1].

    `loc` may be a scalar (returns one color) or an iterable (returns a list).
    """
    validator = ColorscaleValidator('colorscale', '')
    coerced = validator.validate_coerce(colorscale)
    positions, raw_colors = zip(*coerced)
    # Normalize every color to 'rgb(...)' form before interpolating.
    rgb_colors = standardize_colors(raw_colors, colortype='rgb')
    scale = list(zip(positions, rgb_colors))
    if isinstance(loc, Iterable):
        return [_get_color(scale, position) for position in loc]
    return _get_color(scale, loc)
def mix_slices_in_checkers(slice1, slice2, checker_size=cfg.default_checkerboard_size):
    """Blend two same-shaped slices into a checkerboard composite.

    Cells where the checker mask is positive come from `slice2`; the rest
    keep `slice1`. Raises ValueError on any shape mismatch.
    """
    checkers = _get_checkers(slice1.shape, checker_size)
    shapes_ok = (slice1.shape == slice2.shape) and (slice2.shape == checkers.shape)
    if not shapes_ok:
        raise ValueError('size mismatch between cropped slices and checkers!!!')
    mask = checkers > 0
    mixed = slice1.copy()
    mixed[mask] = slice2[mask]
    return mixed
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool=True):
    """Port timm ResNet weights into a HF ResNetForImageClassification,
    verify the logits match on a random input, and optionally push the
    model (plus a feature extractor) to the Hub.

    Args:
        name: timm model name, e.g. 'resnet50'.
        config: HF ResNet config matching the timm architecture.
        save_directory: local parent directory for the pushed repo.
        push_to_hub: skip the network pushes when False.
    """
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        # Copies weights module-by-module from the timm model into ours.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    # Sanity check: converted model must reproduce the original logits.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    # e.g. 'resnet50' -> 'resnet-50'.
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(repo_path_or_name=(save_directory / checkpoint_name), commit_message='Add model', use_temp_dir=True)
        # Reuse the ConvNeXt extractor -- same preprocessing pipeline.
        feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/convnext-base-224-22k-1k')
        feature_extractor.push_to_hub(repo_path_or_name=(save_directory / checkpoint_name), commit_message='Add feature extractor', use_temp_dir=True)
        print(f'Pushed {checkpoint_name}')
class Tencode_endian(TestCase):
    """Tests for encode_endian().

    FIX(review): the non-ASCII test literals were mangled to plain ASCII in
    this copy ('ä' became 'a', and the non-latin-1 char for the error cases
    became ''), which made every assertion unsatisfiable: 'a'.encode('latin-1')
    is b'a', not b'\\xe4', and u'' can never raise UnicodeEncodeError.
    Restored literals consistent with the expected byte strings
    (U+00E4 'ä'; U+2714 as a representative non-latin-1 char).
    """

    def test_other(self):
        # latin-1 / utf-8 encodings of U+00E4.
        assert (encode_endian(u'\xe4', 'latin-1') == b'\xe4')
        assert (encode_endian(u'\xe4', 'utf-8') == b'\xc3\xa4')
        # Unknown codec name.
        with self.assertRaises(LookupError):
            encode_endian(u'', 'nopenope')
        # A char outside latin-1 raises, or becomes '?' with errors='replace'.
        with self.assertRaises(UnicodeEncodeError):
            assert encode_endian(u'\u2714', 'latin-1')
        assert (encode_endian(u'\u2714', 'latin-1', 'replace') == b'?')

    def test_utf_16(self):
        # Plain 'utf-16' emits a BOM matching the requested endianness;
        # the -le/-be variants emit no BOM.
        assert (encode_endian(u'\xe4', 'utf-16', le=True) == b'\xff\xfe\xe4\x00')
        assert (encode_endian(u'\xe4', 'utf-16-le') == b'\xe4\x00')
        assert (encode_endian(u'\xe4', 'utf-16', le=False) == b'\xfe\xff\x00\xe4')
        assert (encode_endian(u'\xe4', 'utf-16-be') == b'\x00\xe4')

    def test_utf_32(self):
        assert (encode_endian(u'\xe4', 'utf-32', le=True) == b'\xff\xfe\x00\x00\xe4\x00\x00\x00')
        assert (encode_endian(u'\xe4', 'utf-32-le') == b'\xe4\x00\x00\x00')
        assert (encode_endian(u'\xe4', 'utf-32', le=False) == b'\x00\x00\xfe\xff\x00\x00\x00\xe4')
        assert (encode_endian(u'\xe4', 'utf-32-be') == b'\x00\x00\x00\xe4')
class TestOtherFS(fake_filesystem_unittest.TestCase):
    """pyfakefs tests: simulate a different OS (Windows/Linux/macOS) than the
    host and check path/separator/mount semantics of the fake filesystem."""

    def setUp(self):
        self.setUpPyfakefs()

    # NOTE(review): decorator prefix lost in extraction -- presumably
    # @mock.patch.dict(os.environ, ...).
    .dict(os.environ, {'HOME': '/home/john'})
    def test_real_file_with_home(self):
        """A real file mapped into the fake fs stays readable after chdir to
        the (patched) home directory, under the opposite-of-host OS flavor."""
        # Deliberately simulate the opposite of the host OS.
        self.fs.is_windows_fs = (os.name != 'nt')
        if self.fs.is_windows_fs:
            self.fs.is_macos = False
        self.fs.add_real_file(__file__)
        with open(__file__) as f:
            self.assertTrue(f.read())
        home = Path.home()
        os.chdir(home)
        with open(__file__) as f:
            self.assertTrue(f.read())

    def test_windows(self):
        """Windows semantics: drives, case-insensitivity, separators."""
        self.fs.os = OSType.WINDOWS
        path = 'C:\\foo\\bar'
        self.assertEqual(path, os.path.join('C:\\', 'foo', 'bar'))
        self.assertEqual(('C:', '\\foo\\bar'), os.path.splitdrive(path))
        self.fs.create_file(path)
        self.assertTrue(os.path.exists(path))
        # Windows paths are case-insensitive.
        self.assertTrue(os.path.exists(path.upper()))
        self.assertTrue(os.path.ismount('\\\\share\\foo'))
        self.assertTrue(os.path.ismount('C:'))
        self.assertEqual('\\', os.sep)
        self.assertEqual('\\', os.path.sep)
        self.assertEqual('/', os.altsep)
        self.assertEqual(';', os.pathsep)
        self.assertEqual('\r\n', os.linesep)
        self.assertEqual('nul', os.devnull)

    def test_linux(self):
        """Linux semantics: no drives, case-sensitive, POSIX separators."""
        self.fs.os = OSType.LINUX
        path = '/foo/bar'
        self.assertEqual(path, os.path.join('/', 'foo', 'bar'))
        self.assertEqual(('', 'C:/foo/bar'), os.path.splitdrive('C:/foo/bar'))
        self.fs.create_file(path)
        self.assertTrue(os.path.exists(path))
        # Linux paths are case-sensitive.
        self.assertFalse(os.path.exists(path.upper()))
        self.assertTrue(os.path.ismount('/'))
        self.assertFalse(os.path.ismount('//share/foo'))
        self.assertEqual('/', os.sep)
        self.assertEqual('/', os.path.sep)
        self.assertEqual(None, os.altsep)
        self.assertEqual(':', os.pathsep)
        self.assertEqual('\n', os.linesep)
        self.assertEqual('/dev/null', os.devnull)

    def test_macos(self):
        """macOS semantics: POSIX separators but case-insensitive paths."""
        self.fs.os = OSType.MACOS
        path = '/foo/bar'
        self.assertEqual(path, os.path.join('/', 'foo', 'bar'))
        self.assertEqual(('', 'C:/foo/bar'), os.path.splitdrive('C:/foo/bar'))
        self.fs.create_file(path)
        self.assertTrue(os.path.exists(path))
        # macOS is case-insensitive (unlike Linux above).
        self.assertTrue(os.path.exists(path.upper()))
        self.assertTrue(os.path.ismount('/'))
        self.assertFalse(os.path.ismount('//share/foo'))
        self.assertEqual('/', os.sep)
        self.assertEqual('/', os.path.sep)
        self.assertEqual(None, os.altsep)
        self.assertEqual(':', os.pathsep)
        self.assertEqual('\n', os.linesep)
        self.assertEqual('/dev/null', os.devnull)

    def test_drivelike_path(self):
        """A 'C:' component inside a POSIX path is just a normal directory."""
        self.fs.os = OSType.LINUX
        folder = Path('/test')
        file_path = (folder / 'C:/testfile')
        file_path.parent.mkdir(parents=True)
        file_path.touch()
        os.chdir(folder)
        self.assertTrue(os.path.exists(str(file_path.relative_to(folder))))

    # NOTE(review): decorator prefix lost in extraction -- presumably
    # @unittest.skipIf(sys.platform != 'win32', ...).
    ((sys.platform != 'win32'), 'Windows-specific test')
    def test_tempfile_access(self):
        """tempfile must still work when simulating Linux on a Windows host."""
        self.fs.os = OSType.LINUX
        tmp_file = tempfile.TemporaryFile()
        assert tmp_file
def stop_server_only(when_stopped=None, interactive=False):
    """Stop the Evennia Server process (leaving the Portal running).

    Queries the Portal for status; if the Server is running, sends a
    shutdown (or reload, in interactive mode) instruction and waits for
    confirmation. `when_stopped` is an optional callback invoked once the
    Server is down (or already down); without it the reactor is stopped
    after printing a message.
    """
    def _server_stopped(*args):
        # Called once the status reply confirms the Server is down.
        if when_stopped:
            when_stopped()
        else:
            print('... Server stopped.')
            _reactor_stop()

    def _portal_running(response):
        # Portal answered: inspect whether the Server component is up.
        (_, srun, _, _, _, _) = _parse_status(response)
        if srun:
            print('Server stopping ...')
            wait_for_status_reply(_server_stopped)
            # Interactive mode reloads (restarts in foreground) instead of a
            # plain shutdown.
            if interactive:
                send_instruction(SRELOAD, {})
            else:
                send_instruction(SSHUTD, {})
        elif when_stopped:
            when_stopped()
        else:
            print('Server is not running.')
            _reactor_stop()

    def _portal_not_running(fail):
        # No Portal at all -- nothing to stop.
        print('Evennia is not running.')
        if interactive:
            print('Start Evennia normally first, then use `istart` to switch to interactive mode.')
        _reactor_stop()

    send_instruction(PSTATUS, None, _portal_running, _portal_not_running)
def balanced_accuracy(tp: torch.LongTensor, fp: torch.LongTensor, fn: torch.LongTensor, tn: torch.LongTensor, reduction: Optional[str]=None, class_weights: Optional[List[float]]=None, zero_division: Union[(str, float)]=1.0) -> torch.Tensor:
    """Compute balanced accuracy from per-class confusion-matrix counts.

    Thin wrapper delegating to the generic metric driver with the
    `_balanced_accuracy` kernel; see `_compute_metric` for the semantics of
    `reduction`, `class_weights` and `zero_division`.
    """
    return _compute_metric(
        _balanced_accuracy,
        tp,
        fp,
        fn,
        tn,
        reduction=reduction,
        class_weights=class_weights,
        zero_division=zero_division,
    )
class DemoTextItem(DemoItem):
    """Graphics-scene text item that is either pre-rendered to an image
    (STATIC_TEXT) or painted directly each frame (DYNAMIC_TEXT)."""

    (STATIC_TEXT, DYNAMIC_TEXT) = range(2)

    def __init__(self, text, font, textColor, textWidth, parent=None, type=STATIC_TEXT, bgColor=QColor()):
        """
        Args:
            text: HTML (static) or plain (dynamic) text to display.
            font: QFont used for rendering.
            textColor: foreground QColor.
            textWidth: wrap width for static text layout.
            type: STATIC_TEXT (cached image) or DYNAMIC_TEXT (direct paint).
            bgColor: background color (stored; unused in this class's painting).
        """
        super(DemoTextItem, self).__init__(parent)
        self.type = type
        self.text = text
        self.font = font
        self.textColor = textColor
        self.bgColor = bgColor
        self.textWidth = textWidth
        # Sub-pixel antialiasing is disabled only while animating (see below).
        self.noSubPixeling = True

    def setText(self, text):
        """Replace the displayed text and request a repaint."""
        self.text = text
        self.update()

    def createImage(self, transform):
        """Render static text to a premultiplied-ARGB image at the scale
        implied by `transform`; dynamic text is painted live instead."""
        if (self.type == DemoTextItem.DYNAMIC_TEXT):
            return None
        # Derive a uniform scale from the transform's x/y scale factors.
        sx = min(transform.m11(), transform.m22())
        sy = max(transform.m22(), sx)
        textItem = QGraphicsTextItem()
        textItem.setHtml(self.text)
        textItem.setTextWidth(self.textWidth)
        textItem.setFont(self.font)
        textItem.setDefaultTextColor(self.textColor)
        textItem.document().setDocumentMargin(2)
        w = textItem.boundingRect().width()
        h = textItem.boundingRect().height()
        image = QImage(int((w * sx)), int((h * sy)), QImage.Format_ARGB32_Premultiplied)
        # Fully transparent background.
        image.fill(QColor(0, 0, 0, 0).rgba())
        painter = QPainter(image)
        painter.scale(sx, sy)
        style = QStyleOptionGraphicsItem()
        textItem.paint(painter, style, None)
        return image

    def animationStarted(self, id=0):
        self.noSubPixeling = False

    def animationStopped(self, id=0):
        self.noSubPixeling = True

    def boundingRect(self):
        if (self.type == DemoTextItem.STATIC_TEXT):
            return super(DemoTextItem, self).boundingRect()
        # Dynamic text uses a fixed nominal rect.
        return QRectF(0, 0, 50, 20)

    def paint(self, painter, option, widget):
        if (self.type == DemoTextItem.STATIC_TEXT):
            # Static text paints the cached image via the base class.
            super(DemoTextItem, self).paint(painter, option, widget)
            return
        painter.setPen(self.textColor)
        painter.drawText(0, 0, self.text)
def test_with_constraint() -> None:
    """with_constraint() must copy every attribute except the version constraint."""
    original = Dependency('foo', '^1.2.3', optional=True, groups=['dev'], allows_prereleases=True, extras=['bar', 'baz'])
    original.marker = parse_marker('python_version >= "3.6" and python_version < "4.0"')
    original.transitive_marker = parse_marker('python_version >= "3.7" and python_version < "4.0"')
    original.python_versions = '^3.6'
    # Setting the deprecated attribute must warn.
    with pytest.warns(DeprecationWarning):
        original.transitive_python_versions = '^3.7'
    derived = original.with_constraint('^1.2.6')
    # Only the constraint changes...
    assert str(derived.constraint) == '>=1.2.6,<2.0.0'
    # ...everything else carries over.
    assert derived.name == original.name
    assert derived.is_optional()
    assert derived.groups == frozenset(['dev'])
    assert derived.allows_prereleases()
    assert set(derived.extras) == {'bar', 'baz'}
    assert derived.marker == original.marker
    assert derived.transitive_marker == original.transitive_marker
    assert derived.python_constraint == original.python_constraint
    # Reading the deprecated attribute must warn too.
    with pytest.warns(DeprecationWarning):
        assert derived.transitive_python_constraint == original.transitive_python_constraint
class TimeOracle():
    """Per-node execution-time lookup built from a pickled TF timeline."""

    def __init__(self, timeline_file):
        """Load the pickled timeline and precompute per-node costs (seconds)."""
        self.__timeline = Timeline.from_pickle(timeline_file)
        self.__costs = self.__timeline_analyser(self.__timeline)

    # FIX(review): this helper takes only `timeline`, but was called as a bound
    # method with an argument (self.__timeline_analyser(self.__timeline)),
    # which raises TypeError at construction -- it must be a @staticmethod.
    @staticmethod
    def __timeline_analyser(timeline):
        """Accumulate wall time per node name across all devices.

        Returns a dict mapping node name -> total time in seconds.
        """
        costs = {}
        for device in timeline._run_metadata.step_stats.dev_stats:
            for n in device.node_stats:
                try:
                    if ('RecvTensor' in n.node_name):
                        # RecvTensor entries encode the producing node in the
                        # timeline label ("edge_<N>_<name> from ...").
                        node_name = re.findall('edge_\\d+_(.+) from', n.timeline_label)[0]
                    elif (':' in n.node_name):
                        # Strip the ":<op_type>" suffix.
                        (node_name, op_type) = n.node_name.split(':')
                    else:
                        node_name = n.node_name
                    # Microseconds -> seconds.
                    time = (n.all_end_rel_micros / 1000000.0)
                    costs[node_name] = (time + costs.get(node_name, 0))
                except Exception:
                    # Best-effort: skip malformed entries (was a bare except,
                    # which also swallowed KeyboardInterrupt/SystemExit).
                    print(n)
        return costs

    def get(self, name):
        """Return the accumulated cost in seconds for `name`, or None if unknown."""
        return self.__costs.get(name, None)
def test_font_file():
    """FontFile derives name/weight/style from family+variant, and hashes
    distinguish different variants."""
    codepoints = {1, 2, 3}
    regular = _fontfinder.FontFile('x', 'Foo Sans', 'Regular', codepoints)
    bold = _fontfinder.FontFile('x', 'Foo Sans', 'Bold', codepoints)
    bold_italic = _fontfinder.FontFile('x', 'Foo Sans', 'Bold Italic', codepoints)
    # Regular variant: weight 400, normal style, spaces stripped in name.
    assert regular.filename == 'x'
    assert regular.name == 'FooSans-Regular'
    assert regular.family == 'Foo Sans'
    assert regular.variant == 'Regular'
    assert regular.weight == 400
    assert regular.style == 'normal'
    assert regular.codepoints.intersection((2, 3, 4, 5)) == {2, 3}
    # Bold variant: weight 700, still normal style.
    assert bold.name == 'FooSans-Bold'
    assert bold.family == 'Foo Sans'
    assert bold.variant == 'Bold'
    assert bold.weight == 700
    assert bold.style == 'normal'
    # Bold Italic: weight 700 and italic style.
    assert bold_italic.name == 'FooSans-BoldItalic'
    assert bold_italic.family == 'Foo Sans'
    assert bold_italic.variant == 'Bold Italic'
    assert bold_italic.weight == 700
    assert bold_italic.style == 'italic'
    # Distinct variants must hash differently.
    assert hash(regular) != hash(bold)
    assert hash(regular) != hash(bold_italic)
    assert hash(bold) != hash(bold_italic)
def init(disp, info):
    """Register the XTEST extension request methods on the display/window.

    `info` is the standard extension-init argument; unused here.
    """
    registrations = (
        ('display', 'xtest_get_version', get_version),
        ('window', 'xtest_compare_cursor', compare_cursor),
        ('display', 'xtest_fake_input', fake_input),
        ('display', 'xtest_grab_control', grab_control),
    )
    for target, method_name, handler in registrations:
        disp.extension_add_method(target, method_name, handler)
# NOTE(review): decorator name lost in extraction -- presumably @dataclass(frozen=True).
(frozen=True)
class ContractReceiveChannelBatchUnlock(ContractReceiveStateChange):
    """State change for an on-chain channel batch-unlock event."""
    # Identifies the chain / token network / channel this unlock belongs to.
    canonical_identifier: CanonicalIdentifier
    receiver: Address
    sender: Address
    locksroot: Locksroot
    unlocked_amount: TokenAmount
    returned_tokens: TokenAmount

    def __post_init__(self) -> None:
        super().__post_init__()
        # Runtime validation on top of static annotations.
        typecheck(self.receiver, T_Address)
        typecheck(self.sender, T_Address)

    # NOTE(review): likely decorated @property in the original source.
    def token_network_address(self) -> TokenNetworkAddress:
        return self.canonical_identifier.token_network_address
class Model(nn.Module):
    """SimCLR-style encoder: a ResNet backbone `f` adapted for small inputs,
    plus a 2-layer projection head `g`."""

    def __init__(self, feature_dim=128, resnet_depth=18):
        """
        Args:
            feature_dim: output dimension of the projection head.
            resnet_depth: backbone depth; one of 18, 34, 50.

        Raises:
            ValueError: for an unsupported resnet_depth (previously this
                fell through and crashed later with a NameError).
        """
        super(Model, self).__init__()
        self.f = []
        if (resnet_depth == 18):
            my_resnet = resnet18()
            resnet_output_dim = 512
        elif (resnet_depth == 34):
            my_resnet = resnet34()
            resnet_output_dim = 512
        elif (resnet_depth == 50):
            my_resnet = resnet50()
            resnet_output_dim = 2048
        else:
            raise ValueError('resnet_depth must be one of 18, 34 or 50, got {}'.format(resnet_depth))
        for (name, module) in my_resnet.named_children():
            if (name == 'conv1'):
                # Replace the 7x7/stride-2 stem with 3x3/stride-1 for small
                # (e.g. CIFAR-sized) inputs.
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            # Drop the classifier head and the stem max-pool.
            if ((not isinstance(module, nn.Linear)) and (not isinstance(module, nn.MaxPool2d))):
                self.f.append(module)
        self.f = nn.Sequential(*self.f)
        # Projection head: linear -> BN -> ReLU -> linear.
        self.g = nn.Sequential(nn.Linear(resnet_output_dim, 512, bias=False), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))

    def forward(self, x):
        """Return (L2-normalized backbone feature, L2-normalized projection)."""
        x = self.f(x)
        feature = torch.flatten(x, start_dim=1)
        out = self.g(feature)
        return (F.normalize(feature, dim=(- 1)), F.normalize(out, dim=(- 1)))
def share_file(comm, path):
    """Broadcast the file at `path` from MPI rank 0 to all ranks, writing it
    to disk once per node (by each node's local rank 0)."""
    local_rank, _ = get_local_rank_size(comm)
    if comm.Get_rank() == 0:
        with open(path, 'rb') as src:
            payload = src.read()
        comm.bcast(payload)
    else:
        payload = comm.bcast(None)
    # Only one process per node touches the shared filesystem.
    if local_rank == 0:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'wb') as dst:
            dst.write(payload)
    comm.Barrier()
def parse_args():
    """Parse the profiler CLI options.

    Returns the (namespace, remaining_argv) pair from parse_known_args(),
    so unrecognized arguments pass through to the profiled program.
    """
    parser = argparse.ArgumentParser()
    tool_choices = ['kcachegrind', 'snakeviz', 'gprof2dot', 'tuna', 'none']
    parser.add_argument('--profile-tool', metavar='TOOL', action='store',
                        choices=tool_choices, default='snakeviz',
                        help='The tool to use to view the profiling data')
    parser.add_argument('--profile-file', metavar='FILE', action='store',
                        default='profile_data',
                        help='The filename to use with --profile-tool=none')
    parser.add_argument('--profile-test', action='store_true',
                        help='Run pytest instead of qutebrowser')
    return parser.parse_known_args()
def test_tags_command(capsys, wheelpath):
    """`wheel tags` with every tag option renames the wheel accordingly."""
    cli_args = [
        'tags',
        '--python-tag', 'py3',
        '--abi-tag', 'cp33m',
        '--platform-tag', 'linux_x86_64',
        '--build', '7',
        str(wheelpath),
    ]
    parsed = parser().parse_args(cli_args)
    parsed.func(parsed)
    # The original wheel remains; the renamed copy is printed to stdout.
    assert wheelpath.exists()
    renamed = capsys.readouterr().out.strip()
    assert renamed == 'test-1.0-7-py3-cp33m-linux_x86_64.whl'
    # Clean up the generated wheel.
    (wheelpath.parent / renamed).unlink()
# NOTE(review): decorator prefix lost in extraction -- presumably @torch.no_grad().
_grad()
def evaluate_a2d(model, data_loader, postprocessor, device, args):
    """Evaluate referring segmentation on A2D-Sentences / JHMDB-Sentences.

    Runs the model over `data_loader`, gathers COCO-format mask predictions
    across distributed workers, and (on the main process) computes COCO
    segm AP metrics plus precision@K and IoU metrics. Returns the metric
    dict (empty on non-main processes).
    """
    model.eval()
    predictions = []
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    for (samples, targets) in metric_logger.log_every(data_loader, 10, header):
        image_ids = [t['image_id'] for t in targets]
        samples = samples.to(device)
        captions = [t['caption'] for t in targets]
        targets = utils.targets_to(targets, device)
        outputs = model(samples, captions, targets)
        orig_target_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)
        target_sizes = torch.stack([t['size'] for t in targets], dim=0)
        processed_outputs = postprocessor(outputs, orig_target_sizes, target_sizes)
        # One COCO-style record per predicted mask; category is fixed to 1
        # (class-agnostic, evaluated with useCats=0 below).
        for (p, image_id) in zip(processed_outputs, image_ids):
            for (s, m) in zip(p['scores'], p['rle_masks']):
                predictions.append({'image_id': image_id, 'category_id': 1, 'segmentation': m, 'score': s.item()})
    # Collect predictions from all distributed workers.
    gathered_pred_lists = utils.all_gather(predictions)
    predictions = [p for p_list in gathered_pred_lists for p in p_list]
    eval_metrics = {}
    if utils.is_main_process():
        if (args.dataset_file == 'a2d'):
            coco_gt = COCO(os.path.join(args.a2d_path, 'a2d_sentences_test_annotations_in_coco_format.json'))
        elif (args.dataset_file == 'jhmdb'):
            coco_gt = COCO(os.path.join(args.jhmdb_path, 'jhmdb_sentences_gt_annotations_in_coco_format.json'))
        else:
            raise NotImplementedError
        coco_pred = coco_gt.loadRes(predictions)
        coco_eval = COCOeval(coco_gt, coco_pred, iouType='segm')
        # Class-agnostic evaluation.
        coco_eval.params.useCats = 0
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        ap_labels = ['mAP 0.5:0.95', 'AP 0.5', 'AP 0.75', 'AP 0.5:0.95 S', 'AP 0.5:0.95 M', 'AP 0.5:0.95 L']
        ap_metrics = coco_eval.stats[:6]
        eval_metrics = {l: m for (l, m) in zip(ap_labels, ap_metrics)}
        (precision_at_k, overall_iou, mean_iou) = calculate_precision_at_k_and_iou_metrics(coco_gt, coco_pred)
        eval_metrics.update({f'{k}': m for (k, m) in zip([0.5, 0.6, 0.7, 0.8, 0.9], precision_at_k)})
        eval_metrics.update({'overall_iou': overall_iou, 'mean_iou': mean_iou})
        print(eval_metrics)
    # Keep workers in lock-step before returning.
    dist.barrier()
    return eval_metrics
class BreakRop(Op):
    """Identity op whose gradient is declared undefined and whose R-operator
    yields None -- used to deliberately break Rop propagation through a
    subgraph while leaving forward computation untouched."""

    __props__ = ()

    def make_node(self, x):
        # Output shares the input's type; forward pass is pure identity.
        return Apply(self, [x], [x.type()])

    def perform(self, node, inp, out_):
        # Pass the single input straight through to the single output.
        out_[0][0] = inp[0]

    def grad(self, inp, grads):
        # Identity would be a valid gradient, but this op exists precisely to
        # mark the gradient as undefined.
        return [grad_undefined(self, 0, inp[0])]

    def R_op(self, inputs, eval_points):
        # No R-operator: downstream Rop computation stops here.
        return [None]
class BiSeNet_res18(nn.Module):
    """BiSeNet with a ResNet-18 context path for semantic segmentation."""

    def __init__(self, input_h, input_w, n_classes=19):
        """
        Args:
            input_h, input_w: input spatial size (used by the sub-modules).
            n_classes: number of segmentation classes (default: Cityscapes' 19).
        """
        super().__init__()
        self.spatial_path = SpatialPath()
        self.context_path = ContextPath(input_h, input_w)
        # 1152 = spatial-path channels + context-path channels at the fusion point.
        self.ffm = FFM(input_h, input_w, 1152)
        self.pred = nn.Conv2d(1152, n_classes, kernel_size=1, stride=1)

    def forward(self, x):
        """Return per-pixel class logits at the input resolution."""
        x1 = self.spatial_path(x)
        x2 = self.context_path(x)
        feature = self.ffm(x1, x2)
        seg = self.pred(feature)
        # F.upsample is deprecated; F.interpolate is its drop-in replacement
        # with identical behavior for these arguments.
        return F.interpolate(seg, x.size()[2:], mode='bilinear', align_corners=False)
def process_one_shard(corpus_params, params):
    """Build, vocab-count and save one dataset shard (OpenNMT preprocessing).

    Args:
        corpus_params: tuple of (corpus_type, fields, src_reader, tgt_reader,
            opt, existing_fields, src_vocab, tgt_vocab) shared across shards.
        params: tuple of (shard index, (src_shard, tgt_shard, maybe_id,
            filter_pred)) for this shard.

    Returns:
        dict mapping sub-field name -> Counter of token frequencies collected
        from this shard (empty unless this is a 'train' shard with no
        pre-existing fields).
    """
    (corpus_type, fields, src_reader, tgt_reader, opt, existing_fields, src_vocab, tgt_vocab) = corpus_params
    (i, (src_shard, tgt_shard, maybe_id, filter_pred)) = params
    sub_sub_counter = defaultdict(Counter)
    assert (len(src_shard) == len(tgt_shard))
    logger.info(('Building shard %d.' % i))
    # tgt_reader is None for inference-style (src-only) corpora.
    dataset = inputters.Dataset(fields, readers=([src_reader, tgt_reader] if tgt_reader else [src_reader]), data=([('src', src_shard), ('tgt', tgt_shard)] if tgt_reader else [('src', src_shard)]), dirs=([opt.src_dir, None] if tgt_reader else [opt.src_dir]), sort_key=inputters.str2sortkey[opt.data_type], filter_pred=filter_pred)
    # Token counting only for training data when no vocab was loaded.
    if ((corpus_type == 'train') and (existing_fields is None)):
        for ex in dataset.examples:
            for (name, field) in fields.items():
                # Audio sources have no meaningful tokens to count.
                if ((opt.data_type == 'audio') and (name == 'src')):
                    continue
                try:
                    f_iter = iter(field)
                except TypeError:
                    # Simple (non-composite) field: wrap to mirror the
                    # composite (sub-name, sub-field) structure.
                    f_iter = [(name, field)]
                    all_data = [getattr(ex, name, None)]
                else:
                    all_data = getattr(ex, name)
                for ((sub_n, sub_f), fd) in zip(f_iter, all_data):
                    # Skip fields whose vocab is supplied externally.
                    has_vocab = (((sub_n == 'src') and (src_vocab is not None)) or ((sub_n == 'tgt') and (tgt_vocab is not None)))
                    if (hasattr(sub_f, 'sequential') and sub_f.sequential and (not has_vocab)):
                        val = fd
                        sub_sub_counter[sub_n].update(val)
    if maybe_id:
        shard_base = ((corpus_type + '_') + maybe_id)
    else:
        shard_base = corpus_type
    data_path = '{:s}.{:s}.{:d}.pt'.format(opt.save_data, shard_base, i)
    logger.info((' * saving %sth %s data shard to %s.' % (i, shard_base, data_path)))
    dataset.save(data_path)
    # Free the (potentially large) examples eagerly before returning.
    del dataset.examples
    gc.collect()
    del dataset
    gc.collect()
    return sub_sub_counter
# NOTE(review): decorator prefixes lost in extraction -- presumably
# @click.command() and @click.argument('new_version').
()
('new_version')
def bump_version(new_version: str) -> None:
    """Write `new_version` into every version field of the project:
    the main and vectors pyproject.toml files and both __about__.py files."""
    base_dir = pathlib.Path(__file__).parent
    replace_version((base_dir / 'pyproject.toml'), 'version', new_version)
    replace_version((base_dir / 'src/cryptography/__about__.py'), '__version__', new_version)
    replace_version((base_dir / 'vectors/pyproject.toml'), 'version', new_version)
    replace_version((base_dir / 'vectors/cryptography_vectors/__about__.py'), '__version__', new_version)
def gen_src0_dep_nottaken_test():
    """Six not-taken `bne` cases with shrinking source-0 dependency distance
    (5 down to 0) and equal operand values (so the branch never fires)."""
    return [
        gen_br2_src0_dep_test(5 - i, 'bne', i + 1, i + 1, False)
        for i in range(6)
    ]
class TestRateShiftCoefficient():
    """Tests for RateShiftCoefficient; `rates` is presumably a pytest fixture
    supplying .coeffs and .tlist."""

    # NOTE(review): takes no `self` yet is called via self.assert_f_equals_...;
    # presumably a @staticmethod decorator was lost in extraction -- confirm.
    def assert_f_equals_rate_shift(f, coeffs, tlist, **kw):
        """Assert f(t) == 2*|min(0, min_c Re(c(t)))| over tlist."""
        def g(t):
            return (2 * np.abs(min(([0] + [np.real(c(t)) for c in coeffs]))))
        assert_functions_equal(f, g, tlist, **kw)

    def test_call(self, rates):
        rs = RateShiftCoefficient(rates.coeffs)
        self.assert_f_equals_rate_shift(rs, rates.coeffs, rates.tlist)

    def test_as_double(self, rates):
        rs = RateShiftCoefficient(rates.coeffs)
        self.assert_f_equals_rate_shift(rs.as_double, rates.coeffs, rates.tlist)
        # as_double must return plain Python floats, not complex.
        assert all((isinstance(rs.as_double(t), float) for t in rates.tlist))

    def test_copy(self, rates):
        # A copy must behave identically to the original.
        rs = RateShiftCoefficient(rates.coeffs)
        rs = rs.copy()
        self.assert_f_equals_rate_shift(rs, rates.coeffs, rates.tlist)

    def test_replace_arguments(self, ):
        # Replacing args must propagate to the wrapped coefficients.
        coeff = coefficient((lambda t, w: np.sin((w * t))), args={'w': 1.0})
        tlist = np.linspace(0, (2 * np.pi), 100)
        rs = RateShiftCoefficient([coeff])
        for w in [0, 1, 2, 3]:
            rs2 = rs.replace_arguments(w=w)
            self.assert_f_equals_rate_shift(rs2, [coeff.replace_arguments(w=w)], tlist)

    def test_reduce(self):
        # The coefficient must survive a pickle round-trip.
        coeff = coefficient(sin_t)
        tlist = np.linspace(0, (2 * np.pi), 20)
        rs = RateShiftCoefficient([coeff])
        data = pickle.dumps(rs, protocol=(- 1))
        rs = pickle.loads(data)
        self.assert_f_equals_rate_shift(rs, [coeff], tlist)
class FrankWolfeSSVM(BaseSSVM):
    def __init__(self, model, max_iter=1000, C=1.0, verbose=0, n_jobs=1, show_loss_every=0, logger=None, batch_mode=False, line_search=True, check_dual_every=10, tol=0.001, do_averaging=True, sample_method='perm', random_state=None):
        """Configure the (Block-Coordinate) Frank-Wolfe SSVM solver.

        Args (beyond BaseSSVM's):
            batch_mode: use batch Frank-Wolfe instead of block-coordinate.
            line_search: analytic step size instead of the 2/(k+2) schedule.
            check_dual_every: duality-gap check period (BC mode); 0 disables.
            tol: duality-gap convergence threshold.
            do_averaging: keep a weighted running average of the iterates.
            sample_method: 'perm' (shuffled passes), 'rnd' (with replacement)
                or 'seq' (fixed order).
            random_state: seed/Generator for sampling.
        """
        if (n_jobs != 1):
            # Parallel inference is not implemented for this solver.
            warnings.warn('FrankWolfeSSVM does not support multiprocessing yet. Ignoring n_jobs != 1.')
        if (sample_method not in ['perm', 'rnd', 'seq']):
            raise ValueError('sample_method can only be perm, rnd, or seq')
        BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose, n_jobs=n_jobs, show_loss_every=show_loss_every, logger=logger)
        self.tol = tol
        self.batch_mode = batch_mode
        self.line_search = line_search
        self.check_dual_every = check_dual_every
        self.do_averaging = do_averaging
        self.sample_method = sample_method
        self.random_state = random_state
    def _calc_dual_gap(self, X, Y):
        """Compute (dual objective, duality gap, primal objective) at the
        current iterate (self.w, self.l) via loss-augmented inference."""
        n_samples = len(X)
        joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
        # Most violating labelings under the current w (relaxed inference).
        Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w, relaxed=True)
        djoint_feature = (joint_feature_gt - self.model.batch_joint_feature(X, Y_hat))
        ls = np.sum(self.model.batch_loss(Y, Y_hat))
        # ws, l_rescaled: the Frank-Wolfe corner corresponding to Y_hat.
        ws = (djoint_feature * self.C)
        l_rescaled = ((self.l * n_samples) * self.C)
        dual_val = (((- 0.5) * np.sum((self.w ** 2))) + l_rescaled)
        w_diff = (self.w - ws)
        dual_gap = ((w_diff.T.dot(self.w) - l_rescaled) + (ls * self.C))
        # primal = dual + gap by definition of the gap.
        primal_val = (dual_val + dual_gap)
        return (dual_val, dual_gap, primal_val)
    def _frank_wolfe_batch(self, X, Y):
        """Batch Frank-Wolfe: one corner per pass over the whole dataset
        (Algorithm 2 in Lacoste-Julien et al., per the solver's lineage --
        confirm against the accompanying paper reference)."""
        l = 0.0
        n_samples = float(len(X))
        joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
        for iteration in range(self.max_iter):
            # Find the most violating corner via loss-augmented inference.
            Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w, relaxed=True)
            djoint_feature = (joint_feature_gt - self.model.batch_joint_feature(X, Y_hat))
            ls = np.mean(self.model.batch_loss(Y, Y_hat))
            ws = (djoint_feature * self.C)
            w_diff = (self.w - ws)
            dual_gap = ((((1.0 / (self.C * n_samples)) * w_diff.T.dot(self.w)) - l) + ls)
            if self.line_search:
                # Analytic optimal step size, clipped to [0, 1].
                eps = 1e-15
                gamma = (dual_gap / ((np.sum((w_diff ** 2)) / (self.C * n_samples)) + eps))
                gamma = max(0.0, min(1.0, gamma))
            else:
                # Standard diminishing step-size schedule.
                gamma = (2.0 / (iteration + 2.0))
            dual_val = (((- 0.5) * np.sum((self.w ** 2))) + (l * (n_samples * self.C)))
            dual_gap_display = ((dual_gap * n_samples) * self.C)
            primal_val = (dual_val + dual_gap_display)
            self.primal_objective_curve_.append(primal_val)
            self.objective_curve_.append(dual_val)
            self.timestamps_.append((time() - self.timestamps_[0]))
            if (self.verbose > 0):
                print(('iteration %d, dual: %f, dual_gap: %f, primal: %f, gamma: %f' % (iteration, dual_val, dual_gap_display, primal_val, gamma)))
            # Convex combination step toward the corner (ws, ls).
            self.w = (((1.0 - gamma) * self.w) + (gamma * ws))
            l = (((1.0 - gamma) * l) + (gamma * ls))
            if (self.logger is not None):
                self.logger(self, iteration)
            # Converged when the duality gap falls below tolerance.
            if (dual_gap < self.tol):
                return
    def _frank_wolfe_bc(self, X, Y):
        """Block-coordinate Frank-Wolfe: one corner update per example.

        Maintains per-example blocks ``w_mat``/``l_mat`` whose sums are the
        working iterate ``w``/``l``; optionally averages iterates into
        ``self.w``/``self.l`` when ``self.do_averaging`` is set.
        """
        n_samples = len(X)
        w = self.w.copy()
        w_mat = np.zeros((n_samples, self.model.size_joint_feature))
        l_mat = np.zeros(n_samples)
        l = 0.0
        k = 0  # total number of block updates performed so far
        rng = check_random_state(self.random_state)
        for iteration in range(self.max_iter):
            if (self.verbose > 0):
                print(('Iteration %d' % iteration))
            # Visit order: sequential, a random permutation, or sampling
            # with replacement, depending on ``self.sample_method``.
            perm = np.arange(n_samples)
            if (self.sample_method == 'perm'):
                rng.shuffle(perm)
            elif (self.sample_method == 'rnd'):
                perm = rng.randint(low=0, high=n_samples, size=n_samples)
            for j in range(n_samples):
                i = perm[j]
                (x, y) = (X[i], Y[i])
                # Most violated constraint for example i under current w.
                (y_hat, delta_joint_feature, slack, loss) = find_constraint(self.model, x, y, w)
                ws = (delta_joint_feature * self.C)
                ls = (loss / n_samples)
                if self.line_search:
                    eps = 1e-15
                    w_diff = (w_mat[i] - ws)
                    gamma = ((w_diff.T.dot(w) - ((self.C * n_samples) * (l_mat[i] - ls))) / (np.sum((w_diff ** 2)) + eps))
                    gamma = max(0.0, min(1.0, gamma))
                else:
                    gamma = ((2.0 * n_samples) / (k + (2.0 * n_samples)))
                # Replace block i by its convex combination with the corner,
                # keeping w equal to the sum of blocks.
                w -= w_mat[i]
                w_mat[i] = (((1.0 - gamma) * w_mat[i]) + (gamma * ws))
                w += w_mat[i]
                l -= l_mat[i]
                l_mat[i] = (((1.0 - gamma) * l_mat[i]) + (gamma * ls))
                l += l_mat[i]
                if self.do_averaging:
                    rho = (2.0 / (k + 2.0))
                    self.w = (((1.0 - rho) * self.w) + (rho * w))
                    self.l = (((1.0 - rho) * self.l) + (rho * l))
                else:
                    self.w = w
                    self.l = l
                k += 1
            if ((self.check_dual_every != 0) and ((iteration % self.check_dual_every) == 0)):
                (dual_val, dual_gap, primal_val) = self._calc_dual_gap(X, Y)
                self.primal_objective_curve_.append(primal_val)
                self.objective_curve_.append(dual_val)
                self.timestamps_.append((time() - self.timestamps_[0]))
                if (self.verbose > 0):
                    print(('dual: %f, dual_gap: %f, primal: %f' % (dual_val, dual_gap, primal_val)))
            if (self.logger is not None):
                self.logger(self, iteration)
            # NOTE(review): ``dual_gap`` is only (re)bound inside the
            # check_dual_every block above; iteration 0 always satisfies that
            # check when check_dual_every != 0, so it is bound here -- confirm
            # the check_dual_every == 0 configuration is never combined with
            # a finite tol.
            if (dual_gap < self.tol):
                return
    def fit(self, X, Y, constraints=None, initialize=True):
        """Train the SSVM with (block-coordinate) Frank-Wolfe.

        Parameters
        ----------
        X, Y : training inputs and labelings.
        constraints : accepted for API compatibility; unused here.
        initialize : whether to (re-)initialize the model from the data.

        Returns
        -------
        self

        KeyboardInterrupt during optimization is caught so a partially
        trained model is still returned with its final objective recorded.
        """
        if initialize:
            self.model.initialize(X, Y)
        (self.objective_curve_, self.primal_objective_curve_) = ([], [])
        # First timestamp is the start time; later entries are elapsed seconds.
        self.timestamps_ = [time()]
        # Warm start: reuse existing w / l if fit was called before.
        self.w = getattr(self, 'w', np.zeros(self.model.size_joint_feature))
        self.l = getattr(self, 'l', 0)
        try:
            if self.batch_mode:
                self._frank_wolfe_batch(X, Y)
            else:
                self._frank_wolfe_bc(X, Y)
        except KeyboardInterrupt:
            pass
        if self.verbose:
            print('Calculating final objective.')
        self.timestamps_.append((time() - self.timestamps_[0]))
        self.primal_objective_curve_.append(self._objective(X, Y))
        # Repeat the last dual value so both curves have equal length.
        self.objective_curve_.append(self.objective_curve_[(- 1)])
        if (self.logger is not None):
            self.logger(self, 'final')
        return self
class CovarianceNotPosDefWarning(QtWidgets.QMessageBox):
    """Warning dialog shown when the focal covariance matrix is not positive
    definite.

    Call :meth:`test` to run the check and display the dialog only if it
    fails.
    """

    def __init__(self, model, *args, **kwargs):
        QtWidgets.QMessageBox.__init__(self, *args, **kwargs)
        self.setIcon(QtWidgets.QMessageBox.Warning)
        self.setWindowTitle('Covariance Warning')
        self.setText('<b><span style="font-family: monospace;">Covariance.covariance_matrix_focal</span> is not positive definite!</b>')
        self.setInformativeText('Change the <span style="font-family: monospace;">Covariance.model_function</span> to exponential<br>or move the noise patch to fix.')
        # Model whose covariance is checked by test().
        self.model = model
        # BUGFIX(review): the original body ended with a stray no-op
        # expression statement ``()`` (an empty tuple), apparently the residue
        # of a dropped call; removed as dead code.

    def test(self):
        """Show the dialog if the model's covariance fails the quick PD check."""
        covariance = self.model.covariance
        # full=False: cheap check only — see Covariance.isMatrixPosDefinite.
        if (not covariance.isMatrixPosDefinite(full=False)):
            self.show()
def test_setuptools_version_keyword_ensures_regex(wd: WorkDir, monkeypatch: pytest.MonkeyPatch) -> None:
    """The ``use_scm_version`` keyword must accept a custom ``tag_regex``."""
    import setuptools

    from setuptools_scm._integration.setuptools import version_keyword

    # One commit plus a tag that only the custom regex below will match.
    wd.commit_testfile('test')
    wd('git tag 1.0')
    monkeypatch.chdir(wd.cwd)
    distribution = setuptools.Distribution({'name': 'test'})
    version_keyword(distribution, 'use_scm_version', {'tag_regex': '(1.0)'})
def install_sundials(download_dir, install_dir):
    """Download, configure and build SUNDIALS 6.5.0 into ``install_dir``.

    Parameters
    ----------
    download_dir : str
        Directory the release tarball is downloaded to and extracted in.
    install_dir : str
        CMake install prefix for the built libraries.

    Raises
    ------
    RuntimeError
        If ``cmake`` is not available on PATH.
    subprocess.CalledProcessError
        If the CMake configure step or ``make install`` fails.
    """
    logger = logging.getLogger('scikits.odes setup')
    sundials_version = '6.5.0'
    try:
        # Probe for CMake; subprocess.run raises OSError when the binary is missing.
        subprocess.run(['cmake', '--version'])
    except OSError:
        raise RuntimeError('CMake must be installed to build SUNDIALS.')
    # BUGFIX(review): the URL host had been stripped, leaving the literal
    # string " + sundials/releases/download/...".  Restored the SUNDIALS
    # GitHub release URL (LLNL/sundials) -- confirm against project history.
    url = ('https://github.com/LLNL/' + 'sundials/releases/download/v{}/sundials-{}.tar.gz'.format(sundials_version, sundials_version))
    logger.info('Downloading sundials')
    download_extract_library(url, download_dir)
    cmake_args = ['-DLAPACK_ENABLE=ON', '-DSUNDIALS_INDEX_SIZE=32', '-DBUILD_ARKODE:BOOL=OFF', '-DEXAMPLES_ENABLE:BOOL=OFF', ('-DCMAKE_INSTALL_PREFIX=' + install_dir)]
    # Out-of-source build next to the extracted tarball.
    build_directory = os.path.abspath(join(download_dir, 'build_sundials'))
    if (not os.path.exists(build_directory)):
        print(('\n-' * 10), 'Creating build dir', ('-' * 40))
        os.makedirs(build_directory)
    print(('-' * 10), 'Running CMake prepare', ('-' * 40))
    subprocess.run(['cmake', f'../sundials-{sundials_version}', *cmake_args], cwd=build_directory, check=True)
    print(('-' * 10), 'Building the sundials', ('-' * 40))
    make_cmd = ['make', 'install']
    subprocess.run(make_cmd, cwd=build_directory, check=True)
class CocoGenerator(Generator):
    """Detection data generator backed by the MS COCO annotation format.

    Maintains three label spaces and the mappings between them:
    COCO category ids, contiguous internal labels (0..num_classes-1),
    and human-readable class names.
    """

    def __init__(self, data_dir, set_name, **kwargs):
        # Annotations are expected at
        # <data_dir>/annotations/instances_<set_name>.json.
        self.data_dir = data_dir
        self.set_name = set_name
        self.coco = COCO(os.path.join(data_dir, 'annotations', (('instances_' + set_name) + '.json')))
        self.image_ids = self.coco.getImgIds()
        self.load_classes()
        super(CocoGenerator, self).__init__(**kwargs)

    def load_classes(self):
        """Build the label <-> COCO-id <-> name lookup tables."""
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=(lambda x: x['id']))
        self.classes = {}
        self.coco_labels = {}
        self.coco_labels_inverse = {}
        for c in categories:
            # Internal labels are assigned contiguously, in ascending COCO-id order.
            self.coco_labels[len(self.classes)] = c['id']
            self.coco_labels_inverse[c['id']] = len(self.classes)
            self.classes[c['name']] = len(self.classes)
        # Inverse of self.classes: internal label -> class name.
        self.labels = {}
        for (key, value) in self.classes.items():
            self.labels[value] = key

    def size(self):
        """Return the number of images in the set."""
        return len(self.image_ids)

    def num_classes(self):
        """Return the number of known classes."""
        return len(self.classes)

    def has_label(self, label):
        """Return True if the internal label is known."""
        return (label in self.labels)

    def has_name(self, name):
        """Return True if the class name is known."""
        return (name in self.classes)

    def name_to_label(self, name):
        """Map a class name to its internal label."""
        return self.classes[name]

    def label_to_name(self, label):
        """Map an internal label to its class name."""
        return self.labels[label]

    def coco_label_to_label(self, coco_label):
        """Map a COCO category id to the internal label."""
        return self.coco_labels_inverse[coco_label]

    def coco_label_to_name(self, coco_label):
        """Map a COCO category id to the class name."""
        return self.label_to_name(self.coco_label_to_label(coco_label))

    def label_to_coco_label(self, label):
        """Map an internal label back to the COCO category id."""
        return self.coco_labels[label]

    def image_aspect_ratio(self, image_index):
        """Return width/height of the image at ``image_index``."""
        image = self.coco.loadImgs(self.image_ids[image_index])[0]
        return (float(image['width']) / float(image['height']))

    def load_image(self, image_index):
        """Load the image at ``image_index`` as an RGB array."""
        image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
        path = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])
        # cv2 reads BGR; convert to the RGB convention used downstream.
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def load_annotations(self, image_index):
        """Return labels and (x1, y1, x2, y2) boxes for ``image_index``.

        Crowd annotations and degenerate boxes (width or height < 1 px)
        are skipped.
        """
        annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations = {'labels': np.empty((0,), dtype=np.float32), 'bboxes': np.empty((0, 4), dtype=np.float32)}
        if (len(annotations_ids) == 0):
            return annotations
        coco_annotations = self.coco.loadAnns(annotations_ids)
        for (idx, a) in enumerate(coco_annotations):
            # Skip boxes with no area.
            if ((a['bbox'][2] < 1) or (a['bbox'][3] < 1)):
                continue
            annotations['labels'] = np.concatenate([annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)
            # COCO boxes are (x, y, w, h); convert to (x1, y1, x2, y2).
            annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[a['bbox'][0], a['bbox'][1], (a['bbox'][0] + a['bbox'][2]), (a['bbox'][1] + a['bbox'][3])]]], axis=0)
        return annotations
def Unit(st, *args, **kwargs):
    """Parse *st* into an `astropy.units.Unit`, normalizing spectroscopy-style
    unit strings first (e.g. stripping per-molecule suffixes).

    Non-string inputs (already-parsed units, quantities, ...) are passed to
    ``astropy.units.Unit`` untouched.  Extra arguments are forwarded.
    """
    import astropy.units as u
    try:
        # BUGFIX(review): the original line was ``st.replace('', 'u')``, which
        # inserts a 'u' between every character (an empty pattern matches at
        # every position) and would garble every unit string.  The intended
        # pattern was evidently lost; reconstructed here as micro-sign
        # normalization ('µm' -> 'um') -- TODO confirm against project history.
        st = st.replace('µ', 'u')
        # Per-molecule suffixes are not parseable units; strip them.
        st = st.replace('/molecule', '')
        st = st.replace('/molec', '')
    except AttributeError:
        # *st* is not a string; let astropy handle it directly.
        pass
    with warnings.catch_warnings():
        # Spectroscopy strings like 'cm-1/mol/K' trip this warning; ignore it.
        warnings.filterwarnings('ignore', '.*multiple slashes.*', category=u.UnitsWarning)
        unit = u.Unit(st, *args, **kwargs)
    return unit
def reduce_embd_id_len(E1, tasks, cutoff=100):
    """Truncate each embedding matrix in *E1* to at most *cutoff* columns.

    Parameters
    ----------
    E1 : sequence of ndarray
        Matrices of shape (n_ids, width); columns past ``cutoff`` are dropped.
    tasks : sequence
        Task list; only a single task is currently supported.
    cutoff : int, optional
        Maximum number of columns to keep (default 100).

    Returns
    -------
    list of ndarray
        Truncated copies of the input matrices.

    Raises
    ------
    NotImplementedError
        If more than one task is given.
    """
    if (len(tasks) > 1):
        raise NotImplementedError('Not implemented minimum length with multiple tasks yet.')
    E1_short = []
    for sub in E1:
        # np.delete returns a copy with columns cutoff.. removed.
        d = np.delete(sub, np.s_[cutoff:], 1)
        E1_short.append(d)
    # BUGFIX: the expected width was hard-coded to 100, which broke any other
    # ``cutoff`` value as well as inputs narrower than the cutoff.
    expected_width = min(cutoff, E1[(- 1)].shape[1])
    assert (E1_short[(- 1)].shape == (E1[(- 1)].shape[0], expected_width))
    return E1_short
class ClassDefTransformer(ast.NodeTransformer):
    """AST transformer that renames classes (and their plain-name bases)
    by applying a mapping of substring replacements."""

    def __init__(self, class_replace_map: Optional[Dict[(str, str)]]):
        # Normalize None to an empty mapping so visits need no guard.
        if class_replace_map is None:
            self.class_replace_map = {}
        else:
            self.class_replace_map = class_replace_map

    def visit_ClassDef(self, node: ast.ClassDef) -> ast.AST:
        # Apply every old -> new substitution to the class name itself...
        for old_value, new_value in self.class_replace_map.items():
            node.name = node.name.replace(old_value, new_value)
        # ...and to each base that is a plain name (i.e. has an ``id``).
        for base in node.bases:
            if hasattr(base, 'id'):
                for old_value, new_value in self.class_replace_map.items():
                    base.id = base.id.replace(old_value, new_value)
        # Recurse so nested class definitions are rewritten too.
        return self.generic_visit(node)
def _convert_stix_campaigns_to_dict(stix_attack_data):
    """Convert STIX campaign objects into plain dicts with an ATT&CK id.

    Each campaign is round-tripped through its JSON serialization (dates are
    revived by ``_date_hook``) and annotated with ``campaign_id``.
    """
    converted = []
    for stix_campaign in stix_attack_data:
        campaign_dict = json.loads(stix_campaign.serialize(), object_hook=_date_hook)
        campaign_dict['campaign_id'] = get_attack_id(stix_campaign)
        converted.append(campaign_dict)
    return converted
def test_FullMultiplicativeForm_only_minimize():
    """FullMultiplicativeForm ranking on a matrix whose objectives are all
    minimization: A0 must rank first, with the expected log-scores."""
    matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    dm = skcriteria.mkdm(matrix=matrix, objectives=[min, min, min])
    # Vector-scale the matrix before ranking, as the method requires.
    dm = VectorScaler(target='matrix').transform(dm)
    result = FullMultiplicativeForm().evaluate(dm)
    expected = RankResult('FullMultiplicativeForm', ['A0', 'A1', 'A2'], [1, 2, 3], {'score': np.log([398.0, 19.0, 4.0])})
    assert result.values_equals(expected)
    assert (result.method == expected.method)
    assert np.allclose(result.e_.score, expected.e_.score, atol=0.0001)
def _filter_commands(ctx, commands=None):
lookup = getattr(ctx.command, 'commands', {})
if ((not lookup) and isinstance(ctx.command, click.MultiCommand)):
lookup = _get_lazyload_commands(ctx.command)
if (commands is None):
return sorted(lookup.values(), key=(lambda item: item.name))
names = [name.strip() for name in commands.split(',')]
return [lookup[name] for name in names if (name in lookup)] |
class WeightNormLinear(nn.Linear):
    """Linear layer with weight normalization and Polyak-averaged parameters
    (Salimans & Kingma style data-dependent init via ``forward(init=True)``).

    Parameters
    ----------
    in_features, out_features : layer dimensions.
    init_scale : target scale of the data-dependent initialization.
    polyak_decay : decay rate for the exponential parameter averages.
    """

    def __init__(self, in_features, out_features, init_scale=1.0, polyak_decay=0.9995):
        super(WeightNormLinear, self).__init__(in_features, out_features, bias=True)
        # V: direction parameter (aliases the base class weight);
        # g: per-output gain; b: bias (aliases the base class bias).
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_features))
        self.b = self.bias
        # Polyak (exponential moving) averages used at evaluation time.
        self.register_buffer('V_avg', torch.zeros(out_features, in_features))
        self.register_buffer('g_avg', torch.zeros(out_features))
        self.register_buffer('b_avg', torch.zeros(out_features))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Intentionally a no-op: real initialization is data-dependent and
        # happens on the first forward pass with init=True.
        return

    def forward(self, x, init=False):
        if (init is True):
            # Data-dependent init: sample a random direction, then choose
            # g and b so the pre-activations of this batch have mean 0 and
            # std ``init_scale``.
            self.V.data.copy_((torch.randn(self.V.data.size()).type_as(self.V.data) * 0.05))
            v_norm = (self.V.data / self.V.data.norm(2, 1).expand_as(self.V.data))
            x_init = F.linear(x, v_norm).data
            (m_init, v_init) = (x_init.mean(0).squeeze(0), x_init.var(0).squeeze(0))
            scale_init = (self.init_scale / torch.sqrt((v_init + 1e-10)))
            self.g.data.copy_(scale_init)
            self.b.data.copy_(((- m_init) * scale_init))
            x_init = (scale_init.view(1, (- 1)).expand_as(x_init) * (x_init - m_init.view(1, (- 1)).expand_as(x_init)))
            # Seed the averages with the freshly initialized values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Use live parameters in training, Polyak averages otherwise
            # (get_vars_maybe_avg also updates the averages while training).
            (v, g, b) = get_vars_maybe_avg(self, ['V', 'g', 'b'], self.training, polyak_decay=self.polyak_decay)
            x = F.linear(x, v)
            # Scale each output by g / ||v||, then shift by b.
            scalar = (g / torch.norm(v, 2, 1).squeeze(1))
            x = ((scalar.view(1, (- 1)).expand_as(x) * x) + b.view(1, (- 1)).expand_as(x))
            return x
# NOTE(review): the expressions surrounding this class (the route string,
# the two ``_if(...)`` calls, ``_user_admin()`` and the bare nickname string)
# look like decorators whose receivers were stripped (e.g. ``@resource(...)``,
# ``@show_if(...)``); as written they are plain expression statements.
# Restore the decorator names from upstream history.
('/v1/user/quota/<quota_id>/limit')
_if(features.SUPER_USERS)
_if(features.QUOTA_MANAGEMENT)
class UserQuotaLimitList(ApiResource):
    # API resource listing the limits configured on one of the authenticated
    # user's quotas.
    _user_admin()
    ('listUserQuotaLimit')
    def get(self, quota_id):
        # Resolve the quota by id within the calling user's namespace and
        # render each of its limits through ``limit_view``.
        parent = get_authenticated_user()
        quota = get_quota(parent.username, quota_id)
        return [limit_view(limit) for limit in model.namespacequota.get_namespace_quota_limit_list(quota)]
class TestSnapshotWithDTensor(DTensorTestBase):
    """Round-trips a hybrid-sharded FSDP model + optimizer through Snapshot."""

    def _create_model(self, seed: int, optim_lr: float, device_mesh: Optional[DeviceMesh]=None):
        # Build a seeded, FSDP-wrapped DummyModel with HYBRID_SHARD sharding,
        # plus an Adam optimizer wrapped for snapshotting.  One optimizer step
        # is taken so optimizer state exists before save/restore.
        torch.manual_seed(seed)
        if device_mesh:
            model = FSDP(DummyModel().cuda(), device_mesh=device_mesh, sharding_strategy=ShardingStrategy.HYBRID_SHARD)
        else:
            # No mesh given: derive intra-/inter-node process groups from a
            # fresh 2 x (WORLD_SIZE // 2) device mesh.
            mesh_2d = init_device_mesh('cuda', (2, (WORLD_SIZE // 2)))
            intra_node_pg = mesh_2d.get_group(mesh_dim=1)
            inter_node_pg = mesh_2d.get_group(mesh_dim=0)
            model = FSDP(DummyModel().cuda(), process_group=(intra_node_pg, inter_node_pg), sharding_strategy=ShardingStrategy.HYBRID_SHARD)
        FSDP.set_state_dict_type(model, StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), optim_state_dict_config=ShardedOptimStateDictConfig())
        optim = torch.optim.Adam(model.parameters(), lr=optim_lr)
        optim.step(closure=None)
        optim.zero_grad(set_to_none=True)
        optim = FSDPOptimizerAdapter(model, optim)
        return (model, optim)
    # NOTE(review): the two bare expressions below appear to be decorators
    # whose receivers were stripped (likely ``@with_comms`` and
    # ``@skip_if_lt_x_gpu(WORLD_SIZE)``) -- restore from upstream history.
    _comms
    _if_lt_x_gpu(WORLD_SIZE)
    def test_save_and_load_same_world_size(self):
        # Two differently-seeded models must disagree before restore and
        # agree afterwards.
        mesh_2d = init_device_mesh('cuda', (2, (WORLD_SIZE // 2)))
        (src_model, src_optim) = self._create_model(seed=42, optim_lr=0.1, device_mesh=mesh_2d)
        (dst_model, dst_optim) = self._create_model(seed=24, optim_lr=0.2, device_mesh=mesh_2d)
        assert (not check_state_dict_eq(src_model.state_dict(), dst_model.state_dict()))
        assert (not check_state_dict_eq(src_optim.state_dict(), dst_optim.state_dict()))
        # All ranks must use the same path; only rank 0 logs it.
        tmp_path = f'/tmp/{uuid.uuid4()}'
        if (dist.get_rank() == 0):
            logger.info(f'Saving to {tmp_path}')
        snapshot = Snapshot.take(str(tmp_path), {'model': src_model, 'optim': src_optim})
        snapshot.restore({'model': dst_model, 'optim': dst_optim})
        logging.info(f'{dst_model.state_dict()}')
        assert check_state_dict_eq(dst_model.state_dict(), src_model.state_dict())
        assert check_state_dict_eq(dst_optim.state_dict(), src_optim.state_dict())
def purerpc_server_wrong_method_name_port(greeter_pb2):
    """Fixture-style generator: run a Greeter service whose only handler is
    registered under the *wrong* method name, yielding the server's port.

    Lets tests exercise client behavior when the requested RPC is absent.
    """
    service = purerpc.Service('Greeter')

    # BUGFIX(review): the decorator receiver was lost -- the original line was
    # a bare string expression ``('SomeOtherMethod')`` so the handler was
    # never registered.  Restored as a ``service.rpc`` registration; confirm
    # against upstream history.
    @service.rpc('SomeOtherMethod')
    async def say_hello(message: greeter_pb2.HelloRequest) -> greeter_pb2.HelloReply:
        return greeter_pb2.HelloReply(message=('Hello, ' + message.name))

    with run_purerpc_service_in_process(service) as port:
        (yield port)
def load_groups(cli, manage_dict):
    """Register the command groups described by ``manage_dict`` on *cli*.

    ``manage_dict['groups']`` is either a list of group names or a list of
    single-key dicts mapping a group name to its options; in the dict form a
    ``help_text`` option is renamed to click's ``help`` (mutating the given
    options dict, as before).  Created groups are stored in
    ``cli.manage_groups``.
    """
    cli.manage_groups = {}
    groups = manage_dict.get('groups')
    if not groups:
        return
    # The list is homogeneous; inspect the first entry to pick the format.
    if isinstance(groups[0], dict):
        for group in groups:
            for group_name, options in group.items():
                options = options or {}
                if 'help_text' in options:
                    options['help'] = options.pop('help_text')
                cli.manage_groups[group_name] = cli.group(name=group_name, **options)((lambda : None))
    else:
        for group_name in groups:
            cli.manage_groups[group_name] = cli.group(name=group_name)((lambda : None))
def test_run_strict_exception_groups_nursery_override() -> None:
    """A nursery's ``strict_exception_groups=False`` overrides the run-level
    ``strict_exception_groups=True``: the exception escapes unwrapped."""
    async def main() -> NoReturn:
        # The nursery opts out of strict grouping, so the bare Exception
        # propagates as-is instead of inside an ExceptionGroup.
        async with _core.open_nursery(strict_exception_groups=False):
            raise Exception('foo')
    with pytest.raises(Exception, match='^foo$'):
        _core.run(main, strict_exception_groups=True)
class RTLIRConversionError(Exception):
    """Raised when an object cannot be converted into RTLIR.

    Must be instantiated inside an ``except`` block: the constructor reads
    the active exception via ``sys.exc_info`` to report the file, line and
    method where the original failure occurred.
    """

    def __init__(self, obj, msg):
        obj = str(obj)
        # Innermost frame of the exception currently being handled.
        (_, _, tb) = sys.exc_info()
        tb_info = traceback.extract_tb(tb)
        (fname, line, func, text) = tb_info[(- 1)]
        # Idiom fix: don't ``return`` the (None) result of the superclass
        # initializer from __init__.
        super().__init__(f'''
In file {fname}, Line {line}, Method {func}:
Error trying to convert {obj} into RTLIR:
- {msg}
{text}''')
def _concat_dataset(cfg):
    """Build a ConcatDataset from a config whose ``ann_file`` is a list.

    For each annotation file a deep copy of ``cfg`` is specialized; the
    ``img_prefix`` / ``partial_file`` / ``pseudo_file`` entries are indexed
    per dataset only when they are themselves lists or tuples.
    """
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    partial_files = cfg.get('partial_file', None)
    pseudo_files = cfg.get('pseudo_file', None)
    datasets = []
    for idx in range(len(ann_files)):
        data_cfg = copy.deepcopy(cfg)
        data_cfg['ann_file'] = ann_files[idx]
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[idx]
        if isinstance(partial_files, (list, tuple)):
            data_cfg['partial_file'] = partial_files[idx]
        if isinstance(pseudo_files, (list, tuple)):
            data_cfg['pseudo_file'] = pseudo_files[idx]
        datasets.append(build_dataset(data_cfg))
    return ConcatDataset(datasets)
def normal_ordered_ladder_term(term, coefficient, parity=(- 1)):
    """Normal-order a single ladder-operator term by adjacent swaps.

    Reorders so raising operators precede lowering operators and, within
    each kind, higher mode indices precede lower ones.  Each swap multiplies
    the coefficient by ``parity``; swapping a lowering operator past a
    raising operator on the *same* mode also spawns the shorter
    (anti)commutator remainder term, handled recursively.

    Parameters
    ----------
    term : tuple of (mode_index, is_raising) pairs
    coefficient : numeric coefficient of the term
    parity : -1 to build a FermionOperator, +1 for a BosonOperator

    Returns
    -------
    The normal-ordered operator (sum of terms).
    """
    term = list(term)
    if (parity == (- 1)):
        Op = FermionOperator
    elif (parity == 1):
        Op = BosonOperator
    # NOTE(review): any other ``parity`` value leaves ``Op`` unbound and the
    # next line raises NameError -- callers must pass -1 or +1.
    ordered_term = Op()
    # Bubble-sort style pass; j walks each new element leftwards.
    for i in range(1, len(term)):
        for j in range(i, 0, (- 1)):
            right_operator = term[j]
            left_operator = term[(j - 1)]
            if (right_operator[1] and (not left_operator[1])):
                # Raising operator to the right of a lowering one: swap.
                term[(j - 1)] = right_operator
                term[j] = left_operator
                coefficient *= parity
                if (right_operator[0] == left_operator[0]):
                    # Same mode: the (anti)commutator contributes a term with
                    # this operator pair removed; normal-order it recursively.
                    new_term = (term[:(j - 1)] + term[(j + 1):])
                    ordered_term += normal_ordered_ladder_term(tuple(new_term), (parity * coefficient), parity)
            elif (right_operator[1] == left_operator[1]):
                # Both raising or both lowering: sort by mode index.
                if ((parity == (- 1)) and (right_operator[0] == left_operator[0])):
                    # Fermionic case: a_i a_i = 0 (likewise for daggers),
                    # so the whole term vanishes.
                    return ordered_term
                elif (right_operator[0] > left_operator[0]):
                    term[(j - 1)] = right_operator
                    term[j] = left_operator
                    coefficient *= parity
    ordered_term += Op(tuple(term), coefficient)
    return ordered_term
def shortestdistance(ifst, reverse=False, source=_fst.NO_STATE_ID, queue_type='auto', delta=_weight.DELTA):
    """Compute shortest distances over *ifst* from ``source``.

    ``queue_type`` is a string resolved through ``_getters.GetQueueType``;
    an unrecognized name raises ValueError with the offending value.
    """
    try:
        resolved_queue_type = _getters.GetQueueType(queue_type)
    except ValueError:
        raise ValueError('Unknown queue type: {!r}'.format(queue_type))
    return ifst._ops.shortestdistance(ifst, reverse, source, resolved_queue_type, delta)
class NNQFunction(MLPFunction):
    """MLP-based Q-function Q(s, a) over an environment's flat spaces.

    Creates (or reuses) observation/action placeholders and delegates the
    network construction to MLPFunction.
    """

    def __init__(self, env_spec, hidden_layer_sizes=(100, 100), name='qf', observation_ph=None, action_ph=None):
        # Required by the Serializable mixin to record constructor args.
        Serializable.quick_init(self, locals())
        self._Da = env_spec.action_space.flat_dim
        self._Do = env_spec.observation_space.flat_dim
        # Reuse externally supplied placeholders when given; otherwise make
        # fresh float32 placeholders shaped [batch, dim].
        self._obs_pl = (observation_ph if (observation_ph is not None) else tf_utils.get_placeholder(name='observation', dtype=tf.float32, shape=[None, self._Do]))
        self._action_pl = (action_ph if (action_ph is not None) else tf_utils.get_placeholder(name='actions', dtype=tf.float32, shape=[None, self._Da]))
        self.name = name
        super(NNQFunction, self).__init__(name, (self._obs_pl, self._action_pl), hidden_layer_sizes)
def determineIndentationAndTrailingWS(text):
    """Guess the indentation unit of *text* and count trailing-whitespace lines.

    Returns ``(indent, trailing)``: ``indent`` is the most-voted number of
    spaces per level, with -1 meaning tab indentation and 0 meaning
    undetermined; ``trailing`` is the number of lines that carry trailing
    whitespace.  Only the first 32 KiB of *text* are inspected.  Votes are
    collected from the indent increase after each line ending in ':'.
    """
    text = text[:32768]
    # Votes per indent width; key -1 collects votes for tab indentation.
    votes = {-1: 0}
    trailing = 0
    lines = text.splitlines()
    # Sentinel so the look-ahead indices below stay in range.
    lines.insert(0, '')
    for i, raw in enumerate(lines):
        content = raw.lstrip()
        if len(raw.rstrip()) < len(raw):
            trailing += 1
        indent = len(raw) - len(content)
        body = content.rstrip()
        # Skip pure comment lines; strip inline comments from the rest.
        if body.startswith('#'):
            continue
        body = body.split('#', 1)[0].rstrip()
        if not body:
            continue
        # A line ending in ':' opens a block -- inspect the next non-blank
        # line (looking at most one line past a single blank).
        if body.endswith(':') and len(lines) > (i + 2):
            follow = lines[i + 1]
            follow_content = follow.lstrip()
            if not follow_content:
                follow = lines[i + 2]
                follow_content = follow.lstrip()
            if follow_content:
                if follow.startswith('\t'):
                    votes[-1] += 1
                else:
                    step = (len(follow) - len(follow_content)) - indent
                    if step > 0:
                        # get(step, 1) + 1 reproduces the original counting,
                        # where the first sighting registers two votes.
                        votes[step] = votes.get(step, 1) + 1
    # Pick the width with the most votes; ties go to the earliest-seen key.
    best_width, best_count = 0, 0
    for width, count in votes.items():
        if count > best_count:
            best_width, best_count = width, count
    return (best_width, trailing)
def setup_sentry() -> None:
    """Initialise the Sentry SDK for error reporting.

    DEBUG-level log records become breadcrumbs while WARNING and above are
    sent as events; Redis instrumentation is enabled, the release is tagged
    with the current git SHA, and traces/profiles are sampled at 50%.
    """
    sentry_logging = LoggingIntegration(level=logging.DEBUG, event_level=logging.WARNING)
    sentry_sdk.init(dsn=constants.Bot.sentry_dsn, integrations=[sentry_logging, RedisIntegration()], release=f'{constants.GIT_SHA}', traces_sample_rate=0.5, _experiments={'profiles_sample_rate': 0.5})
class ModuleNodeTest(ModuleLoader, unittest.TestCase):
    """Tests for astroid's Module node: attribute access, name resolution,
    relative-import handling and the file/stream API."""

    def test_special_attributes(self) -> None:
        # __name__ yields two values: the literal module name and '__main__'.
        self.assertEqual(len(self.module.getattr('__name__')), 2)
        self.assertIsInstance(self.module.getattr('__name__')[0], nodes.Const)
        self.assertEqual(self.module.getattr('__name__')[0].value, 'data.module')
        self.assertIsInstance(self.module.getattr('__name__')[1], nodes.Const)
        self.assertEqual(self.module.getattr('__name__')[1].value, '__main__')
        self.assertEqual(len(self.module.getattr('__doc__')), 1)
        self.assertIsInstance(self.module.getattr('__doc__')[0], nodes.Const)
        self.assertEqual(self.module.getattr('__doc__')[0].value, 'test module for astroid\n')
        self.assertEqual(len(self.module.getattr('__file__')), 1)
        self.assertIsInstance(self.module.getattr('__file__')[0], nodes.Const)
        self.assertEqual(self.module.getattr('__file__')[0].value, os.path.abspath(resources.find('data/module.py')))
        self.assertEqual(len(self.module.getattr('__dict__')), 1)
        self.assertIsInstance(self.module.getattr('__dict__')[0], nodes.Dict)
        # __path__ only exists on packages.
        self.assertRaises(AttributeInferenceError, self.module.getattr, '__path__')
        self.assertEqual(len(self.pack.getattr('__path__')), 1)
        self.assertIsInstance(self.pack.getattr('__path__')[0], nodes.List)

    def test_dict_interface(self) -> None:
        # Module supports the mapping protocol over its names.
        _test_dict_interface(self, self.module, 'YO')

    def test_getattr(self) -> None:
        yo = self.module.getattr('YO')[0]
        self.assertIsInstance(yo, nodes.ClassDef)
        self.assertEqual(yo.name, 'YO')
        # igetattr follows assignments/imports to the inferred target.
        red = next(self.module.igetattr('redirect'))
        self.assertIsInstance(red, nodes.FunctionDef)
        self.assertEqual(red.name, 'four_args')
        namenode = next(self.module.igetattr('NameNode'))
        self.assertIsInstance(namenode, nodes.ClassDef)
        self.assertEqual(namenode.name, 'Name')
        # Resolution across modules: SSL1.Connection comes from another file.
        mod = resources.build_file('data/appl/myConnection.py', 'data.appl.myConnection')
        ssl = next(mod.igetattr('SSL1'))
        cnx = next(ssl.igetattr('Connection'))
        self.assertEqual(cnx.__class__, nodes.ClassDef)
        self.assertEqual(cnx.name, 'Connection')
        self.assertEqual(cnx.root().name, 'data.SSL1.Connection1')
        self.assertEqual(len(self.nonregr.getattr('enumerate')), 2)
        self.assertRaises(InferenceError, self.nonregr.igetattr, 'YOAA')

    def test_wildcard_import_names(self) -> None:
        # With __all__ defined, wildcard names come from it verbatim.
        m = resources.build_file('data/all.py', 'all')
        self.assertEqual(m.wildcard_import_names(), ['Aaa', '_bla', 'name'])
        # Without __all__, private names are excluded.
        m = resources.build_file('data/notall.py', 'notall')
        res = sorted(m.wildcard_import_names())
        self.assertEqual(res, ['Aaa', 'func', 'name', 'other'])

    def test_public_names(self) -> None:
        # public_names ignores __all__ and reports non-underscore bindings.
        m = builder.parse("\n name = 'a'\n _bla = 2\n other = 'o'\n class Aaa: pass\n def func(): print('yo')\n __all__ = 'Aaa', '_bla', 'name'\n ")
        values = sorted(['Aaa', 'name', 'other', 'func'])
        self.assertEqual(sorted(m.public_names()), values)
        m = builder.parse("\n name = 'a'\n _bla = 2\n other = 'o'\n class Aaa: pass\n\n def func(): return 'yo'\n ")
        res = sorted(m.public_names())
        self.assertEqual(res, values)
        # Non-literal __all__ members don't affect public_names.
        m = builder.parse('\n from missing import tzop\n trop = "test"\n __all__ = (trop, "test1", tzop, 42)\n ')
        res = sorted(m.public_names())
        self.assertEqual(res, ['trop', 'tzop'])
        m = builder.parse("\n test = tzop = 42\n __all__ = ('test', ) + ('tzop', )\n ")
        res = sorted(m.public_names())
        self.assertEqual(res, ['test', 'tzop'])

    def test_module_getattr(self) -> None:
        # Assignment and augmented assignment both count; del removes the
        # binding but prior assignments remain visible.
        data = '\n appli = application\n appli += 2\n del appli\n '
        astroid = builder.parse(data, __name__)
        self.assertEqual(len(astroid.getattr('appli')), 2, astroid.getattr('appli'))

    def test_relative_to_absolute_name(self) -> None:
        # Package: level 1 is the package itself.
        mod = nodes.Module('very.multi.package', package=True)
        modname = mod.relative_to_absolute_name('utils', 1)
        self.assertEqual(modname, 'very.multi.package.utils')
        modname = mod.relative_to_absolute_name('utils', 2)
        self.assertEqual(modname, 'very.multi.utils')
        modname = mod.relative_to_absolute_name('utils', 0)
        self.assertEqual(modname, 'very.multi.package.utils')
        modname = mod.relative_to_absolute_name('', 1)
        self.assertEqual(modname, 'very.multi.package')
        # Plain module: level 1 is the containing package.
        mod = nodes.Module('very.multi.module', package=False)
        modname = mod.relative_to_absolute_name('utils', 0)
        self.assertEqual(modname, 'very.multi.utils')
        modname = mod.relative_to_absolute_name('utils', 1)
        self.assertEqual(modname, 'very.multi.utils')
        modname = mod.relative_to_absolute_name('utils', 2)
        self.assertEqual(modname, 'very.utils')
        modname = mod.relative_to_absolute_name('', 1)
        self.assertEqual(modname, 'very.multi')

    def test_relative_to_absolute_name_beyond_top_level(self) -> None:
        # Climbing above the top-level package must raise with a clear message.
        mod = nodes.Module('a.b.c', package=True)
        for level in (5, 4):
            with self.assertRaises(TooManyLevelsError) as cm:
                mod.relative_to_absolute_name('test', level)
            expected = f'Relative import with too many levels ({(level - 1)}) for module {mod.name!r}'
            self.assertEqual(expected, str(cm.exception))

    def test_import_1(self) -> None:
        data = 'from . import subpackage'
        # data/ must be importable for the relative import to resolve.
        sys.path.insert(0, resources.find('data'))
        astroid = builder.parse(data, 'package', 'data/package/__init__.py')
        try:
            m = astroid.import_module('', level=1)
            self.assertEqual(m.name, 'package')
            inferred = list(astroid.igetattr('subpackage'))
            self.assertEqual(len(inferred), 1)
            self.assertEqual(inferred[0].name, 'package.subpackage')
        finally:
            del sys.path[0]

    def test_import_2(self) -> None:
        # Same as test_import_1 but through an aliased import.
        data = 'from . import subpackage as pouet'
        astroid = builder.parse(data, 'package', 'data/package/__init__.py')
        sys.path.insert(0, resources.find('data'))
        try:
            m = astroid.import_module('', level=1)
            self.assertEqual(m.name, 'package')
            inferred = list(astroid.igetattr('pouet'))
            self.assertEqual(len(inferred), 1)
            self.assertEqual(inferred[0].name, 'package.subpackage')
        finally:
            del sys.path[0]
    # NOTE(review): the bare string below looks like a decorator whose
    # receiver was stripped (likely ``@mock.patch(...)`` -- the method takes
    # a ``mock`` argument); restore from upstream history.
    ('astroid.nodes.scoped_nodes.scoped_nodes.AstroidManager.ast_from_module_name')
    def test_import_unavailable_module(self, mock) -> None:
        # Pick a stdlib module that does not exist on this platform.
        unavailable_modname = ('posixpath' if WIN32 else 'ntpath')
        module = builder.parse(f'import {unavailable_modname}')
        mock.side_effect = AstroidBuildingError
        with pytest.raises(AstroidBuildingError):
            module.import_module(unavailable_modname)
        mock.assert_called_once()

    def test_file_stream_in_memory(self) -> None:
        # Modules built from strings stream their encoded source back.
        data = 'irrelevant_variable is irrelevant'
        astroid = builder.parse(data, 'in_memory')
        with astroid.stream() as stream:
            self.assertEqual(stream.read().decode(), data)

    def test_file_stream_physical(self) -> None:
        # Modules built from files stream the on-disk bytes.
        path = resources.find('data/all.py')
        astroid = builder.AstroidBuilder().file_build(path, 'all')
        with open(path, 'rb') as file_io:
            with astroid.stream() as stream:
                self.assertEqual(stream.read(), file_io.read())

    def test_file_stream_api(self) -> None:
        # The legacy file_stream attribute must be gone.
        path = resources.find('data/all.py')
        file_build = builder.AstroidBuilder().file_build(path, 'all')
        with self.assertRaises(AttributeError):
            file_build.file_stream

    def test_stream_api(self) -> None:
        # stream() returns a closeable, context-manager-capable object.
        path = resources.find('data/all.py')
        astroid = builder.AstroidBuilder().file_build(path, 'all')
        stream = astroid.stream()
        self.assertTrue(hasattr(stream, 'close'))
        with stream:
            with open(path, 'rb') as file_io:
                self.assertEqual(stream.read(), file_io.read())
def test_singleline_docstring() -> None:
    """A one-line module docstring gets precise position information."""
    data = textwrap.dedent(" '''Hello World'''\n foo = 1\n ")
    module = builder.parse(data, __name__)
    assert isinstance(module.doc_node, nodes.Const)
    assert (module.doc_node.lineno == 1)
    assert (module.doc_node.col_offset == 0)
    assert (module.doc_node.end_lineno == 1)
    assert (module.doc_node.end_col_offset == 17)
def test_multiline_docstring() -> None:
    """A multi-line module docstring spans its full line range."""
    data = textwrap.dedent(" '''Hello World\n\n Also on this line.\n '''\n foo = 1\n ")
    module = builder.parse(data, __name__)
    assert isinstance(module.doc_node, nodes.Const)
    assert (module.doc_node.lineno == 1)
    assert (module.doc_node.col_offset == 0)
    assert (module.doc_node.end_lineno == 4)
    assert (module.doc_node.end_col_offset == 3)
def test_comment_before_docstring() -> None:
    """A leading comment does not stop the string from being the docstring,
    and positions reflect the docstring's own lines."""
    data = textwrap.dedent(" # Some comment\n '''This is\n\n a multiline docstring.\n '''\n ")
    module = builder.parse(data, __name__)
    assert isinstance(module.doc_node, nodes.Const)
    assert (module.doc_node.lineno == 2)
    assert (module.doc_node.col_offset == 0)
    assert (module.doc_node.end_lineno == 5)
    assert (module.doc_node.end_col_offset == 3)
def test_without_docstring() -> None:
    """A module without a docstring has ``doc_node is None``."""
    data = textwrap.dedent(' foo = 1\n ')
    module = builder.parse(data, __name__)
    assert (module.doc_node is None)
class TestCheng2020():
    """Smoke tests for the Cheng2020 anchor/attention model constructors."""

    # BUGFIX(review): the decorator receivers were stripped here -- bare
    # ``.parametrize`` / ``.slow`` / ``.pretrained`` lines are syntax errors.
    # Restored as the ``pytest.mark`` forms (``pytest`` is already used
    # elsewhere in this file); confirm against upstream history.
    @pytest.mark.parametrize('func,cls', ((cheng2020_anchor, Cheng2020Anchor), (cheng2020_attn, Cheng2020Attention)))
    def test_anchor_ok(self, func, cls):
        # Qualities 1-3 build 128-channel networks, 4-6 build 192-channel ones.
        for i in range(1, 4):
            net = func(i, metric='mse')
            assert isinstance(net, cls)
            assert (net.state_dict()['g_a.0.conv1.weight'].size(0) == 128)
        for i in range(4, 7):
            net = func(i, metric='mse')
            assert isinstance(net, cls)
            assert (net.state_dict()['g_a.0.conv1.weight'].size(0) == 192)

    @pytest.mark.slow
    @pytest.mark.pretrained
    def test_pretrained(self):
        # Downloads pretrained weights; kept behind the slow/pretrained marks.
        for i in range(1, 4):
            net = cheng2020_anchor(i, metric='mse', pretrained=True)
            assert (net.state_dict()['g_a.0.conv1.weight'].size(0) == 128)
        for i in range(4, 7):
            net = cheng2020_anchor(i, metric='mse', pretrained=True)
            assert (net.state_dict()['g_a.0.conv1.weight'].size(0) == 192)
def D_r1(D, reals, real_labels=None, gamma=10, *args, **kwargs):
    """R1 gradient-penalty term for the discriminator on real samples.

    Returns ``(loss, reg)``: ``loss`` is always None (no extra discriminator
    loss here) and ``reg`` is the penalty produced by ``_grad_reg``, or None
    when ``gamma`` is falsy (penalty disabled).
    """
    loss = None
    reg = None
    if gamma:
        # Gradients of D(reals) w.r.t. the real inputs are required.
        reals.requires_grad_(True)
        real_scores = D(reals, labels=real_labels)
        reg = _grad_reg(input=reals, output=real_scores, gamma=gamma, retain_graph=False).float()
    return (loss, reg)
class Cholesky(Op):
    """Cholesky decomposition Op for symmetric positive-definite matrices.

    Returns the lower factor L (or upper factor U when ``lower=False``).
    ``on_error`` chooses what happens when SciPy's factorization fails:
    'raise' propagates the LinAlgError, 'nan' returns an all-NaN matrix.
    """
    __props__ = ('lower', 'destructive', 'on_error')
    gufunc_signature = '(m,m)->(m,m)'

    def __init__(self, *, lower=True, on_error='raise'):
        self.lower = lower
        self.destructive = False
        if (on_error not in ('raise', 'nan')):
            # BUGFIX: the message contained a doubled quote ('""nan"').
            raise ValueError('on_error must be one of "raise" or "nan"')
        self.on_error = on_error

    def infer_shape(self, fgraph, node, shapes):
        # The factor has the same shape as the input matrix.
        return [shapes[0]]

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert (x.ndim == 2)
        return Apply(self, [x], [x.type()])

    def perform(self, node, inputs, outputs):
        """Numeric evaluation via scipy.linalg.cholesky."""
        x = inputs[0]
        z = outputs[0]
        try:
            z[0] = scipy.linalg.cholesky(x, lower=self.lower).astype(x.dtype)
        except scipy.linalg.LinAlgError:
            if (self.on_error == 'raise'):
                raise
            else:
                # 'nan' mode: return NaNs instead of failing.
                z[0] = (np.zeros(x.shape) * np.nan).astype(x.dtype)

    def L_op(self, inputs, outputs, gradients):
        """Reverse-mode gradient of the Cholesky factorization."""
        dz = gradients[0]
        chol_x = outputs[0]
        if (self.on_error == 'nan'):
            # Substitute a benign value where the factor is NaN so the
            # gradient graph stays finite; the final switch restores NaN.
            ok = (~ ptm.any(ptm.isnan(chol_x)))
            chol_x = ptb.switch(ok, chol_x, 1)
            dz = ptb.switch(ok, dz, 1)
        if (not self.lower):
            # Reduce the upper-triangular case to the lower one by transposing.
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            # Lower triangle of mtx with its diagonal halved.
            return (ptb.tril(mtx) - ptb.diag((ptb.diagonal(mtx) / 2.0)))

        def conjugate_solve_triangular(outer, inner):
            # Solve two triangular systems to apply inv(outer.T) on both sides.
            solve_upper = SolveTriangular(lower=False, b_ndim=2)
            return solve_upper(outer.T, solve_upper(outer.T, inner.T).T)
        s = conjugate_solve_triangular(chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))
        if self.lower:
            grad = (ptb.tril((s + s.T)) - ptb.diag(ptb.diagonal(s)))
        else:
            grad = (ptb.triu((s + s.T)) - ptb.diag(ptb.diagonal(s)))
        if (self.on_error == 'nan'):
            # Propagate NaN through the gradient when the forward pass failed.
            return [ptb.switch(ok, grad, np.nan)]
        else:
            return [grad]
def deserialize_exports(w_exports):
    """Rebuild the exports dict from its serialized Scheme-list form.

    Every entry must look like an export triple ``(key gen-int-id ext-id)``;
    anything else raises SchemeException.
    """
    (entries, _length) = to_rpython_list(w_exports)
    exports = {}
    for entry in entries:
        if not looks_like_an_export(entry):
            raise SchemeException(('looks like an invalid serialization of export : %s' % entry.tostring()))
        # Destructure the (key gen-int-id ext-id) triple via car/cdr walks.
        key = entry.cdr().car()
        gen_int_id = entry.cdr().cdr().car()
        ext_id = entry.cdr().cdr().cdr().car()
        exports[key] = Export(gen_int_id, ext_id)
    return exports
# BUGFIX(review): the decorator receiver was stripped -- a bare tuple
# ``(hookwrapper=True, trylast=True)`` is not a decorator.  Restored as the
# pytest hook-implementation marker; confirm against upstream history.
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_teardown(item):
    """Fail loudly (with greenlet info) if teardown exceeds its timeout.

    Arms a SIGALRM-based interval timer around the teardown phase; if it
    fires, pending greenlet state is printed and RetryTestError is raised,
    since an interrupted teardown leaves fixtures half-cleaned and can make
    later tests flaky.  The timer and handler are restored afterwards.
    """
    def report():
        # Dump all greenlet stacks before aborting the teardown.
        gevent.util.print_run_info()
        raise RetryTestError(f'Teardown timeout >{item.timeout_setup_and_call}s. This must not happen, when the teardown times out not all finalizers got a chance to run. This means not all fixtures are cleaned up, which can make subsequent tests flaky. This would be the case for pending greenlets which are not cleared by previous run.')

    def handler(signum, frame):
        report()
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, item.timeout_teardown)
    (yield)
    # Disarm the timer and restore the default SIGALRM disposition.
    signal.setitimer(signal.ITIMER_REAL, 0)
    signal.signal(signal.SIGALRM, signal.SIG_DFL)
def test_struct_inheritance2() -> None:
    """A struct created with a runtime super-type (struct:posn) must still
    expose the parent's accessors: posn-x on the derived raven instance."""
    m = run_mod("\n #lang pycket\n (require racket/private/kw)\n\n (struct posn (x y))\n (define (raven-constructor super-type)\n (struct raven ()\n #:super super-type\n #:transparent\n #:property prop:procedure (lambda (self) 'nevermore)) raven)\n (define r ((raven-constructor struct:posn) 1 2))\n (define x (posn-x r))\n ")
    # x was bound to (posn-x r), i.e. the first constructor argument.
    ov = m.defs[W_Symbol.make('x')]
    assert (ov.value == 1)
def build(setup_kwargs: Any) -> None:
    """Poetry build hook: optionally cythonize zeroconf's hot modules.

    Skipped entirely when SKIP_CYTHON is set.  Any failure (Cython missing,
    compilation error, ...) is swallowed so a pure-Python install still
    succeeds, unless REQUIRE_CYTHON is set, in which case it propagates.
    """
    if os.environ.get('SKIP_CYTHON', False):
        return
    try:
        from Cython.Build import cythonize
        # Compile the performance-critical modules to C extensions.
        setup_kwargs.update(dict(ext_modules=cythonize(['src/zeroconf/_dns.py', 'src/zeroconf/_cache.py', 'src/zeroconf/_history.py', 'src/zeroconf/_record_update.py', 'src/zeroconf/_listener.py', 'src/zeroconf/_protocol/incoming.py', 'src/zeroconf/_protocol/outgoing.py', 'src/zeroconf/_handlers/answers.py', 'src/zeroconf/_handlers/record_manager.py', 'src/zeroconf/_handlers/multicast_outgoing_queue.py', 'src/zeroconf/_handlers/query_handler.py', 'src/zeroconf/_services/__init__.py', 'src/zeroconf/_services/browser.py', 'src/zeroconf/_services/info.py', 'src/zeroconf/_services/registry.py', 'src/zeroconf/_updates.py', 'src/zeroconf/_utils/ipaddress.py', 'src/zeroconf/_utils/time.py'], compiler_directives={'language_level': '3'}), cmdclass=dict(build_ext=BuildExt)))
        # Keep generated .c files out of the wheel.
        setup_kwargs['exclude_package_data'] = {pkg: ['*.c'] for pkg in setup_kwargs['packages']}
    except Exception:
        # Best-effort: fall back to pure Python unless Cython is mandatory.
        if os.environ.get('REQUIRE_CYTHON'):
            raise
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.