def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False):
if (batch_size <= 1):
return 0
base_batch_size = int((2 ** (math.log((batch_size - 1)) // math.log(2))))
step_size = max((base_batch_size // num_intra_steps), 1)
batch_size = (base_batch_size + ((((batch_size - base_batch_size) - 1) // step_size) * step_size))
if (no_odd and (batch_size % 2)):
batch_size -= 1
return batch_size
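# Usage sketch: repeatedly shrinking a failing batch size walks it down in
# sub-power-of-two steps until 0 signals "give up", e.g.
# 100 -> 96 -> 64 -> 48 -> 32 -> 24 -> ... -> 2 -> 1 -> 0
# (assumes math is imported at module level, as the function requires).
bs = 100
while bs > 0:
    bs = decay_batch_step(bs, num_intra_steps=2)
    print(bs)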
def collect_results_gpu(result_part, size):
(rank, world_size) = get_dist_info()
part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
dist.all_gather(part_recv_list, part_send)
if (rank == 0):
part_list = []
for (recv, shape) in zip(part_recv_list, shape_list):
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
if part_result:
part_list.append(part_result)
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
return ordered_results
def _read_annotations(csv_reader, classes):
result = OrderedDict()
for (line, row) in enumerate(csv_reader):
line += 1
try:
(img_file, x1, y1, x2, y2, class_name) = row[:6]
except ValueError:
raise_from(ValueError("line {}: format should be 'img_file,x1,y1,x2,y2,class_name' or 'img_file,,,,,'".format(line)), None)
if (img_file not in result):
result[img_file] = []
if ((x1, y1, x2, y2, class_name) == ('', '', '', '', '')):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if (x2 <= x1):
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if (y2 <= y1):
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
if (class_name not in classes):
raise ValueError("line {}: unknown class name: '{}' (classes: {})".format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
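# Usage sketch with hypothetical rows (relies on the _parse/raise_from helpers
# this module already assumes); an all-empty row registers an image with no boxes.
import csv
import io
_rows = io.StringIO('img1.jpg,10,20,30,40,cat\nimg2.jpg,,,,,\n')
_ann = _read_annotations(csv.reader(_rows), classes={'cat': 0})
# -> OrderedDict([('img1.jpg', [{'x1': 10, 'x2': 30, 'y1': 20, 'y2': 40, 'class': 'cat'}]),
#                 ('img2.jpg', [])])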
class ProgressModel(object):
def __init__(self, start_date, end_date):
self._start_date = start_date
self._end_date = end_date
self._total_days = ((end_date - start_date).days + 1)
self._progress = 0.0
self._days_completed = 0
self._state = 'init'
self._current_chunk_size = None
self._current_chunk_bounds = None
self._completed_term_increment = None
self._completed_chunk_increment = None
self._current_work = None
self._start_time = time.time()
self._end_time = None
def state(self):
return self._state
def percent_complete(self):
return round((self._progress * 100.0), 3)
def execution_time(self):
if (self._end_time is None):
raise ValueError("Can't get execution_time until execution is complete.")
return (self._end_time - self._start_time)
def execution_bounds(self):
return (self._start_date, self._end_date)
def current_chunk_bounds(self):
return self._current_chunk_bounds
def current_work(self):
return self._current_work
def start_chunk(self, terms, start_date, end_date):
days_since_start = ((end_date - self._start_date).days + 1)
self._current_chunk_size = (days_since_start - self._days_completed)
self._current_chunk_bounds = (start_date, end_date)
chunk_percent = (float(self._current_chunk_size) / self._total_days)
nterms = len(terms)
if nterms:
self._completed_term_increment = (chunk_percent / len(terms))
self._completed_chunk_increment = 0.0
else:
self._completed_term_increment = 0.0
self._completed_chunk_increment = chunk_percent
def finish_chunk(self, terms, start_date, end_date):
self._days_completed += self._current_chunk_size
self._progress += self._completed_chunk_increment
def start_load_terms(self, terms):
self._state = 'loading'
self._current_work = terms
def finish_load_terms(self, terms):
self._finish_terms(nterms=len(terms))
def start_compute_term(self, term):
self._state = 'computing'
self._current_work = [term]
def finish_compute_term(self, term):
self._finish_terms(nterms=1)
def finish(self, success):
self._end_time = time.time()
if success:
self._state = 'success'
else:
self._state = 'error'
def _finish_terms(self, nterms):
self._progress += (nterms * self._completed_term_increment)
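# Minimal driver sketch (dates as datetime.date; assumes the module-level time
# import the class relies on): a 5-day chunk out of 10 days contributes 0.5 of
# total progress, split evenly across its two terms.
from datetime import date
pm = ProgressModel(date(2024, 1, 1), date(2024, 1, 10))
pm.start_chunk(['term_a', 'term_b'], date(2024, 1, 1), date(2024, 1, 5))
pm.start_compute_term('term_a')
pm.finish_compute_term('term_a')
print(pm.percent_complete())  # 25.0
pm.finish(success=True)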
def write_sac_zpk(zeros, poles, constant, filename):
if hasattr(filename, 'write'):
f = filename
else:
f = open(filename, 'w')
def write_complex(x):
f.write(('%12.8g %12.8g\n' % (complex(x).real, complex(x).imag)))
f.write(('POLES %i\n' % len(poles)))
for p in poles:
if (p != 0.0):
write_complex(p)
f.write(('ZEROS %i\n' % len(zeros)))
for z in zeros:
if (z != 0.0):
write_complex(z)
f.write(('CONSTANT %12.8g\n' % constant))
if (not hasattr(filename, 'write')):
f.close()
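# Usage sketch: the function accepts any object with a .write method, so a
# SAC pole-zero block can be written to an in-memory buffer instead of a path.
import io
_buf = io.StringIO()
write_sac_zpk(zeros=[(-0.05 + 0j)], poles=[(-0.1 + 0.1j), (-0.1 - 0.1j)],
              constant=1.0, filename=_buf)
print(_buf.getvalue())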
class Scenario(ScenarioGenerator):
def __init__(self):
super().__init__()
self.open_scenario_version = 2
def scenario(self, **kwargs):
catalog = xosc.Catalog()
catalog.add_catalog('VehicleCatalog', '../xosc/Catalogs/Vehicles')
road = xosc.RoadNetwork(roadfile='../xodr/e6mini.xodr', scenegraph='../models/e6mini.osgb')
paramdec = xosc.ParameterDeclarations()
egoname = 'Ego'
targetname = 'Target'
entities = xosc.Entities()
entities.add_scenario_object(egoname, xosc.CatalogReference('VehicleCatalog', 'car_white'))
entities.add_scenario_object(targetname, xosc.CatalogReference('VehicleCatalog', 'car_red'))
init = xosc.Init()
step_time = xosc.TransitionDynamics(xosc.DynamicsShapes.step, xosc.DynamicsDimension.time, 0)
egospeed = xosc.AbsoluteSpeedAction(20, step_time)
egostart = xosc.TeleportAction(xosc.LanePosition(25, 0, (- 3), 0))
targetspeed = xosc.AbsoluteSpeedAction(30, step_time)
targetstart = xosc.TeleportAction(xosc.LanePosition(15, 0, (- 2), 0))
init.add_init_action(egoname, egospeed)
init.add_init_action(egoname, egostart)
init.add_init_action(targetname, targetspeed)
init.add_init_action(targetname, targetstart)
ego_event = xosc.Event('ego_speed_change', xosc.Priority.overwrite)
ego_event.add_trigger(xosc.ValueTrigger('sim_time_trigger', 0, xosc.ConditionEdge.none, xosc.SimulationTimeCondition(1, xosc.Rule.greaterThan)))
ego_speed_change = xosc.SpeedProfileAction([20, 30, 25, 40], xosc.FollowingMode.follow, [0, 5, 10, 20])
ego_event.add_action('ego_speed_change', ego_speed_change)
ego_man = xosc.Maneuver('ego_speed').add_event(ego_event)
sb = xosc.StoryBoard(init, xosc.ValueTrigger('sim_time_trigger', 0, xosc.ConditionEdge.none, xosc.SimulationTimeCondition(30, xosc.Rule.greaterThan), 'stop'))
sb.add_maneuver(ego_man, actors=egoname)
sce = xosc.Scenario('speed_profile_example', 'Mandolin', paramdec, entities=entities, storyboard=sb, roadnetwork=road, catalog=catalog, osc_minor_version=self.open_scenario_version)
return sce
def test_override():
class TestObject(object):
def __init__(self):
self.v = None
o = TestObject()
o.v = 'a'
@asynq()
def test_body():
assert_eq(o.v, 'a')
(yield None)
with async_override(o, 'v', 'b'):
assert_eq(o.v, 'b')
(yield None)
try:
with async_override(o, 'v', 'c'):
assert_eq(o.v, 'c')
(yield None)
raise NotImplementedError()
except NotImplementedError:
pass
assert_eq(o.v, 'b')
(yield None)
assert_eq(o.v, 'a')
test_body()
def patch_norm_fp32(module):
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
if (isinstance(module, nn.GroupNorm) or (torch.__version__ < '1.3')):
module.forward = patch_forward_method(module.forward, torch.half, torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
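# Usage sketch (assumes torch >= 2.0, so the lexicographic version check above
# is False and patch_forward_method is never reached): cast a half-precision
# model's norm layers back to fp32, mmcv-style.
import torch
import torch.nn as nn
_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).half()
_model = patch_norm_fp32(_model)
assert next(_model[1].parameters()).dtype == torch.float32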
class DataModule(LightningDataModule):
def __init__(self, cfg):
super().__init__()
self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
def train_dataloader(self):
return build_detection_train_loader(self.cfg)
def val_dataloader(self):
dataloaders = []
for dataset_name in self.cfg.DATASETS.TEST:
dataloaders.append(build_detection_test_loader(self.cfg, dataset_name))
return dataloaders
class ServiceDiscoveryConsulTests(unittest.TestCase):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def setUp(self):
os.environ[CONFIGMAP_FILE_ENVIRONMENT] = os.path.join(self.BASE_DIR, 'config-tests-service-discovery-consul.yml')
ms = Microservice(path=__file__)
self.ms = ms
@patch.object(ServiceDiscoveryConsul, 'register_service', return_value=None)
def test_init(self, mock_consul):
self.ms.create_app()
mock_consul.assert_called_once_with(app_name='Python Microservice Service Discovery', healtcheck_url=' interval='10s')
def test_get_client(self):
client = self.ms.service_discovery.get_client()
self.assertTrue(isinstance(client, ServiceDiscoveryConsul))
def dict2str(opt, indent_l=1):
msg = ''
for (k, v) in opt.items():
if isinstance(v, dict):
msg += (((' ' * (indent_l * 2)) + k) + ':[\n')
msg += dict2str(v, (indent_l + 1))
msg += ((' ' * (indent_l * 2)) + ']\n')
else:
msg += (((((' ' * (indent_l * 2)) + k) + ': ') + str(v)) + '\n')
return msg
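# Example: nested dicts render as bracketed blocks, indented two spaces per level.
print(dict2str({'train': {'lr': 0.001, 'niter': 10}, 'seed': 42}))
#   train:[
#     lr: 0.001
#     niter: 10
#   ]
#   seed: 42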
def main(args):
img_size = args.img_size
z_dim = 128
lamb_obj = 1.0
lamb_app = 1.0
lamb_img = 0.1
num_classes = (184 if (args.dataset == 'coco') else 179)
num_obj = (8 if (args.dataset == 'coco') else 31)
args.out_path = os.path.join(args.out_path, args.dataset, str(args.img_size))
num_gpus = torch.cuda.device_count()
num_workers = 2
if (num_gpus > 1):
parallel = True
args.batch_size = (args.batch_size * num_gpus)
num_workers = (num_workers * num_gpus)
else:
parallel = False
train_data = get_dataset(args.dataset, img_size)
dataloader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, drop_last=True, shuffle=True, num_workers=num_workers)
device = torch.device('cuda')
netG = ResnetGenerator128(num_classes=num_classes, output_dim=3).to(device)
netD = CombineDiscriminator128_app(num_classes=num_classes).to(device)
if parallel:
netG = DataParallelWithCallback(netG)
netD = nn.DataParallel(netD)
(g_lr, d_lr) = (args.g_lr, args.d_lr)
gen_parameters = []
for (key, value) in dict(netG.named_parameters()).items():
if value.requires_grad:
if ('mapping' in key):
gen_parameters += [{'params': [value], 'lr': (g_lr * 0.1)}]
else:
gen_parameters += [{'params': [value], 'lr': g_lr}]
g_optimizer = torch.optim.Adam(gen_parameters, betas=(0, 0.999))
dis_parameters = []
for (key, value) in dict(netD.named_parameters()).items():
if value.requires_grad:
dis_parameters += [{'params': [value], 'lr': d_lr}]
d_optimizer = torch.optim.Adam(dis_parameters, betas=(0, 0.999))
if (not os.path.exists(args.out_path)):
os.makedirs(args.out_path)
if (not os.path.exists(os.path.join(args.out_path, 'model/'))):
os.makedirs(os.path.join(args.out_path, 'model/'))
writer = SummaryWriter(os.path.join(args.out_path, 'log'))
logger = setup_logger('lostGAN', args.out_path, 0)
logger.info(netG)
logger.info(netD)
start_time = time.time()
vgg_loss = VGGLoss()
vgg_loss = nn.DataParallel(vgg_loss)
l1_loss = nn.DataParallel(nn.L1Loss())
for epoch in range(args.total_epoch):
netG.train()
netD.train()
for (idx, data) in enumerate(tqdm(dataloader)):
(real_images, label, bbox) = data
(real_images, label, bbox) = (real_images.to(device), label.long().to(device).unsqueeze((- 1)), bbox.float())
netD.zero_grad()
(real_images, label) = (real_images.to(device), label.long().to(device))
(d_out_real, d_out_robj, d_out_robj_app) = netD(real_images, bbox, label)
d_loss_real = torch.nn.ReLU()((1.0 - d_out_real)).mean()
d_loss_robj = torch.nn.ReLU()((1.0 - d_out_robj)).mean()
d_loss_robj_app = torch.nn.ReLU()((1.0 - d_out_robj_app)).mean()
z = torch.randn(real_images.size(0), num_obj, z_dim).to(device)
fake_images = netG(z, bbox, y=label.squeeze(dim=(- 1)))
(d_out_fake, d_out_fobj, d_out_fobj_app) = netD(fake_images.detach(), bbox, label)
d_loss_fake = torch.nn.ReLU()((1.0 + d_out_fake)).mean()
d_loss_fobj = torch.nn.ReLU()((1.0 + d_out_fobj)).mean()
d_loss_fobj_app = torch.nn.ReLU()((1.0 + d_out_fobj_app)).mean()
d_loss = (((lamb_obj * (d_loss_robj + d_loss_fobj)) + (lamb_img * (d_loss_real + d_loss_fake))) + (lamb_app * (d_loss_robj_app + d_loss_fobj_app)))
d_loss.backward()
d_optimizer.step()
if ((idx % 1) == 0):
netG.zero_grad()
(g_out_fake, g_out_obj, g_out_obj_app) = netD(fake_images, bbox, label)
g_loss_fake = (- g_out_fake.mean())
g_loss_obj = (- g_out_obj.mean())
g_loss_obj_app = (- g_out_obj_app.mean())
pixel_loss = l1_loss(fake_images, real_images).mean()
feat_loss = vgg_loss(fake_images, real_images).mean()
g_loss = (((((g_loss_obj * lamb_obj) + (g_loss_fake * lamb_img)) + pixel_loss) + feat_loss) + (lamb_app * g_loss_obj_app))
g_loss.backward()
g_optimizer.step()
if (((idx + 1) % 500) == 0):
elapsed = (time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
logger.info('Time Elapsed: [{}]'.format(elapsed))
logger.info('Step[{}/{}], d_out_real: {:.4f}, d_out_fake: {:.4f}, g_out_fake: {:.4f} '.format((epoch + 1), (idx + 1), d_loss_real.item(), d_loss_fake.item(), g_loss_fake.item()))
logger.info(' d_obj_real: {:.4f}, d_obj_fake: {:.4f}, g_obj_fake: {:.4f} '.format(d_loss_robj.item(), d_loss_fobj.item(), g_loss_obj.item()))
logger.info(' d_obj_real_app: {:.4f}, d_obj_fake_app: {:.4f}, g_obj_fake_app: {:.4f} '.format(d_loss_robj_app.item(), d_loss_fobj_app.item(), g_loss_obj_app.item()))
logger.info(' pixel_loss: {:.4f}, feat_loss: {:.4f}'.format(pixel_loss.item(), feat_loss.item()))
writer.add_image('real images', make_grid(((real_images.cpu().data * 0.5) + 0.5), nrow=4), (((epoch * len(dataloader)) + idx) + 1))
writer.add_image('fake images', make_grid(((fake_images.cpu().data * 0.5) + 0.5), nrow=4), (((epoch * len(dataloader)) + idx) + 1))
writer.add_scalars('D_loss_real', {'real': d_loss_real.item(), 'robj': d_loss_robj.item(), 'robj_app': d_loss_robj_app.item(), 'loss': d_loss.item()})
writer.add_scalars('D_loss_fake', {'fake': d_loss_fake.item(), 'fobj': d_loss_fobj.item(), 'fobj_app': d_loss_fobj_app.item()})
writer.add_scalars('G_loss', {'fake': g_loss_fake.item(), 'obj': g_loss_obj.item(), 'loss': g_loss.item()})
if (((epoch + 1) % 5) == 0):
torch.save(netG.state_dict(), os.path.join(args.out_path, 'model/', ('G_%d.pth' % (epoch + 1))))
class MacroElement(Element):
_template = Template('')
def __init__(self):
super().__init__()
self._name = 'MacroElement'
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), 'You cannot render this Element if it is not in a Figure.'
header = self._template.module.__dict__.get('header', None)
if (header is not None):
figure.header.add_child(Element(header(self, kwargs)), name=self.get_name())
html = self._template.module.__dict__.get('html', None)
if (html is not None):
figure.html.add_child(Element(html(self, kwargs)), name=self.get_name())
script = self._template.module.__dict__.get('script', None)
if (script is not None):
figure.script.add_child(Element(script(self, kwargs)), name=self.get_name())
for (name, element) in self._children.items():
element.render(**kwargs)
@add_start_docstrings('The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.', CVT_START_DOCSTRING)
class CvtModel(CvtPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.encoder = CvtEncoder(config)
self.post_init()
def _prune_heads(self, heads_to_prune):
for (layer, heads) in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutputWithCLSToken)]:
output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
if (pixel_values is None):
raise ValueError('You have to specify pixel_values')
encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if (not return_dict):
return ((sequence_output,) + encoder_outputs[1:])
return BaseModelOutputWithCLSToken(last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states)
def package_modpaths(pkgpath, with_pkg=False, with_mod=True, followlinks=True, recursive=True, with_libs=False, check=True):
if isfile(pkgpath):
(yield pkgpath)
else:
if with_pkg:
root_path = join(pkgpath, '__init__.py')
if ((not check) or exists(root_path)):
(yield root_path)
valid_exts = ['.py']
if with_libs:
valid_exts += _platform_pylib_exts()
for (dpath, dnames, fnames) in os.walk(pkgpath, followlinks=followlinks):
ispkg = exists(join(dpath, '__init__.py'))
if (ispkg or (not check)):
check = True
if with_mod:
for fname in fnames:
if (splitext(fname)[1] in valid_exts):
if (fname != '__init__.py'):
path = join(dpath, fname)
(yield path)
if with_pkg:
for dname in dnames:
path = join(dpath, dname, '__init__.py')
if exists(path):
(yield path)
else:
del dnames[:]
if (not recursive):
break
def get_active_window():
active_window = None
try:
active_window = _app.get_active_window()
except:
return None
active_window_number = active_window.get_id()
for (uid, browser_view_instance) in BrowserView.instances.items():
if (browser_view_instance.window.get_id() == active_window_number):
return browser_view_instance.pywebview_window
return None
def build_dataset(config, ratio, charge, model_name, seed):
vocab = pkl.load(open(config.vocab_path, 'rb'))
print(f'Vocab size: {len(vocab)}')
def load_dataset(text, labels, word_idx, word_key, chains, model_name):
contents = []
for i in range(len(text)):
if ((model_name == 'BiLSTM_Att_Cons') or (model_name == 'BiLSTM_Att') or (model_name == 'BiLSTM')):
mask = ([0] * pad_size)
token = text[i]
label = np.argmax(labels[i])
words_line = []
seq_len = len(token)
if (len(token) < pad_size):
token.extend(([PAD] * (pad_size - len(token))))
else:
token = token[:pad_size]
seq_len = pad_size
for word in token:
idx = vocab.get(word, vocab.get(UNK))
words_line.append(idx)
words_line_key = ([([factor_num] * pad_size)] * len(factor_list))
att_weights_temp = ([(- 10000.0)] * pad_size)
assert (len(word_key[i]) == len(word_idx[i]))
for (j, key) in enumerate(word_key[i]):
if (word_idx[i][j] < pad_size):
for p in range(len(factor_list)):
if (key in factor_list[p]):
l = factor_list[p]
num_l = (- 1)
for k in range(len(l)):
if (key == l[k]):
num_l = k
break
words_line_key[p][word_idx[i][j]] = key
if (int(label) == p):
att_weights_temp[word_idx[i][j]] = strength[p][num_l]
att_weights = []
sum_int = 0.0
for j in range(pad_size):
if ((att_weights_temp[j] > (- 10000)) and (att_weights_temp[j] < 10)):
sum_int += np.exp(att_weights_temp[j])
for j in range(pad_size):
if ((att_weights_temp[j] > (- 10000)) and (att_weights_temp[j] < 10)):
att_weights.append(((np.exp(att_weights_temp[j]) * 1.0) / sum_int))
else:
att_weights.append(0.0)
if (sum_int != 0.0):
sum_int = 1.0
contents.append((words_line, words_line_key, att_weights, sum_int, int(label), seq_len))
elif (model_name == 'CausalChain'):
t = chains[i]
chain_content = []
scores = []
for (li, score) in t:
scores.append(score)
chain_content.append(np.array(text[i])[li].tolist())
label = np.argmax(labels[i])
tokens = chain_content
words_lines = []
seq_lens = []
it = 0
chain_length = 8
masks = []
for token in tokens:
words_line = []
seq_len = len(token)
mask = ([0] * chain_length)
if pad_size:
if (len(token) < chain_length):
token.extend(([PAD] * (chain_length - len(token))))
if (len(token) != 0):
mask[(len(token) - 1)] = 1
else:
token = token[:chain_length]
seq_len = chain_length
mask[(chain_length - 1)] = 1
seq_lens.append(seq_len)
for word in token:
idx = vocab.get(word, vocab.get(UNK))
words_line.append(idx)
words_lines.append(words_line)
masks.append(mask)
it += 1
if (it >= chain_num):
break
for t1 in range((chain_num - len(words_lines))):
words_lines.append(([10001] * chain_length))
seq_lens.append(0)
masks.append(([0] * chain_length))
scores.append(0)
if (len(scores) > chain_num):
scores = scores[:chain_num]
scores = np.array(scores)
if (np.sum(scores) > 0):
scores = (scores / np.sum(scores))
contents.append((words_lines, int(label), seq_lens, masks, scores))
return contents
prefix = 'nn_data'
if (ratio == 0.1):
ratio_str = '19'
elif (ratio == 0.01):
ratio_str = '199'
elif (ratio == 0.05):
ratio_str = '119'
elif (ratio == 0.3):
ratio_str = '37'
elif (ratio == 0.5):
ratio_str = '55'
else:
raise ValueError('Unsupported ratio: {}'.format(ratio))
filename = (((((((prefix + '_') + ratio_str) + '_') + charge) + '_') + str(seed)) + '.pkl')
chain_num = config.chain
batch_size = config.batch_size
factor_num = config.factor_num
pad_size = config.pad_size
with open(('data/' + filename), 'rb') as f:
data = pkl.load(f)
strength = data['strength']
factor_list_t = data['factor_list']
factor_list = [[int(y[1:]) for y in x] for x in factor_list_t]
text_train = data['text_train']
labels_train = data['labels_train']
text_test = data['text_test']
labels_test = data['labels_test']
word_idx_train = data['word_idx_train']
word_idx_test = data['word_idx_test']
word_key_train = data['word_key_train']
word_key_test = data['word_key_test']
chains_train = data['chains_train']
chains_test = data['chains_test']
cnt = int((len(text_train) / 10))
train_split = (cnt * 9)
random_idx = np.arange(len(text_train))
np.random.shuffle(random_idx)
train_idx = random_idx[:train_split]
val_idx = random_idx[train_split:]
train = load_dataset(text_train[train_idx], labels_train[train_idx], word_idx_train[train_idx], word_key_train[train_idx], chains_train[train_idx], model_name)
dev = load_dataset(text_train[val_idx], labels_train[val_idx], word_idx_train[val_idx], word_key_train[val_idx], chains_train[val_idx], model_name)
test = load_dataset(text_test, labels_test, word_idx_test, word_key_test, chains_test, model_name)
if (not os.path.exists('saved_dict/')):
os.makedirs('saved_dict/')
return (vocab, train, dev, test)
def save_checkpoint(state, args, is_best, filename='checkpoint.pth.tar'):
directory = ('experiments/segmentation/runs/%s/%s/%s/' % (args.dataset, args.model, args.checkname))
if (not os.path.exists(directory)):
os.makedirs(directory)
filename = (directory + filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, (directory + 'model_best.pth.tar'))
def pauli_string_iterator(num_qubits, max_word_size=2):
if (max_word_size > num_qubits):
raise ValueError('Number of qubits is too few')
if (max_word_size <= 0):
raise ValueError('Word size too small')
qubit_list = list(range(num_qubits))
partitions = partition_iterator(qubit_list, max_word_size)
pauli_string = ['I' for temp in range(num_qubits)]
pauli_letters = ['X', 'Y', 'Z']
for partition in partitions:
for lettering in range((3 ** max_word_size)):
for p in partition:
letter = pauli_letters[(lettering % 3)]
for qubit in p:
pauli_string[qubit] = letter
lettering = (lettering // 3)
(yield tuple(pauli_string))
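# Usage sketch (relies on the partition_iterator helper assumed above):
# enumerate measurement settings covering all Pauli words of weight <= 2 on
# three qubits.
for setting in pauli_string_iterator(3, max_word_size=2):
    print(''.join(setting))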
def accuracy(pred, target, topk=1):
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk,)
return_single = True
else:
return_single = False
maxk = max(topk)
(_, pred_label) = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, (- 1)).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / pred.size(0))))
return (res[0] if return_single else res)
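# Example: top-1 / top-5 accuracy (in percent) over a batch of logits.
import torch
_pred = torch.randn(8, 10)            # [batch, num_classes]
_target = torch.randint(0, 10, (8,))  # [batch]
top1, top5 = accuracy(_pred, _target, topk=(1, 5))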
@pytest.mark.parametrize('truncated_dist, lower, upper, shape, expected', [(icdf_normal(0, 1), (- 1), 2, None, 0), (icdf_normal(3, 1), (- 1), 2, (2,), np.full((2,), (3 / 2))), (icdf_normal((- 3), 1), (- 1), None, (2, 3), np.full((2, 3), 0)), (icdf_normal([0, 3, 3], 1), None, [2, 2, 4], (4, 3), np.full((4, 3), [0, 1, 3]))])
def test_truncated_moment(truncated_dist, lower, upper, shape, expected):
with Model() as model:
Truncated('x', dist=truncated_dist, lower=lower, upper=upper, shape=shape)
assert_moment_is_expected(model, expected)
@pytest.mark.slow
@pytest.mark.requires_src
@skip_on_conda_build  # assumed marker name; the original '@...' prefix was stripped
def test_update_version_3_0_to_3_1_pretend(tmp_path, with_coverage, venv_mgr):
with chdir(str(tmp_path)):
name = 'my_old_project'
project = (tmp_path / 'my_old_project')
venv_mgr.install_pyscaffold(3, 0).putup(name).uninstall_pyscaffold().install_this_pyscaffold().putup(f'--pretend --update {project}', with_coverage=with_coverage)
setup_cfg = Path(project, 'setup.cfg').read_text(encoding='utf-8')
assert ('[options.entry_points]' not in setup_cfg)
def run_test_commands_with_gui_process(commands):
gui_command = [pmp_test_utils.get_executable_even_when_embedded(), '-m', 'pymedphys', 'gui']
with pmp_test_utils.process(gui_command, cwd=HERE):
for command in commands:
subprocess.check_call(command, cwd=HERE, shell=True)
def rtn_mempcpy(se: 'SymbolicExecutor', pstate: 'ProcessState'):
logger.debug('mempcpy hooked')
(dst, dst_ast) = pstate.get_full_argument(0)
src = pstate.get_argument_value(1)
cnt = pstate.get_argument_value(2)
pstate.concretize_argument(2)
for index in range(cnt):
sym_src = pstate.read_symbolic_memory_byte((src + index))
pstate.write_symbolic_memory_byte((dst + index), sym_src)
return (dst + cnt)
class _NetG(nn.Module):
def __init__(self, in_c=1, out_c=1, n_feat=80, scale_unetfeats=48, scale_orsnetfeats=32, num_cab=8, kernel_size=3, reduction=4, bias=False):
super(_NetG, self).__init__()
act = nn.PReLU()
self.shallow_feat1 = nn.Sequential(conv(1, n_feat, kernel_size, bias=bias), CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
self.shallow_feat2 = nn.Sequential(conv(1, n_feat, kernel_size, bias=bias), CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
self.shallow_feat3 = nn.Sequential(conv(1, n_feat, kernel_size, bias=bias), CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
self.stage1_encoder = Encoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats, csff=False)
self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats)
self.sam12 = SAM(n_feat, kernel_size=1, bias=bias)
def forward(self, x3_img):
H = x3_img.size(2)
W = x3_img.size(3)
fea1 = self.shallow_feat1(x3_img)
fea1_enc = self.stage1_encoder(fea1)
fea1_dec = self.stage1_decoder(fea1_enc)
(x2top_samfeats, stage1_img) = self.sam12(fea1_dec[0], x3_img)
return stage1_img
def cvt_list_toavi(dirpath):
filenames_dict = {}
for file in os.listdir(dirpath):
if ((file == 'mapping_table') or (file == 'avi_txt')):
continue
else:
old_txt = open(os.path.join(dirpath, file), 'r')
clip_names = old_txt.read()
clip_names = clip_names.split('\n')
filenames_dict[file] = clip_names
old_txt.close()
print('All old txt files have been loaded')
for old_txt in filenames_dict.keys():
new_path = ((os.getcwd() + '/avi_txt/') + old_txt)
print(new_path)
new_txt = open(new_path, 'w')
clipnames = filenames_dict[old_txt]
for clip_name in clipnames:
new_name = clip_name.replace('.mp4', '.avi')
new_txt.write(new_name)
new_txt.write('\n')
print(('The transformation of %s is completed.' % old_txt))
new_txt.close()
@pytest.mark.slow
@pytest.mark.xfail(reason='Memory test is not stable')
def test_memory_leak_on_unsuccessful_connect():
p = psutil.Process()
m0 = p.memory_full_info()
for i in range(10):
gc.collect()
try:
pymssql.connect(server='www.google.com', port=81, user='username', password='password', login_timeout=1)
except:
pass
gc.collect()
m1 = p.memory_full_info()
duss = (m1.uss - m0.uss)
print(i, 'uss=', m1.uss, 'duss:', duss)
if (i > 5):
assert (duss <= 0)
m0 = m1
def parse_args():
parser = argparse.ArgumentParser()
data_group = parser.add_argument_group(title='Data-related configuration')
model_group = parser.add_argument_group(title='Model-related configuration')
atk_group = parser.add_argument_group(title='Attack-related configuration')
add_data_group(data_group)
add_model_group(model_group)
add_atk_group(atk_group)
return parser.parse_args()
@patch('beeref.scene.BeeGraphicsScene.clearSelection')
@patch('PyQt6.QtGui.QClipboard.text')
@patch('PyQt6.QtGui.QClipboard.image')
def test_on_action_paste_when_empty(img_mock, text_mock, clear_mock, view):
view.scene.cancel_crop_mode = MagicMock()
img_mock.return_value = QtGui.QImage()
text_mock.return_value = ''
view.on_action_paste()
assert (len(view.scene.items()) == 0)
clear_mock.assert_not_called()
view.scene.cancel_crop_mode.assert_not_called()
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data
self.generatePicture()
def generatePicture(self):
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen('w'))
w = ((self.data[1][0] - self.data[0][0]) / 3.0)
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if (open > close):
p.setBrush(pg.mkBrush('r'))
else:
p.setBrush(pg.mkBrush('g'))
p.drawRect(QtCore.QRectF((t - w), open, (w * 2), (close - open)))
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
return QtCore.QRectF(self.picture.boundingRect())
class Memory():
data_pointer = 0
isfull = False
def __init__(self, capacity):
self.memory = np.empty(capacity, dtype=object)
self.capacity = capacity
def update(self, transition):
self.memory[self.data_pointer] = transition
self.data_pointer += 1
if (self.data_pointer == self.capacity):
self.data_pointer = 0
self.isfull = True
def sample(self, batch_size):
return np.random.choice(self.memory, batch_size)
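# Usage sketch: fixed-capacity ring buffer over arbitrary transition objects;
# note that sampling before the buffer is full can draw empty (None) slots.
mem = Memory(capacity=100)
for t in range(100):
    mem.update((t, t + 1))  # e.g. (state, next_state)
assert mem.isfull
batch = mem.sample(32)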
def f_conv2d_bias(in_channels, out_channels):
def padding_same(kernel, stride):
return [((((k - 1) * s) + 1) // 2) for (k, s) in zip(kernel, stride)]
padding = padding_same([3, 3], [1, 1])
assert (padding == [1, 1]), padding
return nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=[3, 3], stride=1, padding=1, bias=True))
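# Example: padding_same([3, 3], [1, 1]) evaluates to [1, 1], so the layer
# preserves spatial size for stride-1 3x3 convolutions.
import torch
_layer = f_conv2d_bias(3, 16)
assert _layer(torch.zeros(1, 3, 32, 32)).shape == (1, 16, 32, 32)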
def _timed_dedupe(object_ids: List[Any], sort_keys: List[SortKey], num_materialize_buckets: int, dedupe_task_index: int, enable_profiler: bool, object_store: Optional[IObjectStore], **kwargs):
task_id = get_current_ray_task_id()
worker_id = get_current_ray_worker_id()
with (memray.Tracker(f'dedupe_{worker_id}_{task_id}.bin') if enable_profiler else nullcontext()):
logger.info(f'[Dedupe task {dedupe_task_index}] Getting delta file envelope groups for {len(object_ids)} object refs...')
delta_file_envelope_groups_list: List[object] = object_store.get_many(object_ids)
hb_index_to_delta_file_envelopes_list = defaultdict(list)
for delta_file_envelope_groups in delta_file_envelope_groups_list:
for (hb_idx, dfes) in enumerate(delta_file_envelope_groups):
if (dfes is not None):
hb_index_to_delta_file_envelopes_list[hb_idx].append(dfes)
src_file_id_to_row_indices = defaultdict(list)
deduped_tables = []
logger.info(f'[Dedupe task {dedupe_task_index}] Running {len(hb_index_to_delta_file_envelopes_list)} dedupe rounds...')
total_deduped_records = 0
for (hb_idx, dfe_list) in hb_index_to_delta_file_envelopes_list.items():
logger.info(f'{dedupe_task_index}: union primary keys for hb_index: {hb_idx}')
(table, union_time) = timed_invocation(func=_union_primary_key_indices, hash_bucket_index=hb_idx, df_envelopes_list=dfe_list)
logger.info(f'[Dedupe {dedupe_task_index}] Dedupe round input record count: {len(table)}, took {union_time}s')
if len(sort_keys):
sort_keys.extend([SortKey.of(sc._PARTITION_STREAM_POSITION_COLUMN_NAME, SortOrder.ASCENDING), SortKey.of(sc._ORDERED_FILE_IDX_COLUMN_NAME, SortOrder.ASCENDING)])
table = table.take(pc.sort_indices(table, sort_keys=sort_keys))
logger.info(f'[Dedupe task index {dedupe_task_index}] Dropping duplicates for {hb_idx}')
hb_table_record_count = len(table)
(table, drop_time) = timed_invocation(func=_drop_duplicates_by_primary_key_hash, table=table)
deduped_record_count = (hb_table_record_count - len(table))
total_deduped_records += deduped_record_count
logger.info(f'[Dedupe task index {dedupe_task_index}] Dedupe round output record count: {len(table)}, took: {drop_time}s')
deduped_tables.append((hb_idx, table))
stream_position_col = sc.stream_position_column_np(table)
file_idx_col = sc.file_index_column_np(table)
row_idx_col = sc.record_index_column_np(table)
is_source_col = sc.is_source_column_np(table)
file_record_count_col = sc.file_record_count_column_np(table)
for row_idx in range(len(table)):
src_dfl = DeltaFileLocator.of(is_source_col[row_idx], stream_position_col[row_idx], file_idx_col[row_idx], file_record_count_col[row_idx])
src_file_id_to_row_indices[src_dfl].append(row_idx_col[row_idx])
logger.info(f'Finished all dedupe rounds...')
mat_bucket_to_src_file_records: Dict[(MaterializeBucketIndex, DeltaFileLocatorToRecords)] = defaultdict(dict)
for (src_dfl, src_row_indices) in src_file_id_to_row_indices.items():
mat_bucket = delta_file_locator_to_mat_bucket_index(src_dfl, num_materialize_buckets)
mat_bucket_to_src_file_records[mat_bucket][src_dfl] = np.array(src_row_indices)
mat_bucket_to_dd_idx_obj_id: Dict[(MaterializeBucketIndex, DedupeTaskIndexWithObjectId)] = {}
for (mat_bucket, src_file_records) in mat_bucket_to_src_file_records.items():
object_ref = object_store.put(src_file_records)
mat_bucket_to_dd_idx_obj_id[mat_bucket] = (dedupe_task_index, object_ref)
del object_ref
logger.info(f'Count of materialize buckets with object refs: {len(mat_bucket_to_dd_idx_obj_id)}')
peak_memory_usage_bytes = get_current_node_peak_memory_usage_in_bytes()
return DedupeResult(mat_bucket_to_dd_idx_obj_id, np.int64(total_deduped_records), np.double(peak_memory_usage_bytes), np.double(0.0), np.double(time.time()))
def _instance_init_in_callstack(instance: Any) -> bool:
frame = inspect.currentframe().f_back
while frame:
frame_context_name = inspect.getframeinfo(frame).function
frame_context_self = frame.f_locals.get('self')
frame_context_vars = frame.f_code.co_varnames
if ((frame_context_name == '__init__') and (frame_context_self is instance) and (frame_context_vars[0] == 'self')):
return True
frame = frame.f_back
return False
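# Usage sketch: report whether the current call stack passes through this
# instance's own __init__ (e.g. to defer validation during construction).
class _Guarded:
    def __init__(self):
        assert _instance_init_in_callstack(self)   # inside __init__: True
_g = _Guarded()
assert not _instance_init_in_callstack(_g)         # after construction: False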
class BrowserStack(Provider):
API = '
def auth(self):
return (self.username, self.key)
def executor(self):
return '
def username(self):
return self.get_credential('username', ['BROWSERSTACK_USERNAME', 'BROWSERSTACK_USR'])
def key(self):
return self.get_credential('key', ['BROWSERSTACK_ACCESS_KEY', 'BROWSERSTACK_PSW'])
def job_access(self):
try:
field = self.get_setting(key='job_access', envs=['BROWSERSTACK_JOB_ACCESS'], section='report', allowed_values=['browser_url', 'public_url'])
except MissingCloudSettingError:
field = 'browser_url'
return field
def read_batchfile(pythonpath, file_ending='.py'):
abspaths = utils.pypath_to_realpath(pythonpath, file_ending, settings.BASE_BATCHPROCESS_PATHS)
if (not abspaths):
raise IOError('Absolute batchcmd paths could not be found.')
text = None
decoderr = []
for abspath in abspaths:
for file_encoding in _ENCODINGS:
try:
with codecs.open(abspath, 'r', encoding=file_encoding) as fobj:
text = fobj.read()
except (ValueError, UnicodeDecodeError) as e:
decoderr.append(str(e))
continue
break
if ((not text) and decoderr):
raise UnicodeDecodeError('unknown', b'', 0, 0, '\n'.join(decoderr))
return text
class EventMarker(Marker):
def __init__(self, event, kind=0, event_hash=None):
Marker.__init__(self, [], event.time, event.time, kind)
self._event = event
self.active = False
self._event_hash = event_hash
def get_event_hash(self):
if (self._event_hash is not None):
return self._event_hash
else:
return self._event.get_hash()
def label(self):
t = []
mag = self._event.magnitude
if (mag is not None):
t.append(('M%3.1f' % mag))
reg = self._event.region
if (reg is not None):
t.append(reg)
nam = self._event.name
if (nam is not None):
t.append(nam)
s = ' '.join(t)
if (not s):
s = '(Event)'
return s
def draw(self, p, time_projection, y_projection, with_label=False):
Marker.draw(self, p, time_projection, y_projection, draw_line=False, draw_triangle=True)
if with_label:
self.draw_label(p, time_projection, y_projection)
def draw_label(self, p, time_projection, y_projection):
from ..qt_compat import qg
from .. import util as gui_util
u = time_projection(self.tmin)
(v0, v1) = y_projection.get_out_range()
label_bg = qg.QBrush(qg.QColor(255, 255, 255))
gui_util.draw_label(p, u, (v0 - 10.0), self.label(), label_bg, 'CB', outline=self.active)
def get_event(self):
return self._event
def draw_trace(self, viewer, p, tr, time_projection, track_projection, gain):
pass
def hoover_message(self):
ev = self.get_event()
evs = []
for k in 'magnitude lat lon depth name region catalog'.split():
if ((ev.__dict__[k] is not None) and (ev.__dict__[k] != '')):
if (k == 'depth'):
sv = ('%g km' % (ev.depth * 0.001))
else:
sv = ('%s' % ev.__dict__[k])
evs.append(('%s = %s' % (k, sv)))
return ', '.join(evs)
def get_attributes(self, fdigits=3):
attributes = ['event:']
attributes.extend(Marker.get_attributes(self, fdigits=fdigits))
del attributes[(- 1)]
e = self._event
attributes.extend([e.get_hash(), e.lat, e.lon, e.depth, e.magnitude, e.catalog, e.name, e.region])
return attributes
def get_attribute_widths(self, fdigits=3):
ws = [6]
ws.extend(Marker.get_attribute_widths(self, fdigits=fdigits))
del ws[(- 1)]
ws.extend([14, 12, 12, 12, 4, 5, 0, 0])
return ws
@staticmethod
def from_attributes(vals):
(nslc_ids, tmin, tmax, kind) = Marker.parse_attributes((vals[1:] + ['None']))
(lat, lon, depth, magnitude) = [str_to_float_or_none(x) for x in vals[5:9]]
(catalog, name, region) = [str_to_str_or_none(x) for x in vals[9:]]
e = model.Event(lat, lon, time=tmin, name=name, depth=depth, magnitude=magnitude, region=region, catalog=catalog)
marker = EventMarker(e, kind, event_hash=str_to_str_or_none(vals[4]))
return marker
def get_component_unique_name(c_rtype):
full_name = get_component_full_name(c_rtype)
special_chars = [' ', '<', '>', '.', '[', ']']
if ((len(full_name) < 64) and (not any([(c in full_name) for c in special_chars]))):
return full_name
comp_name = c_rtype.get_name()
param_hash = blake2b(digest_size=8)
param_hash.update(full_name[len(comp_name):].encode('ascii'))
param_name = param_hash.hexdigest()
return ((comp_name + '__') + param_name)
class InternalBaseplateSession(BaseplateSession):
def _add_span_context(self, span: Span, request: PreparedRequest) -> None:
request.headers['X-Trace'] = str(span.trace_id)
request.headers['X-Parent'] = str(span.parent_id)
request.headers['X-Span'] = str(span.id)
if span.sampled:
request.headers['X-Sampled'] = '1'
if (span.flags is not None):
request.headers['X-Flags'] = str(span.flags)
try:
edge_context = span.context.raw_edge_context
except AttributeError:
pass
else:
request.headers['X-Edge-Request'] = base64.b64encode(edge_context).decode()
class _GroupBase(base._TextBox, base.PaddingMixin, base.MarginMixin):
defaults: list[tuple[(str, Any, str)]] = [('borderwidth', 3, 'Current group border width'), ('center_aligned', True, 'center-aligned group box')]
def __init__(self, **config):
base._TextBox.__init__(self, **config)
self.add_defaults(_GroupBase.defaults)
self.add_defaults(base.PaddingMixin.defaults)
self.add_defaults(base.MarginMixin.defaults)
def box_width(self, groups):
(width, _) = self.drawer.max_layout_size([self.fmt.format(i.label) for i in groups], self.font, self.fontsize)
return ((width + (self.padding_x * 2)) + (self.borderwidth * 2))
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
if (self.fontsize is None):
calc = (((self.bar.height - (self.margin_y * 2)) - (self.borderwidth * 2)) - (self.padding_y * 2))
self.fontsize = max(calc, 1)
self.layout = self.drawer.textlayout('', 'ffffff', self.font, self.fontsize, self.fontshadow)
self.setup_hooks()
def _hook_response(self, *args, **kwargs):
self.bar.draw()
def setup_hooks(self):
hook.subscribe.client_managed(self._hook_response)
hook.subscribe.client_urgent_hint_changed(self._hook_response)
hook.subscribe.client_killed(self._hook_response)
hook.subscribe.setgroup(self._hook_response)
hook.subscribe.group_window_add(self._hook_response)
hook.subscribe.current_screen_change(self._hook_response)
hook.subscribe.changegroup(self._hook_response)
def remove_hooks(self):
hook.unsubscribe.client_managed(self._hook_response)
hook.unsubscribe.client_urgent_hint_changed(self._hook_response)
hook.unsubscribe.client_killed(self._hook_response)
hook.unsubscribe.setgroup(self._hook_response)
hook.unsubscribe.group_window_add(self._hook_response)
hook.unsubscribe.current_screen_change(self._hook_response)
hook.unsubscribe.changegroup(self._hook_response)
def drawbox(self, offset, text, bordercolor, textcolor, highlight_color=None, width=None, rounded=False, block=False, line=False, highlighted=False):
self.layout.text = self.fmt.format(text)
self.layout.font_family = self.font
self.layout.font_size = self.fontsize
self.layout.colour = textcolor
if (width is not None):
self.layout.width = width
if line:
pad_y = [(((self.bar.height - self.layout.height) - self.borderwidth) / 2), (((self.bar.height - self.layout.height) + self.borderwidth) / 2)]
else:
pad_y = self.padding_y
if (bordercolor is None):
border_width = 0
framecolor = (self.background or self.bar.background)
else:
border_width = self.borderwidth
framecolor = bordercolor
framed = self.layout.framed(border_width, framecolor, 0, pad_y, highlight_color)
y = self.margin_y
if self.center_aligned:
for t in base.MarginMixin.defaults:
if (t[0] == 'margin'):
y += (((self.bar.height - framed.height) / 2) - t[1])
break
if (block and (bordercolor is not None)):
framed.draw_fill(offset, y, rounded)
elif line:
framed.draw_line(offset, y, highlighted)
else:
framed.draw(offset, y, rounded)
def finalize(self):
self.remove_hooks()
base._TextBox.finalize(self)
def noneuclidian_distance_calculation():
from sympy import solve, sqrt, symbols, expand, simplify, collect, Symbol, S
ONE = S(1)  # scalar key returned by collect(..., evaluate=False)
metric = '0 # #,# 0 #,# # 1'
(X, Y, e) = MV.setup('X Y e', metric)
print('g_{ij} =', MV.metric)
print('(X^Y)**2 =', ((X ^ Y) * (X ^ Y)))
L = ((X ^ Y) ^ e)
B = (L * e)
print('B =', B)
Bsq = (B * B)
print('B**2 =', Bsq)
Bsq = Bsq.scalar()
print('#L = X^Y^e is a non-euclidian line')
print('B = L*e =', B)
BeBr = ((B * e) * B.rev())
print('B*e*B.rev() =', BeBr)
print('B**2 =', (B * B))
print('L**2 =', (L * L))
(s, c, Binv, M, S, C, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')
Bhat = (Binv * B)
R = (c + (s * Bhat))
print('s = sinh(alpha/2) and c = cosh(alpha/2)')
print('exp(alpha*B/(2*|B|)) =', R)
Z = ((R * X) * R.rev())
Z.obj = expand(Z.obj)
Z.obj = Z.obj.collect([Binv, s, c, XdotY])
Z.Fmt(3, 'R*X*R.rev()')
W = (Z | Y)
print('Objective is to determine value of C = cosh(alpha) such that W = 0')
W = W.scalar()
print('Z|Y =', W)
W = expand(W)
W = simplify(W)
W = W.collect([(s * Binv)])
M = (1 / Bsq)
W = W.subs((Binv ** 2), M)
W = simplify(W)
Bmag = sqrt(((XdotY ** 2) - (((2 * XdotY) * Xdote) * Ydote)))
W = W.collect([((Binv * c) * s), XdotY])
W = W.subs(((2 * (XdotY ** 2)) - (((4 * XdotY) * Xdote) * Ydote)), (2 / (Binv ** 2)))
W = W.subs(((2 * c) * s), S)
W = W.subs((c ** 2), ((C + 1) / 2))
W = W.subs((s ** 2), ((C - 1) / 2))
W = simplify(W)
W = W.subs((1 / Binv), Bmag)
W = expand(W)
print('S = sinh(alpha) and C = cosh(alpha)')
print('W =', W)
Wd = collect(W, [C, S], exact=True, evaluate=False)
Wd_1 = Wd[ONE]
Wd_C = Wd[C]
Wd_S = Wd[S]
print('Scalar Coefficient =', Wd_1)
print('Cosh Coefficient =', Wd_C)
print('Sinh Coefficient =', Wd_S)
print('|B| =', Bmag)
Wd_1 = Wd_1.subs(Bmag, (1 / Binv))
Wd_C = Wd_C.subs(Bmag, (1 / Binv))
Wd_S = Wd_S.subs(Bmag, (1 / Binv))
lhs = (Wd_1 + (Wd_C * C))
rhs = ((- Wd_S) * S)
lhs = (lhs ** 2)
rhs = (rhs ** 2)
W = expand((lhs - rhs))
W = expand(W.subs((1 / (Binv ** 2)), (Bmag ** 2)))
W = expand(W.subs((S ** 2), ((C ** 2) - 1)))
W = W.collect([C, (C ** 2)], evaluate=False)
a = simplify(W[(C ** 2)])
b = simplify(W[C])
c = simplify(W[ONE])
print('Require a*C**2+b*C+c = 0')
print('a =', a)
print('b =', b)
print('c =', c)
x = Symbol('x')
C = solve((((a * (x ** 2)) + (b * x)) + c), x)[0]
print('cosh(alpha) = C = -b/(2*a) =', expand(simplify(expand(C))))
return
class Post(models.Model):
title = models.CharField(max_length=70, verbose_name='', unique=True)
html_content = models.TextField(verbose_name='HTML')
md_content = models.TextField(verbose_name='markdown')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='')
modified_time = models.DateTimeField(auto_now_add=True, verbose_name='')
author = models.ForeignKey('users.UserProfile', verbose_name='', on_delete=models.CASCADE)
views = models.PositiveIntegerField(default=0)
def increase_views(self):
self.views += 1
self.save(update_fields=['views'])
class Meta():
db_table = 'ops_wiki_post'
verbose_name = 'wiki'
verbose_name_plural = 'wiki'
def main():
parser = HfArgumentParser((DataTrainingArguments, TeacherModelArguments, StudentModelArguments, DistillTrainingArguments), description=DESCRIPTION)
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(data_args, teacher_args, student_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(data_args, teacher_args, student_args, training_args) = parser.parse_args_into_dataclasses()
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif (last_checkpoint is not None):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
if is_main_process(training_args.local_rank):
utils.logging.set_verbosity_info()
utils.logging.enable_default_handler()
utils.logging.enable_explicit_format()
if (training_args.local_rank != (- 1)):
raise ValueError('Distributed training is not currently supported.')
if (training_args.tpu_num_cores is not None):
raise ValueError('TPU acceleration is not currently supported.')
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
examples = read_lines(data_args.data_file)
class_names = read_lines(data_args.class_names_file)
logger.info('Generating predictions from zero-shot teacher model')
teacher_soft_preds = get_teacher_predictions(teacher_args.teacher_name_or_path, examples, class_names, teacher_args.hypothesis_template, teacher_args.teacher_batch_size, teacher_args.temperature, teacher_args.multi_label, data_args.use_fast_tokenizer, training_args.no_cuda, training_args.fp16)
dataset = Dataset.from_dict({'text': examples, 'labels': teacher_soft_preds})
logger.info('Initializing student model')
model = AutoModelForSequenceClassification.from_pretrained(student_args.student_name_or_path, num_labels=len(class_names))
tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer)
model.config.id2label = {i: label for (i, label) in enumerate(class_names)}
model.config.label2id = {label: i for (i, label) in enumerate(class_names)}
dataset = dataset.map(tokenizer, input_columns='text')
dataset.set_format('torch')
def compute_metrics(p, return_outputs=False):
preds = p.predictions.argmax((- 1))
proxy_labels = p.label_ids.argmax((- 1))
return {'agreement': (preds == proxy_labels).mean().item()}
trainer = DistillationTrainer(model=model, tokenizer=tokenizer, args=training_args, train_dataset=dataset, compute_metrics=compute_metrics)
if training_args.do_train:
logger.info('Training student model on teacher predictions')
trainer.train()
if training_args.do_eval:
agreement = trainer.evaluate(eval_dataset=dataset)['eval_agreement']
logger.info(f'Agreement of student and teacher predictions: {(agreement * 100):0.2f}%')
trainer.save_model()
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
ndim = len(input_size)
output_size = []
for i in range(ndim):
size = (((((input_size[i] + (2 * padding[i])) - (dilation[i] * (kernel_size[i] - 1))) - 1) // stride[i]) + 1)
if (kernel_size[i] == (- 1)):
output_size.append(1)
else:
output_size.append(size)
return output_size
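# Worked example: a 3x3 kernel with stride 2, padding 1, dilation 1 on a 5x5
# input gives ((5 + 2*1 - 1*(3-1) - 1) // 2) + 1 = 3 per dimension.
assert get_conv_output_size([5, 5], [3, 3], [2, 2], [1, 1], [1, 1]) == [3, 3]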
class Predictor_length(nn.Module):
def __init__(self, opt, key_name):
super(Predictor_length, self).__init__()
self.net = nn.Sequential(nn.Linear(opt['dim_hidden'], opt['dim_hidden']), nn.ReLU(), nn.Dropout(opt['hidden_dropout_prob']), nn.Linear(opt['dim_hidden'], opt['max_len']))
self.key_name = key_name
def forward(self, enc_output, **kwargs):
if isinstance(enc_output, list):
assert (len(enc_output) == 1)
enc_output = enc_output[0]
assert (len(enc_output.shape) == 3)
out = self.net(enc_output.mean(1))
return {self.key_name: torch.log_softmax(out, dim=(- 1))}
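# Usage sketch with a hypothetical opt dict: the head mean-pools encoder
# states of shape [batch, seq, dim_hidden] into length log-probabilities.
import torch
_opt = {'dim_hidden': 16, 'hidden_dropout_prob': 0.1, 'max_len': 20}
_head = Predictor_length(_opt, key_name='len_logprobs')
_out = _head(torch.randn(2, 7, 16))
assert _out['len_logprobs'].shape == (2, 20)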
def test_defaults():
assert (pressure('1000').value() == 1000.0)
assert (pressure('1000', 'HPA').value() == 1000.0)
assert (pressure('30', 'in').value() == 30.0)
assert (pressure('30', 'in').string() == '30.00 inches')
assert (pressure('1000').value('MB') == 1000)
assert (pressure('1000').string() == '1000.0 mb')
assert (pressure('1000', 'HPA').string() == '1000.0 hPa')
def get_train_op_for_scope(loss, optimizer, scopes, clip_gradient_norm):
for var in tf.trainable_variables():
if (not (var in tf.model_variables())):
tf.contrib.framework.add_model_variable(var)
is_trainable = (lambda x: (x in tf.trainable_variables()))
var_list = []
update_ops = []
for scope in scopes:
var_list.extend(filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
for var in tf.contrib.framework.get_model_variables(scope):
print(('%s\t%s' % (scope, var)))
return slim.learning.create_train_op(loss, optimizer, update_ops=update_ops, variables_to_train=var_list, clip_gradient_norm=clip_gradient_norm)
class TestMPM(TestCase):
def test_well_posed(self):
options = {'thermal': 'isothermal'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
model = pybamm.lithium_ion.MPM(build=False)
model.build_model()
model.check_well_posedness()
def test_default_parameter_values(self):
model = pybamm.lithium_ion.MPM()
self.assertEqual(model.default_parameter_values['Negative minimum particle radius [m]'], 0.0)
def test_lumped_thermal_model_1D(self):
options = {'thermal': 'lumped'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def test_x_full_thermal_not_implemented(self):
options = {'thermal': 'x-full'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_thermal_1plus1D(self):
options = {'current collector': 'potential pair', 'dimensionality': 1, 'thermal': 'x-lumped'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def test_particle_uniform(self):
options = {'particle': 'uniform profile'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def test_particle_quadratic(self):
options = {'particle': 'quadratic profile'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_differential_surface_form(self):
options = {'surface form': 'differential'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def test_current_sigmoid(self):
options = {'open-circuit potential': 'current sigmoid'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def test_necessary_options(self):
options = {'particle size': 'single'}
with self.assertRaises(pybamm.OptionError):
pybamm.lithium_ion.MPM(options)
options = {'surface form': 'false'}
with self.assertRaises(pybamm.OptionError):
pybamm.lithium_ion.MPM(options)
def test_nonspherical_particle_not_implemented(self):
options = {'particle shape': 'user'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_loss_active_material_stress_negative_not_implemented(self):
options = {'loss of active material': ('stress-driven', 'none')}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_loss_active_material_stress_positive_not_implemented(self):
options = {'loss of active material': ('none', 'stress-driven')}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_loss_active_material_stress_both_not_implemented(self):
options = {'loss of active material': 'stress-driven'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_reversible_plating_with_porosity_not_implemented(self):
options = {'lithium plating': 'reversible', 'lithium plating porosity change': 'true'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_stress_induced_diffusion_not_implemented(self):
options = {'stress-induced diffusion': 'true'}
with self.assertRaises(NotImplementedError):
pybamm.lithium_ion.MPM(options)
def test_msmr(self):
options = {'open-circuit potential': 'MSMR', 'particle': 'MSMR', 'number of MSMR reactions': ('6', '4'), 'intercalation kinetics': 'MSMR'}
model = pybamm.lithium_ion.MPM(options)
model.check_well_posedness()
def _get_display_cls(format):
dummy = (lambda *args, **kwargs: None)
try:
import IPython.display as display
except ImportError:
return dummy
if (format in IPYTHON_NO_DISPLAY_FORMATS):
return dummy
elif (format in IPYTHON_IMAGE_FORMATS):
return partial(display.Image, format=format)
elif (format == 'svg'):
return display.SVG
else:
raise ValueError(("Unknown format '%s' passed to `dot_graph`" % format)) |
def test_triggeringentities():
cond = OSC.TriggeringEntities(OSC.TriggeringEntitiesRule.all)
cond.add_entity('ego')
prettyprint(cond.get_element())
cond2 = OSC.TriggeringEntities(OSC.TriggeringEntitiesRule.all)
cond2.add_entity('ego')
cond3 = OSC.TriggeringEntities(OSC.TriggeringEntitiesRule.all)
cond3.add_entity('ego')
cond3.add_entity('target')
assert (cond == cond2)
assert (cond != cond3)
cond4 = OSC.TriggeringEntities.parse(cond.get_element())
assert (cond == cond4)
cond5 = OSC.TriggeringEntities.parse(cond3.get_element())
assert (cond5 == cond3)
assert (version_validation('TriggeringEntities', cond, 0) == ValidationResponse.OK)
assert (version_validation('TriggeringEntities', cond, 1) == ValidationResponse.OK)
assert (version_validation('TriggeringEntities', cond, 2) == ValidationResponse.OK)
def _camel_killer(attr):
try:
attr = str(attr)
except UnicodeEncodeError:
attr = attr.encode('utf-8', 'ignore')
s1 = _first_cap_re.sub('\\1_\\2', attr)
s2 = _all_cap_re.sub('\\1_\\2', s1)
return re.sub('_+', '_', (s2.casefold() if hasattr(s2, 'casefold') else s2.lower()))
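# Example, assuming the usual python-box module-level patterns:
import re
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')  # assumed definition
_all_cap_re = re.compile('([a-z0-9])([A-Z])')   # assumed definition
assert _camel_killer('CamelCaseHTTPHeader') == 'camel_case_http_header'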
def build(image_resizer_config):
if (not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer)):
raise ValueError('image_resizer_config not of type image_resizer_pb2.ImageResizer.')
if (image_resizer_config.WhichOneof('image_resizer_oneof') == 'keep_aspect_ratio_resizer'):
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if (not (keep_aspect_ratio_config.min_dimension <= keep_aspect_ratio_config.max_dimension)):
raise ValueError('min_dimension > max_dimension')
return functools.partial(preprocessor.resize_to_range, min_dimension=keep_aspect_ratio_config.min_dimension, max_dimension=keep_aspect_ratio_config.max_dimension)
if (image_resizer_config.WhichOneof('image_resizer_oneof') == 'fixed_shape_resizer'):
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
return functools.partial(preprocessor.resize_image, new_height=fixed_shape_resizer_config.height, new_width=fixed_shape_resizer_config.width)
raise ValueError('Invalid image resizer option.')
@pytest.mark.parametrize('is_no_update', [False, True])
def test_lock_with_incompatible_lockfile(command_tester_factory: CommandTesterFactory, poetry_with_incompatible_lockfile: Poetry, repo: TestRepository, is_no_update: bool) -> None:
repo.add_package(get_package('sampleproject', '1.3.1'))
locker = Locker(lock=(poetry_with_incompatible_lockfile.pyproject.file.path.parent / 'poetry.lock'), local_config=poetry_with_incompatible_lockfile.locker._local_config)
poetry_with_incompatible_lockfile.set_locker(locker)
tester = command_tester_factory('lock', poetry=poetry_with_incompatible_lockfile)
if is_no_update:
expected = '(?s)lock file is not compatible .* regenerate the lock file with the `poetry lock` command'
with pytest.raises(RuntimeError, match=expected):
tester.execute('--no-update')
else:
status_code = tester.execute()
assert (status_code == 0) |
class InformationRetrievalEvaluator(SentenceEvaluator):
def __init__(self, queries: Dict[(str, str)], corpus: Dict[(str, str)], relevant_docs: Dict[(str, Set[str])], query_chunk_size: int=1000, corpus_chunk_size: int=500000, mrr_at_k: List[int]=[10], ndcg_at_k: List[int]=[10], accuracy_at_k: List[int]=[1, 3, 5, 10], precision_recall_at_k: List[int]=[1, 3, 5, 10], map_at_k: List[int]=[100], show_progress_bar: bool=False, batch_size: int=32, name: str=''):
self.queries_ids = []
for qid in queries:
if ((qid in relevant_docs) and (len(relevant_docs[qid]) > 0)):
self.queries_ids.append(qid)
self.queries = [queries[qid] for qid in self.queries_ids]
self.corpus_ids = list(corpus.keys())
self.corpus = [corpus[cid] for cid in self.corpus_ids]
self.relevant_docs = relevant_docs
self.query_chunk_size = query_chunk_size
self.corpus_chunk_size = corpus_chunk_size
self.mrr_at_k = mrr_at_k
self.ndcg_at_k = ndcg_at_k
self.accuracy_at_k = accuracy_at_k
self.precision_recall_at_k = precision_recall_at_k
self.map_at_k = map_at_k
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
if name:
name = ('_' + name)
self.csv_file: str = (('Information-Retrieval_evaluation' + name) + '_results.csv')
self.csv_headers = ['epoch', 'steps']
for k in accuracy_at_k:
self.csv_headers.append('Accuracy@{}'.format(k))
for k in precision_recall_at_k:
self.csv_headers.append('Precision@{}'.format(k))
self.csv_headers.append('Recall@{}'.format(k))
for k in mrr_at_k:
self.csv_headers.append('MRR@{}'.format(k))
for k in ndcg_at_k:
self.csv_headers.append('NDCG@{}'.format(k))
for k in map_at_k:
self.csv_headers.append('MAP@{}'.format(k))
def __call__(self, model, output_path: str=None, epoch: int=(- 1), steps: int=(- 1)) -> float:
if (epoch != (- 1)):
out_txt = (' after epoch {}:'.format(epoch) if (steps == (- 1)) else ' in epoch {} after {} steps:'.format(epoch, steps))
else:
out_txt = ':'
logging.info(((('Information Retrieval Evaluation on ' + self.name) + ' dataset') + out_txt))
max_k = max(max(self.mrr_at_k), max(self.ndcg_at_k), max(self.accuracy_at_k), max(self.precision_recall_at_k), max(self.map_at_k))
query_embeddings = model.encode(self.queries, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_tensor=True)
num_hits_at_k = {k: 0 for k in self.accuracy_at_k}
precisions_at_k = {k: [] for k in self.precision_recall_at_k}
recall_at_k = {k: [] for k in self.precision_recall_at_k}
MRR = {k: 0 for k in self.mrr_at_k}
ndcg = {k: [] for k in self.ndcg_at_k}
AveP_at_k = {k: [] for k in self.map_at_k}
corpus_embeddings = model.encode(self.corpus, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_tensor=True)
for query_start_idx in range(0, len(query_embeddings), self.query_chunk_size):
query_end_idx = min((query_start_idx + self.query_chunk_size), len(query_embeddings))
queries_result_list = [[] for _ in range(query_start_idx, query_end_idx)]
for corpus_start_idx in range(0, len(corpus_embeddings), self.corpus_chunk_size):
corpus_end_idx = min((corpus_start_idx + self.corpus_chunk_size), len(corpus_embeddings))
cos_scores = pytorch_cos_sim(query_embeddings[query_start_idx:query_end_idx], corpus_embeddings[corpus_start_idx:corpus_end_idx]).cpu()
(cos_scores_top_k_values, cos_scores_top_k_idx) = torch.topk(cos_scores, min(max_k, (len(cos_scores[0]) - 1)), dim=1, largest=True, sorted=False)
cos_scores_top_k_values = cos_scores_top_k_values.tolist()
cos_scores_top_k_idx = cos_scores_top_k_idx.tolist()
for query_itr in range(len(cos_scores)):
for (sub_corpus_id, score) in zip(cos_scores_top_k_idx[query_itr], cos_scores_top_k_values[query_itr]):
corpus_id = self.corpus_ids[(corpus_start_idx + sub_corpus_id)]
queries_result_list[query_itr].append({'corpus_id': corpus_id, 'score': score})
for query_itr in range(len(queries_result_list)):
query_id = self.queries_ids[(query_start_idx + query_itr)]
top_hits = sorted(queries_result_list[query_itr], key=(lambda x: x['score']), reverse=True)
query_relevant_docs = self.relevant_docs[query_id]
for k_val in self.accuracy_at_k:
for hit in top_hits[0:k_val]:
if (hit['corpus_id'] in query_relevant_docs):
num_hits_at_k[k_val] += 1
break
for k_val in self.precision_recall_at_k:
num_correct = 0
for hit in top_hits[0:k_val]:
if (hit['corpus_id'] in query_relevant_docs):
num_correct += 1
precisions_at_k[k_val].append((num_correct / k_val))
recall_at_k[k_val].append((num_correct / len(query_relevant_docs)))
for k_val in self.mrr_at_k:
for (rank, hit) in enumerate(top_hits[0:k_val]):
if (hit['corpus_id'] in query_relevant_docs):
MRR[k_val] += (1.0 / (rank + 1))
break
for k_val in self.ndcg_at_k:
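# nDCG@k: DCG of the predicted ranking divided by the ideal DCG
# (all relevant documents ranked first).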
predicted_relevance = [(1 if (top_hit['corpus_id'] in query_relevant_docs) else 0) for top_hit in top_hits[0:k_val]]
true_relevances = ([1] * len(query_relevant_docs))
ndcg_value = (self.compute_dcg_at_k(predicted_relevance, k_val) / self.compute_dcg_at_k(true_relevances, k_val))
ndcg[k_val].append(ndcg_value)
for k_val in self.map_at_k:
num_correct = 0
sum_precisions = 0
for (rank, hit) in enumerate(top_hits[0:k_val]):
if (hit['corpus_id'] in query_relevant_docs):
num_correct += 1
sum_precisions += (num_correct / (rank + 1))
avg_precision = (sum_precisions / min(k_val, len(query_relevant_docs)))
AveP_at_k[k_val].append(avg_precision)
for k in num_hits_at_k:
num_hits_at_k[k] /= len(self.queries)
for k in precisions_at_k:
precisions_at_k[k] = np.mean(precisions_at_k[k])
for k in recall_at_k:
recall_at_k[k] = np.mean(recall_at_k[k])
for k in ndcg:
ndcg[k] = np.mean(ndcg[k])
for k in MRR:
MRR[k] /= len(self.queries)
for k in AveP_at_k:
AveP_at_k[k] = np.mean(AveP_at_k[k])
for k in num_hits_at_k:
logging.info('Accuracy@{}: {:.2f}%'.format(k, (num_hits_at_k[k] * 100)))
for k in precisions_at_k:
logging.info('Precision@{}: {:.2f}%'.format(k, (precisions_at_k[k] * 100)))
for k in recall_at_k:
logging.info('Recall@{}: {:.2f}%'.format(k, (recall_at_k[k] * 100)))
for k in MRR:
logging.info('MRR@{}: {:.4f}'.format(k, MRR[k]))
for k in ndcg:
logging.info('NDCG@{}: {:.4f}'.format(k, ndcg[k]))
for k in AveP_at_k:
logging.info('MAP@{}: {:.4f}'.format(k, AveP_at_k[k]))
logging.info('Queries: {}'.format(len(self.queries)))
logging.info('Corpus: {}\n'.format(len(self.corpus)))
if (output_path is not None):
csv_path = os.path.join(output_path, self.csv_file)
if (not os.path.isfile(csv_path)):
fOut = open(csv_path, mode='w', encoding='utf-8')
fOut.write(','.join(self.csv_headers))
fOut.write('\n')
else:
fOut = open(csv_path, mode='a', encoding='utf-8')
output_data = [epoch, steps]
for k in self.accuracy_at_k:
output_data.append(num_hits_at_k[k])
for k in self.precision_recall_at_k:
output_data.append(precisions_at_k[k])
output_data.append(recall_at_k[k])
for k in self.mrr_at_k:
output_data.append(MRR[k])
for k in self.ndcg_at_k:
output_data.append(ndcg[k])
for k in self.map_at_k:
output_data.append(AveP_at_k[k])
fOut.write(','.join(map(str, output_data)))
fOut.write('\n')
fOut.close()
return AveP_at_k[max(self.map_at_k)]
@staticmethod
def compute_dcg_at_k(relevances, k):
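# DCG@k = sum_i rel_i / log2(i + 2), summed over the top-k positions (0-indexed i).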
dcg = 0
for i in range(min(len(relevances), k)):
dcg += (relevances[i] / np.log2((i + 2)))
return dcg |
def happy_path_fixture(chain_state, token_network_state, our_address):
(token_network_state, addresses, channel_states) = create_square_network_topology(token_network_state=token_network_state, our_address=our_address)
(address1, address2, address3, address4) = addresses
chain_state.nodeaddresses_to_networkstates = {address1: NetworkState.REACHABLE, address2: NetworkState.REACHABLE, address3: NetworkState.REACHABLE, address4: NetworkState.REACHABLE}
json_data = {'result': [{'path': [to_checksum_address(our_address), to_checksum_address(address2), to_checksum_address(address3), to_checksum_address(address4)], 'estimated_fee': 0}], 'feedback_token': DEFAULT_FEEDBACK_TOKEN.hex}
response = mocked_json_response(response_data=json_data)
return (addresses, chain_state, channel_states, response, token_network_state) |
def make_casa_mask(SpecCube, outname, append_to_image=True, img=None, add_stokes=True, stokes_posn=None, overwrite=False):
try:
from casatools import image
ia = image()
except ImportError:
try:
from taskinit import ia
except ImportError:
raise ImportError('Cannot import casa. Must be run in a CASA environment.')
maskname = os.path.split(outname)[1]
maskpath = outname
temp = tempfile.NamedTemporaryFile()
temp2 = tempfile.NamedTemporaryFile(delete=False)
if add_stokes:
my_wcs = SpecCube.wcs
if (stokes_posn is None):
stokes_posn = my_wcs.wcs.naxis
new_wcs = add_stokes_axis_to_wcs(my_wcs, stokes_posn)
header = new_wcs.to_header()
shape = SpecCube.shape[::(- 1)]
shape = ((shape[:stokes_posn] + (1,)) + shape[stokes_posn:])
shape = shape[::(- 1)]
else:
header = SpecCube.header
shape = SpecCube.shape
hdu = fits.PrimaryHDU(header=header, data=np.empty(shape, dtype='int16'))
hdu.writeto(temp.name)
ia.fromfits(infile=temp.name, outfile=temp2.name, overwrite=overwrite)
temp.close()
cs = ia.coordsys()
ia.done()
ia.close()
temp2.close()
mask_arr = SpecCube.mask.include()
mask_arr = mask_arr.reshape(shape)
mask_arr = mask_arr.T
ia.fromarray(outfile=maskpath, pixels=mask_arr.astype('int16'), overwrite=overwrite)
ia.done()
ia.close()
ia.open(maskpath, cache=False)
ia.setcoordsys(cs.torecord())
ia.done()
ia.close()
if append_to_image:
if (img is None):
raise TypeError('img argument must be specified to append the mask.')
ia.open(maskpath, cache=False)
ia.calcmask((maskname + '>0.5'))
ia.done()
ia.close()
ia.open(img, cache=False)
ia.maskhandler('copy', [(maskpath + ':mask0'), maskname])
ia.maskhandler('set', maskname)
ia.done()
ia.close() |
def linkify(weburl_match):
(domain, path) = (weburl_match.group(1), (weburl_match.group(2) or ''))
if (domain.endswith(settings.DOMAIN) and (len(path) > 7)):
if (permalink := re.match('^/entry/([0-9]+)/?$', path)):
return f'({SEE}: <a href="{path}">#{permalink.group(1)}</a>)'
if (topic := re.match('^/topic/([-a-zA-Z0-9]+)/?$', path)):
slug = topic.group(1)
guess = slug.replace('-', ' ').strip()
return f'({SEE}: <a href="{path}">{guess}</a>)'
if (image := re.match('^/img/([a-z0-9]{8})/?$', path)):
return f'<a role="button" tabindex="0" data-img="/img/{image.group(1)}" aria-expanded="false">{IMAGE}</a>'
path_repr = (f'/...{path[(- 32):]}' if (len(path) > 35) else path)
url = (domain + path)
return f'<a rel="ugc nofollow noopener" target="_blank" title="{url}" href="{url}">{domain}{path_repr}</a>' |
@expose('inspector-superior?', [values_struct.W_StructInspector, values_struct.W_StructInspector])
def inspector_superior_huh(w_inspector, maybe_subinspector):
if (w_inspector is maybe_subinspector):
return values.w_false
s = maybe_subinspector.w_super
while (s is not None):
if (w_inspector is s):
return values.w_true
s = s.w_super
return values.w_false |
class PlaneAlignment(BaseCascade):
_id = 37
_iconName = 'Assembly_ConstraintAlignment.svg'
_props = (['Cascade', 'Offset'] + _AngleProps)
_tooltip = QT_TRANSLATE_NOOP('asm3', 'Add a "{}" constraint to align planar faces of two or more parts.\nThe faces become coplanar or parallel with an optional distance') |
class TargetWeightMolecule(Molecule):
def __init__(self, target_weight, **kwargs):
super(TargetWeightMolecule, self).__init__(**kwargs)
self.target_weight = target_weight
def _reward(self):
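# Reward is 1 inside the +/-25 Da window around target_weight; otherwise the
# negative distance to the nearest window edge (or -target_weight**2 if the
# SMILES state is invalid).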
molecule = Chem.MolFromSmiles(self._state)
if (molecule is None):
return (- (self.target_weight ** 2))
(lower, upper) = ((self.target_weight - 25), (self.target_weight + 25))
mw = Descriptors.MolWt(molecule)
if (lower <= mw <= upper):
return 1
return (- min(abs((lower - mw)), abs((upper - mw)))) |
class InnerProductTest(unittest.TestCase):
def test_inner_product(self):
state_1 = numpy.array([1.0, 1j])
state_2 = numpy.array([1.0, (- 1j)])
self.assertAlmostEqual(inner_product(state_1, state_1), 2.0)
self.assertAlmostEqual(inner_product(state_1, state_2), 0.0) |
def get_f1_score(prediction, ground_truth):
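# SQuAD-style token-overlap F1. Worked example: prediction 'the cat sat' vs
# ground truth 'the cat' gives 2 shared tokens, precision 2/3, recall 2/2,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.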
prediction_tokens = normalize_prediction(prediction, lowercase=True).split()
ground_truth_tokens = normalize_prediction(ground_truth, lowercase=True).split()
common = (Counter(prediction_tokens) & Counter(ground_truth_tokens))
num_same = sum(common.values())
if (num_same == 0):
return 0
precision = ((1.0 * num_same) / len(prediction_tokens))
recall = ((1.0 * num_same) / len(ground_truth_tokens))
f1 = (((2 * precision) * recall) / (precision + recall))
return f1 |
def test_pythontag_in_setup_cfg(temp_pkg):
temp_pkg.joinpath('setup.cfg').write_text('[bdist_wheel]\npython_tag=py32', encoding='utf-8')
subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'], cwd=str(temp_pkg))
dist_dir = temp_pkg.joinpath('dist')
assert dist_dir.is_dir()
wheels = list(dist_dir.iterdir())
assert (len(wheels) == 1)
assert wheels[0].name.startswith('Test-1.0-py32-')
assert (wheels[0].suffix == '.whl') |
def from_csv(fp, field_names=None, **kwargs):
fmtparams = {}
for param in ['delimiter', 'doublequote', 'escapechar', 'lineterminator', 'quotechar', 'quoting', 'skipinitialspace', 'strict']:
if (param in kwargs):
fmtparams[param] = kwargs.pop(param)
if fmtparams:
reader = csv.reader(fp, **fmtparams)
else:
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable(**kwargs)
if field_names:
table.field_names = field_names
elif py3k:
table.field_names = [x.strip() for x in next(reader)]
else:
table.field_names = [x.strip() for x in reader.next()]
for row in reader:
table.add_row([x.strip() for x in row])
return table |
def parse_type_comment(type_comment: str, line: int, column: int, errors: (Errors | None)) -> tuple[((list[str] | None), (ProperType | None))]:
try:
typ = ast3_parse(type_comment, '<type_comment>', 'eval')
except SyntaxError:
if (errors is not None):
stripped_type = type_comment.split('#', 2)[0].strip()
err_msg = message_registry.TYPE_COMMENT_SYNTAX_ERROR_VALUE.format(stripped_type)
errors.report(line, column, err_msg.value, blocker=True, code=err_msg.code)
return (None, None)
else:
raise
else:
extra_ignore = TYPE_IGNORE_PATTERN.match(type_comment)
if extra_ignore:
tag: (str | None) = extra_ignore.group(1)
ignored: (list[str] | None) = parse_type_ignore_tag(tag)
if (ignored is None):
if (errors is not None):
errors.report(line, column, message_registry.INVALID_TYPE_IGNORE.value, code=codes.SYNTAX)
else:
raise SyntaxError
else:
ignored = None
assert isinstance(typ, ast3.Expression)
converted = TypeConverter(errors, line=line, override_column=column, is_evaluated=False).visit(typ.body)
return (ignored, converted) |
def reshape_for_gwas(spark, label_df):
assert check_argument_types()
if (label_df.index.nlevels == 1):
transposed_df = label_df.T
column_names = ['label', 'values']
elif (label_df.index.nlevels == 2):
ordered_cols = pd.unique(label_df.index.get_level_values(0))
transposed_df = label_df.T.stack()[ordered_cols]
column_names = ['label', 'contigName', 'values']
else:
raise ValueError('label_df must be indexed by sample id or by (sample id, contig name)')
transposed_df['values_array'] = transposed_df.to_numpy().tolist()
return spark.createDataFrame(transposed_df[['values_array']].reset_index(), column_names) |
def process_url(item, exclude_websites):
source = item.get('source').get('href')
# The URL prefix inside the inner f-string was stripped from the source;
# '^https?://(www\.)?{website}' is an assumed reconstruction.
if (not all([(not re.match(website, source)) for website in [f'^https?://(www\\.)?{website}' for website in exclude_websites]])):
return
url = item.get('link')
if re.match(GOOGLE_NEWS_REGEX, url):
url = requests.head(url).headers.get('location', url)
return url |
def multiply_inv_gaussians_batch(mus, lambdas):
assert (len(mus) == len(lambdas))
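# Product of Gaussians in natural parameters: Lambda_new = sum_i Lambda_i + I
# (the identity acts as a standard-normal prior) and
# mu_new = Lambda_new^{-1} * sum_i Lambda_i mu_i.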
batch_size = mus[0].shape.as_list()[:(- 1)]
d_z = lambdas[0].shape.as_list()[(- 1)]
identity_matrix = tf.tile(tf.expand_dims(tf.expand_dims(tf.eye(d_z), axis=0), axis=0), (batch_size + [1, 1]))
lambda_new = (tf.reduce_sum(lambdas, axis=0) + identity_matrix)
mus_summed = tf.reduce_sum([tf.einsum('bcij, bcj -> bci', lamb, mu) for (lamb, mu) in zip(lambdas, mus)], axis=0)
sigma_new = tf.linalg.inv(lambda_new)
mu_new = tf.einsum('bcij, bcj -> bci', sigma_new, mus_summed)
return (mu_new, sigma_new, lambda_new) |
def get_cams(latitude, longitude, start, end, email, identifier='mcclear', altitude=None, time_step='1h', time_ref='UT', verbose=False, integrated=False, label=None, map_variables=True, server=URL, timeout=30):
try:
time_step_str = TIME_STEPS_MAP[time_step]
except KeyError:
raise ValueError(f'Time step not recognized. Must be one of {list(TIME_STEPS_MAP.keys())}')
if (verbose and ((time_step != '1min') or (time_ref != 'UT'))):
verbose = False
warnings.warn('Verbose mode only supports 1 min. UT time series!')
if (identifier not in ['mcclear', 'cams_radiation']):
raise ValueError('Identifier must be either mcclear or cams_radiation')
verbose = str(verbose).lower()
if (altitude is None):
altitude = (- 999)
start = pd.to_datetime(start).strftime('%Y-%m-%d')
end = pd.to_datetime(end).strftime('%Y-%m-%d')
email = email.replace('@', '%2540')
identifier = 'get_{}'.format(identifier.lower())
# assumed reconstruction of the stripped URL (matches the CAMS WPS endpoint)
base_url = f'https://{server}/service/wps'
data_inputs_dict = {'latitude': latitude, 'longitude': longitude, 'altitude': altitude, 'date_begin': start, 'date_end': end, 'time_ref': time_ref, 'summarization': time_step_str, 'username': email, 'verbose': verbose}
data_inputs = ';'.join([f'{key}={value}' for (key, value) in data_inputs_dict.items()])
params = {'Service': 'WPS', 'Request': 'Execute', 'Identifier': identifier, 'version': '1.0.0', 'RawDataOutput': 'irradiation'}
res = requests.get(((base_url + '?DataInputs=') + data_inputs), params=params, timeout=timeout)
if (not res.ok):
errors = res.text.split('ows:ExceptionText')[1][1:(- 2)]
res.reason = ('%s: <%s>' % (res.reason, errors))
res.raise_for_status()
else:
fbuf = io.StringIO(res.content.decode('utf-8'))
(data, metadata) = parse_cams(fbuf, integrated=integrated, label=label, map_variables=map_variables)
return (data, metadata) |
def merge_pks(string):
curdir = os.getcwd()
files = os.listdir(curdir)
relevant_files = sorted([fl for fl in files if (string in fl)])
dfs = [pickle.load(open(fl, 'rb')) for fl in relevant_files]
merged_dfs = {}
for df in dfs:
for (key, value) in df.items():
if (key == 'bss_evals'):
try:
merged_dfs[key].extend(value)
except KeyError:
merged_dfs[key] = []
merged_dfs[key].extend(value)
else:
merged_dfs[key] = value
return merged_dfs |
def fmt_relation(relation):
labels = relation.subsystem.node_labels
body = fmt_relata(relation.relata, node_labels=labels)
data = [('', relation.phi), ('Purview', fmt_mechanism(relation.purview, node_labels=labels)), ('Relata', '')]
data = '\n'.join(align_columns(data))
body = center(header(data, body))
return header('Relation', body, over_char=HEADER_BAR_3, under_char=HEADER_BAR_3) |
class LeakyReLUBNConv2d(nn.Module):
def __init__(self, n_in, n_out, kernel_size, stride, padding=0):
super(LeakyReLUBNConv2d, self).__init__()
model = []
model += [nn.Conv2d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True)]
model += [nn.BatchNorm2d(n_out)]
model += [nn.LeakyReLU(inplace=True)]
self.model = nn.Sequential(*model)
self.model.apply(gaussian_weights_init)
def forward(self, x):
return self.model(x) |
.parametrize('username,password', users)
def test_create_empty(db, client, username, password):
client.login(username=username, password=password)
url = reverse(urlnames['list'])
response = client.post(url, {})
assert (response.status_code == status_map['create_error'][username]), response.json() |
class RandConv2d(nn.Module):
def __init__(self, sigma_0, N, init_s, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(RandConv2d, self).__init__()
if ((in_channels % groups) != 0):
raise ValueError('in_channels must be divisible by groups')
if ((out_channels % groups) != 0):
raise ValueError('out_channels must be divisible by groups')
self.sigma_0 = sigma_0
self.N = N
self.init_s = init_s
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.mu_weight = Parameter(torch.Tensor(out_channels, (in_channels // groups), kernel_size, kernel_size))
self.sigma_weight = Parameter(torch.Tensor(out_channels, (in_channels // groups), kernel_size, kernel_size))
self.register_buffer('eps_weight', torch.Tensor(out_channels, (in_channels // groups), kernel_size, kernel_size))
if bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.sigma_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels))
else:
self.register_parameter('mu_bias', None)
self.register_parameter('sigma_bias', None)
self.register_parameter('eps_bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
n *= (self.kernel_size ** 2)
stdv = (1.0 / math.sqrt(n))
self.mu_weight.data.uniform_((- stdv), stdv)
self.sigma_weight.data.fill_(self.init_s)
if (self.mu_bias is not None):
self.mu_bias.data.uniform_((- stdv), stdv)
self.sigma_bias.data.fill_(self.init_s)
def forward_(self, input):
weight = noise_fn(self.mu_weight, self.sigma_weight, self.eps_weight, self.sigma_0, self.N)
bias = None
if (self.mu_bias is not None):
bias = noise_fn(self.mu_bias, self.sigma_bias, self.eps_bias, self.sigma_0, self.N)
out = F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
return out
def forward(self, input):
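# Reparameterization: w = mu + sigma * eps with sigma = exp(sigma_weight), plus the
# closed-form KL(N(mu, sigma^2) || N(0, sigma_0^2)) = log(sigma_0 / sigma)
# + (sigma^2 + mu^2) / (2 * sigma_0^2) - 1/2, summed elementwise.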
sig_weight = torch.exp(self.sigma_weight)
weight = (self.mu_weight + (sig_weight * self.eps_weight.normal_()))
kl_weight = (((math.log(self.sigma_0) - self.sigma_weight) + (((sig_weight ** 2) + (self.mu_weight ** 2)) / (2 * (self.sigma_0 ** 2)))) - 0.5)
bias = None
if (self.mu_bias is not None):
sig_bias = torch.exp(self.sigma_bias)
bias = (self.mu_bias + (sig_bias * self.eps_bias.normal_()))
kl_bias = (((math.log(self.sigma_0) - self.sigma_bias) + (((sig_bias ** 2) + (self.mu_bias ** 2)) / (2 * (self.sigma_0 ** 2)))) - 0.5)
out = F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
kl = ((kl_weight.sum() + kl_bias.sum()) if (self.mu_bias is not None) else kl_weight.sum())
return (out, kl) |
class GPSPilot(AutopilotPilot):
def __init__(self, ap):
super(GPSPilot, self).__init__('gps', ap)
self.wind_gps_offset = HeadingOffset()
self.true_wind_gps_offset = HeadingOffset()
self.gains = {}
self.PosGain('P', 0.003, 0.02)
self.PosGain('D', 0.1, 1.0)
self.PosGain('DD', 0.05, 1.0)
self.PosGain('FF', 0.6, 3.0)
self.wind_gps_offset = HeadingOffset()
self.true_wind_gps_offset = HeadingOffset()
def compute_heading(self):
ap = self.ap
sensors = ap.sensors
gps_course = ap.sensors.gps.track.value
if (sensors.wind.source.value != 'none'):
offset = resolv((sensors.wind.wdirection + gps_course), self.wind_gps_offset.value)
self.wind_gps_offset.update(offset, sensors.wind.wfactor)
if (sensors.truewind.source.value != 'none'):
offset = resolv((sensors.truewind.wdirection + gps_course), self.true_wind_gps_offset.value)
self.true_wind_gps_offset.update(offset, sensors.truewind.wfactor)
mode = ap.mode.value
if (mode == 'compass'):
compass = ap.boatimu.SensorValues['heading_lowpass'].value
ap.heading.set(compass)
if ((mode == 'gps') or (mode == 'nav')):
ap.heading.set(gps_course)
elif (mode == 'wind'):
wind = resolv((self.wind_gps_offset.value - gps_course), 180)
ap.heading.set(wind)
elif (mode == 'true wind'):
true_wind = resolv((self.true_wind_gps_offset.value - gps_course), 180)
ap.heading.set(true_wind)
def best_mode(self, mode):
modes = self.ap.modes.value
if (not (mode in modes)):
if ('gps' in modes):
return 'gps'
return 'compass'
return mode
def process(self):
ap = self.ap
headingrate = ap.boatimu.SensorValues['headingrate_lowpass'].value
headingraterate = ap.boatimu.SensorValues['headingraterate_lowpass'].value
gain_values = {'P': ap.heading_error.value, 'D': headingrate, 'DD': headingraterate, 'FF': ap.heading_command_rate.value}
command = self.Compute(gain_values)
if ap.enabled.value:
ap.servo.command.command(command) |
def has_arg(fn, name, accept_all=False):
if (sys.version_info < (3,)):
arg_spec = inspect.getargspec(fn)
if (accept_all and (arg_spec.keywords is not None)):
return True
return (name in arg_spec.args)
elif (sys.version_info < (3, 3)):
arg_spec = inspect.getfullargspec(fn)
if (accept_all and (arg_spec.varkw is not None)):
return True
return ((name in arg_spec.args) or (name in arg_spec.kwonlyargs))
else:
signature = inspect.signature(fn)
parameter = signature.parameters.get(name)
if (parameter is None):
if accept_all:
for param in signature.parameters.values():
if (param.kind == inspect.Parameter.VAR_KEYWORD):
return True
return False
return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)) |
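# Hedged usage sketch for has_arg above (not from the source); assumes Python 3.3+
# so the inspect.signature branch runs, and that the sys/inspect imports are present.
def _demo(a, b=1, **kw):
pass
assert has_arg(_demo, 'b')
assert (not has_arg(_demo, 'c'))
assert has_arg(_demo, 'c', accept_all=True) |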
class TermsOfService(Object):
def __init__(self, *, id: str, text: str, entities: List['types.MessageEntity']):
super().__init__()
self.id = id
self.text = text
self.entities = entities
@staticmethod
def _parse(terms_of_service: 'raw.types.help.TermsOfService') -> 'TermsOfService':
return TermsOfService(id=terms_of_service.id.data, text=terms_of_service.text, entities=([types.MessageEntity._parse(None, entity, {}) for entity in terms_of_service.entities] if terms_of_service.entities else None)) |
@DECODERS.register_module()
class BaseDecoder(BaseModule):
def __init__(self, init_cfg=None, **kwargs):
super().__init__(init_cfg=init_cfg)
def forward_train(self, feat, out_enc, targets_dict, img_metas):
raise NotImplementedError
def forward_test(self, feat, out_enc, img_metas):
raise NotImplementedError
def forward(self, feat, out_enc, targets_dict=None, img_metas=None, train_mode=True):
self.train_mode = train_mode
if train_mode:
return self.forward_train(feat, out_enc, targets_dict, img_metas)
return self.forward_test(feat, out_enc, img_metas) |
@pytest.mark.mongo
def test_mongo_core_keywords():
@cachier(mongetter=_test_mongetter)
def _test_mongo_caching(arg_1, arg_2):
return ((random() + arg_1) + arg_2)
_test_mongo_caching.clear_cache()
val1 = _test_mongo_caching(1, arg_2=2)
val2 = _test_mongo_caching(1, arg_2=2)
assert (val1 == val2)
val3 = _test_mongo_caching(1, arg_2=2, ignore_cache=True)
assert (val3 != val1)
val4 = _test_mongo_caching(1, arg_2=2)
assert (val4 == val1)
val5 = _test_mongo_caching(1, arg_2=2, overwrite_cache=True)
assert (val5 != val1)
val6 = _test_mongo_caching(1, arg_2=2)
assert (val6 == val5) |
def select_cond_path(mode):
path = 'data/example_conditioning'
path = os.path.join(path, mode)
onlyfiles = [f for f in sorted(os.listdir(path))]
selected = widgets.RadioButtons(options=onlyfiles, description='Select conditioning:', disabled=False)
display(selected)
selected_path = os.path.join(path, selected.value)
return selected_path |
@pytest.fixture(params=[pytest.param(('linux', 'linux', 'x86_64', '64'), id='linux-64'), pytest.param(('linux', 'linux', 'i686', '32'), id='linux-32'), pytest.param(('linux', 'linux', 'aarch64', 'arm'), id='linux-arm'), pytest.param(('macos', 'darwin', 'x86_64', '64'), id='macos-64'), pytest.param(('macos', 'darwin', 'arm64', 'arm'), id='macos-arm'), pytest.param(('windows', 'win32', 'x86', '32'), id='windows-32'), pytest.param(('windows', 'win32', 'AMD64', '64'), id='windows-64'), pytest.param(('windows', 'win32', 'ARM64', 'arm'), id='windows-arm')])
def platform_machine(request, monkeypatch):
(platform_name, platform_value, machine_value, machine_name) = request.param
monkeypatch.setattr(sys, 'platform', platform_value)
monkeypatch.setattr(platform_module, 'machine', (lambda : machine_value))
return (platform_name, machine_name) |
class KGESmoothCELoss(nn.Module):
def __init__(self, smoothing=0.001, mode='multiply'):
super(KGESmoothCELoss, self).__init__()
self.loss_function = CESmoothLossOnevsAll(smoothing=smoothing)
self.mode = mode
def forward(self, head_emb, tail_emb, all_rel_emb, labels):
if (self.mode == 'multiply'):
ent_emb = (head_emb * tail_emb)
elif (self.mode == 'trans'):
ent_emb = (head_emb - tail_emb)
else:
raise ValueError(('Training batch mode %s not supported' % self.mode))
scores = torch.matmul(ent_emb, all_rel_emb.transpose(1, 0))
loss = self.loss_function(scores, labels)
return loss |
class HardSwishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_swish_jit_bwd(x, grad_output)
@staticmethod
def symbolic(g, self):
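# ONNX export graph for hard-swish: x * clip(x + 3, 0, 6) / 6.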
input = g.op('Add', self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
hardtanh_ = g.op('Clip', input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
hardtanh_ = g.op('Div', hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
return g.op('Mul', self, hardtanh_) |
def str_for_potential_or_deterministic(var: TensorVariable, formatting: str='plain', include_params: bool=True, dist_name: str='Deterministic') -> str:
print_name = (var.name if (var.name is not None) else '<unnamed>')
if ('latex' in formatting):
print_name = (('\\text{' + _latex_escape(print_name.strip('$'))) + '}')
if include_params:
return rf'${print_name} \sim \operatorname{{{dist_name}}}({_str_for_expression(var, formatting=formatting)})$'
else:
return rf'${print_name} \sim \operatorname{{{dist_name}}}$'
elif include_params:
return f'{print_name} ~ {dist_name}({_str_for_expression(var, formatting=formatting)})'
else:
return f'{print_name} ~ {dist_name}' |
class SHHA(data.Dataset):
def __init__(self, data_path, mode, main_transform=None, img_transform=None, gt_transform=None, data_augment=1):
self.img_path = (data_path + '/img')
self.gt_path = (data_path + '/den')
self.data_files = [filename for filename in os.listdir(self.img_path) if os.path.isfile(os.path.join(self.img_path, filename))]
self.num_samples = len(self.data_files)
self.main_transform = main_transform
self.img_transform = img_transform
self.gt_transform = gt_transform
self.data_augment = data_augment
def __getitem__(self, index):
index = int((index / self.data_augment))
fname = self.data_files[index]
(img, den) = self.read_image_and_gt(fname)
if (self.main_transform is not None):
(img, den) = self.main_transform(img, den)
if (self.img_transform is not None):
img = self.img_transform(img)
if (self.gt_transform is not None):
den = self.gt_transform(den)
return (img, den)
def __len__(self):
return self.num_samples
def read_image_and_gt(self, fname):
img = Image.open(os.path.join(self.img_path, fname))
if (img.mode == 'L'):
img = img.convert('RGB')
den = pd.read_csv(os.path.join(self.gt_path, (os.path.splitext(fname)[0] + '.csv')), sep=',', header=None).values
den = den.astype(np.float32, copy=False)
den = Image.fromarray(den)
return (img, den)
def get_num_samples(self):
return self.num_samples |
def save_embedding(word_list, word_embedding, word_list_file='embedding/yelp_words.txt', word_embedding_file='embedding/yelp_embedding.txt'):
with open(word_list_file, 'w') as fopen:
for w in word_list:
fopen.write((w + '\n'))
with open(word_embedding_file, 'w') as fopen:
for i in range(len(word_list)):
w = word_list[i]
fopen.write(w)
for n in word_embedding[i]:
fopen.write(' {:.5f}'.format(n))
fopen.write('\n') |
class ReportQuerysetMixin():
impression_model = None
def get_queryset(self, **kwargs):
queryset = self.impression_model.objects.all()
if (('start_date' in kwargs) and kwargs['start_date']):
queryset = queryset.filter(date__gte=kwargs['start_date'])
if (('end_date' in kwargs) and kwargs['end_date']):
queryset = queryset.filter(date__lte=kwargs['end_date'])
if (('advertiser' in kwargs) and kwargs['advertiser']):
if isinstance(kwargs['advertiser'], Advertiser):
queryset = queryset.filter(advertisement__flight__campaign__advertiser=kwargs['advertiser'])
else:
queryset = queryset.filter(advertisement__flight__campaign__advertiser__slug=kwargs['advertiser'])
if (('flight' in kwargs) and kwargs['flight']):
queryset = queryset.filter(advertisement__flight=kwargs['flight'])
if (('publisher' in kwargs) and kwargs['publisher']):
if isinstance(kwargs['publisher'], Publisher):
queryset = queryset.filter(publisher=kwargs['publisher'])
else:
queryset = queryset.filter(publisher__slug=kwargs['publisher'])
if (('publishers' in kwargs) and kwargs['publishers']):
queryset = queryset.filter(publisher__in=kwargs['publishers'])
if (('campaign_type' in kwargs) and (kwargs['campaign_type'] in ALL_CAMPAIGN_TYPES)):
queryset = queryset.filter(advertisement__flight__campaign__campaign_type=kwargs['campaign_type'])
if (('region' in kwargs) and kwargs['region']):
queryset = queryset.filter(region=kwargs['region'])
if (('topic' in kwargs) and kwargs['topic']):
queryset = queryset.filter(topic=kwargs['topic'])
return queryset |
class TestDataHandler(TestCase):
@classmethod
def setUpClass(cls):
cls.spx_index_ticker = BloombergTicker('SPX Index')
cls.google_ticker = BloombergTicker('GOOGL US Equity')
cls.microsoft_ticker = BloombergTicker('MSFT US Equity')
cls.start_date = str_to_date('2018-01-02')
cls.end_date = str_to_date('2018-01-31')
cls.end_date_trimmed = str_to_date('2018-01-30')
cls.get_history_field = 'PX_TO_BOOK_RATIO'
def setUp(self):
try:
self.price_data_provider = get_data_provider()
except Exception as e:
raise self.skipTest(e)
self.timer = SettableTimer()
self.data_handler = DailyDataHandler(self.price_data_provider, self.timer)
MarketOpenEvent.set_trigger_time({'hour': 13, 'minute': 30, 'second': 0, 'microsecond': 0})
MarketCloseEvent.set_trigger_time({'hour': 20, 'minute': 0, 'second': 0, 'microsecond': 0})
def test_get_price_when_end_date_is_in_the_past(self):
self.timer.set_current_time(str_to_date('2018-02-12 00:00:00.000000', DateFormat.FULL_ISO))
prices_tms = self.data_handler.get_price(self.spx_index_ticker, PriceField.Close, self.start_date, self.end_date)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date, prices_tms.index[(- 1)].to_pydatetime())
def test_get_price_when_end_date_is_today_after_market_close(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketCloseEvent.trigger_time()) + RelativeDelta(hours=1)))
prices_tms = self.data_handler.get_price(self.spx_index_ticker, PriceField.Close, self.start_date, self.end_date)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date, prices_tms.index[(- 1)].to_pydatetime())
def test_get_price_when_end_date_is_today_before_market_close(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
close_prices_tms = self.data_handler.get_price(self.spx_index_ticker, PriceField.Close, self.start_date, self.end_date)
self.assertEqual(self.start_date, close_prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date_trimmed, close_prices_tms.index[(- 1)].to_pydatetime())
def test_get_open_price_when_end_date_is_today_before_market_close__single_ticker(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
open_prices_tms = self.data_handler.get_price(self.spx_index_ticker, PriceField.Open, self.start_date)
self.assertEqual(self.start_date, open_prices_tms.index[0].to_pydatetime())
self.assertEqual(str_to_date('2018-01-30'), open_prices_tms.index[(- 1)].to_pydatetime())
def test_get_open_price_when_end_date_is_today_before_market_close__multiple_tickers(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
tickers = [self.spx_index_ticker, self.microsoft_ticker]
open_prices_tms = self.data_handler.get_price(tickers, PriceField.Open, self.start_date, self.timer.now())
self.assertEqual(self.start_date, open_prices_tms.index[0].to_pydatetime())
self.assertEqual(str_to_date('2018-01-30'), open_prices_tms.index[(- 1)].to_pydatetime())
def test_get_price_when_end_date_is_tomorrow(self):
self.timer.set_current_time(((str_to_date('2018-01-30') + MarketCloseEvent.trigger_time()) + RelativeDelta(hours=1)))
prices_tms = self.data_handler.get_price(self.spx_index_ticker, PriceField.Close, self.start_date, self.end_date_trimmed)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date_trimmed, prices_tms.index[(- 1)].to_pydatetime())
def test_get_last_price_single_ticker(self):
with self.subTest('Test if getting single ticker value works, when a single ticker is passed'):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
single_price = self.data_handler.get_last_available_price(self.spx_index_ticker)
self.assertTrue(isinstance(single_price, float))
with self.subTest('Test at market open'):
self.timer.set_current_time((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()))
at_market_open = self.data_handler.get_last_available_price([self.spx_index_ticker])
self.assertEqual(self.spx_index_ticker, at_market_open.index[0])
self.assertEqual(single_price, at_market_open[0])
with self.subTest('Test during the trading session'):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
during_the_day_last_prices = self.data_handler.get_last_available_price([self.spx_index_ticker])
self.assertEqual(self.spx_index_ticker, during_the_day_last_prices.index[0])
self.assertEqual(single_price, during_the_day_last_prices[0])
with self.subTest('Test after the trading session'):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketCloseEvent.trigger_time()) + RelativeDelta(hours=1)))
after_close_last_prices = self.data_handler.get_last_available_price([self.spx_index_ticker])
self.assertEqual(self.spx_index_ticker, after_close_last_prices.index[0])
self.assertNotEqual(during_the_day_last_prices[0], after_close_last_prices[0])
with self.subTest('Test before the trading session'):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) - RelativeDelta(hours=1)))
before_trading_session_prices = self.data_handler.get_last_available_price([self.spx_index_ticker])
self.assertEqual(self.spx_index_ticker, before_trading_session_prices.index[0])
self.assertNotEqual(during_the_day_last_prices[0], before_trading_session_prices[0])
self.assertNotEqual(after_close_last_prices[0], before_trading_session_prices[0])
def test_get_last_price_with_multiple_tickers_when_current_data_is_unavailable(self):
self.timer.set_current_time(((str_to_date('2018-01-01') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
last_prices = self.data_handler.get_last_available_price([self.spx_index_ticker, self.google_ticker])
self.assertEqual(self.spx_index_ticker, last_prices.index[0])
self.assertEqual(self.google_ticker, last_prices.index[1])
def test_get_last_price_with_empty_tickers_list(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
last_prices = self.data_handler.get_last_available_price([])
assert_series_equal(PricesSeries(), last_prices)
def test_get_history_when_end_date_is_in_the_past(self):
self.timer.set_current_time(str_to_date('2018-02-12 00:00:00.000000', DateFormat.FULL_ISO))
prices_tms = self.data_handler.get_history(self.spx_index_ticker, self.get_history_field, self.start_date, self.end_date)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date, prices_tms.index[(- 1)].to_pydatetime())
def test_get_history_when_end_date_is_today_after_market_close(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketCloseEvent.trigger_time()) + RelativeDelta(hours=1)))
prices_tms = self.data_handler.get_history(self.spx_index_ticker, self.get_history_field, self.start_date, self.end_date)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date, prices_tms.index[(- 1)].to_pydatetime())
def test_get_history_when_end_date_is_today_before_market_close(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
prices_tms = self.data_handler.get_history(self.spx_index_ticker, self.get_history_field, self.start_date, self.end_date)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date_trimmed, prices_tms.index[(- 1)].to_pydatetime())
def test_get_history_when_end_date_is_tomorrow(self):
self.timer.set_current_time(((str_to_date('2018-01-30') + MarketCloseEvent.trigger_time()) + RelativeDelta(hours=1)))
prices_tms = self.data_handler.get_history(self.spx_index_ticker, self.get_history_field, self.start_date, self.end_date_trimmed)
self.assertEqual(self.start_date, prices_tms.index[0].to_pydatetime())
self.assertEqual(self.end_date_trimmed, prices_tms.index[(- 1)].to_pydatetime())
def test_get_history_with_multiple_tickers(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
result_df = self.data_handler.get_history([self.microsoft_ticker, self.google_ticker], self.get_history_field, self.start_date, self.end_date_trimmed)
self.assertEqual(self.microsoft_ticker, result_df.columns[0])
self.assertEqual(self.google_ticker, result_df.columns[1])
self.assertEqual(self.start_date, result_df.index[0].to_pydatetime())
self.assertEqual(self.end_date_trimmed, result_df.index[(- 1)].to_pydatetime())
self.assertEqual(result_df.shape, (20, 2))
def test_historical_price_many_tickers_many_fields(self):
self.timer.set_current_time(((str_to_date('2018-01-31') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
result_array = self.data_handler.historical_price([self.microsoft_ticker], [PriceField.Open, PriceField.Close], nr_of_bars=5)
self.assertEqual(QFDataArray, type(result_array))
self.assertEqual((5, 1, 2), result_array.shape)
expected_dates_str = ['2018-01-24', '2018-01-25', '2018-01-26', '2018-01-29', '2018-01-30']
expected_dates = [str_to_date(date_str) for date_str in expected_dates_str]
assert_same_index(pd.DatetimeIndex(expected_dates, name=DATES), result_array.dates.to_index(), check_index_type=True, check_names=True)
def test_historical_price_many_tickers_one_field(self):
self.timer.set_current_time(((str_to_date('2018-01-04') + MarketOpenEvent.trigger_time()) + RelativeDelta(hours=1)))
result_df = self.data_handler.historical_price([self.microsoft_ticker], PriceField.Open, nr_of_bars=5)
self.assertEqual(PricesDataFrame, type(result_df))
expected_dates_idx = pd.DatetimeIndex(['2017-12-27', '2017-12-28', '2017-12-29', '2018-01-02', '2018-01-03'], name=DATES)
assert_same_index(expected_dates_idx, result_df.index, check_index_type=True, check_names=True)
expected_tickers_idx = pd.Index([self.microsoft_ticker], name=TICKERS)
assert_same_index(expected_tickers_idx, result_df.columns, check_index_type=True, check_names=True) |
def read_dataset(dname):
(d, ext) = op.splitext(dname)
if (ext.lower() == '.csv'):
dname = d
if (dname not in dts['dataset'].to_numpy()):
raise ValueError('Dataset does not exist. Valid dataset names are', dts['dataset'].to_numpy())
return pd.read_csv(op.join(ddir, (dname + '.csv')), sep=',') |
def test__shaded_fraction_array():
solar_zenith = np.array([0.0, 60.0, 90.0, 60.0])
solar_azimuth = np.array([180.0, 180.0, 180.0, 180.0])
surface_azimuth = np.array([180.0, 180.0, 180.0, 210.0])
surface_tilt = np.array([30.0, 60.0, 0.0, 30.0])
gcr = 1.0
result = infinite_sheds._shaded_fraction(solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, gcr)
x = (0.75 + (np.sqrt(3) / 2))
expected = np.array([0.0, 0.5, 0.0, ((x - 1) / x)])
assert np.allclose(result, expected) |
def extract_connecting_borders_between_points(cell_min_point, cell_length_x, cell_length_y, point_begin, point_end, zero_tolerance):
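# Border ids, walked in the order 0 -> 1 -> 2 -> 3: 0 = left (x == x_min),
# 1 = top (y == y_min + len_y), 2 = right (x == x_min + len_x), 3 = bottom (y == y_min).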
if (point_begin == point_end):
return ([], [])
border_id_p_begin = (- 1)
border_id_p_end = (- 1)
if (cwt(point_begin[0], cell_min_point[0], zero_tolerance) == 0):
border_id_p_begin = 0
elif (cwt(point_begin[1], (cell_min_point[1] + cell_length_y), zero_tolerance) == 0):
border_id_p_begin = 1
elif (cwt(point_begin[0], (cell_min_point[0] + cell_length_x), zero_tolerance) == 0):
border_id_p_begin = 2
elif (cwt(point_begin[1], cell_min_point[1], zero_tolerance) == 0):
border_id_p_begin = 3
if (cwt(point_end[0], cell_min_point[0], zero_tolerance) == 0):
border_id_p_end = 0
elif (cwt(point_end[1], (cell_min_point[1] + cell_length_y), zero_tolerance) == 0):
border_id_p_end = 1
elif (cwt(point_end[0], (cell_min_point[0] + cell_length_x), zero_tolerance) == 0):
border_id_p_end = 2
elif (cwt(point_end[1], cell_min_point[1], zero_tolerance) == 0):
border_id_p_end = 3
if ((border_id_p_begin == (- 1)) or (border_id_p_end == (- 1))):
print((cell_min_point, (cell_min_point[0] + cell_length_x), (cell_min_point[1] + cell_length_y), point_begin, point_end, cell_length_x, cell_length_y))
raise Exception("Error! begin/end point doesn't lie on the cell border!!!")
segments = [point_begin]
involved_border_ids = [border_id_p_begin]
border_id_p_search = border_id_p_begin
if (border_id_p_search == border_id_p_end):
if (border_id_p_search == 0):
if (cwt(point_begin[1], point_end[1], zero_tolerance) == (- 1)):
segments.append(point_end)
return (segments, involved_border_ids)
else:
segments.append([cell_min_point[0], (cell_min_point[1] + cell_length_y)])
border_id_p_search = ((border_id_p_search + 1) % 4)
elif (border_id_p_search == 1):
if (cwt(point_begin[0], point_end[0], zero_tolerance) == (- 1)):
segments.append(point_end)
return (segments, involved_border_ids)
else:
segments.append([(cell_min_point[0] + cell_length_x), (cell_min_point[1] + cell_length_y)])
border_id_p_search = ((border_id_p_search + 1) % 4)
elif (border_id_p_search == 2):
if (cwt(point_begin[1], point_end[1], zero_tolerance) == 1):
segments.append(point_end)
return (segments, involved_border_ids)
else:
segments.append([(cell_min_point[0] + cell_length_x), cell_min_point[1]])
border_id_p_search = ((border_id_p_search + 1) % 4)
elif (border_id_p_search == 3):
if (cwt(point_begin[0], point_end[0], zero_tolerance) == 1):
segments.append(point_end)
return (segments, involved_border_ids)
else:
segments.append([cell_min_point[0], cell_min_point[1]])
border_id_p_search = ((border_id_p_search + 1) % 4)
while True:
involved_border_ids.append(border_id_p_search)
if (border_id_p_search != border_id_p_end):
if (border_id_p_search == 0):
segments.append([cell_min_point[0], (cell_min_point[1] + cell_length_y)])
elif (border_id_p_search == 1):
segments.append([(cell_min_point[0] + cell_length_x), (cell_min_point[1] + cell_length_y)])
elif (border_id_p_search == 2):
segments.append([(cell_min_point[0] + cell_length_x), cell_min_point[1]])
elif (border_id_p_search == 3):
segments.append([cell_min_point[0], cell_min_point[1]])
border_id_p_search = ((border_id_p_search + 1) % 4)
else:
segments.append(point_end)
return (segments, list(set(involved_border_ids))) |
@action(suggest_parser)
def do_suggest(args: argparse.Namespace) -> None:
response = request(args.status_file, 'suggest', function=args.function, json=args.json, callsites=args.callsites, no_errors=args.no_errors, no_any=args.no_any, flex_any=args.flex_any, use_fixme=args.use_fixme, max_guesses=args.max_guesses)
check_output(response, verbose=False, junit_xml=None, perf_stats_file=None) |
def mmd(datasetA, datasetB, kernel):
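# Unbiased MMD^2 estimator: E[k(a,a')] + E[k(b,b')] - 2*E[k(a,b)], with the
# kernel diagonals removed from the within-set terms (both sets assumed size M).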
KAA = kernel.compute_K_symm(datasetA)
KAA_corrected = (KAA - np.diag(np.diag(KAA)))
KBB = kernel.compute_K_symm(datasetB)
KBB_corrected = (KBB - np.diag(np.diag(KBB)))
KAB = kernel.compute_K(datasetA, datasetB)
M = KAA.shape[0]
return np.sum(((((KAA_corrected / M) / (M - 1)) + ((KBB_corrected / M) / (M - 1))) - (((2 * KAB) / M) / M))) |
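# Hedged, self-contained sketch of the same unbiased MMD^2 estimator with an
# RBF kernel; the `kernel` object above is assumed to be GPflow-like, and this
# numpy stub is illustrative, not from the source.
import numpy as np
def _rbf(a, b, ls=1.0):
d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum((- 1))
return np.exp(((- d2) / (2 * (ls ** 2))))
A = np.random.randn(100, 2)
B = (np.random.randn(100, 2) + 3.0)
(KAA, KBB, KAB) = (_rbf(A, A), _rbf(B, B), _rbf(A, B))
M = KAA.shape[0]
mmd2 = ((((KAA - np.diag(np.diag(KAA))).sum() / (M * (M - 1))) + ((KBB - np.diag(np.diag(KBB))).sum() / (M * (M - 1)))) - ((2 * KAB.sum()) / (M * M))) |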
def test_connect_lambda():
class Top(ComponentLevel3):
def construct(s, x):
s.in_ = InPort(Bits32)
s.out = OutPort(Bits32)
s.out //= (lambda : ((s.in_ + x) + globalvar))
x = Top(3)
x.elaborate()
simple_sim_pass(x)
x.in_ = 10
x.tick()
assert (x.out == ((10 + 3) + 2))
y = Top(33)
y.elaborate()
simple_sim_pass(y)
y.in_ = 100
y.tick()
assert (y.out == ((100 + 33) + 2)) |
class TestSidekiqCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SidekiqWebCollector', {'password': 'TEST_PASSWORD'})
self.collector = SidekiqCollector(config, None)
def test_import(self):
self.assertTrue(SidekiqCollector)
@run_only_if_redis_is_available
@patch.object(Collector, 'publish')
def test_sidekiq_queue(self, publish_mock):
self.collector.config.update({'ports': '6379'})
patch_collector = patch.object(redis.Redis, 'smembers', Mock(return_value=['queue_1']))
length_collector = patch.object(redis.Redis, 'llen', Mock(return_value=123))
zcard_collector = patch.object(redis.Redis, 'zcard', Mock(return_value=100))
patch_collector.start()
length_collector.start()
zcard_collector.start()
self.collector.collect()
patch_collector.stop()
length_collector.stop()
zcard_collector.stop()
metrics = {'queue.6379.0.queue_1': 123, 'queue.6379.0.retry': 100, 'queue.6379.0.schedule': 100}
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_redis_is_available
@patch.object(Collector, 'publish')
def test_sidekiq_queue_with_databases(self, publish_mock):
self.collector.config.update({'ports': ['6379', '6380'], 'sentinel_ports': ['26379', '26380'], 'databases': 2})
patch_collector = patch.object(redis.Redis, 'smembers', Mock(return_value=['queue_1']))
length_collector = patch.object(redis.Redis, 'llen', Mock(return_value=123))
zcard_collector = patch.object(redis.Redis, 'zcard', Mock(return_value=100))
patch_collector.start()
length_collector.start()
zcard_collector.start()
self.collector.collect()
patch_collector.stop()
length_collector.stop()
zcard_collector.stop()
metrics = {'queue.6379.0.queue_1': 123, 'queue.6379.0.retry': 100, 'queue.6379.0.schedule': 100, 'queue.6380.0.queue_1': 123, 'queue.6380.0.retry': 100, 'queue.6380.0.schedule': 100, 'queue.6379.1.queue_1': 123, 'queue.6379.1.retry': 100, 'queue.6379.1.schedule': 100, 'queue.6380.1.queue_1': 123, 'queue.6380.1.retry': 100, 'queue.6380.1.schedule': 100}
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_redis_is_available
@patch.object(Collector, 'publish')
def test_sidekiq_queue_with_cluster_prefix(self, publish_mock):
self.collector.config.update({'cluster_prefix': 'test-sidekiq', 'sentinel_ports': '63790'})
patch_collector = patch.object(redis.Redis, 'smembers', Mock(return_value=['queue_1', 'queue_2']))
length_collector = patch.object(redis.Redis, 'llen', Mock(return_value=123))
zcard_collector = patch.object(redis.Redis, 'zcard', Mock(return_value=100))
patch_collector.start()
length_collector.start()
zcard_collector.start()
self.collector.collect()
patch_collector.stop()
length_collector.stop()
zcard_collector.stop()
metrics = {'queue.test-sidekiq.6379.0.queue_1': 123, 'queue.test-sidekiq.6379.0.schedule': 100, 'queue.test-sidekiq.6379.0.retry': 100}
self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics) |
class CatEmbeddings(nn.Module):
def __init__(self, _cardinalities_and_maybe_dimensions: Union[(list[int], list[tuple[(int, int)]])], d_embedding: Optional[int]=None, *, stack: bool=False) -> None:
assert _cardinalities_and_maybe_dimensions
spec = _cardinalities_and_maybe_dimensions
if (not ((isinstance(spec[0], tuple) and (d_embedding is None)) or (isinstance(spec[0], int) and (d_embedding is not None)))):
raise ValueError('Invalid arguments. Valid combinations are: (1) the first argument is a list of (cardinality, embedding)-tuples AND d_embedding is None (2) the first argument is a list of cardinalities AND d_embedding is an integer')
if (stack and (d_embedding is None)):
raise ValueError('stack can be True only when d_embedding is not None')
super().__init__()
spec_ = cast(list[tuple[(int, int)]], (spec if (d_embedding is None) else [(x, d_embedding) for x in spec]))
self._embeddings = nn.ModuleList()
for (cardinality, d_embedding) in spec_:
self._embeddings.append(nn.Embedding(cardinality, d_embedding))
self.stack = stack
self.reset_parameters()
def reset_parameters(self) -> None:
for module in self._embeddings:
_initialize_embeddings(module.weight, None)
def forward(self, x: Tensor) -> Tensor:
assert (x.ndim == 2)
assert (x.shape[1] == len(self._embeddings))
out = [module(column) for (module, column) in zip(self._embeddings, x.T)]
return (torch.stack(out, dim=1) if self.stack else torch.cat(out, dim=1)) |
@pytest.mark.cli
@PYPROJ_CLI_ENDPONTS
@pytest.mark.parametrize('option', [['-h'], []])
def test_sync(input_command, option, tmpdir):
with tmp_chdir(str(tmpdir)):
output = subprocess.check_output(((input_command + ['sync']) + option), stderr=subprocess.STDOUT).decode('utf-8')
assert ('Tool for synchronizing PROJ datum and transformation support data.' in output)
assert ('--bbox' in output)
assert ('--spatial-test' in output)
assert ('--source-id' in output)
assert ('--area-of-use' in output)
assert ('--file' in output)
assert ('--exclude-world-coverage' in output)
assert ('--include-already-downloaded' in output)
assert ('--list-files' in output)
assert ('--system-directory' in output)
assert ('--target-directory' in output)
assert ('-v, --verbose' in output) |
def get_saver(cfg: DictConfig) -> ModelCheckpoint:
args = dict(cfg[__key__].saver)
args['filename'] = args['filename'].format(experiment=cfg[__key__].name)
args = {str(k).lower(): v for (k, v) in args.items()}
args['dirpath'] = cfg.disk.model_dir
saver = ModelCheckpoint(**args)
if cfg.train_all:
saver.CHECKPOINT_NAME_LAST = (cfg.experiment.name + '-last')
else:
saver.CHECKPOINT_NAME_LAST = (args['filename'] + '-last')
return saver |
def get_pose_net(cfg, is_train, **kwargs):
num_layers = cfg.MODEL.EXTRA.NUM_LAYERS
style = cfg.MODEL.STYLE
kwargs['groups'] = cfg.MODEL.GROUPS
kwargs['width_per_group'] = cfg.MODEL.WIDTH_PER_GROUP
(block_class, layers) = resnet_spec[num_layers]
if (style == 'caffe'):
block_class = Bottleneck_CAFFE
model = PoseResNet(block_class, layers, cfg, **kwargs)
if (is_train and cfg.MODEL.INIT_WEIGHTS):
model.init_weights(cfg.MODEL.PRETRAINED)
if (cfg.MODEL.NAME == 'simple_distillation'):
tea_models = []
for i in range(cfg.TEACHER.NUM):
num_layers = cfg.TEACHER.NUM_LAYERS[i]
(block_class, layers) = resnet_spec[num_layers]
t_model = PoseResNet(block_class, layers, cfg, **kwargs)
t_model.init_weights(cfg.TEACHER.CKT[i])
for p in t_model.parameters():
p.requires_grad = False
t_model.eval()
t_model = torch.nn.DataParallel(t_model, device_ids=[int(i) for i in cfg.GPUS.split(',')]).cuda()
tea_models.append(t_model)
return (model, tea_models)
return model |
class RawMetadata(TypedDict, total=False):
metadata_version: str
name: str
version: str
platforms: List[str]
summary: str
description: str
keywords: List[str]
home_page: str
author: str
author_email: str
license: str
supported_platforms: List[str]
download_url: str
classifiers: List[str]
requires: List[str]
provides: List[str]
obsoletes: List[str]
maintainer: str
maintainer_email: str
requires_dist: List[str]
provides_dist: List[str]
obsoletes_dist: List[str]
requires_python: str
requires_external: List[str]
project_urls: Dict[(str, str)]
description_content_type: str
provides_extra: List[str]
dynamic: List[str] |