code |
|---|
def print_diff_dict(d1, d2, n):
for key in d1:
print('{}:{}{}'.format(key, key_to_tabs(n, key), (d1[key] - d2[key]))) |
class FakeArguments(object):
def __init__(self, config_file='', output_dir=''):
self.config_file = config_file
self.output_dir = output_dir |
def Main():
_ParseAndStripGTestFlags(sys.argv)
if (GTEST_OUTPUT_VAR_NAME in os.environ):
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main() |
@pytest.mark.parametrize('flatlist_as_rvec', [False, True])
def test_nested_ListOffsetArray_NumpyArray(flatlist_as_rvec):
v2a = ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 1, 5], dtype=np.int64)), ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 1, 4, 4, 6, 7], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]))))
layout = v2a
generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rvec)
lookup = ak._lookup.Lookup(layout, generator)
generator.generate(compiler)
ROOT.gInterpreter.Declare(f'''
void roottest_nested_ListOffsetArray_NumpyArray_{flatlist_as_rvec}(double* out, ssize_t length, ssize_t* ptrs) {{
auto obj = {generator.dataset()}[1];
out[0] = obj.size();
out[1] = obj[0].size();
out[2] = obj[0][0];
out[3] = obj[0][1];
out[4] = obj[0][2];
out[5] = obj[1].size();
out[6] = obj[2].size();
out[7] = obj[2][0];
out[8] = obj[2][1];
out[9] = obj[3].size();
out[10] = obj[3][0];
}}
''')
out = np.zeros(11, dtype=np.float64)
getattr(ROOT, f'roottest_nested_ListOffsetArray_NumpyArray_{flatlist_as_rvec}')(out, len(layout), lookup.arrayptrs)
assert (out.tolist() == [4.0, 3.0, 1.1, 2.2, 3.3, 0.0, 2.0, 4.4, 5.5, 1.0, 7.7]) |
def distributed_broadcast_scalars(scalars: List[Union[(int, float)]], num_total_examples: Optional[int]=None) -> torch.Tensor:
try:
tensorized_scalar = torch.tensor(scalars).cuda()
output_tensors = [tensorized_scalar.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensorized_scalar)
concat = torch.cat(output_tensors, dim=0)
if (num_total_examples is not None):
concat = concat[:num_total_examples]
return concat
except AssertionError:
raise AssertionError('Not currently using distributed training') |
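# Illustrative sketch (not part of the original row): how distributed_broadcast_scalars above
# might be called from a training loop. Assumes torch.distributed has been initialised and a
# CUDA device is available; the helper name and arguments below are examples only.
import torch.distributed as dist

def _gather_eval_losses(local_losses, dataset_size):
    if dist.is_available() and dist.is_initialized():
        # Returns a tensor holding every rank's scalars, truncated to dataset_size.
        return distributed_broadcast_scalars(local_losses, num_total_examples=dataset_size)
    raise AssertionError('Not currently using distributed training')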
class AutoModelForMultipleChoice(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING |
def test_empty_staterror():
spec = {'channels': [{'name': 'channel', 'samples': [{'name': 'sample', 'data': [10.0], 'modifiers': [{'name': 'staterror_channel', 'type': 'staterror', 'data': []}]}]}]}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec) |
def FedAvg(models, weights=None):
new_model = models[0]
state_dicts = [model.state_dict() for model in models]
state_dict = new_model.state_dict()
for key in models[1].state_dict():
state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], axis=0, weights=weights))
new_model.load_state_dict(state_dict)
return new_model |
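# Illustrative usage sketch (not part of the original row) for FedAvg above: average two small
# torch models with equal weights and check the result against the element-wise mean.
import numpy as np
import torch
import torch.nn as nn

def _fedavg_demo():
    models = [nn.Linear(4, 2) for _ in range(2)]
    # Compute the expected average before calling FedAvg, because FedAvg writes the
    # averaged weights back into models[0].
    expected = (models[0].weight.detach().numpy() + models[1].weight.detach().numpy()) / 2.0
    averaged = FedAvg(models, weights=[0.5, 0.5])
    assert np.allclose(averaged.state_dict()['weight'].numpy(), expected)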
class LinearIntParam(RandomHyperparameter):
def __init__(self, name, min_value, max_value):
super(LinearIntParam, self).__init__(name)
self._min = min_value
self._max = max_value
def generate_next_value(self):
return random.randint(self._min, self._max) |
class KerasModelValidation(ModelValidation):
def __init__(self, model: Model, fw_info: FrameworkInfo):
super(KerasModelValidation, self).__init__(model=model, fw_info=fw_info)
def validate_output_channel_consistency(self):
for layer in self.model.layers:
data_format = layer.get_config().get(CHANNELS_FORMAT)
if (data_format is not None):
assert (((data_format == CHANNELS_FORMAT_LAST) and (self.fw_info.out_channel_axis_mapping.get(layer) == ChannelAxis.NHWC.value)) or ((data_format == CHANNELS_FORMAT_FIRST) and (self.fw_info.out_channel_axis_mapping.get(layer) == ChannelAxis.NCHW.value))), f'Model can not have layers with different data formats.' |
def resnext101():
model = resnext101_32x8d()
model.conv1 = nn.Conv2d(1, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias=False)
model.avgpool = nn.AvgPool2d((7, 7), (1, 1))
model.fc = nn.Sequential(Lambda((lambda x: x.view(x.size(0), (- 1)))), Lambda((lambda x: (x.view(1, (- 1)) if (1 == len(x.size())) else x))), nn.Linear(2048, 1000))
return model |
def test_pyramid_reduce_gray():
(rows, cols) = image_gray.shape
out1 = pyramids.pyramid_reduce(image_gray, downscale=2, channel_axis=None)
assert_array_equal(out1.shape, ((rows / 2), (cols / 2)))
assert_almost_equal(out1.ptp(), 1.0, decimal=2)
out2 = pyramids.pyramid_reduce(image_gray, downscale=2, channel_axis=None, preserve_range=True)
assert_almost_equal((out2.ptp() / image_gray.ptp()), 1.0, decimal=2) |
def build_transforms_open(cfg, is_train=True, PIXEL_MEAN=[0.485, 0.456, 0.406], PIXEL_STD=[0.229, 0.224, 0.225]):
normalize_transform = T.Normalize(mean=PIXEL_MEAN, std=PIXEL_STD)
if is_train:
transform = T.Compose([T.Resize([cfg.height, cfg.width]), T.RandomHorizontalFlip(p=0.5), T.Pad(10), T.RandomCrop([cfg.height, cfg.width]), T.ToTensor(), normalize_transform, RandomErasing(probability=0.5, mean=PIXEL_MEAN)])
else:
transform = T.Compose([T.Resize([cfg.height, cfg.width]), T.ToTensor(), normalize_transform])
return transform |
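# Illustrative usage sketch (not part of the original row): build train/test transforms from a
# minimal, hypothetical cfg object carrying only the height/width fields the function reads.
from types import SimpleNamespace

_cfg = SimpleNamespace(height=256, width=128)
_train_tf = build_transforms_open(_cfg, is_train=True)   # resize, flip, pad+crop, normalise, random erasing
_test_tf = build_transforms_open(_cfg, is_train=False)   # resize + normalise only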
class EvalHookUDA(BaseEvalHook):
def __init__(self, *args, dynamic_intervals=None, out_dir=None, **kwargs):
super(EvalHookUDA, self).__init__(*args, **kwargs)
self.latest_results = None
self.out_dir = out_dir
self.debug = kwargs['debug']
self.dataset_name = kwargs['dataset_name']
self.use_dynamic_intervals = (dynamic_intervals is not None)
if self.use_dynamic_intervals:
(self.dynamic_milestones, self.dynamic_intervals) = _calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = (runner.epoch if self.by_epoch else runner.iter)
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
self.interval = self.dynamic_intervals[(step - 1)]
def before_train_epoch(self, runner):
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
if (not self._should_evaluate(runner)):
return
from mmdet.apis import single_gpu_test_uda
results = single_gpu_test_uda(runner.model, self.dataloader, show=False, out_dir=self.out_dir, debug=self.debug, dataset_name=self.dataset_name)
self.latest_results = results
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if (self.save_best and key_score):
self._save_ckpt(runner, key_score) |
def derivTest(fdemType, comp, src):
prb = getFDEMProblem(fdemType, comp, SrcType, freq)
print(f'{fdemType} formulation {src} - {comp}')
x0 = np.log((np.ones(prb.sigmaMap.nP) * CONDUCTIVITY))
if (addrandoms is True):
x0 = (x0 + ((np.random.randn(prb.sigmaMap.nP) * np.log(CONDUCTIVITY)) * 0.1))
def fun(x):
return (prb.dpred(x), (lambda x: prb.Jvec(x0, x)))
return tests.check_derivative(fun, x0, num=2, plotIt=False, eps=FLR) |
class SpeechCommands09(AbstractAudioDataset):
CLASSES = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
CLASS_TO_IDX = dict(zip(CLASSES, range(len(CLASSES))))
def __init__(self, path, bits=8, split='train', sample_len=16000, quantization='linear', return_type='autoregressive', drop_last=False, target_sr=None, dequantize=False, pad_len=None, **kwargs):
super().__init__(bits=bits, sample_len=sample_len, quantization=quantization, return_type=return_type, split=split, drop_last=drop_last, target_sr=target_sr, path=path, dequantize=dequantize, pad_len=pad_len, **kwargs)
def setup(self):
with open(join(self.path, 'validation_list.txt')) as f:
validation_files = set([line.rstrip() for line in f.readlines()])
with open(join(self.path, 'testing_list.txt')) as f:
test_files = set([line.rstrip() for line in f.readlines()])
self.file_names = []
for class_name in self.CLASSES:
self.file_names += [(class_name, file_name) for file_name in listdir(join(self.path, class_name)) if file_name.endswith('.wav')]
if (self.split == 'train'):
self.file_names = [join(self.path, class_name, file_name) for (class_name, file_name) in self.file_names if ((join(class_name, file_name) not in validation_files) and (join(class_name, file_name) not in test_files))]
elif (self.split == 'validation'):
self.file_names = [join(self.path, class_name, file_name) for (class_name, file_name) in self.file_names if (join(class_name, file_name) in validation_files)]
elif (self.split == 'test'):
self.file_names = [join(self.path, class_name, file_name) for (class_name, file_name) in self.file_names if (join(class_name, file_name) in test_files)]
def __getitem__(self, index):
item = super().__getitem__(index)
(x, y, *z) = item
if self.dequantize:
x = self.dequantizer(x).unsqueeze(1)
return (x, y, *z) |
@app.route('/name')  # decorator restored; assumes a Flask `app` instance
def name():
name = request.args.get('name')
return make_response('Your name is {}'.format(name)) |
def test_arrays_transformer_plain_list():
X = np.array([[0, 0], [1, 1]])
y = np.array([[0, 0], [1, 1]])
arrays_transformer = ArraysTransformer(X.tolist(), y.tolist())
(X_res, y_res) = arrays_transformer.transform(X, y)
assert isinstance(X_res, list)
assert isinstance(y_res, list) |
def test_bernoulli_ts_update_params():
policy = BernoulliTS(n_actions=2)
policy.action_counts_temp = np.array([4, 3])
policy.action_counts = np.copy(policy.action_counts_temp)
policy.reward_counts_temp = np.array([2.0, 0.0])
policy.reward_counts = np.copy(policy.reward_counts_temp)
action = 0
reward = 1.0
policy.update_params(action, reward)
assert np.array_equal(policy.action_counts, np.array([5, 3]))
next_reward = (2.0 + reward)
assert np.allclose(policy.reward_counts, np.array([next_reward, 0.0])) |
def process_files(wav_files, process_audio, sample_rate):
folders = set((os.path.dirname(tgt_file_name) for (_, tgt_file_name) in wav_files))
for folder in folders:
if (not os.path.exists(folder)):
os.makedirs(folder)
for (src_file_name, tgt_file_name) in tqdm(wav_files):
result = process_file(src_file_name, tgt_file_name, process_audio, sample_rate)
(yield (tgt_file_name, result)) |
def auc_score(net_ensemble, test_loader):
actual = []
posterior = []
for (x, y) in test_loader:
if opt.cuda:
x = x.cuda()
with torch.no_grad():
(_, out) = net_ensemble.forward(x)
prob = (1.0 - (1.0 / torch.exp(out)))
prob = prob.cpu().numpy().tolist()
posterior.extend(prob)
actual.extend(y.numpy().tolist())
score = auc(actual, posterior)
return score |
class Net(nn.Module):
def __init__(self, _, cfg):
super().__init__()
self.cfg = cfg
self.dim = dim = cfg.dim
self.out_dim = out_dim = cfg.out_dim
self.hidden_size = hidden_size = cfg.hidden_size
self.n_blocks = n_blocks = cfg.n_blocks
self.blocks = nn.ModuleList()
self.blocks.append(nn.Linear(dim, hidden_size))
for _ in range(n_blocks):
self.blocks.append(nn.Linear(hidden_size, hidden_size))
self.blocks.append(nn.Linear(hidden_size, out_dim))
self.act = Sine()
self.apply(sine_init)
self.blocks[0].apply(first_layer_sine_init)
if getattr(cfg, 'zero_init_last_layer', False):
torch.nn.init.constant_(self.blocks[(- 1)].weight, 0)
torch.nn.init.constant_(self.blocks[(- 1)].bias, 0)
def forward(self, x):
net = x
for block in self.blocks[:(- 1)]:
net = self.act(block(net))
out = self.blocks[(- 1)](net)
return out |
class search_engine():
def __init__(self, model_handle, config):
self.model = model_handle.model
self.config = config
self.model_handle = model_handle
self.save_file = config['answer_file']
if (not os.path.exists(os.path.dirname(self.save_file))):
os.makedirs(os.path.dirname(self.save_file))
def search(self, str_searchs, search_size=10):
instances = []
answers = []
for str_search in str_searchs:
instance = {}
str_search = ' '.join(str_search)
instance['doc_graph'] = build_desc_graph(str_search, file=None)
instance['doc_graph'] = normalize_des_graph(instance['doc_graph'])
instances.append(Graph(instance, docGraph=True, isLower='True'))
ex = self.build_batch_data(instances)
query_embedded = cal_query_features(self.model.network, ex)
client = Elasticsearch()
for index in range(query_embedded.shape[0]):
script_query = {'script_score': {'query': {'match_all': {}}, 'script': {'source': "cosineSimilarity(params.query_vector, doc['code_state']) + 1.0", 'params': {'query_vector': query_embedded[index].tolist()}}}}
response = client.search(index=self.config['index_name'], body={'size': search_size, 'query': script_query, '_source': {'includes': ['code_func', 'identifier', 'url']}})
for hit in response['hits']['hits']:
answers.append({'query': ' '.join(str_searchs[index]), 'function': hit['_source']['code_func'], 'identifier': hit['_source']['identifier'], 'url': hit['_source']['url'], 'score': hit['_score']})
df = pd.DataFrame(answers, columns=['query', 'function', 'identifier', 'url', 'score'])
df.to_csv(self.save_file, index=False)
print('Answer query finished')
def build_batch_data(self, instances):
doc_word_lengths = []
doc_words = []
doc_graphs = []
for (i, doc_graph) in enumerate(instances):
doc_idx = []
for word in doc_graph.graph['backbone_sequence']:
idx = self.model.vocab_model.word_vocab.getIndex(word)
doc_idx.append(idx)
doc_word_lengths.append(len(doc_idx))
doc_words.append(doc_idx)
doc_graphs.append(doc_graph.graph)
batch_doc_graphs = cons_batch_graph(doc_graphs, self.model.vocab_model.word_vocab)
doc_words = pad_2d_vals_no_size(doc_words)
doc_word_lengths = np.array(doc_word_lengths, dtype=np.int32)
doc_words = torch.LongTensor(doc_words)
doc_word_lengths = torch.LongTensor(doc_word_lengths)
batch_doc_graphs = vectorize_batch_graph(batch_doc_graphs, self.config, self.model.vocab_model.edge_vocab)
with torch.set_grad_enabled(False):
example = {'batch_size': len(instances), 'doc_graphs': batch_doc_graphs, 'targets': (doc_words.to(self.model_handle.device) if self.model_handle.device else doc_words), 'target_lens': (doc_word_lengths.to(self.model_handle.device) if self.model_handle.device else doc_word_lengths)}
return example
def test(self, query_embedded):
client = Elasticsearch()
script_query = {'script_score': {'query': {'match_all': {}}, 'script': {'source': "cosineSimilarity(params.query_vector, doc['code_vector']) + 1.0", 'params': {'query_vector': query_embedded}}}}
response = client.search(index=self.config['index_name'], body={'size': 1, 'query': script_query, '_source': {'includes': ['code_func']}})
for hit in response['hits']['hits']:
print('score: {}'.format(hit['_score']))
print(hit['_source']['code_func'])
print('') |
@pytest.mark.vcr
@requires_api_key('OPENAI_API_KEY')
def test_generate_aiconfig_automatic_fallback(patched_api_requestor):
user_inputs = ['T&GFOIBECC()!*', 'Chef-GPT', 'an AI designed to browse bake a cake.', 'Purchase ingredients', 'Bake a cake', '', '']
with patch('autogpt.utils.session.prompt', side_effect=user_inputs):
ai_config = prompt_user()
assert isinstance(ai_config, AIConfig)
assert (ai_config.ai_name == 'Chef-GPT')
assert (ai_config.ai_role == 'an AI designed to browse bake a cake.')
assert (ai_config.ai_goals == ['Purchase ingredients', 'Bake a cake']) |
def add_additional_arguments(parser):
parser.add_argument('--log_interval', default=10, type=int)
parser.add_argument('--cuda', default=True, type=utils.bool_flag, nargs='?', const=True, help='use GPU')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--debug', default=False, type=utils.bool_flag, nargs='?', const=True, help='run in debug mode')
(args, _) = parser.parse_known_args()
if args.debug:
parser.set_defaults(batch_size=1, log_interval=1, eval_interval=5) |
def train(train_loader, model, criterion, optimizer, epoch):
global opt
losses = AverageMeter()
model.train()
criterion.train()
step = (epoch * len(train_loader))
pred_gt_same = []
for (i, (box, cls, feature, lfeat, lrel, sents, sents_gt, gt_boxes, img_ids, sent_ids)) in enumerate(train_loader):
step += 1
if (opt['gpus'] is not None):
box = box.cuda()
cls = cls.cuda()
feature = feature.cuda()
lfeat = lfeat.cuda()
lrel = lrel.cuda()
sents = sents.cuda()
sents_gt = sents_gt.cuda()
score = model(feature, cls, lfeat, lrel, sents)
(loss, score) = criterion(score, box, cls, sents_gt)
losses.update(loss.item())
cls = to_numpy(cls)
final_score = to_numpy(score.detach())
final_score[(cls == (- 1))] = (- 999)
pred_ind = np.argmax(final_score, 1)
sents_gt = to_numpy(sents_gt)
for j in range(pred_ind.size):
if (sents_gt[j] == pred_ind[j]):
pred_gt_same.append(1)
else:
pred_gt_same.append(0)
optimizer.zero_grad()
loss.backward()
clip_gradient(optimizer, opt['grad_clip'])
optimizer.step()
if ((i % args.print_freq) == 0):
if (i != 0):
same = (np.sum(pred_gt_same[((- args.print_freq) * opt['batch_size']):]) / float((args.print_freq * opt['batch_size'])))
print('Epoch: [{0}][{1}/{2}]\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec {same:.4f}'.format(epoch, i, len(train_loader), loss=losses, same=same)) |
@total_ordering  # decorator restored from residue; assumed to be functools.total_ordering
class FractionWithFactoredDenominator(RingElement):
def __init__(self, parent, numerator, denominator_factored, reduce=True):
super().__init__(parent)
from sage.rings.semirings.non_negative_integer_semiring import NN
self._numerator = parent._numerator_ring(numerator)
self._denominator_factored = list(((parent._denominator_ring(d), NN(n)) for (d, n) in denominator_factored))
R = self.denominator_ring
if ((numerator in R) and reduce):
numer = R(self._numerator)
df = self._denominator_factored
new_df = []
for (q, e) in df:
ee = e
(quo, rem) = numer.quo_rem(q)
while ((rem == 0) and (ee > 0)):
ee -= 1
numer = quo
(quo, rem) = numer.quo_rem(q)
if (ee > 0):
new_df.append((q, ee))
self._numerator = numer
self._denominator_factored = new_df
def numerator(self):
return self._numerator
def denominator(self):
return prod(((q ** e) for (q, e) in self.denominator_factored()))
def denominator_factored(self):
return self._denominator_factored
@property
def denominator_ring(self):
return self.parent()._denominator_ring
@property
def numerator_ring(self):
return self.parent()._numerator_ring
def dimension(self):
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.multi_polynomial_ring_base import is_MPolynomialRing
R = self.denominator_ring
if (is_PolynomialRing(R) or is_MPolynomialRing(R)):
return R.ngens()
raise NotImplementedError('only polynomial rings are supported as base')
def quotient(self):
return (self.numerator() / self.denominator())
def _repr_(self):
return repr((self.numerator(), self.denominator_factored()))
_richcmp_ = richcmp_by_eq_and_lt('_eq_', '_lt_')
def _eq_(self, other):
return ((self.numerator() * other.denominator()) == (other.numerator() * self.denominator()))
def _total_order_key_(self):
return (len(self.denominator_factored()), self.denominator(), self.numerator())
def univariate_decomposition(self):
if (self.dimension() > 1):
return FractionWithFactoredDenominatorSum([self])
R = self.denominator_ring
p = self.numerator()
q = self.denominator()
try:
(whole, p) = R(p).quo_rem(q)
mn = R.one()
except (TypeError, ValueError):
whole = R(0)
mn = p
p = R.one()
df = self.denominator_factored()
decomp = [self.parent()(whole, [])]
denominator = prod(((b ** n) for (b, n) in df))
for (a, m) in df:
am = (a ** m)
(q, r) = denominator.quo_rem(am)
assert (r == 0)
numer = ((p * q.inverse_mod(am)) % am)
decomp.append(self.parent()((mn * numer), [(a, m)]))
return FractionWithFactoredDenominatorSum(decomp)
def nullstellensatz_certificate(self):
R = self.denominator_ring
df = self.denominator_factored()
J = R.ideal([(q ** e) for (q, e) in df])
if (R.one() in J):
return R.one().lift(J)
return None
def nullstellensatz_decomposition(self):
L = self.nullstellensatz_certificate()
if (L is None):
return FractionWithFactoredDenominatorSum([self])
decomp = FractionWithFactoredDenominatorSum()
p = self.numerator()
df = self.denominator_factored()
m = len(df)
iteration1 = FractionWithFactoredDenominatorSum([self.parent()((p * L[i]), [df[j] for j in range(m) if (j != i)]) for i in range(m) if (L[i] != 0)])
for r in iteration1:
decomp.extend(r.nullstellensatz_decomposition())
return decomp._combine_like_terms_().whole_and_parts()
def algebraic_dependence_certificate(self):
R = self.denominator_ring
df = self.denominator_factored()
if (not df):
return R.ideal()
m = len(df)
F = R.base_ring()
Xs = list(R.gens())
d = len(Xs)
S = 'S'
while (S in [str(x) for x in Xs]):
S = (S + 'S')
Ss = [(S + str(i)) for i in range(m)]
T = 'T'
while (T in [str(x) for x in Xs]):
T = (T + 'T')
Ts = [(T + str(i)) for i in range(m)]
Vs = (([str(x) for x in Xs] + Ss) + Ts)
RR = PolynomialRing(F, Vs)
Xs = RR.gens()[:d]
Ss = RR.gens()[d:(d + m)]
Ts = RR.gens()[(d + m):(d + (2 * m))]
J = RR.ideal(([(Ss[j] - RR(df[j][0])) for j in range(m)] + [((Ss[j] ** df[j][1]) - Ts[j]) for j in range(m)]))
J = J.elimination_ideal((Xs + Ss))
RRR = PolynomialRing(F, [str(t) for t in Ts], order='negdeglex')
return RRR.ideal(J)
def algebraic_dependence_decomposition(self, whole_and_parts=True):
J = self.algebraic_dependence_certificate()
if (not J):
return FractionWithFactoredDenominatorSum([self])
decomp = FractionWithFactoredDenominatorSum()
p = self.numerator()
df = self.denominator_factored()
m = len(df)
g = J.gens()[0]
new_vars = J.ring().gens()
gg = ((g.lt() - g) / g.lc())
numers = map(prod, zip(gg.coefficients(), gg.monomials()))
e = list(g.lt().exponents())[0:m]
denoms = [(new_vars[j], (e[0][j] + 1)) for j in range(m)]
FFPD = FractionWithFactoredDenominatorRing(J.ring())
iteration1_temp = FractionWithFactoredDenominatorSum([FFPD(a, denoms) for a in numers])._combine_like_terms_()
qpowsub = {new_vars[j]: (df[j][0] ** df[j][1]) for j in range(m)}
iteration1 = FractionWithFactoredDenominatorSum()
for r in iteration1_temp:
num1 = (p * J.ring()(r.numerator()).subs(qpowsub))
denoms1 = []
for (q, e) in r.denominator_factored():
j = new_vars.index(q)
denoms1.append((df[j][0], (df[j][1] * e)))
iteration1.append(self.parent()(num1, denoms1))
for r in iteration1:
decomp.extend(r.algebraic_dependence_decomposition())
return decomp._combine_like_terms_().whole_and_parts()
def leinartas_decomposition(self):
if (self.dimension() == 1):
return self.univariate_decomposition()
temp = self.nullstellensatz_decomposition()
decomp = FractionWithFactoredDenominatorSum()
for r in temp:
decomp.extend(r.algebraic_dependence_decomposition())
return decomp._combine_like_terms_().whole_and_parts()
def cohomology_decomposition(self):
from sage.calculus.functions import jacobian
from sage.arith.misc import XGCD as xgcd
from sage.sets.set import Set
R = self.denominator_ring
df = self.denominator_factored()
n = len(df)
if (sum((e for (q, e) in df)) <= n):
return FractionWithFactoredDenominatorSum([self])
decomp = FractionWithFactoredDenominatorSum()
p = self.numerator()
qs = [q for (q, e) in df]
X = sorted(R.gens())
var_sets_n = sorted((sorted(s) for s in Set(X).subsets(n)))
Par = self.parent()
dets = [R(jacobian(qs, x).determinant()) for x in var_sets_n]
if (self.dimension() == 1):
L = xgcd(qs[0], dets[0])[1:]
else:
L = R.one().lift(R.ideal((qs + dets)))
iteration1 = FractionWithFactoredDenominatorSum()
for i in range(n):
if (L[i] == 0):
continue
new_df = [list(t) for t in df]
if (new_df[i][1] > 1):
new_df[i][1] -= 1
else:
del new_df[i]
iteration1.append(Par((p * L[i]), new_df))
for j in range(n):
if (df[j][1] > 1):
J = j
break
new_df = [list(t) for t in df]
new_df[J][1] -= 1
for (k, x) in enumerate(var_sets_n):
if (L[(n + k)] == 0):
continue
jac = jacobian(([SR((p * L[(n + k)]))] + [SR(qs[j]) for j in range(n) if (j != J)]), [SR(xx) for xx in x])
det = jac.determinant()
iteration1.append(Par(((((- 1) ** J) * det) / new_df[J][1]), new_df))
for r in iteration1:
decomp.extend(r.cohomology_decomposition())
return decomp._combine_like_terms_().whole_and_parts()
def asymptotic_decomposition(self, alpha, asy_var=None):
R = self.denominator_ring
d = self.dimension()
n = len(self.denominator_factored())
X = [SR(x) for x in R.gens()]
decomp1 = FractionWithFactoredDenominatorSum([self])
if (n > d):
decomp1 = decomp1[0].leinartas_decomposition()
if (asy_var is None):
asy_var = var('r')
cauchy_stuff = prod([(X[j] ** (((- alpha[j]) * asy_var) - 1)) for j in range(d)])
decomp2 = FractionWithFactoredDenominatorSum()
for f in decomp1:
ff = self.parent()((f.numerator() * cauchy_stuff), f.denominator_factored())
decomp2.extend(ff.cohomology_decomposition())
decomp2 = decomp2._combine_like_terms_()
decomp3 = FractionWithFactoredDenominatorSum()
for f in decomp2:
ff = self.parent()((f.numerator() / cauchy_stuff).simplify_full().collect(asy_var), f.denominator_factored())
decomp3.append(ff)
return decomp3
def asymptotics(self, p, alpha, N, asy_var=None, numerical=0, verbose=False):
R = self.denominator_ring
p = coerce_point(R, p)
if (asy_var is None):
asy_var = var('r')
d = self.dimension()
X = list(R.gens())
alpha = list(alpha)
df = self.denominator_factored()
n = len(df)
i = (d - 1)
while (0 in [(X[i] * diff(h, X[i])).subs(p) for (h, e) in df]):
i -= 1
coordinate = i
if (n == 1):
return self.asymptotics_smooth(p, alpha, N, asy_var, coordinate, numerical, verbose=verbose)
return self.asymptotics_multiple(p, alpha, N, asy_var, coordinate, numerical, verbose=verbose)
def asymptotics_smooth(self, p, alpha, N, asy_var, coordinate=None, numerical=0, verbose=False):
from sage.calculus.functions import jacobian
from sage.calculus.var import function
from sage.functions.other import factorial
from sage.misc.functional import sqrt
from sage.functions.gamma import gamma
from sage.functions.log import exp, log
from sage.matrix.constructor import matrix
from sage.modules.free_module_element import vector
from sage.symbolic.constants import pi
from sage.symbolic.relation import solve
from sage.rings.cc import CC
from sage.rings.rational_field import QQ
R = self.denominator_ring
d = self.dimension()
I = sqrt((- ZZ.one()))
X = [SR(x) for x in R.gens()]
G = SR(self.numerator())
H = SR(self.denominator())
p = {SR(x): p[x] for x in R.gens()}
alpha = [SR(a) for a in alpha]
if (coordinate is not None):
x = X.pop(coordinate)
X.append(x)
a = alpha.pop(coordinate)
alpha.append(a)
if (d == 1):
det = jacobian(H, X).subs(p).determinant().abs()
exp_scale = prod([(p[X[i]] ** (- alpha[i])).subs(p) for i in range(d)])
subexp_part = ((- G.subs(p)) / (det * prod(p.values())))
if numerical:
exp_scale = exp_scale.n(digits=numerical)
subexp_part = subexp_part.n(digits=numerical)
return (((exp_scale ** asy_var) * subexp_part), exp_scale, subexp_part)
if (vector(p.values()) in (QQ ** d)):
P = p
else:
sP = [var(('p' + str(j))) for j in range(d)]
P = {X[j]: sP[j] for j in range(d)}
p = {sP[j]: p[X[j]] for j in range(d)}
if verbose:
print('Creating auxiliary functions...')
h = function('h')(*tuple(X[:(d - 1)]))
U = function('U')(*tuple(X))
Gcheck = (((- G) / U) * (h / X[(d - 1)]))
A = (Gcheck.subs({X[(d - 1)]: (ZZ.one() / h)}) / h)
t = 't'
L = [str(elt) for elt in X]
while (t in L):
t = (t + 't')
T = [var((t + str(i))) for i in range((d - 1))]
e = {X[i]: (P[X[i]] * exp((I * T[i]))) for i in range((d - 1))}
ht = h.subs(e)
At = A.subs(e)
Phit = ((- log((P[X[(d - 1)]] * ht))) + (I * sum([((alpha[i] / alpha[(d - 1)]) * T[i]) for i in range((d - 1))])))
Tstar = {t: ZZ.zero() for t in T}
atP = P.copy()
atP.update({h.subs(P): (ZZ.one() / P[X[(d - 1)]])})
hderivs1 = {}
for i in range((d - 1)):
s = solve(diff(H.subs({X[(d - 1)]: (ZZ.one() / h)}), X[i]), diff(h, X[i]))[0].rhs().simplify()
hderivs1.update({diff(h, X[i]): s})
atP.update({diff(h, X[i]).subs(P): s.subs(P).subs(atP)})
hderivs = diff_all(h, X[0:(d - 1)], (2 * N), sub=hderivs1, rekey=h)
for k in hderivs:
atP.update({k.subs(P): hderivs[k].subs(atP)})
Hderivs = diff_all(H, X, (2 * N), ending=[X[(d - 1)]], sub_final=P)
if verbose:
print('Computing derivatives of auxiliary functions...')
Uderivs = {}
atP.update({U.subs(P): diff(H, X[(d - 1)]).subs(P)})
end = [X[(d - 1)]]
Hcheck = (X[(d - 1)] - (ZZ.one() / h))
k = (H.polynomial(CC).degree() - 1)
if (k == 0):
for l in range(1, ((2 * N) + 1)):
for s in combinations_with_replacement(X, l):
Uderivs[diff(U, list(s)).subs(P)] = ZZ.zero()
elif ((k > 0) and (k < (2 * N))):
all_zero = True
Uderivs = diff_prod(Hderivs, U, Hcheck, X, range(1, (k + 1)), end, Uderivs, atP)
if any(Uderivs.values()):
all_zero = False
if all_zero:
for l in range((k + 1), ((2 * N) + 1)):
for s in combinations_with_replacement(X, l):
Uderivs.update({diff(U, list(s)).subs(P): ZZ.zero()})
else:
Uderivs = diff_prod(Hderivs, U, Hcheck, X, range((k + 1), ((2 * N) + 1)), end, Uderivs, atP)
else:
Uderivs = diff_prod(Hderivs, U, Hcheck, X, range(1, ((2 * N) + 1)), end, Uderivs, atP)
atP.update(Uderivs)
if (d == 2):
v = Integer(2)
Phitderiv = diff(Phit, T[0], 2)
splat = Phitderiv.subs(Tstar).subs(atP).subs(p).simplify()
while (splat == 0):
v += 1
if (v > (2 * N)):
hderivs.update({diff(h, X[0], v): diff(hderivs[diff(h, X[0], (v - 1))], X[0]).subs(hderivs1)})
atP.update({diff(h, X[0], v).subs(P): hderivs[diff(h, X[0], v)].subs(atP)})
Phitderiv = diff(Phitderiv, T[0])
splat = Phitderiv.subs(Tstar).subs(atP).subs(p).simplify()
if ((d == 2) and (v > 2)):
t = T[0]
a = (splat / factorial(v))
Phitu = (Phit - (a * (t ** v)))
if verbose:
print('Computing derivatives of more auxiliary functions...')
AA = function('AA')(t)
BB = function('BB')(t)
if (v.mod(2) == 0):
At_derivs = diff_all(At, T, ((2 * N) - 2), sub=hderivs1, sub_final=[Tstar, atP], rekey=AA)
Phitu_derivs = diff_all(Phitu, T, (((2 * N) - 2) + v), sub=hderivs1, sub_final=[Tstar, atP], zero_order=(v + 1), rekey=BB)
else:
At_derivs = diff_all(At, T, (N - 1), sub=hderivs1, sub_final=[Tstar, atP], rekey=AA)
Phitu_derivs = diff_all(Phitu, T, ((N - 1) + v), sub=hderivs1, sub_final=[Tstar, atP], zero_order=(v + 1), rekey=BB)
AABB_derivs = At_derivs
AABB_derivs.update(Phitu_derivs)
AABB_derivs[AA] = At.subs(Tstar).subs(atP)
AABB_derivs[BB] = Phitu.subs(Tstar).subs(atP)
if verbose:
print('Computing second order differential operator actions...')
DD = diff_op_simple(AA, BB, AABB_derivs, t, v, a, N)
L = []
if (v.mod(2) == 0):
for k in range(N):
L.append(sum([(((((- 1) ** l) * gamma(((((2 * k) + (v * l)) + 1) / v))) / (factorial(l) * factorial(((2 * k) + (v * l))))) * DD[(k, l)]) for l in range(0, ((2 * k) + 1))]))
chunk = (((a ** ((- 1) / v)) / (pi * v)) * sum([(((alpha[(d - 1)] ** ((- ((2 * k) + 1)) / v)) * L[k]) * (asy_var ** ((- ((2 * k) + 1)) / v))) for k in range(N)]))
else:
zeta = exp(((I * pi) / (2 * v)))
for k in range(N):
L.append(sum([((((((- 1) ** l) * gamma((((k + (v * l)) + 1) / v))) / (factorial(l) * factorial((k + (v * l))))) * ((zeta ** ((k + (v * l)) + 1)) + (((- 1) ** (k + (v * l))) * (zeta ** (- ((k + (v * l)) + 1)))))) * DD[(k, l)]) for l in range(0, (k + 1))]))
chunk = (((abs(a) ** ((- 1) / v)) / ((2 * pi) * v)) * sum([(((alpha[(d - 1)] ** ((- (k + 1)) / v)) * L[k]) * (asy_var ** ((- (k + 1)) / v))) for k in range(N)]))
else:
Phit1 = jacobian(Phit, T).subs(hderivs1)
a = jacobian(Phit1, T).subs(hderivs1).subs(Tstar).subs(atP)
a_inv = a.inverse()
Phitu = (Phit - ((((1 / QQ(2)) * matrix([T])) * a) * matrix([T]).transpose()))
Phitu = Phitu[0][0]
if verbose:
print('Computing derivatives of more auxiliary functions...')
AA = function('AA')(*tuple(T))
At_derivs = diff_all(At, T, ((2 * N) - 2), sub=hderivs1, sub_final=[Tstar, atP], rekey=AA)
BB = function('BB')(*tuple(T))
Phitu_derivs = diff_all(Phitu, T, (2 * N), sub=hderivs1, sub_final=[Tstar, atP], rekey=BB, zero_order=3)
AABB_derivs = At_derivs
AABB_derivs.update(Phitu_derivs)
AABB_derivs[AA] = At.subs(Tstar).subs(atP)
AABB_derivs[BB] = Phitu.subs(Tstar).subs(atP)
if verbose:
print('Computing second order differential operator actions...')
DD = diff_op(AA, BB, AABB_derivs, T, a_inv, 1, N)
L = []
for k in range(N):
L.append(sum([(DD[(0, k, l)] / (((((- 1) ** k) * (2 ** (l + k))) * factorial(l)) * factorial((l + k)))) for l in range(0, ((2 * k) + 1))]))
chunk = sum([((((((2 * pi) ** ((1 - d) / Integer(2))) * (a.determinant() ** ((- ZZ.one()) / Integer(2)))) * (alpha[(d - 1)] ** (((ZZ.one() - d) / Integer(2)) - k))) * L[k]) * (asy_var ** (((ZZ.one() - d) / Integer(2)) - k))) for k in range(N)])
chunk = chunk.subs(p).simplify()
coeffs = chunk.coefficients(asy_var)
coeffs.reverse()
coeffs = coeffs[:N]
if numerical:
subexp_part = sum([(co[0].subs(p).n(digits=numerical) * (asy_var ** co[1])) for co in coeffs])
exp_scale = prod([(P[X[i]] ** (- alpha[i])).subs(p) for i in range(d)]).n(digits=numerical)
else:
subexp_part = sum([(co[0].subs(p) * (asy_var ** co[1])) for co in coeffs])
exp_scale = prod([(P[X[i]] ** (- alpha[i])).subs(p) for i in range(d)])
return (((exp_scale ** asy_var) * subexp_part), exp_scale, subexp_part)
def asymptotics_multiple(self, p, alpha, N, asy_var, coordinate=None, numerical=0, verbose=False):
from itertools import product
from sage.calculus.functions import jacobian
from sage.calculus.var import function
from sage.combinat.combinat import stirling_number1
from sage.functions.log import exp, log
from sage.functions.other import factorial
from sage.misc.functional import sqrt
from sage.matrix.constructor import matrix
from sage.misc.mrange import xmrange
from sage.modules.free_module_element import vector
from sage.rings.cc import CC
from sage.arith.misc import binomial
from sage.rings.rational_field import QQ
from sage.symbolic.constants import pi
from sage.symbolic.relation import solve
R = self.denominator_ring
p = coerce_point(R, p)
d = self.dimension()
I = sqrt((- ZZ.one()))
X = [SR(x) for x in R.gens()]
G = SR(self.numerator())
H = [SR(h) for (h, e) in self.denominator_factored()]
Hprod = prod(H)
n = len(H)
P = {SR(x): p[x] for x in R.gens()}
Sstar = self._crit_cone_combo(p, alpha, coordinate)
if (coordinate is not None):
x = X.pop(coordinate)
X.append(x)
a = alpha.pop(coordinate)
alpha.append(a)
if (n == d):
det = jacobian(H, X).subs(P).determinant().abs()
exp_scale = prod([(P[X[i]] ** (- alpha[i])).subs(P) for i in range(d)])
subexp_part = (G.subs(P) / (det * prod(P.values())))
if numerical:
exp_scale = exp_scale.n(digits=numerical)
subexp_part = subexp_part.n(digits=numerical)
return (((exp_scale ** asy_var) * subexp_part), exp_scale, subexp_part)
if (vector(P.values()) not in (QQ ** d)):
sP = [var(('p' + str(j))) for j in range(d)]
P = {X[j]: sP[j] for j in range(d)}
p = {sP[j]: p[X[j]] for j in range(d)}
if verbose:
print('Creating auxiliary functions...')
t = 't'
L = [str(elt) for elt in X]
while (t in L):
t = (t + 't')
T = [var((t + str(i))) for i in range((d - 1))]
s = 's'
while (s in L):
s = (s + 't')
S = [var((s + str(i))) for i in range((n - 1))]
Sstar = {S[j]: Sstar[j] for j in range((n - 1))}
thetastar = {t: ZZ.zero() for t in T}
thetastar.update(Sstar)
h = [function(('h' + str(j)))(*tuple(X[:(d - 1)])) for j in range(n)]
U = function('U')(*tuple(X))
Hcheck = prod([(X[(d - 1)] - (ZZ.one() / h[j])) for j in range(n)])
Gcheck = (((- G) / U) * prod([((- h[j]) / X[(d - 1)]) for j in range(n)]))
A = [((((- 1) ** (n - 1)) * (X[(d - 1)] ** ((- n) + j))) * diff(Gcheck.subs({X[(d - 1)]: (ZZ.one() / X[(d - 1)])}), X[(d - 1)], j)) for j in range(n)]
e = {X[i]: (P[X[i]] * exp((I * T[i]))) for i in range((d - 1))}
ht = [hh.subs(e) for hh in h]
hsumt = (sum([(S[j] * ht[j]) for j in range((n - 1))]) + ((ZZ.one() - sum(S)) * ht[(n - 1)]))
At = [AA.subs(e).subs({X[(d - 1)]: hsumt}) for AA in A]
Phit = ((- log((P[X[(d - 1)]] * hsumt))) + (I * sum([((alpha[i] / alpha[(d - 1)]) * T[i]) for i in range((d - 1))])))
atP = P.copy()
atP.update({hh.subs(P): (ZZ.one() / P[X[(d - 1)]]) for hh in h})
hderivs1 = {}
for (i, j) in xmrange([(d - 1), n], tuple):
s = solve(diff(H[j].subs({X[(d - 1)]: (ZZ.one() / h[j])}), X[i]), diff(h[j], X[i]))[0].rhs().simplify()
hderivs1.update({diff(h[j], X[i]): s})
atP.update({diff(h[j], X[i]).subs(P): s.subs(P).subs(atP)})
hderivs = diff_all(h, X[0:(d - 1)], (2 * N), sub=hderivs1, rekey=h)
for k in hderivs:
atP.update({k.subs(P): hderivs[k].subs(atP)})
if verbose:
print('Computing derivatives of auxiliary functions...')
m = min(n, N)
end = [X[(d - 1)] for j in range(n)]
Hprodderivs = diff_all(Hprod, X, (((2 * N) - 2) + n), ending=end, sub_final=P)
atP.update({U.subs(P): (diff(Hprod, X[(d - 1)], n).subs(P) / factorial(n))})
Uderivs = {}
k = (Hprod.polynomial(CC).degree() - n)
if (k == 0):
for l in range(1, (((2 * N) - 2) + m)):
for s in combinations_with_replacement(X, l):
Uderivs[diff(U, list(s)).subs(P)] = ZZ.zero()
elif ((k > 0) and (k < ((((2 * N) - 2) + m) - 1))):
all_zero = True
Uderivs = diff_prod(Hprodderivs, U, Hcheck, X, range(1, (k + 1)), end, Uderivs, atP)
if any(Uderivs.values()):
all_zero = False
if all_zero:
for l in range((k + 1), (((2 * N) - 2) + m)):
for s in combinations_with_replacement(X, l):
Uderivs.update({diff(U, list(s)).subs(P): ZZ.zero()})
else:
Uderivs = diff_prod(Hprodderivs, U, Hcheck, X, range((k + 1), (((2 * N) - 2) + m)), end, Uderivs, atP)
else:
Uderivs = diff_prod(Hprodderivs, U, Hcheck, X, range(1, (((2 * N) - 2) + m)), end, Uderivs, atP)
atP.update(Uderivs)
Phit1 = jacobian(Phit, (T + S)).subs(hderivs1)
a = jacobian(Phit1, (T + S)).subs(hderivs1).subs(thetastar).subs(atP)
a_inv = a.inverse()
Phitu = (Phit - ((((1 / Integer(2)) * matrix([(T + S)])) * a) * matrix([(T + S)]).transpose()))
Phitu = Phitu[0][0]
if verbose:
print('Computing derivatives of more auxiliary functions...')
AA = [function(('A' + str(j)))(*tuple((T + S))) for j in range(n)]
At_derivs = diff_all(At, (T + S), ((2 * N) - 2), sub=hderivs1, sub_final=[thetastar, atP], rekey=AA)
BB = function('BB')(*tuple((T + S)))
Phitu_derivs = diff_all(Phitu, (T + S), (2 * N), sub=hderivs1, sub_final=[thetastar, atP], rekey=BB, zero_order=3)
AABB_derivs = At_derivs
AABB_derivs.update(Phitu_derivs)
for j in range(n):
AABB_derivs[AA[j]] = At[j].subs(thetastar).subs(atP)
AABB_derivs[BB] = Phitu.subs(thetastar).subs(atP)
if verbose:
print('Computing second-order differential operator actions...')
DD = diff_op(AA, BB, AABB_derivs, (T + S), a_inv, n, N)
L = {}
for (j, k) in product(range(min(n, N)), range(max(0, ((N - 1) - n)), N)):
if ((j + k) <= (N - 1)):
L[(j, k)] = sum([(DD[(j, k, l)] / (((((- 1) ** k) * (2 ** (k + l))) * factorial(l)) * factorial((k + l)))) for l in range(((2 * k) + 1))])
det = ((a.determinant() ** ((- 1) / Integer(2))) * ((2 * pi) ** ((n - d) / Integer(2))))
chunk = (det * sum([(((alpha[(d - 1)] * asy_var) ** (((n - d) / Integer(2)) - q)) * sum([(((L[(j, k)] * binomial((n - 1), j)) * stirling_number1((n - j), ((n + k) - q))) * ((- 1) ** ((q - j) - k))) for (j, k) in product(range((min((n - 1), q) + 1)), range(max(0, (q - n)), (q + 1))) if ((j + k) <= q)])) for q in range(N)]))
chunk = chunk.subs(P).simplify()
coeffs = chunk.coefficients(asy_var)
coeffs.reverse()
coeffs = coeffs[:N]
if numerical:
subexp_part = sum([(co[0].subs(p).n(digits=numerical) * (asy_var ** co[1])) for co in coeffs])
exp_scale = prod([(P[X[i]] ** (- alpha[i])).subs(p) for i in range(d)]).n(digits=numerical)
else:
subexp_part = sum([(co[0].subs(p) * (asy_var ** co[1])) for co in coeffs])
exp_scale = prod([(P[X[i]] ** (- alpha[i])).subs(p) for i in range(d)])
return (((exp_scale ** asy_var) * subexp_part), exp_scale, subexp_part)
def _crit_cone_combo(self, p, alpha, coordinate=None):
from sage.matrix.constructor import matrix
from sage.symbolic.relation import solve
R = self.denominator_ring
p = coerce_point(R, p)
d = self.dimension()
n = len(self.denominator_factored())
Gamma = matrix([direction(v, coordinate) for v in self.log_grads(p)])
beta = direction(alpha, coordinate)
V = [var(('sss' + str(i))) for i in range(n)]
M = (matrix(V) * Gamma)
eqns = [(M[0][j] == beta[j]) for j in range(d)]
s = solve(eqns, V, solution_dict=True)[0]
return [s[v] for v in V]
def grads(self, p):
R = self.denominator_ring
p = coerce_point(R, p)
X = R.gens()
d = self.dimension()
H = [h for (h, e) in self.denominator_factored()]
n = len(H)
return [tuple([diff(H[i], X[j]).subs(p) for j in range(d)]) for i in range(n)]
def log_grads(self, p):
R = self.denominator_ring
p = coerce_point(R, p)
X = R.gens()
d = self.dimension()
H = [h for (h, e) in self.denominator_factored()]
n = len(H)
return [tuple([(X[j] * diff(H[i], X[j])).subs(p) for j in range(d)]) for i in range(n)]
def critical_cone(self, p, coordinate=None):
from sage.geometry.cone import Cone
R = self.denominator_ring
p = coerce_point(R, p)
d = self.dimension()
lg = self.log_grads(p)
n = len(lg)
if (coordinate not in range(d)):
for j in reversed(range(d)):
if (0 not in [lg[i][j] for i in range(n)]):
coordinate = j
break
Gamma = [direction(v, coordinate) for v in lg]
try:
cone = Cone(Gamma)
except TypeError:
cone = None
return (Gamma, cone)
def is_convenient_multiple_point(self, p):
from sage.combinat.subset import Subsets
from sage.matrix.constructor import matrix
R = self.denominator_ring
p = coerce_point(R, p)
H = [h for (h, e) in self.denominator_factored()]
n = len(H)
d = self.dimension()
if ([h.subs(p) for h in H] != [0 for h in H]):
return (False, 'not a singular point')
grads = self.grads(p)
for v in grads:
if (v == [0 for i in range(d)]):
return (False, 'not smooth point of factors')
if (n <= d):
M = matrix(grads)
if (M.rank() != n):
return (False, 'not a transverse intersection')
else:
for S in Subsets(grads, d, submultiset=True):
M = matrix(S)
if (M.rank() != d):
return (False, 'not a transverse intersection')
M = matrix(self.log_grads(p))
convenient_coordinates = []
for j in range(d):
if (0 not in M.columns()[j]):
convenient_coordinates.append(j)
if (not convenient_coordinates):
return (False, 'multiple point but not convenient')
X = R.gens()
convenientX = [X[i] for i in convenient_coordinates]
return (True, 'convenient in variables {}'.format(convenientX))
def singular_ideal(self):
R = self.denominator_ring
Hred = prod([h for (h, e) in self.denominator_factored()])
J = R.ideal(([Hred] + Hred.gradient()))
return R.ideal(J.groebner_basis())
def smooth_critical_ideal(self, alpha):
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
R = self.denominator_ring
Hred = prod([h for (h, e) in self.denominator_factored()])
K = R.base_ring()
d = self.dimension()
indets = []
for a in alpha:
if ((a not in K) and (a in SR)):
indets.append(a)
indets = sorted(set(indets), key=str)
if indets:
L = PolynomialRing(K, indets).fraction_field()
S = R.change_ring(L)
alpha = [L(a) for a in alpha]
else:
S = R
X = S.gens()
Hred = S(Hred)
J = S.ideal(([Hred] + [(((alpha[(d - 1)] * X[i]) * diff(Hred, X[i])) - ((alpha[i] * X[(d - 1)]) * diff(Hred, X[(d - 1)]))) for i in range((d - 1))]))
return S.ideal(J.groebner_basis())
def maclaurin_coefficients(self, multi_indices, numerical=0):
R = self.denominator_ring
d = self.dimension()
coeffs = {}
if (d == 1):
f = SR(self.quotient())
x = SR(R.gens()[0])
m = max(multi_indices)[0]
f = f.taylor(x, 0, m)
F = R(f)
tmp = F.coefficients()
for nu in multi_indices:
val = tmp[nu[0]]
if numerical:
val = val.n(digits=numerical)
coeffs[tuple(nu)] = val
return coeffs
alpha = []
for i in range(d):
alpha.append(max((nu[i] for nu in multi_indices)))
f = SR(self.quotient())
X = [SR(g) for g in R.gens()]
for i in range(d):
f = f.taylor(X[i], 0, alpha[i])
F = R(f)
X = R.gens()
for nu in multi_indices:
monomial = prod(((X[i] ** nu[i]) for i in range(d)))
val = F.monomial_coefficient(monomial)
if numerical:
val = val.n(digits=numerical)
coeffs[tuple(nu)] = val
return coeffs
def relative_error(self, approx, alpha, interval, exp_scale=Integer(1), digits=10):
from sage.modules.free_module_element import vector
if (not isinstance(approx, (list, tuple))):
approx = [approx]
if approx[0].variables():
av = approx[0].variables()[0]
else:
av = ZZ.one()
alpha = vector(alpha)
multi_indices = [(r * alpha) for r in interval]
mac = self.maclaurin_coefficients(multi_indices, numerical=digits)
mac_approx = {}
stats = []
for r in interval:
exp_s_r = (exp_scale ** r)
beta = tuple((r * alpha))
mac[beta] = (mac[beta] / exp_s_r).n(digits=digits)
mac_approx[beta] = [(f.subs({av: r}) / exp_s_r).n(digits=digits) for f in approx]
stats_row = [beta, mac[beta], mac_approx[beta]]
if (mac[beta] == 0):
stats_row.extend([None for a in mac_approx[beta]])
else:
stats_row.append([((mac[beta] - a) / mac[beta]) for a in mac_approx[beta]])
stats.append(tuple(stats_row))
return stats
def _add_(left, right):
return FractionWithFactoredDenominatorSum([left, right]).sum()
def _mul_(left, right):
numer = (left.numerator() * right.numerator())
df = (left.denominator_factored() + right.denominator_factored())
return left.parent()(numer, df) |
@click.command()  # decorator restored; the original click group name is not recoverable from this row
@click.argument('patchset', default='-')
@click.option('--name', help='The name of the patch to extract.', default=None)
@click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
@click.option('--with-metadata/--without-metadata', default=False, help='Include patchset metadata in output.')
def extract(patchset, name, output_file, with_metadata):
with click.open_file(patchset, 'r', encoding='utf-8') as fstream:
patchset_spec = json.load(fstream)
patchset = PatchSet(patchset_spec)
patch = patchset[name]
if with_metadata:
result = {'metadata': patch.metadata, 'patch': patch.patch}
result['metadata'].update(patchset.metadata)
else:
result = patch.patch
if output_file:
with open(output_file, 'w', encoding='utf-8') as out_file:
json.dump(result, out_file, indent=4, sort_keys=True)
log.debug(f'Written to {output_file:s}')
else:
click.echo(json.dumps(result, indent=4, sort_keys=True)) |
class BayesianNN(object):
def __init__(self, optimizer):
pass
def build_model(self):
pass
def train(self, data):
pass
def sample(self, steps):
pass |
def gather(outputs, target_device, dim=0):
def gather_map(outputs):
out = outputs[0]
if isinstance(out, torch.Tensor):
return Gather.apply(target_device, dim, *outputs)
if (out is None):
return None
if isinstance(out, dict):
if (not all(((len(out) == len(d)) for d in outputs))):
raise ValueError('All dicts must have the same number of keys')
return type(out)(((k, gather_map([d[k] for d in outputs])) for k in out))
return type(out)(map(gather_map, zip(*outputs)))
try:
res = gather_map(outputs)
finally:
gather_map = None
return res |
def test_UnionArray_NumpyArray():
v2a = ak.contents.unionarray.UnionArray(ak.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], np.int8)), ak.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100], np.int64)), [ak.from_iter([[1], [2], [3]], highlevel=False), ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))])
resultv2 = v2a[np.array([0, 1, 3], np.int64)]
assert (to_list(resultv2) == [5.5, 4.4, [2]])
assert (v2a.to_typetracer()[np.array([0, 1, 3], np.int64)].form == resultv2.form) |
def categories_count_embedding_id1(x: list) -> np.ndarray:
return categories_count_embedding(x, key='category_id1') |
def coordinate_features(world):
features = np.zeros((world.n_states, world.size))
for s in range(world.n_states):
(x, y) = world.state_index_to_point(s)
features[(s, x)] += 1
features[(s, y)] += 1
return features |
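# Worked example (not part of the original row) for coordinate_features above, using a
# hypothetical 2x2 grid-world stand-in; the real `world` object comes from the surrounding code base.
import numpy as np

class _TinyGrid:
    size = 2
    n_states = 4
    def state_index_to_point(self, s):
        return (s % self.size, s // self.size)

# Each state adds +1 in the column of its x coordinate and +1 in the column of its y
# coordinate, so e.g. state 3 at (1, 1) yields the row [0, 2].
assert coordinate_features(_TinyGrid()).tolist() == [[2, 0], [1, 1], [1, 1], [0, 2]]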
class BoxBlur(MultibandFilter):
name = 'BoxBlur'
def __init__(self, radius):
self.radius = radius
def filter(self, image):
return image.box_blur(self.radius) |
def register_configs_from_directory(dir_path) -> None:
model_metadata_path = os.path.join(dir_path, MODEL_METADATA_FILE)
if os.path.isfile(model_metadata_path):
register_model_metadata_from_path(model_metadata_path)
tokenizer_configs_path = os.path.join(dir_path, TOKENIZER_CONFIGS_FILE)
if os.path.isfile(tokenizer_configs_path):
register_tokenizer_configs_from_path(tokenizer_configs_path)
model_deployments_path = os.path.join(dir_path, MODEL_DEPLOYMENTS_FILE)
if os.path.isfile(model_deployments_path):
register_model_deployments_from_path(model_deployments_path) |
def gpu_setup(use_gpu, gpu_id):
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
if (torch.cuda.is_available() and use_gpu):
print('cuda available with GPU:', torch.cuda.get_device_name(0))
device = torch.device('cuda')
else:
print('cuda not available')
device = torch.device('cpu')
return device |
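# Illustrative usage (not part of the original row): request GPU 0 and place a tensor on
# whichever device gpu_setup above selects (CPU fallback when CUDA is unavailable).
import torch

_device = gpu_setup(use_gpu=True, gpu_id=0)
_x = torch.zeros(3, device=_device)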
def _load_metadata(args, shard_paths):
shards_size = []
shards_size_dt = {}
metadata = {}
pbar = shard_paths
if (du.get_rank() == 0):
print('loading metadata from json files')
pbar = tqdm(shard_paths)
for shard_path in pbar:
shard_name = Path(shard_path).stem
if (args.data.meta.path is not None):
meta_path = Path(args.data.meta.path)
if meta_path.is_dir():
meta_path = (meta_path / '{}.json'.format(shard_name))
else:
meta_path = (Path(shard_path).parent / '{}.json'.format(shard_name))
if meta_path.is_file():
filenames = get_filenames_from_tar(shard_path)
with open(meta_path, 'r') as f:
shard_file = json.load(f)
filenames_in_meta = set([Path(line['filename']).stem for line in shard_file])
filenames = (filenames & filenames_in_meta)
count = len(filenames)
for line in shard_file:
idx = Path(line['filename']).stem
if (idx in filenames):
line['shard_size'] = count
line['shard_name'] = shard_name
metadata[idx] = line
# else:
#     print(f"filename {idx} in tar file ({shard_path}) not present in metadata json file ({meta_path})")
shards_size.append(count)
shards_size_dt[shard_name] = count
return (metadata, shards_size, shards_size_dt) |
class RPNLossComputation(object):
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder, generate_labels_func):
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
self.copied_fields = []
self.generate_labels_func = generate_labels_func
self.discard_cases = ['not_visibility', 'between_thresholds']
def match_targets_to_anchors(self, anchor, target, copied_fields=[]):
match_quality_matrix = boxlist_iou(target, anchor)
matched_idxs = self.proposal_matcher(match_quality_matrix)
target = target.copy_with_fields(copied_fields)
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
return matched_targets
def prepare_targets(self, anchors, targets):
labels = []
regression_targets = []
for (anchors_per_image, targets_per_image) in zip(anchors, targets):
matched_targets = self.match_targets_to_anchors(anchors_per_image, targets_per_image, self.copied_fields)
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = self.generate_labels_func(matched_targets)
labels_per_image = labels_per_image.to(dtype=torch.float32)
bg_indices = (matched_idxs == Matcher.BELOW_LOW_THRESHOLD)
labels_per_image[bg_indices] = 0
if ('not_visibility' in self.discard_cases):
labels_per_image[(~ anchors_per_image.get_field('visibility'))] = (- 1)
if ('between_thresholds' in self.discard_cases):
inds_to_discard = (matched_idxs == Matcher.BETWEEN_THRESHOLDS)
labels_per_image[inds_to_discard] = (- 1)
regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, anchors_per_image.bbox)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return (labels, regression_targets)
def __call__(self, anchors, objectness, box_regression, targets):
anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
(labels, regression_targets) = self.prepare_targets(anchors, targets)
(sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels)
sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
(objectness, box_regression) = concat_box_prediction_layers(objectness, box_regression)
objectness = objectness.squeeze()
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
box_loss = (smooth_l1_loss(box_regression[sampled_pos_inds], regression_targets[sampled_pos_inds], beta=cfg.RPN.SMOOTH_L1_BETA, size_average=False) / sampled_inds.numel())
objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])
return (objectness_loss, box_loss) |
def test_import_prepHistFactory(tmp_path, script_runner):
temp = tmp_path.joinpath('parsed_output.json')
command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp} --hide-progress'
ret = script_runner.run(shlex.split(command))
assert ret.success
assert (ret.stdout == '')
assert (ret.stderr == '')
parsed_xml = json.loads(temp.read_text())
spec = {'channels': parsed_xml['channels']}
pyhf.schema.validate(spec, 'model.json') |
def test_RecordView_unbox_box():
record = ak.highlevel.Array([{'x': 0.0, 'y': []}, {'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [2, 2]}, {'x': 3.3, 'y': [3, 3, 3]}, {'x': 4.4, 'y': [4, 4, 4, 4]}], check_valid=True)[3]
assert (ak.operations.to_list(ak_numba_arrayview.RecordView.fromrecord(record).torecord()) == {'x': 3.3, 'y': [3, 3, 3]})
def f1(x):
return 3.14
assert (f1(record) == 3.14)
def f2(x):
return x
assert (ak.operations.to_list(f2(record)) == {'x': 3.3, 'y': [3, 3, 3]}) |
class Result(Generic[ResultType], ABC):
@abstractmethod
def is_ok(self) -> bool:
...
def is_err(self) -> bool:
return (not self.is_ok())
@abstractmethod
def unwrap(self) -> ResultType:
... |
def init_dataloaders(config, module_data):
if (('type' in config['data_loader']) and ('args' in config['data_loader'])):
data_loader = [config.initialize('data_loader', module_data)]
config['data_loader']['args'] = replace_nested_dict_item(config['data_loader']['args'], 'split', 'val')
if config['data_loader']['args']['dataset_name'].startswith('Conceptual'):
config['data_loader']['args'] = replace_nested_dict_item(config['data_loader']['args'], 'subsample', 0.01)
valid_data_loader = [config.initialize('data_loader', module_data)]
elif isinstance(config['data_loader'], list):
data_loader = [config.initialize('data_loader', module_data, index=idx) for idx in range(len(config['data_loader']))]
new_cfg_li = []
for dl_cfg in config['data_loader']:
dl_cfg['args'] = replace_nested_dict_item(dl_cfg['args'], 'split', 'val')
if dl_cfg['args']['dataset_name'].startswith('Conceptual'):
dl_cfg['args'] = replace_nested_dict_item(dl_cfg['args'], 'subsample', 0.01)
new_cfg_li.append(dl_cfg)
config._config['data_loader'] = new_cfg_li
valid_data_loader = [config.initialize('data_loader', module_data, index=idx) for idx in range(len(config['data_loader']))]
else:
raise ValueError('Check data_loader config, not correct format.')
return (data_loader, valid_data_loader) |
def _get_or_eval(value_or_function: Union[(T, Callable[([], T)])]) -> T:
if callable(value_or_function):
return value_or_function()
return value_or_function |
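# Illustrative usage (not part of the original row) of _get_or_eval above: plain values pass
# through unchanged, zero-argument callables are invoked and their result returned.
assert _get_or_eval(42) == 42
assert _get_or_eval(lambda: 42) == 42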
def add_summary(summary_writer, global_step, tag, value):
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
summary_writer.add_summary(summary, global_step) |
class Berkovich_Element_Cp_Projective(Berkovich_Element_Cp):
def __init__(self, parent, center, radius=None, power=None, prec=20, error_check=True):
Element.__init__(self, parent)
self._p = parent.prime()
self._base_space = parent.base()
self._base_type = parent._base_type
self._ideal = parent._ideal
if isinstance(center, Berkovich_Element_Cp_Affine):
raise TypeError('use as_projective_point to convert to projective Berkovich space')
Berkovich_Element_Cp.__init__(self, parent=parent, center=center, radius=radius, power=power, prec=prec, space_type='projective', error_check=error_check)
def as_affine_point(self):
if (self.center()[1] == 0):
raise ValueError('cannot convert infinity to affine Berkovich space')
from sage.schemes.berkovich.berkovich_space import Berkovich_Cp_Affine
new_space = Berkovich_Cp_Affine(self.parent().base_ring(), self.parent().ideal())
if (self.type_of_point() in [1, 2, 3]):
center = self.center()[0]
if (self.type_of_point() == 1):
return new_space(center)
elif (self.type_of_point() == 2):
return new_space(center, power=self.power())
elif (self.type_of_point() == 3):
return new_space(center, self.radius())
if (self._center_func is None):
center = [i[0] for i in self.center()]
else:
center = self.center_function()
if (self._radius_func is None):
radius = self.radius()
else:
radius = self.radius_function()
return new_space(center, radius, prec=self.prec())
def __eq__(self, other):
if (other is self):
return True
if (not isinstance(other, Berkovich_Element_Cp_Projective)):
return False
if (other.parent() != self.parent()):
return False
stype = self.type_of_point()
otype = other.type_of_point()
if ((stype == otype) and (stype == 1)):
return (self.center() == other.center())
elif ((stype == otype) and (stype == 4)):
raise NotImplementedError('equality for type IV points not implemented')
elif ((stype in [2, 3]) and (otype in [2, 3])):
if (self.radius() != other.radius()):
return False
scent = self.center()[0]
ocent = other.center()[0]
center_dist = self._custom_abs((scent - ocent))
return (center_dist <= self.radius())
else:
return False
def __hash__(self):
if (self.type_of_point() == 1):
return hash(self.center())
elif (self.type_of_point() == 4):
raise ValueError('hash not defined for type IV points')
return hash(self.radius())
def lt(self, other):
if (not isinstance(other, Berkovich_Element_Cp_Projective)):
raise TypeError(('other must be a point of a projective Berkovich space, but was %s' % other))
if (self.parent() != other.parent()):
raise ValueError('other must be a point of the same projective Berkovich space')
if (self == other):
return False
infinity = self.parent()((1, 0))
if (self == infinity):
return False
if (other == infinity):
return True
if (other.type_of_point() in [1, 4]):
return False
if (self.type_of_point() == 4):
center = self.center()[(- 1)]
dist = self._custom_abs((other.center()[0] - center[0]))
return ((dist <= other.radius()) and (self.radius()[(- 1)] <= other.radius()))
else:
dist = self._custom_abs((self.center()[0] - other.center()[0]))
return ((dist <= other.radius()) and (self.radius() <= other.radius()))
def gt(self, other):
if (not isinstance(other, Berkovich_Element_Cp_Projective)):
raise TypeError(('other must be a point of a projective Berkovich space, but was %s' % other))
if (self.parent() != other.parent()):
raise ValueError('other must be a point of the same projective Berkovich space')
if (self == other):
return False
infinity = self.parent()((1, 0))
if (self == infinity):
return True
if (other == infinity):
return False
if (self.type_of_point() in [1, 4]):
return False
if (other.type_of_point() == 4):
center = other.center()[(- 1)]
dist = self._custom_abs((self.center()[0] - center[0]))
return ((dist <= self.radius()) and (other.radius()[(- 1)] <= self.radius()))
else:
dist = self._custom_abs((self.center()[0] - other.center()[0]))
return ((dist <= self.radius()) and (other.radius() <= self.radius()))
def join(self, other, basepoint=Infinity):
if (not isinstance(other, Berkovich_Element_Cp_Projective)):
raise TypeError(('other must be a point of a projective Berkovich line, instead was %s' % other))
if (other.parent() != self.parent()):
raise ValueError('other must be a point of the same projective Berkovich line')
if (self.type_of_point() == 4):
new_center = self.center()[(- 1)]
new_radius = self.radius()[(- 1)]
return self.parent()(new_center, new_radius).join(other)
if (other.type_of_point() == 4):
new_center = other.center()[(- 1)]
new_radius = other.radius()[(- 1)]
return self.join(self.parent()(new_center, new_radius))
infty = self.parent()((1, 0))
if ((basepoint == Infinity) or (basepoint == infty)):
if ((self == infty) or (other == infty)):
return infty
dist = self._custom_abs((self.center()[0] - other.center()[0]))
maximum = max(dist, self.radius(), other.radius())
if ((maximum == self.radius()) and (self.type_of_point() == 2)):
return self.parent()(self.center(), power=self.power())
if ((maximum == other.radius()) and (other.type_of_point() == 2)):
return self.parent()(self.center(), power=other.power())
return self.parent()(self.center(), maximum)
if (not isinstance(basepoint, Berkovich_Element_Cp_Projective)):
raise TypeError(('basepoint must be a point of a projective Berkovich line, instead was %s' % basepoint))
if (basepoint.parent() != self.parent()):
raise ValueError('basepoint must be a point of the same Berkovich projective line')
if (basepoint.type_of_point() == 4):
new_center = basepoint.center()[(- 1)]
new_radius = basepoint.radius()[(- 1)]
return self.join(other, self.parent()(new_center, new_radius))
if (self == infty):
return other.join(basepoint)
if (other == infty):
return self.join(basepoint)
b_ge_s = (basepoint.gt(self) or (basepoint == self))
b_lt_s = basepoint.lt(self)
b_ge_o = (basepoint.gt(other) or (basepoint == other))
b_lt_o = basepoint.lt(other)
s_ge_o = (self.gt(other) or (self == other))
s_lt_o = self.lt(other)
if (not (s_lt_o or s_ge_o)):
if (not (b_ge_o or b_lt_o)):
if (not (b_ge_s or b_lt_s)):
dist_b_s = self._custom_abs((self.center()[0] - basepoint.center()[0]))
dist_b_o = self._custom_abs((other.center()[0] - basepoint.center()[0]))
return self.parent()(basepoint.center(), min(max(dist_b_o, other.radius(), basepoint.radius()), max(dist_b_s, self.radius(), basepoint.radius())))
elif b_ge_s:
return basepoint
else:
return self
elif b_ge_o:
return basepoint
else:
return other
elif s_ge_o:
if (not (b_ge_s or b_lt_s)):
return self
if b_ge_s:
return self
if b_ge_o:
return basepoint
if b_lt_o:
return other
else:
return other.join(self, basepoint)
def involution_map(self):
infty = self.parent()((1, 0))
zero = self.parent()(0)
if (self.type_of_point() == 1):
if (self == infty):
return zero
if (self == zero):
return infty
return self.parent()((1 / self.center()[0]))
if (self.type_of_point() in [2, 3]):
zero_contained_in_self = self.gt(zero)
if zero_contained_in_self:
if (self.type_of_point() == 2):
power = self.power()
return self.parent()(ZZ(0), power=(- power))
return self.parent()(ZZ(0), (1 / self.radius()))
return self.parent()((1 / self.center()[0]), (self.radius() / (self._custom_abs(self.center()[0]) ** 2)))
new_center_lst = []
new_radius_lst = []
for i in range(len(self.center())):
berk_point = self.parent()(self.center()[i], self.radius()[i])
zero_check = berk_point.gt(zero)
if zero_check:
continue
else:
new_center = (1 / self.center()[i][0])
new_radius = (self.radius()[i] / (self._custom_abs(self.center()[i][0]) ** 2))
new_center_lst.append(new_center)
new_radius_lst.append(new_radius)
if (not new_center_lst):
raise ValueError('precision of type IV is not high enough to define image')
return self.parent()(new_center_lst, new_radius_lst)
def contained_in_interval(self, start, end):
if (not isinstance(start, Berkovich_Element_Cp_Projective)):
raise TypeError('start must be a point of Berkovich space')
if (start.parent() != self.parent()):
raise ValueError('start must be a point of the same Berkovich space as this point')
if (not isinstance(end, Berkovich_Element_Cp_Projective)):
raise TypeError('end must be a point of Berkovich space')
if (end.parent() != self.parent()):
raise ValueError('end must be a point of the same Berkovich space as this point')
infty = self.parent()((1, 0))
zero = self.parent()(ZZ(0))
if (self == infty):
if ((start == zero) or (end == zero)):
return ((end == infty) or (start == infty))
return self.involution_map().contained_in_interval(start.involution_map(), end.involution_map())
if ((start == infty) or (end == infty)):
if (self == zero):
return ((end == zero) or (start == zero))
if ((start == zero) or (end == zero)):
gauss = self.parent()(ZZ(0), ZZ(1))
return (self.contained_in_interval(start, gauss) or self.contained_in_interval(gauss, end))
return self.involution_map().contained_in_interval(start.involution_map(), end.involution_map())
join = start.join(end)
j_ge_s = (join.gt(self) or (join == self))
s_ge_start = (self.gt(start) or (self == start))
s_ge_end = (self.gt(end) or (self == end))
return (j_ge_s and (s_ge_end or s_ge_start)) |
class _Conv(_ConvOrTransposedConv):
_transposed = False
def __init__(self, in_dim: Dim, out_dim: Dim, filter_size: Union[(Sequence[Union[(int, Dim)]], int, Dim)], *, padding: str, strides: Optional[Union[(int, Sequence[int])]]=None, dilation_rate: Optional[Union[(int, Sequence[int])]]=None, groups: Optional[int]=None, with_bias: bool=True):
self.groups = groups
super().__init__(in_dim=in_dim, out_dim=out_dim, filter_size=filter_size, padding=padding, with_bias=with_bias)
if isinstance(strides, int):
strides = ([strides] * self.nd)
self.strides = strides
self.dilation_rate = dilation_rate
def __call__(self, source: Tensor, *, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]]=None) -> Tuple[(Tensor, Sequence[Dim])]:
return conv(source, in_dim=self.in_dim, out_dim=self.out_dim, in_spatial_dims=in_spatial_dims, out_spatial_dims=out_spatial_dims, filter=self.filter, filter_size=self.filter_size, padding=self.padding, strides=self.strides, dilation_rate=self.dilation_rate, groups=self.groups, bias=(self.bias if self.with_bias else None)) |
class semantic3d_params():
def __init__(self):
self.class_freq = np.asarray([41.227, 24.391, 6.845, 5.153, 14.673, 4.23, 2.7, 0.782])
self.class_weights = (- np.log((self.class_freq / 100.0)))
self.num_classes = (len(self.class_freq) + 1)
self.color_map = [[255, 255, 255], [128, 128, 128], [255, 225, 25], [124, 152, 0], [170, 110, 40], [128, 0, 0], [245, 130, 48], [250, 190, 190], [0, 130, 200]] |
@HOOKS.register_module()
class ApexOptimizerHook(OptimizerHook):
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.update_interval = update_interval
self.use_fp16 = use_fp16
def before_run(self, runner):
runner.optimizer.zero_grad()
def after_train_iter(self, runner):
runner.outputs['loss'] /= self.update_interval
if self.use_fp16:
with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
else:
runner.outputs['loss'].backward()
if self.every_n_iters(runner, self.update_interval):
if (self.grad_clip is not None):
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
runner.optimizer.zero_grad() |
class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
def __init__(self, norm=3, output_size=1, eps=1e-06):
super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)
self.p = nn.Parameter((torch.ones(1) * norm)) |
def _modify_array(sdfg: dace.SDFG, storage: dace.StorageType):
for (nsdfg, aname, aval) in sdfg.arrays_recursive():
if (aname == 't'):
if (storage == dace.StorageType.GPU_Shared):
aval = dace.data.Array(aval.dtype, [1], transient=aval.transient)
nsdfg.arrays[aname] = aval
aval.storage = storage
break
else:
raise ValueError('Array not found') |
class TFCamembertForMultipleChoice():
def __init__(self, *args, **kwargs):
requires_tf(self)
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
def default_regression_model(num_anchors, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'):
options = {'kernel_size': 3, 'strides': 1, 'padding': 'same', 'kernel_initializer': keras.initializers.normal(mean=0.0, stddev=0.01, seed=None), 'bias_initializer': 'zeros'}
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = keras.layers.Conv2D(filters=regression_feature_size, activation='relu', name='pyramid_regression_{}'.format(i), **options)(outputs)
outputs = keras.layers.Conv2D(300, name='pyramid_regression_shafin1', **options)(outputs)
outputs = keras.layers.Reshape(((- 1), 300), name='pyramid_regression_shafin2')(outputs)
outputs = MyLayer(output_dim=65)(outputs)
outputs = keras.layers.Reshape(((- 1), 1, 65))(outputs)
outputs = keras.layers.Conv2D((num_anchors * 4), name='pyramid_regression', **options)(outputs)
outputs = keras.layers.Reshape(((- 1), 4), name='pyramid_regression_reshape')(outputs)
return keras.models.Model(inputs=inputs, outputs=outputs, name=name) |
@click.command()
@click.option('--policy_file', type=str, default=None)
@click.option('--run_group', type=str, default=None)
@click.option('--epoch', type=int, default=None)
def main(policy_file, run_group, epoch):
import glob
tf.compat.v1.disable_eager_execution()
if (policy_file is not None):
policy_file = glob.glob(policy_file)[0]
base = os.path.splitext(policy_file)[0]
with open(policy_file, 'rb') as f:
pretrain = pickle.load(f)
pretrain_weights = save_weight(pretrain.sess)
output_file = open((base + '_weight.pkl'), 'wb')
pickle.dump(pretrain_weights, output_file)
output_file.close()
else:
runs = glob.glob(f'logs/{run_group}*/*')
print(runs)
for run in sorted(runs):
policy_file = f'{run}/policy_{epoch}.pkl'
print(policy_file)
subprocess.Popen(['python', 'save_weight.py', f'--policy_file={policy_file}']) |
def maybe_posaxis(layout: Content, axis: int, depth: int) -> (int | None):
from awkward.record import Record
if isinstance(layout, Record):
if (axis == 0):
raise AxisError('Record type at axis=0 is a scalar, not an array')
return maybe_posaxis(layout._array, axis, depth)
if (axis >= 0):
return axis
else:
(is_branching, additional_depth) = layout.branch_depth
if (not is_branching):
return (((axis + depth) + additional_depth) - 1)
else:
return None |
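# Usage sketch for maybe_posaxis above (assumes awkward v2 is installed and the helper is in scope alongside it):
# a negative axis is resolved against the layout depth, so axis=-1 on this depth-2 list layout resolves to 1.
import awkward as ak
layout = ak.Array([[1.1, 2.2], [3.3]]).layout
print(maybe_posaxis(layout, 1, 1))
print(maybe_posaxis(layout, -1, 1)) |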
def generate(output_dir: Path) -> None:
generate_between_factors(types=TYPES, output_dir=(output_dir / 'factors'))
generate_pose3_extra_factors((output_dir / 'factors')) |
class Dataset_Custom(Dataset):
def __init__(self, root_path, flag='train', size=None, features='S', data_path='ETTh1.csv', target='OT', scale=True, timeenc=0, freq='h'):
if (size == None):
self.seq_len = ((24 * 4) * 4)
self.label_len = (24 * 4)
self.pred_len = (24 * 4)
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
assert (flag in ['train', 'test', 'val'])
type_map = {'train': 0, 'val': 1, 'test': 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove('date')
df_raw = df_raw[((['date'] + cols) + [self.target])]
num_train = int((len(df_raw) * 0.7))
num_test = int((len(df_raw) * 0.2))
num_vali = ((len(df_raw) - num_train) - num_test)
border1s = [0, (num_train - self.seq_len), ((len(df_raw) - num_test) - self.seq_len)]
border2s = [num_train, (num_train + num_vali), len(df_raw)]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if ((self.features == 'M') or (self.features == 'MS')):
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif (self.features == 'S'):
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
if (self.timeenc == 0):
df_stamp['month'] = df_stamp.date.apply((lambda row: row.month), 1)
df_stamp['day'] = df_stamp.date.apply((lambda row: row.day), 1)
df_stamp['weekday'] = df_stamp.date.apply((lambda row: row.weekday()), 1)
df_stamp['hour'] = df_stamp.date.apply((lambda row: row.hour), 1)
data_stamp = df_stamp.drop(['date'], 1).values
elif (self.timeenc == 1):
data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
data_stamp = data_stamp.transpose(1, 0)
self.data_x = data[border1:border2]
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = (s_begin + self.seq_len)
r_begin = (s_end - self.label_len)
r_end = ((r_begin + self.label_len) + self.pred_len)
seq_x = self.data_x[s_begin:s_end]
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
return (seq_x, seq_y, seq_x_mark, seq_y_mark)
def __len__(self):
return (((len(self.data_x) - self.seq_len) - self.pred_len) + 1)
def inverse_transform(self, data):
return self.scaler.inverse_transform(data) |
def _seg_22():
return [(8450, 'M', u'c'), (8451, 'M', u'c'), (8452, 'V'), (8453, '3', u'c/o'), (8454, '3', u'c/u'), (8455, 'M', u''), (8456, 'V'), (8457, 'M', u'f'), (8458, 'M', u'g'), (8459, 'M', u'h'), (8463, 'M', u''), (8464, 'M', u'i'), (8466, 'M', u'l'), (8468, 'V'), (8469, 'M', u'n'), (8470, 'M', u'no'), (8471, 'V'), (8473, 'M', u'p'), (8474, 'M', u'q'), (8475, 'M', u'r'), (8478, 'V'), (8480, 'M', u'sm'), (8481, 'M', u'tel'), (8482, 'M', u'tm'), (8483, 'V'), (8484, 'M', u'z'), (8485, 'V'), (8486, 'M', u''), (8487, 'V'), (8488, 'M', u'z'), (8489, 'V'), (8490, 'M', u'k'), (8491, 'M', u'a'), (8492, 'M', u'b'), (8493, 'M', u'c'), (8494, 'V'), (8495, 'M', u'e'), (8497, 'M', u'f'), (8498, 'X'), (8499, 'M', u'm'), (8500, 'M', u'o'), (8501, 'M', u''), (8502, 'M', u''), (8503, 'M', u''), (8504, 'M', u''), (8505, 'M', u'i'), (8506, 'V'), (8507, 'M', u'fax'), (8508, 'M', u''), (8509, 'M', u''), (8511, 'M', u''), (8512, 'M', u''), (8513, 'V'), (8517, 'M', u'd'), (8519, 'M', u'e'), (8520, 'M', u'i'), (8521, 'M', u'j'), (8522, 'V'), (8528, 'M', u'17'), (8529, 'M', u'19'), (8530, 'M', u'110'), (8531, 'M', u'13'), (8532, 'M', u'23'), (8533, 'M', u'15'), (8534, 'M', u'25'), (8535, 'M', u'35'), (8536, 'M', u'45'), (8537, 'M', u'16'), (8538, 'M', u'56'), (8539, 'M', u'18'), (8540, 'M', u'38'), (8541, 'M', u'58'), (8542, 'M', u'78'), (8543, 'M', u'1'), (8544, 'M', u'i'), (8545, 'M', u'ii'), (8546, 'M', u'iii'), (8547, 'M', u'iv'), (8548, 'M', u'v'), (8549, 'M', u'vi'), (8550, 'M', u'vii'), (8551, 'M', u'viii'), (8552, 'M', u'ix'), (8553, 'M', u'x'), (8554, 'M', u'xi'), (8555, 'M', u'xii'), (8556, 'M', u'l'), (8557, 'M', u'c'), (8558, 'M', u'd'), (8559, 'M', u'm'), (8560, 'M', u'i'), (8561, 'M', u'ii'), (8562, 'M', u'iii'), (8563, 'M', u'iv'), (8564, 'M', u'v'), (8565, 'M', u'vi'), (8566, 'M', u'vii'), (8567, 'M', u'viii'), (8568, 'M', u'ix'), (8569, 'M', u'x')] |
class BO(flexs.Explorer):
def __init__(self, model: flexs.Model, rounds: int, sequences_batch_size: int, model_queries_per_batch: int, starting_sequence: str, alphabet: str, log_file: Optional[str]=None, method: str='EI', recomb_rate: float=0):
name = f'BO_method={method}'
if (not isinstance(model, flexs.Ensemble)):
model = flexs.Ensemble([model], combine_with=(lambda x: x))
super().__init__(model, name, rounds, sequences_batch_size, model_queries_per_batch, starting_sequence, log_file)
self.alphabet = alphabet
self.method = method
self.recomb_rate = recomb_rate
self.best_fitness = 0
self.num_actions = 0
self.state = None
self.seq_len = None
self.memory = None
self.initial_uncertainty = None
def initialize_data_structures(self):
self.state = string_to_one_hot(self.starting_sequence, self.alphabet)
self.seq_len = len(self.starting_sequence)
self.memory = PrioritizedReplayBuffer((len(self.alphabet) * self.seq_len), 100000, self.sequences_batch_size, 0.6)
def train_models(self):
if (len(self.memory) >= self.sequences_batch_size):
batch = self.memory.sample_batch()
else:
self.memory.batch_size = len(self.memory)
batch = self.memory.sample_batch()
self.memory.batch_size = self.sequences_batch_size
states = batch['next_obs']
state_seqs = [one_hot_to_string(state.reshape(((- 1), len(self.alphabet))), self.alphabet) for state in states]
rewards = batch['rews']
self.model.train(state_seqs, rewards)
def _recombine_population(self, gen):
np.random.shuffle(gen)
ret = []
for i in range(0, (len(gen) - 1), 2):
strA = []
strB = []
switch = False
for ind in range(len(gen[i])):
if (np.random.random() < self.recomb_rate):
switch = (not switch)
if switch:
strA.append(gen[i][ind])
strB.append(gen[(i + 1)][ind])
else:
strB.append(gen[i][ind])
strA.append(gen[(i + 1)][ind])
ret.append(''.join(strA))
ret.append(''.join(strB))
return ret
def EI(self, vals):
return np.mean([max((val - self.best_fitness), 0) for val in vals])
@staticmethod
def UCB(vals):
discount = 0.01
return (np.mean(vals) - (discount * np.std(vals)))
def sample_actions(self):
actions = set()
pos_changes = []
for pos in range(self.seq_len):
pos_changes.append([])
for res in range(len(self.alphabet)):
if (self.state[(pos, res)] == 0):
pos_changes[pos].append((pos, res))
while (len(actions) < (self.model_queries_per_batch / self.sequences_batch_size)):
action = []
for pos in range(self.seq_len):
if (np.random.random() < (1 / self.seq_len)):
pos_tuple = pos_changes[pos][np.random.randint((len(self.alphabet) - 1))]
action.append(pos_tuple)
if ((len(action) > 0) and (tuple(action) not in actions)):
actions.add(tuple(action))
return list(actions)
def pick_action(self, all_measured_seqs):
state = self.state.copy()
actions = self.sample_actions()
actions_to_screen = []
states_to_screen = []
for i in range((self.model_queries_per_batch // self.sequences_batch_size)):
x = np.zeros((self.seq_len, len(self.alphabet)))
for action in actions[i]:
x[action] = 1
actions_to_screen.append(x)
state_to_screen = construct_mutant_from_sample(x, state)
states_to_screen.append(one_hot_to_string(state_to_screen, self.alphabet))
ensemble_preds = self.model.get_fitness(states_to_screen)
method_pred = ([self.EI(vals) for vals in ensemble_preds] if (self.method == 'EI') else [self.UCB(vals) for vals in ensemble_preds])
action_ind = np.argmax(method_pred)
uncertainty = np.std(method_pred[action_ind])
action = actions_to_screen[action_ind]
new_state_string = states_to_screen[action_ind]
self.state = string_to_one_hot(new_state_string, self.alphabet)
new_state = self.state
reward = np.mean(ensemble_preds[action_ind])
if (new_state_string not in all_measured_seqs):
self.best_fitness = max(self.best_fitness, reward)
self.memory.store(state.ravel(), action.ravel(), reward, new_state.ravel())
self.num_actions += 1
return (uncertainty, new_state_string, reward)
@staticmethod
def Thompson_sample(measured_batch):
fitnesses = np.cumsum([np.exp((10 * x[0])) for x in measured_batch])
fitnesses = (fitnesses / fitnesses[(- 1)])
x = np.random.uniform()
index = bisect_left(fitnesses, x)
sequences = [x[1] for x in measured_batch]
return sequences[index]
def propose_sequences(self, measured_sequences: pd.DataFrame) -> Tuple[(np.ndarray, np.ndarray)]:
if (self.num_actions == 0):
self.initialize_data_structures()
else:
last_round_num = measured_sequences['round'].max()
last_batch = measured_sequences[(measured_sequences['round'] == last_round_num)]
_last_batch_seqs = last_batch['sequence'].tolist()
_last_batch_true_scores = last_batch['true_score'].tolist()
last_batch_seqs = _last_batch_seqs
if ((self.recomb_rate > 0) and (len(last_batch) > 1)):
last_batch_seqs = self._recombine_population(last_batch_seqs)
measured_batch = []
for seq in last_batch_seqs:
if (seq in _last_batch_seqs):
measured_batch.append((_last_batch_true_scores[_last_batch_seqs.index(seq)], seq))
else:
measured_batch.append((np.mean(self.model.get_fitness([seq])), seq))
measured_batch = sorted(measured_batch)
sampled_seq = self.Thompson_sample(measured_batch)
self.state = string_to_one_hot(sampled_seq, self.alphabet)
self.initial_uncertainty = None
samples = set()
prev_cost = self.model.cost
all_measured_seqs = set(measured_sequences['sequence'].tolist())
while ((self.model.cost - prev_cost) < self.model_queries_per_batch):
(uncertainty, new_state_string, _) = self.pick_action(all_measured_seqs)
all_measured_seqs.add(new_state_string)
samples.add(new_state_string)
if (self.initial_uncertainty is None):
self.initial_uncertainty = uncertainty
if (uncertainty > (2 * self.initial_uncertainty)):
sampled_seq = self.Thompson_sample(measured_batch)
self.state = string_to_one_hot(sampled_seq, self.alphabet)
self.initial_uncertainty = None
if (len(samples) < self.sequences_batch_size):
random_sequences = generate_random_sequences(self.seq_len, (self.sequences_batch_size - len(samples)), self.alphabet)
samples.update(random_sequences)
samples = list(samples)
preds = np.mean(self.model.get_fitness(samples), axis=1)
self.train_models()
return (samples, preds) |
@hydra.main(config_path='./conf', config_name='config')
def main(cfg: DictConfig) -> None:
print(cfg)
logger.info(f'The current working directory is {Path().cwd()}')
start_time = time.time()
logger.info('initializing experimental condition..')
lambdas = list(dict(cfg.estimator_hyperparams)['lambdas'])
ope_estimators = [InverseProbabilityWeighting(estimator_name='IPW'), SelfNormalizedInverseProbabilityWeighting(estimator_name='SNIPW'), DirectMethod(estimator_name='DM'), DoublyRobust(estimator_name='DR'), SelfNormalizedDoublyRobust(estimator_name='SNDR'), SwitchDoublyRobustTuning(lambdas=lambdas, estimator_name='Switch-DR'), DoublyRobustWithShrinkageTuning(lambdas=lambdas, estimator_name='DRos')]
n_seeds = cfg.setting.n_seeds
sample_size = cfg.setting.sample_size
reg_model = cfg.setting.reg_model
campaign = cfg.setting.campaign
behavior_policy = cfg.setting.behavior_policy
test_size = cfg.setting.test_size
is_timeseries_split = cfg.setting.is_timeseries_split
n_folds = cfg.setting.n_folds
obd_path = ((Path().cwd().parents[5] / 'open_bandit_dataset') if cfg.setting.is_full_obd else None)
random_state = cfg.setting.random_state
np.random.seed(random_state)
dataset_ts = OpenBanditDataset(behavior_policy='bts', campaign=campaign, data_path=obd_path)
dataset_ur = OpenBanditDataset(behavior_policy='random', campaign=campaign, data_path=obd_path)
if (behavior_policy == 'random'):
if is_timeseries_split:
bandit_feedback_ur = dataset_ur.obtain_batch_bandit_feedback(test_size=test_size, is_timeseries_split=True)[0]
else:
bandit_feedback_ur = dataset_ur.obtain_batch_bandit_feedback()
bandit_feedbacks = [bandit_feedback_ur]
ground_truth_ts = OpenBanditDataset.calc_on_policy_policy_value_estimate(behavior_policy='bts', campaign=campaign, data_path=obd_path, test_size=test_size, is_timeseries_split=is_timeseries_split)
policy_ts = BernoulliTS(n_actions=dataset_ts.n_actions, len_list=dataset_ts.len_list, random_state=random_state, is_zozotown_prior=True, campaign=campaign)
action_dist_ts = policy_ts.compute_batch_action_dist(n_rounds=1000000)
evaluation_policies = [(ground_truth_ts, action_dist_ts)]
else:
if is_timeseries_split:
bandit_feedback_ts = dataset_ts.obtain_batch_bandit_feedback(test_size=test_size, is_timeseries_split=True)[0]
else:
bandit_feedback_ts = dataset_ts.obtain_batch_bandit_feedback()
bandit_feedbacks = [bandit_feedback_ts]
ground_truth_ur = OpenBanditDataset.calc_on_policy_policy_value_estimate(behavior_policy='random', campaign=campaign, data_path=obd_path, test_size=test_size, is_timeseries_split=is_timeseries_split)
policy_ur = Random(n_actions=dataset_ur.n_actions, len_list=dataset_ur.len_list, random_state=random_state)
action_dist_ur = policy_ur.compute_batch_action_dist(n_rounds=1000000)
evaluation_policies = [(ground_truth_ur, action_dist_ur)]
hyperparams = dict(cfg.reg_model_hyperparams)[reg_model]
regression_models = [reg_model_dict[reg_model](**hyperparams)]
evaluator = InterpretableOPEEvaluator(random_states=np.arange(n_seeds), bandit_feedbacks=bandit_feedbacks, evaluation_policies=evaluation_policies, ope_estimators=ope_estimators, regression_models=regression_models)
logger.info('experiment started')
_ = evaluator.estimate_policy_value(sample_size=sample_size, n_folds_=n_folds)
mean = evaluator.calculate_mean(root=True)
mean_scaled = evaluator.calculate_mean(scale=True, root=True)
log_path = Path('./outputs')
log_path.mkdir(exist_ok=True, parents=True)
root_mse_df = DataFrame()
root_mse_df['estimator'] = list(mean.keys())
root_mse_df['mean'] = list(mean.values())
root_mse_df['mean(scaled)'] = list(mean_scaled.values())
root_mse_df.to_csv((log_path / 'root_mse.csv'))
se_df = DataFrame(evaluator.calculate_squared_error())
se_df = DataFrame(se_df.stack()).reset_index(1)
se_df.rename(columns={'level_1': 'estimators', 0: 'se'}, inplace=True)
nonparam_ttests = pg.pairwise_ttests(data=se_df, dv='se', parametric=False, between='estimators').round(4).drop(['Contrast', 'Parametric', 'Paired'], axis=1)
nonparam_ttests.to_csv((log_path / 'nonparam_ttests.csv'))
DataFrame(evaluator.reg_model_metrics).describe().to_csv((log_path / 'reg_model_metrics.csv'))
print(root_mse_df)
experiment = f'{campaign}-{behavior_policy}-{sample_size}'
elapsed_time = np.round(((time.time() - start_time) / 60), 2)
logger.info(f'finish experiment {experiment} in {elapsed_time}min') |
@pytest.mark.parametrize('X, expected_voting', [(X, 'soft'), (sparse.csr_matrix(X), 'hard')])
@pytest.mark.filterwarnings('ignore:The default value of `n_init` will change')
def test_fit_resample_check_voting(X, expected_voting):
cc = ClusterCentroids(random_state=RND_SEED)
cc.fit_resample(X, Y)
assert (cc.voting_ == expected_voting) |
def test_masker_call_pretrained_tokenizer():
AutoTokenizer = pytest.importorskip('transformers').AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased', use_fast=False)
masker = shap.maskers.Text(tokenizer)
test_text = 'I ate a Cannoli'
test_input_mask = np.array([True, False, True, True, False, True, True, True])
output_masked_text = masker(test_input_mask, test_text)
correct_masked_text = '[MASK] ate a [MASK]noli'
assert (output_masked_text[0] == correct_masked_text) |
class VertexGroup(object):
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen
self.p_hgr = p_hgr
self.hg_n = None
self.hg_d = None
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if (V not in self.C):
self.C.append(V)
def homology_group_rank(self):
if (self.hg_n is None):
self.hg_n = sum((1 for v in self.C if v.minimiser()))
return self.hg_n
def homology_group_differential(self):
if (self.hg_d is None):
self.hg_d = (self.homology_group_rank() - self.p_hgr)
return self.hg_d
def polytopial_sperner_lemma(self):
pass
def print_out(self):
for v in self():
v.print_out() |
def get_mask(mask_type, n_quantiles, risk_kwargs):
if (mask_type in _masks):
if (n_quantiles in _masks[mask_type]):
return _masks[mask_type][n_quantiles]
else:
_masks[mask_type] = dict()
if (mask_type in _inverse_beta_funcs):
_masks[mask_type][n_quantiles] = create_mask(_inverse_beta_funcs[mask_type], n_quantiles=n_quantiles, risk_kwargs=risk_kwargs)
else:
raise NotImplementedError('mask_type not recognized')
return _masks[mask_type][n_quantiles] |
class StorageTypeDoubleComplex(StorageTypeSimple):
def assign_c_from_py(self, c, py):
return je('{{ c }} = CDE_to_dz({{ py }})', c=c, py=py) |
def init_data(M, N):
fn = np.float32(N)
A = np.empty((M, N), dtype=np.float32)
x = np.empty((N,), dtype=np.float32)
y = np.empty((N,), dtype=np.float32)
for i in range(N):
x[i] = (1 + (i / fn))
for i in range(M):
for j in range(N):
A[(i, j)] = (((i + j) % N) / (5 * M))
return (A, x, y) |
def get_valid_directions(goal_loc):
xys = [[0, 1], [0, (- 1)], [1, 0], [(- 1), 0]]
crosses = [[1, 0], [1, 0], [0, 1], [0, 1]]
(goal_loci, goal_locj) = (goal_loc[0], goal_loc[1])
rel_vec = (np.array([121, 121]) - np.array(goal_loc))
rel_vec = (rel_vec / math.sqrt(((rel_vec[0] ** 2) + (rel_vec[1] ** 2))))
xy_returns = []
for (xyi, xy) in enumerate(xys):
xy = np.array(xy)
cross = crosses[xyi]
dot_p = ((rel_vec[0] * xy[0]) + (rel_vec[1] * xy[1]))
if (dot_p >= 0):
xy_returns.append(xy)
else:
pass
return xy_returns |
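# Usage sketch for get_valid_directions above (assumes numpy and math are imported at module level, as the
# function requires): it keeps the axis-aligned moves whose dot product with the unit vector pointing from
# goal_loc toward (121, 121) is non-negative.
print(get_valid_directions([121, 100])) |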
def profile(hparams, run_opts):
import ptflops
import torchinfo
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.wavlm = hparams['wavlm']
self.wavs = torch.randn(1, hparams['sample_rate'], device=run_opts['device'])
@torch.no_grad()
def forward(self, _=None):
logits = self.wavlm(self.wavs)
return logits
model = Model().eval().to(run_opts['device'])
(macs, params) = ptflops.get_model_complexity_info(model, (1,), as_strings=True, print_per_layer_stat=False)
time_start = time.time()
model()
torch.cuda.synchronize()
time_stop = (time.time() - time_start)
max_mem = (torch.cuda.max_memory_allocated('cuda') / (10 ** 9))
result = {'MACs': macs, 'memory': max_mem, 'time': time_stop}
summary = torchinfo.summary(model, verbose=0)
summary.trainable_params = hparams['wavlm'].model.decoder.out_proj.weight.numel()
summary.total_params = sum((p.numel() for p in hparams['wavlm'].model.parameters()))
for (i, (k, v)) in enumerate(hparams['decoder_mask'].items()):
if (v is None):
continue
for buffer in v.values():
summary.total_params += buffer.numel()
logging.info(summary)
logging.info(result) |
class BertTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
self.max_len_single_sentence = (self.max_len - 2)
self.max_len_sentences_pair = (self.max_len - 3)
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text, basic_done=False):
split_tokens = []
if (self.do_basic_tokenize and (not basic_done)):
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def add_special_tokens_single_sentence(self, token_ids):
return (([self._convert_token_to_id(self.cls_token)] + token_ids) + [self._convert_token_to_id(self.sep_token)])
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
sep = [self._convert_token_to_id(self.sep_token)]
cls = [self._convert_token_to_id(self.cls_token)]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def save_vocabulary(self, vocab_path):
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = vocab_path
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
index = token_index
writer.write((token + u'\n'))
index += 1
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
if (pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES):
if (('-cased' in pretrained_model_name_or_path) and kwargs.get('do_lower_case', True)):
logger.warning('The pre-trained model you are loading is a cased model but you have not set `do_lower_case` to False. We are setting `do_lower_case=False` for you but you may want to check this behavior.')
kwargs['do_lower_case'] = False
elif (('-cased' not in pretrained_model_name_or_path) and (not kwargs.get('do_lower_case', True))):
logger.warning('The pre-trained model you are loading is an uncased model but you have set `do_lower_case` to False. We are setting `do_lower_case=True` for you but you may want to check this behavior.')
kwargs['do_lower_case'] = True
return super(BertTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) |
class MaskingScheme():
def __init__(self, args):
self.args = args
self.mask_ratio = getattr(self.args, 'mask_ratio', None)
def mask(tokens, tagmap=None):
pass |
def mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(refresh=False, *args, **kwargs):
kwargs['ckpt'] = '
return mockingjay_url(*args, refresh=refresh, **kwargs) |
def read_paragraphs(inputfile, wrapped=True):
lines = map(str.strip, inputfile)
if wrapped:
paragraph = []
for line in lines:
if line:
paragraph.append(line)
elif paragraph:
(yield paragraph)
paragraph = []
if paragraph:
(yield paragraph)
else:
for line in lines:
(yield ([line] if line else [])) |
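# Usage sketch for read_paragraphs above: blank lines delimit paragraphs when wrapped=True, otherwise every
# line becomes its own (possibly empty) paragraph.
import io
print(list(read_paragraphs(io.StringIO('line one\nline two\n\nsecond paragraph\n'))))
print(list(read_paragraphs(io.StringIO('a\n\nb\n'), wrapped=False))) |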
def read_datas(dataset_name, batch_size):
with open(('../data/%s/train_data.json' % dataset_name), encoding='utf-8') as f:
train_raw = json.load(f)
train_raw = sorted(train_raw, key=(lambda x: len(x)))
new_train_raw = []
for i in range(0, len(train_raw), batch_size):
new_train_raw.append(train_raw[i:(i + batch_size)])
with open(('../data/%s/dev_data.json' % dataset_name), encoding='utf-8') as f:
dev_raw = json.load(f)
dev_raw = sorted(dev_raw, key=(lambda x: len(x)))
new_dev_raw = []
for i in range(0, len(dev_raw), batch_size):
new_dev_raw.append(dev_raw[i:(i + batch_size)])
with open(('../data/%s/test_data.json' % dataset_name), encoding='utf-8') as f:
test_raw = json.load(f)
test_raw = sorted(test_raw, key=(lambda x: len(x)))
new_test_raw = []
for i in range(0, len(test_raw), batch_size):
new_test_raw.append(test_raw[i:(i + batch_size)])
return (new_train_raw, new_dev_raw, new_test_raw) |
def foldername_from_config_override(args):
cfg_override = None
if hasattr(args, 'config_override'):
cfg_override = args.config_override
elif ('config_override' in args):
cfg_override = args['config_override']
folder_name = ''
if ((cfg_override is not None) and (len(cfg_override) > 0)):
folder_name = str(cfg_override)
folder_name = folder_name.replace(':', '.').replace('\n', ' ')
folder_name = folder_name.replace('/', '_')
folder_name = ' '.join(folder_name.split())
folder_name = folder_name.replace('. ', '.').replace(' ', '_')
folder_name = ('_' + folder_name)
return folder_name |
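# Usage sketch for foldername_from_config_override above; argparse.Namespace stands in for the args object,
# and the YAML-style override string is flattened into a path-safe suffix such as '_optim.lr.0.01'.
import argparse
print(foldername_from_config_override(argparse.Namespace(config_override='optim:\n  lr: 0.01'))) |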
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_) |
def get_lvis_instances_meta(dataset_name):
if ('v0.5' in dataset_name):
return _get_lvis_instances_meta_v0_5()
raise ValueError('No built-in metadata for dataset {}'.format(dataset_name)) |
def load_obs_label_dict(path):
if (not path):
return {}
with open(path, 'r') as f:
obs_labels = [line.rstrip() for line in f]
return {c: i for (i, c) in enumerate(obs_labels)} |
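# Usage sketch for load_obs_label_dict above: each label line maps to its index and an empty path yields an
# empty dict; the temporary file is purely illustrative.
import os
import tempfile
tmp = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
tmp.write('car\nperson\nbike\n')
tmp.close()
print(load_obs_label_dict(tmp.name))
print(load_obs_label_dict(''))
os.remove(tmp.name) |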
class Profiler():
def __init__(self, folder: str, name: str, with_jax: bool=False) -> None:
super().__init__()
self._name = name
self._folder = folder
self._with_jax = with_jax
self._vistracer = VizTracer(output_file=os.path.join(folder, 'viztracer', (name + '.html')), max_stack_depth=3)
self._jax_folder = os.path.join(folder, ('jax_profiler/' + name))
def start(self) -> None:
if self._with_jax:
jax.profiler.start_trace(self._jax_folder)
self._vistracer.start()
def stop(self) -> None:
self._vistracer.stop()
if self._with_jax:
jax.profiler.stop_trace()
def save(self) -> None:
self._vistracer.save()
def stop_and_save(self) -> None:
self.stop()
self.save() |
class Catalan(Constant):
def __init__(self, name='catalan'):
conversions = dict(mathematica='Catalan', kash='Catalan(R)', maple='Catalan', maxima='catalan', pynac='Catalan')
Constant.__init__(self, name, conversions=conversions, domain='positive')
def _mpfr_(self, R):
return R.catalan_constant()
def _real_double_(self, R):
return R('0.915965594177219')
def __float__(self):
return 0.915965594177219
def _sympy_(self):
import sympy
return sympy.Catalan |
def _imagenet_stem(inputs, hparams, stem_cell, current_step=None):
num_stem_cells = 2
num_stem_filters = int((32 * hparams.stem_multiplier))
net = slim.conv2d(inputs, num_stem_filters, [3, 3], stride=2, scope='conv0', padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
cell_outputs = [None, net]
filter_scaling = (1.0 / (hparams.filter_scaling_rate ** num_stem_cells))
for cell_num in range(num_stem_cells):
net = stem_cell(net, scope='cell_stem_{}'.format(cell_num), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[(- 2)], cell_num=cell_num, current_step=current_step)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return (net, cell_outputs) |
def _get_typecons_docstring(cons: ONNXTypeConstraint) -> str:
return ' * **{}** -- {}'.format(cons.type_str, ', '.join((':class:`{}`'.format(t.to_string()) for t in cons.types))) |
def softmarginloss_double_backwards(ctx, ggI):
size_average = ctx.additional_args[0]
(input, target, gO) = ctx.saved_tensors
div_factor = (input.nelement() if size_average else 1)
t0 = (1 + ((- target) * input).exp()).pow((- 1))
t1 = ((- target) * ((- target) * input).exp())
first_deriv = (t0 * t1)
gI = (((((- 1) * gO) * ggI) / div_factor) * (first_deriv.pow(2) + (first_deriv * target)))
ggO = ((ggI * first_deriv).sum() / div_factor)
return (gI, None, ggO, None, None, None) |
class TestPairWiseLossOps(serial.SerializedTestCase):
@given(X=hu.arrays(dims=[2, 1], elements=hu.floats(min_value=0.0, max_value=10.0)), label=hu.arrays(dims=[2, 1], elements=st.integers(min_value=0, max_value=1), dtype=np.float32), **hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
workspace.FeedBlob('new_label', new_label)
net = core.Net('net')
net.PairWiseLoss(['X', 'label'], ['output'])
net.PairWiseLoss(['new_x', 'new_label'], ['new_output'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data', [net], num_iter=1))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
new_output = workspace.FetchBlob('new_output')
sign = (1 if (label[0] > label[1]) else (- 1))
if (label[0] == label[1]):
self.assertEqual(np.asscalar(output), 0)
return
self.assertAlmostEqual(np.asscalar(output), np.asscalar(np.log((1 + np.exp((sign * (X[1] - X[0])))))), delta=0.0001)
self.assertAlmostEqual(np.asscalar(output), np.asscalar(new_output), delta=0.0001)
@given(X=hu.arrays(dims=[2, 1], elements=hu.floats(min_value=0.0, max_value=10.0)), label=hu.arrays(dims=[2, 1], elements=st.integers(min_value=0, max_value=1), dtype=np.float32), dY=hu.arrays(dims=[1], elements=hu.floats(min_value=1, max_value=10)), **hu.gcs_cpu_only)
def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('dY', dY)
workspace.FeedBlob('label', label)
net = core.Net('net')
net.PairWiseLossGradient(['X', 'label', 'dY'], ['dX'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data', [net], num_iter=1))
workspace.RunPlan(plan)
dx = workspace.FetchBlob('dX')
sign = (1 if (label[0] > label[1]) else (- 1))
if (label[0] == label[1]):
self.assertEqual(np.asscalar(dx[0]), 0)
return
self.assertAlmostEqual(np.asscalar(dx[0]), np.asscalar((((- dY[0]) * sign) / (1 + np.exp((sign * (X[0] - X[1])))))), delta=(0.01 * abs(np.asscalar(dx[0]))))
self.assertEqual(np.asscalar(dx[0]), np.asscalar((- dx[1])))
delta = 0.001
up_x = np.array([[(X[0] + delta)], [X[1]]], dtype=np.float32)
down_x = np.array([[(X[0] - delta)], [X[1]]], dtype=np.float32)
workspace.FeedBlob('up_x', up_x)
workspace.FeedBlob('down_x', down_x)
new_net = core.Net('new_net')
new_net.PairWiseLoss(['up_x', 'label'], ['up_output'])
new_net.PairWiseLoss(['down_x', 'label'], ['down_output'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data', [new_net], num_iter=1))
workspace.RunPlan(plan)
down_output_pred = workspace.FetchBlob('down_output')
up_output_pred = workspace.FetchBlob('up_output')
np.testing.assert_allclose(np.asscalar(dx[0]), np.asscalar((((0.5 * dY[0]) * (up_output_pred[0] - down_output_pred[0])) / delta)), rtol=0.01, atol=0.01)
@given(n=st.integers(0, 10), k=st.integers(1, 5), **hu.gcs_cpu_only)
def test_pair_wise_loss_batch(self, n, k, gc, dc):
lengths = (np.random.randint(k, size=n).astype(np.int32) + 1)
X = np.random.rand(sum(lengths)).astype(np.float32)
label = np.random.randint(k, size=sum(lengths)).astype(np.float32)
def pair_wise_op(X, label, lengths):
N = lengths.size
output = np.zeros(N).astype(np.float32)
def f(x):
return np.log((1 + np.exp(x)))
offset = 0
for idx in range(N):
offset += (lengths[(idx - 1)] if (idx > 0) else 0)
count = 0
for i in range(offset, (offset + lengths[idx])):
for j in range(offset, i):
if (label[i] == label[j]):
continue
sign = (1 if (label[i] > label[j]) else (- 1))
output[idx] += f((sign * (X[j] - X[i])))
count += 1
if (count > 0):
output[idx] /= count
return [output]
op = core.CreateOperator('PairWiseLoss', ['X', 'label', 'lengths'], 'out')
self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, label, lengths], reference=pair_wise_op)
self.assertDeviceChecks(dc, op, [X, label, lengths], [0])
self.assertGradientChecks(gc, op, [X, label, lengths], 0, [0]) |
class BaselineH(nn.Module):
def __init__(self, num_classes=None, cla_type=None):
super().__init__()
self.base_enc = BasicEncoder()
self.base_cla = (Classifier() if (cla_type == 'mlp') else LinearClassifier())
def forward(self, x: torch.Tensor):
out = self.base_enc(x)
out = self.base_cla(out)
return out |
class Fuser():
def fuse(self, model, inplace=False):
if (not inplace):
model = copy.deepcopy(model)
input_root = model
input_graph = model.graph
self.modules = dict(input_root.named_modules())
fusion_patterns = get_fusion_patterns()
fusion_pairs = self._find_matches(input_root, input_graph, fusion_patterns)
self.fused_graph = Graph()
env = {}
def load_arg(a):
return map_arg(a, (lambda node: env[node.name]))
for node in input_graph.nodes:
(root_node, obj) = fusion_pairs.get(node.name, (None, None))
if (root_node is node):
env[node.name] = obj.fuse(self, load_arg)
elif (root_node is None):
env[node.name] = self.fused_graph.node_copy(node, load_arg)
self.fused_graph.output(load_arg(input_graph.result))
model = GraphModule(input_root, self.fused_graph)
return model
def _find_matches(self, root, graph, patterns):
modules = dict(root.named_modules())
match_map = {}
def apply_match(pattern, node, match):
if isinstance(pattern, tuple):
(s, *args) = pattern
apply_match(s, node, match)
for (subpattern, arg) in zip(args, node.args):
apply_match(subpattern, arg, match)
elif (node.name not in match_map):
match_map[node.name] = match
for node in reversed(graph.nodes):
if (node.name not in match_map):
for (pattern, value) in patterns.items():
if is_match(modules, node, pattern):
apply_match(pattern, node, (node, value(self, node)))
return match_map |
def read_wyckoff_csv(filename):
with open(filename) as wyckoff_file:
return parse_wyckoff_csv(wyckoff_file) |
class ConditionSet(Condition):
def __init__(self, conditions: List[Condition], order_matters: bool=False, simultaneously_met: bool=True):
self._conditions = conditions
self._order_matters = order_matters
self._simultaneously_met = simultaneously_met
self._current_condition_index = 0
def condition_met(self):
met = True
if self._order_matters:
if (self._current_condition_index < len(self._conditions)):
for cond in self._conditions[self._current_condition_index:]:
(ismet, term) = cond.condition_met()
if (not ismet):
break
self._current_condition_index += 1
met = (self._current_condition_index >= len(self._conditions))
else:
for cond in self._conditions:
(ismet, term) = cond.condition_met()
met &= ismet
return (met, False)
def reset(self):
self._current_condition_index = 0 |
def get_default_optimizer_params(model: torch.nn.Module, base_lr: Optional[float]=None, weight_decay: Optional[float]=None, weight_decay_norm: Optional[float]=None, bias_lr_factor: Optional[float]=1.0, weight_decay_bias: Optional[float]=None, overrides: Optional[Dict[(str, Dict[(str, float)])]]=None):
if (overrides is None):
overrides = {}
defaults = {}
if (base_lr is not None):
defaults['lr'] = base_lr
if (weight_decay is not None):
defaults['weight_decay'] = weight_decay
bias_overrides = {}
if ((bias_lr_factor is not None) and (bias_lr_factor != 1.0)):
if (base_lr is None):
raise ValueError('bias_lr_factor requires base_lr')
bias_overrides['lr'] = (base_lr * bias_lr_factor)
if (weight_decay_bias is not None):
bias_overrides['weight_decay'] = weight_decay_bias
if len(bias_overrides):
if ('bias' in overrides):
raise ValueError("Conflicting overrides for 'bias'")
overrides['bias'] = bias_overrides
norm_module_types = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm)
params: List[Dict[(str, Any)]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module in model.modules():
for (module_param_name, value) in module.named_parameters(recurse=False):
if (not value.requires_grad):
continue
if (value in memo):
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if (isinstance(module, norm_module_types) and (weight_decay_norm is not None)):
hyperparams['weight_decay'] = weight_decay_norm
hyperparams.update(overrides.get(module_param_name, {}))
params.append({'params': [value], **hyperparams})
return params |
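# Usage sketch for get_default_optimizer_params above (assumes torch and copy are imported at module level):
# build per-parameter groups for a small model, give biases twice the base learning rate, and hand the groups
# to a standard optimizer.
import torch
param_groups = get_default_optimizer_params(torch.nn.Linear(4, 2), base_lr=0.1, weight_decay=0.0001, bias_lr_factor=2.0)
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9) |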
class TrainArgParser(BaseArgParser):
def __init__(self):
super(TrainArgParser, self).__init__()
self.is_training = True
self.parser.add_argument('--model', dest='model_args.model', choices=('DenseNet121', 'ResNet152', 'Inceptionv4', 'ResNet18', 'ResNet50', 'ResNet34', 'ResNeXt101', 'SEResNeXt101', 'NASNetA', 'SENet154', 'MNASNet'), default='DenseNet121', help='Model name.')
self.parser.add_argument('--pretrained', dest='model_args.pretrained', type=util.str_to_bool, default=True, help='Use a pretrained network.')
self.parser.add_argument('--moco', dest='model_args.moco', type=util.str_to_bool, default=True, help='Using moco')
self.parser.add_argument('--fine_tuning', dest='model_args.fine_tuning', type=str, default='None', help='Layer to fine tune')
self.parser.add_argument('--experiment_name', dest='logger_args.experiment_name', type=str, default='debugging', help='Experiment name.')
self.parser.add_argument('--train_custom_csv', dest='data_args.csv', type=str, default=None, help='csv for custom dataset.')
self.parser.add_argument('--val_custom_csv', dest='data_args.csv_dev', type=str, default=None, help='csv for custom dev dataset.')
self.parser.add_argument('--iters_per_print', dest='logger_args.iters_per_print', type=int, default=64, help=(('Number of iterations between ' + 'printing loss to the console and ') + 'TensorBoard.'))
self.parser.add_argument('--iters_per_save', dest='logger_args.iters_per_save', type=int, default=8192, help=('Number of iterations between ' + 'saving a checkpoint to save_dir.'))
self.parser.add_argument('--iters_per_eval', dest='logger_args.iters_per_eval', type=int, default=8192, help=('Number of iterations between ' + 'evaluations of the model.'))
self.parser.add_argument('--iters_per_visual', dest='logger_args.iters_per_visual', type=int, default=16384, help=('Number of iterations between ' + 'visualizing training examples.'))
self.parser.add_argument('--max_ckpts', dest='logger_args.max_ckpts', type=int, default=10, help=('Number of checkpoints to keep ' + 'before overwriting old ones.'))
self.parser.add_argument('--keep_topk', dest='logger_args.keep_topk', type=util.str_to_bool, default=True, help=('Keep the top K checkpoints instead ' + 'of most recent K checkpoints.'))
self.parser.add_argument('--num_epochs', dest='optim_args.num_epochs', type=int, default=50, help=('Number of epochs to train. If 0, ' + 'train forever.'))
self.parser.add_argument('--metric_name', dest='optim_args.metric_name', choices=('chexpert-log_loss', 'cxr14-log_loss', 'chexpert-competition-log_loss', 'chexpert-competition-AUROC', 'shenzhen-AUROC', 'chexpert-competition-single-AUROC'), default='chexpert-competition-AUROC', help='Validation metric to optimize.')
self.parser.add_argument('--maximize_metric', dest='optim_args.maximize_metric', type=util.str_to_bool, default=True, help=(('If True, maximize the metric ' + 'specified by metric_name. ') + 'Otherwise, minimize it.'))
self.parser.add_argument('--optimizer', dest='optim_args.optimizer', type=str, default='adam', choices=('sgd', 'adam'), help='Optimizer.')
self.parser.add_argument('--sgd_momentum', dest='optim_args.sgd_momentum', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--sgd_dampening', dest='optim_args.sgd_dampening', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--weight_decay', dest='optim_args.weight_decay', type=float, default=0.0, help='Weight decay (L2 coefficient).')
self.parser.add_argument('--lr', dest='optim_args.lr', type=float, default=0.0001, help='Initial learning rate.')
self.parser.add_argument('--lr_scheduler', dest='optim_args.lr_scheduler', type=str, default=None, choices=(None, 'step', 'multi_step', 'plateau'), help='LR scheduler to use.')
self.parser.add_argument('--lr_decay_gamma', dest='optim_args.lr_decay_gamma', type=float, default=0.1, help=(('Multiply learning rate by this ' + 'value every LR step (step and ') + 'multi_step only).'))
self.parser.add_argument('--lr_decay_step', dest='optim_args.lr_decay_step', type=int, default=100, help=('Number of epochs between each ' + 'multiply-by-gamma step.'))
self.parser.add_argument('--lr_milestones', dest='optim_args.lr_milestones', type=str, default='50,125,250', help=('Epochs to step the LR when using ' + 'multi_step LR scheduler.'))
self.parser.add_argument('--lr_patience', dest='optim_args.lr_patience', type=int, default=2, help=('Number of stagnant epochs before ' + 'stepping LR.'))
self.parser.add_argument('--loss_fn', dest='optim_args.loss_fn', choices=('cross_entropy',), default='cross_entropy', help='loss function.')
self.parser.add_argument('--scale', dest='transform_args.scale', default=320, type=int)
self.parser.add_argument('--crop', dest='transform_args.crop', type=int, default=320)
self.parser.add_argument('--normalization', dest='transform_args.normalization', default='imagenet', choices=('imagenet', 'chexpert_norm'))
self.parser.add_argument('--maintain_ratio', dest='transform_args.maintain_ratio', type=util.str_to_bool, default=True)
self.parser.add_argument('--rotate_min', dest='transform_args.rotate_min', type=float, default=0)
self.parser.add_argument('--rotate_max', dest='transform_args.rotate_max', type=float, default=0)
self.parser.add_argument('--rotate_prob', dest='transform_args.rotate_prob', type=float, default=0)
self.parser.add_argument('--contrast_min', dest='transform_args.contrast_min', type=float, default=0)
self.parser.add_argument('--contrast_max', dest='transform_args.contrast_max', type=float, default=0)
self.parser.add_argument('--contrast_prob', dest='transform_args.contrast_prob', type=float, default=0)
self.parser.add_argument('--brightness_min', dest='transform_args.brightness_min', type=float, default=0)
self.parser.add_argument('--brightness_max', dest='transform_args.brightness_max', type=float, default=0)
self.parser.add_argument('--brightness_prob', dest='transform_args.brightness_prob', type=float, default=0)
self.parser.add_argument('--sharpness_min', dest='transform_args.sharpness_min', type=float, default=0)
self.parser.add_argument('--sharpness_max', dest='transform_args.sharpness_max', type=float, default=0)
self.parser.add_argument('--sharpness_prob', dest='transform_args.sharpness_prob', type=float, default=0)
self.parser.add_argument('--horizontal_flip_prob', dest='transform_args.horizontal_flip_prob', type=float, default=0) |
class GPT2LoraInt8(CausalLoraInt8Model):
config_name: str = 'gpt2_lora_int8'
def __init__(self, weights_path: Optional[str]=None):
super().__init__(GPT2LoraInt8Engine.config_name, weights_path) |
def set_up_sampler_parser(parser):
set_up_base_parser(parser)
add_vqgan_args(parser)
add_sampler_args(parser)
return parser |
def register_Ns3DefaultDeleter__Ns3WifiMacQueueItem_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::DefaultDeleter< ns3::WifiMacQueueItem > const &', 'arg0')])
cls.add_method('Delete', 'void', [param('ns3::WifiMacQueueItem *', 'object')], is_static=True)
return |
class TextContractFeatureExtractorWithLM(TextContractFeatureExtractor):
()
def language_model_coherence(self, tb1, tb2, tb3, tb4):
if ((tb1 is None) or (tb3 is None)):
loss_diff_next = 0.0
loss_diff_prev = 0.0
else:
loss_diff_next = compare_losses('en', tb2.text, tb3.text, prev=tb1.text)
loss_diff_prev = compare_losses('en', tb2.text, tb1.text, next=tb3.text)
return {'loss_diff_next': loss_diff_next, 'loss_diff_prev': loss_diff_prev} |
class OffsetBricksSetABreakoutWorld(RandomOffsetBricksBreakoutWorld):
brick_offset_range_start = 0
brick_offset_range_end = 80 |
@make_properties
class InLocalStorage(LocalStorage):
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
node_a = self.node_a
node_b = self.node_b
if (isinstance(node_a, nodes.EntryNode) and isinstance(node_b, nodes.EntryNode)):
for edge in graph.edges_between(node_a, node_b):
if (edge.data.data is not None):
return True
return False |
@dace.library.expansion
class ExpandBcastMPI(ExpandTransformation):
environments = [environments.mpi.MPI]
def expansion(node, parent_state, parent_sdfg, n=None, **kwargs):
((buffer, count_str), root) = node.validate(parent_sdfg, parent_state)
dtype = buffer.dtype.base_type
mpi_dtype_str = 'MPI_BYTE'
if (dtype == dtypes.float32):
mpi_dtype_str = 'MPI_FLOAT'
elif (dtype == dtypes.float64):
mpi_dtype_str = 'MPI_DOUBLE'
elif (dtype == dtypes.complex64):
mpi_dtype_str = 'MPI_COMPLEX'
elif (dtype == dtypes.complex128):
mpi_dtype_str = 'MPI_COMPLEX_DOUBLE'
elif (dtype == dtypes.int32):
mpi_dtype_str = 'MPI_INT'
elif (dtype == dtypes.int64):
mpi_dtype_str = 'MPI_LONG_LONG'
else:
raise NotImplementedError((('The datatype ' + str(dtype)) + ' is not supported!'))
if (buffer.dtype.veclen > 1):
raise NotImplementedError
if ((root.dtype.base_type != dtypes.int32) and (root.dtype.base_type != dtypes.int64)):
raise ValueError('Bcast root must be an integer!')
ref = ''
if isinstance(buffer, dace.data.Scalar):
ref = '&'
init = ''
comm = 'MPI_COMM_WORLD'
if node.grid:
comm = f'__state->{node.grid}_comm'
elif node.fcomm:
init = f'MPI_Comm __comm = MPI_Comm_f2c({node.fcomm});'
comm = '__comm'
code = f'''
{init}
MPI_Bcast({ref}_inbuffer, {count_str}, {mpi_dtype_str}, _root, {comm});
_outbuffer = _inbuffer;'''
tasklet = dace.sdfg.nodes.Tasklet(node.name, node.in_connectors, node.out_connectors, code, language=dtypes.Language.CPP)
return tasklet |
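As an aside (not part of the dace library itself), the elif chain above is just a datatype-to-MPI-name lookup; a table-based sketch of the same mapping, assuming only the `dace.dtypes` names already used above:
from dace import dtypes

_MPI_TYPE_PAIRS = [
    (dtypes.float32, 'MPI_FLOAT'),
    (dtypes.float64, 'MPI_DOUBLE'),
    (dtypes.complex64, 'MPI_COMPLEX'),
    (dtypes.complex128, 'MPI_DOUBLE_COMPLEX'),
    (dtypes.int32, 'MPI_INT'),
    (dtypes.int64, 'MPI_LONG_LONG'),
]

def mpi_type_for(dtype):
    """Map a dace base dtype to its MPI datatype name, mirroring the branches above."""
    for dc_type, mpi_name in _MPI_TYPE_PAIRS:
        if dtype == dc_type:
            return mpi_name
    raise NotImplementedError(f'The datatype {dtype} is not supported!') |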
class Model(sqlalchemy_base):
__tablename__ = 'models'
uuid = sqla.Column(sqla.String, primary_key=True)
name = sqla.Column(sqla.String, unique=True)
description = sqla.Column(sqla.String)
username = sqla.Column(sqla.String)
creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now())
extra_info = sqla.Column(sqla.JSON)
checkpoints = sqla.orm.relationship('Checkpoint', back_populates='model', cascade='all, delete, delete-orphan', foreign_keys='Checkpoint.model_uuid')
final_checkpoint_uuid = sqla.Column(sqla.String, sqla.ForeignKey('checkpoints.uuid'), nullable=True)
final_checkpoint = sqla.orm.relationship('Checkpoint', foreign_keys=[final_checkpoint_uuid], uselist=False)
completed = sqla.Column(sqla.Boolean)
hidden = sqla.Column(sqla.Boolean)
logdir_filepaths = sqla.Column(sqla.JSON)
def __repr__(self):
return f'<Model(uuid="{self.uuid}", name="{self.name}")>'
def __hash__(self):
return hash((hash(self.uuid) + hash(self.name)))
def __eq__(self, other):
return (self.__hash__() == hash(other)) |
def test_two_columns_as_rvecs():
ak_array_1 = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])
ak_array_2 = ak.Array([{'x': 1.1}, {'x': 2.2}, {'x': 3.3}, {'x': 4.4}, {'x': 5.5}])
data_frame = ak.to_rdataframe({'x': ak_array_1, 'y': ak_array_2})
assert (set(data_frame.GetColumnNames()) == {'x', 'y'})
assert (data_frame.GetColumnType('x') == 'double')
assert data_frame.GetColumnType('y').startswith('awkward::Record_')
cpp_list_x = ', '.join((str(e) for e in ak_array_1.to_list()))
cpp_list_y = ', '.join((str(e) for e in ak_array_2.x.to_list()))
done = compiler(f'''
int ix = 0;
double x_val[5] = {{ {cpp_list_x} }};
template<typename T>
struct CheckX {{
void operator()(T const& x) {{
R__ASSERT(x == x_val[ix++]);
}}
}};
int iy = 0;
double y_val[5] = {{ {cpp_list_y} }};
template<typename T>
struct CheckY {{
void operator()(T const& y) {{
R__ASSERT(y.x() == y_val[iy++]);
}}
}};
''')
assert (done is True)
f_x = ROOT.CheckX[data_frame.GetColumnType('x')]()
f_y = ROOT.CheckY[data_frame.GetColumnType('y')]()
data_frame.Foreach(f_x, ['x'])
data_frame.Foreach(f_y, ['y']) |
class DataBuffer(sympy.MatrixSymbol):
__sympy_module__: T.Any = None
def __new__(cls, name: str, n: T.Optional[T.Scalar]=None, m: T.Optional[T.Scalar]=None) -> DataBuffer:
if (n is None):
n = DataBuffer.__sympy_module__.Symbol((name + '_dim'))
if (m is not None):
assert (m == 1), 'DataBuffer is 1-D only!'
instance = super(DataBuffer, cls).__new__(cls, name, n, DataBuffer.__sympy_module__.S(1))
return instance
def __getitem__(self, key: T.Any) -> sympy.matrices.expressions.matexpr.MatrixElement:
return super().__getitem__((key, DataBuffer.__sympy_module__.S(0))) |
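A hedged usage sketch for the class above: DataBuffer requires its `__sympy_module__` attribute to be set to a sympy-compatible module before use (how symforce normally wires that up is not shown here), after which it behaves as a 1-D symbolic buffer:
import sympy

DataBuffer.__sympy_module__ = sympy
buf = DataBuffer('weights')   # length defaults to the symbol weights_dim
elem = buf[3]                 # indexing yields the MatrixElement weights[3, 0]
print(buf.shape, elem) |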
def register_Ns3DsrDsrPassiveBuffEntry_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_constructor([param('ns3::dsr::DsrPassiveBuffEntry const &', 'arg0')])
cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'pa', default_value='0'), param('ns3::Ipv4Address', 'd', default_value='ns3::Ipv4Address()'), param('ns3::Ipv4Address', 's', default_value='ns3::Ipv4Address()'), param('ns3::Ipv4Address', 'n', default_value='ns3::Ipv4Address()'), param('uint16_t', 'i', default_value='0'), param('uint16_t', 'f', default_value='0'), param('uint8_t', 'seg', default_value='0'), param('ns3::Time', 'exp', default_value='ns3::Simulator::Now()'), param('uint8_t', 'p', default_value='0')])
cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
cls.add_method('GetExpireTime', 'ns3::Time', [], is_const=True)
cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
cls.add_method('GetNextHop', 'ns3::Ipv4Address', [], is_const=True)
cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet const >', [], is_const=True)
cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
cls.add_method('GetSegsLeft', 'uint8_t', [], is_const=True)
cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'd')])
cls.add_method('SetExpireTime', 'void', [param('ns3::Time', 'exp')])
cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'f')])
cls.add_method('SetIdentification', 'void', [param('uint16_t', 'i')])
cls.add_method('SetNextHop', 'void', [param('ns3::Ipv4Address', 'n')])
cls.add_method('SetPacket', 'void', [param('ns3::Ptr< ns3::Packet const >', 'p')])
cls.add_method('SetProtocol', 'void', [param('uint8_t', 'p')])
cls.add_method('SetSegsLeft', 'void', [param('uint8_t', 'seg')])
cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 's')])
return |
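The register_* functions above drive pybindgen's C++ binding generator. As a rough, self-contained illustration of the same API on a made-up class (the `Counter` type below is purely hypothetical and unrelated to ns-3):
import sys
from pybindgen import Module, FileCodeSink, param, retval

mod = Module('toy')
counter = mod.add_class('Counter')
counter.add_constructor([])
counter.add_method('Add', retval('int'), [param('int', 'value')])
mod.generate(FileCodeSink(sys.stdout))   # emits the CPython wrapper source for the bindings |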
@MODELS.register_module
class ODC(nn.Module):
def __init__(self, backbone, with_sobel=False, neck=None, head=None, memory_bank=None, pretrained=None):
super(ODC, self).__init__()
self.with_sobel = with_sobel
if with_sobel:
self.sobel_layer = Sobel()
self.backbone = builder.build_backbone(backbone)
self.neck = builder.build_neck(neck)
if (head is not None):
self.head = builder.build_head(head)
if (memory_bank is not None):
self.memory_bank = builder.build_memory(memory_bank)
self.init_weights(pretrained=pretrained)
self.num_classes = head.num_classes
self.loss_weight = torch.ones((self.num_classes,), dtype=torch.float32).cuda()
self.loss_weight /= self.loss_weight.sum()
def init_weights(self, pretrained=None):
if (pretrained is not None):
print_log('load model from: {}'.format(pretrained), logger='root')
self.backbone.init_weights(pretrained=pretrained)
self.neck.init_weights(init_linear='kaiming')
self.head.init_weights(init_linear='normal')
def forward_backbone(self, img):
if self.with_sobel:
img = self.sobel_layer(img)
x = self.backbone(img)
return x
def forward_train(self, img, idx, **kwargs):
x = self.forward_backbone(img)
feature = self.neck(x)
outs = self.head(feature)
if self.memory_bank.label_bank.is_cuda:
loss_inputs = (outs, self.memory_bank.label_bank[idx])
else:
loss_inputs = (outs, self.memory_bank.label_bank[idx.cpu()].cuda())
losses = self.head.loss(*loss_inputs)
change_ratio = self.memory_bank.update_samples_memory(idx, feature[0].detach())
losses['change_ratio'] = change_ratio
return losses
def forward_test(self, img, **kwargs):
x = self.forward_backbone(img)
outs = self.head(x)
keys = ['head{}'.format(i) for i in range(len(outs))]
out_tensors = [out.cpu() for out in outs]
return dict(zip(keys, out_tensors))
def forward(self, img, mode='train', **kwargs):
if (mode == 'train'):
return self.forward_train(img, **kwargs)
elif (mode == 'test'):
return self.forward_test(img, **kwargs)
elif (mode == 'extract'):
return self.forward_backbone(img)
else:
raise Exception('No such mode: {}'.format(mode))
def set_reweight(self, labels=None, reweight_pow=0.5):
if (labels is None):
if self.memory_bank.label_bank.is_cuda:
labels = self.memory_bank.label_bank.cpu().numpy()
else:
labels = self.memory_bank.label_bank.numpy()
hist = np.bincount(labels, minlength=self.num_classes).astype(np.float32)
inv_hist = ((1.0 / (hist + 1e-05)) ** reweight_pow)
weight = (inv_hist / inv_hist.sum())
self.loss_weight.copy_(torch.from_numpy(weight))
self.head.criterion = nn.CrossEntropyLoss(weight=self.loss_weight) |
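A small numeric illustration (plain numpy, independent of the model class) of the reweighting rule in set_reweight above: frequent clusters receive proportionally smaller loss weights.
import numpy as np

labels = np.array([0, 0, 0, 1, 2, 2])                        # toy cluster assignments
hist = np.bincount(labels, minlength=3).astype(np.float32)   # [3., 1., 2.]
inv_hist = (1.0 / (hist + 1e-05)) ** 0.5                     # reweight_pow = 0.5
weight = inv_hist / inv_hist.sum()
print(weight)   # the rare cluster (label 1) gets the largest weight |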