code stringlengths 101 5.91M |
|---|
def register_Ns3RttHistory_methods(root_module, cls):
    """Register constructors and instance attributes for ns3::RttHistory on *cls*.

    This is pybindgen-style generated binding code: it declares the value
    constructor, the copy constructor, and the four public data members.
    """
    # RttHistory(SequenceNumber32 s, uint32_t c, Time t)
    cls.add_constructor([param('ns3::SequenceNumber32', 's'), param('uint32_t', 'c'), param('ns3::Time', 't')])
    # Copy constructor.
    cls.add_constructor([param('ns3::RttHistory const &', 'h')])
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    cls.add_instance_attribute('retx', 'bool', is_const=False)
    cls.add_instance_attribute('seq', 'ns3::SequenceNumber32', is_const=False)
    cls.add_instance_attribute('time', 'ns3::Time', is_const=False)
    return
def load_nx(name, dataset_dir):
    """Load a pickled graph (or list of graphs) named *name* from *dataset_dir*.

    Tries ``<dataset_dir>/<name>.pkl`` first; on any failure falls back to
    reading ``<dataset_dir>/<name>.gpickle`` with networkx. The result is
    always returned as a list.
    """
    try:
        pkl_path = '{}/{}.pkl'.format(dataset_dir, name)
        with open(pkl_path, 'rb') as handle:
            loaded = pickle.load(handle)
    except Exception:
        loaded = nx.read_gpickle('{}/{}.gpickle'.format(dataset_dir, name))
    return loaded if isinstance(loaded, list) else [loaded]
def get_fid_metric_specs() -> List[MetricSpec]:
    """Return the metric specs for FID-style image-generation fidelity evaluation."""
    fidelity_class = 'helm.benchmark.metrics.image_generation.fidelity_metrics.FidelityMetric'
    return [MetricSpec(class_name=fidelity_class, args={})]
def get_output_filename(file):
    """Build the output file name from *file*'s experiment id.

    When extra command-line arguments were supplied, their serialized form
    (get_argv()) is appended to the name.
    """
    name = get_exp_id(file)
    if len(sys.argv) > 1:
        name += get_argv()
    return name
class MT5ForConditionalGeneration(T5ForConditionalGeneration):
    """mT5 conditional-generation model: identical architecture to
    T5ForConditionalGeneration but with mT5 config and checkpoint key filters.
    """
    model_type = 'mt5'
    config_class = MT5Config
    # The encoder embedding weights are shared/tied, so they may legitimately
    # be missing from checkpoints; skip them on both load and save.
    _keys_to_ignore_on_load_missing = ['encoder\\.embed_tokens\\.weight']
    _keys_to_ignore_on_save = ['encoder\\.embed_tokens\\.weight']
def job_fssdp_opt(p, data_source, tr, te, r, J):
    """Run the FSSD-opt job with an H0 null simulated by drawing from the
    covariance (FSSDH0SimCovDraw), then delegate to job_fssdq_opt."""
    cov_draw_null = gof.FSSDH0SimCovDraw(n_draw=2000, n_simulate=2000, seed=r)
    return job_fssdq_opt(p, data_source, tr, te, r, J, null_sim=cov_draw_null)
# NOTE(review): the line below is a decorator truncated by this source dump —
# presumably something like "@PIPELINES.register_module()"; confirm upstream.
_module()
class ToPIL(object):
    """Pipeline transform converting results['img'] from a numpy array to a PIL Image."""
    def __init__(self):
        pass
    def __call__(self, results):
        # Replace the array image in place and return the mutated dict.
        results['img'] = Image.fromarray(results['img'])
        return results
# NOTE(review): the ".parametrize" lines below are decorators whose
# "@pytest.mark" prefix was truncated by this source dump; confirm upstream.
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('action_size', [2])
.parametrize('num_heads', [2])
.parametrize('num_layers', [3])
.parametrize('max_timestep', [20])
.parametrize('context_size', [10])
.parametrize('dropout', [0.0])
.parametrize('batch_size', [32])
def test_continuous_decision_transformer(observation_shape: Shape, action_size: int, num_heads: int, num_layers: int, max_timestep: int, context_size: int, dropout: float, batch_size: int) -> None:
    """Shape and gradient smoke test for ContinuousDecisionTransformer."""
    encoder = DummyEncoder(observation_shape)
    model = ContinuousDecisionTransformer(encoder=encoder, embed_size=encoder.get_feature_size(), position_encoding=SimplePositionEncoding(encoder.get_feature_size(), max_timestep), action_size=action_size, num_heads=num_heads, num_layers=num_layers, context_size=context_size, attn_dropout=dropout, resid_dropout=dropout, embed_dropout=dropout, activation=torch.nn.ReLU())
    # Random batched (obs, action, return-to-go, timestep) inputs.
    x = create_torch_batched_observations(observation_shape, batch_size, context_size)
    action = torch.rand(batch_size, context_size, action_size)
    rtg = torch.rand(batch_size, context_size, 1)
    timesteps = torch.randint(0, max_timestep, size=(batch_size, context_size))
    y = model(x, action, rtg, timesteps)
    # Output must be one action prediction per context position.
    assert (y.shape == (batch_size, context_size, action_size))
    # Verify a backward pass actually updates parameters.
    check_parameter_updates(model, (x, action, rtg, timesteps))
class HRNet(hr.HRNet):
    """HRNet backbone variant: reuses hr.HRNet's multi-resolution stages but
    strips the classification head and exposes multi-scale feature maps.
    """
    def __init__(self, cfg, stride=32):
        super(HRNet, self).__init__()
        self.dim_in = 3
        self.spatial_in = [1]
        block_1 = Bottleneck
        block_2 = BasicBlock
        base_width = cfg.BACKBONE.HRNET.WIDTH
        use_global = cfg.BACKBONE.HRNET.USE_GLOBAL
        stage_with_conv = cfg.BACKBONE.HRNET.STAGE_WITH_CONV
        norm = cfg.BACKBONE.HRNET.NORM
        stage_with_ctx = cfg.BACKBONE.HRNET.STAGE_WITH_CTX
        self.avg_down = cfg.BACKBONE.HRNET.AVG_DOWN
        self.base_width = base_width
        self.norm = norm
        self.stride = stride
        # With output stride 4 only one branch is kept at the end of stage 4.
        multi_out = (1 if (self.stride == 4) else 4)
        self.inplanes = 64
        # Two stride-2 3x3 convs: overall /4 spatial reduction before stage 1.
        self.conv1 = nn.Conv2d(self.dim_in, 64, 3, 2, 1, bias=False)
        self.bn1 = make_norm(64, norm=norm.replace('Mix', ''))
        self.conv2 = nn.Conv2d(64, 64, 3, 2, 1, bias=False)
        self.bn2 = make_norm(64, norm=norm.replace('Mix', ''))
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block_1, 64, 4, 1, conv=stage_with_conv[0], ctx=stage_with_ctx[0])
        self.transition1 = self._make_transition(index=1, stride=2)
        self.stage2 = nn.Sequential(hr.StageModule(block_2, base_width, 2, 2, stage_with_conv[1], norm, stage_with_ctx[1], False))
        self.transition2 = self._make_transition(index=2, stride=2)
        self.stage3 = nn.Sequential(hr.StageModule(block_2, base_width, 3, 3, stage_with_conv[2], norm, stage_with_ctx[2], use_global), hr.StageModule(block_2, base_width, 3, 3, stage_with_conv[2], norm, stage_with_ctx[2], use_global), hr.StageModule(block_2, base_width, 3, 3, stage_with_conv[2], norm, stage_with_ctx[2], use_global), hr.StageModule(block_2, base_width, 3, 3, stage_with_conv[2], norm, stage_with_ctx[2], use_global))
        self.transition3 = self._make_transition(index=3, stride=2)
        self.stage4 = nn.Sequential(hr.StageModule(block_2, base_width, 4, 4, stage_with_conv[3], norm, stage_with_ctx[3], use_global), hr.StageModule(block_2, base_width, 4, 4, stage_with_conv[3], norm, stage_with_ctx[3], use_global), hr.StageModule(block_2, base_width, 4, multi_out, stage_with_conv[3], norm, stage_with_ctx[3], use_global))
        # Output dims/spatial scales are sliced from the parent tables by stride.
        self.dim_out = self.stage_out_dim[1:int(math.log(self.stride, 2))]
        self.spatial_out = self.stage_out_spatial[1:int(math.log(self.stride, 2))]
        # Drop the parent's classification head: this is a pure backbone.
        del self.incre_modules
        del self.downsamp_modules
        del self.final_layer
        del self.avgpool
        del self.classifier
        self._init_weights()
    def forward(self, x):
        """Return the list of multi-resolution feature maps (or a single-map
        slice when stride == 4)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Fan out to the parallel resolution branches.
        x = [trans(x) for trans in self.transition1]
        x = self.stage2(x)
        # The last transition in each group consumes the lowest-resolution map.
        x = [self.transition2[0](x[0]), self.transition2[1](x[1]), self.transition2[2](x[(- 1)])]
        x = self.stage3(x)
        x = [self.transition3[0](x[0]), self.transition3[1](x[1]), self.transition3[2](x[2]), self.transition3[3](x[(- 1)])]
        x = self.stage4(x)
        if (self.stride == 4):
            return x[:1]
        return x
class Laplace(nn.Module):
    """Laplace distribution parameterized by (mu, log-scale), with
    reparameterized sampling and elementwise log-density.
    """
    def __init__(self, mu=0, scale=1):
        super(Laplace, self).__init__()
        # log(1/2): the constant part of the Laplace log-density.
        self.normalization = Variable(torch.Tensor([(- math.log(2))]))
        self.mu = Variable(torch.Tensor([mu]))
        self.logscale = Variable(torch.Tensor([math.log(scale)]))
    def _check_inputs(self, size, mu_logscale):
        """Resolve (mu, logscale) from an explicit params tensor (last dim
        holding [mu, logscale]), from this module's defaults expanded to
        *size*, or from both; raises ValueError when neither is given."""
        if ((size is None) and (mu_logscale is None)):
            raise ValueError('Either one of size or params should be provided.')
        elif ((size is not None) and (mu_logscale is not None)):
            mu = mu_logscale.select((- 1), 0).expand(size)
            logscale = mu_logscale.select((- 1), 1).expand(size)
            return (mu, logscale)
        elif (size is not None):
            mu = self.mu.expand(size)
            logscale = self.logscale.expand(size)
            return (mu, logscale)
        elif (mu_logscale is not None):
            mu = mu_logscale.select((- 1), 0)
            logscale = mu_logscale.select((- 1), 1)
            return (mu, logscale)
        else:
            raise ValueError('Given invalid inputs: size={}, mu_logscale={})'.format(size, mu_logscale))
    def sample(self, size=None, params=None):
        """Draw a reparameterized sample via inverse-CDF on u ~ U(-0.5, 0.5)."""
        (mu, logscale) = self._check_inputs(size, params)
        scale = torch.exp(logscale)
        u = (Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5)
        # Inverse CDF: mu - scale * sign(u) * log(1 - 2|u|); eps (module-level)
        # guards log(0).
        sample = (mu - ((scale * torch.sign(u)) * torch.log(((1 - (2 * torch.abs(u))) + eps))))
        return sample
    def log_density(self, sample, params=None):
        """Elementwise log p(sample) = -|x - mu|/scale - log(2 * scale)."""
        if (params is not None):
            (mu, logscale) = self._check_inputs(None, params)
        else:
            (mu, logscale) = self._check_inputs(sample.size(), None)
        mu = mu.type_as(sample)
        logscale = logscale.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_scale = torch.exp((- logscale))
        ins_exp = ((- torch.abs((sample - mu))) * inv_scale)
        return ((ins_exp + c) - logscale)
    def get_params(self):
        # Concatenated [mu, logscale] as a single tensor.
        return torch.cat([self.mu, self.logscale])
    def nparams(self):
        # Two distribution parameters: mu and logscale.
        return 2
    def ndim(self):
        return 1
    def is_reparameterizable(self):
        # sample() is differentiable w.r.t. mu and logscale.
        return True
    def __repr__(self):
        tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logscale.exp().data[0]))
        return tmpstr
# NOTE(review): the ".parametrize" line below is a decorator whose
# "@pytest.mark" prefix was truncated by this source dump; confirm upstream.
.parametrize('directory,expected_result', [('first_level', False), ('first_level/second_level', False), (os.getcwd(), False), (Path(os.getcwd(), 'first_level'), False), (Path(os.getcwd(), 'first_level/second_level'), False), ('first_level/second_level/..', False), ('first_level/../first_level', False), ('..', True), ('../../file', True), ('/home/naive_hacker', True), ('first_level/second_level/../../..', True), ('..', True), ('../../file', True)])
def test_is_directory_traversal(directory, expected_result):
    """Paths escaping the working directory must be flagged as traversal."""
    assert (is_directory_traversal(directory) is expected_result)
def split_date_duration(amr):
    """Normalize date-like lemmas in *amr*.

    Repeatedly scans amr.lemmas for a lemma of the form '-YYYYMMDD' or
    '-YYMMDD' and replaces that single-token span with the bare digits,
    tagged POS 'CD' and NER 'DATE'. Stops when no such lemma remains.
    """
    while True:
        index = None
        x = None
        for (i, lemma) in enumerate(amr.lemmas):
            # Leading '-' followed by exactly 8 (or 6) digits marks a date token.
            if (re.search('^-\\d{8}$', lemma) or re.search('^-\\d{6}$', lemma)):
                index = i
                (_, x) = lemma.split('-')
                break
        else:
            # for/else: no match found in this pass — nothing left to rewrite.
            break
        amr.replace_span([index], [x], ['CD'], ['DATE'])
# NOTE(review): the line below is a decorator truncated by this source dump —
# presumably a factory registration such as
# "@factory.register('vectorization', 'fasttext', FastTextParams)".
('vectorization', 'fasttext', FastTextParams)
class FastText(VectorizationAlgo):
    """FastText-based vectorizer for log lines: fit() trains a gensim
    FastText model on tokenized lines, transform() maps each line to a
    flattened vector of per-token embeddings.
    """
    def __init__(self, params: FastTextParams):
        self.params = params
        self.model = None
    def fit(self, loglines: pd.Series):
        """Train the FastText model on *loglines* (one sentence per row)."""
        max_token_len = self.params.max_token_len
        doc = []
        for sentence in loglines:
            # Truncate each line to the configured number of tokens.
            token_list = sentence.split(' ')[:max_token_len]
            for tk in token_list:
                # '*' is a wildcard placeholder token — skipped entirely.
                if (tk != '*'):
                    doc.append(word_tokenize(tk.lower()))
        self.model = gensim.models.FastText(doc, vector_size=self.params.vector_size, window=self.params.window, min_count=self.params.min_count, sample=self.params.sample, workers=self.params.workers, sg=self.params.sg, epochs=self.params.epochs)
    def transform(self, loglines: pd.Series) -> pd.Series:
        """Return a Series (same index as *loglines*) of flattened per-line
        embedding arrays; requires fit() to have been called first."""
        log_vectors = []
        max_len = 0
        for ll in loglines:
            token_list = ll.split(' ')
            log_vector = []
            token_list = token_list[:self.params.max_token_len]
            max_len = max(max_len, len(token_list))
            for tk in token_list:
                if (tk == '*'):
                    continue
                # First sub-token's embedding stands in for the whole token.
                log_vector.append(self.model.wv[word_tokenize(tk.lower())][0])
            log_vectors.append(np.array(log_vector).flatten())
        log_vector_series = pd.Series(log_vectors, index=loglines.index)
        return log_vector_series
    def summary(self):
        # NOTE(review): gensim FastText models do not define .summary() —
        # verify this method is ever called / intended.
        return self.model.summary()
def model_wh(resolution_str):
    """Parse a 'WIDTHxHEIGHT' string into an (int, int) tuple.

    Raises:
        Exception: when either dimension is not a multiple of 16.
    """
    width, height = (int(part) for part in resolution_str.split('x'))
    if width % 16 or height % 16:
        raise Exception('Width and height should be multiples of 16. w=%d, h=%d' % (width, height))
    return (int(width), int(height))
def _as_tensor(o):
    """Return *o* unchanged when it is a SKIP_TYPES instance, an autograd
    Variable, or already a torch tensor; otherwise convert it to a tensor
    by way of numpy."""
    from torch.autograd import Variable
    passthrough = (isinstance(o, SKIP_TYPES)
                   or isinstance(o, Variable)
                   or torch.is_tensor(o))
    if passthrough:
        return o
    return torch.from_numpy(np.array(o))
def bn_opp_lenet(image, test=False, channel_last=False, w_bias=False):
    """LeNet-style network with batch normalization applied *before* each
    conv/affine layer ("opposite" BN placement).

    Args:
        image: input variable (NCHW, or NHWC when channel_last=True).
        test: when True, batch norm uses running stats (batch_stat=False).
        channel_last: layout flag forwarded to the conv/pool/BN-axis helpers.
        w_bias: whether the convolutions include a bias term.

    Returns:
        The 10-way affine output (logits), no softmax applied.
    """
    axes = get_channel_axes(image, channel_last)
    h = PF.batch_normalization(image, axes=axes, batch_stat=(not test), name='conv1-bn')
    h = PF.convolution(h, 16, (5, 5), (1, 1), with_bias=w_bias, channel_last=channel_last, name='conv1')
    # Fix: pass channel_last here too — it was omitted (unlike the conv2
    # pooling below), so NHWC inputs were pooled over the wrong axes.
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)
    axes = get_channel_axes(h, channel_last)
    h = PF.batch_normalization(h, axes=axes, batch_stat=(not test), name='conv2-bn')
    h = PF.convolution(h, 16, (5, 5), (1, 1), with_bias=w_bias, channel_last=channel_last, name='conv2')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)
    axes = get_channel_axes(h, channel_last)
    h = PF.batch_normalization(h, axes=axes, batch_stat=(not test), name='fc1-bn')
    h = PF.affine(h, 10, with_bias=True, name='fc1')
    h = F.relu(h)
    pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
def details_of_diff(rep, given):
    """Print a human-readable linking report: the gold mention->entity map,
    then each candidate list's top-5 ids ranked by surface score and again
    re-ranked by popularity score."""
    mention_map = ['{} --> {}'.format(given['entities'][e]['mention'], e) for e in given['entities']]
    print(mention_map)
    print('By Surface Score')
    for candidates in rep:
        if candidates:
            print(candidates[0]['mention'], '\t-->\t', [c['id'] for c in candidates[:5]])
    print('By Pop Score')
    reranked = [sorted(candidates, key=(lambda c: c['pop_score']), reverse=True) for candidates in rep]
    for candidates in reranked:
        if candidates:
            print(candidates[0]['mention'], '\t-->\t', [c['id'] for c in candidates[:5]])
def get_data(root: str, name: str) -> Tuple[(Data, int, int)]:
    """Dispatch on the case-insensitive dataset *name* to the matching loader.

    Raises:
        NotImplementedError: for unrecognized dataset names.
    """
    key = name.lower()
    if key in ('cora', 'citeseer', 'pubmed'):
        return get_planetoid(root, name)
    if key in ('coauthorcs', 'coauthorphysics'):
        # Strip the 8-character 'coauthor' prefix before delegating.
        return get_coauthor(root, name[8:])
    if key in ('amazoncomputers', 'amazonphoto'):
        # Strip the 6-character 'amazon' prefix.
        return get_amazon(root, name[6:])
    if key == 'wikics':
        return get_wikics(root)
    if key in ('cluster', 'pattern'):
        return get_sbm(root, name)
    if key == 'reddit':
        return get_reddit(root)
    if key == 'ppi':
        return get_ppi(root)
    if key == 'flickr':
        return get_flickr(root)
    if key == 'yelp':
        return get_yelp(root)
    if key in ('ogbn-arxiv', 'arxiv'):
        return get_arxiv(root)
    if key in ('ogbn-products', 'products'):
        return get_products(root)
    raise NotImplementedError
class AudioConfig():
    """Configuration for audio preprocessing / feature extraction.

    NOTE(review): this looks like it was meant to carry a @dataclass
    decorator that the source dump truncated — as written these are
    class-level attributes; confirm upstream.
    """
    audio_extension: str = 'pcm'
    sample_rate: int = 16000           # Hz
    frame_length: int = 20             # analysis frame length (ms)
    frame_shift: int = 10              # hop between frames (ms)
    normalize: bool = True
    del_silence: bool = True
    feature_extract_by: str = 'kaldi'  # backend used for feature extraction
    time_mask_num: int = 4             # SpecAugment: number of time masks
    freq_mask_num: int = 2             # SpecAugment: number of frequency masks
    spec_augment: bool = True
    input_reverse: bool = False
def write_pfm(file, image, scale=1):
    """Write *image* to *file* in PFM (Portable FloatMap) format.

    Args:
        file: output file path.
        image: float32 numpy array, H x W x 3 (color 'PF'), H x W x 1 or
            H x W (grayscale 'Pf'); flipped vertically since PFM is stored
            bottom-up.
        scale: PFM scale factor; its sign encodes endianness (negative
            means little-endian).

    Raises:
        Exception: when dtype is not float32 or the shape is unsupported.
    """
    # Validate before opening so a bad call does not leave an empty file.
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    image = np.flipud(image)
    if (len(image.shape) == 3) and (image.shape[2] == 3):
        color = True
    elif (len(image.shape) == 2) or ((len(image.shape) == 3) and (image.shape[2] == 1)):
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    # Fix: use a context manager so the handle is always closed — the
    # original opened the file and never closed it.
    with open(file, 'wb') as fh:
        fh.write(bytes('PF\n', 'UTF-8') if color else bytes('Pf\n', 'UTF-8'))
        fh.write(bytes('%d %d\n' % (image.shape[1], image.shape[0]), 'UTF-8'))
        endian = image.dtype.byteorder
        if (endian == '<') or ((endian == '=') and (sys.byteorder == 'little')):
            scale = -scale
        fh.write(bytes('%f\n' % scale, 'UTF-8'))
        image.tofile(fh)
class FileSourceDescriptor(SourceDescriptor):
    """Source descriptor backed by a file on disk (Cython-style).

    Stores both the absolute filename and a workdir-relative path for
    file tables, and lazily reads/caches the file's lines per
    (encoding, error_handling) key.
    """
    def __init__(self, filename, path_description=None):
        filename = Utils.decode_filename(filename)
        self.path_description = (path_description or filename)
        self.filename = filename
        # Prefer a path relative to the current working directory when the
        # file lives under it.
        workdir = (os.path.abspath('.') + os.sep)
        self.file_path = (filename[len(workdir):] if filename.startswith(workdir) else filename)
        self.set_file_type_from_name(filename)
        self._cmp_name = filename
        # (encoding, error_handling) -> list of lines, or None as a
        # "seen once, not yet cached" marker.
        self._lines = {}
    def get_lines(self, encoding=None, error_handling=None):
        """Return the file's lines, caching per (encoding, error_handling).

        Deliberately does NOT cache the first read: the first access stores
        None as a marker, and only a second access for the same key caches
        the actual line list.
        """
        key = (encoding, error_handling)
        try:
            lines = self._lines[key]
            if (lines is not None):
                return lines
        except KeyError:
            pass
        with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f:
            lines = list(f)
        if (key in self._lines):
            # Second read for this key: worth keeping in memory now.
            self._lines[key] = lines
        else:
            # First read: remember we saw it, but drop the contents.
            self._lines[key] = None
        return lines
    def get_description(self):
        # relpath can raise ValueError (e.g. different drive on Windows).
        try:
            return os.path.relpath(self.path_description)
        except ValueError:
            return self.path_description
    def get_error_description(self):
        """Return the path shown in error messages, relative to cwd if possible."""
        path = self.filename
        cwd = Utils.decode_filename((os.getcwd() + os.path.sep))
        if path.startswith(cwd):
            return path[len(cwd):]
        return path
    def get_filenametable_entry(self):
        return self.file_path
    def __eq__(self, other):
        return (isinstance(other, FileSourceDescriptor) and (self.filename == other.filename))
    def __hash__(self):
        # Consistent with __eq__: identity is the filename.
        return hash(self.filename)
    def __repr__(self):
        return ('<FileSourceDescriptor:%s>' % self.filename)
class MultiHop(dspy.Module):
    """Two-hop retrieval-augmented QA module (DSPy).

    Hop 1 generates a search query from the question; hop 2 generates a
    follow-up query conditioned on the first hop's retrieved context. The
    answer is produced from the deduplicated union of retrieved passages.
    """
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought('question -> search_query')
        # Fix: this was left as None in the source dump; forward() needs a
        # context-conditioned query generator for the second hop.
        self.generate_query_from_context = dspy.ChainOfThought('context, question -> search_query')
        self.generate_answer = dspy.ChainOfThought('context, question -> answer')
    def forward(self, question):
        passages = []
        search_query = self.generate_query(question=question).search_query
        passages += self.retrieve(search_query).passages
        # Fix: the dump had `search_query2 = None` and `passages += None`
        # (a guaranteed TypeError); restore the second retrieval hop.
        search_query2 = self.generate_query_from_context(context=deduplicate(passages), question=question).search_query
        passages += self.retrieve(search_query2).passages
        return self.generate_answer(context=deduplicate(passages), question=question)
def ip_interface(address):
    """Return an IPv4Interface or IPv6Interface parsed from *address*.

    IPv4 is attempted first, then IPv6.

    Raises:
        ValueError: when *address* parses as neither family.
    """
    for interface_cls in (IPv4Interface, IPv6Interface):
        try:
            return interface_cls(address)
        except (AddressValueError, NetmaskValueError):
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' % address)
# NOTE(review): the ".parametrize" lines below are decorators whose
# "@pytest.mark" prefix was truncated by this source dump; confirm upstream.
.parametrize('problem, input_dim', [(Branin, 2), (ScaledBranin, 2), (SimpleQuadratic, 2), (GramacyLee, 1), (Michalewicz2, 2), (Michalewicz5, 5), (Michalewicz10, 10), (LogarithmicGoldsteinPrice, 2), (Hartmann3, 3), (Rosenbrock4, 4), (Shekel4, 4), (Ackley5, 5), (Hartmann6, 6), (Trid10, 10), (Levy8, 8)])
.parametrize('num_obs', [5, 1])
def test_search_space_has_correct_shape_and_default_dtype(problem: SingleObjectiveTestProblem, input_dim: int, num_obs: int) -> None:
    """Samples from each problem's search space must be float64 with shape [num_obs, input_dim]."""
    x = problem.search_space.sample(num_obs)
    assert (x.dtype == tf.float64)
    tf.debugging.assert_shapes([(x, [num_obs, input_dim])])
class LabelGuessor(object):
    """Pseudo-label generator for semi-supervised training.

    Maintains a feature-space Cluster over labeled samples; once every class
    has enough samples pooled, assigns cluster-based pseudo-labels to
    unlabeled batches. Model weights are snapshotted and restored around the
    no-grad forward pass so label guessing never perturbs training state.
    """
    def __init__(self, args):
        # Cluster capacity: 10x labeled-per-class; features are 128-d.
        self.label_generator = Cluster(((args.n_labeled // args.n_classes) * 10), num_class=args.n_classes, feature_len=128).cuda()
        self.dataset = args.dataset
        self.args = args
        # 0 until every class pool has >10 samples, then 1 (pseudo-labels on).
        self.start_flag = 0
    def init_for_add_sample(self, epoch, start_epoch):
        """Set the per-epoch sample-admission schedule on the cluster.

        The growth rate and cap depend on (dataset, n_labeled).
        NOTE(review): num_add_sample is unbound if (dataset, n_labeled) does
        not match any branch below — callers must use a supported combination.
        """
        if (self.dataset == 'CIFAR10'):
            if (self.args.n_labeled == 40):
                num_add_sample = min((int(((epoch - start_epoch) * 0.1)) + 1), 4)
            if (self.args.n_labeled == 250):
                num_add_sample = min(((epoch - start_epoch) * 2), 50)
            if (self.args.n_labeled == 4000):
                num_add_sample = min(((epoch - start_epoch) * 10), 800)
        if (self.dataset == 'CIFAR100'):
            if (self.args.n_labeled == 400):
                num_add_sample = min((int(((epoch - start_epoch) * 0.01)) + 1), 4)
            if (self.args.n_labeled == 2500):
                num_add_sample = min((int(((epoch - start_epoch) * 0.2)) + 1), 50)
            if (self.args.n_labeled == 10000):
                num_add_sample = min(((epoch - start_epoch) * 2), 200)
        if (self.dataset == 'SVHN'):
            if (self.args.n_labeled == 40):
                num_add_sample = min((int(((epoch - start_epoch) * 0.1)) + 1), 4)
            if (self.args.n_labeled == 250):
                num_add_sample = min((int(((epoch - start_epoch) * 1)) + 1), 50)
            if (self.args.n_labeled == 1000):
                num_add_sample = min(((epoch - start_epoch) * 2), 200)
        if (self.dataset == 'STL10'):
            if (self.args.n_labeled == 40):
                num_add_sample = min((int(((epoch - start_epoch) * 0.1)) + 1), 4)
            if (self.args.n_labeled == 250):
                num_add_sample = min(((epoch - start_epoch) * 1), 50)
            if (self.args.n_labeled == 1000):
                num_add_sample = min(((epoch - start_epoch) * 2), 200)
        self.label_generator.init_(num_add_sample, (num_add_sample + (self.args.n_labeled // self.args.n_classes)))
    def __call__(self, model, img_l_weak, ims_u_weak, lbs_l, unlabeled_index=None):
        """Return (pseudo_labels, selection_mask) for the unlabeled batch.

        Until warm-up completes (start_flag == 0), returns an empty
        selection (all pseudo labels are -1, mask selects none).
        """
        # Snapshot weights: the forward pass below must not change BN stats etc.
        org_state = {k: v.clone().detach() for (k, v) in model.state_dict().items()}
        is_train = model.training
        with torch.no_grad():
            input = torch.cat([img_l_weak, ims_u_weak], dim=0).detach()
            (f_l, f_u, pred_l_w, pred_u_w) = model(input)
            # Feed labeled features into the cluster pools.
            self.label_generator.add_sample(f_l.detach(), lbs_l.detach())
            if (self.start_flag == 0):
                # Warm-up: wait until every class pool holds >10 samples.
                count = 0
                for i in range(self.args.n_classes):
                    if (self.label_generator.class_pool.num_imgs[i] > 10):
                        count += 1
                if (count == self.args.n_classes):
                    self.start_flag = 1
                pseudo = torch.zeros(f_u.size(0)).fill_((- 1))
                idx = (pseudo > (- 1))
                lbs = pseudo[idx]
                model.load_state_dict(org_state)
                if is_train:
                    model.train()
                else:
                    model.eval()
                return (lbs.detach(), idx)
            else:
                # Warm: cluster assigns labels; -1 marks "no confident label".
                pseudo = self.label_generator.forward(f_u.detach(), pred_u_w.detach(), unlabeled_index).long().cuda()
                idx = (pseudo > (- 1))
                lbs = pseudo[idx]
                model.load_state_dict(org_state)
                if is_train:
                    model.train()
                else:
                    model.eval()
                return (lbs.detach(), idx)
# NOTE(review): "_utils.test()" below is a decorator truncated by this source
# dump — presumably "@test_utils.test()"; the inner test_fun also appears to
# be missing its "@ti.kernel"/"@ti.func" decorator. Confirm upstream.
_utils.test()
def test_trace_op():
    """Matrix.trace() must sum the diagonal and reject non-square matrices."""
    def test_fun() -> ti.f32:
        x = ti.Matrix([[0.1, 3.0], [5.0, 7.0]])
        return x.trace()
    assert (np.abs((test_fun() - 7.1)) < 1e-06)
    x = ti.Matrix([[0.1, 3.0], [5.0, 7.0]])
    assert (np.abs((x.trace() - 7.1)) < 1e-06)
    # Non-square matrices must raise at compile time, both inline...
    with pytest.raises(TaichiCompilationError, match='expected a square matrix, got shape \\(3, 2\\)'):
        x = ti.Matrix([[0.1, 3.0], [5.0, 7.0], [1.0, 2.0]])
        print(x.trace())
    # ...and inside a function body.
    def failed_func():
        x = ti.Matrix([[0.1, 3.0], [5.0, 7.0], [1.0, 2.0]])
        print(x.trace())
    with pytest.raises(TaichiCompilationError, match='expected a square matrix, got shape \\(3, 2\\)'):
        failed_func()
class EfficientNet(nn.Module):
    """EfficientNet classifier built from a stage configuration list.

    Stages flagged stem become plain Conv-BN-activation blocks; others
    become repeated inverted-residual blocks with squeeze-excitation and a
    linearly increasing stochastic-depth drop rate.
    """
    def __init__(self, nc, input_channels=3, n_classes=1000, stage_config=DEFAULT_CONFIG, survival_prob=0.8, se_ratio=0.25, p_drop_out=0.2, nl_type=NonLinearType.SWISH, batch_norm_epsilon=0.001, batch_norm_momentum=0.01):
        super(EfficientNet, self).__init__()
        blocks_list = []
        n_channels = input_channels
        base_drop_rate = (1.0 - survival_prob)
        # NOTE(review): sc.stem * sc.n_repeat counts only stem stages here —
        # verify this matches the intended stochastic-depth schedule.
        n_blocks = sum([(sc.stem * sc.n_repeat) for sc in stage_config])
        # Per-block increment of the stochastic-depth drop probability.
        drop_rate = (base_drop_rate / n_blocks)
        past_index = 0
        for (i, sc) in enumerate(stage_config):
            if sc.stem:
                blocks_list.append(ConvBNNonLinear(nc, n_channels, sc.output_channels, kernel_size=sc.kernel_size, stride=sc.stride_first, nl_type=nl_type, batch_norm_epsilon=batch_norm_epsilon, batch_norm_momentum=batch_norm_momentum, tf_padding=True))
            else:
                # Survival prob decays linearly with overall block depth.
                survival_prob_start = (1.0 - (drop_rate * past_index))
                blocks_list.append(RepeatedInvertedResidual(nc, sc.n_repeat, n_channels, sc.output_channels, sc.stride_first, expand_ratio=sc.expand_ratio, kernel_size=sc.kernel_size, nl_type=nl_type, se_ratio=se_ratio, survival_prob_start=survival_prob_start, drop_rate=drop_rate, batch_norm_epsilon=batch_norm_epsilon, tf_padding=True))
                past_index += sc.n_repeat
            n_channels = sc.output_channels
        self.conv_blocks = nn.Sequential(*blocks_list)
        self.conv_blocks_list = blocks_list
        # 1x1 head conv to 1280 channels, then GAP -> dropout -> classifier.
        self.conv_head = ConvBNNonLinear(nc, n_channels, 1280, kernel_size=1, nl_type=nl_type, batch_norm_epsilon=batch_norm_epsilon)
        self.gap = GlobalAvgPool2d()
        self.drop_out = nn.Dropout(p=p_drop_out)
        self.fc = layers.FullyConnected(nc, 1280, n_classes)
    def forward(self, x):
        """Return class logits; intermediate per-block features are collected
        into res_list (currently unused by callers of the return value)."""
        res_list = []
        for (i, b) in enumerate(self.conv_blocks_list):
            x = b(x)
            res_list.append(x)
        x = self.conv_head(x)
        x = self.gap(x)
        # Squeeze the two spatial singleton dims left by global pooling.
        x = self.drop_out(x.squeeze(dim=(- 1)).squeeze(dim=(- 1)))
        return self.fc(x)
def conv1x1(in_chs, out_chs=16):
    """Return a 1x1 pointwise convolution (stride 1, no padding)."""
    pointwise = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, padding=0)
    return pointwise
# NOTE(review): "_utils.test(...)" below is a decorator truncated by this
# source dump — presumably "@test_utils.test(require=ti.extension.sparse)";
# inc_y also appears to be missing its "@ti.kernel" decorator. Confirm upstream.
_utils.test(require=ti.extension.sparse)
def test_loop_unique_binary_op_1d():
    """Struct-for over a sparse field with loop_unique index arithmetic."""
    (x, y) = (ti.field(ti.i32), ti.field(ti.i32))
    N = 16
    ti.root.pointer(ti.i, N).place(x)
    ti.root.pointer(ti.i, N).place(y)
    def inc_y():
        for i in x:
            # loop_unique asserts each x[i] value is distinct across iterations.
            a = loop_unique(x[i])
            y[(a + 1)] += 1
    x[1] = 2
    x[2] = 3
    x[7] = 5
    y[3] = 2
    y[4] = 3
    inc_y()
    # y[v+1] incremented once per active x value v in {2, 3, 5}.
    expected_result = {3: 3, 4: 4, 6: 1}
    for i in range(N):
        assert (y[i] == expected_result.get(i, 0))
def gen_from_webdataset_shards(shards, multimodal_cfg, tokenizer, is_train: bool):
    """Yield preprocessed (multimodal -> LM) samples from WebDataset shards.

    Shards are resampled/shuffled only in training mode; decoding errors are
    warned and skipped via wds.warn_and_continue.
    """
    _preprocess_multimodal = partial(preprocess_multimodal_mappable, multimodal_cfg=multimodal_cfg)
    _preprocess_for_lm = partial(preprocess_for_lm_mappable, tokenizer=tokenizer)
    dataset = wds.WebDataset(shards, resampled=is_train, handler=wds.warn_and_continue, shardshuffle=is_train)
    dataset = dataset.decode(wds.imagehandler('torchrgb'))
    dataset = dataset.compose(webdataset_element_to_conversation)
    dataset = dataset.map(_preprocess_multimodal).map(_preprocess_for_lm)
    if is_train:
        # NOTE(review): the right-hand side of this assignment is missing in
        # this source dump (syntax error) — the intended epoch length must be
        # restored from the original project.
        dataset_len =
        dataset = dataset.repeat(2).with_epoch(dataset_len)
    for sample in dataset:
        (yield sample)
# NOTE(review): the line below is a decorator truncated by this source dump —
# presumably "@pytest.fixture(scope='package')". Confirm upstream.
(scope='package')
def pandas_interactions():
    """Package-scoped fixture: a small user/item/timestamp interactions frame."""
    interactions = pd.DataFrame({'user_id': [1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 4, 4], 'item_id': [1, 2, 1, 3, 4, 2, 1, 2, 3, 4, 5, 6], 'timestamp': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]})
    return interactions
# NOTE(review): "_model" below is a decorator truncated by this source dump —
# presumably timm's "@register_model". Confirm upstream.
_model
def poolformer_s12(pretrained=False, **kwargs):
    """Build PoolFormer-S12 (12 blocks: 2/2/6/2 per stage).

    When pretrained=True, weights are downloaded from model_urls and loaded.
    """
    layers = [2, 2, 6, 2]
    embed_dims = [64, 128, 320, 512]
    mlp_ratios = [4, 4, 4, 4]
    downsamples = [True, True, True, True]
    model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=mlp_ratios, downsamples=downsamples, **kwargs)
    model.default_cfg = default_cfgs['poolformer_s']
    if pretrained:
        url = model_urls['poolformer_s12']
        checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location='cpu', check_hash=True)
        model.load_state_dict(checkpoint)
    return model
class UnpairedImagePaths(Dataset):
    """Dataset yielding unpaired image pairs from two folders (e.g. for
    style transfer / CycleGAN-style training).

    Each side's paths can come from a plain folder listing or be filtered by
    a WikiArt CSV (genre == key). When a numpy folder is given for a side,
    only images that have a matching '<name>.npy' are kept and that array is
    returned alongside the image. Image 2 is sampled at random per item.
    """
    def __init__(self, size=None, random_crop=False, folder1=None, folder2=None, numpy_folder1=None, numpy_folder2=None, wikiart_info1=None, wikiart_key1=None, wikiart_info2=None, wikiart_key2=None):
        self.size = size
        self.random_crop = random_crop
        self.numpy_folder1 = numpy_folder1
        self.numpy_folder2 = numpy_folder2
        paths1 = []
        numpy_paths1 = []
        # Side 1: WikiArt-CSV-filtered listing, or the whole folder.
        if ((wikiart_info1 is not None) and (wikiart_key1 is not None)):
            with open(wikiart_info1, newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    if (row['genre'] == wikiart_key1):
                        paths1.append(os.path.join(folder1, row['filename']))
        else:
            paths_ = os.listdir(folder1)
            for path in paths_:
                paths1.append(os.path.join(folder1, path))
        # Keep only images with a companion .npy when a numpy folder is set.
        if (numpy_folder1 is not None):
            image_paths1 = []
            for image_path in paths1:
                path = os.path.basename(image_path)
                numpy_path = os.path.join(numpy_folder1, (path + '.npy'))
                if os.path.exists(numpy_path):
                    image_paths1.append(image_path)
                    numpy_paths1.append(numpy_path)
            paths1 = image_paths1
        paths2 = []
        numpy_paths2 = []
        # Side 2: same selection logic as side 1.
        if ((wikiart_info2 is not None) and (wikiart_key2 is not None)):
            with open(wikiart_info2, newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    if (row['genre'] == wikiart_key2):
                        paths2.append(os.path.join(folder2, row['filename']))
        else:
            paths_ = os.listdir(folder2)
            for path in paths_:
                paths2.append(os.path.join(folder2, path))
        if (numpy_folder2 is not None):
            image_paths2 = []
            for image_path in paths2:
                path = os.path.basename(image_path)
                numpy_path = os.path.join(numpy_folder2, (path + '.npy'))
                if os.path.exists(numpy_path):
                    image_paths2.append(image_path)
                    numpy_paths2.append(numpy_path)
            paths2 = image_paths2
        # Length is the smaller side; a singleton side 2 is tiled to match.
        if (len(paths2) > 1):
            self._length = min(len(paths1), len(paths2))
        else:
            paths2 = (paths2 * len(paths1))
            self._length = len(paths1)
        self.paths1 = paths1
        self.paths2 = paths2
        self.numpy_paths1 = numpy_paths1
        self.numpy_paths2 = numpy_paths2
        # Resize + (center or random) crop, or identity when size is unset.
        if ((self.size is not None) and (self.size > 0)):
            self.rescaler = albumentations.Resize(height=self.size, width=self.size)
            if (not self.random_crop):
                self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
            self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])
        else:
            self.preprocessor = (lambda **kwargs: kwargs)
    def __len__(self):
        return self._length
    def preprocess_image(self, image_path):
        """Load, RGB-convert, resize/crop and scale an image to [-1, 1].

        Returns (image_array, image_path)."""
        image = Image.open(image_path)
        if (not (image.mode == 'RGB')):
            image = image.convert('RGB')
        image = np.array(image).astype(np.uint8)
        image = self.preprocessor(image=image)['image']
        image = ((image / 127.5) - 1.0).astype(np.float32)
        return (image, image_path)
    def __getitem__(self, i):
        """Return dict with image1 (indexed) and a randomly drawn image2,
        plus their paths and optional companion numpy arrays."""
        example = dict()
        (example['image1'], example['image1_path']) = self.preprocess_image(self.paths1[i])
        # Unpaired: image2 index is sampled independently of i.
        i2 = random.randrange(len(self.paths2))
        (example['image2'], example['image2_path']) = self.preprocess_image(self.paths2[i2])
        if (self.numpy_folder1 is not None):
            example['numpy1'] = np.load(self.numpy_paths1[i])
        if (self.numpy_folder2 is not None):
            example['numpy2'] = np.load(self.numpy_paths2[i2])
        return example
def aggregate_passage_embeddings_whole_doc(run: dict, p_emb_dict: dict, aggregation_mode: str):
    """For each query in *run*, map every retrieved passage's document id
    (the part of the passage id before '_') to that document's aggregated
    embedding (per *aggregation_mode*). Returns {q_id: {doc_id: embedding}}.
    """
    ids_with_emb = dict_ids_with_embeddings(p_emb_dict)
    doc_embeddings = aggregate_ids_with_embeddings(ids_with_emb, aggregation_mode)
    aggregated = {}
    for q_id, retrieved_lists in run.items():
        per_query = {}
        aggregated[q_id] = per_query
        for ranked_list in retrieved_lists.values():
            for p_id in ranked_list:
                doc_id = p_id.split('_')[0]
                per_query[doc_id] = doc_embeddings.get(doc_id)
    return aggregated
class LossWrapper(nn.Module):
    """Segmentation loss wrapper: applies a configurable loss (BCE or a
    log-Dice variant) to (prediction, target) tensor pairs named in args.
    """
    def get_args(parser):
        # Registers this module's CLI options on the shared parser.
        parser.add('--seg_loss_type', type=str, default='bce')
        parser.add('--seg_loss_weights', type=float, default=10.0)
        parser.add('--seg_loss_apply_to', type=str, default='pred_target_inf_segs_logits, target_segs', help='can specify multiple tensor names from data_dict')
        parser.add('--seg_loss_names', type=str, default='BCE', help='name for each loss')
    def __init__(self, args):
        super(LossWrapper, self).__init__()
        # ';' separates loss instances, ',' separates (pred, target) names.
        self.apply_to = [rn_utils.parse_str_to_list(s, sep=',') for s in rn_utils.parse_str_to_list(args.seg_loss_apply_to, sep=';')]
        self.names = rn_utils.parse_str_to_list(args.seg_loss_names, sep=',')
        losses = {'bce': F.binary_cross_entropy_with_logits, 'dice': (lambda fake_seg, real_seg: (torch.log(((fake_seg ** 2).sum() + (real_seg ** 2).sum())) - torch.log(((2 * fake_seg) * real_seg).sum())))}
        self.loss = losses[args.seg_loss_type]
        self.weights = args.seg_loss_weights
        self.eps = args.eps
    def forward(self, data_dict, losses_dict):
        """Compute each configured loss and store it as 'G_<name>' in
        losses_dict; returns the (mutated) losses_dict."""
        for (i, (tensor_name, target_tensor_name)) in enumerate(self.apply_to):
            real_segs = data_dict[target_tensor_name]
            fake_segs = data_dict[tensor_name]
            # Collapse the (batch, time) leading dims into one.
            (b, t) = fake_segs.shape[:2]
            fake_segs = fake_segs.view((b * t), *fake_segs.shape[2:])
            # Match dtype when predictions are half precision.
            if ('HalfTensor' in fake_segs.type()):
                real_segs = real_segs.type(fake_segs.type())
            real_segs = real_segs.view((b * t), *real_segs.shape[2:])
            losses_dict[('G_' + self.names[i])] = (self.loss(fake_segs, real_segs) * self.weights)
        return losses_dict
class AutoStaticCheck(TdbPlugin, TdbPluginCmd):
    """Debugger plugin exposing the 'static-check' command: runs named
    static checkers, and auto-runs configured checkers after context load.
    """
    name = 'static-check'
    def default(self, args: str):
        """Handle the command line: '?' or empty lists the checkers; running
        a check requires a loaded context; otherwise run each named checker."""
        if (args.strip() in {'?', ''}):
            self.tdb.message(self.tdb.checker)
            return
        elif (self.tdb.status == TdbStatus.UNINIT):
            self.tdb.error('do check after load context, type s/start to load')
            return
        # Comma-separated list of checker names.
        for arg in args.split(','):
            self.tdb.checker.do_checker(arg)
            self.tdb.message(f'[DONE] {arg}')
    def after_load(self, tdb: TdbCmdBackend):
        """Run every checker listed in tdb.extra_check once loading finishes."""
        for check_name in tdb.extra_check:
            tdb.checker.do_checker(check_name)
            tdb.message(f'[DONE] {check_name}')
def _import_cffi():
    """Lazily initialize the module globals ffi and CData from cffi.

    On success ffi becomes an FFI instance; when cffi is unavailable ffi is
    set to False so later calls do not retry. Any non-None state (either an
    FFI instance or the False marker) is left untouched.
    """
    global ffi, CData
    if ffi is None:
        try:
            import cffi
            ffi = cffi.FFI()
            CData = ffi.CData
        except ImportError:
            # Remember the failure so subsequent calls are no-ops.
            ffi = False
def prune(action_sequences, action):
    """Decide whether appending *action* to *action_sequences* must be pruned.

    Encodes mutual-exclusion rules between action groups, a no-immediate-
    repeat rule for 'A23', ordering constraints around 'A15' and the
    'A4'/'A22' group, and a cap of three occurrences per action.

    Returns True when any rule is violated, else False.
    """
    history = action_sequences
    last = history[-1] if history else None

    # Rule: at most one action from the A7-A9 group may ever appear.
    group = {'A7', 'A8', 'A9'}
    if action in group and (group & set(history)):
        return True
    # Rule: at most one action from the A10-A14 group may ever appear.
    group = {'A10', 'A11', 'A12', 'A13', 'A14'}
    if action in group and (group & set(history)):
        return True
    # Rule: 'A23' must not repeat immediately.
    if action == 'A23' and last == 'A23':
        return True
    # Rule: 'A15' may not directly follow any of these actions.
    if action == 'A15' and last in {'A1', 'A5', 'A6', 'A7', 'A8', 'A9', 'A23'}:
        return True
    # Rule: after 'A4' or 'A22', only 'A15' is allowed next.
    if last in {'A4', 'A22'} and action != 'A15':
        return True
    # Rule: no run of three consecutive actions from {'A4', 'A22'}.
    pair = {'A4', 'A22'}
    if len(history) > 1 and history[-2] in pair and last in pair and action in pair:
        return True
    # Rule: any single action may occur at most three times overall.
    if history.count(action) >= 3:
        return True
    return False
class Phi3(CompositeBase):
    """Composite spectral basis Phi^(3) enforcing 6th-order boundary
    conditions (shenfun style).

    The stencil maps basis index k to coefficients of the orthogonal
    polynomials k, k+2, k+4 and k+6; the denominators are rational
    polynomials in the symbolic index `n` provided by the enclosing module.
    """
    def __init__(self, N, quad='LG', bc=((0,) * 6), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        self._stencil = {0: (1 / (2 * (((4 * (n ** 2)) + (16 * n)) + 15))), 2: ((- 3) / (((8 * (n ** 2)) + (48 * n)) + 54)), 4: (3 / (2 * (((4 * (n ** 2)) + (32 * n)) + 55))), 6: ((- 1) / (((8 * (n ** 2)) + (80 * n)) + 198))}
    # Fix: these helpers take no self/cls, so they must be declared
    # staticmethods to be callable on instances.
    @staticmethod
    def boundary_condition():
        return '6th order'
    @staticmethod
    def short_name():
        return 'P3'
def tuplify(item: Union[(Any, Sequence[Any])]) -> Tuple[(Any, ...)]:
    """Coerce *item* into a tuple.

    Sequences are converted element-wise; any other value is wrapped in a
    1-tuple. Strings and bytes — technically Sequences — are treated as
    atomic values.
    """
    # Fix: isinstance('abc', Sequence) is True, so the original exploded
    # strings into character tuples ('a', 'b', 'c'); treat str/bytes as
    # scalars instead.
    if isinstance(item, Sequence) and not isinstance(item, (str, bytes)):
        return tuple(item)
    return (item,)
def main():
    """Minimal `transformers` CLI dispatcher: convert / train / serve
    ('predict' is accepted but has no handler here)."""
    import sys
    if (len(sys.argv) < 2) or (sys.argv[1] not in ['convert', 'train', 'predict', 'serve']):
        print('First argument to `transformers` command line interface should be one of: \n>> convert serve train predict')
        # Fix: bail out after reporting the usage error — the original fell
        # through and indexed sys.argv[1], raising IndexError when no
        # argument was supplied.
        return
    if (sys.argv[1] == 'convert'):
        from transformers.commands import convert
        convert(sys.argv)
    elif (sys.argv[1] == 'train'):
        from transformers.commands import train
        train(sys.argv)
    elif (sys.argv[1] == 'serve'):
        pass
class GaussianEmbedding(object):
    """Trains Gaussian word/node embeddings with a max-margin energy loss
    (Vilnis & McCallum style) using per-parameter AdaGrad updates.
    """
    def __init__(self, N, size=100, covariance_type='spherical', energy='KL', C=1.0, m=0.1, M=10.0, Closs=1.0, eta=1.0):
        self.dist = GaussianDistribution(N, size=size, covariance_type=covariance_type)
        if (energy == 'KL'):
            self.energy = KLEnergy(self.dist)
        else:
            raise NotImplementedError
        # C: max L2 norm of each mean; [m, M]: clipping bounds for Sigma;
        # Closs: margin of the hinge loss; eta: base learning rate.
        self.C = C
        self.m = m
        self.M = M
        self.Closs = Closs
        self.dist.init_params(0.1, M, 1.0, m, M)
        self.eta = eta
        # AdaGrad accumulators (per embedding index).
        self._acc_grad_mu = np.zeros(N)
        self._acc_grad_sigma = np.zeros(N)
    def _loss(self, pos, neg):
        """Hinge loss: max(0, Closs - E(pos) + E(neg))."""
        return max(0.0, ((self.Closs - self.energy.energy(*pos)) + self.energy.energy(*neg)))
    def train_single(self, pairs):
        """One pass of SGD over (positive, negative) index-pair samples."""
        for (pos, neg) in pairs:
            loss = self._loss(pos, neg)
            # Margin satisfied: no gradient to apply.
            if (loss < 1e-14):
                continue
            # Positive pairs descend (-1), negative pairs ascend (+1).
            for (pn, fac) in [(pos, (- 1.0)), (neg, 1.0)]:
                (i, j) = pn
                (di, dj) = self.energy.gradient(i, j)
                for (k, d) in [(i, di), (j, dj)]:
                    (dmu, dsigma) = d
                    # AdaGrad step for the mean.
                    self._acc_grad_mu[k] += (np.sum((dmu ** 2)) / len(dmu))
                    eta = (self.eta / np.sqrt((self._acc_grad_mu[k] + 1.0)))
                    self.dist.mu[k] -= ((fac * eta) * dmu)
                    # Project the mean back into the L2 ball of radius C.
                    l2_mu = np.sqrt(np.sum((self.dist.mu[k] ** 2)))
                    if (l2_mu > self.C):
                        self.dist.mu[k] *= (self.C / l2_mu)
                    # AdaGrad step for the covariance, clipped to [m, M].
                    self._acc_grad_sigma[k] += (np.sum((dsigma ** 2)) / len(dsigma))
                    eta = (self.eta / np.sqrt((self._acc_grad_sigma[k] + 1.0)))
                    self.dist.Sigma[k] -= ((fac * eta) * dsigma)
                    self.dist.Sigma[k] = np.maximum(self.m, np.minimum(self.M, self.dist.Sigma[k]))
class InPlaceABNSync(ABN):
    """ABN variant that evaluates the synchronized in-place activated
    batch-norm kernel in its forward pass."""

    def forward(self, x):
        # Delegate to the fused kernel, forwarding this layer's parameters,
        # running statistics, and activation configuration.
        return inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope)

    def __repr__(self):
        # leaky_relu is the only activation whose slope is worth reporting.
        template = '{name}({num_features}, eps={eps}, momentum={momentum}, affine={affine}, activation={activation}'
        template += ', slope={slope})' if (self.activation == 'leaky_relu') else ')'
        return template.format(name=self.__class__.__name__, **self.__dict__)
class OpenImagesObjV5Cfg(OpenImagesObjCfg):
    """Split layout for the Open Images V5 object-detection dataset
    (image directories, per-split info CSVs, and annotation files)."""

    splits: Dict[(str, dict)] = field(default_factory=(lambda : dict(
        train=dict(
            img_dir='train',
            img_info='annotations/train-info.csv',
            has_labels=True,
            prefix_levels=1,
            ann_bbox='annotations/train-annotations-bbox.csv',
            ann_img_label='annotations/train-annotations-human-imagelabels-boxable.csv',
        ),
        val=dict(
            img_dir='validation',
            img_info='annotations/validation-info.csv',
            has_labels=True,
            prefix_levels=0,
            ann_bbox='annotations/validation-annotations-bbox.csv',
            ann_img_label='annotations/validation-annotations-human-imagelabels-boxable.csv',
        ),
        test=dict(
            img_dir='test',
            img_info='',
            has_labels=True,
            prefix_levels=0,
            ann_bbox='annotations/test-annotations-bbox.csv',
            ann_img_label='annotations/test-annotations-human-imagelabels-boxable.csv',
        ),
    )))
def calculate_fid_given_paths(paths, batch_size, cuda, dims, bootstrap=True, n_bootstraps=10, model_type='inception'):
    """Compute (bootstrapped) FID of every path in paths[1:] against paths[0].

    Each path is either an image directory or a .npy array of images.
    Returns a list of (path, mean_fid, std_fid) tuples, one per comparison
    path. With bootstrap=False a single FID value is computed (std 0).
    """
    pths = []
    for p in paths:
        if (not os.path.exists(p)):
            raise RuntimeError(('Invalid path: %s' % p))
        if os.path.isdir(p):
            pths.append(p)
        elif p.endswith('.npy'):
            np_imgs = np.load(p)
            # NOTE(review): threshold 25000 vs slice 50000 looks inconsistent —
            # arrays of 25k-50k images pass through untruncated; confirm intent.
            if (np_imgs.shape[0] > 25000):
                np_imgs = np_imgs[:50000]
            pths.append(np_imgs)
        # NOTE(review): paths that are neither a directory nor .npy are
        # silently dropped here.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    if (model_type == 'inception'):
        model = InceptionV3([block_idx])
    elif (model_type == 'lenet'):
        model = LeNet5()
        model.load_state_dict(torch.load('./models/lenet.pth'))
    if cuda:
        model.cuda()
    # Reference activations come from the first path; all others compare to it.
    act_true = _compute_activations(pths[0], model, batch_size, dims, cuda, model_type)
    n_bootstraps = (n_bootstraps if bootstrap else 1)
    pths = pths[1:]
    results = []
    for (j, pth) in enumerate(pths):
        print(paths[(j + 1)])
        actj = _compute_activations(pth, model, batch_size, dims, cuda, model_type)
        fid_values = np.zeros(n_bootstraps)
        with tqdm(range(n_bootstraps), desc='FID') as bar:
            for i in bar:
                # Resample activations with replacement to estimate FID variance.
                act1_bs = act_true[np.random.choice(act_true.shape[0], act_true.shape[0], replace=True)]
                act2_bs = actj[np.random.choice(actj.shape[0], actj.shape[0], replace=True)]
                (m1, s1) = calculate_activation_statistics(act1_bs)
                (m2, s2) = calculate_activation_statistics(act2_bs)
                fid_values[i] = calculate_frechet_distance(m1, s1, m2, s2)
                bar.set_postfix({'mean': fid_values[:(i + 1)].mean()})
        results.append((paths[(j + 1)], fid_values.mean(), fid_values.std()))
    return results
def _create_dataset(uri, batch_size, shuffle, no_image_normalization, cache_dir, overwrite_cache, create_cache_explicitly, prepare_data_iterator, dataset_index):
    """Build a lightweight dataset descriptor with an optional data iterator.

    Depending on cache_dir / overwrite_cache / create_cache_explicitly, the
    CSV dataset at *uri* is either pre-converted into an on-disk cache (and
    then iterated from the cache) or iterated directly. When
    prepare_data_iterator is False only the metadata fields are filled in.
    """
    class Dataset():
        pass
    dataset = Dataset()
    dataset.uri = uri
    dataset.cache_dir = cache_dir
    dataset.normalize = (not no_image_normalization)
    comm = current_communicator()
    # Seed the RNG per dataset so shuffling is reproducible across runs.
    rng = numpy.random.RandomState(dataset_index)
    # With multi-process (MPI) execution the in-memory cache is disabled.
    use_memory_cache = ((comm.size == 1) if comm else True)
    if prepare_data_iterator:
        if (cache_dir == ''):
            cache_dir = None
        if (cache_dir and (create_cache_explicitly or comm)):
            # Explicit cache creation path: build the cache once (rank 0 only),
            # then always iterate from the cache directory.
            cache_index = os.path.join(cache_dir, 'cache_index.csv')
            if ((not os.path.exists(cache_index)) or overwrite_cache):
                if single_or_rankzero():
                    logger.log(99, (('Creating cache data for "' + uri) + '"'))
                    try:
                        os.makedirs(cache_dir)
                    except OSError:
                        # Directory already exists.
                        pass
                    if os.path.exists(uri):
                        cc = CreateCache(uri, rng=rng, shuffle=shuffle)
                        cc.create(cache_dir, normalize=False)
                    else:
                        # Remote URI: stream through the CSV iterator purely to
                        # populate the cache as a side effect.
                        with data_iterator_csv_dataset(uri, batch_size, shuffle, rng=rng, normalize=False, cache_dir=cache_dir, with_memory_cache=False) as di:
                            pass
            # Re-seed so iteration order is independent of cache creation.
            rng = numpy.random.RandomState(dataset_index)
            dataset.data_iterator = (lambda : data_iterator_cache(cache_dir, batch_size, shuffle, rng=rng, normalize=dataset.normalize, with_memory_cache=use_memory_cache))
        elif ((not cache_dir) or overwrite_cache or (not os.path.exists(cache_dir)) or (len(os.listdir(cache_dir)) == 0)):
            # Implicit cache creation (or no cache at all).
            if comm:
                logger.critical('Implicit cache creation does not support with MPI')
                import sys
                sys.exit((- 1))
            else:
                if cache_dir:
                    try:
                        os.makedirs(cache_dir)
                    except OSError:
                        pass
                dataset.data_iterator = (lambda : data_iterator_csv_dataset(uri, batch_size, shuffle, rng=rng, normalize=dataset.normalize, cache_dir=cache_dir))
        else:
            # A populated cache already exists; iterate it directly.
            dataset.data_iterator = (lambda : data_iterator_cache(cache_dir, batch_size, shuffle, rng=rng, normalize=dataset.normalize, with_memory_cache=use_memory_cache))
    else:
        dataset.data_iterator = None
    return dataset
def _path_is_ancestor(path, other):
return (os.path.join(path, other[len(path):].lstrip(os.sep)) == other) |
class ClusterRec(QueryRecommender):
    """Recommender that clusters queries by their feature vectors (KMeans)
    and ranks items by relative popularity inside each cluster.

    Cold queries are supported as long as they come with feature vectors.
    """
    can_predict_cold_queries = True
    _search_space = {'num_clusters': {'type': 'int', 'args': [2, 20]}}
    # Per-cluster item relevance table built by _fit: (cluster, item, rating).
    item_rel_in_cluster: SparkDataFrame

    def __init__(self, num_clusters: int=10):
        """:param num_clusters: number of KMeans query clusters."""
        self.num_clusters = num_clusters

    def _init_args(self):
        return {'num_clusters': self.num_clusters}

    def _save_model(self, path: str, additional_params: Optional[dict]=None):
        super()._save_model(path, additional_params)
        self.model.write().overwrite().save(join(path, 'model'))

    def _load_model(self, path: str):
        super()._load_model(path)
        self.model = KMeansModel.load(join(path, 'model'))

    def _fit(self, dataset: Dataset) -> None:
        """Cluster queries and precompute per-cluster item relevance."""
        kmeans = KMeans().setK(self.num_clusters).setFeaturesCol('features')
        query_features_vector = self._transform_features(dataset.query_features)
        self.model = kmeans.fit(query_features_vector)
        queries_clusters = self.model.transform(query_features_vector).select(self.query_column, 'prediction').withColumnRenamed('prediction', 'cluster')
        interactions = dataset.interactions.join(queries_clusters, on=self.query_column, how='left')
        # Count interactions per (cluster, item) ...
        self.item_rel_in_cluster = interactions.groupBy(['cluster', self.item_column]).agg(sf.count(self.item_column).alias('item_count'))
        # ... and normalize by the most popular item within each cluster.
        max_count_per_cluster = self.item_rel_in_cluster.groupby('cluster').agg(sf.max('item_count').alias('max_count_in_cluster'))
        self.item_rel_in_cluster = self.item_rel_in_cluster.join(max_count_per_cluster, on='cluster')
        self.item_rel_in_cluster = self.item_rel_in_cluster.withColumn(self.rating_column, (sf.col('item_count') / sf.col('max_count_in_cluster'))).drop('item_count', 'max_count_in_cluster')
        # Materialize now; this table is reused by every predict call.
        self.item_rel_in_cluster.cache().count()

    def _clear_cache(self):
        if hasattr(self, 'item_rel_in_cluster'):
            self.item_rel_in_cluster.unpersist()

    def _dataframes(self):
        return {'item_rel_in_cluster': self.item_rel_in_cluster}

    def _transform_features(self, query_features):
        # Assemble all non-id columns into a single dense 'features' vector.
        feature_columns = query_features.drop(self.query_column).columns
        vec = VectorAssembler(inputCols=feature_columns, outputCol='features')
        return vec.transform(query_features).select(self.query_column, 'features')

    def _make_query_clusters(self, queries, query_features):
        """Assign each query (that has features) to its KMeans cluster."""
        query_cnt_in_fv = query_features.select(self.query_column).distinct().join(queries.distinct(), on=self.query_column).count()
        query_cnt = queries.distinct().count()
        if (query_cnt_in_fv < query_cnt):
            self.logger.info("%s query(s) don't have a feature vector. The results will not be calculated for them.", (query_cnt - query_cnt_in_fv))
        query_features_vector = self._transform_features(query_features.join(queries, on=self.query_column))
        return self.model.transform(query_features_vector).select(self.query_column, 'prediction').withColumnRenamed('prediction', 'cluster')

    def _predict(self, dataset: Dataset, k: int, queries: SparkDataFrame, items: SparkDataFrame, filter_seen_items: bool=True) -> SparkDataFrame:
        """Score requested items for the requested queries via their clusters."""
        query_clusters = self._make_query_clusters(queries, dataset.query_features)
        filtered_items = self.item_rel_in_cluster.join(items, on=self.item_column)
        pred = query_clusters.join(filtered_items, on='cluster').drop('cluster')
        return pred

    def _predict_pairs(self, pairs: SparkDataFrame, dataset: Optional[Dataset]=None) -> SparkDataFrame:
        """Score explicit (query, item) pairs; requires query features."""
        # Bug fix: `dataset` defaults to None, and the original dereferenced
        # it before checking — raising AttributeError instead of the intended
        # ValueError when no dataset was supplied.
        if dataset is None or not dataset.query_features:
            raise ValueError('Query features are missing for predict')
        query_clusters = self._make_query_clusters(pairs.select(self.query_column).distinct(), dataset.query_features)
        pairs_with_clusters = pairs.join(query_clusters, on=self.query_column)
        filtered_items = self.item_rel_in_cluster.join(pairs.select(self.item_column).distinct(), on=self.item_column)
        pred = pairs_with_clusters.join(filtered_items, on=['cluster', self.item_column]).select(self.query_column, self.item_column, self.rating_column)
        return pred
class ALSModel(JavaModel, _ALSModelParams, JavaMLWritable, ALSModelJavaMLReadable):
    """Python wrapper around a custom Java/Scala ALS model, exposing factor
    access and top-k recommendation helpers.

    NOTE(review): the bare string lines such as ('3.0.0') look like residue of
    stripped ``@since('3.0.0')`` (and possibly ``@property``) decorators from
    the original PySpark source — confirm before relying on rank/userFactors/
    itemFactors being plain methods rather than properties.
    """
    ('3.0.0')
    def setUserCol(self, value):
        # Set the column holding user ids.
        return self._set(userCol=value)
    ('3.0.0')
    def setItemCol(self, value):
        # Set the column holding item ids.
        return self._set(itemCol=value)
    ('3.0.0')
    def setColdStartStrategy(self, value):
        # Strategy for users/items unseen at fit time ('nan' or 'drop').
        return self._set(coldStartStrategy=value)
    ('3.0.0')
    def setPredictionCol(self, value):
        return self._set(predictionCol=value)
    ('3.0.0')
    def setBlockSize(self, value):
        return self._set(blockSize=value)
    ('1.4.0')
    def rank(self):
        # Rank (latent dimension) of the fitted factorization.
        return self._call_java('rank')
    ('1.4.0')
    def userFactors(self):
        # DataFrame of (id, features) user latent factors.
        return self._call_java('userFactors')
    ('1.4.0')
    def itemFactors(self):
        # DataFrame of (id, features) item latent factors.
        return self._call_java('itemFactors')
    def recommendForAllUsers(self, numItems):
        """Return the top *numItems* items for every user."""
        return self._call_java('recommendForAllUsers', numItems)
    def recommendForAllItems(self, numUsers):
        """Return the top *numUsers* users for every item."""
        return self._call_java('recommendForAllItems', numUsers)
    def recommendForUserSubset(self, dataset, numItems):
        """Return the top *numItems* items for each user id in *dataset*."""
        return self._call_java('recommendForUserSubset', dataset, numItems)
    def recommendForItemSubset(self, dataset, numUsers):
        """Return the top *numUsers* users for each item id in *dataset*."""
        return self._call_java('recommendForItemSubset', dataset, numUsers)
    def recommendItemsForUserItemSubset(self, usersDataset, itemsDataset, numItems):
        """Return top *numItems* recommendations restricted to the given user and item subsets."""
        return self._call_java('recommendItemsForUserItemSubset', usersDataset, itemsDataset, numItems)
    def _from_java(java_stage):
        # NOTE(review): defined without self — presumably a @staticmethod whose
        # decorator was stripped. Resurrects a Python wrapper from a Java stage.
        def __get_class(clazz):
            # Resolve a dotted class path to the Python class object.
            parts = clazz.split('.')
            module = '.'.join(parts[:(- 1)])
            m = __import__(module)
            for comp in parts[1:]:
                m = getattr(m, comp)
            return m
        stage_name = 'replay.experimental.models.extensions.spark_custom_models.als_extension.ALSModel'
        py_type = __get_class(stage_name)
        if issubclass(py_type, JavaParams):
            # Wrap the Java object and copy its params/uid over.
            py_stage = py_type()
            py_stage._java_obj = java_stage
            if issubclass(py_type, JavaModel):
                py_stage._create_params_from_java()
            py_stage._resetUid(java_stage.uid())
            py_stage._transfer_params_from_java()
        elif hasattr(py_type, '_from_java'):
            py_stage = py_type._from_java(java_stage)
        else:
            raise NotImplementedError(('This Java stage cannot be loaded into Python currently: %r' % stage_name))
        return py_stage
# NOTE(review): the bare '_model' below looks like residue of a stripped
# '@register_model' decorator from timm — confirm against the original file.
_model
def efficientnet_em(pretrained=False, **kwargs):
    """EfficientNet-Edge Medium: edge-device variant with depth multiplier 1.1.

    :param pretrained: load pretrained weights when True.
    :param kwargs: forwarded to the EfficientNet-Edge generator.
    """
    model = _gen_efficientnet_edge('efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model
class InContextLearningAdapter(Adapter, ABC):
    """Adapter that builds few-shot (in-context) prompts: per train trial it
    samples training examples, constructs a prompt for every eval instance,
    and emits the resulting RequestStates as a ScenarioState."""

    def generate_requests(self, eval_instance: Instance, train_trial_index: int, training_instances: List[Instance]) -> List[RequestState]:
        # Abstract hook: subclasses build the RequestStates for one eval instance.
        pass
    # NOTE(review): the bare '(None)' below looks like residue of a stripped
    # decorator — confirm against the original source.
    (None)
    def adapt(self, instances: List[Instance], parallelism: int) -> ScenarioState:
        """Turn instances into a ScenarioState of requests, one adaptation
        pass per train trial."""
        all_train_instances: List[Instance] = [instance for instance in instances if (instance.split == TRAIN_SPLIT)]
        if (len(all_train_instances) < self.adapter_spec.max_train_instances):
            hlog(f'WARNING: only {len(all_train_instances)} training instances, wanted {self.adapter_spec.max_train_instances}')
        eval_instances: List[Instance] = [instance for instance in instances if (instance.split in EVAL_SPLITS)]
        hlog(f'{len(instances)} instances, choosing {self.adapter_spec.max_train_instances}/{len(all_train_instances)} train instances, {len(eval_instances)} eval instances')
        all_request_states: List[RequestState] = []
        prompt: Prompt
        for train_trial_index in range(self.adapter_spec.num_train_trials):
            with htrack_block(f'Adapting with train_trial_index={train_trial_index}'):
                all_request_states.extend(self._adapt_trial_index(all_train_instances, train_trial_index, eval_instances, parallelism))
        hlog(f'{len(all_request_states)} requests')
        return ScenarioState(self.adapter_spec, all_request_states)
    def _adapt_trial_index(self, all_train_instances: List[Instance], train_trial_index: int, eval_instances: List[Instance], parallelism: int) -> List[RequestState]:
        """Run one train trial: sample examples, fan out request generation
        over eval instances, and expand trials."""
        training_instances: List[Instance] = self.sample_examples(all_train_instances, seed=train_trial_index, sample_train=self.adapter_spec.sample_train)
        hlog(f'Sampled {len(training_instances)} examples for trial #{train_trial_index}.')
        def generate_requests_for_training_trial(eval_instance: Instance):
            # Closure binding this trial's sampled examples for parallel_map.
            return self.generate_requests(eval_instance, train_trial_index, training_instances)
        results: List[List[RequestState]] = parallel_map(generate_requests_for_training_trial, eval_instances, parallelism=parallelism)
        if ((train_trial_index == 0) and (len(results) > 0)):
            # Log sample prompts once (first trial only) for inspection.
            with htrack_block('Sample prompts'):
                for request_state in results[0]:
                    with htrack_block(f'reference index = {request_state.reference_index}, request_mode = {request_state.request_mode}'):
                        for line in request_state.request.prompt.split('\n'):
                            hlog(line)
        all_request_states: List[RequestState] = [request_state for result in results for request_state in result]
        return self._add_trials(all_request_states)
    def _add_trials(self, request_states: List[RequestState]) -> List[RequestState]:
        """Duplicate each request (num_trials - 1) times with distinct random
        seeds so repeated inference can be aggregated downstream."""
        if (self.adapter_spec.num_trials <= 1):
            return request_states
        all_request_states: List[RequestState] = request_states.copy()
        for i in range(1, self.adapter_spec.num_trials):
            seed: str = str(i)
            for request_state in request_states:
                request: Request = replace(request_state.request, random=seed)
                all_request_states.append(replace(request_state, request=request))
        assert (len(all_request_states) == (len(request_states) * self.adapter_spec.num_trials))
        return all_request_states
    def sample_examples(self, all_train_instances: List[Instance], seed: int, sample_train: bool=True) -> List[Instance]:
        """Pick up to max_train_instances examples, class-balanced by the
        first correct reference's label (rarer labels deprioritized); with
        sample_train=False, take a deterministic slice instead."""
        random.seed(seed)
        num_instances_to_sample: int = min(len(all_train_instances), self.adapter_spec.max_train_instances)
        examples: List[Instance] = []
        if (not sample_train):
            # Deterministic, seed-offset slice of the training pool.
            examples = all_train_instances[(num_instances_to_sample * seed):(num_instances_to_sample * (seed + 1))]
            return examples
        unlabeled_instances: List[Instance] = []
        label_to_instances: Dict[(str, List[Instance])] = defaultdict(list)
        for instance in all_train_instances:
            if instance.first_correct_reference:
                label_to_instances[instance.first_correct_reference.output.text].append(instance)
            else:
                unlabeled_instances.append(instance)
        instances: List[Instance]
        # Order labels by frequency (desc), shuffling ties for fairness.
        counts_to_labels: Dict[(int, List[str])] = defaultdict(list)
        for (label, instances) in sorted(label_to_instances.items()):
            counts_to_labels[len(instances)].append(label)
        sorted_labels: List[str] = []
        for count in sorted(counts_to_labels, reverse=True):
            labels: List[str] = counts_to_labels[count]
            random.shuffle(labels)
            sorted_labels.extend(labels)
        # Round-robin over labels so the sample stays class-balanced.
        labels_iterable = cycle(sorted_labels)
        while (num_instances_to_sample > 0):
            next_label: Optional[str] = next(labels_iterable, None)
            if (not next_label):
                break
            instances = label_to_instances[next_label]
            if (len(instances) == 0):
                continue
            examples.append(instances.pop(random.randrange(len(instances))))
            num_instances_to_sample -= 1
        # Fill any remaining quota from instances without a correct reference.
        # NOTE(review): raises if unlabeled_instances is smaller than the
        # remaining quota — presumably guaranteed by callers; confirm.
        examples += random.sample(unlabeled_instances, num_instances_to_sample)
        return examples
    def construct_prompt(self, train_instances: List[Instance], eval_instance: Instance, include_output: bool, reference_index: Optional[int]) -> Prompt:
        """Assemble instructions + train examples + eval example into a Prompt,
        shrinking it until it fits the model's context window."""
        instructions_block: str = self.adapter_spec.instructions
        train_instance_blocks: List[str] = [self.construct_example_prompt(inst, include_output=True, reference_index=None) for inst in train_instances]
        eval_instance_block: str = self.construct_example_prompt(eval_instance, include_output=include_output, reference_index=reference_index)
        prompt = Prompt(global_prefix=self.adapter_spec.global_prefix, global_suffix=self.adapter_spec.global_suffix, instructions_block=instructions_block, train_instance_blocks=train_instance_blocks, eval_instance_block=eval_instance_block, instance_prefix=self.adapter_spec.instance_prefix, substitutions=self.adapter_spec.substitutions)
        prompt = self._make_prompt_fit(prompt)
        return prompt
    def construct_example_prompt(self, instance: Instance, include_output: bool, reference_index: Optional[int]) -> str:
        """Format one instance as input (and, optionally, its output)."""
        result: str = ((self.adapter_spec.input_prefix + (instance.input.text or '')) + self.adapter_spec.input_suffix)
        if include_output:
            output: str = self.construct_output(instance, reference_index)
            result += ((self.adapter_spec.output_prefix + output) + self.adapter_spec.output_suffix)
        else:
            # Strip trailing whitespace so the model completes right after the prefix.
            result += self.adapter_spec.output_prefix.rstrip()
        return result
    def construct_output(self, instance: Instance, reference_index: Optional[int]) -> str:
        """Pick the gold output text: a specific reference, all correct
        references joined (multi-label), or the first correct reference."""
        delimiter: str = ', '
        no_correct_references: str = 'n/a'
        output: str
        if (reference_index is not None):
            reference = instance.references[reference_index]
            output = reference.output.text
        elif self.adapter_spec.multi_label:
            correct_references: List[Reference] = instance.all_correct_references
            if (not correct_references):
                output = no_correct_references
            else:
                output = delimiter.join([correct_reference.output.text for correct_reference in correct_references])
        else:
            first_correct_reference: Optional[Reference] = instance.first_correct_reference
            if (not first_correct_reference):
                output = no_correct_references
            else:
                output = first_correct_reference.output.text
        return output
    def _make_prompt_fit(self, prompt: Prompt) -> Prompt:
        """Drop in-context examples (last first) until the prompt fits the
        context window; as a last resort truncate the text from the right."""
        orig_train_instances_count: int = prompt.num_train_instances
        while (prompt.num_train_instances > 0):
            if self.window_service.fits_within_context_window(text=prompt.text, expected_completion_token_length=self.adapter_spec.max_tokens):
                removed_train_instances_count: int = (orig_train_instances_count - prompt.num_train_instances)
                if (removed_train_instances_count > 0):
                    hlog(f'The original constructed prompt exceeded the max context length. Removed {removed_train_instances_count} in-context examples to fit it within the context window.')
                return prompt
            prompt = replace(prompt, train_instance_blocks=prompt.train_instance_blocks[:(len(prompt.train_instance_blocks) - 1)])
        # Zero examples left and still too long: hard-truncate the raw text.
        text = prompt.text
        truncated_text = self.window_service.truncate_from_right(text, self.adapter_spec.max_tokens)
        if (len(truncated_text) < len(text)):
            prompt = replace(prompt, truncated_text=truncated_text)
        return prompt
def is_misnamed_test_class(node: (ast.expr | ast.stmt), names: Sequence[str], line: str) -> bool:
return (isinstance(node, ast.ClassDef) and (not node.name.startswith('Test')) and (names.count(node.name) == 0) and (PRAGMA not in line) and ('KDTreeTest' not in [decorator.id for decorator in node.decorator_list])) |
def save_item_for_vis(item, out_file):
    """Render a sample for visual inspection and write it to *out_file*.

    Draws the binarized mask boundary on the (C, H, W) input image and, when
    an inpainted result is present, concatenates it side by side.
    """
    mask = (item['mask'] > 0.5)
    if (mask.ndim == 3):
        # Drop the leading channel axis so mark_boundaries gets a 2-D mask.
        mask = mask[0]
    # Transpose CHW -> HWC for skimage.
    img = mark_boundaries(np.transpose(item['image'], (1, 2, 0)), mask, color=(1.0, 0.0, 0.0), outline_color=(1.0, 1.0, 1.0), mode='thick')
    if ('inpainted' in item):
        inp_img = mark_boundaries(np.transpose(item['inpainted'], (1, 2, 0)), mask, color=(1.0, 0.0, 0.0), mode='outer')
        img = np.concatenate((img, inp_img), axis=1)
    # Convert from float [0, 1] to uint8 [0, 255] before saving.
    img = np.clip((img * 255), 0, 255).astype('uint8')
    io.imsave(out_file, img)
def copy_examples(examples_dir, destination_dir):
    """Convert every .py example in *examples_dir* into a Markdown page.

    Each output page is the module docstring followed by the remaining source
    in a fenced python code block.
    """
    pathlib.Path(destination_dir).mkdir(exist_ok=True)
    for file in os.listdir(examples_dir):
        if (not file.endswith('.py')):
            continue
        module_path = os.path.join(examples_dir, file)
        (docstring, starting_line) = get_module_docstring(module_path)
        # 'name.py' -> 'name.md' (strip 'py', append 'md').
        destination_file = os.path.join(destination_dir, (file[:(- 2)] + 'md'))
        with open(destination_file, 'w+') as f_out, open(os.path.join(examples_dir, file), 'r+') as f_in:
            f_out.write((docstring + '\n\n'))
            # Skip past the docstring lines in the source.
            # NOTE(review): next() raises StopIteration if the file is shorter
            # than starting_line — presumably guaranteed by get_module_docstring.
            for _ in range(starting_line):
                next(f_in)
            f_out.write('```python\n')
            # Drop a single leading blank line after the docstring, if any.
            line = next(f_in)
            if (line != '\n'):
                f_out.write(line)
            for line in f_in:
                f_out.write(line)
            f_out.write('```')
def load_annotation(gold_file):
    """Load an M2-format gold annotation file.

    Returns ``(source_sentences, gold_edits)``: one source sentence per
    S-line, paired with a dict mapping annotator id to its list of
    ``(start, end, original, corrections)`` edits.
    """
    source_sentences = []
    gold_edits = []
    fgold = smart_open(gold_file, 'r')
    puffer = fgold.read()
    fgold.close()
    # Py3 fix: the original unconditionally called puffer.decode('utf8'),
    # which fails when read() already returned str; only decode bytes.
    if isinstance(puffer, bytes):
        puffer = puffer.decode('utf8')
    for item in paragraphs(puffer.splitlines(True)):
        item = item.splitlines(False)
        # S-lines carry the (possibly multi-sentence) source text.
        sentence = [line[2:].strip() for line in item if line.startswith('S ')]
        assert (sentence != [])
        annotations = {}
        for line in item[1:]:
            if (line.startswith('I ') or line.startswith('S ')):
                continue
            assert line.startswith('A ')
            line = line[2:]
            fields = line.split('|||')
            start_offset = int(fields[0].split()[0])
            end_offset = int(fields[0].split()[1])
            etype = fields[1]
            if (etype == 'noop'):
                # noop annotations use sentinel offsets.
                start_offset = (- 1)
                end_offset = (- 1)
            corrections = [(c.strip() if (c != '-NONE-') else '') for c in fields[2].split('||')]
            original = ' '.join(' '.join(sentence).split()[start_offset:end_offset])
            annotator = int(fields[5])
            if (annotator not in annotations.keys()):
                annotations[annotator] = []
            annotations[annotator].append((start_offset, end_offset, original, corrections))
        tok_offset = 0
        for this_sentence in sentence:
            tok_offset += len(this_sentence.split())
            source_sentences.append(this_sentence)
            this_edits = {}
            # Py3 fix: dict.iteritems() no longer exists; items() is equivalent.
            for (annotator, annotation) in annotations.items():
                # Keep only edits that fall entirely within this sentence's tokens.
                this_edits[annotator] = [edit for edit in annotation if ((edit[0] <= tok_offset) and (edit[1] <= tok_offset) and (edit[0] >= 0) and (edit[1] >= 0))]
            if (len(this_edits) == 0):
                this_edits[0] = []
            gold_edits.append(this_edits)
    return (source_sentences, gold_edits)
def log_args_to_file(args, pre='args', logger=None):
    """Log every attribute of *args* (e.g. an argparse Namespace), one
    '<pre>.<name> : <value>' line per attribute."""
    for key, val in vars(args).items():
        print_log(f'{pre}.{key} : {val}', logger=logger)
def find_topic_gold_clusters(topic):
    """Collect the gold event/entity mentions of *topic* and group them into
    clusters keyed by gold coreference tag.

    Mentions tagged '-' (no gold cluster) are collected but left unclustered.
    Returns (event_clusters, entity_clusters, event_mentions, entity_mentions).
    """
    event_mentions = []
    entity_mentions = []
    for doc in topic.docs.values():
        for sent in doc.sentences.values():
            event_mentions.extend(sent.gold_event_mentions)
            entity_mentions.extend(sent.gold_entity_mentions)

    event_gold_tag_to_cluster = {}
    for mention in event_mentions:
        if mention.gold_tag != '-':
            event_gold_tag_to_cluster.setdefault(mention.gold_tag, []).append(mention)

    entity_gold_tag_to_cluster = {}
    for mention in entity_mentions:
        if mention.gold_tag != '-':
            entity_gold_tag_to_cluster.setdefault(mention.gold_tag, []).append(mention)

    return (event_gold_tag_to_cluster, entity_gold_tag_to_cluster, event_mentions, entity_mentions)
def _make_new_ngettext(func):
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return (rv % variables)
return ngettext |
class TestLanguageModelingDatasetReader(AllenNlpTestCase):
    """Tests for LanguageModelingReader against the bundled text fixture."""
    def test_read_from_file(self):
        # With tokens_per_instance=3, each instance's output tokens are the
        # input tokens shifted one position to the right (next-token LM).
        reader = LanguageModelingReader(tokens_per_instance=3)
        dataset = reader.read('tests/fixtures/data/language_modeling.txt')
        instances = dataset.instances
        assert (len(instances) == 5)
        assert ([t.text for t in instances[0].fields['input_tokens'].tokens] == ['This', 'is', 'a'])
        assert ([t.text for t in instances[0].fields['output_tokens'].tokens] == ['is', 'a', 'sentence'])
        assert ([t.text for t in instances[1].fields['input_tokens'].tokens] == ['sentence', 'for', 'language'])
        assert ([t.text for t in instances[1].fields['output_tokens'].tokens] == ['for', 'language', 'modelling'])
        assert ([t.text for t in instances[2].fields['input_tokens'].tokens] == ['modelling', '.', 'Here'])
        assert ([t.text for t in instances[2].fields['output_tokens'].tokens] == ['.', 'Here', "'s"])
        assert ([t.text for t in instances[3].fields['input_tokens'].tokens] == ["'s", 'another', 'one'])
        assert ([t.text for t in instances[3].fields['output_tokens'].tokens] == ['another', 'one', 'for'])
        assert ([t.text for t in instances[4].fields['input_tokens'].tokens] == ['for', 'extra', 'language'])
        assert ([t.text for t in instances[4].fields['output_tokens'].tokens] == ['extra', 'language', 'modelling'])
def recv_fun(ser):
    """Continuously read from serial port *ser* and echo received bytes to
    stdout; intended as a background reader-thread body (runs forever).

    Console output is serialized through the module-level lock ``lck``.
    """
    while True:
        res = ser.read(32)
        # Bug fix: the original paired bare acquire()/release() calls, which
        # would leave the lock held forever if print() raised; the context
        # manager guarantees release.
        with lck:
            if res != b'':
                print(''.join((chr(x) for x in res)), end='', flush=True)
def mk_log_header(file, name, params):
    """Write the C prototype ``void log_<name>(<type> a0, <type> a1, ...)``
    to *file* (no trailing semicolon or body)."""
    file.write('void log_%s(' % name)
    for idx, param in enumerate(params):
        if idx > 0:
            # Separate every argument after the first.
            file.write(', ')
        file.write('%s a%s' % (param2str(param), idx))
    file.write(')')
def count_acc(logits, label):
    """Return classification accuracy: fraction of rows of *logits* whose
    argmax equals *label* (computed on GPU when CUDA is available)."""
    pred = torch.argmax(logits, dim=1)
    tensor_type = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    return (pred == label).type(tensor_type).mean().item()
def test_forward_labeled_attention(pretrain_file):
    """Smoke-test forward passes with the labeled-attention module enabled,
    disabled, and in its combined-input configuration."""
    model = build_model(pretrain_file, '--lattn_d_proj', '64', '--lattn_d_l', '16')
    run_forward_checks(model)
    # d_proj=0 / d_l=0 disables the labeled-attention module entirely.
    model = build_model(pretrain_file, '--lattn_d_proj', '0', '--lattn_d_l', '0')
    run_forward_checks(model)
    model = build_model(pretrain_file, '--lattn_d_proj', '64', '--lattn_d_l', '16', '--lattn_combined_input')
    run_forward_checks(model)
class TrafficRandomizerAttachment():
    """SUMO traffic variation attachment: periodically triggers emergency
    brakes on vehicles ahead of the ego car and randomizes traffic speeds.

    Reads all configuration and simulation state from the global ``world``
    module.
    """
    def __init__(self):
        # Emergency-brake variation: enabled flag and trigger interval (s).
        self._eb = world.world.config['variation']['emergency_break']
        self._eb_interval = world.world.config['variation']['emergency_break_interval']
        # Braking ramp duration, then duration of the full stand-still.
        self._eb_break_time = 3.0
        self._eb_stand_still_time = 10.0
        # Traffic-speed variation: enabled flag, interval and multiplier range.
        self._ts = world.world.config['variation']['traffic_speed']
        self._ts_interval = world.world.config['variation']['traffic_speed_interval']
        self._ts_range = world.world.config['variation']['traffic_speed_mult_range']
        self._ts_duration = self._ts_interval
        self._eb_time_step = None
        self._eb_vehs = None
        self._ts_time_step = None
    def reset(self):
        """Reset timers and the list of currently braking vehicles."""
        self._eb_time_step = 0.0
        self._ts_time_step = 0.0
        self._eb_vehs = []
    def step(self):
        """Advance one simulation step of both variation mechanisms."""
        if self._eb:
            # Phase 2 of an emergency brake: once the braking ramp elapsed,
            # hold the vehicle at stand-still; otherwise keep tracking it.
            _eb_vehs = []
            for (t, vehID) in self._eb_vehs:
                if (((world.world.time_step * world.world.config['simulation']['dt']) - (t * world.world.config['simulation']['dt'])) >= self._eb_break_time):
                    world.world.traci.vehicle.slowDown(vehID, 0.0, self._eb_stand_still_time)
                else:
                    _eb_vehs.append([t, vehID])
            self._eb_vehs = _eb_vehs
            trafficIDs = copy.deepcopy(world.world.traffic_manager.trafficIDs)
            world.world.rng_traffic.shuffle(trafficIDs)
            if (((world.world.time_step * world.world.config['simulation']['dt']) - (self._eb_time_step * world.world.config['simulation']['dt'])) >= self._eb_interval):
                # Pick one (shuffled) vehicle roughly in front of the ego car
                # (within +-35 degrees of its heading) and start a brake.
                for vehID in trafficIDs:
                    veh_idx = world.world.traffic_manager.trafficIDs.index(vehID)
                    to_vec = (world.world.traffic_manager.traffic_state_transformed('position')[veh_idx] - world.world.traffic_manager.ego_state_transformed('position'))
                    to_angle = utils.wrap_to_pi(utils.vector2d_rad(*to_vec))
                    ego_angle = utils.wrap_to_pi(world.world.traffic_manager.ego_state_transformed('angle'))
                    if (abs((ego_angle - to_angle)) < np.radians(35)):
                        self._eb_vehs.append([world.world.time_step, vehID])
                        world.world.traci.vehicle.slowDown(vehID, 0.0, self._eb_break_time)
                        self._eb_time_step = world.world.time_step
                        break
                    else:
                        continue
        if self._ts:
            # Every interval, rescale one random vehicle's speed by a random
            # multiplier drawn from the configured range.
            if ((((world.world.time_step * world.world.config['simulation']['dt']) - (self._ts_time_step * world.world.config['simulation']['dt'])) >= self._ts_interval) and (len(world.world.traffic_manager.trafficIDs) > 0)):
                self._ts_time_step = world.world.time_step
                mult_factor = world.world.rng_traffic.uniform(low=self._ts_range[0], high=self._ts_range[1])
                vehID = world.world.rng_traffic.choice(world.world.traffic_manager.trafficIDs)
                old_speed = world.world.traci.vehicle.getSpeed(vehID)
                speed = (world.world.traci.vehicle.getSpeed(vehID) * mult_factor)
                world.world.traci.vehicle.slowDown(vehID, speed, self._ts_duration)
                logging.debug(f'Adapted speed of {vehID} from {old_speed} to {speed}.')
class HyperellipticJacobian_g2(jacobian_generic.HyperellipticJacobian_generic):
    """Jacobian of a genus-2 hyperelliptic curve."""
    def kummer_surface(self):
        """Return the Kummer surface associated to this Jacobian.

        Uses ``self._kummer_surface`` as a cache. NOTE(review): the freshly
        built surface is not stored here — presumably
        ``KummerSurface.__init__`` registers itself on the Jacobian; confirm,
        otherwise this recomputes on every call.
        """
        try:
            return self._kummer_surface
        except AttributeError:
            return kummer_surface.KummerSurface(self)
def _seg_12():
    """Return segment 12 of a generated codepoint status table.

    Entries are (codepoint, status[, mapping]) tuples; the 'V'/'X'/'M' codes
    appear to follow the IDNA/UTS#46 convention (valid / disallowed / mapped).
    Auto-generated data — do not edit by hand.
    """
    return [(3077, 'V'), (3085, 'X'), (3086, 'V'), (3089, 'X'), (3090, 'V'), (3113, 'X'), (3114, 'V'), (3130, 'X'), (3133, 'V'), (3141, 'X'), (3142, 'V'), (3145, 'X'), (3146, 'V'), (3150, 'X'), (3157, 'V'), (3159, 'X'), (3160, 'V'), (3163, 'X'), (3168, 'V'), (3172, 'X'), (3174, 'V'), (3184, 'X'), (3192, 'V'), (3204, 'X'), (3205, 'V'), (3213, 'X'), (3214, 'V'), (3217, 'X'), (3218, 'V'), (3241, 'X'), (3242, 'V'), (3252, 'X'), (3253, 'V'), (3258, 'X'), (3260, 'V'), (3269, 'X'), (3270, 'V'), (3273, 'X'), (3274, 'V'), (3278, 'X'), (3285, 'V'), (3287, 'X'), (3294, 'V'), (3295, 'X'), (3296, 'V'), (3300, 'X'), (3302, 'V'), (3312, 'X'), (3313, 'V'), (3315, 'X'), (3328, 'V'), (3332, 'X'), (3333, 'V'), (3341, 'X'), (3342, 'V'), (3345, 'X'), (3346, 'V'), (3397, 'X'), (3398, 'V'), (3401, 'X'), (3402, 'V'), (3408, 'X'), (3412, 'V'), (3428, 'X'), (3430, 'V'), (3456, 'X'), (3458, 'V'), (3460, 'X'), (3461, 'V'), (3479, 'X'), (3482, 'V'), (3506, 'X'), (3507, 'V'), (3516, 'X'), (3517, 'V'), (3518, 'X'), (3520, 'V'), (3527, 'X'), (3530, 'V'), (3531, 'X'), (3535, 'V'), (3541, 'X'), (3542, 'V'), (3543, 'X'), (3544, 'V'), (3552, 'X'), (3558, 'V'), (3568, 'X'), (3570, 'V'), (3573, 'X'), (3585, 'V'), (3635, 'M', u''), (3636, 'V'), (3643, 'X'), (3647, 'V'), (3676, 'X'), (3713, 'V'), (3715, 'X'), (3716, 'V'), (3717, 'X')]
class Gemm(ExecutableOperation):
    """Executable CUTLASS universal-GEMM operation: owns the iterator/epilogue
    parameter emitters and packs kernel arguments into a host workspace.

    NOTE(review): initialize() packs fields in the exact layout the device
    kernel expects — the packing order below must not change.
    """
    def __init__(self, operation):
        super().__init__(operation)
        self.emitter = EmitGemmUniversalInstance('_type')
        # Fixed 128x128x8 threadblock tile, 256 threads, 32 KiB smem.
        self.threadblock_swizzle = ThreadblockSwizzle(GemmCoord(128, 128, 8))
        self.threads = 256
        self.shared_memory_capacity = (32 << 10)
        # Tile-access iterator parameters for the A/B operands.
        self.params_A = PredicatedTileAccessIteratorParams(PredicatedTileAccessIteratorDesc(32, 1, PitchLinearCoord(128, 8), PitchLinearCoord(1, 4), PitchLinearCoord(1, 2)), 'A')
        self.params_B = PredicatedTileAccessIteratorParams(PredicatedTileAccessIteratorDesc(32, 1, PitchLinearCoord(128, 8), PitchLinearCoord(1, 4), PitchLinearCoord(1, 2)), 'B')
        # Epilogue tile iterators for the C (source) and D (destination) tensors.
        self.params_C = EpilogueTileIteratorParams(EpilogueThreadMap(256, 1, 32, EpilogueTileDesc(128, 1, 4, 4, 1), EpilogueTileDesc(4, 1, 2, 1, 1), EpilogueTileDesc(32, 1, 8, 1, 1), EpilogueTileDesc(1, 4, 2, 1, 8)), 'C')
        self.params_D = EpilogueTileIteratorParams(EpilogueThreadMap(256, 1, 32, EpilogueTileDesc(128, 1, 4, 4, 1), EpilogueTileDesc(4, 1, 2, 1, 1), EpilogueTileDesc(32, 1, 8, 1, 1), EpilogueTileDesc(1, 4, 2, 1, 8)), 'D')
        self.output_op = LinearCombinationFunctor()
    def emit(self):
        """Emit the C++ instance definition for this operation."""
        return self.emitter.emit(self.operation)
    def can_implement(self, configuration, arguments):
        # NOTE(review): unimplemented — returns None (falsy) rather than a verdict.
        pass
    def get_host_workspace_size(self, arguments):
        # Fixed size of the packed host-side parameter struct, in bytes.
        return 336
    def get_device_workspace_size(self, arguments):
        # This kernel needs no device-side scratch space.
        return 0
    def plan(self, arguments):
        """Compute the launch configuration for the given problem size."""
        grid = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
        return LaunchConfiguration([grid.m, grid.n, grid.k], [self.threads, 1, 1], self.shared_memory_capacity)
    def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)):
        """Pack kernel parameters into *host_workspace*; returns bytes written."""
        offset = 0
        swizzle_log_tile = 0
        gemm_mode = 0
        batch_count = 1
        gemm_k_size = arguments.problem_size.k
        # Packing order mirrors the device-side Params struct layout.
        offset = arguments.problem_size.pack_into(host_workspace, offset)
        grid_tiled_shape = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
        offset = grid_tiled_shape.pack_into(host_workspace, offset)
        offset = PackInteger(host_workspace, offset, swizzle_log_tile)
        offset = self.params_A.initialize(host_workspace, offset, arguments.A.layout)
        offset = self.params_B.initialize(host_workspace, offset, arguments.B.layout)
        offset = self.params_C.initialize(host_workspace, offset, arguments.C.layout)
        offset = self.params_D.initialize(host_workspace, offset, arguments.D.layout)
        offset = self.output_op.initialize(host_workspace, offset, arguments.output_op)
        offset = PackInteger(host_workspace, offset, gemm_mode)
        offset = PackInteger(host_workspace, offset, batch_count)
        offset = PackInteger(host_workspace, offset, gemm_k_size)
        offset = PackDevicePointer(host_workspace, offset, int(arguments.A.pointer))
        offset = PackDevicePointer(host_workspace, offset, int(arguments.B.pointer))
        offset = PackDevicePointer(host_workspace, offset, int(arguments.C.pointer))
        offset = PackDevicePointer(host_workspace, offset, int(arguments.D.pointer))
        return offset
def configure_data():
    """Build a DataConfig pre-populated with the standard default data settings."""
    default_settings = {
        'world_size': 1,
        'rank': -1,
        'persist_state': 0,
        'lazy': False,
        'transpose': False,
        'data_set_type': 'supervised',
        'seq_length': 256,
        'eval_seq_length': 256,
        'samples_per_shard': 100,
    }
    return DataConfig(defaults=default_settings)
def kfpoly(mu, nu, t=None):
    """Polynomial in ``t`` summing ``weight`` over riggings of ``mu`` whose first
    entry equals the conjugate of ``nu``; returns 1 when the partitions coincide.
    """
    if mu == nu:
        return 1
    if t is None:
        t = polygen(ZZ, 't')
    conjugate_shape = _Partitions(nu).conjugate()
    total = 0
    for rigging in riggings(mu):
        if rigging[0] == conjugate_shape:
            total += weight(rigging, t)
    return total
class BackpackSenses(StateDictSerializationMixin, eqx.Module):
    """Backpack 'sense' head: maps contextual embeddings to per-token sense vectors
    via LayerNorm -> NoMixBlock -> MLP.
    """
    dropout: hnn.Dropout
    block: NoMixBlock
    ln: hnn.LayerNorm
    final_mlp: BackpackMlp
    # Static (non-trainable) position axis carried from the config.
    Pos: Axis = eqx.static_field()
    # NOTE(review): defined without self/@staticmethod; callable only as
    # BackpackSenses.init(config, ...) — confirm this matches callers.
    def init(config, dropout_prob: float, *, key):
        (k_block, k_mlp) = jrandom.split(key, 2)
        dropout = hnn.Dropout(pdrop=dropout_prob)
        block = NoMixBlock.init(config, key=k_block)
        ln = hnn.LayerNorm.init(config.Embed, eps=config.layer_norm_epsilon)
        # Output axis pair (Senses, Embed): one embedding per sense per token.
        final_mlp = BackpackMlp.init(Embed=config.Embed, Mlp=config.SenseIntermediate, Out=(config.Senses, config.Embed), activation_fn=config.activation_function, key=k_mlp, use_bias=config.use_bias)
        return BackpackSenses(dropout=dropout, block=block, ln=ln, final_mlp=final_mlp, Pos=config.Pos)
    # NOTE(review): '_call' below looks like a truncated decorator (e.g. @named_call)
    # — confirm against the original file.
    _call
    def sense_embed(self, input_embeds, *, key):
        """Compute sense embeddings from input embeddings."""
        hidden_states = self.ln(input_embeds)
        # The block mixes the normalized states with the raw input embeddings.
        hidden_states = self.block(hidden_states, input_embeds, key=key)
        senses = self.final_mlp(hidden_states)
        return senses
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[(inds, label)].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None] |
def write_remote(client: ServiceClient, host_data: HostData, api_name: (str | None), location: str, base_url: (str | None), started_at: str, in_queue: Queue, out_queue: Queue, usage_data: (dict[(str, Any)] | None)) -> None:
    """Build a gzipped tar report from events on ``in_queue`` and upload it.

    Progress/result events (Metadata, Completed, Failed, Error) are emitted on
    ``out_queue``; any exception is reported as an ``events.Error`` instead of
    being raised to the caller (this runs in a worker).
    """
    payload = BytesIO()
    try:
        with tarfile.open(mode='w:gz', fileobj=payload) as tar:
            writer = ReportWriter(tar)
            ci_environment = ci.environment()
            writer.add_metadata(api_name=api_name, location=location, base_url=base_url, started_at=started_at, metadata=Metadata(), ci_environment=ci_environment, usage_data=usage_data)
            # Drain run events into the archive; abort quietly on interrupt.
            if (consume_events(writer, in_queue) == ConsumeResult.INTERRUPT):
                return
        # The gzip stream is finalized when the `with` block closes.
        data = payload.getvalue()
        out_queue.put(events.Metadata(size=len(data), ci_environment=ci_environment))
        provider = (ci_environment.provider if (ci_environment is not None) else None)
        response = client.upload_report(data, host_data.correlation_id, provider)
        event: events.Event
        if isinstance(response, UploadResponse):
            # Persist the correlation id so follow-up uploads are linked together.
            host_data.store_correlation_id(response.correlation_id)
            event = events.Completed(message=response.message, next_url=response.next_url)
        else:
            event = events.Failed(detail=response.detail)
        out_queue.put(event)
    except Exception as exc:
        # Surface the failure to the consumer rather than crashing the worker.
        out_queue.put(events.Error(exc))
def find_peaks(signal, fs, max_hz=950, min_hz=75, analysis_win_ms=40, max_change=1.5, min_change=0.6):
    """Locate quasi-periodic peaks in ``signal`` sampled at ``fs`` Hz.

    Two-pass period estimation: a coarse pass over the full [min_hz, max_hz]
    period range, then a refined pass restricted to +/-10% of the median
    period. Peaks are then chased left-to-right, each new peak being the
    maximum inside a [min_change, max_change]-scaled window of the locally
    estimated period after the previous peak.
    """
    n_samples = len(signal)
    shortest_period = fs // max_hz
    longest_period = fs // min_hz
    frame_len = int((analysis_win_ms / 1000) * fs)
    # Coarse per-frame period estimate over the full allowed range.
    periods = compute_periods_per_sequence(signal, frame_len, shortest_period, longest_period)
    # Refine with the search band narrowed to +/-10% around the median period.
    median_period = np.median(periods)
    periods = compute_periods_per_sequence(
        signal, frame_len, int(median_period * 0.9), int(median_period * 1.1))
    # Seed with the strongest sample inside the first period (10% slack).
    peaks = [np.argmax(signal[:int(periods[0] * 1.1)])]
    while True:
        last_peak = peaks[-1]
        frame_idx = min(last_peak // frame_len, len(periods) - 1)
        period = periods[frame_idx]
        window_lo = last_peak + int(period * min_change)
        window_hi = last_peak + int(period * max_change)
        if window_hi >= n_samples:
            break
        peaks.append(window_lo + np.argmax(signal[window_lo:window_hi]))
    return np.array(peaks)
def _import_dotted_name(name):
components = name.split('.')
obj = __import__(components[0])
for component in components[1:]:
obj = getattr(obj, component)
return obj |
class HierarchicalAttention(nn.Module):
    """Multi-task hierarchical attention over dialog utterances.

    For each task (one entry per element of ``class_num``) a private deep copy
    of ``backbone`` encodes every utterance, a GRU runs over the dialog
    history, and an additive attention pools the GRU states into one vector
    that a task-specific linear head classifies.
    """

    def __init__(self, backbone, class_num):
        """backbone: encoder module exposing ``d_model``; class_num: list of class counts per task."""
        super().__init__()
        self.drop_out = nn.Dropout(0.4)
        # One private backbone copy per task.
        self.private = nn.ModuleList([copy.deepcopy(backbone) for _ in class_num])
        d_model = backbone.d_model
        # Attention scorers: one scalar score per GRU state.
        self.w = nn.ModuleList([nn.Linear(d_model, 1) for _ in class_num])
        self.class_num = class_num
        self.gru = nn.ModuleList([nn.GRU(d_model, d_model, num_layers=1, bidirectional=False, batch_first=True) for _ in class_num])
        self.linear = nn.ModuleList([nn.Linear(d_model, num) for num in class_num])
        for layer in self.linear:
            init_params(layer)
        for layer in self.gru:
            init_params(layer)
        for layer in self.w:
            init_params(layer)

    def forward(self, input_ids, **kwargs):
        """Return a list with one logits tensor of shape (batch, num_classes) per task.

        input_ids: (batch, dialog_history, utterance_len) token ids; id 0 is
        assumed to be padding for the attention mask.
        """
        (bc_size, dialog_his, utt_len) = input_ids.size()
        # Flatten the dialog history so the backbone sees one utterance per row.
        input_ids = input_ids.view(-1, utt_len)
        attention_mask = input_ids.ne(0).detach()
        res = []
        for (private_module, gru, w, cls_layer) in zip(self.private, self.gru, self.w, self.linear):
            private_out = private_module(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
            private_out = private_out.view(bc_size, dialog_his, -1)
            (H, hidden) = gru(private_out)
            wh = w(H).squeeze(2)
            # torch.tanh replaces deprecated F.tanh; dim=-1 makes the softmax
            # axis explicit (the previous implicit dim resolved to the same
            # axis for this 2-D tensor, so behavior is unchanged).
            attention = F.softmax(torch.tanh(wh), dim=-1).unsqueeze(1)
            # Attention-weighted sum over the dialog-history axis.
            hidden = torch.bmm(attention, H).squeeze(1)
            hidden = self.drop_out(hidden)
            res.append(cls_layer(hidden))
        return res
def _build_rhs(equation, sols):
    """Symbolically evaluate each term of ``equation`` against every test solution.

    Returns {solution_name: rhs_string} where each rhs is the '+'-joined,
    sign-weighted string evaluation of the equation's terms. Raises ValueError
    if any term lacks a symbolic description.
    """
    rhss = {}
    tst.report(('%s:' % equation.name))
    tst.report('evaluating terms, "<=" is solution, "=>" is the rhs:')
    for term in equation.terms:
        if (not hasattr(term, 'symbolic')):
            tst.report(('term %s has no symbolic description!' % term.name))
            raise ValueError
        expr = term.symbolic['expression']
        arg_map = term.symbolic['map']
        tst.report(('%s(%s)' % (term.name, ', '.join(term.ats))))
        tst.report(('multiplicator: %f' % term.sign))
        tst.report(' symbolic:', expr)
        tst.report(' using argument map:', arg_map)
        for (sol_name, sol) in sols.items():
            # NOTE(review): 'sops' is not defined in this function — presumably a
            # module-level symbolic-operations table; verify it exists at module scope.
            rhs = _eval_term(sol[1], term, sops)
            # Wrap each term as "(sign * (evaluated_term))".
            srhs = ('(%s * (%s))' % (term.sign, rhs))
            rhss.setdefault(sol_name, []).append(srhs)
    # Collapse per-term strings into a single expression per solution.
    for (key, val) in rhss.items():
        rhss[key] = '+'.join(val)
    return rhss
# NOTE(review): '_module()' looks like a truncated registry decorator
# (e.g. @DETECTORS.register_module()) — confirm against the original file.
_module()
class NASFCOS(SingleStageDetector):
    """NAS-FCOS single-stage detector; all behavior comes from SingleStageDetector."""
    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
def test_static_field_eq_2(static_field_mock):
    """References to two different static fields must compare unequal."""
    first = vr.StaticFieldReference(static_field_mock)
    second = vr.StaticFieldReference(gao.GenericStaticField(MagicMock, 'bar', int))
    assert first != second
class CosineMixture(Benchmark):
    """Cosine Mixture benchmark: f(x) = -0.1*sum(cos(5*pi*x)) - sum(x^2) on [-1, 1]^N,
    with global minimum -0.9*N at x = (-1, ..., -1).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.change_dimensionality = True
        self._bounds = [(-1.0, 1.0) for _ in range(self.N)]
        self.global_optimum = [[-1.0 for _ in range(self.N)]]
        self.fglob = -0.9 * self.N
    def fun(self, x, *args):
        # Count every evaluation for benchmarking statistics.
        self.nfev += 1
        cosine_term = sum(cos(5.0 * pi * x))
        return -0.1 * cosine_term - sum(x ** 2.0)
def char_span_to_token_span(token_offsets: List[Tuple[int, int]], character_span: Tuple[int, int]) -> Tuple[Tuple[int, int], bool]:
    """Map a character span to an inclusive token-index span.

    token_offsets: per-token (start_char, end_char) pairs, in order.
    Returns ((start_token, end_token), error) where ``error`` is True when the
    character span does not align exactly with token boundaries (the returned
    span is then a best-effort approximation).

    Fix: the original raised IndexError when the character span lay past the
    last token (both scan loops could run their index to len(token_offsets)
    before indexing) and could wrap to index -1 via ``start_index -= 1``;
    such misalignments are now clamped and flagged via ``error``.
    """
    if not token_offsets:
        # No tokens at all: nothing to align to.
        return ((0, 0), True)
    error = False
    # Advance to the first token starting at or after the span start.
    start_index = 0
    while (start_index < len(token_offsets)) and (token_offsets[start_index][0] < character_span[0]):
        start_index += 1
    if start_index >= len(token_offsets):
        # Span starts past the final token: clamp instead of raising IndexError.
        start_index = len(token_offsets) - 1
        error = True
    if token_offsets[start_index][0] > character_span[0]:
        # Overshot: the span starts inside the previous token.
        logger.debug("Bad labelling or tokenization - start offset doesn't match")
        start_index = max(start_index - 1, 0)  # never step to -1 (would wrap around)
    if token_offsets[start_index][0] != character_span[0]:
        error = True
    # Advance to the first token ending at or after the span end.
    end_index = start_index
    while (end_index < len(token_offsets)) and (token_offsets[end_index][1] < character_span[1]):
        end_index += 1
    if end_index >= len(token_offsets):
        # Span ends past the final token: clamp instead of raising IndexError.
        end_index = len(token_offsets) - 1
        error = True
    if (end_index == start_index) and (token_offsets[end_index][1] > character_span[1]):
        logger.debug("Bad tokenization - end offset doesn't match")
    elif token_offsets[end_index][1] > character_span[1]:
        logger.debug("Bad labelling or tokenization - end offset doesn't match")
    if token_offsets[end_index][1] != character_span[1]:
        error = True
    return ((start_index, end_index), error)
def test_deepcopy_diff_state_edge():
    """A deep-copied nested-SDFG node is detached until an edge re-attaches it."""
    outer = dace.SDFG('deepcopy_nested_sdfg')
    outer.add_array('A', [1], dace.int32)
    first_state = outer.add_state('state_0')
    second_state = outer.add_state('state_1')
    inner = dace.SDFG('nested')
    inner_node = first_state.add_nested_sdfg(inner, None, {}, {})
    clone = copy.deepcopy(inner_node)
    # The fresh copy points back at itself but is not attached anywhere yet.
    assert (clone.sdfg.parent_nsdfg_node is clone)
    assert (clone.sdfg.parent is None)
    assert (clone.sdfg.parent_sdfg is None)
    access_node = second_state.add_access('A')
    second_state.add_edge(access_node, None, clone, None, dace.Memlet())
    # Adding the edge re-parents the copied nested SDFG into the new state.
    assert (clone.sdfg.parent is second_state)
    assert (clone.sdfg.parent_sdfg is outer)
def dynamic_nem_iteration(input_data, target_data, h_old, preds_old, gamma_old, k, collisions=None, actions=None):
    """Run a single (relational) N-EM cell step and compute its losses.

    input_data/target_data: 5-D tensors (B, K, W, H, C); k: number of components.
    Returns the scalar losses, the new (theta, pred, gamma) outputs, and the
    stacked per-component loss breakdowns.
    """
    input_shape = tf.shape(input_data)
    assert (input_shape.get_shape()[0].value == 5), 'Requires 5D input (B, K, W, H, C) but {}'.format(input_shape.get_shape()[0].value)
    (W, H, C) = (x.value for x in input_data.get_shape()[(- 3):])
    # Bernoulli pixel distribution used throughout this step.
    pixel_dist = 'bernoulli'
    inner_cell = build_network(K=k)
    nem_cell = NEMCell(inner_cell, input_shape=(W, H, C), distribution=pixel_dist)
    prior = compute_prior(distribution=pixel_dist)
    with tf.variable_scope('R-RNNEM') as varscope:
        with tf.name_scope('step_{}'.format(0)):
            inputs = (input_data, target_data)
            if (actions is not None):
                # Action-conditional variant: bundle the action into the hidden state.
                h_old = {'state': h_old, 'action': actions}
            hidden_state = (h_old, preds_old, gamma_old)
            (hidden_state, output) = nem_cell(inputs, hidden_state)
            (theta, pred, gamma) = output
            # All-zero placeholder collision tensor when none are supplied.
            collision = (tf.zeros((1, 1, 1, 1, 1)) if (collisions is None) else collisions)
            (total_loss, intra_loss, inter_loss, r_total_loss, r_intra_loss, r_inter_loss) = compute_outer_loss(pred, gamma, target_data, prior, pixel_distribution=pixel_dist, collision=collision)
            (total_ub_loss, intra_ub_loss, inter_ub_loss, r_total_ub_loss, r_intra_ub_loss, r_inter_ub_loss) = compute_outer_ub_loss(pred, target_data, prior, pixel_distribution=pixel_dist, collision=collision)
            # Stack the loss components for logging/inspection.
            other_losses = tf.stack([total_loss, intra_loss, inter_loss])
            other_ub_losses = tf.stack([total_ub_loss, intra_ub_loss, inter_ub_loss])
            r_other_losses = tf.stack([r_total_loss, r_intra_loss, r_inter_loss])
            r_other_ub_losses = tf.stack([r_total_ub_loss, r_intra_ub_loss, r_inter_ub_loss])
    return (total_loss, total_ub_loss, r_total_loss, r_total_ub_loss, theta, pred, gamma, other_losses, other_ub_losses, r_other_losses, r_other_ub_losses)
def generate_xorg_conf(devices):
    """Build (and print) an xorg.conf for headless NVIDIA GPUs.

    devices: iterable of PCI bus ids. Each device gets a Device and a Screen
    section, and all screens are registered in one ServerLayout section.
    Returns the configuration text.
    """
    device_section = '\nSection "Device"\n    Identifier     "Device{device_id}"\n    Driver         "nvidia"\n    VendorName     "NVIDIA Corporation"\n    BusID          "{bus_id}"\nEndSection\n'
    server_layout_section = '\nSection "ServerLayout"\n    Identifier     "Layout0"\n    {screen_records}\nEndSection\n'
    screen_section = '\nSection "Screen"\n    Identifier     "Screen{screen_id}"\n    Device         "Device{device_id}"\n    DefaultDepth    24\n    Option         "AllowEmptyInitialConfiguration" "True"\n    SubSection     "Display"\n        Depth       24\n        Virtual 1024 768\n    EndSubSection\nEndSection\n'
    sections = []
    layout_screens = []
    for (idx, bus) in enumerate(devices):
        sections.append(device_section.format(device_id=idx, bus_id=bus))
        sections.append(screen_section.format(device_id=idx, screen_id=idx))
        layout_screens.append('Screen {screen_id} "Screen{screen_id}" 0 0'.format(screen_id=idx))
    sections.append(server_layout_section.format(screen_records='\n    '.join(layout_screens)))
    output = '\n'.join(sections)
    print(output)
    return output
def spm_initializer(spm_path: str):
    """Worker initializer: load the SentencePiece model at ``spm_path`` and the
    id->piece vocabulary into the module-level globals ``spm`` and ``vocab``.
    """
    global spm, vocab
    spm = SentencePieceProcessor()
    spm.Load(spm_path)
    vocab = {piece_id: spm.IdToPiece(piece_id) for piece_id in range(spm.GetPieceSize())}
class OvercookedGridworld(object):
    """Overcooked layout MDP: terrain grid, deterministic transitions, reward shaping."""
    # Soup types an order list may contain; 'any' matches either soup type on delivery.
    ORDER_TYPES = (ObjectState.SOUP_TYPES + ['any'])
    def __init__(self, terrain, start_player_positions, start_order_list=None, cook_time=20, num_items_for_soup=3, delivery_reward=20, rew_shaping_params=None, layout_name='unnamed_layout'):
        """Build a gridworld MDP from a terrain matrix and player start positions.

        terrain: rows of terrain chars, indexed as terrain[y][x].
        start_order_list: ordered soup types to deliver, or None for endless play.
        rew_shaping_params: dense-reward coefficients; defaults to NO_REW_SHAPING_PARAMS.
        """
        self.height = len(terrain)
        self.width = len(terrain[0])
        self.shape = (self.width, self.height)
        self.terrain_mtx = terrain
        # Reverse index: terrain char -> list of (x, y) positions.
        self.terrain_pos_dict = self._get_terrain_type_pos_dict()
        self.start_player_positions = start_player_positions
        self.num_players = len(start_player_positions)
        self.start_order_list = start_order_list
        self.soup_cooking_time = cook_time
        self.num_items_for_soup = num_items_for_soup
        self.delivery_reward = delivery_reward
        self.reward_shaping_params = (NO_REW_SHAPING_PARAMS if (rew_shaping_params is None) else rew_shaping_params)
        self.layout_name = layout_name
def __eq__(self, other):
return (np.array_equal(self.terrain_mtx, other.terrain_mtx) and (self.start_player_positions == other.start_player_positions) and (self.start_order_list == other.start_order_list) and (self.soup_cooking_time == other.soup_cooking_time) and (self.num_items_for_soup == other.num_items_for_soup) and (self.delivery_reward == other.delivery_reward) and (self.reward_shaping_params == other.reward_shaping_params) and (self.layout_name == other.layout_name))
def copy(self):
return OvercookedGridworld(terrain=self.terrain_mtx.copy(), start_player_positions=self.start_player_positions, start_order_list=(None if (self.start_order_list is None) else list(self.start_order_list)), cook_time=self.soup_cooking_time, num_items_for_soup=self.num_items_for_soup, delivery_reward=self.delivery_reward, rew_shaping_params=copy.deepcopy(self.reward_shaping_params), layout_name=self.layout_name)
def mdp_params(self):
return {'layout_name': self.layout_name, 'terrain': self.terrain_mtx, 'start_player_positions': self.start_player_positions, 'start_order_list': self.start_order_list, 'cook_time': self.soup_cooking_time, 'num_items_for_soup': self.num_items_for_soup, 'delivery_reward': self.delivery_reward, 'rew_shaping_params': copy.deepcopy(self.reward_shaping_params)}
def from_layout_name(layout_name, **params_to_overwrite):
params_to_overwrite = params_to_overwrite.copy()
base_layout_params = read_layout_dict(layout_name)
grid = base_layout_params['grid']
del base_layout_params['grid']
base_layout_params['layout_name'] = layout_name
grid = [layout_row.strip() for layout_row in grid.split('\n')]
return OvercookedGridworld.from_grid(grid, base_layout_params, params_to_overwrite)
    def from_grid(layout_grid, base_layout_params={}, params_to_overwrite={}, debug=False):
        """Build an OvercookedGridworld from a character grid; digits mark player starts.

        Note: defined without ``self``; call as OvercookedGridworld.from_grid(...).
        The mutable default dicts are never mutated here (copied / read-only).
        """
        mdp_config = base_layout_params.copy()
        # Work on a mutable list-of-lists copy of the grid.
        layout_grid = [[c for c in row] for row in layout_grid]
        OvercookedGridworld._assert_valid_grid(layout_grid)
        # Up to 9 players, marked '1'..'9'; digits are replaced by walkable space.
        player_positions = ([None] * 9)
        for (y, row) in enumerate(layout_grid):
            for (x, c) in enumerate(row):
                if (c in ['1', '2', '3', '4', '5', '6', '7', '8', '9']):
                    layout_grid[y][x] = ' '
                    assert (player_positions[(int(c) - 1)] is None), 'Duplicate player in grid'
                    player_positions[(int(c) - 1)] = (x, y)
        num_players = len([x for x in player_positions if (x is not None)])
        player_positions = player_positions[:num_players]
        mdp_config['terrain'] = layout_grid
        mdp_config['start_player_positions'] = player_positions
        # Overrides may only touch keys that already exist in the base config.
        for (k, v) in params_to_overwrite.items():
            curr_val = mdp_config[k]
            if debug:
                print('Overwriting mdp layout standard config value {}:{} -> {}'.format(k, curr_val, v))
            mdp_config[k] = v
        return OvercookedGridworld(**mdp_config)
def get_actions(self, state):
self._check_valid_state(state)
return [self._get_player_actions(state, i) for i in range(len(state.players))]
    def _get_player_actions(self, state, player_num):
        # Every action is always legal for every player in this MDP.
        return Action.ALL_ACTIONS
def _check_action(self, state, joint_action):
for (p_action, p_legal_actions) in zip(joint_action, self.get_actions(state)):
if (p_action not in p_legal_actions):
raise ValueError('Invalid action')
def get_standard_start_state(self):
start_state = OvercookedState.from_player_positions(self.start_player_positions, order_list=self.start_order_list)
return start_state
    def get_random_start_state(self, random_player_pos=False):
        """Start state with randomized counter/pot contents (and optionally player positions).

        Counters ('X') receive a random object (onion/dish/ready soup/nothing)
        and pots ('P') a random number of onions. Uses the global ``random``
        module, so results depend on its seed.
        """
        state = self.get_standard_start_state()
        empty_slots = []
        for y in range(len(self.terrain_mtx)):
            for x in range(len(self.terrain_mtx[0])):
                pos = (x, y)
                terrain_type = self.get_terrain_type_at_pos(pos)
                if (terrain_type == ' '):
                    empty_slots.append(pos)
                elif (terrain_type == 'X'):
                    obj_type = random.choice(['onion', 'dish', 'soup', 'empty'])
                    if (obj_type == 'onion'):
                        state.add_object(ObjectState('onion', pos))
                    elif (obj_type == 'dish'):
                        state.add_object(ObjectState('dish', pos))
                    elif (obj_type == 'soup'):
                        # ('onion', 3, 20): a fully loaded, fully cooked soup.
                        state.add_object(ObjectState('soup', pos, ('onion', 3, 20)))
                    elif (obj_type == 'empty'):
                        pass
                    else:
                        raise KeyError(f'undefined object {obj_type}')
                elif (terrain_type == 'P'):
                    # 0-3 onions, cook timer at 0.
                    num = random.randint(0, 3)
                    if (num == 0):
                        pass
                    elif (num == 1):
                        state.add_object(ObjectState('soup', pos, ('onion', 1, 0)))
                    elif (num == 2):
                        state.add_object(ObjectState('soup', pos, ('onion', 2, 0)))
                    elif (num == 3):
                        state.add_object(ObjectState('soup', pos, ('onion', 3, 0)))
                    else:
                        raise KeyError(f'cannot put {num} onions in soup')
        if random_player_pos:
            # Re-sample distinct walkable positions and random orientations for all players.
            p = []
            ori = []
            players = []
            for i in range(self.num_players):
                b = True
                while b:
                    pos = random.choice(empty_slots)
                    if (pos not in p):
                        p.append(pos)
                        ori.append(random.choice(Direction.ALL_DIRECTIONS))
                        players.append(PlayerState(p[(- 1)], ori[(- 1)]))
                        b = False
            state.players = tuple(players)
        return state
    def get_random_start_state_fn(self, random_start_pos=False, rnd_obj_prob_thresh=0.0):
        """Return a zero-argument factory producing (optionally randomized) start states.

        With probability ``rnd_obj_prob_thresh`` each empty pot receives 1-3
        onions and each player a random held object (uses numpy's global RNG).
        """
        def start_state_fn():
            # Optionally sample the joint start position uniformly from valid ones.
            if random_start_pos:
                valid_positions = self.get_valid_joint_player_positions()
                start_pos = valid_positions[np.random.choice(len(valid_positions))]
            else:
                start_pos = self.start_player_positions
            start_state = OvercookedState.from_player_positions(start_pos, order_list=self.start_order_list)
            if (rnd_obj_prob_thresh == 0):
                return start_state
            # Randomly seed empty pots with 1-3 onions (cook timer at 0).
            pots = self.get_pot_states(start_state)['empty']
            for pot_loc in pots:
                p = np.random.rand()
                if (p < rnd_obj_prob_thresh):
                    n = int(np.random.randint(low=1, high=4))
                    start_state.objects[pot_loc] = ObjectState('soup', pot_loc, ('onion', n, 0))
            # Randomly hand players a dish, onion, or fully cooked soup.
            for player in start_state.players:
                p = np.random.rand()
                if (p < rnd_obj_prob_thresh):
                    obj = np.random.choice(['dish', 'onion', 'soup'], p=[0.2, 0.6, 0.2])
                    if (obj == 'soup'):
                        player.set_object(ObjectState(obj, player.position, ('onion', self.num_items_for_soup, self.soup_cooking_time)))
                    else:
                        player.set_object(ObjectState(obj, player.position))
            return start_state
        return start_state_fn
def is_terminal(self, state):
if (state.order_list is None):
return False
return (len(state.order_list) == 0)
    def get_valid_player_positions(self):
        # All walkable tiles (terrain ' ').
        return self.terrain_pos_dict[' ']
def get_valid_joint_player_positions(self):
valid_positions = self.get_valid_player_positions()
all_joint_positions = list(itertools.product(valid_positions, repeat=self.num_players))
valid_joint_positions = [j_pos for j_pos in all_joint_positions if (not self.is_joint_position_collision(j_pos))]
return valid_joint_positions
def get_valid_player_positions_and_orientations(self):
valid_states = []
for pos in self.get_valid_player_positions():
valid_states.extend([(pos, d) for d in Direction.ALL_DIRECTIONS])
return valid_states
def get_valid_joint_player_positions_and_orientations(self):
valid_player_states = self.get_valid_player_positions_and_orientations()
valid_joint_player_states = []
for players_pos_and_orientations in itertools.product(valid_player_states, repeat=self.num_players):
joint_position = [plyer_pos_and_or[0] for plyer_pos_and_or in players_pos_and_orientations]
if (not self.is_joint_position_collision(joint_position)):
valid_joint_player_states.append(players_pos_and_orientations)
return valid_joint_player_states
def get_adjacent_features(self, player):
adj_feats = []
pos = player.position
for d in Direction.ALL_DIRECTIONS:
adj_pos = Action.move_in_direction(pos, d)
adj_feats.append((pos, self.get_terrain_type_at_pos(adj_pos)))
return adj_feats
def get_terrain_type_at_pos(self, pos):
(x, y) = pos
return self.terrain_mtx[y][x]
def get_dish_dispenser_locations(self):
return list(self.terrain_pos_dict['D'])
def get_onion_dispenser_locations(self):
return list(self.terrain_pos_dict['O'])
def get_tomato_dispenser_locations(self):
return list(self.terrain_pos_dict['T'])
def get_serving_locations(self):
return list(self.terrain_pos_dict['S'])
def get_pot_locations(self):
return list(self.terrain_pos_dict['P'])
def get_counter_locations(self):
return list(self.terrain_pos_dict['X'])
    def get_pot_states(self, state):
        """Classify every pot position by its contents.

        Returns {'empty': [pos], 'onion': {...}, 'tomato': {...}} where each
        soup-type sub-dict maps '{n}_items' / 'ready' / 'cooking' /
        'partially_full' to lists of pot positions.
        """
        pots_states_dict = {}
        pots_states_dict['empty'] = []
        pots_states_dict['onion'] = defaultdict(list)
        pots_states_dict['tomato'] = defaultdict(list)
        for pot_pos in self.get_pot_locations():
            if (not state.has_object(pot_pos)):
                pots_states_dict['empty'].append(pot_pos)
            else:
                soup_obj = state.get_object(pot_pos)
                (soup_type, num_items, cook_time) = soup_obj.state
                if (0 < num_items < self.num_items_for_soup):
                    pots_states_dict[soup_type]['{}_items'.format(num_items)].append(pot_pos)
                elif (num_items == self.num_items_for_soup):
                    assert (cook_time <= self.soup_cooking_time)
                    if (cook_time == self.soup_cooking_time):
                        pots_states_dict[soup_type]['ready'].append(pot_pos)
                    else:
                        pots_states_dict[soup_type]['cooking'].append(pot_pos)
                else:
                    raise ValueError('Pot with more than {} items'.format(self.num_items_for_soup))
                # Partially-full pots are listed under both '{n}_items' and 'partially_full'.
                if (0 < num_items < self.num_items_for_soup):
                    pots_states_dict[soup_type]['partially_full'].append(pot_pos)
        return pots_states_dict
def get_counter_objects_dict(self, state, counter_subset=None):
counters_considered = (self.terrain_pos_dict['X'] if (counter_subset is None) else counter_subset)
counter_objects_dict = defaultdict(list)
for obj in state.objects.values():
if (obj.position in counters_considered):
counter_objects_dict[obj.name].append(obj.position)
return counter_objects_dict
def get_empty_counter_locations(self, state):
counter_locations = self.get_counter_locations()
return [pos for pos in counter_locations if (not state.has_object(pos))]
    def get_state_transition(self, state, joint_action):
        """Deterministic transition: returns (next_state, infos) without mutating ``state``.

        infos carries per-agent sparse/shaped rewards and shaped-info counters.
        """
        assert (not self.is_terminal(state)), 'Trying to find successor of a terminal state: {}'.format(state)
        for (action, action_set) in zip(joint_action, self.get_actions(state)):
            if (action not in action_set):
                raise ValueError(('Illegal action %s in state %s' % (action, state)))
        new_state = state.deepcopy()
        # Interactions are resolved before movement, so positions must be unchanged here.
        (sparse_reward_by_agent, shaped_reward_by_agent, shaped_info_by_agent) = self.resolve_interacts(new_state, joint_action)
        assert (new_state.player_positions == state.player_positions)
        assert (new_state.player_orientations == state.player_orientations)
        self.resolve_movement(new_state, joint_action)
        # Advance time-driven effects (e.g. soup cooking).
        self.step_environment_effects(new_state)
        infos = {'sparse_reward_by_agent': sparse_reward_by_agent, 'shaped_reward_by_agent': shaped_reward_by_agent, 'shaped_info_by_agent': shaped_info_by_agent}
        return (new_state, infos)
def resolve_interacts(self, new_state, joint_action):
pot_states = self.get_pot_states(new_state)
ready_pots = (pot_states['tomato']['ready'] + pot_states['onion']['ready'])
cooking_pots = ((ready_pots + pot_states['tomato']['cooking']) + pot_states['onion']['cooking'])
nearly_ready_pots = ((cooking_pots + pot_states['tomato']['partially_full']) + pot_states['onion']['partially_full'])
(sparse_reward, shaped_reward) = (([0] * self.num_players), ([0] * self.num_players))
shaped_info = [{'put_onion_on_X': 0, 'put_dish_on_X': 0, 'put_soup_on_X': 0, 'pickup_onion_from_X': 0, 'pickup_onion_from_O': 0, 'pickup_dish_from_X': 0, 'pickup_dish_from_D': 0, 'pickup_soup_from_X': 0, 'USEFUL_DISH_PICKUP': 0, 'SOUP_PICKUP': 0, 'PLACEMENT_IN_POT': 0, 'delivery': 0} for _ in range(self.num_players)]
for (player_idx, player, action) in zip(range(self.num_players), new_state.players, joint_action):
if (action != Action.INTERACT):
continue
(pos, o) = (player.position, player.orientation)
i_pos = Action.move_in_direction(pos, o)
terrain_type = self.get_terrain_type_at_pos(i_pos)
if (terrain_type == 'X'):
if (player.has_object() and (not new_state.has_object(i_pos))):
shaped_info[player_idx][f'put_{player.get_object().name}_on_X'] += 1
new_state.add_object(player.remove_object(), i_pos)
elif ((not player.has_object()) and new_state.has_object(i_pos)):
player.set_object(new_state.remove_object(i_pos))
shaped_info[player_idx][f'pickup_{player.get_object().name}_from_X'] += 1
elif ((terrain_type == 'O') and (player.held_object is None)):
player.set_object(ObjectState('onion', pos))
shaped_info[player_idx][f'pickup_{player.get_object().name}_from_O'] += 1
elif ((terrain_type == 'T') and (player.held_object is None)):
player.set_object(ObjectState('tomato', pos))
shaped_info[player_idx][f'pickup_{player.get_object().name}_from_T'] += 1
elif ((terrain_type == 'D') and (player.held_object is None)):
dishes_already = len(new_state.player_objects_by_type['dish'])
player.set_object(ObjectState('dish', pos))
dishes_on_counters = self.get_counter_objects_dict(new_state)['dish']
if ((len(nearly_ready_pots) > dishes_already) and (len(dishes_on_counters) == 0)):
shaped_reward[player_idx] += self.reward_shaping_params['DISH_PICKUP_REWARD']
shaped_info[player_idx]['USEFUL_DISH_PICKUP'] += 1
shaped_info[player_idx][f'pickup_{player.get_object().name}_from_D'] += 1
elif ((terrain_type == 'P') and player.has_object()):
if ((player.get_object().name == 'dish') and new_state.has_object(i_pos)):
obj = new_state.get_object(i_pos)
assert (obj.name == 'soup'), 'Object in pot was not soup'
(_, num_items, cook_time) = obj.state
if ((num_items == self.num_items_for_soup) and (cook_time >= self.soup_cooking_time)):
player.remove_object()
player.set_object(new_state.remove_object(i_pos))
shaped_reward[player_idx] += self.reward_shaping_params['SOUP_PICKUP_REWARD']
shaped_info[player_idx]['SOUP_PICKUP'] += 1
elif (player.get_object().name in ['onion', 'tomato']):
item_type = player.get_object().name
if (not new_state.has_object(i_pos)):
player.remove_object()
new_state.add_object(ObjectState('soup', i_pos, (item_type, 1, 0)), i_pos)
shaped_reward[player_idx] += self.reward_shaping_params['PLACEMENT_IN_POT_REW']
shaped_info[player_idx]['PLACEMENT_IN_POT'] += 1
else:
obj = new_state.get_object(i_pos)
assert (obj.name == 'soup'), 'Object in pot was not soup'
(soup_type, num_items, cook_time) = obj.state
if ((num_items < self.num_items_for_soup) and (soup_type == item_type)):
player.remove_object()
obj.state = (soup_type, (num_items + 1), 0)
shaped_reward[player_idx] += self.reward_shaping_params['PLACEMENT_IN_POT_REW']
shaped_info[player_idx]['PLACEMENT_IN_POT'] += 1
elif ((terrain_type == 'S') and player.has_object()):
obj = player.get_object()
if (obj.name == 'soup'):
(new_state, delivery_rew) = self.deliver_soup(new_state, player, obj)
sparse_reward[player_idx] += delivery_rew
shaped_info[player_idx]['delivery'] += 1
if ((new_state.order_list is not None) and (len(new_state.order_list) == 0)):
break
return (sparse_reward, shaped_reward, shaped_info)
    def deliver_soup(self, state, player, soup_obj):
        """Remove the delivered soup from ``player`` and return (state, reward).

        The reward is granted when there is no order list (endless mode) or when
        the soup matches the current order; a matching delivery pops the order.
        """
        (soup_type, num_items, cook_time) = soup_obj.state
        assert (soup_type in ObjectState.SOUP_TYPES)
        assert (num_items == self.num_items_for_soup)
        assert (cook_time >= self.soup_cooking_time), 'Cook time {} mdp cook time {}'.format(cook_time, self.soup_cooking_time)
        player.remove_object()
        if (state.order_list is None):
            # Endless mode: every valid delivery is rewarded.
            return (state, self.delivery_reward)
        assert (not self.is_terminal(state))
        current_order = state.order_list[0]
        if ((current_order == 'any') or (soup_type == current_order)):
            # Matching delivery: consume the order and grant the reward.
            state.order_list = state.order_list[1:]
            return (state, self.delivery_reward)
        # Wrong soup type: no reward, order list unchanged.
        return (state, 0)
def resolve_movement(self, state, joint_action):
(new_positions, new_orientations) = self.compute_new_positions_and_orientations(state.players, joint_action)
for (player_state, new_pos, new_o) in zip(state.players, new_positions, new_orientations):
player_state.update_pos_and_or(new_pos, new_o)
def compute_new_positions_and_orientations(self, old_player_states, joint_action):
(new_positions, new_orientations) = list(zip(*[self._move_if_direction(p.position, p.orientation, a) for (p, a) in zip(old_player_states, joint_action)]))
old_positions = tuple((p.position for p in old_player_states))
new_positions = self._handle_collisions(old_positions, new_positions)
return (new_positions, new_orientations)
def is_transition_collision(self, old_positions, new_positions):
if self.is_joint_position_collision(new_positions):
return True
for (idx0, idx1) in itertools.combinations(range(self.num_players), 2):
(p1_old, p2_old) = (old_positions[idx0], old_positions[idx1])
(p1_new, p2_new) = (new_positions[idx0], new_positions[idx1])
if ((p1_new == p2_old) and (p1_old == p2_new)):
return True
return False
def is_joint_position_collision(self, joint_position):
return any(((pos0 == pos1) for (pos0, pos1) in itertools.combinations(joint_position, 2)))
    def step_environment_effects(self, state):
        """Advance time by one step: tick the cook timer of full soups sitting on pots.

        Always returns 0 — the reward accumulator is never modified here;
        presumably kept for interface symmetry.
        """
        reward = 0
        state.timestep += 1
        for obj in state.objects.values():
            if (obj.name == 'soup'):
                (x, y) = obj.position
                (soup_type, num_items, cook_time) = obj.state
                # Only full soups on a pot tile cook, capped at soup_cooking_time.
                if ((self.terrain_mtx[y][x] == 'P') and (num_items == self.num_items_for_soup) and (cook_time < self.soup_cooking_time)):
                    obj.state = (soup_type, num_items, (cook_time + 1))
        return reward
def _handle_collisions(self, old_positions, new_positions):
if self.is_transition_collision(old_positions, new_positions):
return old_positions
return new_positions
def _get_terrain_type_pos_dict(self):
pos_dict = defaultdict(list)
for (y, terrain_row) in enumerate(self.terrain_mtx):
for (x, terrain_type) in enumerate(terrain_row):
pos_dict[terrain_type].append((x, y))
return pos_dict
def _move_if_direction(self, position, orientation, action):
if (action == Action.INTERACT):
return (position, orientation)
new_pos = Action.move_in_direction(position, action)
new_orientation = (orientation if (action == Action.STAY) else action)
if (new_pos not in self.get_valid_player_positions()):
return (position, new_orientation)
return (new_pos, new_orientation)
    def _check_valid_state(self, state):
        """Sanity-check ``state`` against this layout (asserts on any violation).

        Checks: players stand on walkable tiles, held objects travel with their
        holder, loose objects sit on non-walkable tiles at consistent
        positions, nothing overlaps, and every object's own invariants hold.
        """
        all_objects = list(state.objects.values())
        for player_state in state.players:
            pos = player_state.position
            assert (pos in self.get_valid_player_positions())
            if (player_state.held_object is not None):
                all_objects.append(player_state.held_object)
                # A held object must share its holder's position.
                assert (player_state.held_object.position == player_state.position)
        for (obj_pos, obj_state) in state.objects.items():
            assert (obj_state.position == obj_pos)
            assert (self.get_terrain_type_at_pos(obj_pos) != ' ')
        all_pos = [player_state.position for player_state in state.players]
        all_pos += [obj_state.position for obj_state in state.objects.values()]
        assert (len(all_pos) == len(set(all_pos))), 'Overlapping players or objects'
        for obj_state in all_objects:
            assert obj_state.is_valid()
def _assert_valid_grid(grid):
height = len(grid)
width = len(grid[0])
assert all(((len(row) == width) for row in grid)), 'Ragged grid'
def is_not_free(c):
return (c in 'XOPDST')
for y in range(height):
assert is_not_free(grid[y][0]), 'Left border must not be free'
assert is_not_free(grid[y][(- 1)]), 'Right border must not be free'
for x in range(width):
assert is_not_free(grid[0][x]), 'Top border must not be free'
assert is_not_free(grid[(- 1)][x]), 'Bottom border must not be free'
all_elements = [element for row in grid for element in row]
digits = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
layout_digits = [e for e in all_elements if (e in digits)]
num_players = len(layout_digits)
assert (num_players > 0), 'No players (digits) in grid'
layout_digits = list(sorted(map(int, layout_digits)))
assert (layout_digits == list(range(1, (num_players + 1)))), 'Some players were missing'
assert all(((c in 'XOPDST ') for c in all_elements)), 'Invalid character in grid'
assert (all_elements.count('1') == 1), "'1' must be present exactly once"
assert (all_elements.count('D') >= 1), "'D' must be present at least once"
assert (all_elements.count('S') >= 1), "'S' must be present at least once"
assert (all_elements.count('P') >= 1), "'P' must be present at least once"
assert ((all_elements.count('O') >= 1) or (all_elements.count('T') >= 1)), "'O' or 'T' must be present at least once"
def state_string(self, state):
    """Render the MDP state as a human-readable ASCII grid.

    A player cell shows the orientation arrow followed by the first
    letter of the held object (or the player index when empty-handed);
    counters ('X') show the first letter of any object on them; pots
    ('P') show soup progress. A trailing order summary is appended when
    the state tracks an order list.
    """
    players_dict = {player.position: player for player in state.players}
    grid_string = ''
    for (y, terrain_row) in enumerate(self.terrain_mtx):
        for (x, element) in enumerate(terrain_row):
            if ((x, y) in players_dict.keys()):
                player = players_dict[(x, y)]
                orientation = player.orientation
                assert (orientation in Direction.ALL_DIRECTIONS)
                grid_string += Action.ACTION_TO_CHAR[orientation]
                player_object = player.held_object
                if player_object:
                    grid_string += player_object.name[:1]
                else:
                    player_idx_lst = [i for (i, p) in enumerate(state.players) if (p.position == player.position)]
                    assert (len(player_idx_lst) == 1)
                    grid_string += str(player_idx_lst[0])
            elif ((element == 'X') and state.has_object((x, y))):
                state_obj = state.get_object((x, y))
                grid_string = ((grid_string + element) + state_obj.name[:1])
            elif ((element == 'P') and state.has_object((x, y))):
                soup_obj = state.get_object((x, y))
                (soup_type, num_items, cook_time) = soup_obj.state
                # NOTE(review): both branches append an empty string — these
                # look like soup glyphs lost in an encoding pass; confirm
                # against the original rendering.
                if (soup_type == 'onion'):
                    grid_string += ''
                elif (soup_type == 'tomato'):
                    grid_string += ''
                else:
                    raise ValueError()
                if (num_items == self.num_items_for_soup):
                    grid_string += str(cook_time)
                elif (num_items == 2):
                    grid_string += '='
                else:
                    grid_string += '-'
            else:
                grid_string += (element + ' ')
        grid_string += '\n'
    if (state.order_list is not None):
        # Bug fix: the second placeholder previously used
        # len([(order == 'any') for order in ...]), which always equals the
        # total number of orders; count only the orders that are 'any'.
        grid_string += "Current orders: {}/{} are any's\n".format(
            len(state.order_list),
            sum(1 for order in state.order_list if order == 'any'))
    return grid_string
def lossless_state_encoding(self, overcooked_state, debug=False):
    """Encode the state as a stack of 2-D masks, one stack per player.

    Returns a tuple of (width, height, n_layers) int arrays, one per
    player. Layers are ordered so that the "primary" player's location
    and orientation layers come first, giving each player an egocentric
    view of the same underlying state. Static layout layers (pots,
    counters, dispensers, serving tiles) are followed by dynamic layers
    (pot contents/cook time, loose soups, dishes, onions).
    """
    assert (type(debug) is bool)
    # Static layers fixed by the layout vs. dynamic layers derived from state.
    base_map_features = ['pot_loc', 'counter_loc', 'onion_disp_loc', 'dish_disp_loc', 'serve_loc']
    variable_map_features = ['onions_in_pot', 'onions_cook_time', 'onion_soup_loc', 'dishes', 'onions']
    all_objects = overcooked_state.all_objects_list
    def make_layer(position, value):
        # Single-entry mask: `value` at `position`, zeros elsewhere.
        layer = np.zeros(self.shape)
        layer[position] = value
        return layer
    def process_for_player(primary_agent_idx):
        # Build the full layer stack from `primary_agent_idx`'s perspective.
        other_agent_idx = (1 - primary_agent_idx)
        ordered_player_features = (['player_{}_loc'.format(primary_agent_idx), 'player_{}_loc'.format(other_agent_idx)] + ['player_{}_orientation_{}'.format(i, Direction.DIRECTION_TO_INDEX[d]) for (i, d) in itertools.product([primary_agent_idx, other_agent_idx], Direction.ALL_DIRECTIONS)])
        # LAYERS fixes the channel ordering of the final stacked encoding.
        LAYERS = ((ordered_player_features + base_map_features) + variable_map_features)
        state_mask_dict = {k: np.zeros(self.shape) for k in LAYERS}
        # Static layout layers.
        for loc in self.get_counter_locations():
            state_mask_dict['counter_loc'][loc] = 1
        for loc in self.get_pot_locations():
            state_mask_dict['pot_loc'][loc] = 1
        for loc in self.get_onion_dispenser_locations():
            state_mask_dict['onion_disp_loc'][loc] = 1
        for loc in self.get_dish_dispenser_locations():
            state_mask_dict['dish_disp_loc'][loc] = 1
        for loc in self.get_serving_locations():
            state_mask_dict['serve_loc'][loc] = 1
        # Player location and one-hot orientation layers.
        for (i, player) in enumerate(overcooked_state.players):
            player_orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
            state_mask_dict['player_{}_loc'.format(i)] = make_layer(player.position, 1)
            state_mask_dict['player_{}_orientation_{}'.format(i, player_orientation_idx)] = make_layer(player.position, 1)
        # Dynamic object layers; only onion soups are supported here.
        for obj in all_objects:
            if (obj.name == 'soup'):
                (soup_type, num_onions, cook_time) = obj.state
                if (soup_type == 'onion'):
                    if (obj.position in self.get_pot_locations()):
                        # Soup still in a pot: record contents and progress.
                        (soup_type, num_onions, cook_time) = obj.state
                        state_mask_dict['onions_in_pot'] += make_layer(obj.position, num_onions)
                        state_mask_dict['onions_cook_time'] += make_layer(obj.position, cook_time)
                    else:
                        # Plated/loose soup outside a pot.
                        state_mask_dict['onion_soup_loc'] += make_layer(obj.position, 1)
                else:
                    raise ValueError('Unrecognized soup')
            elif (obj.name == 'dish'):
                state_mask_dict['dishes'] += make_layer(obj.position, 1)
            elif (obj.name == 'onion'):
                state_mask_dict['onions'] += make_layer(obj.position, 1)
            else:
                raise ValueError('Unrecognized object')
        if debug:
            print(len(LAYERS))
            print(len(state_mask_dict))
            for (k, v) in state_mask_dict.items():
                print(k)
                print(np.transpose(v, (1, 0)))
        # Stack channels last: (layers, w, h) -> (w, h, layers).
        state_mask_stack = np.array([state_mask_dict[layer_id] for layer_id in LAYERS])
        state_mask_stack = np.transpose(state_mask_stack, (1, 2, 0))
        assert (state_mask_stack.shape[:2] == self.shape)
        assert (state_mask_stack.shape[2] == len(LAYERS))
        return np.array(state_mask_stack).astype(int)
    num_players = len(overcooked_state.players)
    final_obs_for_players = tuple((process_for_player(i) for i in range(num_players)))
    return final_obs_for_players
def featurize_state(self, overcooked_state, mlp):
    """Hand-crafted, lossy feature encoding of the state for each player.

    Returns a tuple (features_p0, features_p1). Each vector concatenates
    the player's own features, the other player's features, the relative
    offset to the other player, and the player's absolute position.
    Closest-location features are zeroed when the player already holds
    the corresponding item.
    """
    all_features = {}
    def make_closest_feature(idx, name, locations):
        # (dy, dx) from `player` (loop variable below) to the cheapest
        # reachable location, per the motion planner in `mlp`.
        all_features['p{}_closest_{}'.format(idx, name)] = self.get_deltas_to_closest_location(player, locations, mlp)
    IDX_TO_OBJ = ['onion', 'soup', 'dish']
    OBJ_TO_IDX = {o_name: idx for (idx, o_name) in enumerate(IDX_TO_OBJ)}
    counter_objects = self.get_counter_objects_dict(overcooked_state)
    pot_state = self.get_pot_states(overcooked_state)
    for (i, player) in enumerate(overcooked_state.players):
        orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
        all_features['p{}_orientation'.format(i)] = np.eye(4)[orientation_idx]
        obj = player.held_object
        if (obj is None):
            held_obj_name = 'none'
            all_features['p{}_objs'.format(i)] = np.zeros(len(IDX_TO_OBJ))
        else:
            held_obj_name = obj.name
            obj_idx = OBJ_TO_IDX[held_obj_name]
            all_features['p{}_objs'.format(i)] = np.eye(len(IDX_TO_OBJ))[obj_idx]
        if (held_obj_name == 'onion'):
            all_features['p{}_closest_onion'.format(i)] = (0, 0)
        else:
            make_closest_feature(i, 'onion', (self.get_onion_dispenser_locations() + counter_objects['onion']))
        make_closest_feature(i, 'empty_pot', pot_state['empty'])
        make_closest_feature(i, 'one_onion_pot', pot_state['onion']['one_onion'])
        make_closest_feature(i, 'two_onion_pot', pot_state['onion']['two_onion'])
        make_closest_feature(i, 'cooking_pot', pot_state['onion']['cooking'])
        make_closest_feature(i, 'ready_pot', pot_state['onion']['ready'])
        if (held_obj_name == 'dish'):
            all_features['p{}_closest_dish'.format(i)] = (0, 0)
        else:
            make_closest_feature(i, 'dish', (self.get_dish_dispenser_locations() + counter_objects['dish']))
        if (held_obj_name == 'soup'):
            all_features['p{}_closest_soup'.format(i)] = (0, 0)
        else:
            make_closest_feature(i, 'soup', counter_objects['soup'])
        make_closest_feature(i, 'serving', self.get_serving_locations())
        for (direction, pos_and_feat) in enumerate(self.get_adjacent_features(player)):
            (adj_pos, feat) = pos_and_feat
            if (direction == player.orientation):
                # Whether the player is facing an empty counter tile.
                facing_counter = ((feat == 'X') and (adj_pos not in overcooked_state.objects.keys()))
                facing_counter_feature = ([1] if facing_counter else [0])
                all_features['p{}_facing_empty_counter'.format(i)] = facing_counter_feature
            all_features['p{}_wall_{}'.format(i, direction)] = ([0] if (feat == ' ') else [1])
    features_np = {k: np.array(v) for (k, v) in all_features.items()}
    (p0, p1) = overcooked_state.players
    p0_dict = {k: v for (k, v) in features_np.items() if (k[:2] == 'p0')}
    p1_dict = {k: v for (k, v) in features_np.items() if (k[:2] == 'p1')}
    p0_features = np.concatenate(list(p0_dict.values()))
    p1_features = np.concatenate(list(p1_dict.values()))
    p1_rel_to_p0 = np.array(pos_distance(p1.position, p0.position))
    abs_pos_p0 = np.array(p0.position)
    ordered_features_p0 = np.squeeze(np.concatenate([p0_features, p1_features, p1_rel_to_p0, abs_pos_p0]))
    p0_rel_to_p1 = np.array(pos_distance(p0.position, p1.position))
    # Bug fix: this previously used p0.position, so player 1's feature
    # vector carried player 0's absolute coordinates.
    abs_pos_p1 = np.array(p1.position)
    ordered_features_p1 = np.squeeze(np.concatenate([p1_features, p0_features, p0_rel_to_p1, abs_pos_p1]))
    return (ordered_features_p0, ordered_features_p1)
def get_deltas_to_closest_location(self, player, locations, mlp):
    """Offset from the player to the cheapest-to-reach location in
    `locations` according to the motion planner, or (0, 0) when no
    location is reachable."""
    _, closest = mlp.mp.min_cost_to_feature(player.pos_and_or, locations, with_argmin=True)
    if closest is None:
        return (0, 0)
    delta_y, delta_x = pos_distance(closest, player.position)
    return (delta_y, delta_x)
def calculate_distance_based_shaped_reward(self, state, new_state):
    """Dense shaping reward for moving toward the currently useful target.

    For each player: carrying a dish (with enough nearly-ready pots)
    rewards approaching a pot; empty-handed with soups cooking and no
    dishes in play rewards approaching a dish dispenser; carrying a soup
    rewards approaching a serving tile. Returns the sum over players for
    the transition state -> new_state.
    """
    distance_based_shaped_reward = 0
    pot_states = self.get_pot_states(new_state)
    ready_pots = (pot_states['tomato']['ready'] + pot_states['onion']['ready'])
    cooking_pots = ((ready_pots + pot_states['tomato']['cooking']) + pot_states['onion']['cooking'])
    nearly_ready_pots = ((cooking_pots + pot_states['tomato']['partially_full']) + pot_states['onion']['partially_full'])
    dishes_in_play = len(new_state.player_objects_by_type['dish'])
    # Distances are normalized by a fixed horizon; targets further than
    # max_dist earn no shaping reward.
    max_dist = 8

    def _min_dists(locations, player_old, player_new):
        # Smallest Euclidean distance from the player's old/new position
        # to any target location (inf when `locations` is empty).
        old_pos = np.array(player_old.position)
        new_pos = np.array(player_new.position)
        min_old, min_new = np.inf, np.inf
        for loc in locations:
            loc_arr = np.array(loc)
            min_new = min(min_new, np.linalg.norm(loc_arr - new_pos))
            min_old = min(min_old, np.linalg.norm(loc_arr - old_pos))
        return min_old, min_new

    def _approach_reward(min_old, min_new, weight):
        # Reward only strict progress, scaled by remaining distance.
        if min_old > min_new:
            return weight * (1 - min(min_new / max_dist, 1))
        return 0

    for (player_old, player_new) in zip(state.players, new_state.players):
        held = player_new.held_object
        if (held is not None) and (held.name == 'dish') and (len(nearly_ready_pots) >= dishes_in_play):
            old_d, new_d = _min_dists(nearly_ready_pots, player_old, player_new)
            distance_based_shaped_reward += _approach_reward(old_d, new_d, self.reward_shaping_params['POT_DISTANCE_REW'])
        if (held is None) and (len(cooking_pots) > 0) and (dishes_in_play == 0):
            old_d, new_d = _min_dists(self.terrain_pos_dict['D'], player_old, player_new)
            distance_based_shaped_reward += _approach_reward(old_d, new_d, self.reward_shaping_params['DISH_DISP_DISTANCE_REW'])
        if (held is not None) and (held.name == 'soup'):
            old_d, new_d = _min_dists(self.terrain_pos_dict['S'], player_old, player_new)
            distance_based_shaped_reward += _approach_reward(old_d, new_d, self.reward_shaping_params['SOUP_DISTANCE_REW'])
    return distance_based_shaped_reward
class FeedForward(nn.Module):
    """Gated depthwise-conv feed-forward block.

    Projects to 2*hidden channels with a 1x1 conv, applies a depthwise
    3x3 conv, gates one half with GELU of the other, and projects back
    to `dim` channels.
    """

    def __init__(self, dim, ffn_expansion_factor, bias):
        super(FeedForward, self).__init__()
        hidden = int(dim * ffn_expansion_factor)
        self.project_in = nn.Conv2d(dim, hidden * 2, kernel_size=1, bias=bias)
        # groups == channels -> depthwise convolution.
        self.dwconv = nn.Conv2d(hidden * 2, hidden * 2, kernel_size=3,
                                stride=1, padding=1, groups=hidden * 2, bias=bias)
        self.project_out = nn.Conv2d(hidden, dim, kernel_size=1, bias=bias)

    def forward(self, x):
        gate, value = self.dwconv(self.project_in(x)).chunk(2, dim=1)
        return self.project_out(F.gelu(gate) * value)
class LocalAggregation(nn.Module):
    """Local point-feature aggregation, dispatching to the operator named
    by aggr_args['NAME'] ('convpool' -> ConvPool, 'assa' -> ASSA)."""

    def __init__(self, channels: List[int], aggr_args: dict, conv_args=None, norm_args=None, act_args=None, group_args=None, use_res=False):
        super(LocalAggregation, self).__init__()
        operator_name = aggr_args.get('NAME', 'convpool').lower()
        feature_type = aggr_args.get('feature_type', 'dp_fj')
        reduction = aggr_args.get('reduction', 'max')
        use_inverted_dims = aggr_args.get('use_inverted_dims', False)
        use_pooled_as_identity = aggr_args.get('use_pooled_as_identity', False)
        if operator_name == 'convpool':
            self.SA_CONFIG_operator = ConvPool(channels, conv_args, norm_args, act_args,
                                               group_args, feature_type, reduction,
                                               use_res, use_pooled_as_identity)
        elif operator_name == 'assa':
            self.SA_CONFIG_operator = ASSA(channels, conv_args, norm_args, act_args,
                                           group_args, feature_type, reduction,
                                           use_res, use_inverted_dims)
        else:
            raise NotImplementedError(f'LocalAggregation {operator_name} not implemented')

    def forward(self, query_xyz, support_xyz, support_features, query_idx=None):
        """Delegate to the configured aggregation operator."""
        return self.SA_CONFIG_operator(query_xyz, support_xyz, support_features, query_idx)
def score_dep_bigrams():
    """Collect direct-object ('dobj') dependency-bigram frequencies for the
    two module-level corpora and return each corpus' bigrams sorted by
    descending frequency as a (corpus1_bigrams, corpus2_bigrams) pair."""
    sorted_bigrams = []
    for corpus in (corpus1, corpus2):
        freqs = dep_bigram(ct.ldcorpus(corpus), 'dobj')['bi_freq']
        sorted_bigrams.append(sorted(freqs.items(), key=lambda item: item[1], reverse=True))
    return tuple(sorted_bigrams)
# NOTE(review): the line below looks like a decorator that lost its
# '@validate_' prefix during extraction; preserved verbatim.
_params({'a': [tuple], 'b': [tuple], 'similarity': [callable, StrOptions({'jaccard'})]}, prefer_skip_nested_validation=True)
def consensus_score(a, b, *, similarity='jaccard'):
    """Consensus score between two sets of biclusters.

    Biclusters are matched one-to-one by maximum similarity (Jaccard by
    default) via the Hungarian algorithm; the matched similarities are
    summed and normalized by the larger of the two set sizes.
    """
    sim_fn = _jaccard if similarity == 'jaccard' else similarity
    pairwise = _pairwise_similarity(a, b, sim_fn)
    rows, cols = linear_sum_assignment(1.0 - pairwise)
    return pairwise[rows, cols].sum() / max(len(a[0]), len(b[0]))
class ExponentialGeneratingSeriesRing(LazyPowerSeriesRing):
    """Ring of exponential generating series in the variable ``z``."""

    # Element class used by the parent for series in this ring.
    Element = ExponentialGeneratingSeries

    def __init__(self, base_ring):
        """Initialize the ring over ``base_ring`` with generator ``z``."""
        super().__init__(base_ring, names='z')
def get_file_from_repo(path_or_repo: Union[(str, os.PathLike)], filename: str, cache_dir: Optional[Union[(str, os.PathLike)]]=None, force_download: bool=False, resume_download: bool=False, proxies: Optional[Dict[(str, str)]]=None, use_auth_token: Optional[Union[(bool, str)]]=None, revision: Optional[str]=None, local_files_only: bool=False, subfolder: str=''):
    """Thin wrapper around `cached_file` that resolves `filename` inside
    `path_or_repo`, soft-failing (returning None instead of raising) when
    the entry is missing or the connection fails, so callers can probe
    for optional files."""
    return cached_file(
        path_or_repo_id=path_or_repo,
        filename=filename,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
        subfolder=subfolder,
        # Probing mode: never raise for missing entries or network errors.
        _raise_exceptions_for_missing_entries=False,
        _raise_exceptions_for_connection_errors=False,
    )
def display_help(exit_code):
    """Print mk_make.py usage — options and influential environment
    variables, tailored to the host platform via IS_WINDOWS — then
    terminate the process with `exit_code`."""
    print('mk_make.py: Z3 Makefile generator\n')
    print('This script generates the Makefile for the Z3 theorem prover.')
    print('It must be executed from the Z3 root directory.')
    print('\nOptions:')
    print(' -h, --help display this message.')
    print(' -s, --silent do not print verbose messages.')
    # Platform-specific options.
    if (not IS_WINDOWS):
        print((' -p <dir>, --prefix=<dir> installation prefix (default: %s).' % PREFIX))
    else:
        print(" --parallel=num use cl option /MP with 'num' parallel processes")
    print((' --pypkgdir=<dir> Force a particular Python package directory (default %s)' % PYTHON_PACKAGE_DIR))
    print((' -b <subdir>, --build=<subdir> subdirectory where Z3 will be built (default: %s).' % BUILD_DIR))
    print(' --githash=hash include the given hash in the binaries.')
    print(" --git-describe include the output of 'git describe' in the version information.")
    print(' -d, --debug compile Z3 in debug mode.')
    print(' -t, --trace enable tracing in release mode.')
    if IS_WINDOWS:
        print(' --guardcf enable Control Flow Guard runtime checks.')
        print(' -x, --x64 create 64 binary when using Visual Studio.')
    else:
        print(' --x86 force 32-bit x86 build on x64 systems.')
    print(' -m, --makefiles generate only makefiles.')
    if IS_WINDOWS:
        print(' -v, --vsproj generate Visual Studio Project Files.')
    print(' --optimize generate optimized code during linking.')
    print(' --dotnet generate .NET platform bindings.')
    print(' --dotnet-key=<file> sign the .NET assembly using the private key in <file>.')
    print(' --java generate Java bindings.')
    print(' --ml generate OCaml bindings.')
    print(' --js generate JScript bindings.')
    print(' --python generate Python bindings.')
    print(' --staticlib build Z3 static library.')
    print(' --staticbin build a statically linked Z3 binary.')
    if (not IS_WINDOWS):
        print(' -g, --gmp use GMP.')
        print(' --gprof enable gprof')
    print(' --log-sync synchronize access to API log files to enable multi-thread API logging.')
    print(' --single-threaded non-thread-safe build')
    print('')
    print('Some influential environment variables:')
    if (not IS_WINDOWS):
        print(' CXX C++ compiler')
        print(' CC C compiler')
        print(' LDFLAGS Linker flags, e.g., -L<lib dir> if you have libraries in a non-standard directory')
        print(' CPPFLAGS Preprocessor flags, e.g., -I<include dir> if you have header files in a non-standard directory')
        print(' CXXFLAGS C++ compiler flags')
    print(' JDK_HOME JDK installation directory (only relevant if -j or --java option is provided)')
    print(' JNI_HOME JNI bindings directory (only relevant if -j or --java option is provided)')
    print(' OCAMLC Ocaml byte-code compiler (only relevant with --ml)')
    print(' OCAMLFIND Ocaml find tool (only relevant with --ml)')
    print(' OCAMLOPT Ocaml native compiler (only relevant with --ml)')
    print(' OCAML_LIB Ocaml library directory (only relevant with --ml)')
    print(' Z3_INSTALL_BIN_DIR Install directory for binaries relative to install prefix')
    print(' Z3_INSTALL_LIB_DIR Install directory for libraries relative to install prefix')
    print(' Z3_INSTALL_INCLUDE_DIR Install directory for header files relative to install prefix')
    print(' Z3_INSTALL_PKGCONFIG_DIR Install directory for pkgconfig files relative to install prefix')
    # NOTE(review): uses the site builtin exit(); sys.exit() is the
    # conventional choice for scripts — confirm before changing.
    exit(exit_code)
def query_test_instance(conn, test_id):
    """Fetch per-mode aggregated result rows for bot `test_id` and pass
    them to load_reports.

    Uses a DictCursor so rows are addressable by column name; the
    `with conn` block commits on success / rolls back on error.
    """
    with conn:
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute('SELECT b.id, mode, b.name, b.version, b.status, b.updated_at, b.created_at,\n sum(total) total,\n sum(success) success,\n sum(intent_error) intent_error,\n sum(ner_error) ner_error,\n sum(other_error) other_error,\n sum(turns) turns\n FROM bots b, results r\n where b.id = r.bot_id\n and b.id = %s\n group by b.id, mode ', [test_id])
        rows = cursor.fetchall()
        cursor.close()
    return load_reports(test_id, rows)
def test_clean_split(df_addresses: pd.DataFrame) -> None:
    """clean_address with split=True should expand the messy column into
    the expected address-component columns."""
    df_clean = clean_address(df_addresses, 'messy_address', split=True)
    expected = df_addresses.copy()
    nan = np.nan
    expected_columns = {
        'building': [nan, nan, nan, 'Robie House', nan, 'Staples Center', nan, nan, nan, nan],
        'house_number': ['123', '1234', nan, '789', '1111', '1111', nan, nan, nan, nan],
        'street_prefix_abbr': [nan, 'W.', nan, 'N.', 'S.', 'S.', nan, nan, nan, nan],
        'street_name': ['Pine', 'Main', nan, 'Main', 'Figueroa', 'Figueroa', nan, nan, nan, nan],
        'street_suffix_abbr': ['Ave.', 'Hts.', nan, 'St.', 'St.', 'St.', nan, nan, nan, nan],
        'apartment': [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan],
        'city': [nan, nan, nan, 'Manhattan', 'Los Angeles', 'Los Angeles', nan, nan, nan, nan],
        'state_abbr': [nan, nan, nan, 'NY', 'CA', nan, nan, nan, nan, nan],
        'zipcode': [nan, '57033', nan, nan, '90015', nan, nan, nan, nan, nan],
    }
    # Column insertion order matters for DataFrame.equals.
    for column, values in expected_columns.items():
        expected[column] = values
    assert expected.equals(df_clean)
class AlmondDataset(CQA):
    """Base class for Almond semantic-parsing datasets loaded from TSV files."""
    # Datasets are expected to exist locally; nothing is downloaded.
    base_url = None
    def __init__(self, path, *, make_example, **kwargs):
        """Load examples from the TSV file at `path`.

        `make_example` converts a raw line into an example. With
        `num_workers > 0` the file is split into per-worker chunk files
        that are processed in parallel and then deleted; otherwise the
        file is processed in a single pass. `subsample` caps the number
        of examples read.
        """
        subsample = kwargs.get('subsample')
        num_workers = kwargs.get('num_workers', 0)
        dir_name = os.path.basename(os.path.dirname(path))
        # First pass: count lines to size subsampling and worker chunks.
        n = 0
        with open(path, 'r', encoding='utf-8') as fp:
            for line in fp:
                n += 1
        max_examples = (min(n, subsample) if (subsample is not None) else n)
        if (num_workers > 0):
            num_processes = min(num_workers, int(mp.cpu_count()))
            logger.info(f'Using {num_processes} workers...')
            chunk_size = int(math.ceil((max_examples / num_processes)))
            num_chunks = int(math.ceil((max_examples / chunk_size)))
            (base_path, extension) = path.rsplit('.', 1)
            chunk_file_paths = [f'{base_path}_{chunk_id}.tsv' for chunk_id in range(num_chunks)]
            chunk_file(path, chunk_file_paths, chunk_size, num_chunks)
            num_processes = min(num_processes, num_chunks)
            with mp.Pool(processes=num_processes) as pool:
                process_args = [{'in_file': chunk_file_paths[i], 'chunk_size': chunk_size, 'dir_name': dir_name, 'example_batch_size': 1, 'make_process_example': make_example, 'kwargs': kwargs} for i in range(num_chunks)]
                results = pool.map(create_examples_from_file, process_args)
            # Flatten per-chunk example lists, preserving chunk order.
            examples = [item for sublist in results for item in sublist]
            # Clean up the temporary chunk files.
            for file in chunk_file_paths:
                os.remove(file)
        else:
            process_args = {'in_file': path, 'chunk_size': max_examples, 'dir_name': dir_name, 'example_batch_size': 1, 'make_process_example': make_example, 'kwargs': kwargs}
            examples = create_examples_from_file(process_args)
        super().__init__(examples, **kwargs)
    def return_splits(cls, path, train='train', validation='eval', test='test', **kwargs):
        """Build (Split of datasets, Split of file paths) for the requested
        train/validation/test (and optional curriculum 'aux') splits;
        any split passed as None is skipped.

        NOTE(review): takes `cls` but carries no @classmethod decorator —
        this file's decorators appear mangled; confirm against upstream.
        """
        train_data = (None if (train is None) else cls(os.path.join(path, (train + '.tsv')), **kwargs))
        validation_data = (None if (validation is None) else cls(os.path.join(path, (validation + '.tsv')), **kwargs))
        test_data = (None if (test is None) else cls(os.path.join(path, (test + '.tsv')), **kwargs))
        aux_data = None
        do_curriculum = kwargs.get('curriculum', False)
        if do_curriculum:
            # 'curriculum' is consumed here so it is not forwarded to cls().
            kwargs.pop('curriculum')
            aux_data = cls(os.path.join(path, ('aux' + '.tsv')), **kwargs)
        data_splits = Split(train=(None if (train is None) else train_data), eval=(None if (validation is None) else validation_data), test=(None if (test is None) else test_data), aux=(None if (do_curriculum is False) else aux_data))
        all_paths = Split(train=(None if (train is None) else os.path.join(path, (train + '.tsv'))), eval=(None if (validation is None) else os.path.join(path, (validation + '.tsv'))), test=(None if (test is None) else os.path.join(path, (test + '.tsv'))), aux=(None if (do_curriculum is False) else os.path.join(path, ('aux' + '.tsv'))))
        return (data_splits, all_paths)
class TestPower(unittest.TestCase):
    """Tests for objective.Power: value, gradient, and string form."""

    def test_objective_function(self):
        """Power computes base**exponent for integer, fractional,
        negative and zero exponents."""
        param = None
        self.assertEqual(
            objective.Power(objective.Constant(3), 2).calculate_objective_function(param), 9)
        for base, exponent, expected in ((9, 0.5, 3), (5, -1, 0.2), (5, 0, 1)):
            obj = objective.Power(objective.Constant(base), exponent)
            self.assertAlmostEqual(obj.calculate_objective_function(param), expected)

    def test_gradient(self):
        """Gradient of (3*x0)**k w.r.t. the parameter vector [x0, x1]."""
        cases = [
            ([5, 7], 2, 3, [90, 0]),
            ([12, 7], 0.5, 3, [0.25, 0]),
            ([1 / 3, 7], -1, 3, [-3, 0]),
            ([12, 7], 0, 3.0, [0, 0]),
        ]
        for values, exponent, constant, expected in cases:
            param = DirectParam(values)
            obj = objective.Power(
                objective.Product([objective.ValueSlice(Variable(2), 0),
                                   objective.Constant(constant)]), exponent)
            np.testing.assert_allclose(obj.calculate_gradient(param), expected)

    def test_string(self):
        """String form renders as p[index]**exponent."""
        obj = objective.Power(objective.ValueSlice(Variable(1), 1), 2)
        self.assertEqual(str(obj), 'p[1]**2')
class BasicTokenizer(object):
    """Basic text tokenization: cleaning, whitespace splitting, optional
    lower-casing with accent stripping, punctuation splitting, and
    space-padding of CJK ideographs."""

    def __init__(self, do_lower_case=True):
        # Whether tokens are lower-cased (and accent-stripped) before the
        # punctuation split.
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenize `text` into a list of basic tokens."""
        text = self._tokenize_chinese_chars(self._clean_text(convert_to_unicode(text)))
        pieces = []
        for raw_token in whitespace_tokenize(text):
            if self.do_lower_case:
                raw_token = self._run_strip_accents(raw_token.lower())
            pieces.extend(self._run_split_on_punc(raw_token))
        return whitespace_tokenize(' '.join(pieces))

    def _run_strip_accents(self, text):
        """Drop combining marks (category Mn) after NFD decomposition."""
        decomposed = unicodedata.normalize('NFD', text)
        return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Split so every punctuation character becomes its own token."""
        pieces = []
        start_new_word = True
        for ch in text:
            if _is_punctuation(ch):
                pieces.append([ch])
                start_new_word = True
            else:
                if start_new_word:
                    pieces.append([])
                start_new_word = False
                pieces[-1].append(ch)
        return [''.join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Surround every CJK ideograph with spaces."""
        padded = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                padded.extend((' ', ch, ' '))
            else:
                padded.append(ch)
        return ''.join(padded)

    def _is_chinese_char(self, cp):
        """True if codepoint `cp` lies in any CJK ideograph block."""
        cjk_ranges = (
            (0x4E00, 0x9FFF),    # CJK Unified Ideographs
            (0x3400, 0x4DBF),    # Extension A
            (0x20000, 0x2A6DF),  # Extension B
            (0x2A700, 0x2B73F),  # Extension C
            (0x2B740, 0x2B81F),  # Extension D
            (0x2B820, 0x2CEAF),  # Extension E
            (0xF900, 0xFAFF),    # Compatibility Ideographs
            (0x2F800, 0x2FA1F),  # Compatibility Supplement
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and canonicalize all
        whitespace characters to a single space."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(cleaned)
def get_deepdb_size(spn_ensemble):
    """Estimate the storage size in MB (assuming 4 bytes per stored number)
    of the first SPN in the ensemble, counting parameters per node type."""
    spn = spn_ensemble.spns[0].mspn
    total = 0
    # Product nodes: child pointers plus scope indices.
    for node in get_nodes_by_type(spn, Product):
        total += len(node.children) + len(node.scope)
    # Sum nodes: children, weights, scope, and cluster centers.
    for node in get_nodes_by_type(spn, Sum):
        assert len(node.children) == len(node.weights) == len(node.cluster_centers)
        assert len(node.cluster_centers[0]) == len(node.scope)
        n_children = len(node.children)
        n_vars = len(node.scope)
        total += 2 * n_children + n_vars + n_vars * n_children
    # Categorical leaves: scope + size header plus probability table.
    for node in get_nodes_by_type(spn, Categorical):
        assert len(node.scope) == 1
        total += 2 + len(node.p)
    # Numeric leaves: headers plus unique values and cumulative probs.
    for node in get_nodes_by_type(spn, IdentityNumericLeaf):
        assert len(node.scope) == 1
        assert len(node.unique_vals) + 1 == len(node.prob_sum)
        total += 3 + len(node.unique_vals) + len(node.prob_sum)
    return total * 4 / 1024 / 1024
def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
    """Register the pybindgen constructors and methods for
    ns3::SimpleRefCount<ns3::QueueItem, ...> (auto-generated binding code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
    # Static Cleanup(): void.
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def make_conv(in_channels, out_channels, kernel=3, stride=1, dilation=1, padding=None, groups=1, use_dwconv=False, conv_type='normal', use_bn=False, use_gn=False, use_relu=False, kaiming_init=True, suffix_1x1=False, inplace=True, eps=1e-05, gn_group=32):
    """Build a conv layer, optionally followed by BatchNorm/GroupNorm, ReLU
    and a trailing 1x1 conv (each with its own optional norm/activation).

    Returns the bare conv module when no extra layer is requested,
    otherwise an nn.Sequential. Raises ValueError for an unknown
    `conv_type` ('normal', 'deform', 'deformv2', 'convws').
    """
    # "Same"-style padding for the dilated kernel unless given explicitly.
    _padding = (dilation * kernel - dilation) // 2 if padding is None else padding
    if use_dwconv:
        # Depthwise convolution: one group per channel.
        assert in_channels == out_channels
        _groups = out_channels
    else:
        _groups = groups
    # Lazy if/elif chain so `ops` is only touched for the deform variants.
    if conv_type == 'normal':
        conv_op = nn.Conv2d
    elif conv_type == 'deform':
        conv_op = ops.DeformConvPack
    elif conv_type == 'deformv2':
        conv_op = ops.ModulatedDeformConvPack
    elif conv_type == 'convws':
        conv_op = ops.Conv2dWS
    else:
        # Bug fix: this previously formatted the undefined name `conv`,
        # raising NameError instead of the intended ValueError.
        raise ValueError('{} type conv operation is not supported.'.format(conv_type))
    # Bias is redundant when a normalization layer follows.
    use_bias = not (use_bn or use_gn)
    conv = conv_op(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=_padding, dilation=dilation, groups=_groups, bias=use_bias)
    if kaiming_init:
        nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
    else:
        torch.nn.init.normal_(conv.weight, std=0.01)
    if use_bias:
        nn.init.constant_(conv.bias, 0)
    module = [conv]

    def _append_norm_act(layers):
        # Shared tail: optional normalization then activation.
        if use_bn:
            layers.append(nn.BatchNorm2d(out_channels, eps=eps))
        if use_gn:
            layers.append(nn.GroupNorm(gn_group, out_channels, eps=eps))
        if use_relu:
            layers.append(nn.ReLU(inplace=inplace))

    _append_norm_act(module)
    if suffix_1x1:
        module.append(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=use_bias))
        _append_norm_act(module)
    if len(module) > 1:
        return nn.Sequential(*module)
    return conv
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.