code stringlengths 101 5.91M |
|---|
class DotProductAttention(nn.Module):
    """Scaled dot-product attention.

    Computes ``softmax(Q K^T / sqrt(dim)) V``. With ``scale=False`` the raw
    dot products are used without the ``1/sqrt(dim)`` normalization.
    """

    def __init__(self, dim: int, scale: bool=True) -> None:
        super(DotProductAttention, self).__init__()
        # Normalization constant is fixed at construction time.
        self.sqrt_dim = np.sqrt(dim) if scale else 1

    def forward(self, query: torch.FloatTensor, key: torch.FloatTensor, value: torch.FloatTensor, mask: Optional[torch.FloatTensor]=None) -> Tuple[(torch.FloatTensor, torch.FloatTensor)]:
        """Return (context, attention weights).

        NOTE(review): ``key.transpose(2, 3)`` assumes 4-D inputs, while the
        ``bmm`` branch below handles 3-D queries — confirm intended shapes.
        """
        score = torch.matmul(query, key.transpose(2, 3)) / self.sqrt_dim
        if mask is not None:
            # In-place fill keeps masked positions out of the softmax.
            score.masked_fill_(mask, -10000.0)
        attn = F.softmax(score, -1)
        context = torch.bmm(attn, value) if query.dim() == 3 else torch.matmul(attn, value)
        return context, attn
def _binary_erosion(input, structure, iterations, mask, output, border_value, origin, invert, brute_force):
    """Shared driver for binary erosion/dilation (`invert` selects which).

    Dispatches to one of three strategies:
      * a single C-level pass when ``iterations == 1``;
      * an incremental coordinate-list algorithm when the structuring
        element's center is True and ``brute_force`` is False;
      * repeated full passes otherwise, ping-ponging between two buffers
        until ``iterations`` is reached or (for ``iterations < 1``) until
        no pixel changes.
    """
    try:
        iterations = operator.index(iterations)
    except TypeError:
        raise TypeError('iterations parameter should be an integer')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if (structure is None):
        # Default: connectivity-1 (cross-shaped) structuring element.
        structure = generate_binary_structure(input.ndim, 1)
    else:
        structure = numpy.asarray(structure, dtype=bool)
    if (structure.ndim != input.ndim):
        raise RuntimeError('structure and input must have same dimensionality')
    if (not structure.flags.contiguous):
        # The C extension requires a contiguous structuring element.
        structure = structure.copy()
    if (numpy.prod(structure.shape, axis=0) < 1):
        raise RuntimeError('structure must not be empty')
    if (mask is not None):
        mask = numpy.asarray(mask)
        if (mask.shape != input.shape):
            raise RuntimeError('mask and input must have equal sizes')
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    # True iff the structuring element's center is set; a prerequisite for
    # the incremental (coordinate-list) algorithm below.
    cit = _center_is_true(structure, origin)
    if isinstance(output, numpy.ndarray):
        if numpy.iscomplexobj(output):
            raise TypeError('Complex output type not supported')
    else:
        output = bool
    output = _ni_support._get_output(output, input)
    if (iterations == 1):
        _nd_image.binary_erosion(input, structure, mask, output, border_value, origin, invert, cit, 0)
        return output
    elif (cit and (not brute_force)):
        # First pass also returns coordinates of changed pixels, so later
        # iterations only revisit the active boundary.
        (changed, coordinate_list) = _nd_image.binary_erosion(input, structure, mask, output, border_value, origin, invert, cit, 1)
        # Mirror the structuring element and origin for the incremental pass.
        structure = structure[tuple(([slice(None, None, (- 1))] * structure.ndim))]
        for ii in range(len(origin)):
            origin[ii] = (- origin[ii])
            if (not (structure.shape[ii] & 1)):
                # Even-sized axes need an extra shift after mirroring.
                origin[ii] -= 1
        if (mask is not None):
            mask = numpy.asarray(mask, dtype=numpy.int8)
        if (not structure.flags.contiguous):
            structure = structure.copy()
        _nd_image.binary_erosion2(output, structure, mask, (iterations - 1), origin, invert, coordinate_list)
        return output
    else:
        # Brute force: repeated single-iteration passes, swapping the two
        # buffers so `output` holds the final result.
        tmp_in = numpy.empty_like(input, dtype=bool)
        tmp_out = output
        if ((iterations >= 1) and (not (iterations & 1))):
            # Even iteration count: start swapped so the result lands in `output`.
            (tmp_in, tmp_out) = (tmp_out, tmp_in)
        changed = _nd_image.binary_erosion(input, structure, mask, tmp_out, border_value, origin, invert, cit, 0)
        ii = 1
        # iterations < 1 means iterate until the image stops changing.
        while ((ii < iterations) or ((iterations < 1) and changed)):
            (tmp_in, tmp_out) = (tmp_out, tmp_in)
            changed = _nd_image.binary_erosion(tmp_in, structure, mask, tmp_out, border_value, origin, invert, cit, 0)
            ii += 1
        return output
def get_datapoints_map(influences_collections: List[Dict[(int, float)]]) -> Tuple[(List[int], Dict[(int, int)])]:
    """Collect the union of datapoint ids and map each id to its rank.

    Returns a sorted list of every datapoint id that appears in any of the
    influence dicts, plus a dict mapping each id to its index in that list.
    """
    seen = set()
    for influences in influences_collections:
        seen.update(influences.keys())
    possible_datapoints = sorted(seen)
    datapoints_map = {dp: rank for (rank, dp) in enumerate(possible_datapoints)}
    return (possible_datapoints, datapoints_map)
def smallest_positions(word, subword, pos=0) -> (list | bool):
pos -= 1
n = len(word)
res = ([None] * len(subword))
for (i, swi) in enumerate(subword):
for j in range((pos + 1), (n + 1)):
if (j == n):
return False
if (word[j] == swi):
pos = j
break
if (pos != j):
return False
res[i] = pos
return res |
def opt_score(method, data, global_knowledge):
    """Score each sample's output via LLM perplexity; returns (scores, meta_data)
    where scores are 1/PPL (higher is better) and meta_data holds 'PPL:...' strings.

    Bug fix: the original bound the scorer result to an unused `score` variable
    and then read an undefined `avg_loss`, raising NameError on the first
    sample. The scorer's return value is treated as the average loss here —
    NOTE(review): verify that LocalLLM.score indeed returns a mean token loss.
    """
    task = 'llm-scorer'
    evaluator = LocalLLM(method, task=task)
    template = prompt_templates['opt_score']
    (scores, meta_data) = ([], [])
    for sample in tqdm(data):
        target = sample['output']
        local_knowledge = sample['knowledge']
        prompt = 'Generate a text that is factually correct and consistent with the instruction.\n'
        instruction = template.format(instruction=get_instruction(sample), local_knowledge=local_knowledge, global_knowledge=retrieve_global_knowledge(target, global_knowledge))
        avg_loss = evaluator.score(instruction, target, prompt)
        ppl = np.exp(avg_loss)      # perplexity from average loss
        scores.append((1 / ppl))    # invert so higher means better
        meta_data.append(f'PPL:{ppl}')
    return (scores, meta_data)
def test_angular_size():
    """angular_size: scalar radius/redshift give a scalar angle; unitless
    radii are rejected with UnitTypeError."""
    from skypy.galaxies import morphology
    cosmology = FlatLambdaCDM(Om0=1.0, H0=70.0)
    scalar_radius = 1.0 * units.kpc
    scalar_redshift = 1.0
    angular_size = morphology.angular_size(scalar_radius, scalar_redshift, cosmology)
    # Scalar in -> scalar out, with an angle-equivalent unit.
    assert np.isscalar(angular_size.value)
    assert angular_size.unit.is_equivalent(units.rad)
    radius_without_units = 1.0
    # A bare float has no length unit and must be refused.
    with pytest.raises(units.UnitTypeError):
        morphology.angular_size(radius_without_units, scalar_redshift, cosmology)
def from_music21(stream: Stream, resolution: int=DEFAULT_RESOLUTION) -> Union[(Music, List[Music], Track, List[Track])]:
    """Convert a music21 stream, dispatching on its concrete type.

    Opus -> list of Music, Part -> Track, anything else is treated as a score.
    """
    if isinstance(stream, Opus):
        converter = from_music21_opus
    elif isinstance(stream, Part):
        converter = from_music21_part
    else:
        converter = from_music21_score
    return converter(stream, resolution)
def labelled_pronoun(row):
    """Build (tokens, coreference clusters) for a GAP-style example row.

    Span boundaries come straight from the *_span columns when present;
    otherwise they are derived from character offsets by counting the
    space-separated tokens preceding each offset. The pronoun span joins
    cluster A or B according to the coref flags, or forms its own cluster.
    """
    txt = row.text
    tokens = row.tokens if ('tokens' in row.index) else txt.split(' ')
    if ('a_span' in row.index):
        (start_a, end_a) = (row.a_span[0], row.a_span[1])
        (start_b, end_b) = (row.b_span[0], row.b_span[1])
        start_p = end_p = row.pronoun_offset_token
    else:
        def token_index(offset):
            # Number of space-separated tokens before the character offset.
            return len(txt[:offset].split(' ')) - 1
        start_a = token_index(row.a_offset)
        end_a = start_a + len(row.a.split(' ')) - 1
        start_b = token_index(row.b_offset)
        end_b = start_b + len(row.b.split(' ')) - 1
        start_p = token_index(row.pronoun_offset)
        end_p = start_p + len(row.pronoun.split(' ')) - 1
    clusters = [[[start_a, end_a]], [[start_b, end_b]]]
    if row.a_coref:
        clusters[0].append([start_p, end_p])
    elif row.b_coref:
        clusters[1].append([start_p, end_p])
    else:
        # Pronoun refers to neither mention: its own singleton cluster.
        clusters.append([[start_p, end_p]])
    return (tokens, clusters)
class HybridEmbedding(Layer):
    """Embedding layer with a trainable ('unfixed') and a frozen ('fixed') table.

    Token ids below `unfixed_embed_size` are looked up in the trainable
    matrix; all other ids use the frozen matrix. Only W_unfixed is exposed
    as a trainable parameter.
    """
    def __init__(self, embed_size, unfixed_embed_size, embed_dim, init='uniform', name='HybridEmbedding'):
        super(HybridEmbedding, self).__init__()
        self.init = initializations.get(init)
        self.unfixed_embed_size = unfixed_embed_size
        self.W_unfixed = self.init((embed_size, embed_dim))
        self.W_fixed = self.init((embed_size, embed_dim))
        # NOTE(review): 'fiexed' typo kept — it is a runtime identifier string.
        self.W_fixed.name = 'HybridEmbedding_fiexed_embed_matrix'
        # Deliberately excludes W_fixed so the fixed table is never updated.
        self.params = [self.W_unfixed]
        if (name is not None):
            self.set_name(name)
    def get_output_mask(self, X):
        # 0 where X is the padding id 0, 1 elsewhere.
        return (T.ones_like(X) * (1 - T.eq(X, 0)))
    def __call__(self, X, mask_zero=False):
        # Per-token choice between the trainable and the frozen table.
        cond = T.lt(X, self.unfixed_embed_size)
        out = T.switch(T.shape_padright(cond), self.W_unfixed[X], self.W_fixed[X])
        if mask_zero:
            return (out, self.get_output_mask(X))
        else:
            return out
def test_fbms(model):
    """Evaluate `model` on the FBMS test set, saving predicted masks to
    outputs/FBMS_test/<video>/ and printing per-sequence and overall mean IoU.
    """
    test_set = TestFBMS('../DB/FBMS/TestSet')
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, num_workers=4)
    model.cuda()
    ious = []
    for vos_data in test_loader:
        imgs = vos_data['imgs'].cuda()
        flows = vos_data['flows'].cuda()
        masks = vos_data['masks']
        video_name = vos_data['video_name'][0]
        files = vos_data['files']
        os.makedirs('outputs/FBMS_test/{}'.format(video_name), exist_ok=True)
        vos_out = model(imgs, flows)
        iou = 0
        count = 0
        for i in range(masks.size(1)):
            # Save every predicted frame mask under the video's output folder.
            tv.utils.save_image(vos_out['masks'][(0, i)].float().cpu(), 'outputs/FBMS_test/{}/{}'.format(video_name, files[i][0].split('/')[(- 1)]))
            if (torch.sum(masks[(0, i)]) == 0):
                # Frames without ground-truth annotation don't count toward IoU.
                continue
            # IoU = intersection / union; the union is a clamped sum of masks.
            iou = (iou + (torch.sum((masks[(0, i)] * vos_out['masks'][(0, i)].cpu())) / torch.sum((masks[(0, i)] + vos_out['masks'][(0, i)].cpu()).clamp(0, 1))))
            count = (count + 1)
        print('{} iou: {:.5f}'.format(video_name, (iou / count)))
        ious.append((iou / count))
    print("total seqs' iou: {:.5f}\n".format((sum(ious) / len(ious))))
class PreTrainingConfig(Config):
    """Config that derives vocabulary bookkeeping from a HuggingFace tokenizer.

    Splits the tokenizer vocabulary into special and non-special ids and
    exposes the common special-token ids through small accessor methods.
    """

    def __init__(self, **kwargs):
        self.tokenizer: transformers.PreTrainedTokenizer = kwargs.pop('tokenizer')
        self.stoi = self.tokenizer.get_vocab()
        # De-duplicate: all_special_ids may contain repeats.
        self.special_ids = list(set(self.tokenizer.all_special_ids))
        special = set(self.special_ids)
        self.non_special_ids = [idx for idx in self.stoi.values() if idx not in special]
        super().__init__(**kwargs)

    def cls_id(self):
        return self.tokenizer.cls_token_id

    def sep_id(self):
        return self.tokenizer.sep_token_id

    def unk_id(self):
        return self.tokenizer.unk_token_id

    def pad_id(self):
        return self.tokenizer.pad_token_id

    def mask_id(self):
        return self.tokenizer.mask_token_id
class Main():
    """CLI entry points running dataset-generation routines per language family."""

    def regular(self):
        for family in LANGS:
            print('regular', family)
            regular(family)

    def hall(self):
        for family in LANGS:
            print('hall', family)
            halluication(family)

    def concat(self):
        for family in LANGS:
            print('concat', family)
            concat_langid(family)

    def concat_hall(self):
        for family in LANGS:
            print('concat_hall', family)
            concat_halluication(family)

    def all(self):
        # Run every generation mode in sequence.
        self.regular()
        self.hall()
        self.concat()
        self.concat_hall()

    def gen_langs(self):
        # Flatten every family's language list, then print sorted + count.
        langs = [lang for family in LANGS for lang in LANGS[family]]
        print(' '.join(sorted(langs)))
        print(len(langs))

    def gen_family(self):
        families = list(LANGS.keys())
        print(' '.join(sorted(families)))
        print(len(families))
class BaseModel(nn.Module, ABC):
    """Abstract base for models with best-checkpoint tracking and (de)serialization.

    Bug fix: `load`, `load_checkpoint` and `get_name` take a ``cls`` first
    argument but were missing ``@classmethod`` (evidently stripped), so
    ``Model.load(path)`` would have bound ``path`` to ``cls``. The decorators
    are restored here; call sites are unaffected.
    """
    name = 'base'

    def __init__(self):
        super().__init__()
        # Snapshot of the best-performing weights seen so far.
        self.best_state_dict = None

    def set_best(self):
        """Remember the current weights as the best ones."""
        self.best_state_dict = copy.deepcopy(self.state_dict())

    def recover_best(self):
        """Restore the weights previously saved by set_best()."""
        self.load_state_dict(self.best_state_dict)

    def save(self, path):
        """Serialize constructor kwargs and weights to <path>/model.tch."""
        fname = self.get_name(path)
        torch.save({'kwargs': self.get_args(), 'model_state_dict': self.state_dict()}, fname)

    def get_args(self):
        """Return constructor kwargs for re-instantiation; subclasses override."""
        pass

    @classmethod
    def load(cls, path):
        """Re-instantiate the model from a checkpoint directory."""
        checkpoints = cls.load_checkpoint(path)
        model = cls(**checkpoints['kwargs'])
        model.load_state_dict(checkpoints['model_state_dict'])
        return model

    @classmethod
    def load_checkpoint(cls, path):
        """Load the raw checkpoint dict from <path>/model.tch."""
        fname = cls.get_name(path)
        return torch.load(fname, map_location=constants.device)

    @classmethod
    def get_name(cls, path):
        """Checkpoint file path inside `path`."""
        return ('%s/model.tch' % path)
def build_dataloader(dataset, batch_size=1, num_workers=1, training=True, dist=False, persistent_workers=True):
    """Create a DataLoader for training or evaluation.

    Training loaders shuffle (unless a DistributedSampler is used, which
    shuffles internally) and drop the last incomplete batch; evaluation
    loaders require batch_size == 1 and preserve sample order.
    """
    sampler = DistributedSampler(dataset, shuffle=training) if dist else None
    # When a sampler owns shuffling, the loader itself must not shuffle.
    shuffle = training and sampler is None
    common = dict(batch_size=batch_size, num_workers=num_workers, collate_fn=dataset.collate_fn, sampler=sampler, pin_memory=True, persistent_workers=persistent_workers)
    if training:
        return DataLoader(dataset, shuffle=shuffle, drop_last=True, **common)
    assert (batch_size == 1)
    return DataLoader(dataset, shuffle=False, drop_last=False, **common)
def update_edge_info(tensor_info):
    """Render per-edge display labels from a tensor-info DataFrame.

    Keeps only the tensor/dtype/shape columns and formats one multi-line
    label per row: when the fp32 and int8 shapes agree a single shape is
    shown, otherwise both are listed; dtypes are always shown as a pair.
    """
    records = tensor_info.filter(regex='tensor|dtype|shape').to_dict('index')

    def format_value(meta):
        fp32_shape = meta['fp32net_shape']
        int8_shape = meta['int8net_shape']
        # Collapse identical shapes into one entry.
        shape = int8_shape if fp32_shape == int8_shape else [fp32_shape, int8_shape]
        dtype = (meta['fp32net_dtype'], meta['int8net_dtype'])
        return '{0}\n {1} \n {2}'.format(meta['tensor'], shape, dtype)

    return {key: format_value(meta) for (key, meta) in records.items()}
def register_Ns3EpcX2Sap_methods(root_module, cls):
    """Register bindings for ns3::EpcX2Sap: default/copy constructors and the
    constant m_maxPdcpSn static attribute."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::EpcX2Sap const &', 'arg0')])
    cls.add_static_attribute('m_maxPdcpSn', 'uint16_t const', is_const=True)
_model
# ^ NOTE(review): stray bare expression — this looks like a decorator mangled
# by preprocessing (presumably `@register_model` from the model registry);
# verify against the original source and restore the `@` form.
def tf_efficientnet_b4(pretrained=False, **kwargs):
    """EfficientNet-B4 with TensorFlow-port defaults (SAME padding, TF BN eps)."""
    # TF-ported weights require TF's BatchNorm epsilon and 'same' padding.
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
    return model
class HMMSearchJob(ExternalSeppJob):
    """External job wrapping `hmmsearch` to search fragments against a profile HMM.

    Depending on configuration, the tool's stdout is either consumed directly
    (piped mode) or written to and read back from a results file on disk.
    """
    def __init__(self, **kwargs):
        self.job_type = 'hmmsearch'
        ExternalSeppJob.__init__(self, self.job_type, **kwargs)
        self.hmmmodel = None   # path to the profile HMM
        self.fragments = None  # path to the fragment file
        self.outfile = None    # results path (used when not piped)
        self.elim = None       # E-value cutoff (-E), or None
        self.filters = None    # False adds --max (disable filter heuristics)
        # Pipe stdout unless config explicitly sets hmmsearch.piped != 'true'.
        if hasattr(sepp.config.options().hmmsearch, 'piped'):
            self.pipe = (sepp.config.options().hmmsearch.piped.strip().lower() == 'true')
        else:
            self.pipe = True
        self.results_on_temp = (not self.pipe)
    def setup(self, hmmmodel, fragments, output_file, elim=None, filters=True, **kwargs):
        """Fully configure the job with explicit file paths."""
        self.hmmmodel = hmmmodel
        self.fragments = fragments
        self.outfile = output_file
        self.elim = elim
        self.filters = filters
        self._kwargs = kwargs
    def partial_setup_for_subproblem(self, fragments_file, subproblem, elim=None, filters=True, **kwargs):
        """Configure everything except the HMM model for a SEPP subproblem."""
        assert isinstance(subproblem, sepp.problem.SeppProblem)
        self.outfile = sepp.filemgr.tempfile_for_subproblem('hmmsearch.results.', subproblem)
        self.fragments = fragments_file
        if (not self.fragments):
            # Nothing to search: mark the job as a no-op.
            self.fake_run = True
        self.elim = elim
        self.filters = filters
        self._kwargs = kwargs
    def get_invocation(self):
        """Build the hmmsearch command line from the configured options."""
        invoc = [self.path, '--noali', '--cpu', '1']
        if (not self.pipe):
            invoc.extend(['-o', self.outfile])
        if (self.elim is not None):
            invoc.extend(['-E', str(self.elim)])
        if (not self.filters):
            # --max turns off hmmsearch's acceleration filters.
            invoc.extend(['--max'])
        if ('user_options' in self._kwargs):
            invoc.extend(self._kwargs['user_options'].split())
        invoc.extend([self.hmmmodel, self.fragments])
        return invoc
    def characterize_input(self):
        """Human-readable summary of the job's inputs (for logging)."""
        return ('model:%s, fragments:%s, elim:%s, filter:%s, output:%s' % (self.hmmmodel, self.fragments, self.elim, self.filters, ('Piped' if self.pipe else self.outfile)))
    def read_results(self):
        """Return parsed results, or (results_on_temp mode) the results path.

        In results_on_temp mode the raw hmmsearch output file is replaced in
        place by the string form of the parsed dict and its path is returned;
        otherwise the parsed dict itself is returned.
        """
        if self.results_on_temp:
            if self.fake_run:
                res = {}
            else:
                assert os.path.exists(self.outfile)
                # An empty results file indicates the run failed.
                assert (os.stat(self.outfile)[stat.ST_SIZE] != 0)
                with open(self.outfile, 'r') as outfile:
                    res = self.read_results_from_temp(outfile)
            # Overwrite the raw tool output with the parsed representation.
            with open(self.outfile, 'w') as target:
                target.write(str(res))
            return self.outfile
        else:
            if self.fake_run:
                res = {}
            elif self.pipe:
                # Parse directly from the captured stdout.
                outfile = self.stdoutdata.split('\n')
                res = self.read_results_from_temp(outfile)
            else:
                outfile = open(self.outfile, 'r')
                res = self.read_results_from_temp(outfile)
                outfile.close()
            return res
    def read_results_from_temp(self, outfile):
        """Parse hmmsearch's per-target table into {name: (col1, col2)}.

        Columns 1 and 2 are converted to floats; column 9 is the target name.
        NOTE(review): per hmmsearch's table layout these are presumably the
        full-sequence E-value and bit score — confirm against the tool docs.
        """
        results = {}
        # Nine whitespace-separated columns per data row.
        pattern = re.compile('([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)\\s+([^\\s]+)')
        start_reading = False
        for line in outfile:
            line = line.strip()
            if ((not start_reading) and (line.startswith('E-value') is True)):
                # Header row found; data rows follow.
                start_reading = True
            elif (start_reading and (line == '')):
                # Blank line terminates the table.
                start_reading = False
                break
            elif start_reading:
                matches = pattern.search(line)
                # Skip the '---- ----' separator row under the header.
                if ((matches is not None) and (matches.group(0).find('--') == (- 1))):
                    results[matches.group(9).strip()] = (float(matches.group(1).strip()), float(matches.group(2).strip()))
        return results
def init_pretrained_weights(model, model_url):
    """Initialize `model` from pretrained weights downloaded from `model_url`.

    Old DenseNet checkpoints use dotted layer names (e.g. ``norm.1.weight``)
    that newer module code spells without the dot (``norm1.weight``); the
    regex rewrites those keys. Keys absent from the model or with a shape
    mismatch are silently dropped.
    """
    pretrain_dict = model_zoo.load_url(model_url)
    pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
    for key in list(pretrain_dict.keys()):
        match = pattern.match(key)
        if match is None:
            continue
        # e.g. '...denselayer1.norm.1.weight' -> '...denselayer1.norm1.weight'
        pretrain_dict[match.group(1) + match.group(2)] = pretrain_dict.pop(key)
    model_dict = model.state_dict()
    compatible = {k: v for (k, v) in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
    model_dict.update(compatible)
    model.load_state_dict(model_dict)
def IntegralLatticeDirectSum(Lattices, return_embeddings=False):
    """Return the direct sum of the given integral lattices.

    The ambient quadratic module carries the block-diagonal inner product of
    all summands, and the result's basis stacks each lattice's basis into its
    own coordinate block. With ``return_embeddings=True`` the return value is
    ``[direct_sum, phi]`` where ``phi[i]`` embeds ``Lattices[i]`` into the sum.
    """
    for L in Lattices:
        if (not isinstance(L, FreeQuadraticModule_integer_symmetric)):
            raise ValueError('Lattices must be a list of lattices')
    N = len(Lattices)
    dims = [L_i.dimension() for L_i in Lattices]
    degrees = [L_i.degree() for L_i in Lattices]
    degree_tot = sum(degrees)
    # Cumulative degree offsets; sum_degree[i] is where block i starts.
    sum_degree = [sum(degrees[:i]) for i in range((N + 1))]
    inner_product_list = [copy(L_i.inner_product_matrix()) for L_i in Lattices]
    IM = matrix.block_diagonal(inner_product_list)
    ambient = FreeQuadraticModule(ZZ, degree_tot, inner_product_matrix=IM)
    # Pad each lattice's basis with zero blocks so it occupies its own slot.
    basis = [matrix.block(1, 3, [matrix.zero(dims[i], sum_degree[i]), Lattices[i].basis_matrix(), matrix.zero(dims[i], (sum_degree[(- 1)] - sum_degree[(i + 1)]))]) for i in range(N)]
    basis_matrix = matrix.block(N, 1, basis)
    ipm = ambient.inner_product_matrix()
    direct_sum = FreeQuadraticModule_integer_symmetric(ambient=ambient, basis=basis_matrix, inner_product_matrix=ipm, already_echelonized=False)
    if (not return_embeddings):
        return direct_sum
    # Cumulative dimension offsets select each summand's rows of the new basis.
    sum_dims = [sum(dims[:i]) for i in range((N + 1))]
    phi = [Lattices[i].hom(direct_sum.basis()[sum_dims[i]:sum_dims[(i + 1)]]) for i in range(N)]
    return [direct_sum, phi]
def plot_log(axs, log, info, xticks=None, yticks=None, xnbins=None, ynbins=None, groups=None, show_legends=True, swap_axes=False):
    """Plot time-history log data into a grid of subplots.

    One subplot per selected group in `info`; each group draws its named data
    series from `log` plus vertical (or, with ``swap_axes``, horizontal)
    marker lines. `axs` may supply pre-existing axes; otherwise a new figure
    is created.
    """
    import matplotlib.pyplot as plt
    if (axs is None):
        fig = plt.figure()
    else:
        fig = None
    if (groups is None):
        n_gr = len(info)
        groups = nm.arange(n_gr)
    else:
        n_gr = len(groups)
    # Arrange subplots into a roughly square grid with at most 5 columns.
    n_col = min(5.0, nm.fix(nm.sqrt(n_gr)))
    if (int(n_col) == 0):
        n_row = 0
    else:
        n_row = int(nm.ceil((n_gr / n_col)))
    n_col = int(n_col)
    # Default per-group tick/bin settings to "unset".
    if (xticks is None):
        xticks = ([None] * n_gr)
    if (yticks is None):
        yticks = ([None] * n_gr)
    if (xnbins is None):
        xnbins = ([None] * n_gr)
    if (ynbins is None):
        ynbins = ([None] * n_gr)
    isub = offset = 0
    for (ig, (xlabel, ylabel, yscale, names, plot_kwargs)) in ordered_iteritems(info):
        if (ig not in groups):
            # Skip this group but keep `offset` aligned with `log`'s layout.
            offset += len(names)
            continue
        if (axs is None):
            ax = fig.add_subplot(n_row, n_col, (isub + 1))
        else:
            ax = axs[ig]
        if (not swap_axes):
            (xnb, ynb) = (xnbins[isub], ynbins[isub])
            (xti, yti) = (xticks[isub], yticks[isub])
            ax.set_yscale(yscale)
            for (ip, name) in enumerate(names):
                (xs, ys, vlines) = log[(ip + offset)]
                draw_data(ax, xs, ys, name, plot_kwargs[ip])
                for x in vlines:
                    # Mark notable instants with faint vertical lines.
                    ax.axvline(x, color='k', alpha=0.3)
        else:
            # Swapped axes: exchange labels/ticks/bins and apply the y-scale
            # to the x axis instead.
            (xlabel, ylabel) = (ylabel, xlabel)
            (xti, yti) = (yticks[isub], xticks[isub])
            (xnb, ynb) = (ynbins[isub], xnbins[isub])
            ax.set_xscale(yscale)
            for (ip, name) in enumerate(names):
                (xs, ys, vlines) = log[(ip + offset)]
                draw_data(ax, xs, ys, name, plot_kwargs[ip], swap_axes=True)
                for x in vlines:
                    ax.axhline(x, color='k', alpha=0.3)
        offset += len(names)
        if (xti is not None):
            ax.set_xticks(xti)
        if (yti is not None):
            ax.set_yticks(yti)
        if (xnb is not None):
            ax.locator_params(tight=True, axis='x', nbins=xnb)
        if (ynb is not None):
            ax.locator_params(tight=True, axis='y', nbins=ynb)
        if xlabel:
            ax.set_xlabel(xlabel)
        if ylabel:
            ax.set_ylabel(ylabel)
        if show_legends:
            ax.legend(loc='best')
        isub += 1
    plt.tight_layout(pad=0.5)
@click.command()
@click.argument('serialization-dir', type=click.Path(exists=True))
@click.argument('save-dir', type=click.Path())
def convert_allennlp_to_huggingface_model(serialization_dir: str, save_dir: str):
    """Convert an AllenNLP LUKE span-NER checkpoint to a HuggingFace model.

    Reads best.th / config.json / vocabulary from `serialization_dir`,
    validates that the checkpoint was trained with a HuggingFace tokenizer and
    the transformers-luke embedder, remaps the weight names, and saves a
    LukeForEntitySpanClassification model plus tokenizer into `save_dir`.

    Bug fix: the click decorators had been mangled into bare tuples
    (one of which was not even valid Python syntax); they are restored here.
    """
    logger.info(f'Loading allennlp data from {serialization_dir}...')
    model_weights_path = (Path(serialization_dir) / 'best.th')
    config_path = (Path(serialization_dir) / 'config.json')
    vocabulary_path = (Path(serialization_dir) / 'vocabulary/labels.txt')
    model_weights = torch.load(model_weights_path)
    config = json.load(open(config_path))
    # Sanity checks: only checkpoints with the expected components convert.
    if (config['dataset_reader']['tokenizer']['type'] != 'pretrained_transformer'):
        raise ValueError('Only models that use a HuggingFace tokenizer can be converted.')
    huggingface_tokenizer_name = config['dataset_reader']['tokenizer']['model_name']
    if (config['model']['type'] != 'span_ner'):
        raise ValueError('This script converts the weights of ExhaustiveNERModel (registered as `span_ner`).')
    if (config['model']['feature_extractor']['embedder']['type'] != 'transformers-luke'):
        raise ValueError('Only models that use TransformersLukeEmbedder (registered as `transformers-luke`) can be converted.')
    huggingface_model_name = config['model']['feature_extractor']['embedder']['model_name']
    # From here on `config` is the HuggingFace config, not the allennlp one.
    config = AutoConfig.from_pretrained(huggingface_model_name)
    tokenizer = AutoTokenizer.from_pretrained(huggingface_tokenizer_name)
    setattr(config, 'classifier_bias', ('classifier.bias' in model_weights))
    setattr(config, 'num_labels', model_weights['classifier.weight'].size(0))
    labels = [label.strip() for label in open(vocabulary_path, 'r')]
    config.id2label = {id_: label for (id_, label) in enumerate(labels)}
    config.label2id = {label: id_ for (id_, label) in enumerate(labels)}
    downstream_luke_model = LukeForEntitySpanClassification(config)
    huggingface_model_weights = {}
    for (key, w) in model_weights.items():
        # allennlp prefixes the transformer weights; rename to HF's `luke.*`.
        huggingface_model_weights[key.replace('feature_extractor.embedder.luke_model', 'luke')] = w
    downstream_luke_model.load_state_dict(huggingface_model_weights, strict=True)
    downstream_luke_model.save_pretrained(save_dir)
    tokenizer.save_pretrained(save_dir)
    logger.info(f'Saved hugging face model in {save_dir}.')
def get_bias_parameters(model):
    """Return all parameters of `model` whose name contains 'bias'."""
    biases = []
    for (name, parameter) in model.named_parameters():
        if 'bias' in name:
            biases.append(parameter)
    return biases
class Recorder():
    """Keeps the top-scoring unique SMILES seen during molecule optimization.

    Maintains `elems` (best `max_size` unique molecules) and optionally
    `filtered_elems` (same, restricted to molecules passing an RD filter),
    and logs evaluation scores to neptune.
    """
    def __init__(self, scoring_num_list, record_filtered=True, prefix=''):
        self.elems = []           # top unique RecorderElements
        self.filtered_elems = []  # top elements passing rd_filter
        self.seen_smis = set()    # every SMILES ever offered
        self.record_filtered = record_filtered
        if self.record_filtered:
            self.rd_filter = RDFilter()
        # Top-k sizes that evaluate() averages over.
        self.scoring_num_list = scoring_num_list
        self.prefix = prefix
        self.max_size = max(scoring_num_list)
        self.t = 0
    def __len__(self):
        return len(self.elems)
    def add_list(self, smis, scores):
        """Add a batch of (smiles, score) pairs, keeping only new top entries."""
        new_elems = [RecorderElement(smi=smi, score=score) for (smi, score) in zip(smis, scores)]
        new_elems = list(set(new_elems))
        # Drop molecules already seen in any earlier batch.
        new_elems = list(filter((lambda elem: (elem.smi not in self.seen_smis)), new_elems))
        self.seen_smis = self.seen_smis.union(smis)
        self.elems.extend(new_elems)
        self.elems = list(set(self.elems))
        if (len(self.elems) > self.max_size):
            self.elems = sorted(self.elems, reverse=True)[:self.max_size]
        if self.record_filtered:
            filtered_new_elems = new_elems
            if (len(self.filtered_elems) > 0):
                # Prune anything that can't beat the current worst kept score
                # before paying for the rd_filter call.
                min_filtered_elem_score = min(self.filtered_elems).score
                filtered_new_elems = list(filter((lambda elem: (elem.score > min_filtered_elem_score)), filtered_new_elems))
            if (len(filtered_new_elems) > self.max_size):
                filtered_new_elems = sorted(filtered_new_elems, reverse=True)[:self.max_size]
            filtered_new_elems = list(filter((lambda elem: (self.rd_filter(elem.smi) > 0.5)), filtered_new_elems))
            self.filtered_elems.extend(filtered_new_elems)
            if (len(self.filtered_elems) > self.max_size):
                self.filtered_elems = sorted(self.filtered_elems, reverse=True)[:self.max_size]
    def full(self):
        """True when all tracked record lists have reached capacity."""
        return ((len(self.elems) == self.max_size) and ((not self.record_filtered) or (self.record_filtered and (len(self.filtered_elems) == self.max_size))))
    def evaluate(self, rd_filtered):
        """Average of mean top-k scores over every k in scoring_num_list."""
        if (not rd_filtered):
            scores = [elem.score for elem in sorted(self.elems, reverse=True)]
        else:
            scores = [elem.score for elem in sorted(self.filtered_elems, reverse=True)]
        evaluation_elemwise_scores = np.array(scores)
        evaluation_score = 0.0
        for scoring_num in self.scoring_num_list:
            evaluation_score += (evaluation_elemwise_scores[:scoring_num].mean() / len(self.scoring_num_list))
        return evaluation_score
    def log(self):
        """Log current evaluation scores to neptune and advance the step counter."""
        score = self.evaluate(rd_filtered=False)
        neptune.log_metric(f'{self.prefix}eval_optimized_score', score)
        if self.record_filtered:
            filtered_score = self.evaluate(rd_filtered=True)
            neptune.log_metric(f'{self.prefix}eval_filtered_score', filtered_score)
        self.t += 1
    def log_final(self):
        """Dump every kept molecule and its score to neptune."""
        for elem in self.elems:
            neptune.log_text('optimized_smi', elem.smi)
            neptune.log_metric('optimized_score', elem.score)
        if self.record_filtered:
            for elem in self.filtered_elems:
                neptune.log_text('filtered_smi', elem.smi)
                neptune.log_metric('filtered_score', elem.score)
    def get_topk(self, k):
        """Truncate to the best k elements and return (smiles, scores)."""
        self.elems = sorted(self.elems, reverse=True)[:k]
        return ([elem.smi for elem in self.elems], [elem.score for elem in self.elems])
def main():
    """Benchmark PoseNet inference FPS over the images in args.image_dir."""
    with tf.Session() as sess:
        (model_cfg, model_outputs) = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']
        num_images = args.num_images
        filenames = [f.path for f in os.scandir(args.image_dir) if (f.is_file() and f.path.endswith(('.png', '.jpg')))]
        if (len(filenames) > num_images):
            filenames = filenames[:num_images]
        # Pre-load and preprocess all images so timing covers inference only.
        images = {f: posenet.read_imgfile(f, 1.0, output_stride)[0] for f in filenames}
        start = time.time()
        for i in range(num_images):
            # Cycle through the loaded images if num_images exceeds their count.
            (heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result) = sess.run(model_outputs, feed_dict={'image:0': images[filenames[(i % len(filenames))]]})
            output = posenet.decode_multiple_poses(heatmaps_result.squeeze(axis=0), offsets_result.squeeze(axis=0), displacement_fwd_result.squeeze(axis=0), displacement_bwd_result.squeeze(axis=0), output_stride=output_stride, max_pose_detections=10, min_pose_score=0.25)
        print('Average FPS:', (num_images / (time.time() - start)))
def loss_lsgan_gen(dis_out_fake):
    """LSGAN generator loss: 0.5 * E[(D(G(z)) - 1)^2]."""
    target = torch.ones_like(dis_out_fake)
    return (0.5 * (dis_out_fake - target).pow(2)).mean()
class Partition10(nn.Module):
    """Auto-generated pipeline-parallel partition of a T5 model (stage 10).

    Finishes encoder block 13's self-attention and feed-forward sublayers and
    starts block 14's self-attention (Q/K projections and raw scores). Layer
    modules are injected via `layers` keyed by the scopes below; `lookup`
    maps the local module names back to the original model's state-dict paths.
    NOTE(review): machine-generated code — do not reorder operations.
    """
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[13]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]']
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:10'):
        super().__init__()
        # Register the injected layer modules as l_0 ... l_11 in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register injected tensors as parameters or buffers as appropriate.
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1, 1, 1]
        self.lookup = {'l_0': 'encoder.13.0.SelfAttention.v', 'l_1': 'encoder.13.0.SelfAttention.dropout', 'l_2': 'encoder.13.0.SelfAttention.o', 'l_3': 'encoder.13.0.dropout', 'l_4': 'encoder.13.1.layer_norm', 'l_5': 'encoder.13.1.DenseReluDense.wi', 'l_6': 'encoder.13.1.DenseReluDense.dropout', 'l_7': 'encoder.13.1.DenseReluDense.wo', 'l_8': 'encoder.13.1.dropout', 'l_9': 'encoder.14.0.layer_norm', 'l_10': 'encoder.14.0.SelfAttention.q', 'l_11': 'encoder.14.0.SelfAttention.k'}
        self.to(self.device)
    def forward(self, *args):
        # NOTE(review): tensor roles inferred from usage — x0 position bias,
        # x1 residual input, x2 hidden states for V, x3 batch size, x4/x5
        # query/key projections from the previous stage. Confirm against the
        # partitioning tool's metadata.
        (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        # Block 13 self-attention: V projection, split into 32 heads of 128.
        t_0 = self.l_0(x2)
        t_0 = t_0.view(x3, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        # Attention scores from the previous stage's Q and K, plus bias x0.
        t_1 = x5.transpose(3, 2)
        t_1 = torch.matmul(x4, t_1)
        t_1 += x0
        # Softmax in float32 for numerical stability, then cast back.
        t_2 = t_1.float()
        t_2 = torch.nn.functional.softmax(t_2, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_2.type_as(t_1)
        t_1 = self.l_1(t_1)
        # Weighted values, merge heads, output projection + dropout + residual.
        t_0 = torch.matmul(t_1, t_0)
        t_0 = t_0.transpose(1, 2)
        t_0 = t_0.contiguous()
        t_0 = t_0.view(x3, (- 1), 4096)
        t_0 = self.l_2(t_0)
        t_0 = self.l_3(t_0)
        t_0 = (x1 + t_0)
        # Block 13 feed-forward sublayer with residual connection.
        t_1 = self.l_4(t_0)
        t_1 = self.l_5(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = self.l_8(t_1)
        t_1 = (t_0 + t_1)
        # Block 14 self-attention: layer norm, Q/K projections, raw scores.
        t_0 = self.l_9(t_1)
        t_2 = t_0.size()
        t_3 = self.l_10(t_0)
        t_4 = self.l_11(t_0)
        t_2 = t_2[0]
        t_3 = t_3.view(t_2, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_3, t_4)
        t_4 += x0
        return list(flatten((x0, t_1, t_0, t_2, t_4)))
    # The following delegate to the pipeline framework's module-level helpers
    # so parameter naming/placement stays consistent across stages.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def read_binary_audio(bin_data, tar_sr=None):
    """Decode in-memory audio bytes to a float tensor, optionally resampling.

    Returns (waveform, sample_rate): the waveform scaled to int16 range
    (values clipped to [-1, 1] then multiplied by 32768), and `tar_sr` when
    resampling took place, otherwise the file's original rate.
    """
    (data, ori_sr) = sf.read(io.BytesIO(bin_data), dtype='float32')
    data = data.T  # channels-first layout
    if (tar_sr is None) or (ori_sr == tar_sr):
        # No resampling needed; report the original rate.
        tar_sr = ori_sr
    else:
        data = librosa.resample(data, ori_sr, tar_sr)
    data = np.clip(data, -1, 1) * 32768.0
    return (torch.FloatTensor(data.astype(np.float32)), tar_sr)
class BasicBlock(nn.Module):
    """ResNeXt-style basic residual block: two 3x3 convolutions (the second
    grouped) with batch norm and ReLU, plus an identity or downsampled skip
    connection."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, num_group=32):
        super(BasicBlock, self).__init__()
        width = planes * 2
        self.conv1 = conv3x3(inplanes, width, stride)
        self.bn1 = nn.BatchNorm2d(width)
        self.relu = nn.ReLU(inplace=True)
        # Grouped convolution (cardinality = num_group).
        self.conv2 = conv3x3(width, width, groups=num_group)
        self.bn2 = nn.BatchNorm2d(width)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip path: identity unless a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def test_serialization(explainer_type, model, masker, data, rtol=1e-05, atol=1e-08, **kwargs):
    """Round-trip an explainer through save()/load() and verify the reloaded
    explainer reproduces the original attributions."""
    def _select(allowed):
        # Forward only the kwargs relevant to a particular call site.
        return {k: v for (k, v) in kwargs.items() if k in allowed}
    explainer_original = explainer_type(model, masker, **_select(['algorithm']))
    shap_values_original = explainer_original(data[:1])
    with tempfile.TemporaryFile() as temp_serialization_file:
        explainer_original.save(temp_serialization_file, **_select(['model_saver', 'masker_saver']))
        temp_serialization_file.seek(0)  # rewind before reading back
        explainer_new = explainer_type.load(temp_serialization_file, **_select(['model_loader', 'masker_loader']))
        shap_values_new = explainer_new(data[:1], **_select(['max_evals']))
    assert np.allclose(shap_values_original.base_values, shap_values_new.base_values, rtol=rtol, atol=atol)
    assert np.allclose(shap_values_original[0].values, shap_values_new[0].values, rtol=rtol, atol=atol)
    assert isinstance(explainer_original, type(explainer_new))
    assert isinstance(explainer_original.masker, type(explainer_new.masker))
class BNForwardFoldingNetTest(BasePytorchTest):
    """Checks that forward batch-norm folding is applied (or skipped) as
    configured, and that folding does not change the network's output."""
    def __init__(self, unit_test, test_layer, fold_applied=True, add_bn=False, is_dw=False):
        super().__init__(unit_test, float_reconstruction_error=1e-06, val_batch_size=2)
        self.test_layer = test_layer
        self.bn_layer = nn.BatchNorm2d
        self.fold_applied = fold_applied  # whether folding is expected to occur
        self.add_bn = add_bn
        self.is_dw = is_dw                # depthwise variant of the test net
    def create_feature_network(self, input_shape):
        return BNForwardFoldingNet(self.test_layer, self.add_bn, self.is_dw)
    def get_tpc(self):
        # Restrict to the no-quantization platform config only.
        return {'no_quantization': super().get_tpc()['no_quantization']}
    def get_core_configs(self):
        return {'no_quantization': super().get_core_configs()['no_quantization']}
    def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
        set_model(float_model)
        quant_model = quantized_models['no_quantization']
        set_model(quant_model)
        if self.is_dw:
            # NOTE(review): Conv2d-count equality is used as a proxy for "BN
            # still present" in the depthwise case — confirm intent.
            is_bn_in_model = (sum([(type(module) is nn.Conv2d) for (name, module) in float_model.named_modules()]) == sum([(type(module) is nn.Conv2d) for (name, module) in quant_model.named_modules()]))
        else:
            is_bn_in_model = (nn.BatchNorm2d in [type(module) for (name, module) in quant_model.named_modules()])
        # Folding applied XOR BN still in model.
        self.unit_test.assertTrue((self.fold_applied is not is_bn_in_model))
        self.unit_test.assertEqual(input_x[0].shape[0], 2, 'Expecting batch of size 2 for BN folding test.')
        out_float = torch_tensor_to_numpy(float_model(*input_x))
        out_quant = torch_tensor_to_numpy(quant_model(*input_x))
        (norm_mse, _, max_error, _) = normalized_mse(out_float, out_quant)
        # At least one of the two batch samples must match the float output.
        self.unit_test.assertTrue((np.isclose(norm_mse[0], 0, atol=1e-05) or np.isclose(norm_mse[1], 0, atol=1e-05)))
        self.unit_test.assertTrue((np.isclose(max_error[0], 0, atol=0.0001) or np.isclose(max_error[1], 0, atol=0.0001)))
def compute_error(model_file, data_loader, cuda_on=False, name=''):
    """Evaluate a pickled model on a data loader and print loss/accuracy.

    Args:
        model_file: path to a model saved with ``torch.save(model, path)``
            (a fully pickled nn.Module, not a state_dict).
        data_loader: iterable of (data, target) batches; must expose
            ``.dataset`` so the dataset size can be read.
        cuda_on: if True, move model and batches to GPU.
        name: prefix for the printed summary line.

    Returns:
        (avg_loss, accuracy) as floats (new; previous version returned None).
    """
    # weights_only=False is required on torch>=2.6 (new default True) because
    # the checkpoint stores a whole pickled module, not just tensors.
    model = torch.load(model_file, weights_only=False)
    if cuda_on:
        model.cuda()
    model.eval()
    test_loss = 0.0
    correct = 0
    # torch.no_grad() replaces the long-removed Variable(..., volatile=True).
    with torch.no_grad():
        for (data, target) in data_loader:
            if cuda_on:
                (data, target) = (data.cuda(), target.cuda())
            output = model(data)
            # reduction='sum' accumulates the total NLL over the dataset; the
            # deprecated size_average=False spelling meant the same thing.
            # .item() replaces the removed 0-dim indexing `.data[0]`.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(data_loader.dataset)
    accuracy = (100.0 * correct) / len(data_loader.dataset)
    print((name + 'Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(test_loss, correct, len(data_loader.dataset), accuracy)))
    return (test_loss, accuracy)
class NeumannDirichlet(CompositeBase):
    """Composite function space with a Neumann condition on the left boundary
    and a Dirichlet condition on the right boundary of the domain."""

    def __init__(self, N, quad='GC', bc=(0, 0), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        # A plain (left, right) pair is promoted to a BoundaryConditions
        # mapping: Neumann value on the left, Dirichlet value on the right.
        if isinstance(bc, (tuple, list)):
            bc = BoundaryConditions({'left': {'N': bc[0]}, 'right': {'D': bc[1]}}, domain=domain)
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # Stencil coefficients (in the symbolic index n) combining three
        # consecutive orthogonal polynomials into one basis function.
        self._stencil = {0: 1, 1: ((- ((4 * n) + 4)) / (((2 * (n ** 2)) + (6 * n)) + 5)), 2: ((- (((2 * (n ** 2)) + (2 * n)) + 1)) / (((2 * (n ** 2)) + (6 * n)) + 5))}

    # BUG FIX: these helpers take no instance argument, so without
    # @staticmethod every instance call raised TypeError.
    @staticmethod
    def boundary_condition():
        return 'NeumannDirichlet'

    @staticmethod
    def short_name():
        return 'ND'
def prepare_test(recipe_folder='tests/recipes', script_field='Script_file', hparam_field='Hparam_file', test_field='test_debug_flags', check_field='test_debug_checks', download_field='test_download', message_field='test_message', filters_fields=None, filters=None):
    """Scan every recipe CSV in ``recipe_folder`` and collect per-recipe
    fields needed to run its tests.

    Args:
        recipe_folder: directory containing the recipe CSV files.
        script_field, hparam_field, test_field, check_field: names of the
            mandatory CSV columns.
        download_field, message_field: names of optional CSV columns.
        filters_fields: CSV columns to filter rows on (default: none).
        filters: allowed values for each filter field (default: none).

    Returns:
        Tuple of six dicts keyed by recipe id:
        (scripts, hparams, flags, checks, downloads, messages).
    """
    # BUG FIX: mutable default arguments ([]) are shared across calls; use
    # None sentinels and create fresh lists instead.
    if filters_fields is None:
        filters_fields = []
    if filters is None:
        filters = []
    test_script = {}
    test_hparam = {}
    test_flag = {}
    test_check = {}
    test_download = {}
    test_message = {}
    print(f' filters_fields={filters_fields} => filters={filters}')
    for recipe_csvfile in os.listdir(recipe_folder):
        if recipe_csvfile in __skip_list:
            continue
        print(f'Loading recipes from: {recipe_csvfile}')
        with open(os.path.join(recipe_folder, recipe_csvfile), newline='') as csvf:
            reader = csv.DictReader(csvf, delimiter=',', skipinitialspace=True)
            for (row_id, row) in enumerate(reader):
                # +2: rows are 1-based and the header occupies the first line.
                recipe_id = f'{recipe_csvfile[:(- 4)]}_row_{(row_id + 2):02d}'
                if not check_row_for_test(row, filters_fields, filters, test_field):
                    print(f' Skipped {recipe_id}')
                    continue
                test_script[recipe_id] = row[script_field].strip()
                test_hparam[recipe_id] = row[hparam_field].strip()
                test_flag[recipe_id] = row[test_field].strip()
                test_check[recipe_id] = row[check_field].strip()
                if download_field in row:
                    test_download[recipe_id] = row[download_field].strip()
                if message_field in row:
                    test_message[recipe_id] = row[message_field].strip()
    return (test_script, test_hparam, test_flag, test_check, test_download, test_message)
def DFG_c(root_node, index_to_code, states):
    """Recursively extract data-flow edges from a tree-sitter C parse tree.

    Args:
        root_node: current tree-sitter node being visited.
        index_to_code: dict mapping a leaf's (start_point, end_point) to
            (token_index, token_text).
        states: dict mapping a variable name to the list of token indices of
            its most recent definition(s); threaded through the recursion.

    Returns:
        (DFG, states): DFG is a list of edges
        (token, idx, 'comesFrom'|'computedFrom', [src_tokens], [src_idxs])
        sorted by token index; states is the updated flow state.
    """
    # Node-type categories that select the handling branch below.
    # NOTE(review): 'init_declatator' looks like a typo of 'init_declarator'
    # — confirm against the tree-sitter C grammar node names.
    assignment = ['assignment_expression']
    def_statement = ['init_declatator', 'pointer_declarator', 'array_declarator']
    increment_statement = ['update_expression']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    while_statement = ['while_statement']
    parameter_statement = ['parameter_declaration']
    do_first_statement = []
    # Copy so mutations never leak back into the caller's state.
    states = states.copy()
    # Case 1: leaf tokens (plus whole string literals), excluding comments.
    if (((len(root_node.children) == 0) or (root_node.type == 'string')) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if ((root_node.type == code) or ((root_node.parent.type == 'function_declarator') and root_node)):
            # Keywords/punctuation (type == text) and function names: no flow.
            return ([], states)
        elif (code in states):
            # Known variable: edge from its last definition site(s).
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        elif (root_node.type == 'identifier'):
            if (root_node.parent.type == 'declaration'):
                # Fresh declaration: record a new definition site.
                states[code] = [idx]
                return ([(code, idx, 'comesFrom', [], [])], states)
            return ([], states)
        else:
            return ([], states)
    # Case 2: declarators (variable definitions, possibly with initializer).
    elif (root_node.type in def_statement):
        if (root_node.parent.type == 'function_definition'):
            # Function signature: unwrap nested pointer declarators, then
            # recurse into the children (parameters etc.).
            while ((root_node.type == 'pointer_declarator') and (root_node.child_by_field_name('declarator').type == 'pointer_declarator')):
                root_node = root_node.child_by_field_name('declarator')
            DFG = []
            for child in root_node.children:
                if (child.type not in do_first_statement):
                    (temp, states) = DFG_c(child, index_to_code, states)
                    DFG += temp
            return (sorted(DFG, key=(lambda x: x[1])), states)
        name = root_node.child_by_field_name('declarator')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            # Declaration without initializer: each name token defines itself.
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            # Declaration with initializer: flow from every value token into
            # every declared name token.
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_c(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    # Case 3: assignment expressions are intentionally skipped here.
    elif (root_node.type in assignment):
        return ([], states)
    # Case 4: update expressions (x++, --x): value computed from itself.
    elif (root_node.type in increment_statement):
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Case 5: if/else — analyze each branch from the pre-branch state, then
    # merge all branch states (plus the fall-through state when no else).
    elif (root_node.type in if_statement):
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                # Condition and first (then) body: extend current_states.
                (temp, current_states) = DFG_c(child, index_to_code, current_states)
                DFG += temp
            else:
                # Subsequent else/else-if parts start from the original state.
                flag = True
                (temp, new_states) = DFG_c(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else branch: the original state also survives.
            others_states.append(states)
        new_states = {}
        # Union of all branch states, then of the incoming state.
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in states:
            if (key not in new_states):
                new_states[key] = states[key]
            else:
                new_states[key] += states[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    # Case 6: for loops — process children twice (second pass skips the init
    # declaration) to approximate loop-carried flow, then deduplicate edges.
    elif (root_node.type in for_statement):
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_c(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_c(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'variable_declaration'):
                flag = True
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Case 7: while loops — two full passes over the body, then deduplicate.
    elif (root_node.type in while_statement):
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_c(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Case 8: function parameters — drill down to the identifier through
    # pointer/parenthesized declarators and record it as a definition.
    elif (root_node.type in parameter_statement):
        child = root_node.child_by_field_name('declarator')
        if (not child):
            return ([], states)
        while (child.type != 'identifier'):
            if (child.type == 'parenthesized_declarator'):
                child = child.children[1]
            else:
                child = child.child_by_field_name('declarator')
                if (not child):
                    return ([], states)
        (idx, code) = index_to_code[(child.start_point, child.end_point)]
        states[code] = [idx]
        return ([(code, idx, 'comesFrom', [], [])], states)
    # Default: recurse into all children, threading the state through.
    else:
        DFG = []
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_c(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
def system_worker(system_id, story_generator_class, request_queue, result_queue):
    """Worker loop for one story-generation system.

    Instantiates ``story_generator_class`` for ``system_id`` and serves
    requests from ``request_queue`` forever, pushing each result onto
    ``result_queue``.
    """
    generator = story_generator_class(system_id)
    while True:
        req = request_queue.get()
        action = req['action']
        if action == 'generate':
            result = generator.generate_response(
                req['topic'],
                kw_temp=req['kw_temp'],
                story_temp=req['story_temp'],
                dedup=req.get('dedup', None),
                max_len=req.get('max_len', None),
                use_gold_titles=req.get('use_gold_titles', None))
        elif action == 'generate_storyline':
            result = generator.generate_storyline(
                req['topic'],
                kw_temp=req['kw_temp'],
                dedup=req.get('dedup', None),
                max_len=req.get('max_len', None),
                use_gold_titles=req.get('use_gold_titles', None))
        elif action == 'collab_storyline':
            result = generator.collab_storyline(
                req['topic'],
                req['storyline'],
                kw_temp=req.get('kw_temp', None),
                dedup=req.get('dedup', None),
                max_len=req.get('max_len', None))
        elif action == 'generate_interactive_story':
            result = generator.generate_interactive_story(
                req['topic'],
                req['storyline'],
                req['story_sentences'],
                story_temp=req.get('story_temp', None),
                only_one=req['only_one'])
        elif action == 'generate_story':
            result = generator.generate_story(
                req['topic'],
                req['storyline'],
                story_temp=req.get('story_temp', None))
        else:
            # Unknown action: report an error keyed by this system's id.
            result = {system_id: 'internal error'}
        result_queue.put(result)
def train(epoch):
    """Run one training epoch on the audio regression data.

    Relies on module-level globals: ``model``, ``optimizer``, ``criterion``,
    ``config``, ``audio_features``, ``audio_targets``, ``train_dep_idxs``,
    ``train_non_idxs``.

    Args:
        epoch: zero-based epoch number (only used for logging).

    Returns:
        The epoch's training mean absolute error.
    """
    global lr, train_acc
    model.train()
    batch_idx = 1
    total_loss = 0
    correct = 0
    pred = np.array([])
    # Depressed + non-depressed sample indices are concatenated into one set.
    X_train = audio_features[(train_dep_idxs + train_non_idxs)]
    Y_train = audio_targets[(train_dep_idxs + train_non_idxs)]
    for i in range(0, X_train.shape[0], config['batch_size']):
        # The last slice may be shorter than batch_size.
        if ((i + config['batch_size']) > X_train.shape[0]):
            (x, y) = (X_train[i:], Y_train[i:])
        else:
            (x, y) = (X_train[i:(i + config['batch_size'])], Y_train[i:(i + config['batch_size'])])
        if config['cuda']:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True).cuda(), Variable(torch.from_numpy(y)).cuda())
        else:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True), Variable(torch.from_numpy(y)).type(torch.FloatTensor))
        optimizer.zero_grad()
        output = model(x)
        # Regression loss against targets reshaped to the output's shape.
        loss = criterion(output, y.view_as(output))
        loss.backward()
        optimizer.step()
        batch_idx += 1
        # NOTE(review): .numpy() raises on CUDA tensors — the cuda branch
        # above presumably needs a .cpu() here; confirm.
        pred = np.hstack((pred, output.flatten().detach().numpy()))
        total_loss += loss.item()
    train_mae = mean_absolute_error(Y_train, pred)
    print('Train Epoch: {:2d}\t Learning rate: {:.4f}\t Loss: {:.4f}\t MAE: {:.4f}\t RMSE: {:.4f}\n '.format((epoch + 1), config['learning_rate'], total_loss, train_mae, np.sqrt(mean_squared_error(Y_train, pred))))
    return train_mae
def miniBatchStdDev(x, subGroupSize=4):
    """Append a minibatch standard-deviation feature map as an extra channel.

    The batch is split into sub-groups of at most ``subGroupSize`` samples;
    each group's standard deviation, averaged over channels and positions,
    is broadcast back as one constant channel per sample (as used in
    progressive-GAN discriminators).
    """
    batch, channels, height, width = x.size()
    groupSize = min(batch, subGroupSize)
    if batch % groupSize != 0:
        # Batch not divisible by the group size: use one group of everything.
        groupSize = batch
    nGroups = batch // groupSize
    if groupSize <= 1:
        # A single-sample group has no intra-group spread: append zeros.
        extra = torch.zeros(x.size(0), 1, x.size(2), x.size(3), device=x.device)
        return torch.cat([x, extra], dim=1)
    grouped = x.view(-1, groupSize, channels, height, width)
    # Per-position variance within each group, stabilized before the sqrt.
    stddev = torch.sqrt(torch.var(grouped, 1) + 1e-08)
    # One scalar per group: mean std over channels and positions.
    per_group = torch.mean(stddev.view(nGroups, -1), 1).view(nGroups, 1)
    per_group = per_group.expand(nGroups, height * width).view((nGroups, 1, 1, height, width))
    per_group = per_group.expand(nGroups, groupSize, -1, -1, -1)
    extra = per_group.contiguous().view((-1, 1, height, width))
    return torch.cat([x, extra], dim=1)
def get_loaders_and_models(args, device):
    """Thin dispatch wrapper: currently always builds the face-translation
    latent-conv perceptual loaders/models (see that factory for details)."""
    return get_facetranslation_latent_conv_perceptual(args, device)
# BUG FIX: the registry call was a bare statement, so the decorator it
# returned was discarded and DropPath was never registered; it must be
# applied as a class decorator.
@_LAYERS.register_module()
class DropPath(nn.Module):
    """Stochastic-depth regularizer: randomly zeroes whole residual paths.

    Args:
        drop_prob: probability of dropping the path (default 0.1).
    """

    def __init__(self, drop_prob=0.1):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # drop_path only drops while self.training is True.
        return drop_path(x, self.drop_prob, self.training)
def update_lr(optimizer, n_vals_without_improvement):
    """Step-decay the learning rate after sustained lack of validation
    improvement, tracking the number of decays in the global ``ndecs``.

    First decay (lr/10) after a third of the early-stopping patience,
    second (lr/100) after two thirds; otherwise re-apply the current decay.
    """
    global ndecs
    third_of_patience = args.early_stopping // 3
    if ndecs == 0 and n_vals_without_improvement > third_of_patience:
        new_lr = args.lr / 10
        ndecs = 1
    elif ndecs == 1 and n_vals_without_improvement > 2 * third_of_patience:
        new_lr = args.lr / 100
        ndecs = 2
    else:
        new_lr = args.lr / (10 ** ndecs)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class dotdict(dict):
    """dict whose items can also be read, written and deleted as attributes.

    Missing attributes read as None (``dict.get`` semantics); setting or
    deleting an attribute writes/removes the corresponding key.
    """

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; mirrors dict.get,
        # so absent keys yield None rather than AttributeError.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
class HfDeepSpeedConfig:
    """Thin wrapper around a DeepSpeed configuration (dict or JSON file path)
    offering dotted-path lookups and ZeRO/offload convenience checks."""

    def __init__(self, config_file_or_dict):
        # Register this object as the global HF DeepSpeed config holder.
        set_hf_deepspeed_config(self)
        dep_version_check('deepspeed')
        if isinstance(config_file_or_dict, dict):
            # Deep-copy so later mutations never leak back to the caller.
            config = deepcopy(config_file_or_dict)
        elif isinstance(config_file_or_dict, str):
            with io.open(config_file_or_dict, 'r', encoding='utf-8') as f:
                config = json.load(f)
        else:
            raise ValueError('expecting either a path to a DeepSpeed config file or a pre-populated dict')
        self.config = config
        self._stage = self.get_value('zero_optimization.stage', -1)
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            valid_devices = {'cpu', 'nvme'}
            configured_devices = {
                self.get_value('zero_optimization.offload_optimizer.device'),
                self.get_value('zero_optimization.offload_param.device'),
            }
            if configured_devices & valid_devices:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Walk the dotted path; return (containing_dict, leaf_key), or
        (None, leaf_key) when an intermediate node is missing."""
        node = self.config
        *branches, leaf = ds_key_long.split('.')
        for name in branches:
            node = node.get(name)
            if node is None:
                return (None, leaf)
        return (node, leaf)

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted path, or ``default`` if absent."""
        node, leaf = self.find_config_node(ds_key_long)
        if node is None:
            return default
        return node.get(leaf, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Remove the sub-tree at the dotted path.

        Silently returns when the path is absent unless ``must_exist``.
        """
        node = self.config
        parent = None
        last_name = None
        for name in ds_key_long.split('.'):
            parent = node
            last_name = name
            node = node.get(name)
            if node is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        if parent is not None:
            parent.pop(last_name)

    def is_true(self, ds_key_long):
        """True only if the key exists and its value is truthy."""
        value = self.get_value(ds_key_long)
        return value is not None and bool(value)

    def is_false(self, ds_key_long):
        """True only if the key exists and its value is falsy."""
        value = self.get_value(ds_key_long)
        return value is not None and not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
def get_unique_graph(tops, check_types=None, none_if_empty=False):
    """Return the single tf Graph that all the given ops/tensors belong to.

    Args:
        tops: a tf Graph, or an iterable of operations/tensors.
        check_types: type or tuple of allowed element types
            (defaults to (Operation, Tensor)).
        none_if_empty: return None instead of raising when ``tops`` is empty.

    Raises:
        TypeError: if ``tops`` is not iterable or an element has a wrong type.
        ValueError: if elements span different graphs, or ``tops`` is empty
            and ``none_if_empty`` is False.
    """
    if isinstance(tops, tf_ops.Graph):
        return tops
    if not is_iterable(tops):
        raise TypeError('{} is not iterable'.format(type(tops)))
    if check_types is None:
        check_types = (tf_ops.Operation, tf_ops.Tensor)
    elif not is_iterable(check_types):
        check_types = (check_types,)
    graph = None
    for top in tops:
        if not isinstance(top, check_types):
            allowed = ', '.join([str(t) for t in check_types])
            raise TypeError('Expected a type in ({}), got: {}'.format(allowed, type(top)))
        if graph is None:
            graph = top.graph
        elif graph is not top.graph:
            raise ValueError('Operation {} does not belong to given graph'.format(top))
    if graph is None and not none_if_empty:
        raise ValueError("Can't find the unique graph of an empty list")
    return graph
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register the constructors and methods of ns3::CallbackImplBase on the
    pybindgen class wrapper ``cls``."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    # One protected static GetCppTypeid instantiation per template argument
    # (registration order preserved; trailing spaces are intentional).
    template_args = [
        'ns3::ObjectBase*',
        'void',
        'ns3::WifiMacHeader const&',
        'ns3::Ipv4Address',
        'unsigned char',
        'ns3::Ptr<ns3::Socket> ',
        'bool',
        'ns3::Address const&',
        'unsigned int',
        'ns3::Ptr<ns3::NetDevice> ',
        'ns3::Ptr<ns3::Packet const> ',
        'unsigned short',
        'ns3::NetDevice::PacketType',
        'ns3::Ipv4Header const&',
        'ns3::Ptr<ns3::Ipv4> ',
        'ns3::Ipv4L3Protocol::DropReason',
        'ns3::Ptr<ns3::Packet> ',
        'ns3::Ptr<ns3::Ipv4Route> ',
        'ns3::Mac48Address',
        'ns3::Ptr<ns3::MobilityModel const> ',
        'ns3::WifiTxVector',
        'ns3::MpduInfo',
        'ns3::SignalNoiseDbm',
        'ns3::WifiMacHeader const*',
        'ns3::dsr::DsrOptionSRHeader const&',
    ]
    for template_arg in template_args:
        cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[template_arg])
    return
def istft_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, window_size, stride, fft_size, window_type='hanning', center=True, pad_mode='reflect', as_stft_backward=False):
    """Backward pass of ISTFT: the gradient of an inverse STFT is computed by
    running the forward STFT kernel on the incoming output gradient."""
    grad_y = grad_inputs[0]
    # Flipping as_istft_backward lets the same STFT kernel serve both
    # directions of differentiation.
    real_grad, imag_grad = F.stft(grad_y, window_size, stride, fft_size, window_type, center, pad_mode, as_istft_backward=(not as_stft_backward))
    return (real_grad, imag_grad)
def main():
    """Continual-learning experiment on (permuted/split) MNIST with BayesBiNN.

    Parses CLI arguments, builds data loaders, model, optimizer and loss,
    then trains sequentially over ``num-tasks`` permuted-MNIST tasks,
    reporting per-task and averaged test accuracy after each task.
    """
    # --- argument parsing -------------------------------------------------
    parser = argparse.ArgumentParser(description='PyTorch MNIST Conlinual leanring Example')
    parser.add_argument('--model', type=str, default='MLP_CL', help='Model name: MLP_CL')
    parser.add_argument('--bnmomentum', type=float, default=0.15, help='BN layer momentum value')
    parser.add_argument('--optim', type=str, default='BayesBiNN', help='Optimizer: BayesBiNN')
    parser.add_argument('--val-split', type=float, default=0, help='Random validation set ratio')
    parser.add_argument('--criterion', type=str, default='cross-entropy', help='loss funcion: square-hinge or cross-entropy')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)')
    parser.add_argument('--train-samples', type=int, default=1, metavar='N', help='number of Monte Carlo samples used in BayesBiNN (default: 1)')
    parser.add_argument('--test-samples', type=int, default=100, metavar='N', help='number of Monte Carlo samples used in evaluation for BayesBiNN (default: 1), if 0, point estimate using meanis applied, which is similar to the Bop optimizer')
    parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)')
    parser.add_argument('--lr-end', type=float, default=1e-16, metavar='LR-end', help='learning rate (default: 0.01)')
    parser.add_argument('--lr-decay', type=float, default=0.9, metavar='LR-decay', help='learning rated decay factor for each epoch (default: 0.9)')
    parser.add_argument('--decay-steps', type=int, default=1, metavar='N', help='LR rate decay steps (default: 1)')
    parser.add_argument('--momentum', type=float, default=0.0, metavar='M', help='BayesBiNN momentum (default: 0.0)')
    parser.add_argument('--data-augmentation', action='store_true', default=False, help='Enable data augmentation')
    parser.add_argument('--log-interval', type=int, default=5000, metavar='N', help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
    parser.add_argument('--experiment-id', type=int, default=0, help='Experiment ID for log files (int)')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--lrschedular', type=str, default='Cosine', help='Mstep,Expo,Cosine')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='dropout rate')
    parser.add_argument('--trainset_scale', type=int, default=1, help='scale of training set')
    parser.add_argument('--lamda', type=float, default=10, metavar='lamda-init', help='initial mean value of the natural parameter lamda(default: 10)')
    parser.add_argument('--lamda-std', type=float, default=0, metavar='lamda-init', help='linitial std value of the natural parameter lamda(default: 0)')
    parser.add_argument('--temperature', type=float, default=0.01, metavar='temperature', help='initial temperature for BayesBiNN (default: 1)')
    parser.add_argument('--kl-reweight', type=float, default=1.0, metavar='min temperature', help='initial temperature for BayesBiNN (default: 1)')
    parser.add_argument('--bn-affine', type=float, default=0, metavar='bn-affine', help='whether there is bn learnable parameters, 1: learnable, 0: no (default: 1)')
    parser.add_argument('--num-tasks', type=int, default=5, metavar='num-tasks', help='number of tasks for continual learning')
    parser.add_argument('--scenario', type=str, default='class', choices=['task', 'domain', 'class'])
    parser.add_argument('--experiment', type=str, default='permMNIST', choices=['permMNIST', 'splitMNIST'])
    args = parser.parse_args()
    args.use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
    # Seed is offset by the experiment id so repeated runs differ.
    torch.manual_seed((args.seed + args.experiment_id))
    np.random.seed((args.seed + args.experiment_id))
    # --- output directory and config dump ---------------------------------
    now = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
    args.out_dir = os.path.join('./outputs', 'mnist_CL_{}_{}_lr{}_{}'.format(args.model, args.optim, args.lr, now))
    os.makedirs(args.out_dir, exist_ok=True)
    config_save_path = os.path.join(args.out_dir, 'configs', 'config_{}.json'.format(args.experiment_id))
    os.makedirs(os.path.dirname(config_save_path), exist_ok=True)
    with open(config_save_path, 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    args.device = torch.device(('cuda' if args.use_cuda else 'cpu'))
    print('Running on', args.device)
    print('')
    for (key, val) in vars(args).items():
        print('{}: {}'.format(key, val))
    print('\n')
    # --- data loaders -----------------------------------------------------
    if args.data_augmentation:
        transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    else:
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    kwargs = ({'num_workers': 2, 'pin_memory': True, 'drop_last': True} if args.use_cuda else {})
    train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform_train)
    if ((args.val_split > 0) and (args.val_split < 1)):
        # Carve a random validation split out of the training set.
        val_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform_train)
        num_train = len(train_dataset)
        indices = list(range(num_train))
        split = int(np.floor((args.val_split * num_train)))
        np.random.shuffle(indices)
        (train_idx, val_idx) = (indices[split:], indices[:split])
        train_sampler = SubsetRandomSampler(train_idx)
        val_sampler = SubsetRandomSampler(val_idx)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, sampler=val_sampler, **kwargs)
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), len(val_loader.sampler)))
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = None
        print('{} train and {} validation datapoints.'.format(len(train_loader.sampler), 0))
    test_dataset = datasets.MNIST('./data', train=False, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    print('{} test datapoints.\n'.format(len(test_loader.sampler)))
    # --- model ------------------------------------------------------------
    (in_features, out_features) = ((28 * 28), 10)
    if (args.model == 'MLP_CL'):
        num_units = 100
        model = MLP_CL_h100(in_features, out_features, num_units, eps=0.0001, momentum=args.bnmomentum, batch_affine=(args.bn_affine == 1))
    else:
        raise ValueError('Please select a network out of {MLP, BinaryConnect, BinaryNet}')
    print(model)
    model = model.to(args.device)
    # --- optimizer and loss -----------------------------------------------
    if (args.optim == 'Adam'):
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif (args.optim == 'BayesBiNN'):
        # trainset_scale lets the variational objective pretend the training
        # set is larger than it actually is.
        effective_trainsize = (len(train_loader.sampler) * args.trainset_scale)
        optimizer = BayesBiNN(model, lamda_init=args.lamda, lamda_std=args.lamda_std, temperature=args.temperature, train_set_size=effective_trainsize, lr=args.lr, betas=args.momentum, num_samples=args.train_samples, reweight=args.kl_reweight)
    if (args.criterion == 'square-hinge'):
        criterion = SquaredHingeLoss()
    elif (args.criterion == 'cross-entropy'):
        criterion = nn.CrossEntropyLoss()
    else:
        raise ValueError('Please select loss criterion in {square-hinge, cross-entropy}')
    # --- continual-learning loop over tasks -------------------------------
    start = time.time()
    # One fixed pixel permutation per task (permuted-MNIST protocol).
    permute_list = []
    for task in range(args.num_tasks):
        permute_list.append(torch.Tensor(np.random.permutation(784).astype(np.float64)).long())
    test_results_save = []
    test_results_record_save = []
    for task in range(args.num_tasks):
        print('')
        print(('Current Training task %d' % task))
        if (not (task == 0)):
            # Posterior of the previous task becomes the prior of the next.
            optimizer.state['prior_lamda'] = optimizer.state['lamda']
        permute_state = permute_list[task]
        # Reset the learning rate at the start of each task.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
        results = train_model_cl(args, model, [train_loader, test_loader], criterion, optimizer, permute_list, task)
        (model, optimizer, train_loss, train_acc, test_results_record) = results
        (test_loss, test_accuracy) = test_model_cl(args, model, test_loader, criterion, optimizer, permute=permute_state)
        print('test acc: {}'.format(test_accuracy))
        # Evaluate on every task seen so far to measure forgetting.
        test_acc_total = 0
        for test_id in range((task + 1)):
            permute_state_id = permute_list[test_id]
            if (test_loader is not None):
                (test_loss, test_accuracy) = test_model_cl(args, model, test_loader, criterion, optimizer, permute=permute_state_id)
                test_acc_total += test_accuracy
                print(('## Individual Task[%d], Test Loss: %f & Test Accuracy: %f' % (test_id, test_loss, test_accuracy)))
        print('')
        test_acc_average = (test_acc_total / (task + 1))
        print(('## After Task[%d],Averaged Test Accuracy: %f' % (task, test_acc_average)))
        test_results_save.append(test_acc_average)
        test_results_record_save.append(test_results_record)
    # --- save results -----------------------------------------------------
    save_train_history_CL(args, test_results_save)
    array_record = np.array(test_results_record_save)
    np.save('CL_test_record_seed0.npy', array_record)
    print(test_results_save)
    time_total = timeSince(start)
    print('Task completed in {:.0f}m {:.0f}s'.format((time_total // 60), (time_total % 60)))
class TestCreationConfiguration():
    """Tuning knobs for random test-case generation.

    NOTE(review): the annotated class-level defaults suggest this was meant
    to be a @dataclass — confirm whether the decorator was dropped.
    """
    # Recursion depth and value-size limits for generated inputs.
    max_recursion: int = 10
    max_delta: int = 20
    max_int: int = 2048
    string_length: int = 20
    bytes_length: int = 20
    collection_size: int = 5
    # Probability of reusing an existing value instead of creating a new one.
    primitive_reuse_probability: float = 0.5
    object_reuse_probability: float = 0.9
    # Relative weights for choosing a parameter-type source.
    none_weight: float = 1
    any_weight: float = 5
    original_type_weight: float = 5
    type_tracing_weight: float = 10
    type4py_weight: float = 10
    # Number of type-tracing guesses to keep per parameter.
    type_tracing_kept_guesses: int = 2
    # Probabilities controlling how parameter types are transformed.
    wrap_var_param_type_probability: float = 0.7
    negate_type: float = 0.1
    skip_optional_parameter_probability: float = 0.7
    # Statement-insertion behaviour during test-case construction.
    max_attempts: int = 1000
    insertion_uut: float = 0.5
    max_size: int = 100
    use_random_object_for_call: float = 0.1
# BUG FIX: _fl_task(...) returns a decorator; as a bare statement its result
# was discarded and train was never registered as the federated-learning
# task. It must decorate the function.
@_fl_task(model='model', data_loader='train_loader', device='device', optimizer='optimizer')
def train(model, train_loader, device, optimizer):
    """One local training epoch on the Habana (HPU) device.

    Returns:
        dict with the mean cross-entropy loss over the epoch under
        key 'train_loss'.
    """
    # NOTE(review): the device argument is overridden to 'hpu' — confirm
    # whether the mapped 'device' parameter should be honored instead.
    device = 'hpu'
    total_loss = []
    train_loader = tqdm.tqdm(train_loader, desc='train')
    model.train()
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    for (inputs, targets) in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs.to(device))
        targets = torch.squeeze(targets, 1).long().to(device)
        loss = criterion(outputs, targets)
        total_loss.append(loss.item())
        loss.backward()
        # mark_step() flushes the accumulated HPU graph around the optimizer
        # update (Habana lazy-mode requirement).
        htcore.mark_step()
        optimizer.step()
        htcore.mark_step()
    return {'train_loss': np.mean(total_loss)}
class IndexedFileWriter(object):
    """Append-only record writer with a companion ``.index`` file holding a
    little-endian uint64 byte offset per appended record.

    Improvement: supports use as a context manager so both files are closed
    even when an append raises (previously they leaked on error).
    """

    def __init__(self, path):
        self.f = open(path, 'wb')
        self.index_f = open((path + '.index'), 'wb')

    def append(self, record):
        """Write ``record`` (bytes) and record its starting byte offset."""
        offset = self.f.tell()
        self.f.write(record)
        # '<Q' = little-endian unsigned 64-bit offset.
        self.index_f.write(struct.pack('<Q', offset))

    def close(self):
        """Close both the data file and the index file."""
        self.f.close()
        self.index_f.close()

    # Context-manager support (new; backward compatible with explicit close()).
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False
class SPTokenizer(Tokenizer):
    """Tokenizer backed by a SentencePiece model loaded from disk."""

    def __init__(self, model_path) -> None:
        processor = spm.SentencePieceProcessor()
        processor.Load(model_path)
        self.sp = processor

    def encode(self, text):
        """Split ``text`` into SentencePiece pieces (subword strings)."""
        return self.sp.EncodeAsPieces(text)
def weights_init(m):
    """Initialize module ``m`` by class name (intended for ``model.apply``).

    Dispatch is substring-based on the class name, so the branch order
    matters: 'AdaptiveEmbedding' must be checked before the generic
    'Embedding'. Relies on module-level helpers ``init_weight`` /
    ``init_bias`` and the global ``args`` for std-dev settings.
    """
    classname = m.__class__.__name__
    if (classname.find('Linear') != (- 1)):
        if (hasattr(m, 'weight') and (m.weight is not None)):
            init_weight(m.weight)
        if (hasattr(m, 'bias') and (m.bias is not None)):
            init_bias(m.bias)
    elif (classname.find('AdaptiveEmbedding') != (- 1)):
        # Per-cluster projection matrices get a normal init with proj std.
        if hasattr(m, 'emb_projs'):
            for i in range(len(m.emb_projs)):
                if (m.emb_projs[i] is not None):
                    nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std)
    elif (classname.find('Embedding') != (- 1)):
        if hasattr(m, 'weight'):
            init_weight(m.weight)
    elif (classname.find('ProjectedAdaptiveLogSoftmax') != (- 1)):
        if (hasattr(m, 'cluster_weight') and (m.cluster_weight is not None)):
            init_weight(m.cluster_weight)
        if (hasattr(m, 'cluster_bias') and (m.cluster_bias is not None)):
            init_bias(m.cluster_bias)
        if hasattr(m, 'out_projs'):
            for i in range(len(m.out_projs)):
                if (m.out_projs[i] is not None):
                    nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std)
    elif (classname.find('LayerNorm') != (- 1)):
        # LayerNorm scale is initialized around 1.0 rather than 0.
        if hasattr(m, 'weight'):
            nn.init.normal_(m.weight, 1.0, args.init_std)
        if (hasattr(m, 'bias') and (m.bias is not None)):
            init_bias(m.bias)
    elif (classname.find('TransformerLM') != (- 1)):
        # Relative-position embeddings / biases on the top-level LM module.
        if hasattr(m, 'r_emb'):
            init_weight(m.r_emb)
        if hasattr(m, 'r_w_bias'):
            init_weight(m.r_w_bias)
        if hasattr(m, 'r_r_bias'):
            init_weight(m.r_r_bias)
        if hasattr(m, 'r_bias'):
            init_bias(m.r_bias)
def get_contact_map_paths(names, images_root='data/SCOPe/pdbstyle-2.06/'):
    """Resolve SCOPe domain names to their contact-map PNG paths.

    The lookup key is the first 7 characters of each PNG's basename.
    Raises KeyError if a name (even after the 'd'-prefix fallback) has
    no contact map under ``images_root``.
    """
    cmap_dict = {}
    for cmap_path in glob.glob(images_root + '*/*.png'):
        cmap_dict[os.path.basename(cmap_path)[:7]] = cmap_path
    paths = []
    for name in names:
        # Some entries use a 'd' prefix in place of the first character.
        key = name if name in cmap_dict else ('d' + name[1:])
        paths.append(cmap_dict[key])
    return paths
def check_lemmas(train_file):
    """Return True if the CoNLL-style file has a word whose lemma differs
    from its surface form (i.e. lemmatization is non-trivial).

    Skips blank lines, comment lines starting with '#', lines with fewer
    than three tab-separated columns, and placeholder lemmas ('_' / '-').
    """
    with open(train_file) as fin:
        for line in fin:
            line = line.strip()
            if (not line) or line.startswith('#'):
                continue
            pieces = line.split('\t')
            # Guard against malformed lines with too few columns, which
            # previously raised IndexError.
            if len(pieces) < 3:
                continue
            word = pieces[1].lower().strip()
            lemma = pieces[2].lower().strip()
            if (not lemma) or (lemma == '_') or (lemma == '-'):
                continue
            if word == lemma:
                continue
            return True
    return False
def get_args_and_hdf5_file(output_mode, config_file_index: int):
    """Build the volnet training command line for one output-mode/config combo.

    Returns:
        (parameters, hdf5_file): an argv list suitable for subprocess
        execution, and the path of the HDF5 stats file the run will write.

    Relies on module-level constants: CONFIG_FILES, VOLUME_FILES,
    VOLUME_TIMESTEP, BEST_NETWORK, BEST_ACTIVATION, GRID_RESOLUTION,
    GRID_CHANNELS.
    """
    output_name = ('run_%s_%d' % (output_mode, config_file_index))
    # Single keyframe: train/val/test all use the same timestep slice.
    parameters = [sys.executable, 'volnet/train_volnet.py', CONFIG_FILES[config_file_index], '--volume_filenames', VOLUME_FILES, '--time_keyframes', f'{VOLUME_TIMESTEP}:{(VOLUME_TIMESTEP + 1)}:1', '--time_train', f'{VOLUME_TIMESTEP}:{(VOLUME_TIMESTEP + 1)}:1', '--time_val', f'{VOLUME_TIMESTEP}:{(VOLUME_TIMESTEP + 1)}:1', '--train:mode', 'world', '--train:samples', '256**3', '--train:batchsize', '64*64*128', '--train:sampler_importance', '0.01', '--val:copy_and_split', '--outputmode', ('%s:direct' % output_mode), '--lossmode', ('%s' % output_mode), '-l1', '1', '--lr_step', '100', '-i', '200', '--fouriercount', str(((BEST_NETWORK[0] - 4) // 2)), '--fourierstd', '1.0', '--activation', BEST_ACTIVATION, '--layers', ':'.join(([str(BEST_NETWORK[0])] * (BEST_NETWORK[1] - 1))), '--volumetric_features_resolution', str(GRID_RESOLUTION), '--volumetric_features_channels', str(GRID_CHANNELS), '--logdir', 'volnet/results/eval_world_DensityVsColorGrid/log', '--modeldir', 'volnet/results/eval_world_DensityVsColorGrid/model', '--hdf5dir', 'volnet/results/eval_world_DensityVsColorGrid/hdf5', '--name', output_name, '--save_frequency', '50']
    hdf5_file = (('volnet/results/eval_world_DensityVsColorGrid/hdf5/' + output_name) + '.hdf5')
    return (parameters, hdf5_file)
class My_Dataset():
    """Image-folder dataset whose label is the image's parent directory name.

    `p` is presumed to be a `pathlib.Path` alias imported at module level --
    TODO confirm.
    """

    def __init__(self, folder, transform=None, int_class=False):
        # Collect all image files (common extensions, both cases), sorted.
        self.img_paths = np.asarray(sorted([str(i) for i in sorted(p(folder).glob('**/*')) if (i.suffix in ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.tiff', '.TIFF', '.bmp', '.BMP', '.gif', '.GIF'])]))
        # Class names are the sibling directories two levels above the first image.
        self.classes = sorted([i.name for i in p(self.img_paths[0]).parents[1].iterdir() if i.is_dir()])
        self.transform = transform
        # Map class name -> integer label (stored as 0-d torch tensors).
        self.dict = dict(zip(self.classes, torch.arange(len(self.classes))))

    def __getitem__(self, index):
        img_path = self.img_paths[index]
        img = Image.open(img_path)
        label = self.dict[p(img_path).parts[(- 2)]]
        # Idiomatic None check (was `self.transform != None`).
        if self.transform is not None:
            img = self.transform(img)
        return (img, label)

    def __len__(self):
        return len(self.img_paths)

    def split_(self, val_ratio, transform=None):
        """Split off a validation subset per class; mutates self in place.

        Returns the new validation dataset. NOTE: seeds the global `random`
        module for reproducibility.
        """
        random.seed(10)
        (data_paths, data_num) = self.paths_in_every_class()
        self.img_paths = []
        new_set = deepcopy(self)
        new_set.img_paths = []
        new_set.transform = transform
        for (class_name, paths) in data_paths.items():
            random.shuffle(paths)
            sub_num = int((data_num[class_name] * val_ratio))
            new_set.img_paths.extend(paths[:sub_num])
            self.img_paths.extend(paths[sub_num:])
        self.img_paths = np.asarray(sorted(self.img_paths))
        new_set.img_paths = np.asarray(sorted(new_set.img_paths))
        return new_set

    def paths_in_every_class(self):
        """Group image paths by class name; also return per-class counts."""
        data_path = {i: [] for i in self.classes}
        data_num = {i: 0 for i in self.classes}
        for path in self.img_paths:
            class_name = p(path).parts[(- 2)]
            data_path[class_name].append(path)
            data_num[class_name] += 1
        return (data_path, data_num)
class BasicBlock(nn.Module):
    """ResNet-style basic block with a squeeze-and-excitation gate, using
    tanh in place of the usual ReLU/sigmoid activations."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != planes)):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes))
        # Squeeze-and-excitation bottleneck: 16x channel reduction.
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        # torch.tanh replaces the deprecated F.tanh (identical numerics).
        out = torch.tanh(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Global average pool to per-channel statistics for the SE gate.
        w = F.avg_pool2d(out, out.size(2))
        w = torch.tanh(self.fc1(w))
        w = torch.tanh(self.fc2(w))
        out = (out * w)
        out += self.shortcut(x)
        out = torch.tanh(out)
        return out
# NOTE(review): the two lines below look like pytest decorators that lost
# their '@pytest.mark.' prefix during extraction -- as written they are not
# valid statements; confirm against the original source.
.core
.usefixtures('full_pandas_dataset')
def test_get_encoder_empty_rules_list(full_pandas_dataset):
    """get_encoder should return None for features with no fitted encoding rules."""
    encoder = DatasetLabelEncoder()
    user_item_features = ['user_id', 'item_id']
    # Fit on a schema restricted to user/item ids only.
    dataset_for_fit = Dataset(feature_schema=get_features(full_pandas_dataset).subset(user_item_features), interactions=full_pandas_dataset['interactions'])
    encoder_from_get = encoder.fit(dataset_for_fit).get_encoder(['timestamp', 'rating'])
    assert (encoder_from_get is None)
class MapFreeModuleToOneStep(pAdicModuleIsomorphism):
    """Isomorphism mapping a free-module element to the codomain by coercing
    its coordinate list."""

    def _call_(self, x):
        return self.codomain()(list(x))

    def _call_with_args(self, x, args=(), kwds=None):
        # Avoid a mutable default argument; None means "no keyword args".
        if kwds is None:
            kwds = {}
        return self.codomain()(list(x), *args, **kwds)
class Model(object):
    """Small MNIST CNN (TF1-style): two conv+pool stages and two dense layers."""

    def __init__(self):
        self.image_size = 28
        self.num_channels = 1
        self.num_labels = 10
        self.W_conv1 = self._weight_variable([5, 5, 1, 32])
        self.b_conv1 = self._bias_variable([32])
        self.W_conv2 = self._weight_variable([5, 5, 32, 64])
        self.b_conv2 = self._bias_variable([64])
        self.W_fc1 = self._weight_variable([((7 * 7) * 64), 1024])
        self.b_fc1 = self._bias_variable([1024])
        self.W_fc2 = self._weight_variable([1024, 10])
        self.b_fc2 = self._bias_variable([10])

    def predict(self, x_image):
        """Return pre-softmax logits for a batch of 28x28x1 images."""
        h_conv1 = tf.nn.relu((self._conv2d(x_image, self.W_conv1) + self.b_conv1))
        h_pool1 = self._max_pool_2x2(h_conv1)
        h_conv2 = tf.nn.relu((self._conv2d(h_pool1, self.W_conv2) + self.b_conv2))
        h_pool2 = self._max_pool_2x2(h_conv2)
        h_pool2_flat = tf.reshape(h_pool2, [(- 1), ((7 * 7) * 64)])
        h_fc1 = tf.nn.relu((tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1))
        pre_softmax = (tf.matmul(h_fc1, self.W_fc2) + self.b_fc2)
        return pre_softmax

    # BUG FIX: the helpers below were defined without `self` but invoked as
    # instance methods (e.g. self._weight_variable(...)), which raised
    # TypeError; they now accept `self`.
    def _weight_variable(self, shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def _bias_variable(self, shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def _conv2d(self, x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def _max_pool_2x2(self, x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
class TestShapImage(unittest.TestCase):
    """End-to-end test for ShapImage: trains a small MNIST CNN in setUp,
    then checks that explanations can be produced and plotted."""

    def setUp(self) -> None:
        batch_size = 128
        num_classes = 10
        epochs = 10
        (img_rows, img_cols) = (28, 28)
        ((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data()
        # Reshape to the backend's expected channel layout.
        if (tf.keras.backend.image_data_format() == 'channels_first'):
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        # Wrap raw arrays in the project's Image container.
        self.x_train = Image(x_train.astype('float32'), batched=True)
        self.x_test = Image(x_test.astype('float32'), batched=True)
        # Scale to [0, 1] and restore the trailing channel axis.
        self.preprocess_func = (lambda x: np.expand_dims((x.to_numpy() / 255), axis=(- 1)))
        x_train = self.preprocess_func(self.x_train)
        x_test = self.preprocess_func(self.x_test)
        print('x_train shape:', x_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
        y_train = tf.keras.utils.to_categorical(y_train, num_classes)
        y_test = tf.keras.utils.to_categorical(y_test, num_classes)
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
        model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(tf.keras.layers.Dropout(0.1))
        model.add(tf.keras.layers.Flatten())
        model.add(tf.keras.layers.Dense(128, activation='relu'))
        model.add(tf.keras.layers.Dropout(0.1))
        model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
        model.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adadelta(), metrics=['accuracy'])
        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
        score = model.evaluate(x_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
        self.model = model

    def test_explain(self):
        # Explain a handful of test images and render the plot.
        explainer = ShapImage(model=self.model, preprocess_function=self.preprocess_func)
        explanations = explainer.explain(self.x_test[0:5])
        explanations.plot()
def _concatenate_with_negation(row):
ones = scipy.ones(row.shape)
if issparse(row):
return scipy.sparse.hstack((row, (ones - row)))
else:
return numpy.concatenate((row, (ones - row)), int((len(row.shape) != 1))) |
def pred_to_img(x, range):
    """Normalize prediction ``x`` into [0, 1] via ``range`` and render it.

    NOTE: the parameter name ``range`` shadows the builtin; it is kept
    unchanged for interface compatibility with existing callers.
    """
    (range_min, range_max) = range
    # Use out-of-place arithmetic so the caller's tensor/array is not
    # mutated (the original in-place -= and /= modified `x` for the caller).
    x = x - range_min
    if ((range_max - range_min) > 0):
        x = x / (range_max - range_min)
    return tensor_to_img(x)
# NOTE(review): '.core' below looks like a pytest marker decorator that lost
# its '@pytest.mark' prefix during extraction -- as written it is not a valid
# statement; confirm against the original source.
.core
def test_wrong_type():
    # KFolds should reject an unknown strategy name with ValueError.
    with pytest.raises(ValueError):
        next(KFolds(2, strategy='totally not query'))
def run_with(command, SAGE_BOOTSTRAP):
    """Re-run this script as a subprocess with SAGE_BOOTSTRAP set.

    Args:
        command: single CLI argument passed to the script.
        SAGE_BOOTSTRAP: value exported in the child's environment.

    Returns:
        (stdout, stderr) decoded as ASCII.
    """
    env = dict(os.environ)
    env['SAGE_BOOTSTRAP'] = SAGE_BOOTSTRAP
    # subprocess.run replaces the manual Popen/communicate pair.
    proc = subprocess.run([sys.executable, __file__, command], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return (proc.stdout.decode('ascii'), proc.stderr.decode('ascii'))
class LYTNetV2(nn.Module):
    """MobileNetV3-style network with two heads: a 5-way traffic-light
    classifier and a 4-value direction regressor."""

    def __init__(self, n_class=5, input_size=768, width_mult=1.0):
        super(LYTNetV2, self).__init__()
        input_channel = 16
        last_channel = 1280
        # Each entry: [kernel, expansion, out_channels, use_SE, nonlinearity, stride]
        mobile_setting = [[3, 16, 16, False, 'RE', 1], [3, 64, 24, False, 'RE', 2], [3, 72, 24, False, 'RE', 1], [5, 72, 40, True, 'RE', 2], [5, 120, 40, True, 'RE', 1], [3, 240, 80, False, 'HS', 2], [3, 200, 80, False, 'HS', 1], [3, 480, 112, True, 'HS', 1], [5, 672, 160, True, 'HS', 2], [5, 960, 160, True, 'HS', 1], [3, 960, 320, False, 'RE', 1]]
        assert ((input_size % 32) == 0)
        self.last_channel = (make_divisible((last_channel * width_mult)) if (width_mult > 1.0) else last_channel)
        self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]
        for (k, exp, c, se, nl, s) in mobile_setting:
            output_channel = make_divisible((c * width_mult))
            exp_channel = make_divisible((exp * width_mult))
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel
        last_conv = make_divisible((960 * width_mult))
        self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        self.features.append(nn.AvgPool2d(12, 9))
        self.features.append(Hswish(inplace=True))
        self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        self.features.append(Hswish(inplace=True))
        self.features = nn.Sequential(*self.features, nn.Dropout(0.1))
        # dim=1 made explicit: nn.Softmax() without dim is deprecated and its
        # implicitly inferred dim for 2-D input is 1 anyway.
        self.light_classifier = nn.Sequential(nn.Linear(self.last_channel, 5), nn.Softmax(dim=1))
        self.direction_regression = nn.Sequential(nn.Linear(self.last_channel, 4))
        self._initialize_weights()

    def forward(self, x):
        """Return (class probabilities, direction regression) for a batch."""
        x = self.features(x)
        x = x.view(x.size(0), (- 1))
        x1 = self.light_classifier(x)
        x2 = self.direction_regression(x)
        return (x1, x2)

    def _initialize_weights(self):
        # Xavier for convs, unit-scale for batchnorms, small normal for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (m.bias is not None):
                    m.bias.data.zero_()
                nn.init.xavier_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
class pAdicRingGeneric(pAdicGeneric, sage.rings.abc.pAdicRing):
    """Generic p-adic ring: a local (non-field) ring of Krull dimension 1."""

    def is_field(self, proof=True):
        # A p-adic *ring* is never a field (its fraction field is).
        return False

    def krull_dimension(self):
        return 1

    def _xgcd_univariate_polynomial(self, f, g):
        """Extended gcd of polynomials over this ring via the fraction field.

        Computes the xgcd over the fraction field, then clears denominators
        by the lcm so the result lives over the base ring again.
        """
        from sage.misc.stopgap import stopgap
        stopgap('Extended gcd computations over p-adic fields are performed using the standard Euclidean algorithm which might produce mathematically incorrect results in some cases.', 13439)
        base_ring = f.base_ring()
        fracfield = base_ring.fraction_field()
        f_field = f.change_ring(fracfield)
        g_field = g.change_ring(fracfield)
        xgcd = fracfield._xgcd_univariate_polynomial(f_field, g_field)
        lcm = base_ring(1)
        # NOTE(review): the loop variable `f` below shadows the parameter `f`;
        # harmless here since the parameter is no longer needed, but fragile.
        for f in xgcd:
            for i in f:
                lcm = i.denominator().lcm(lcm)
        returnlst = []
        for f in xgcd:
            f *= lcm
            returnlst.append(f.change_ring(base_ring))
        return tuple(returnlst)

    def _gcd_univariate_polynomial(self, f, g):
        # The gcd is the first component of the xgcd triple.
        return self._xgcd_univariate_polynomial(f, g)[0]
def _decode_inference_indices(model, sess, output_infer, output_infer_summary_prefix, inference_indices, tgt_eos, subword_option):
    """Decode one sentence per index and write translations to a file.

    Also dumps per-sentence attention images when the model emits an
    inference summary. TF1-style: decoding is driven through ``sess``.
    """
    utils.print_out((' decoding to output %s , num sents %d.' % (output_infer, len(inference_indices))))
    start_time = time.time()
    with codecs.getwriter('utf-8')(tf.gfile.GFile(output_infer, mode='wb')) as trans_f:
        # Truncate/create the output file before appending translations.
        trans_f.write('')
        for decode_id in inference_indices:
            (nmt_outputs, infer_summary) = model.decode(sess)
            # Decoding is expected to yield exactly one sentence per step.
            assert (nmt_outputs.shape[0] == 1)
            translation = nmt_utils.get_translation(nmt_outputs, sent_id=0, tgt_eos=tgt_eos, subword_option=subword_option)
            if (infer_summary is not None):
                image_file = ((output_infer_summary_prefix + str(decode_id)) + '.png')
                utils.print_out((' save attention image to %s*' % image_file))
                image_summ = tf.Summary()
                image_summ.ParseFromString(infer_summary)
                with tf.gfile.GFile(image_file, mode='w') as img_f:
                    img_f.write(image_summ.value[0].image.encoded_image_string)
            trans_f.write(('%s\n' % translation))
            # `translation` appears to be bytes here (concatenated with b'\n').
            utils.print_out((translation + b'\n'))
    utils.print_time(' done', start_time)
class SGConv(torch_geometric.nn.SGConv):
    """SGConv variant that forces normalization on and applies dropout
    before the final linear layer."""

    def __init__(self, dropout=0.5, **kwargs):
        super(SGConv, self).__init__(**kwargs)
        self.normalize = True
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor=None) -> Tensor:
        # Reuse the K-hop propagated features if they were cached earlier.
        cache = self._cached_x
        if (cache is None):
            if self.normalize:
                # gcn_norm returns (edge_index, edge_weight) for dense indices
                # but a single SparseTensor for sparse ones.
                if isinstance(edge_index, Tensor):
                    (edge_index, edge_weight) = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), False, self.add_self_loops, dtype=x.dtype)
                elif isinstance(edge_index, SparseTensor):
                    edge_index = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), False, self.add_self_loops, dtype=x.dtype)
            # K rounds of feature propagation (simplified graph convolution).
            for k in range(self.K):
                x = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=None)
            if self.cached:
                self._cached_x = x
        else:
            x = cache
        # Dropout is applied to the propagated features, not the raw input.
        return self.lin(self.dropout(x))
class LinearModel(object):
    """Abstract interface for linear models trained via an INLP dataset handler."""

    def __init__(self):
        """The base class carries no state."""

    def train_model(self, dataset_handler: inlp_dataset_handler.DatasetHandler) -> float:
        """Train on the given dataset handler and return a score; subclasses override."""
        raise NotImplementedError

    def get_weights(self) -> np.ndarray:
        """Return the learned weight vector; subclasses override."""
        raise NotImplementedError
class DenseNet3(nn.Module):
    """DenseNet for 32x32 inputs: three dense blocks with transitions.

    depth -> layers-per-block: n = (depth - 4) // 3, halved again when
    bottleneck blocks are used.
    """

    def __init__(self, depth=100, num_classes=10, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0):
        super(DenseNet3, self).__init__()
        in_planes = (2 * growth_rate)
        # Integer division: the layer count is a whole number (was float `/`).
        n = ((depth - 4) // 3)
        if (bottleneck == True):
            n = (n // 2)
            block = BottleneckBlock
        else:
            block = BasicBlock
        self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int((in_planes + (n * growth_rate)))
        self.trans1 = TransitionBlock(in_planes, int(math.floor((in_planes * reduction))), dropRate=dropRate)
        in_planes = int(math.floor((in_planes * reduction)))
        self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int((in_planes + (n * growth_rate)))
        self.trans2 = TransitionBlock(in_planes, int(math.floor((in_planes * reduction))), dropRate=dropRate)
        in_planes = int(math.floor((in_planes * reduction)))
        self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int((in_planes + (n * growth_rate)))
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(in_planes, num_classes)
        self.in_planes = in_planes
        # Kaiming-style init for convs, unit batchnorm, zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.block1(out))
        out = self.trans2(self.block2(out))
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = self.avgpool(out)
        out = out.view((- 1), self.in_planes)
        # (removed unused local `feture` left over from feature extraction)
        return self.fc(out)
def test_no_tensorflow_metaclass_overwritten():
    """Guard that TensorFlow has not replaced our classes' metaclasses."""
    assert (LayerWithObservations.__bases__ == (TrackableLayer,))
    # TrackableLayer must stay a plain class, LayerWithObservations an ABC.
    assert (type(TrackableLayer) is type)
    assert (type(LayerWithObservations) is abc.ABCMeta)
def load_ranking(path, qrels=None):
    """Parse a ranking file into {qid: [pid, ...]} preserving rank order.

    Supports 4-column lines (qid pid ...) and 6-column lines (qid _ pid ...).
    If ``qrels`` is given, queries absent from it are skipped.

    Raises:
        IOError: if a line cannot be parsed (original error is chained).
    """
    with open(path, 'r') as file:
        qid_to_ranked_candidate_passages = {}
        for line in file:
            try:
                line = line.strip().split()
                if (qrels is not None) and (line[0] not in qrels):
                    continue
                if (len(line) == 4):
                    qid = line[0]
                    pid = line[1].strip()
                if (len(line) == 6):
                    qid = line[0]
                    pid = line[2].strip()
                # setdefault replaces the explicit membership check.
                qid_to_ranked_candidate_passages.setdefault(qid, []).append(pid)
            except Exception as err:
                # Chain the original exception so the root cause is kept.
                raise IOError(('"%s" is not valid format' % line)) from err
    return qid_to_ranked_candidate_passages
class AugmentedArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that can source argv from ``self.custom_command`` and
    auto-configures optional-value boolean flags declared via util.str2bool."""

    def parse_args(self, args=None, namespace=None):
        print('parsing args...')
        custom = getattr(self, 'custom_command', None)
        if (args is None) and (custom is not None):
            print('using custom command')
            print(custom)
            # Drop the leading "<interpreter> <script>" tokens.
            args = shlex.split(custom)[2:]
        return super().parse_args(args, namespace)

    def parse_known_args(self, args=None, namespace=None):
        custom = getattr(self, 'custom_command', None)
        if (args is None) and (custom is not None):
            args = shlex.split(custom)[2:]
        return super().parse_known_args(args, namespace)

    def add_argument(self, *args, **kwargs):
        # str2bool flags become optional-value flags that default to True
        # when passed without a value.
        if ('type' in kwargs) and (kwargs['type'] == util.str2bool):
            kwargs.setdefault('nargs', '?')
            kwargs.setdefault('const', True)
        super().add_argument(*args, **kwargs)
class Errors(object):
    """Error accumulator for structured comparison checks.

    ``check*`` methods record mismatches and keep going; ``require*``
    methods record and short-circuit by raising a per-instance
    ``ShortCircuit`` exception, which ``recover()`` can swallow. When used
    as a context manager, ``__exit__`` reports all accumulated errors as a
    single AssertionError.
    """

    def __init__(self, msg, rtol=0.001, atol=1e-05):
        self.msg = msg
        self.errors = []
        self.context = []
        self.rtol = rtol
        self.atol = atol

        # Per-instance exception class so nested Errors instances do not
        # swallow each other's short circuits.
        class ShortCircuit(Exception):
            pass
        self.exc_class = ShortCircuit

    def requireAlmostEqual(self, x, y, msg=None):
        """Check near-equality; short-circuit on failure."""
        self.almostEqualAndThen(x, y, msg, self.failWith)

    def checkAlmostEqual(self, x, y, msg=None):
        """Check near-equality; record failure and continue."""
        self.almostEqualAndThen(x, y, msg, self.addErr)

    def almostEqualAndThen(self, x, y, msg, k):
        """Compare numpy arrays within rtol/atol; call ``k(message)`` on mismatch.

        BUG FIX: a stray bare ``raise`` previously made the ``k(...)`` call
        unreachable, so failures escaped as AssertionError instead of being
        routed through the accumulator / short-circuit machinery.
        """
        if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)):
            try:
                np.testing.assert_allclose(x, y, rtol=self.rtol, atol=self.atol, equal_nan=True, verbose=True)
            except AssertionError as e:
                k('{}{}'.format(colonize(msg), str(e).lstrip()))
        else:
            raise RuntimeError('Unsupported almost equal test')

    def requireEqual(self, x, y, msg=None):
        """Check equality; short-circuit on failure."""
        self.equalAndThen(x, y, msg, self.failWith)

    def checkEqual(self, x, y, msg=None):
        """Check equality; record failure and continue."""
        self.equalAndThen(x, y, msg, self.addErr)

    def equalAndThen(self, x, y, msg, k):
        """Structured equality check; call ``k(message)`` on mismatch."""
        if (isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto)):
            # Compare names, then the embedded tensor contents.
            self.equalAndThen(x.name, y.name, msg, k)
            t1 = onnx.numpy_helper.to_array(x)
            t2 = onnx.numpy_helper.to_array(y)
            new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name)
            self.equalAndThen(t1, t2, new_msg, k)
        elif (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)):
            try:
                np.testing.assert_equal(x, y)
            except AssertionError as e:
                # BUG FIX: removed stray `raise` that bypassed the accumulator.
                k('{}{}'.format(colonize(msg, ': '), str(e).lstrip()))
        elif (x != y):
            sx = str(x)
            sy = str(y)
            if ((len(sx) > 40) or (len(sy) > 40) or ('\n' in sx) or ('\n' in sy)):
                l = ('=' * 50)
                k('\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}'.format(colonize(msg, ':\n'), l, sx, l, l, sy, l))
            else:
                k('{}{} != {}'.format(colonize(msg), sx, sy))

    def requireMultiLineEqual(self, x, y, msg=None):
        """Check multi-line string equality; short-circuit on failure."""
        self.multiLineEqualAndThen(x, y, msg, self.failWith)

    def multiLineEqualAndThen(self, x, y, msg, k):
        """Diff multi-line strings; call ``k`` with a unified diff on mismatch."""
        if (msg is None):
            msg = 'Strings are not equal'
        if (x != y):
            diff = difflib.ndiff(x.splitlines(True), y.splitlines(True))
            k('{}{}'.format(colonize(msg, ':\n\n'), ''.join(diff)))

    def addErr(self, msg):
        """Record an error message, annotated with the current context stack."""
        # BUG FIX: the annotated message is now built in msg_w_ctx and
        # appended (previously msg_w_ctx was assigned but never used).
        msg_w_ctx = msg
        for c in reversed(self.context):
            msg_w_ctx += ('\n\n  * ' + '\n    '.join(c.splitlines()))
        self.errors.append(msg_w_ctx)

    def fail(self):
        """Short-circuit the current check run."""
        raise self.exc_class()

    def failWith(self, msg):
        """Record ``msg`` then short-circuit."""
        self.addErr(msg)
        self.fail()

    def failIfErrs(self):
        """Short-circuit if any errors have been recorded so far."""
        if self.errors:
            self.fail()

    def recover(self):
        """Context manager that swallows this instance's ShortCircuit."""
        parent_self = self

        class Recover(object):
            def __enter__(self):
                pass

            def __exit__(self, exc_type, exc_value, traceback):
                if (exc_type == parent_self.exc_class):
                    return True
        return Recover()

    def addErrCtxt(self, msg):
        """Context manager pushing ``msg`` onto the error-context stack."""
        parent_self = self

        class AddContext(object):
            def __enter__(self):
                parent_self.context.append(msg)

            def __exit__(self, exc_type, exc_value, traceback):
                parent_self.context.pop()
        return AddContext()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Report everything accumulated as one AssertionError.
        if self.errors:
            errors_msg = '\n\n'.join(map((lambda x: ('ERROR: ' + x)), self.errors))
            final_msg = '{}\n{}\n{}'.format(self.msg, ('-' * 70), errors_msg)
            raise AssertionError(final_msg)
        if (exc_type == self.exc_class):
            raise RuntimeError('ShortCircuit was raised, but no errors were recorded')
class TransformerDecoderLayer(nn.Module):
    """Fairseq-style transformer decoder layer: self-attention, optional
    encoder-decoder attention, and a feed-forward block, each with residual
    connections and (pre- or post-) layer norm. Extended here with optional
    "plug-in" past key/value state forwarded into the attention modules."""

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.plug_in_dec_self_attn = args.plug_in_dec_self_attn
        self.embed_dim = args.decoder_embed_dim
        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        self.quant_noise = getattr(args, 'quant_noise_pq', 0)
        self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8)
        self.cross_self_attention = getattr(args, 'cross_self_attention', False)
        self.self_attn = self.build_self_attention(self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn)
        self.activation_fn = utils.get_activation_fn(activation=(str(args.activation_fn) if (getattr(args, 'activation_fn', None) is not None) else 'relu'))
        activation_dropout_p = getattr(args, 'activation_dropout', 0)
        if (activation_dropout_p == 0):
            # Legacy fallback name for the FFN activation dropout.
            activation_dropout_p = getattr(args, 'relu_dropout', 0)
        self.activation_dropout_module = FairseqDropout(float(activation_dropout_p), module_name=self.__class__.__name__)
        self.normalize_before = args.decoder_normalize_before
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.fc1 = self.build_fc1(self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.fc2 = self.build_fc2(args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not getattr(args, 'cross_self_attention', False)), q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size)

    def build_encoder_attention(self, embed_dim, args):
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size)

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False, past_key: Optional[Tensor]=None, past_value: Optional[Tensor]=None, past_key_padding_mask: Optional[torch.Tensor]=None, past_kv_forward: str='none'):
        """Run one decoder layer; returns (x, attn, None)."""
        if need_head_weights:
            need_attn = True
        # --- self-attention sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
        y = x
        if self.plug_in_dec_self_attn:
            # NOTE(review): `past_key=past_value` below looks like a typo --
            # the encoder-attention call further down passes
            # past_key=past_key; confirm whether past_key was intended here.
            (x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, past_key=past_value, past_value=past_value, past_key_padding_mask=past_key_padding_mask, past_kv_forward=past_kv_forward)
        else:
            (x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask)
        x = self.dropout_module(x)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.self_attn_layer_norm(x)
        # --- encoder-decoder attention sub-layer (if present) ---
        if (self.encoder_attn is not None):
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            (x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights, past_key=past_key, past_value=past_value, past_key_padding_mask=past_key_padding_mask, past_kv_forward=past_kv_forward)
            x = self.dropout_module(x)
            x = (residual + x)
            if (not self.normalize_before):
                x = self.encoder_attn_layer_norm(x)
        # --- position-wise feed-forward sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = (residual + x)
        if (not self.normalize_before):
            x = self.final_layer_norm(x)
        return (x, attn, None)

    def make_generation_fast_(self, need_attn: bool=False, **kwargs):
        self.need_attn = need_attn
def show_hsv_equalized(directory, fileName):
    """Play a video with a histogram-equalized V channel and log V statistics.

    Writes ``<directory><fileName>.csv`` with the per-frame mean/std of the
    V channel, then returns the stats DataFrame. Returns None if playback
    was aborted with 'q' or the video could not be opened.
    """
    cap = cv2.VideoCapture((directory + fileName))
    frameNumber = 1
    frameLuminosityInfo = {}
    result = None
    while cap.isOpened():
        (ret, frame) = cap.read()
        if (frame is None):
            # End of stream: persist the stats and stop.
            # BUG FIX: the original returned here without releasing the
            # capture/windows, and had an unreachable `break` after `return`.
            result = pd.DataFrame(frameLuminosityInfo)
            result.to_csv(((directory + fileName) + '.csv'))
            break
        blur_frame = cv2.medianBlur(frame, 3)
        (H, S, V) = cv2.split(cv2.cvtColor(blur_frame, cv2.COLOR_BGR2HSV))
        eq_V = cv2.equalizeHist(V)
        eq_image = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2RGB)
        frameLuminosityInfo[frameNumber] = {'meanValue': cv2.meanStdDev(V)[0][0][0], 'stdValue': cv2.meanStdDev(V)[1][0][0]}
        cv2.imshow('frame', eq_image)
        if ((cv2.waitKey(1) & 255) == ord('q')):
            break
        frameNumber = (frameNumber + 1)
    cap.release()
    cv2.destroyAllWindows()
    return result
class SRS_GoogLeNet(nn.Module):
    """GoogLeNet variant for 100-class classification using the SRS
    activation in the stem."""

    def __init__(self):
        super(SRS_GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), SRS())
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 100)

    def forward(self, x):
        """Run stem, inception stages, pooling, and the final classifier."""
        out = self.pre_layers(x)
        stages = (self.a3, self.b3, self.maxpool,
                  self.a4, self.b4, self.c4, self.d4, self.e4, self.maxpool,
                  self.a5, self.b5, self.avgpool)
        for stage in stages:
            out = stage(out)
        out = out.view(out.size(0), (- 1))
        return self.linear(out)
def _test_typing(code, inject=False):
    """Run jedityper analysis on ``code`` written to a temp file.

    Returns (types, lines); ``lines`` is non-empty only when ``inject`` is
    True and contains the source with injected type annotations.
    """
    # Temporarily make TOOLS_DIR importable so jedityper can be loaded.
    sys.path.insert(0, TOOLS_DIR)
    try:
        import jedityper
    finally:
        sys.path.remove(TOOLS_DIR)
    lines = []
    with _tempfile(code) as f:
        types = jedityper.analyse(f.name)
        if inject:
            lines = jedityper.inject_types(f.name, types)
    return (types, lines)
def test_final_low_high_pred() -> None:
    """check_lower_upper_bounds must warn when lower bounds exceed upper bounds."""
    y_preds = np.array([[4, 3, 2], [3, 3, 3], [2, 3, 4]])
    # Deliberately inconsistent: first lower bound (4) is above its upper (3).
    y_pred_low = np.array([4, 3, 2])
    y_pred_up = np.array([3, 3, 3])
    with pytest.warns(UserWarning, match='WARNING: The predictions of .*'):
        check_lower_upper_bounds(y_preds, y_pred_low, y_pred_up)
def pytest_addoption(parser):
    """Register the suite's custom command-line flags with pytest."""
    boolean_flags = (
        ('--runupstream', 'run upstream tests'),
        ('--runslow', 'run slow tests'),
        ('--runcorpus', 'run tests with corpus path dependency'),
        ('--practice', 'for test scripts only for practice and not real test cases.'),
        ('--runextra', 'run tests with extra dependencies'),
        ('--fairseq', 'run tests with fairseq dependencies'),
    )
    for flag, help_text in boolean_flags:
        parser.addoption(flag, action='store_true', help=help_text)
    # Value-carrying option: names of upstream models to test.
    parser.addoption('--upstream_names', action='store')
def test_shuffle_players():
    """_shuffle_players must keep partners on opposite parities across seeds."""
    key = jax.random.PRNGKey(0)
    for i in range(100):
        (key, subkey) = jax.random.split(key)
        shuffled_players = _shuffle_players(subkey)
        # Truthy modulo => the paired seats differ by an odd amount,
        # i.e. seats 0/2 and 1/3 stay on opposite teams.
        assert ((shuffled_players[0] - shuffled_players[2]) % 2)
        assert ((shuffled_players[1] - shuffled_players[3]) % 2)
def integrity_check(model_tuned, hf_hub_name):
    """Verify a tuned model against the parameter-sum recorded on the HF Hub.

    Downloads 'model_sum.txt' from the given repo and compares it (within
    floating-point tolerance) to the sum of all parameters in the model's
    state dict.
    """
    param_total = sum((param.sum() for param in model_tuned.state_dict().values()))
    local_sum = param_total.item()
    reference_file = hf_hub_download(repo_id=hf_hub_name, filename='model_sum.txt')
    with open(reference_file, 'r') as fh:
        reference_sum = float(fh.read())
    return np.isclose(reference_sum, local_sum)
def main():
    """Build train/val CSV manifests pairing audio clips with frame dirs.

    Scans ``args.root_audio`` for wav files, keeps those whose extracted
    frame directory holds more than ~8 seconds of frames, then splits the
    surviving entries into train/val manifests under ``args.out_dir``.
    """
    args = parse_args()
    random.seed(args.seed)
    rows = utils.load_csv_text(args.csv_filename, True)
    # Map each clip id to its transcript text.
    label_map = {youtube_id: text for (youtube_id, _, text, _) in rows}
    infos = []
    for wav_path in args.root_audio.rglob('*.wav'):
        rel = wav_path.with_suffix('').relative_to(args.root_audio)
        frame_dir = args.root_frame / rel
        n_frames = len(list(frame_dir.rglob('*.jpg')))
        # Keep clips with more than 8 seconds' worth of frames.
        if n_frames > (args.fps * 8):
            label = label_map[wav_path.stem].replace(', ', ' ')
            # NOTE(review): '(unknown)' looks like a placeholder lost in
            # extraction (likely the wav path) — confirm against the
            # original script before regenerating manifests.
            infos.append(f'(unknown),{frame_dir},{n_frames},{label}')
    print(f'{len(infos)} audio/frames pairs found.')
    n_train = int(len(infos) * (1 - args.ratio))
    random.shuffle(infos)
    for split_name, subset in (('train', infos[:n_train]), ('val', infos[n_train:])):
        out_path = args.out_dir / f'{split_name}.csv'
        with open(out_path, 'w') as fh:
            for row in subset:
                fh.write(row + '\n')
        print(f'{len(subset)} items saved to (unknown).')
    print('Done!')
class Trainer(object):
    """Training-loop driver: gradient accumulation, LR decay, periodic
    logging, and checkpointing for an edge-detection network."""

    # All checkpoints are written under this directory.
    SAVE_DIR = './weights/hed_sklarge'

    def __init__(self, network, optimizer, dataloader, args):
        self.args = args
        self.network = network
        self.optimizer = optimizer
        self.dataloader = dataloader
        # BUGFIX: checkpoints are saved under SAVE_DIR, but only 'weights'
        # used to be created, so torch.save failed on the missing
        # 'hed_sklarge' subdirectory.  Create the full path up front.
        os.makedirs(self.SAVE_DIR, exist_ok=True)
        self.timeformat = '%Y-%m-%d %H:%M:%S'

    def train(self):
        """Run optimization from args.resume_iter to args.max_step,
        accumulating args.iter_size mini-batches per optimizer step."""
        lossAcc = 0.0
        self.network.train()
        dataiter = iter(self.dataloader)
        # When resuming, replay the LR schedule for completed steps.
        for _ in range(self.args.resume_iter // self.args.lr_step):
            self.adjustLR()
            self.showLR()
        for step in range(self.args.resume_iter, self.args.max_step):
            losses = []
            for _ in range(self.args.iter_size):
                try:
                    (data, target) = next(dataiter)
                except StopIteration:
                    # Epoch exhausted: restart the loader.
                    dataiter = iter(self.dataloader)
                    (data, target) = next(dataiter)
                (data, target) = (data.cuda(self.args.gpu_id), target.cuda(self.args.gpu_id))
                (data, target) = (Variable(data), Variable(target))
                loss = self.network(data, target)
                # BUGFIX: loss.data[0] only works on legacy (<0.4) PyTorch
                # 1-element losses; .item() is correct on all versions.
                loss_value = loss.item()
                if np.isnan(loss_value):
                    raise ValueError('loss is nan while training')
                losses.append(loss)
                lossAcc += loss_value
            # BUGFIX: torch.cat fails on 0-dim (scalar) losses; stack
            # handles both scalar and 1-element losses with the same mean.
            bLoss = torch.mean(torch.stack(losses))
            self.optimizer.zero_grad()
            bLoss.backward()
            self.optimizer.step()
            if (step > 0) and ((step % self.args.lr_step) == 0):
                self.adjustLR()
                self.showLR()
            if ((step + 1) % self.args.disp_interval) == 0:
                timestr = time.strftime(self.timeformat, time.localtime())
                print('{} iter={} loss={:<8.2f}'.format(timestr, (step + 1), ((lossAcc / self.args.disp_interval) / self.args.iter_size)))
                lossAcc = 0.0
            if ((step + 1) % self.args.save_interval) == 0:
                torch.save(self.network.state_dict(), '{}/{}_{}.pth'.format(self.SAVE_DIR, self.args.network, (step + 1)))
            # NOTE(review): this overwrites the 'latest' checkpoint on
            # every step (matching the original indentation) — confirm
            # that per-step saving is intended rather than post-loop.
            torch.save(self.network.state_dict(), '{}/{}.pth'.format(self.SAVE_DIR, self.args.network))

    def adjustLR(self):
        """Decay every parameter group's learning rate by args.lr_gamma."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] *= self.args.lr_gamma

    def showLR(self):
        """Print the current learning rate of each parameter group."""
        for param_group in self.optimizer.param_groups:
            print(param_group['lr'], end=' ')
        print('')
def make_folder(path, version):
    """Create the directory ``path/version`` if it does not already exist.

    Uses ``exist_ok=True`` instead of a check-then-create pair, so two
    concurrent callers cannot race between the existence test and the
    creation (the original pattern was TOCTOU-prone).
    """
    os.makedirs(os.path.join(path, version), exist_ok=True)
def get_filenames(data_root, task, sub_task, split=''):
    """Resolve dataset file paths for a CodeXGLUE-style task.

    Args:
        data_root: root directory holding all task folders.
        task: one of 'concode', 'summarize', 'refine', 'translate',
            'clone', 'defect'.
        sub_task: language/direction subfolder (used by summarize,
            refine, translate).
        split: 'train', 'dev', or 'test' to get a single path; any other
            value returns the (train, dev, test) triple.

    Returns:
        A single path string, or a (train_fn, dev_fn, test_fn) tuple.
        Paired-file tasks (refine, translate) return comma-joined
        source,target path pairs.

    Raises:
        ValueError: for an unrecognized task.  (Previously an unknown
        task fell through and crashed with UnboundLocalError.)
    """
    if task == 'concode':
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.json'.format(data_dir)
        dev_fn = '{}/dev.json'.format(data_dir)
        test_fn = '{}/test.json'.format(data_dir)
    elif task == 'summarize':
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    elif task == 'refine':
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(data_dir, data_dir)
        dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(data_dir, data_dir)
        test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(data_dir, data_dir)
    elif task == 'translate':
        data_dir = '{}/{}'.format(data_root, task)
        # File order encodes direction: source file first, target second.
        if sub_task == 'cs-java':
            train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(data_dir, data_dir)
        else:
            train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(data_dir, data_dir)
    elif task == 'clone':
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.txt'.format(data_dir)
        dev_fn = '{}/valid.txt'.format(data_dir)
        test_fn = '{}/test.txt'.format(data_dir)
    elif task == 'defect':
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    else:
        raise ValueError('Unknown task: {}'.format(task))
    if split == 'train':
        return train_fn
    elif split == 'dev':
        return dev_fn
    elif split == 'test':
        return test_fn
    else:
        return (train_fn, dev_fn, test_fn)
class UploadCommand(BaseUserCommand):
    """User command that uploads a file or a whole folder to S3, with an
    interactive confirmation per file."""

    def walk_dir(self, rel_path):
        """Recursively collect (absolute_path, relative_path) pairs for
        every file under *rel_path*."""
        entries: List[os.DirEntry] = list(os.scandir(rel_path))
        collected = [
            (os.path.join(os.getcwd(), entry.path), entry.path)
            for entry in entries
            if entry.is_file()
        ]
        for entry in entries:
            if entry.is_dir():
                collected += self.walk_dir(entry.path)
        return collected

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print('Not logged in')
            exit(1)
        local_path = os.path.abspath(self.args.path)
        if os.path.isdir(local_path):
            if self.args.filename is not None:
                raise ValueError('Cannot specify a filename override when uploading a folder.')
            files = self.walk_dir(os.path.basename(local_path))
        elif os.path.isfile(local_path):
            if self.args.filename is not None:
                filename = self.args.filename
            else:
                filename = os.path.basename(local_path)
            files = [(local_path, filename)]
        else:
            raise ValueError('Not a valid file or directory: {}'.format(local_path))
        # Ask for confirmation on every file before uploading anything.
        for (filepath, filename) in files:
            print('About to upload file {} to S3 under filename {}'.format(ANSI.bold(filepath), ANSI.bold(filename)))
            choice = input('Proceed? [Y/n] ').lower()
            if choice not in ('', 'y', 'yes'):
                print('Abort')
                exit()
        print(ANSI.bold('Uploading... This might take a while if files are large'))
        for (filepath, filename) in files:
            access_url = self._api.presign_and_upload(token=token, filename=filename, filepath=filepath)
            print('Your file now lives at:')
            print(access_url)
class PositionwiseFeedForwardNetwork(nn.Module):
    """Position-wise feed-forward block applied identically at every
    position: Linear -> Dropout -> ReLU -> Linear -> Dropout."""

    def __init__(self, d_model: int = 512, d_ff: int = 2048, dropout_p: float = 0.3) -> None:
        super(PositionwiseFeedForwardNetwork, self).__init__()
        # Same module order (hence the same state_dict keys and parameter
        # initialization order) as direct nn.Sequential construction.
        stack = [
            nn.Linear(d_model, d_ff),
            nn.Dropout(dropout_p),
            nn.ReLU(),
            nn.Linear(d_ff, d_model),
            nn.Dropout(dropout_p),
        ]
        self.feed_forward = nn.Sequential(*stack)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Run *inputs* through the feed-forward stack."""
        return self.feed_forward(inputs)
def add_overviews(rst_out, tile_size, verbose=False):
    """Build pyramid overviews on the open rasterio dataset *rst_out* and
    tag it with the overview resampling metadata."""
    max_level = rasterio.rio.overview.get_maximum_overview_level(*rst_out.shape, tile_size)
    # Power-of-two decimation factors up to the maximum level.
    factors = [2 ** level for level in range(1, max_level + 1)]
    if verbose:
        print(f'Adding pyramid overviews to raster {factors}')
    # NOTE(review): overviews are built with average resampling but the
    # tags advertise 'nearest'/'NEAREST' — confirm this mismatch is
    # intentional before trusting the tags downstream.
    rst_out.build_overviews(factors, rasterio.warp.Resampling.average)
    rst_out.update_tags(ns='rio_overview', resampling='nearest')
    tags = rst_out.tags()
    tags.update(OVR_RESAMPLING_ALG='NEAREST')
    rst_out.update_tags(**tags)
    # Re-assert per-band scales/offsets so they survive the rebuild.
    rst_out._set_all_scales([rst_out.scales[band - 1] for band in rst_out.indexes])
    rst_out._set_all_offsets([rst_out.offsets[band - 1] for band in rst_out.indexes])
def stackedunet128(output_stride='32'):
    """Factory for the 128-filter stacked U-Net ImageNet classifier."""
    config = dict(
        in_dim=512,
        start_planes=64,
        filters_base=128,
        num_classes=1000,
        depth=4,
        ost=output_stride,
    )
    return Stackedunet_imagenet(**config)
def register_Ns3RrpaaWifiManager_methods(root_module, cls):
    """Register ns3::RrpaaWifiManager constructors and methods on the
    PyBindGen class wrapper *cls*.

    Generated-style binding code: the registration order mirrors the
    wrapped C++ class, so keep it stable when editing.
    """
    ## Constructors: copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::RrpaaWifiManager const &', 'arg0')])
    cls.add_constructor([])
    ## Public API.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetHtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetVhtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetupMac', 'void', [param('ns3::Ptr< ns3::WifiMac > const', 'mac')], is_virtual=True)
    cls.add_method('SetupPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy > const', 'phy')], is_virtual=True)
    ## Private virtual overrides (rate-control hooks).
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoNeedRts', 'bool', [param('ns3::WifiRemoteStation *', 'st'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    return
class TestSegmentedImageDataset:
    """Placeholder suite for SegmentedImageDataset — every case is an
    empty stub awaiting a real implementation."""

    def test_init(self):
        """TODO: exercise dataset construction."""
        pass

    def test_len(self):
        """TODO: check __len__."""
        pass

    def test_get_item(self):
        """TODO: check __getitem__."""
        pass
def get_wav_files(tgt_split_path):
    """Return a sorted list of every .wav file under *tgt_split_path*,
    descending into subdirectories recursively.

    BUGFIX: the '**' wildcard only matches across directory levels when
    glob() is called with ``recursive=True``; without it '**' behaves
    like '*', silently missing files nested more than one level deep.
    """
    wavs_pattern = os.path.join(tgt_split_path, '**', '*.wav')
    return sorted(glob(wavs_pattern, recursive=True))
class ResnetCSNNoGC(Resnet3d101):
    """Resnet3d101 variant that swaps in the csn_ir encoder and defaults
    to a single no-global-context decoder."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None):
        if decoders is None:
            decoders = [Decoder3dNoGC()]
        print('Creating decoders {}'.format(decoders))
        super(ResnetCSNNoGC, self).__init__(tw, sample_size, e_dim, decoders)
        # Replace the base encoder after parent initialization.
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
def message(con=None, log=None, queue=None):
    """Emit console output and/or record a log entry.

    When *con* is given and *log* is the empty string, the log text is
    derived from the console text with color codes stripped.  Console
    printing is suppressed when the module-level ``quiet`` flag is set.
    Log text goes to *queue* when one is provided, otherwise it is
    appended to the module-level ``__prolog`` buffer.
    """
    if con and log == '':
        # Reuse the console text for the log, minus ANSI colors.
        log = sb.colors.strip(con)
    if con and not quiet:
        print(con, flush=True)
    if not log:
        return
    if queue:
        queue.put(log)
    else:
        __prolog.append(log)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Mixin that makes a MultiDict immutable: every multi-value mutating
    method is routed to ``is_immutable``, which is expected to raise.
    """
    def __reduce_ex__(self, protocol):
        # Pickle as (class, (list of (key, value) pairs including
        # duplicate keys,)) so multi-values survive a round-trip.
        return (type(self), (list(iteritems(self, multi=True)),))
    def _iter_hashitems(self):
        # Hash over all (key, value) pairs, duplicates included.
        return iteritems(self, multi=True)
    def add(self, key, value):
        is_immutable(self)
    def popitemlist(self):
        is_immutable(self)
    def poplist(self, key):
        is_immutable(self)
    def setlist(self, key, new_list):
        is_immutable(self)
    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
def _object_to_tensor(obj):
buffer = pickle.dumps(obj)
byte_storage = torch.ByteStorage.from_buffer(buffer)
byte_tensor = torch.ByteTensor(byte_storage)
local_size = torch.LongTensor([byte_tensor.numel()])
return (byte_tensor, local_size) |
# NOTE(review): the decorator names on the `run` command below were lost
# during extraction — the bare tuples are the argument lists of what were
# most likely @click.option(...) decorators (plus @click.command() and
# shared-args / pass-object decorators for `_args` / `_obj`).  The residue
# is preserved verbatim; restore the decorators from the original source
# before executing this file.
()
('--checkpoint-file', type=click.Path(exists=True))
('--data-dir', default='data/record', type=click.Path(exists=True))
('--doc-stride', default=128)
('--do-eval/--no-eval', default=True)
('--do-train/--no-train', default=True)
('--eval-batch-size', default=32)
('--max-query-length', default=90)
('--max-seq-length', default=512)
('--num-train-epochs', default=2.0)
('--seed', default=4)
('--train-batch-size', default=1)
_args
_obj
def run(common_args, **task_args):
    """Train and/or evaluate LukeForEntitySpanQA on an entity-span QA task.

    Merges the shared CLI options into the task-specific ones, extends the
    pretrained word/entity embedding tables with extra special tokens,
    optionally trains while tracking the best dev exact-match checkpoint,
    and finally evaluates, logging and saving all results.

    Returns a dict mapping metric names to values (empty on non-primary
    distributed ranks).
    """
    # Shared options override per-task defaults of the same name.
    task_args.update(common_args)
    args = Namespace(**task_args)
    set_seed(args.seed)
    args.experiment.log_parameters({p.name: getattr(args, p.name) for p in run.params})
    # Three special tokens are appended to the word vocabulary below.
    args.model_config.vocab_size += 3
    word_emb = args.model_weights['embeddings.word_embeddings.weight']
    # Seed each new token's embedding from an existing token's row.
    # NOTE(review): the highlight seed token is the empty string here —
    # possibly lost in extraction; confirm against the original source.
    highlight_emb = word_emb[args.tokenizer.convert_tokens_to_ids([''])[0]].unsqueeze(0)
    placeholder_emb = word_emb[args.tokenizer.convert_tokens_to_ids(['#'])[0]].unsqueeze(0)
    marker_emb = word_emb[args.tokenizer.convert_tokens_to_ids(['*'])[0]].unsqueeze(0)
    args.model_weights['embeddings.word_embeddings.weight'] = torch.cat([word_emb, highlight_emb, placeholder_emb, marker_emb])
    args.tokenizer.add_special_tokens(dict(additional_special_tokens=[HIGHLIGHT_TOKEN, PLACEHOLDER_TOKEN, ENTITY_MARKER_TOKEN]))
    # Shrink the entity table to two rows: the first row and [MASK].
    args.model_config.entity_vocab_size = 2
    entity_emb = args.model_weights['entity_embeddings.entity_embeddings.weight']
    mask_emb = entity_emb[args.entity_vocab[MASK_TOKEN]].unsqueeze(0)
    args.model_weights['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb[:1], mask_emb])
    results = {}
    if args.do_train:
        model = LukeForEntitySpanQA(args)
        # strict=False: the resized embedding tables no longer match the
        # checkpoint's shapes exactly.
        model.load_state_dict(args.model_weights, strict=False)
        model.to(args.device)
        (train_dataloader, _, _, _) = load_examples(args, 'train')
        num_train_steps_per_epoch = (len(train_dataloader) // args.gradient_accumulation_steps)
        num_train_steps = int((num_train_steps_per_epoch * args.num_train_epochs))
        # One-element lists so the nested callback can mutate them.
        best_dev_score = [(- 1)]
        best_weights = [None]
        def step_callback(model, global_step):
            # Run dev evaluation once per epoch, on the primary rank only.
            if (((global_step % num_train_steps_per_epoch) == 0) and (args.local_rank in (0, (- 1)))):
                epoch = int(((global_step / num_train_steps_per_epoch) - 1))
                dev_results = evaluate(args, model, fold='dev')
                args.experiment.log_metrics({f'dev_{k}_epoch{epoch}': v for (k, v) in dev_results.items()}, epoch=epoch)
                results.update({f'dev_{k}_epoch{epoch}': v for (k, v) in dev_results.items()})
                tqdm.write(('dev: ' + str(dev_results)))
                # Keep a CPU copy of the best-scoring weights seen so far.
                if (dev_results['exact_match'] > best_dev_score[0]):
                    if hasattr(model, 'module'):
                        best_weights[0] = {k: v.to('cpu').clone() for (k, v) in model.module.state_dict().items()}
                    else:
                        best_weights[0] = {k: v.to('cpu').clone() for (k, v) in model.state_dict().items()}
                    best_dev_score[0] = dev_results['exact_match']
                    results['best_epoch'] = epoch
                model.train()
        trainer = Trainer(args, model=model, dataloader=train_dataloader, num_train_steps=num_train_steps, step_callback=step_callback)
        trainer.train()
    if (args.do_train and (args.local_rank in (0, (- 1)))):
        logger.info('Saving the model checkpoint to %s', args.output_dir)
        torch.save(best_weights[0], os.path.join(args.output_dir, WEIGHTS_NAME))
    # Non-primary ranks stop here; only rank 0/-1 evaluates and reports.
    if (args.local_rank not in (0, (- 1))):
        return {}
    # Drop the training model before building the evaluation one.
    model = None
    torch.cuda.empty_cache()
    if args.do_eval:
        model = LukeForEntitySpanQA(args)
        if args.checkpoint_file:
            model.load_state_dict(torch.load(args.checkpoint_file, map_location='cpu'))
        else:
            model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME), map_location='cpu'))
        model.to(args.device)
        output_file = os.path.join(args.output_dir, 'predictions.json')
        results.update({f'dev_{k}': v for (k, v) in evaluate(args, model, fold='dev', output_file=output_file).items()})
    logger.info('Results: %s', json.dumps(results, indent=2, sort_keys=True))
    args.experiment.log_metrics(results)
    with open(os.path.join(args.output_dir, 'results.json'), 'w') as f:
        json.dump(results, f)
    return results
def test_pcpvt():
    """PCPVT backbone must emit four pyramid feature maps at strides
    4/8/16/32 with the configured embedding widths."""
    height = 224
    width = 224
    dummy = torch.randn((1, 3, height, width))
    model = PCPVT(embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True, depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], norm_after_stage=False)
    model.init_weights()
    features = model(dummy)
    # One output per stage; channel width grows as spatial size shrinks.
    assert features[0].shape == (1, 32, height // 4, width // 4)
    assert features[1].shape == (1, 64, height // 8, width // 8)
    assert features[2].shape == (1, 160, height // 16, width // 16)
    assert features[3].shape == (1, 256, height // 32, width // 32)
# NOTE(review): the three lines below are scraped dataset-viewer page
# chrome, not source code; safe to remove from this file.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.