code stringlengths 101 5.91M |
|---|
def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, overwrite_existing=False):
    """Run ensemble inference over a list of cases and export segmentations as .nii.gz.

    model: path to the trained model directory.
    list_of_lists: per-case lists of input image files.
    output_filenames: one output path per case (forced to the .nii.gz extension).
    folds: which cross-validation folds to ensemble.
    save_npz: also store the softmax probabilities next to each segmentation.
    num_threads_preprocessing / num_threads_nifti_save: worker counts for the
        preprocessing generator and the nifti-export pool.
    segs_from_prev_stage: optional per-case segmentations from a previous cascade stage.
    do_tta: enable test-time mirroring augmentation.
    overwrite_existing: when False, cases whose output file already exists are skipped.
    """
    assert len(list_of_lists) == len(output_filenames)
    if segs_from_prev_stage is not None:
        assert len(segs_from_prev_stage) == len(output_filenames)
    prman = Pool(num_threads_nifti_save)
    results = []
    # Normalize every output path: ensure the directory exists and the name ends in .nii.gz.
    cleaned_output_files = []
    for o in output_filenames:
        dr, f = os.path.split(o)
        if len(dr) > 0:
            maybe_mkdir_p(dr)
        if not f.endswith('.nii.gz'):
            f, _ = os.path.splitext(f)
            f = f + '.nii.gz'
        cleaned_output_files.append(join(dr, f))
    if not overwrite_existing:
        print('number of cases:', len(list_of_lists))
        # Keep only cases whose output file does not exist yet.
        not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
        cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
        list_of_lists = [list_of_lists[i] for i in not_done_idx]
        if segs_from_prev_stage is not None:
            segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
        print('number of cases that still need to be predicted:', len(cleaned_output_files))
    print('emptying cuda cache')
    torch.cuda.empty_cache()
    print('loading parameters for folds,', folds)
    trainer, params = load_model_and_checkpoint_files(model, folds)
    print('starting preprocessing generator')
    preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage)
    print('starting prediction...')
    for preprocessed in preprocessing:
        output_filename, (d, dct) = preprocessed
        if isinstance(d, str):
            # The preprocessing worker spilled the case to disk because it was large;
            # load it back and delete the temporary file.
            data = np.load(d)
            os.remove(d)
            d = data
        print('predicting', output_filename)
        # Average softmax over all fold checkpoints (ensembling).
        softmax = []
        for p in params:
            trainer.load_checkpoint_ram(p, False)
            softmax.append(trainer.predict_preprocessed_data_return_softmax(d, do_tta, 1, False, 1, trainer.data_aug_params['mirror_axes'], True, True, 2, trainer.patch_size, True)[None])
        softmax = np.vstack(softmax)
        softmax_mean = np.mean(softmax, 0)
        # Undo the axis transposition applied during preprocessing, if any.
        transpose_forward = trainer.plans.get('transpose_forward')
        if transpose_forward is not None:
            transpose_backward = trainer.plans.get('transpose_backward')
            softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward])
        npz_file = (output_filename[:-7] + '.npz') if save_npz else None
        # Python's multiprocessing cannot pickle objects larger than ~2 GB across a Pipe
        # (the pickled-length placeholder is too small). We work around this by writing
        # softmax_mean to a .npy file and passing the filename instead;
        # save_segmentation_nifti_from_softmax accepts either an ndarray or a filename.
        # BUGFIX: the threshold constant was corrupted to '.0 / 4' (always False);
        # restored to 2e9 bytes / 4 bytes-per-float32 with a 0.9 safety margin.
        # TODO(review): confirm constant against the upstream nnU-Net implementation.
        if np.prod(softmax_mean.shape) > (2e9 / 4) * 0.9:
            print('This output is too large for python process-process communication. Saving output temporarily to disk')
            np.save(output_filename[:-7] + '.npy', softmax_mean)
            softmax_mean = output_filename[:-7] + '.npy'
        results.append(prman.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_mean, output_filename, dct, 1, None, None, None, npz_file),)))
    _ = [i.get() for i in results]
    # BUGFIX: the pool was never closed before, leaking worker processes.
    prman.close()
    prman.join()
def remove(text, n_max_gram=3):
    """Randomly delete one contiguous n-gram (1..n_max_gram tokens) from *text*.

    Tokens are whitespace-separated. Returns the text with the n-gram removed,
    rejoined with single spaces.

    BUGFIX: previously crashed with ValueError when the text had fewer tokens
    than the sampled n-gram (random.randint called with an empty range); the
    n-gram length is now clamped to the token count, and empty input is
    returned unchanged.
    """
    tokens = text.split()
    if not tokens:
        return text
    n_gram = random.randint(1, min(n_max_gram, len(tokens)))
    start = random.randint(0, len(tokens) - n_gram)
    return ' '.join(tokens[:start] + tokens[start + n_gram:])
class StructureConsensuLossFunction(nn.Module):
    """Consensus loss over connected blobs of a segmentation.

    For each unique blob id in `blobs`, combines:
      * a label term (alpha-weighted): NLL of the blob-averaged class probability
        against the blob's single ground-truth label, and
      * a consensus term (beta-weighted): KL divergence pushing every pixel's
        distribution inside the blob toward the blob mean distribution.
    """
    def __init__(self, consensus_loss_alpha=10.0, consensus_loss_beta=5.0, reduce_pixel='idx', reduce_pixel_kl='idx'):
        super(StructureConsensuLossFunction, self).__init__()
        # Weight of the blob-label (NLL) term.
        self.consensus_loss_alpha = consensus_loss_alpha
        # Weight of the blob-consensus (KL) term.
        self.consensus_loss_beta = consensus_loss_beta
        # 'all' normalizes by the full image area; anything else ('idx') by blob size.
        self.reduce_pixel = reduce_pixel
        # Same choice for the KL term: 'all' averages over all pixels, else over
        # the nonzero (in-blob) entries.
        self.reduce_pixel_kl = reduce_pixel_kl
        logger.info(f'''
Loss instanciated with
alpha: {self.consensus_loss_alpha}
beta: {self.consensus_loss_beta}
normalization on 1st (label) term: {self.reduce_pixel}
normalization on 2nd (consensus) term: {self.reduce_pixel_kl}''')
    def structure_via_consensus(self, logit, blobs, target):
        """Average the per-blob loss over every unique blob id in the batch.

        logit: (N, C, H, W) raw scores. blobs: blob-id map, squeezed to (N, H, W).
        target: per-pixel labels; each blob is assumed to carry a single label.
        """
        total_loss_blobs = 0.0
        count = 0.0
        (n, c, h, w) = logit.size()
        blobs = blobs.squeeze(1)
        # NOTE: unique over the whole batch, so a blob id is processed once even
        # if it appears in several images.
        blobs_cc = torch.unique(blobs, sorted=True, dim=None)
        for s in blobs_cc:
            idx_blob = (blobs == s)
            # NOTE(review): target_blob is computed but never used.
            target_blob = target.clone()
            loss_pix_avg = self.structure_via_consensus_over_blob(idx_blob, target, logit)
            total_loss_blobs += loss_pix_avg
            count += 1.0
        total_loss_blobs /= count
        return total_loss_blobs
    def structure_via_consensus_over_blob(self, idx_blob, target, logit):
        """Compute the alpha*NLL + beta*KL loss for a single blob mask."""
        # Every pixel of the blob must share one ground-truth label.
        assert (torch.unique(target[idx_blob]).shape[0] == 1)
        (n, c, h, w) = logit.size()
        label_id = target[idx_blob][0]
        # One target label per batch element.
        target_blob_t = label_id.repeat(n).long()
        prob = F.softmax(logit, dim=1)
        # Broadcast the blob mask over the class dimension.
        idx_blob_t = idx_blob.unsqueeze(1).repeat(1, c, 1, 1)
        prob_blob = (prob * idx_blob_t.float())
        # Blob pixel count per (batch, class); zero where the blob is absent
        # from a batch element.
        support_logit = idx_blob_t.sum(dim=(2, 3))
        zero_mask = (support_logit == 0)
        if (self.reduce_pixel != 'all'):
            # Mean probability over blob pixels (safe division: 0 where empty).
            (prob_blob_mean, _) = self.custom_div(prob_blob.sum(dim=(2, 3)), support_logit.float())
        else:
            # Normalize by total image area instead of blob size.
            prob_blob_mean = (prob_blob.sum(dim=(2, 3)) / (h * w))
        loss_avg = torch.nn.functional.nll_loss(torch.log(prob_blob_mean), target_blob_t, reduction='none')
        # Batch elements that do not contain the blob contribute zero loss.
        invalid_samples = zero_mask.any(dim=1)
        loss_avg[invalid_samples] = 0.0
        loss_avg = loss_avg.mean()
        # KL term: log only where prob is nonzero to avoid -inf; outside the
        # blob, target is set to 1 and log-prob to 0 so kl_div contributes 0.
        nozero_log_mask = (prob_blob != 0)
        zero_log_mask = (prob_blob == 0)
        log_prob_blob = prob_blob.clone()
        log_prob_blob[nozero_log_mask] = torch.log(prob_blob[nozero_log_mask])
        prob_blob_mean_target = prob_blob_mean.unsqueeze(dim=2).unsqueeze(dim=3).repeat(1, 1, h, w)
        prob_blob_mean_target[zero_log_mask] = 1.0
        log_prob_blob[zero_log_mask] = 0.0
        loss_dev = torch.nn.functional.kl_div(log_prob_blob, prob_blob_mean_target, reduction='none')
        if (self.reduce_pixel_kl == 'all'):
            loss_dev = torch.mean(torch.sum(loss_dev, dim=1))
        else:
            # Average over in-blob entries only.
            loss_dev = (torch.sum(loss_dev) / nozero_log_mask.float().sum())
        final_loss = ((self.consensus_loss_alpha * loss_avg) + (self.consensus_loss_beta * loss_dev))
        return final_loss
    def custom_div(self, num, den):
        """Elementwise num/den that yields 0 (instead of NaN/inf) where den == 0.

        Returns (quotient, zero_mask) where zero_mask flags the den == 0 entries.
        """
        x = den.clone()
        one_mask = (den != 0)
        x[one_mask] = (num[one_mask] / den[one_mask])
        # NOTE: y aliases x intentionally; both name the same result tensor.
        y = x
        zero_mask = (den == 0)
        y[zero_mask] = 0
        return (y, zero_mask)
    def forward(self, logit, blobs, target):
        """Standard nn.Module entry point; see structure_via_consensus."""
        return self.structure_via_consensus(logit, blobs, target)
def random_pair_range(a, b, min_dist=1, index1=None):
    """Return a pair of indices in [a, b] at least min_dist+1 apart.

    The first index is index1 when given, otherwise drawn uniformly from
    [a, b]. The second index is drawn uniformly from [a, b] excluding the
    window of width min_dist around the first (clipped at the range ends):
    a value is sampled from the shrunken range and shifted past the
    exclusion window when it lands at or above it.
    """
    if index1 is None:
        first = random.randint(a, b)
    else:
        first = index1
    # Exclusion margins around `first`, clipped to the range boundaries.
    gap_left = min(first - a, min_dist)
    gap_right = min(b - first, min_dist)
    # Sample from the range with the exclusion window removed...
    second = random.randint(a, b - 1 - gap_left - gap_right)
    # ...then skip over the window [first - gap_left, first + gap_right].
    if second >= first - gap_left:
        second = second + gap_left + 1 + gap_right
    return (first, second)
def load_params(path, params):
    """Fill *params* in place with arrays loaded from the numpy archive at *path*.

    Keys present in `params` but missing from the archive are left untouched
    and a warning is emitted. Returns `params` (mutated in place).

    BUGFIX: used Python 2's dict.iteritems(), which raises AttributeError on
    Python 3 (this file uses f-strings elsewhere, so it targets Python 3).
    The unused value was also dropped — we only need the keys.
    """
    pp = numpy.load(path)
    for kk in params:
        if kk not in pp:
            warnings.warn(('%s is not in the archive' % kk))
            continue
        params[kk] = pp[kk]
    return params
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
    """Build the Inception-v4 trunk up to (and including) *final_endpoint*.

    inputs: image batch tensor. final_endpoint: name of the layer at which to
    stop. scope: optional variable scope name (defaults to 'InceptionV4').
    Returns (net, end_points) where end_points maps every constructed endpoint
    name to its tensor. Raises ValueError for an unknown final_endpoint.

    BUGFIX: the block-repetition loops used Python 2's `xrange`, which raises
    NameError on Python 3 (this file uses f-strings elsewhere, so it targets
    Python 3); replaced with `range`.
    """
    end_points = {}
    def add_and_check_final(name, net):
        # Record the endpoint and report whether construction should stop here.
        end_points[name] = net
        return (name == final_endpoint)
    with tf.variable_scope(scope, 'InceptionV4', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
            # Stem: 299x299x3 -> 35x35x384 (for default input size).
            net = slim.conv2d(inputs, 32, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
            if add_and_check_final('Conv2d_1a_3x3', net):
                return (net, end_points)
            net = slim.conv2d(net, 32, [3, 3], padding='VALID', scope='Conv2d_2a_3x3')
            if add_and_check_final('Conv2d_2a_3x3', net):
                return (net, end_points)
            net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
            if add_and_check_final('Conv2d_2b_3x3', net):
                return (net, end_points)
            with tf.variable_scope('Mixed_3a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_0a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID', scope='Conv2d_0a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
            if add_and_check_final('Mixed_3a', net):
                return (net, end_points)
            with tf.variable_scope('Mixed_4a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID', scope='Conv2d_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
            if add_and_check_final('Mixed_4a', net):
                return (net, end_points)
            with tf.variable_scope('Mixed_5a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1])
            if add_and_check_final('Mixed_5a', net):
                return (net, end_points)
            # 4 x Inception-A blocks: Mixed_5b .. Mixed_5e.
            for idx in range(4):
                block_scope = 'Mixed_5' + chr(ord('b') + idx)
                net = block_inception_a(net, block_scope)
                if add_and_check_final(block_scope, net):
                    return (net, end_points)
            # Reduction-A block.
            net = block_reduction_a(net, 'Mixed_6a')
            if add_and_check_final('Mixed_6a', net):
                return (net, end_points)
            # 7 x Inception-B blocks: Mixed_6b .. Mixed_6h.
            for idx in range(7):
                block_scope = 'Mixed_6' + chr(ord('b') + idx)
                net = block_inception_b(net, block_scope)
                if add_and_check_final(block_scope, net):
                    return (net, end_points)
            # Reduction-B block.
            net = block_reduction_b(net, 'Mixed_7a')
            if add_and_check_final('Mixed_7a', net):
                return (net, end_points)
            # 3 x Inception-C blocks: Mixed_7b .. Mixed_7d.
            for idx in range(3):
                block_scope = 'Mixed_7' + chr(ord('b') + idx)
                net = block_inception_c(net, block_scope)
                if add_and_check_final(block_scope, net):
                    return (net, end_points)
    raise ValueError(('Unknown final endpoint %s' % final_endpoint))
class ClusterNet5g(ResNet):
    """Clustering network: a ResNet trunk feeding a multi-sub-head cluster head."""
    def __init__(self, num_channel: int=3, output_k: int=10, num_sub_heads: int=5, batchnorm_track: bool=True):
        super(ClusterNet5g, self).__init__()
        self.batchnorm_track = batchnorm_track
        # Feature extractor and clustering head share the batchnorm tracking flag.
        self.trunk = ClusterNet5gTrunk(num_channel=num_channel, batchnorm_track=self.batchnorm_track)
        self.head = ClusterNet5gHead(output_k=output_k, num_sub_heads=num_sub_heads, batchnorm_track=self.batchnorm_track)
        self._initialize_weights()
    def forward(self, x, kmeans_use_features=False, trunk_features=False, penultimate_features=False):
        """Run the trunk, then either return its features or the head output."""
        features = self.trunk(x, penultimate_features=penultimate_features)
        if trunk_features:
            # Caller only wants the trunk representation (e.g. for k-means).
            return features
        return self.head(features, kmeans_use_features=kmeans_use_features)
def convert(msh_file, h5_file):
    """Convert a Gmsh .msh file to a FEniCS HDF5 (.h5) file.

    Runs `dolfin-convert` to produce intermediate .xml files, loads the mesh,
    and writes the mesh plus its facet region function into *h5_file*.
    Returns the loaded Mesh.

    SECURITY FIX: the dolfin-convert invocation previously built a shell
    string from the file names with shell=True, which breaks on paths with
    spaces and allows shell injection; it now passes an argument list.
    """
    root, _ = os.path.splitext(msh_file)
    assert os.path.splitext(msh_file)[1] == '.msh'
    assert os.path.splitext(h5_file)[1] == '.h5'
    xml_file = '.'.join([root, 'xml'])
    # Argument list, no shell: file names are passed verbatim.
    subprocess.call(['dolfin-convert', msh_file, xml_file])
    assert os.path.exists(xml_file)
    mesh = Mesh(xml_file)
    out = HDF5File(mesh.mpi_comm(), h5_file, 'w')
    out.write(mesh, 'mesh')
    # dolfin-convert emits <root>_facet_region.xml; store it under 'facet'.
    for region in ('facet_region.xml',):
        name, _ = region.split('_')
        r_xml_file = '_'.join([root, region])
        f = MeshFunction('size_t', mesh, r_xml_file)
        out.write(f, name)
    assert os.path.exists(h5_file)
    return mesh
def imagenet_resnet34_pretrained(output_dim):
    """Return an ImageNet-pretrained ResNet-34 with its FC layer replaced for *output_dim* outputs."""
    backbone = torchvision.models.resnet34(pretrained=True)
    return _replace_fc(backbone, output_dim)
class QTranBase(nn.Module):
    """QTRAN joint-action value network: Q (joint action-value) and V (state-value) heads.

    Supports two input architectures: 'coma_critic' (state concatenated with all
    agents' one-hot actions) and 'qtran_paper' (state concatenated with a summed
    encoding of per-agent (hidden_state, action) pairs).
    """
    def __init__(self, args):
        super(QTranBase, self).__init__()
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        # Flattened global state size.
        self.state_dim = int(np.prod(args.state_shape))
        # 'coma_critic' or 'qtran_paper' (see forward()).
        self.arch = self.args.qtran_arch
        self.embed_dim = args.mixing_embed_dim
        if (self.arch == 'coma_critic'):
            # Q input: state + one-hot actions of all agents.
            q_input_size = (self.state_dim + (self.n_agents * self.n_actions))
        elif (self.arch == 'qtran_paper'):
            # Q input: state + summed per-agent (hidden, action) encoding.
            q_input_size = ((self.state_dim + self.args.rnn_hidden_dim) + self.n_actions)
        else:
            raise Exception('{} is not a valid QTran architecture'.format(self.arch))
        if (self.args.network_size == 'small'):
            # Two hidden layers for Q and V; a 2-layer action encoder.
            self.Q = nn.Sequential(nn.Linear(q_input_size, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, 1))
            self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, 1))
            ae_input = (self.args.rnn_hidden_dim + self.n_actions)
            self.action_encoding = nn.Sequential(nn.Linear(ae_input, ae_input), nn.ReLU(), nn.Linear(ae_input, ae_input))
        elif (self.args.network_size == 'big'):
            # Three hidden layers for Q and V; same 2-layer action encoder.
            self.Q = nn.Sequential(nn.Linear(q_input_size, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, 1))
            self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, self.embed_dim), nn.ReLU(), nn.Linear(self.embed_dim, 1))
            ae_input = (self.args.rnn_hidden_dim + self.n_actions)
            self.action_encoding = nn.Sequential(nn.Linear(ae_input, ae_input), nn.ReLU(), nn.Linear(ae_input, ae_input))
        else:
            assert False
    def forward(self, batch, hidden_states, actions=None):
        """Return (q_outputs, v_outputs) for every (batch, timestep) pair.

        batch: episode batch providing 'state' and (when actions is None)
        'actions_onehot'. hidden_states: per-agent RNN hidden states (used only
        by the 'qtran_paper' arch). actions: optional one-hot actions overriding
        the ones stored in the batch.
        """
        bs = batch.batch_size
        ts = batch.max_seq_length
        # Flatten (batch, time) into one leading dimension.
        states = batch['state'].reshape((bs * ts), self.state_dim)
        if (self.arch == 'coma_critic'):
            if (actions is None):
                actions = batch['actions_onehot'].reshape((bs * ts), (self.n_agents * self.n_actions))
            else:
                actions = actions.reshape((bs * ts), (self.n_agents * self.n_actions))
            inputs = th.cat([states, actions], dim=1)
        elif (self.arch == 'qtran_paper'):
            if (actions is None):
                actions = batch['actions_onehot'].reshape((bs * ts), self.n_agents, self.n_actions)
            else:
                actions = actions.reshape((bs * ts), self.n_agents, self.n_actions)
            hidden_states = hidden_states.reshape((bs * ts), self.n_agents, (- 1))
            agent_state_action_input = th.cat([hidden_states, actions], dim=2)
            # Encode each agent's (hidden, action) pair, then sum over agents
            # (permutation-invariant aggregation, as in the QTRAN paper).
            agent_state_action_encoding = self.action_encoding(agent_state_action_input.reshape(((bs * ts) * self.n_agents), (- 1))).reshape((bs * ts), self.n_agents, (- 1))
            agent_state_action_encoding = agent_state_action_encoding.sum(dim=1)
            inputs = th.cat([states, agent_state_action_encoding], dim=1)
        q_outputs = self.Q(inputs)
        states = batch['state'].reshape((bs * ts), self.state_dim)
        # V depends on the state only.
        v_outputs = self.V(states)
        return (q_outputs, v_outputs)
def assert_is_mag(arg1: str):
    """Raise ValueError unless *arg1* is a valid magnification string (e.g. "10x")."""
    valid = isinstance(arg1, str) and is_mag(arg1)
    if not valid:
        raise ValueError(f'Invalid magnification {arg1}. Must be of format [int/float]x, such as "10x", "20X", or "2.5x"')
def cleanup(processes):
    """Close captured output streams and terminate any still-running process.

    processes: iterable of (process, stdout, stderr) triples; stdout/stderr
    may be None when the stream was not captured.
    """
    for process, stdout, stderr in processes:
        # Close the captured streams first (stdout, then stderr).
        for stream in (stdout, stderr):
            if stream is not None:
                stream.close()
        # poll() returning None means the process has not exited yet.
        if process.poll() is None:
            process.terminate()
def convert_episode_to_batch_major(episode):
    """Swap the first two axes of every entry so the batch dimension leads.

    episode: dict mapping keys to array-likes shaped (time, batch, ...).
    Returns a new dict with each value as a (batch, time, ...) ndarray.
    """
    return {key: np.array(value).copy().swapaxes(0, 1) for key, value in episode.items()}
def load_google_mobility(data_dir='.', url='https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'):
    """Download the Google COVID-19 Community Mobility report and return it as a DataFrame.

    data_dir: directory to download into (the CSV is left there as
    'google_mobility.csv'). url: source of the report (backward-compatible
    addition; defaults to Google's published global report URL —
    TODO(review): confirm this is the intended source, the original omitted it).

    BUGFIX: the original wget command had no source URL at all, so the
    download always failed. Also, os.chdir is now restored in a finally block
    so an exception no longer leaves the process in data_dir.
    """
    cur_dir = os.getcwd()
    os.chdir(data_dir)
    try:
        os.system('wget -O google_mobility.csv %s' % url)
        raw = pd.read_csv('google_mobility.csv')
    finally:
        # Always restore the working directory, even if download/parse fails.
        os.chdir(cur_dir)
    return raw
class XLMTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for XLM models, with Moses pre-tokenization and
    language-specific word tokenizers for Thai, Chinese and Japanese.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # NOTE(review): additional_special_tokens uses a mutable list as a default
    # argument; it is only read here, but callers should not mutate it.
    def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs):
        """Load the JSON vocabulary and BPE merges and set up tokenizer caches.

        vocab_file: JSON token->id map. merges_file: one BPE merge per line.
        lang2id/id2lang: optional language<->id maps (must be inverses).
        """
        super().__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs)
        # Per-language caches for Moses normalizers/tokenizers (built lazily).
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        # Languages that bypass Moses tokenization in favor of custom tokenizers.
        self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
        # True for XLM-100/CLM-17 style models; False for XLM-R-style casing.
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if ((lang2id is not None) and (id2lang is not None)):
            assert (len(lang2id) == len(id2lang))
        # Lazily-instantiated external tokenizers (see ja_tokenize).
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse map for id -> token.
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Drop the trailing empty line produced by the final newline.
            merges = merges_handle.read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Merge priority: earlier lines rank lower (higher priority).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> BPE result cache.
        self.cache = {}
    def do_lower_case(self):
        """Whether lowercasing/accent-stripping is applied before BPE."""
        return self.do_lowercase_and_remove_accent
    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with a (cached) Moses punctuation normalizer."""
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)
    def moses_tokenize(self, text, lang):
        """Tokenize with a (cached) Moses tokenizer; returns a token list."""
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)
    def moses_pipeline(self, text, lang):
        """Apply unicode punct replacement, Moses punct norm, and non-printing-char removal."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text
    def ja_tokenize(self, text):
        """Tokenize Japanese text with KyTea (lazily imported and instantiated).

        Raises if Mykytea is not installed, after logging install instructions.
        """
        if (self.ja_word_tokenizer is None):
            try:
                import Mykytea
                self.ja_word_tokenizer = Mykytea.Mykytea(('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~')))
            except (AttributeError, ImportError):
                logger.error("Make sure you install KyTea ( and it's python wrapper ( with the following steps")
                logger.error('1. git clone :neubig/kytea.git && cd kytea')
                logger.error('2. autoreconf -i')
                logger.error('3. ./configure --prefix=$HOME/local')
                logger.error('4. make && make install')
                logger.error('5. pip install kytea')
                raise
        return list(self.ja_word_tokenizer.getWS(text))
    def vocab_size(self):
        """Number of tokens in the base vocabulary (excluding added tokens)."""
        return len(self.encoder)
    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair encoding to a single token.

        Returns the space-joined BPE subwords, with '</w>' marking the word
        end. Results are memoized in self.cache.
        """
        # Represent the token as characters with '</w>' appended to the last.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case: avoid emitting a bare newline subword.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word
    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        """Tokenize *text*: language-specific word tokenization, then BPE.

        lang selects the pre-tokenizer (Moses for most languages, custom
        paths for 'th', 'zh', 'ja'). bypass_tokenizer=True assumes the text
        is already whitespace-tokenized and skips pre-tokenization.
        Returns the list of BPE subword strings.
        """
        if (lang and self.lang2id and (lang not in self.lang2id)):
            logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
        if bypass_tokenizer:
            text = text.split()
        elif (lang not in self.lang_with_custom_tokenizer):
            text = self.moses_pipeline(text, lang=lang)
            # Romanian needs extra diacritic normalization before Moses.
            if (lang == 'ro'):
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif (lang == 'th'):
            text = self.moses_pipeline(text, lang=lang)
            try:
                if ('pythainlp' not in sys.modules):
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules['pythainlp'].word_tokenize
            except (AttributeError, ImportError):
                logger.error('Make sure you install PyThaiNLP ( with the following steps')
                logger.error('1. pip install pythainlp')
                raise
            text = th_word_tokenize(text)
        elif (lang == 'zh'):
            try:
                if ('jieba' not in sys.modules):
                    import jieba
                else:
                    jieba = sys.modules['jieba']
            except (AttributeError, ImportError):
                logger.error('Make sure you install Jieba ( with the following steps')
                logger.error('1. pip install jieba')
                raise
            text = ' '.join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif (lang == 'ja'):
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError('It should not reach here')
        if (self.do_lowercase_and_remove_accent and (not bypass_tokenizer)):
            text = lowercase_and_remove_accent(text)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens
    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join BPE subwords back into plain text ('</w>' becomes a space)."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model input as <s> A </s> (single) or <s> A </s> B </s> (pair)."""
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return ((bos + token_ids_0) + sep)
        return ((((bos + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return segment ids: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary JSON and merges file into *save_directory*.

        Returns (vocab_file, merge_file) paths, or None (with an error logged)
        when save_directory is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            # Merges are written in rank order; warn if ranks are not contiguous.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
class DAVISLoader(MyDataset):
    """DAVIS 2016/2017 video segmentation dataset loader.

    Builds full sequences, fixed-length sequence clips, and matching
    annotation clips, optionally backed by LMDB for faster reads.
    """
    def __init__(self, args, transform=None, target_transform=None, augment=False, split='train', resize=False, inputRes=None, video_mode=True, use_prev_mask=False):
        self._year = args.year
        self._phase = split
        self._single_object = args.single_object
        # Number of frames per clip.
        self._length_clip = args.length_clip
        self.transform = transform
        self.target_transform = target_transform
        self.split = split
        self.inputRes = inputRes
        self.video_mode = video_mode
        self.max_seq_len = args.gt_maxseqlen
        self.dataset = args.dataset
        self.flip = augment
        self.use_prev_mask = use_prev_mask
        if augment:
            if (self._length_clip == 1):
                # Single-frame clips: apply the affine augmentation eagerly.
                self.augmentation_transform = RandomAffine(rotation_range=args.rotation, translation_range=args.translation, shear_range=args.shear, zoom_range=(args.zoom, max((args.zoom * 2), 1.0)), interp='nearest')
            else:
                # Multi-frame clips: lazy=True so the same transform can be
                # applied consistently across all frames of a clip.
                self.augmentation_transform = RandomAffine(rotation_range=args.rotation, translation_range=args.translation, shear_range=args.shear, zoom_range=(args.zoom, max((args.zoom * 2), 1.0)), interp='nearest', lazy=True)
        else:
            self.augmentation_transform = None
        assert ((args.year == '2017') or (args.year == '2016'))
        if (args.year == '2016'):
            if (not ((self._phase == phase.TRAIN) or (self._phase == phase.VAL) or (self._phase == phase.TRAINVAL))):
                raise Exception("Set '{}' not available in DAVIS 2016 ({},{},{})".format(self._phase.name, phase.TRAIN.name, phase.VAL.name, phase.TRAINVAL.name))
        if (self._single_object and (self._year != '2016')):
            raise Exception("Single object segmentation only available for 'year=2016'")
        self._db_sequences = db_read_sequences(args.year, self._phase)
        # Optional LMDB backends for images and annotations.
        lmdb_env_seq_dir = osp.join(cfg.PATH.DATA, 'lmdb_seq')
        lmdb_env_annot_dir = osp.join(cfg.PATH.DATA, 'lmdb_annot')
        if (osp.isdir(lmdb_env_seq_dir) and osp.isdir(lmdb_env_annot_dir)):
            lmdb_env_seq = lmdb.open(lmdb_env_seq_dir)
            lmdb_env_annot = lmdb.open(lmdb_env_annot_dir)
        else:
            lmdb_env_seq = None
            lmdb_env_annot = None
            print('LMDB not found. This could affect the data loading time. It is recommended to use LMDB.')
        # NOTE(review): db_read_sequences is re-read before each consuming loop
        # below — presumably because it returns a one-shot iterator; confirm.
        self.sequences = [Sequence(self._phase, s.name, lmdb_env=lmdb_env_seq) for s in self._db_sequences]
        self._db_sequences = db_read_sequences(args.year, self._phase)
        self.annotations = [Annotation(self._phase, s.name, self._single_object, lmdb_env=lmdb_env_annot) for s in self._db_sequences]
        # Split each sequence into clips of _length_clip frames.
        self.sequence_clips = []
        self._db_sequences = db_read_sequences(args.year, self._phase)
        for (seq, s) in zip(self.sequences, self._db_sequences):
            if (self.use_prev_mask == False):
                images = seq.files
                # Starting frame number is parsed from the image file name.
                starting_frame_idx = 0
                starting_frame = int(osp.splitext(osp.basename(images[starting_frame_idx]))[0])
                self.sequence_clips.append(SequenceClip_simple(seq, starting_frame))
                num_frames = self.sequence_clips[(- 1)]._numframes
                num_clips = int((num_frames / self._length_clip))
                for idx in range((num_clips - 1)):
                    starting_frame_idx += self._length_clip
                    starting_frame = int(osp.splitext(osp.basename(images[starting_frame_idx]))[0])
                    self.sequence_clips.append(SequenceClip_simple(seq, starting_frame))
            else:
                # Inference with previous mask: one clip per sequence, starting
                # at the first annotated frame.
                annot_seq_dir = osp.join(cfg.PATH.ANNOTATIONS, s.name)
                annotations = glob.glob(osp.join(annot_seq_dir, '*.png'))
                annotations.sort()
                starting_frame = int(osp.splitext(osp.basename(annotations[0]))[0])
                self.sequence_clips.append(SequenceClip(self._phase, s.name, starting_frame, lmdb_env=lmdb_env_seq))
        # Build annotation clips mirroring the sequence clips.
        self.annotation_clips = []
        self._db_sequences = db_read_sequences(args.year, self._phase)
        for (annot, s) in zip(self.annotations, self._db_sequences):
            images = annot.files
            starting_frame_idx = 0
            starting_frame = int(osp.splitext(osp.basename(images[starting_frame_idx]))[0])
            self.annotation_clips.append(AnnotationClip_simple(annot, starting_frame))
            num_frames = self.annotation_clips[(- 1)]._numframes
            num_clips = int((num_frames / self._length_clip))
            for idx in range((num_clips - 1)):
                starting_frame_idx += self._length_clip
                starting_frame = int(osp.splitext(osp.basename(images[starting_frame_idx]))[0])
                self.annotation_clips.append(AnnotationClip_simple(annot, starting_frame))
        # Lookup tables: sequence -> index and "<name><start_frame>" -> clip index.
        self._keys = dict(zip([s for s in self.sequences], range(len(self.sequences))))
        self._keys_clips = dict(zip([(s.name + str(s.starting_frame)) for s in self.sequence_clips], range(len(self.sequence_clips))))
        try:
            # Color palette from the first annotation PNG; fall back to green.
            self.color_palette = np.array(Image.open(self.annotations[0].files[0]).getpalette()).reshape((- 1), 3)
        except Exception as e:
            self.color_palette = np.array([[0, 255, 0]])
    def get_raw_sample(self, key):
        """Return {'images', 'annotations'} for a sequence, by name or index."""
        if isinstance(key, str):
            sid = self._keys[key]
        elif isinstance(key, int):
            sid = key
        else:
            raise InputError()
        return edict({'images': self.sequences[sid], 'annotations': self.annotations[sid]})
    def get_raw_sample_clip(self, key):
        """Return {'images', 'annotations'} for a clip, by clip key or index."""
        if isinstance(key, str):
            sid = self._keys_clips[key]
        elif isinstance(key, int):
            sid = key
        else:
            raise InputError()
        return edict({'images': self.sequence_clips[sid], 'annotations': self.annotation_clips[sid]})
    def sequence_name_to_id(self, name):
        """Map a sequence name to its index."""
        return self._keys[name]
    def sequence_name_to_id_clip(self, name):
        """Map a clip key ("<name><start_frame>") to its index."""
        return self._keys_clips[name]
    def sequence_id_to_name(self, sid):
        """Map a sequence index back to its name."""
        return self._db_sequences[sid].name
    def sequence_id_to_name_clip(self, sid):
        """Return the clip object at the given index.

        NOTE(review): unlike sequence_id_to_name this returns the clip itself,
        not a name — confirm callers expect the object.
        """
        return self.sequence_clips[sid]
    def iternames(self):
        """Yield the raw DB sequence records."""
        for s in self._db_sequences:
            (yield s)
    def iternames_clips(self):
        """Yield the sequence clip objects."""
        for s in self.sequence_clips:
            (yield s)
    def iteritems(self):
        """Alias for iteration over the dataset."""
        return self.__iter__()
def distribute_config_updates(prefixes, scaffolding, config_updates):
    """Route each flattened config update to the scaffold whose prefix best matches its path."""
    for path, value in iterate_flattened(config_updates):
        scaffold_name, suffix = find_best_match(path, prefixes)
        target = scaffolding[scaffold_name]
        # Store the value under the remaining (dotted) path on that scaffold.
        set_by_dotted_path(target.config_updates, suffix, value)
def spawn_shelf(shelf_coordinates: chex.Array, requested: chex.Array) -> chex.Array:
    """Create a Shelf at the given (x, y) coordinates with the given requested flag."""
    x, y = shelf_coordinates
    return Shelf(position=Position(x=x, y=y), is_requested=requested)
class HDF5Writer(BaseWriter):
    """Write arrays into an HDF5 'ark' file, with optional scp and num-frames sidecars."""
    def __init__(self, wspecifier, write_num_frames=None, compress=False):
        spec_dict = parse_wspecifier(wspecifier)
        self.filename = spec_dict['ark']
        # gzip-compress every dataset when requested.
        self.kwargs = {'compression': 'gzip'} if compress else {}
        self.writer = h5py.File(spec_dict['ark'], 'w')
        # Optional scp index file mapping keys to their location in the ark.
        if 'scp' in spec_dict:
            self.writer_scp = open(spec_dict['scp'], 'w', encoding='utf-8')
        else:
            self.writer_scp = None
        # Optional per-utterance frame-count writer.
        if write_num_frames is not None:
            self.writer_nframe = get_num_frames_writer(write_num_frames)
        else:
            self.writer_nframe = None
    def __setitem__(self, key, value):
        """Store *value* under *key* and update the sidecar files if present."""
        self.writer.create_dataset(key, data=value, **self.kwargs)
        if self.writer_scp is not None:
            self.writer_scp.write(f'{key} {self.filename}:{key}\n')
        if self.writer_nframe is not None:
            self.writer_nframe.write(f'{key} {len(value)}\n')
def print_evaluate_index(model, eval_dataLoader, num_indexes=7):
    """Evaluate *model* on *eval_dataLoader* and print the metrics as percentages.

    num_indexes: how many metrics to compute/print (in name_index order).

    BUGFIX: the evaluate() call hardcoded num_indexes=7 while the print loop
    used the parameter, so passing any other value either sliced or overran
    the returned metrics; the parameter is now forwarded consistently.
    """
    name_index = ['ACC', 'Sens', 'Spec', 'PPV', 'NPV', 'F1', 'MCC']
    ret_index = evaluate(model, eval_dataLoader, num_indexes=num_indexes)
    for i in range(num_indexes):
        # Print each metric as a percentage with two decimals.
        print(f'{name_index[i]}:{round((ret_index[i] * 100), 2)}', end=' ')
    print('\n')
    return
def _create_annotations(gt, camera_id, frame_shape):
    """Build SequenceAnnotations from a ground-truth dict.

    gt: dict with 'BallPos', 'BallShot', 'PlayerInteractingID' and 'Person'
    interval annotations. camera_id: cameras 2 and 6 are horizontally
    mirrored, so ball x coordinates are flipped. frame_shape: (H, W, ...)
    used for the mirror flip. All frame indices are shifted by a fixed
    delta of -8 (annotation/video frame offset — TODO confirm source).
    """
    annotations = SequenceAnnotations()
    delta = -8
    # Cameras 2 and 6 record a mirrored view.
    mirror = (camera_id == 2) or (camera_id == 6)
    for (start_frame, end_frame, x, y) in gt['BallPos']:
        # BUGFIX: the mirror flip was previously applied inside the per-frame
        # loop, so x toggled back and forth on every frame of a multi-frame
        # interval. Flip once per interval instead.
        if mirror:
            x = (frame_shape[1] - x)
        for i in range(start_frame, (end_frame + 1)):
            annotations.ball_pos[(i + delta)].append((x, y))
    for (start_frame, end_frame, value) in gt['BallShot']:
        if value:
            for i in range(start_frame, (end_frame + 1)):
                annotations.ball_shot[(i + delta)] = True
    for (start_frame, end_frame, value) in gt['PlayerInteractingID']:
        # -1 means no interacting player.
        if (value > (- 1)):
            for i in range(start_frame, (end_frame + 1)):
                annotations.interacting_player[(i + delta)].append(value)
    for player in gt['Person']:
        for (start_frame, end_frame, height, width, x, y) in gt['Person'][player]:
            assert (start_frame <= end_frame)
            for i in range(start_frame, (end_frame + 1)):
                annotations.persons[(i + delta)].append((player, height, width, x, y))
    return annotations
class ClusterBasedBucketer(object):
    """Bucket samples by clustering their encoded representation.

    encoder: object with fit_transform/transform. clustering: object with
    fit/predict (sklearn-style).
    """
    def __init__(self, encoder, clustering):
        self.encoder = encoder
        self.clustering = clustering
    def fit(self, X, y=None):
        """Fit encoder and clustering on X; returns self for chaining."""
        encoded = self.encoder.fit_transform(X)
        self.clustering.fit(encoded)
        return self
    def predict(self, X, y=None):
        """Return the cluster (bucket) assignment for each sample in X."""
        encoded = self.encoder.transform(X)
        return self.clustering.predict(encoded)
    def fit_predict(self, X, y=None):
        """Fit on X, then return its bucket assignments."""
        return self.fit(X).predict(X)
def _cast_to_type_if_compatible(name, param_type, value):
    """Cast *value* to the hparam's *param_type*, rejecting surprising coercions.

    Raises ValueError when the cast would be lossy or type-confusing
    (string<->non-string, bool<->non-bool, integral<->fractional,
    number<->non-number); NoneType hparams accept any value unchanged.
    """
    fail_msg = ("Could not cast hparam '%s' of type '%s' from value %r" % (name, param_type, value))
    # NoneType params accept anything as-is.
    if issubclass(param_type, type(None)):
        return value
    stringy = (six.string_types, six.binary_type)
    # Strings only accept strings.
    if issubclass(param_type, stringy) and not isinstance(value, stringy):
        raise ValueError(fail_msg)
    # Bool params take only bools, and bools never cast to anything else.
    if issubclass(param_type, bool) != isinstance(value, bool):
        raise ValueError(fail_msg)
    # Integral params reject fractional numbers; numeric params reject non-numbers.
    if issubclass(param_type, numbers.Integral) and not isinstance(value, numbers.Integral):
        raise ValueError(fail_msg)
    if issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number):
        raise ValueError(fail_msg)
    return param_type(value)
class Prop_Inflected_Verbs(object):
    """Computes the proportion of inflected verbs over all sentences."""

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return inflected/total verb ratio, or NOT_AVAILABLE when no verbs occur."""
        total_inflected = 0
        total_verbs = 0
        for sentence in self.sentence_objs:
            total_verbs += sentence.pos_tag_counter.get_pos_tag_count(VERB)
            total_inflected += num_inflected_verbs(sentence.stanza_doc)
        if total_verbs == 0:
            # Avoid division by zero when the text contains no verbs at all.
            return NOT_AVAILABLE
        return total_inflected / total_verbs
class GQADataset(VQADataset, __DisplMixin):
    """GQA visual question answering dataset (single answer, unit weight)."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        """Return processed image, question text and the (single) answer."""
        ann = self.annotation[index]
        img = Image.open(os.path.join(self.vis_root, ann['image'])).convert('RGB')
        return {
            'image': self.vis_processor(img),
            'text_input': self.text_processor(ann['question']),
            # GQA provides exactly one ground-truth answer; weight fixed at 1.
            'answers': [ann['answer']],
            'weights': [1],
        }
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used by GLPN image-processing tests."""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        # Description of the synthetic test inputs.
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        # Image-processor options under test.
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            'do_resize': self.do_resize,
            'size_divisor': self.size_divisor,
            'do_rescale': self.do_rescale,
        }
def run_net(args, config, train_writer=None, val_writer=None):
    """Pre-training loop: builds dataloaders, model and optimizer, then trains
    for config.max_epoch epochs using a configurable xyz/normal loss schedule.

    Args:
        args: parsed CLI namespace (distributed flags, checkpoint paths, ...).
        config: experiment config (dataset, model, optimizer, loss_type, ...).
        train_writer: optional TensorBoard writer for training scalars.
        val_writer: optional TensorBoard writer (only closed here; unused otherwise).
    """
    logger = get_logger(args.log_name)
    # Build train/val dataloaders (plus an optional extra-train loader).
    ((train_sampler, train_dataloader), (_, test_dataloader)) = (builder.dataset_builder(args, config.dataset.train), builder.dataset_builder(args, config.dataset.val))
    (_, extra_train_dataloader) = (builder.dataset_builder(args, config.dataset.extra_train) if config.dataset.get('extra_train') else (None, None))
    base_model = builder.model_builder(config.model)
    if args.use_gpu:
        base_model.to(args.local_rank)
    start_epoch = 0
    best_metrics = Acc_Metric(0.0)
    metrics = Acc_Metric(0.0)
    print(args.start_ckpts)
    # Resume a previous run, or warm-start weights from a given checkpoint.
    if args.resume:
        print('start the resume process')
        (start_epoch, best_metric) = builder.resume_model(base_model, args, logger=logger)
        best_metrics = Acc_Metric(best_metric)
    elif (args.start_ckpts is not None):
        builder.load_model(base_model, args.start_ckpts, logger=logger)
    # Wrap the model for distributed (DDP) or single-node (DP) parallelism.
    if args.distributed:
        if args.sync_bn:
            base_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(base_model)
            print_log('Using Synchronized BatchNorm ...', logger=logger)
        base_model = nn.parallel.DistributedDataParallel(base_model, device_ids=[(args.local_rank % torch.cuda.device_count())], find_unused_parameters=True)
        print_log('Using Distributed Data parallel ...', logger=logger)
    else:
        print_log('Using Data parallel ...', logger=logger)
        base_model = nn.DataParallel(base_model).cuda()
    (optimizer, scheduler) = builder.build_opti_sche(base_model, config)
    base_model.zero_grad()
    # Used only by the 'xyznormal_xyzfirst_gradual' schedule: rebuild the
    # optimizer/scheduler exactly once when the normal loss switches on.
    reset_optimizer = True
    for epoch in range(start_epoch, (config.max_epoch + 1)):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        base_model.train()
        epoch_start_time = time.time()
        batch_start_time = time.time()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter(['Loss'])
        losses_normal = AverageMeter(['Loss'])
        num_iter = 0
        # Linear 0 -> 1 ramp over training, used by the 'gradual' schedules.
        gradual_weight = (float(epoch) / float(config.max_epoch))
        print(gradual_weight)
        base_model.train()
        n_batches = len(train_dataloader)
        for (idx, (taxonomy_ids, model_ids, data)) in enumerate(train_dataloader):
            num_iter += 1
            n_itr = ((epoch * n_batches) + idx)
            data_time.update((time.time() - batch_start_time))
            npoints = config.dataset.train.others.npoints
            dataset_name = config.dataset.train._base_.NAME
            # ShapeNet batches are already npoints; ModelNet samples are
            # farthest-point sampled down to npoints on the GPU.
            if (dataset_name == 'ShapeNet'):
                points = data.cuda()
            elif (dataset_name == 'ModelNet'):
                points = data[0].cuda()
                points = misc.fps(points, npoints)
            else:
                raise NotImplementedError(f'Train phase do not support {dataset_name}')
            assert (points.size(1) == npoints)
            points = train_transforms(points)
            (loss_xyz, loss_normal) = base_model(points)
            # Combine coordinate- and normal-reconstruction losses per config.
            if (config.loss_type == 'xyz'):
                loss = loss_xyz
            elif (config.loss_type == 'normal'):
                loss = (float(config.normal_weight) * loss_normal)
            elif (config.loss_type == 'xyznormal'):
                loss = (loss_xyz + (float(config.normal_weight) * loss_normal))
            elif (config.loss_type == 'xyznormal_gradual'):
                loss = (loss_xyz + ((float(config.normal_weight) * loss_normal) * gradual_weight))
            elif (config.loss_type == 'xyznormal_xyzfirst'):
                # xyz-only warm-up for the first 300 epochs.
                if (epoch < 300):
                    loss = loss_xyz
                else:
                    loss = (loss_xyz + (float(config.normal_weight) * loss_normal))
            elif (config.loss_type == 'xyznormal_xyzfirst_gradual'):
                if (epoch < 300):
                    loss = loss_xyz
                else:
                    if reset_optimizer:
                        # One-time optimizer/scheduler rebuild at phase switch.
                        reset_optimizer = False
                        (optimizer, scheduler) = builder.build_opti_sche(base_model, config)
                    # Re-ramp the normal weight over the remaining epochs.
                    gradual_weight = (float((epoch - 299)) / float((config.max_epoch - 299)))
                    loss = (loss_xyz + ((float(config.normal_weight) * loss_normal) * gradual_weight))
            else:
                raise NotImplementedError
            try:
                loss.backward()
            except:
                # NOTE(review): bare except — presumably handles per-GPU loss
                # vectors from DataParallel by averaging first; consider
                # narrowing to the actual exception type.
                loss = loss.mean()
                loss.backward()
            # Gradient accumulation: step every config.step_per_update iters.
            if (num_iter == config.step_per_update):
                num_iter = 0
                optimizer.step()
                base_model.zero_grad()
            if args.distributed:
                loss = dist_utils.reduce_tensor(loss, args)
                losses.update([(loss_xyz.item() * 1000)])
                losses_normal.update([(loss_normal.item() * 1000)])
            else:
                losses.update([(loss_xyz.item() * 1000)])
                losses_normal.update([(loss_normal.item() * 1000)])
            if args.distributed:
                torch.cuda.synchronize()
            if (train_writer is not None):
                train_writer.add_scalar('Loss/Batch/Loss', loss.item(), n_itr)
                train_writer.add_scalar('Loss/Batch/LR', optimizer.param_groups[0]['lr'], n_itr)
            batch_time.update((time.time() - batch_start_time))
            batch_start_time = time.time()
            # Log every 20 batches.
            if ((idx % 20) == 0):
                print_log(('[Epoch %d/%d][Batch %d/%d] BatchTime = %.3f (s) DataTime = %.3f (s) Lossxyz = %s Lossnormal = %s lr = %.6f' % (epoch, config.max_epoch, (idx + 1), n_batches, batch_time.val(), data_time.val(), [('%.4f' % l) for l in losses.val()], [('%.4f' % l) for l in losses_normal.val()], optimizer.param_groups[0]['lr'])), logger=logger)
        # Per-epoch LR schedule step (a list means multiple schedulers).
        if isinstance(scheduler, list):
            for item in scheduler:
                item.step(epoch)
        else:
            scheduler.step(epoch)
        epoch_end_time = time.time()
        if (train_writer is not None):
            train_writer.add_scalar('Loss/Epoch/Loss_1', losses.avg(0), epoch)
        print_log(('[Training] EPOCH: %d EpochTime = %.3f (s) Losses = %s Lossnormal = %s lr = %.6f' % (epoch, (epoch_end_time - epoch_start_time), [('%.4f' % l) for l in losses.avg()], [('%.4f' % l) for l in losses_normal.avg()], optimizer.param_groups[0]['lr'])), logger=logger)
        # Always refresh the rolling 'last' checkpoint; additionally snapshot
        # every 25 epochs once past epoch 250.
        builder.save_checkpoint(base_model, optimizer, epoch, metrics, best_metrics, 'ckpt-last', args, logger=logger)
        if (((epoch % 25) == 0) and (epoch >= 250)):
            builder.save_checkpoint(base_model, optimizer, epoch, metrics, best_metrics, f'ckpt-epoch-{epoch:03d}', args, logger=logger)
    if (train_writer is not None):
        train_writer.close()
    if (val_writer is not None):
        val_writer.close()
class MemoryDataParameter(_message.Message):
    """Generated protocol-buffer message class — do not edit by hand.

    The (Python 2 style) metaclass builds the full message API at class
    creation time from the linked descriptor.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER
class QActionEx(QAction):
    """QAction with common setup (shortcut, tooltip, trigger, checkable state)
    performed directly in the constructor."""

    def __init__(self, icon, text, shortcut=None, trigger_func=None, shortcut_in_tooltip=False, is_checkable=False, is_auto_repeat=False):
        super().__init__(icon, text)
        if (shortcut is not None):
            self.setShortcut(shortcut)
            if shortcut_in_tooltip:
                # Surface the hot-key in the tooltip, e.g. "Open ( Hotkey: Ctrl+O )".
                self.setToolTip(f"{text} ( {StringsDB['S_HOT_KEY']}: {shortcut} )")
        if (trigger_func is not None):
            self.triggered.connect(trigger_func)
        if is_checkable:
            self.setCheckable(True)
        self.setAutoRepeat(is_auto_repeat)
# FIX: restored decorator syntax. The registration call previously sat on its
# own line (`_MASK_HEAD_REGISTRY.register()`), which builds a registering
# decorator and throws it away, leaving this head unregistered.
@_MASK_HEAD_REGISTRY.register()
class MaskRCNNConvUpsamplePointSupHead(MaskRCNNConvUpsampleHead):
    """Mask R-CNN head trained from sparse point supervision.

    In training, the mask loss is computed only at annotated point locations
    instead of over the full mask; inference is unchanged from the parent head.
    """

    def forward(self, x, instances: List[Instances]) -> Any:
        """Run the conv/upsample layers, then either compute the point-supervised
        mask loss (training) or attach predicted masks to *instances* (inference)."""
        x = self.layers(x)
        if self.training:
            (N, C, H, W) = x.shape
            assert (H == W)
            proposal_boxes = [inst.proposal_boxes for inst in instances]
            assert (N == np.sum((len(boxes) for boxes in proposal_boxes)))
            if (N == 0):
                # No proposals: return a zero loss that stays connected to the graph.
                return {'loss_mask': (x.sum() * 0)}
            (point_coords, point_labels) = get_point_coords_from_point_annotation(instances)
            # Sample mask logits only at the annotated points.
            mask_logits = point_sample(x, point_coords, align_corners=False)
            return {'loss_mask': roi_mask_point_loss(mask_logits, instances, point_labels)}
        else:
            mask_rcnn_inference(x, instances)
            return instances
def decode_with_crf(crf, word_reps, mask_v, l_map):
    """Viterbi-decode *word_reps* with *crf* and score each decoded sequence.

    Args:
        crf: CRF module exposing decode() and cal_score().
        word_reps: (seq_len, batch, ...) word representations.
        mask_v: (seq_len, batch) mask of valid positions.
        l_map: label -> index map; must contain '<start>' and '<pad>'.

    Returns:
        Tuple of (decoded labels as a (batch, seq_len) numpy array,
        numpy array with the CRF score of each decoded sequence).

    NOTE(review): allocates torch.cuda.LongTensor directly, so a CUDA device
    is required.
    """
    seq_len = word_reps.size(0)
    bat_size = word_reps.size(1)
    decoded_crf = crf.decode(word_reps, mask_v)
    scores = crf.cal_score(word_reps).data
    mask_v = mask_v.data
    decoded_crf = decoded_crf.data
    # Prepend the <start> label so consecutive label bigrams can be formed.
    decoded_crf_withpad = torch.cat((torch.cuda.LongTensor(1, bat_size).fill_(l_map['<start>']), decoded_crf), 0)
    decoded_crf_withpad = decoded_crf_withpad.transpose(0, 1).cpu().numpy()
    label_size = len(l_map)
    # Encode each (prev, cur) label pair as prev * label_size + cur; the final
    # position is paired with <pad>.
    bi_crf = []
    cur_len = (decoded_crf_withpad.shape[1] - 1)
    for i_l in decoded_crf_withpad:
        bi_crf.append(([((i_l[ind] * label_size) + i_l[(ind + 1)]) for ind in range(0, cur_len)] + [((i_l[cur_len] * label_size) + l_map['<pad>'])]))
    bi_crf = torch.cuda.LongTensor(bi_crf).transpose(0, 1).unsqueeze(2)
    # Gather the score of every chosen bigram, then keep only valid positions.
    tg_energy = torch.gather(scores.view(seq_len, bat_size, (- 1)), 2, bi_crf).view(seq_len, bat_size)
    tg_energy = tg_energy.transpose(0, 1).masked_select(mask_v.transpose(0, 1))
    tg_energy = tg_energy.cpu().numpy()
    # Per-sequence lengths; tg_energy is a flat concatenation of sequences.
    masks = mask_v.sum(0)
    crf_result_scored_by_crf = []
    start = 0
    for (i, mask) in enumerate(masks):
        end = (start + mask)
        crf_result_scored_by_crf.append(tg_energy[start:end].sum())
        start = end
    crf_result_scored_by_crf = np.array(crf_result_scored_by_crf)
    return (decoded_crf.cpu().transpose(0, 1).numpy(), crf_result_scored_by_crf)
def prepare_data_taskmaster(args):
    """Load the TaskMaster (TM-1-2019) dialogues and build training pairs.

    Args:
        args: dict with at least 'data_path', 'example_type' and 'max_line'.

    Returns:
        (pair_trn, pair_dev, pair_tst, meta_data); dev/test are currently
        always empty and meta_data carries num_labels=0.
    """
    ds_name = 'TaskMaster'
    example_type = args['example_type']
    max_line = args['max_line']
    # FIX: file handles were previously opened and never closed; use context
    # managers so they are released deterministically.
    with open(os.path.join(args['data_path'], 'Taskmaster/TM-1-2019/train-dev-test/train.csv'), 'r') as f_trn:
        fr_trn_id = [_id.replace('\n', '').replace(',', '') for _id in f_trn.readlines()]
    with open(os.path.join(args['data_path'], 'Taskmaster/TM-1-2019/train-dev-test/dev.csv'), 'r') as f_dev:
        fr_dev_id = [_id.replace('\n', '').replace(',', '') for _id in f_dev.readlines()]
    # NOTE(review): fr_trn_id / fr_dev_id are read but never used below — the
    # train/dev split appears unimplemented; verify against callers.
    with open(os.path.join(args['data_path'], 'Taskmaster/TM-1-2019/woz-dialogs.json'), 'r') as f_woz:
        dials_all = json.load(f_woz)
    with open(os.path.join(args['data_path'], 'Taskmaster/TM-1-2019/self-dialogs.json'), 'r') as f_self:
        dials_all = (dials_all + json.load(f_self))
    # Any 'dial*' example type is handled by the generic dialogue reader.
    _example_type = ('dial' if ('dial' in example_type) else example_type)
    pair_trn = globals()['read_langs_{}'.format(_example_type)](args, dials_all, ds_name, max_line)
    pair_dev = []
    pair_tst = []
    print('Read {} pairs train from {}'.format(len(pair_trn), ds_name))
    print('Read {} pairs valid from {}'.format(len(pair_dev), ds_name))
    print('Read {} pairs test from {}'.format(len(pair_tst), ds_name))
    meta_data = {'num_labels': 0}
    return (pair_trn, pair_dev, pair_tst, meta_data)
class GTestEnvVarTest(gtest_test_utils.TestCase):
    """Verifies that GTEST_* environment variables override flag defaults."""

    def testEnvVarAffectsFlag(self):
        """Each (flag, env value, default) triple is checked in order."""
        for flag, value, default in [
            ('break_on_failure', '1', '0'),
            ('color', 'yes', 'auto'),
            ('filter', 'FooTest.Bar', '*'),
        ]:
            TestFlag(flag, value, default)
        # 'output' must be checked with XML_OUTPUT_FILE cleared, since that
        # variable also feeds the output flag.
        SetEnvVar('XML_OUTPUT_FILE', None)
        TestFlag('output', 'xml:tmp/foo.xml', '')
        for flag, value, default in [
            ('print_time', '0', '1'),
            ('repeat', '999', '1'),
            ('throw_on_failure', '1', '0'),
            ('death_test_style', 'threadsafe', 'fast'),
            ('catch_exceptions', '0', '1'),
        ]:
            TestFlag(flag, value, default)
        if IS_LINUX:
            # Fork-style death tests and stack traces are Linux-only features.
            TestFlag('death_test_use_fork', '1', '0')
            TestFlag('stack_trace_depth', '0', '100')

    def testXmlOutputFile(self):
        """XML_OUTPUT_FILE alone sets the output flag."""
        SetEnvVar('GTEST_OUTPUT', None)
        SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
        AssertEq('xml:tmp/bar.xml', GetFlag('output'))

    def testXmlOutputFileOverride(self):
        """GTEST_OUTPUT wins over XML_OUTPUT_FILE when both are set."""
        SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
        SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
        AssertEq('xml:tmp/foo.xml', GetFlag('output'))
class DataItem():
    """Plain record pairing a sample's (x, y) data with its block and code id."""

    def __init__(self, x, y, block, code_id):
        # Values are stored verbatim; no validation is performed.
        self.x, self.y = x, y
        self.block = block
        self.code_id = code_id
def _compute_fans_stacked(shape):
if (len(shape) < 1):
fan_in = fan_out = 1
elif (len(shape) == 1):
fan_in = fan_out = shape[0]
elif (len(shape) == 2):
fan_in = shape[1]
fan_out = 1
else:
fan_in = shape[(- 2)]
fan_out = shape[(- 1)]
return (fan_in, fan_out) |
class ResNet(SimpleNet):
    """ResNet with a 32-channel stem and four stages (32/64/128/256 planes).

    Input is assumed RGB; the head average-pools with window 4 and applies a
    linear classifier over num_classes outputs.
    """

    def __init__(self, block, num_blocks, num_classes=10, name=None, created_time=None):
        super(ResNet, self).__init__()
        self.in_planes = 32
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        # Stages double the channel count; stages 2-4 also halve resolution.
        self.layer1 = self._make_layer(block, 32, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 64, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 128, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 256, num_blocks[3], stride=2)
        self.linear = nn.Linear((256 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first one uses *stride*."""
        stage = []
        for s in ([stride] + ([1] * (num_blocks - 1))):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), (- 1))
        return self.linear(h)
def require_torch_gpu(test_case):
    """Decorator: skip *test_case* unless a CUDA-capable GPU is available."""
    if torch.cuda.is_available():
        return test_case
    return unittest.skip('test requires GPU')(test_case)
def get_deepnorm_coefficients(encoder_layers: int, decoder_layers: int) -> Tuple[(Optional[DeepNormCoefficients], Optional[DeepNormCoefficients])]:
    """Return the (encoder, decoder) DeepNorm residual/init coefficients.

    Encoder-only or decoder-only stacks get a single pair (the other slot is
    None); encoder-decoder stacks use the coupled formulas from the DeepNet
    paper, with N encoder layers and M decoder layers.
    """
    N, M = encoder_layers, decoder_layers
    if M == 0:
        # Encoder-only: alpha = (2N)^(1/4), beta = (8N)^(-1/4).
        return (DeepNormCoefficients(alpha=((2 * N) ** 0.25), beta=((8 * N) ** (- 0.25))), None)
    if N == 0:
        # Decoder-only: same formulas with the decoder depth.
        return (None, DeepNormCoefficients(alpha=((2 * M) ** 0.25), beta=((8 * M) ** (- 0.25))))
    # Encoder-decoder: encoder coefficients depend on both depths.
    enc = DeepNormCoefficients(alpha=(0.81 * (((N ** 4) * M) ** 0.0625)), beta=(0.87 * (((N ** 4) * M) ** (- 0.0625))))
    dec = DeepNormCoefficients(alpha=((3 * M) ** 0.25), beta=((12 * M) ** (- 0.25)))
    return (enc, dec)
class MBartTokenizerFast(XLMRobertaTokenizerFast):
    """Fast mBART tokenizer.

    mBART does not use a BOS token; sequences are formatted as
    ``X </s> <lang_code>`` where the language code is appended as a suffix
    (prefix_tokens stays empty).
    """
    vocab_files_names = {'vocab_file': 'sentencepiece.bpe.model'}
    max_model_input_sizes = {m: 1024 for m in _all_mbart_models}
    pretrained_vocab_files_map = {'vocab_file': {m: SPM_URL for m in _all_mbart_models}}
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, *args, tokenizer_file=None, **kwargs):
        super().__init__(*args, tokenizer_file=tokenizer_file, **kwargs)
        self.cur_lang_code = self.convert_tokens_to_ids('en_XX')
        self.set_src_lang_special_tokens(kwargs.get('src_lang', 'en_XX'))
        self.add_special_tokens({'additional_special_tokens': FAIRSEQ_LANGUAGE_CODES})

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask marking special tokens in the given sequence(s)."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        prefix_ones = ([1] * len(self.prefix_tokens))
        suffix_ones = ([1] * len(self.suffix_tokens))
        if (token_ids_1 is None):
            return ((prefix_ones + ([0] * len(token_ids_0))) + suffix_ones)
        return (((prefix_ones + ([0] * len(token_ids_0))) + ([0] * len(token_ids_1))) + suffix_ones)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap the id sequence(s) with mBART's prefix/suffix special tokens."""
        if (token_ids_1 is None):
            return ((self.prefix_tokens + token_ids_0) + self.suffix_tokens)
        return (((self.prefix_tokens + token_ids_0) + token_ids_1) + self.suffix_tokens)

    # FIX: restored decorator syntax — this call previously sat on its own
    # line inside the class body, so the returned docstring decorator was
    # discarded and never applied to prepare_seq2seq_batch.
    @_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str='en_XX', tgt_texts: Optional[List[str]]=None, tgt_lang: str='ro_RO', max_length: Optional[int]=None, max_target_length: Optional[int]=None, truncation: bool=True, padding: str='longest', return_tensors: str=None, **kwargs) -> BatchEncoding:
        if (max_length is None):
            max_length = self.model_max_length
        self.set_src_lang_special_tokens(src_lang)
        model_inputs: BatchEncoding = self(src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is None):
            max_target_length = max_length
        # Temporarily switch to target-language special tokens to encode labels.
        self.set_tgt_lang_special_tokens(tgt_lang)
        labels = self(tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=True, **kwargs)['input_ids']
        model_inputs['labels'] = labels
        # Restore source-language special tokens before returning.
        self.set_src_lang_special_tokens(src_lang)
        return model_inputs

    def _set_lang_special_tokens(self, lang_code_token: str) -> None:
        """Reset prefix/suffix tokens for *lang_code_token* and rebuild the
        template post-processor accordingly (shared by src/tgt setters)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang_code_token)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(single=((prefix_tokens_str + ['$A']) + suffix_tokens_str), pair=((prefix_tokens_str + ['$A', '$B']) + suffix_tokens_str), special_tokens=list(zip((prefix_tokens_str + suffix_tokens_str), (self.prefix_tokens + self.suffix_tokens))))

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Format: no prefix, suffix = [eos, src_lang_code]."""
        self._set_lang_special_tokens(src_lang)

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Format: no prefix, suffix = [eos, tgt_lang_code]."""
        self._set_lang_special_tokens(lang)
def _isotropy_on_leaf(r_ei_leaf: Array, norbitals: int, kernel_initializer: WeightInitializer) -> Array:
    """Apply a learned per-orbital isotropic (scalar) scaling to a displacement leaf.

    The ion axis is swapped in front of the displacement axis, an orbital axis
    of size *norbitals* is broadcast on, the element-wise multiply layer is
    applied, and the orbital axis is swapped back into position.
    """
    swapped = jnp.swapaxes(r_ei_leaf, axis1=(- 1), axis2=(- 2))
    # Add a trailing orbital axis and tile the inputs across it.
    tiled = jnp.broadcast_to(jnp.expand_dims(swapped, axis=(- 1)), (*swapped.shape, norbitals))
    scaled = ElementWiseMultiply(1, kernel_initializer)(tiled)
    return jnp.swapaxes(scaled, axis1=(- 1), axis2=(- 3))
class _AsyncEventLoop():
    """Runs the Open3D GUI event loop on a dedicated thread and lets other
    threads execute GUI calls synchronously via run_sync()."""

    class _Task():
        # Monotonically increasing id shared by all tasks; incremented while
        # the event loop's lock is held by run_sync().
        _g_next_id = 0

        def __init__(self, func, *args, **kwargs):
            self.task_id = self._g_next_id
            self.func = (func, args, kwargs)
            _AsyncEventLoop._Task._g_next_id += 1

    def __init__(self):
        o3d.utility.reset_print_function()
        self._lock = threading.Lock()
        # The condition shares the same lock, so the queue and the return-value
        # map are guarded by a single mutex.
        self._cv = threading.Condition(self._lock)
        self._run_queue = deque()  # pending _Task objects
        self._return_vals = {}     # task_id -> return value
        self._started = False
        self._start()

    def _start(self):
        # Spawn the GUI thread exactly once.
        if (not self._started):
            self._thread = threading.Thread(name='GUIMain', target=self._thread_main)
            self._thread.start()
            self._started = True

    def run_sync(self, func, *args, **kwargs):
        """Queue func(*args, **kwargs) on the GUI thread and block for its result."""
        if (not self._started):
            raise RuntimeError('GUI thread has exited.')
        with self._lock:
            task = _AsyncEventLoop._Task(func, *args, **kwargs)
            self._run_queue.append(task)
        while True:
            # Wait until the GUI thread publishes this task's return value,
            # then pop it under the lock. (The cv releases the shared lock
            # while waiting, so the GUI thread can make progress.)
            with self._cv:
                self._cv.wait_for((lambda : (task.task_id in self._return_vals)))
            with self._lock:
                return self._return_vals.pop(task.task_id)

    def _thread_main(self):
        """GUI thread body: drain queued tasks, then tick the Open3D app."""
        app = o3d.visualization.gui.Application.instance
        app.initialize()
        done = False
        while (not done):
            while (len(self._run_queue) > 0):
                with self._lock:
                    task = self._run_queue.popleft()
                (func, args, kwargs) = task.func
                # The task runs outside the lock so GUI work can't deadlock
                # with callers queuing more tasks.
                retval = func(*args, **kwargs)
                with self._cv:
                    self._return_vals[task.task_id] = retval
                    self._cv.notify_all()
            # One iteration of the GUI event loop; False means the app quit.
            done = (not app.run_one_tick())
        self._started = False
# FIX: the original line here was the bare expression `(nopython=True)`,
# which is a SyntaxError — almost certainly the remnant of a lost JIT
# decorator (e.g. `@numba.jit(nopython=True)`). Restore the real decorator
# once the original numba import is confirmed.
def diagonal_update(spins, op_string, bonds, beta):
    """One diagonal-update sweep of a stochastic series expansion (SSE) QMC.

    Walks the operator string: identity slots (-1) propose inserting a
    diagonal bond operator (code 2*b) on a randomly chosen anti-parallel bond;
    diagonal slots (even codes) propose removal; off-diagonal slots (odd
    codes) propagate the spin state by flipping the two bond spins.

    Args:
        spins: 1-D array of +/-1 spins (mutated in place).
        op_string: operator string of length M; -1 = identity (mutated in place).
        bonds: (n_bonds, 2) array of the two site indices of each bond.
        beta: inverse temperature.

    Returns:
        Updated count n of non-identity operators in the string.
    """
    n_bonds = bonds.shape[0]
    M = op_string.shape[0]
    n = np.sum((op_string != (- 1)))
    # Shared Metropolis weight ratio for insert/remove proposals.
    prob_ratio = ((0.5 * beta) * n_bonds)
    for p in range(M):
        op = op_string[p]
        if (op == (- 1)):
            # Identity: try to insert a diagonal operator on a random bond;
            # only anti-parallel bonds carry diagonal weight.
            b = np.random.randint(0, n_bonds)
            if (spins[bonds[(b, 0)]] != spins[bonds[(b, 1)]]):
                prob = (prob_ratio / (M - n))
                if (np.random.rand() < prob):
                    op_string[p] = (2 * b)
                    n += 1
        elif (np.mod(op, 2) == 0):
            # Diagonal operator: try to remove it.
            prob = ((1 / prob_ratio) * ((M - n) + 1))
            if (np.random.rand() < prob):
                op_string[p] = (- 1)
                n -= 1
        else:
            # Off-diagonal operator: propagate the state past it.
            b = (op // 2)
            spins[bonds[(b, 0)]] = (- spins[bonds[(b, 0)]])
            spins[bonds[(b, 1)]] = (- spins[bonds[(b, 1)]])
    return n
def bind_optional(x: (T | None), f: Callable[([T], U)]) -> (U | None):
    """Apply *f* to *x* unless *x* is None (the Option/Maybe monadic bind)."""
    if x is None:
        return None
    return f(x)
def get_args_parser():
    """Build the CLI argument parser for image-retrieval ViT training.

    Returns an argparse.ArgumentParser; call parse_args() on the result.
    """
    parser = argparse.ArgumentParser('Training Vision Transformers for Image Retrieval', add_help=False)
    # --- model ---
    parser.add_argument('--model', default='deit_small_distilled_patch16_224',
                        type=str, help='Name of model to train')
    parser.add_argument('--input-size', default=224, type=int,
                        help='images input size')
    parser.add_argument('--drop', type=float, default=0.0,
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    # --- optimization ---
    parser.add_argument('--max-iter', default=2000, type=int)
    parser.add_argument('--batch-size', default=64, type=int)
    parser.add_argument('--lr', type=float, default=3e-05,
                        help='learning rate (3e-5 for category level)')
    parser.add_argument('--opt', default='adamw', type=str,
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt-eps', default=1e-08, type=float,
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None,
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.0005,
                        help='weight decay (default: 5e-4)')
    # --- data ---
    parser.add_argument('--dataset', default='cub200',
                        choices=['cub200', 'sop', 'inshop'], type=str,
                        help='dataset path')
    parser.add_argument('--data-path', default='/data/CUB_200_2011', type=str,
                        help='dataset path')
    parser.add_argument('--m', default=0, type=int,
                        help='sample m images per class')
    parser.add_argument('--rank', default=[1, 2, 4, 8], nargs='+', type=int,
                        help='compute ')
    parser.add_argument('--num-workers', default=16, type=int)
    parser.add_argument('--pin-mem', action='store_true')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # --- loss / memory bank ---
    parser.add_argument('--lambda-reg', type=float, default=0.7,
                        help='regularization strength')
    parser.add_argument('--margin', type=float, default=0.5,
                        help='negative margin of contrastive loss(beta)')
    parser.add_argument('--memory-ratio', type=float, default=1.0,
                        help='size of the xbm queue')
    parser.add_argument('--encoder-momentum', type=float, default=None,
                        help='momentum for the key encoder (0.999 for In-Shop dataset)')
    # --- logging / misc ---
    parser.add_argument('--logging-freq', type=int, default=50)
    parser.add_argument('--output-dir', default='./outputs',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log-dir', default='./logs',
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda:0',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    return parser
class TFXLMRobertaForMultipleChoice(metaclass=DummyObject):
    """Auto-generated placeholder for when the TensorFlow backend is absent.

    Instantiating it calls requires_backends, which raises with an
    installation hint when the 'tf' backend is missing.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def compute_density(user_product_graph, product_user_graph, c, t):
    """Compute a per-user density score after virtually adding a review of *t*.

    Each user in *c* gets a synthetic review of product *t* added to deep
    copies of both graphs; density(u) = deg(u) / sum of degrees of the
    products u reviewed. The input graphs are not modified.
    """
    # Work on copies so the caller's graphs stay untouched.
    aux_user_graph = copy.deepcopy(user_product_graph)
    aux_prod_graph = copy.deepcopy(product_user_graph)
    # Inject the hypothetical review (rating 1, unknown label, fixed date).
    for u in c:
        aux_user_graph[u].append((t, 1, (- 1), '2012-06-01'))
        aux_prod_graph[t].append((u, 1, (- 1), '2012-06-01'))
    density = {}
    for u in c:
        user_degree = len(aux_user_graph[u])
        # review[0] is the product id of each of u's reviews.
        prod_degree = sum(len(aux_prod_graph[review[0]]) for review in aux_user_graph[u])
        density[u] = user_degree / prod_degree
    return density
class NN_MBE_Linear():
    """Linear many-body expansion (MBE) evaluator backed by a TensorMol network.

    Sums network predictions over monomer/dimer/trimer fragments, each
    weighted by its MBE coefficient, to assemble the total energy, gradient
    and atomic charges of the full molecule.
    """

    def __init__(self, tfm_=None):
        # Expansion order (1..3), read from the global PARAMS dict.
        self.mbe_order = PARAMS['MBE_ORDER']
        # Network manager used to evaluate fragment batches.
        self.nn_mbe = tfm_
        self.max_num_frags = None
        self.nnz_frags = None
        return

    def EnergyForceDipole(self, N_MB):
        """Return (E_mbe, gradient_mbe, atom_charge_mbe) for fragmented molecule *N_MB*.

        N_MB is expected to expose fragment arrays (singz/sings/singC/singI,
        pairz/..., tripz/...), fragment counts nf/npair/ntrip and total atom
        count nt.
        """
        eval_set = MSet('TmpMBESet')
        MBE_C = []      # MBE coefficient of each fragment
        MBE_Index = []  # atom indices of each fragment within the full molecule
        NAtom = []      # true (non-padded) atom count of each fragment
        # Upper bound on the fragment count: singles + pairs + triples.
        # NOTE(review): true division makes this a float under Python 3, which
        # would make the range() below raise — verify / use // instead.
        self.max_num_frags = ((N_MB.nf + ((N_MB.nf * (N_MB.nf - 1)) / 2)) + (((N_MB.nf * (N_MB.nf - 1)) * (N_MB.nf - 2)) / 6))
        # Collect monomer fragments.
        if (self.mbe_order >= 1):
            for i in range(N_MB.nf):
                natom = np.count_nonzero(N_MB.singz[i])
                NAtom.append(natom)
                eval_set.mols.append(Mol(N_MB.singz[i][:natom], N_MB.sings[i][:natom]))
                MBE_C.append(N_MB.singC[i])
                MBE_Index.append(N_MB.singI[i])
        # Collect dimer fragments.
        if (self.mbe_order >= 2):
            for i in range(N_MB.npair):
                natom = np.count_nonzero(N_MB.pairz[i])
                NAtom.append(natom)
                eval_set.mols.append(Mol(N_MB.pairz[i][:natom], N_MB.pairs[i][:natom]))
                MBE_C.append(N_MB.pairC[i])
                MBE_Index.append(N_MB.pairI[i])
        # Collect trimer fragments.
        if (self.mbe_order >= 3):
            for i in range(N_MB.ntrip):
                natom = np.count_nonzero(N_MB.tripz[i])
                NAtom.append(natom)
                eval_set.mols.append(Mol(N_MB.tripz[i][:natom], N_MB.trips[i][:natom]))
                MBE_C.append(N_MB.tripC[i])
                MBE_Index.append(N_MB.tripI[i])
        if (self.mbe_order >= 4):
            raise Exception('Linear MBE only implemented up to order 3')
        MBE_C = np.asarray(MBE_C)
        self.nnz_frags = MBE_C.shape[0]
        # Pad the set with single-atom dummy molecules up to max_num_frags so
        # the network always sees the same batch size.
        for dummy_index in range(self.nnz_frags, self.max_num_frags):
            eval_set.mols.append(Mol(np.zeros(1, dtype=np.uint8), np.zeros((1, 3), dtype=float)))
        (Etotal, Ebp, Ecc, mol_dipole, atom_charge, gradient) = self.nn_mbe.EvalBPDirectEESet(eval_set, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'])
        print(('Etotal:', Etotal, ' self.nnz_frags:', self.nnz_frags))
        # Coefficient-weighted sums over the real (non-dummy) fragments only.
        E_mbe = np.sum((Etotal[:self.nnz_frags] * MBE_C))
        gradient_mbe = np.zeros((N_MB.nt, 3))
        atom_charge_mbe = np.zeros(N_MB.nt)
        for (i, index) in enumerate(MBE_Index):
            gradient_mbe[index] += (gradient[i][:NAtom[i]] * MBE_C[i])
            atom_charge_mbe[index] += (atom_charge[i][:NAtom[i]] * MBE_C[i])
        return (E_mbe, gradient_mbe, atom_charge_mbe)
class FDA4(FDA):
    """FDA4 dynamic multi-objective benchmark (3 objectives, spherical front).

    The time-dependent optimum G(t) = |sin(0.5*pi*t)| shifts the distance
    variables; objectives are products of cos/sin terms over the first
    M-1 position variables.
    """
    M = 3

    def __init__(self, number_of_variables: int=12):
        super(FDA4, self).__init__()
        self.number_of_variables = number_of_variables
        self.number_of_objectives = 3
        self.number_of_constraints = 0
        # FIX: previously only two directions were listed for a
        # three-objective problem; keep one entry per objective.
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)', 'f(z)']
        self.lower_bound = (self.number_of_variables * [0.0])
        self.upper_bound = (self.number_of_variables * [1.0])

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Fill solution.objectives in place and return the solution."""
        g = self.__eval_g(solution, (self.M - 1))
        solution.objectives[0] = self.__eval_f1(solution, g)
        solution.objectives[1] = self.__eval_fk(solution, g, 2)
        solution.objectives[2] = self.__eval_fm(solution, g)
        return solution

    def __eval_g(self, solution: FloatSolution, lower_limit: int):
        # Distance function: squared deviation of the tail variables from G(t).
        gt = abs(sin(((0.5 * pi) * self.time)))
        g = sum([pow((v - gt), 2) for v in solution.variables[lower_limit:]])
        return g

    def __eval_f1(self, solution: FloatSolution, g: float) -> float:
        f = (1.0 + g)
        mult = numpy.prod([cos(((v * pi) / 2.0)) for v in solution.variables[:(self.M - 1)]])
        return (f * mult)

    def __eval_fk(self, solution: FloatSolution, g: float, k: int) -> float:
        f = (1.0 + g)
        aux = sin(((solution.variables[(self.M - k)] * pi) / 2.0))
        mult = numpy.prod([cos(((v * pi) / 2.0)) for v in solution.variables[:(self.M - k)]])
        return ((f * mult) * aux)

    def __eval_fm(self, solution: FloatSolution, g: float) -> float:
        fm = (1.0 + g)
        fm *= sin(((solution.variables[0] * pi) / 2.0))
        return fm

    def get_name(self):
        return 'FDA4'
def main():
    """Run the SiamRPN tracker over every video in the dataset and report mean FPS."""
    global args, v_id
    args = parser.parse_args()
    net = SiamRPNotb()
    # Pretrained weights are stored next to this script.
    net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
    net.eval().cuda()
    dataset = load_dataset(args.dataset)
    fps_list = []
    # v_id is a module-level global consumed by track_video.
    for (v_id, video) in enumerate(dataset.keys()):
        fps_list.append(track_video(net, dataset[video]))
    print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
def gen_name_from_header(header_array, header_order):
    """Build a comma-separated "key=value" run name from header fields.

    Args:
        header_array: sequence of header values.
        header_order: mapping from field name to its index in header_array.
    """
    fields = ['article_sections', 'ico_encoder', 'article_encoder', 'attn',
              'cond_attn', 'tokenwise_attention', 'data_config', 'pretrain_attention']
    return ','.join(f'{field}={header_array[header_order[field]]}' for field in fields)
def quantize_nparray(qtype, arr, scale, zero_point, low=None, high=None):
    """Quantize a float array to uint8 or (symmetric) int8.

    Values are scaled, rounded, shifted by *zero_point* and clipped to the
    intersection of the dtype's range ([0,255] or [-127,127]) and the
    optional [low, high] bounds.
    """
    dtype = np.uint8 if (qtype == 'uint8') else np.int8
    # Note: the int8 range is kept symmetric at [-127, 127].
    type_low, type_high = ((0, 255) if (dtype == np.uint8) else (-127, 127))
    cliplow = max(type_low, -127 if low is None else low)
    cliphigh = min(type_high, 255 if high is None else high)
    scaled = np.asarray((arr.astype(np.float32) / scale).round() + zero_point)
    np.clip(scaled, cliplow, cliphigh, out=scaled)
    return scaled.astype(dtype)
def get_torsion_energy(m):
    """Return the MMFF torsion-only energy of molecule *m* (0.0 if typing fails)."""
    props = ChemicalForceFields.MMFFGetMoleculeProperties(m)
    if props is None:
        # Molecule could not be typed by MMFF; report zero energy.
        return 0.0
    # Enable only the torsion term; switch every other force-field term off.
    for term in ('Bond', 'Angle', 'StretchBend', 'Torsion', 'Oop', 'VdW', 'Ele'):
        getattr(props, f'SetMMFF{term}Term')(term == 'Torsion')
    ff = rdForceFieldHelpers.MMFFGetMoleculeForceField(m, props)
    return ff.CalcEnergy()
def test_isotropic_hernquist_meanvr_directint():
    """Check via direct integration that <v_r> of the isotropic Hernquist DF vanishes."""
    pot = potential.HernquistPotential(amp=2.3, a=1.3)
    dfh = isotropicHernquistdf(pot=pot)
    tol = 1e-08
    # Integrate between 0.1 and 10 scale radii; beta=0 because the DF is isotropic.
    rmin = pot._scale / 10.0
    rmax = pot._scale * 10.0
    check_meanvr_directint(dfh, pot, tol, beta=0.0, rmin=rmin, rmax=rmax, bins=31)
    return None
class Encoder():
    """Interface for encoders supporting (right-)multiplication.

    Subclasses must implement both __mul__ (encoder * x) and
    __rmul__ (x * encoder).
    """

    def __init__(self):
        pass

    def __mul__(self, x: Any):
        raise NotImplementedError

    def __rmul__(self, x: Any):
        raise NotImplementedError
def test_center_to_corner_box2d():
    """Regression test: corners of one 2-D box rotated by ~ -pi match a
    precomputed fixture."""
    from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d
    center = np.array([[9.348705, (- 3.6271024)]])
    dims = np.array([[0.47, 0.98]])
    # Yaw close to -pi, i.e. the box is almost flipped 180 degrees.
    angles = np.array([(- 3.14)])
    corner = center_to_corner_box2d(center, dims, angles)
    expected_corner = np.array([[[9.584485, (- 3.1374772)], [9.582925, (- 4.117476)], [9.112926, (- 4.1167274)], [9.114486, (- 3.1367288)]]])
    assert np.allclose(corner, expected_corner)
def get_process_cpu_percent():
    """Return aggregate CPU usage of all processes as a fraction in [0, 1].

    Sums per-process CPU percentages, normalizes by the logical CPU count,
    and falls back to 0.0 on any psutil failure (best-effort by design).
    """
    try:
        # FIX: removed the 'result'/'proc_info' structures that were built per
        # process but never returned or read — dead work on a hot path.
        total_percent = 0.0
        for proc in psutil.process_iter(['pid', 'ppid', 'name', 'username', 'cmdline']):
            total_percent += proc.cpu_percent()
        cpu_count = psutil.cpu_count(logical=True)
        cpu_percent = round((total_percent / cpu_count), 2)
    except Exception:
        # Deliberate best-effort: report 0.0 rather than crash the caller.
        cpu_percent = 0.0
    return (cpu_percent / 100.0)
# FIX: restored decorator syntax — the original source had the bare call
# `_module()` on its own line, which builds a registering decorator and
# discards it, so the class was never registered.
# NOTE(review): the decorator name looks truncated by a source transform
# (e.g. a registry's `register_module()`); confirm against the real registry.
@_module()
class HeadMixin():
    """Mixin adding loss computation and boundary post-processing to text-detection heads."""

    def __init__(self, loss, postprocessor):
        assert isinstance(loss, dict)
        assert isinstance(postprocessor, dict)
        self.loss_module = build_loss(loss)
        self.postprocessor = build_postprocessor(postprocessor)

    def resize_boundary(self, boundaries, scale_factor):
        """Rescale boundary polygons in place; the last element of each
        boundary is its confidence score and is left untouched."""
        assert check_argument.is_2dlist(boundaries)
        assert isinstance(scale_factor, np.ndarray)
        assert (scale_factor.shape[0] == 4)
        for b in boundaries:
            sz = len(b)
            check_argument.valid_boundary(b, True)
            # Multiply each (x, y) pair by the (w, h) scale factor.
            b[:(sz - 1)] = (np.array(b[:(sz - 1)]) * np.tile(scale_factor[:2], int(((sz - 1) / 2))).reshape(1, (sz - 1))).flatten().tolist()
        return boundaries

    def get_boundary(self, score_maps, img_metas, rescale):
        """Post-process score maps into text boundaries, optionally rescaled
        back to the original image size."""
        assert check_argument.is_type_list(img_metas, dict)
        assert isinstance(rescale, bool)
        score_maps = score_maps.squeeze()
        boundaries = self.postprocessor(score_maps)
        if rescale:
            boundaries = self.resize_boundary(boundaries, ((1.0 / self.downsample_ratio) / img_metas[0]['scale_factor']))
        results = dict(boundary_result=boundaries, filename=img_metas[0]['filename'])
        return results

    def loss(self, pred_maps, **kwargs):
        """Delegate loss computation to the configured loss module."""
        losses = self.loss_module(pred_maps, self.downsample_ratio, **kwargs)
        return losses
class vgg16bn(torch.nn.Module):
    """Truncated VGG16-BN feature extractor.

    Keeps feature layers 0-32 and 34-42 of torchvision's vgg16_bn
    (layer 33 is skipped), wrapped in a single Sequential.
    """

    def __init__(self, pretrained=False):
        super(vgg16bn, self).__init__()
        feature_layers = list(torchvision.models.vgg16_bn(pretrained=pretrained).features.children())
        kept_layers = feature_layers[:33] + feature_layers[34:43]
        self.model = torch.nn.Sequential(*kept_layers)

    def forward(self, x):
        """Apply the truncated VGG feature stack to ``x``."""
        return self.model(x)
class Emomusic(Dataset):
    """Emomusic dataset of 45-second mp3 clips with per-track labels.

    Args:
        root: path to the extracted dataset directory; must contain
            ``emomusic.json`` and the ``clips_45seconds`` folder.
        audio_transform: accepted for interface compatibility but unused
            in this implementation.
        subset: 'training', 'validation', 'testing', or None for all tracks.
    """
    _ext_audio = '.mp3'

    def __init__(self, root: Union[(str, Path)], audio_transform: Callable=None, subset: Optional[str]='training') -> None:
        super().__init__()
        self.subset = subset
        assert ((subset is None) or (subset in ['training', 'validation', 'testing'])), ('When `subset` not None, it must take a value from ' + "{'training', 'validation', 'testing'}.")
        self._path = os.fspath(root)
        if (not os.path.isdir(self._path)):
            raise RuntimeError('Dataset not found. Please use `download=True` to download it.')
        self.annotations = parse_annotation_file(os.path.join(self._path, 'emomusic.json'))
        # Map public subset names onto the split tags stored in the annotations.
        split_by_subset = {'training': 'train', 'validation': 'valid', 'testing': 'test'}
        if (self.subset is None):
            # Fix: subset=None is accepted by the assert above but previously
            # left `file_list` unset, crashing later in __len__/__getitem__.
            self.file_list = list(self.annotations)
        else:
            split = split_by_subset[self.subset]
            self.file_list = [i for i in self.annotations if (self.annotations[i]['split'] == split)]
        # (Fix: a verbatim duplicate of the os.path.isdir check was removed.)

    def load_audio(self, audio_id):
        """Return (mono waveform truncated to 480000 samples, sample_rate)."""
        path_id = self.annotations[audio_id]['track_id']
        path_to_audio = os.path.join(self._path, 'clips_45seconds', (path_id + Emomusic._ext_audio))
        (waveform, sample_rate) = torchaudio.load(path_to_audio)
        # Downmix all channels to mono.
        waveform = torch.mean(waveform, dim=0)
        if (sample_rate != 16000):
            # NOTE(review): `resample` is called without an explicit target
            # rate and the original `sample_rate` is still returned — confirm
            # the helper defaults to 16 kHz.
            waveform = resample(waveform, sample_rate)
        # 480000 samples == 30 s at 16 kHz.
        return (waveform[:480000], sample_rate)

    def get_label(self, audio_id):
        """Return the annotation labels for ``audio_id`` as a tensor."""
        label = self.annotations[audio_id]['labels']
        return torch.tensor(label)

    def __len__(self) -> int:
        return len(self.file_list)

    def __getitem__(self, n: int) -> Tuple[(Tensor, Tensor)]:
        """Return (waveform, label) for the n-th track of the subset.

        Fix: the original annotation claimed Tuple[Tensor, int, str] but the
        method returns a 2-tuple of tensors.
        """
        audio_id = self.file_list[n]
        (waveform, sample_rate) = self.load_audio(audio_id)
        waveform = waveform.squeeze()
        label = self.get_label(audio_id)
        return (waveform, label)

    def num_classes(cls):
        # NOTE(review): declared without @classmethod although the parameter
        # is named `cls` — a decorator was likely stripped; callers presumably
        # invoke it on an instance, so the decorator is not restored here.
        return 2
class TableTransformerConfig(PretrainedConfig):
    """Configuration for Table Transformer (DETR-style) models.

    Stores encoder/decoder dimensions, backbone selection, and the loss
    coefficients used during training.
    """
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    # Standard HF attribute names mapped onto this config's native names.
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        # `backbone_config` (HF backbone) and `use_timm_backbone` are
        # mutually exclusive ways to select a backbone.
        if ((backbone_config is not None) and use_timm_backbone):
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if (not use_timm_backbone):
            if (backbone_config is None):
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                # Rebuild a typed config object from a plain dict.
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # timm-specific options are meaningless without a timm backbone.
            (dilation, backbone, use_pretrained_backbone) = (None, None, None)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian-matcher costs and loss-term weights.
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        # Parent init last so remaining **kwargs are handled by PretrainedConfig.
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    def num_attention_heads(self) -> int:
        # NOTE(review): defined without @property; given `attribute_map`,
        # upstream likely declares this as a property — confirm before relying
        # on attribute-style access.
        return self.encoder_attention_heads

    def hidden_size(self) -> int:
        # NOTE(review): same as above — likely intended as @property.
        return self.d_model
class SchemaField(namedtuple('SchemaField', ('feature_type', 'dtype', 'shape'))):
    """Immutable description of one dataset field: feature type, dtype, shape."""

    def to_dict(self) -> Dict[(str, Any)]:
        """Serialize to a plain dict (inverse of :meth:`from_dict`)."""
        return {'feature_type': self.feature_type, 'dtype': self.dtype, 'shape': self.shape}

    @classmethod
    def from_dict(cls, d: 'Dict[str, Union[FeatureType, DType, List[int]]]') -> 'SchemaField':
        """Rebuild a SchemaField from the output of :meth:`to_dict`.

        Fix: decorated with @classmethod — previously calling
        ``SchemaField.from_dict(d)`` bound the dict to ``cls`` and crashed.
        (The parameter annotation is quoted as a forward reference so it is
        not evaluated at definition time.)
        """
        return cls(**d)
def _bytes_feature_list(values):
    """Wrap each value in a bytes Feature and collect them into a tf FeatureList."""
    return tf.train.FeatureList(feature=list(map(_bytes_feature, values)))
def test_model(model_range: Union[(int, tuple)]):
    """Evaluate saved checkpoint(s) on every configured test set.

    Args:
        model_range: a single checkpoint id (int), or an inclusive
            ``(start, end)`` tuple evaluated every ``configs.save_interval``
            steps.

    Fix: the int and tuple branches previously duplicated ~25 lines of
    evaluation code verbatim; that logic is factored into one helper.
    """
    network = Network()
    network.eval()
    network.to(device)
    test_set = configs.test_env_settings
    pool = mp.Pool(mp.cpu_count())

    def _evaluate_checkpoint(model_name):
        # Load the checkpoint's weights and run every test case in parallel.
        state_dict = torch.load('./models/{}.pth'.format(model_name), map_location=device)
        network.load_state_dict(state_dict)
        network.eval()
        network.share_memory()
        print('test model {}'.format(model_name))
        for case in test_set:
            print('test set: {} length {} agents {} density'.format(case[0], case[1], case[2]))
            with open('./test_set/{}length_{}agents_{}density.pth'.format(case[0], case[1], case[2]), 'rb') as f:
                tests = pickle.load(f)
            tests = [(test, network) for test in tests]
            ret = pool.map(test_one_case, tests)
            success = 0
            avg_step = 0
            for (i, j) in ret:
                success += i
                avg_step += j
            print('success rate: {:.2f}%'.format((success / len(ret)) * 100))
            print('average step: {}'.format(avg_step / len(ret)))
            print()

    if isinstance(model_range, int):
        _evaluate_checkpoint(model_range)
    elif isinstance(model_range, tuple):
        for model_name in range(model_range[0], model_range[1] + 1, configs.save_interval):
            _evaluate_checkpoint(model_name)
        print('\n')
class AutoTCN(BaseAutomodel):
    """Automated hyper-parameter search wrapper for the TCN forecaster."""

    def __init__(self, input_feature_num, output_target_num, past_seq_len, future_seq_len, optimizer, loss, metric, metric_mode=None, hidden_units=None, levels=None, num_channels=None, kernel_size=7, lr=0.001, dropout=0.2, backend='torch', logs_dir='/tmp/auto_tcn', cpus_per_trial=1, name='auto_tcn', remote_dir=None):
        # Hyper-parameter search space handed to the underlying estimator.
        self.search_space = dict(
            input_feature_num=input_feature_num,
            output_feature_num=output_target_num,
            past_seq_len=past_seq_len,
            future_seq_len=future_seq_len,
            nhid=hidden_units,
            levels=levels,
            num_channels=num_channels,
            kernel_size=kernel_size,
            lr=lr,
            dropout=dropout,
        )
        self.metric = metric
        self.metric_mode = metric_mode
        self.backend = backend
        self.optimizer = optimizer
        self.loss = loss
        self._auto_est_config = dict(
            logs_dir=logs_dir,
            resources_per_trial={'cpu': cpus_per_trial},
            remote_dir=remote_dir,
            name=name,
        )
        # Select the model factory matching the requested backend.
        if self.backend.startswith('torch'):
            from bigdl.chronos.model.tcn import model_creator
        elif self.backend.startswith('keras'):
            from bigdl.chronos.model.tf2.TCN_keras import model_creator
        else:
            from bigdl.nano.utils.common import invalidInputError
            invalidInputError(False, f'We only support keras and torch as backend, but got {self.backend}')
        self._model_creator = model_creator
        super().__init__()
def dobldobl_usolve(pol, mxi, eps):
    """Solve a univariate polynomial in double-double precision.

    Returns a (number_of_iterations, roots) tuple.
    """
    from phcpy.phcpy2c3 import py2c_usolve_dobldobl
    from phcpy.interface import store_dobldobl_system, load_dobldobl_solutions
    store_dobldobl_system([pol])
    iteration_count = py2c_usolve_dobldobl(mxi, eps)
    roots = load_dobldobl_solutions()
    return (iteration_count, roots)
class Government(BaseEntity):
    """Government agent entity with a continuous action space in [-1, 1]."""
    name = 'government'

    def __init__(self, entity_args):
        super().__init__()
        self.entity_args = entity_args
        self.reset()
        # Action dimensionality comes from the entity configuration.
        self.action_dim = entity_args['action_shape']
        self.action_space = Box(low=-1, high=1, shape=(self.action_dim,), dtype=np.float32)

    def reset(self, **custom_cfg):
        """No per-episode state to restore."""
        pass
def create_aspect_ratio_groups(dataset, k=0):
    """Assign each dataset sample to an aspect-ratio bin.

    With k > 0, uses 2k+1 bins log-spaced in [0.5, 2]; otherwise a single
    split at ratio 1.0. Returns the per-sample group ids.
    """
    aspect_ratios = compute_aspect_ratios(dataset)
    if k > 0:
        bins = (2 ** np.linspace(-1, 1, 2 * k + 1)).tolist()
    else:
        bins = [1.0]
    groups = _quantize(aspect_ratios, bins)
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    print('Using {} as bins for aspect ratio quantization'.format(fbins))
    print('Count of instances per bin: {}'.format(counts))
    return groups
class ImpalaBlock(nn.Module):
    """IMPALA CNN block: conv -> 3x3/stride-2 max-pool -> two residual blocks."""

    def __init__(self, in_channels, out_channels):
        super(ImpalaBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        # Fix: instantiate the (parameter-free) pooling module once here
        # instead of constructing a fresh nn.MaxPool2d on every forward pass.
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res1 = ResidualBlock(out_channels)
        self.res2 = ResidualBlock(out_channels)

    def forward(self, x):
        """Run conv, pooling, and both residual blocks on ``x``."""
        x = self.conv(x)
        x = self.pool(x)
        x = self.res1(x)
        x = self.res2(x)
        return x
def to_pytorch_func(tvm_func):
    """Wrap a TVM function so it accepts and returns torch Tensors via DLPack."""
    import torch
    import torch.utils.dlpack
    wrapped = convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)
    return wrapped
def create_explanation(topics):
    """Build a one-sentence explanation listing bold-formatted topics.

    Joins all but the last topic with commas and appends the last with
    ' and '. Expects a non-empty list of topic strings.
    """
    bolded = ['**{}**'.format(topic) for topic in topics]
    final_topic = bolded.pop()
    joined = ', '.join(bolded)
    if joined:
        joined = joined + ' and ' + final_topic
    else:
        joined = final_topic
    return 'This article seems to be about {}.'.format(joined)
class DebertaForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when the 'torch' backend is unavailable.

    Instantiation calls ``requires_backends`` — presumably raising an
    informative installation error (standard dummy-object convention).
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def dump_cfg(cfg, logdir):
    """Serialize ``cfg`` as YAML into <logdir>/config.yaml and report the path."""
    out_f = os.path.join(logdir, 'config.yaml')
    yaml_text = OmegaConf.to_yaml(cfg)
    with open(out_f, 'w') as cfg_file:
        cfg_file.write(yaml_text)
    print('Wrote config to: {}'.format(out_f))
def setup_logging(output_dir=None):
    """Configure root logging on the master process; silence all others.

    Non-master processes get print suppression and an EmptyLogger back.
    On the master, a stdout handler is attached and, when ``output_dir`` is
    given, a file handler writing to stdout.log.
    """
    _FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
    if du.is_master_proc():
        # Drop any handlers installed by earlier basicConfig calls.
        logging.root.handlers = []
    else:
        _suppress_print()
        return EmptyLogger('ignore')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.propagate = False
    formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(filename)s: %(lineno)3d: %(message)s', datefmt='%m/%d %H:%M:%S')
    if du.is_master_proc():
        console_handler = logging.StreamHandler(stream=sys.stdout)
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        root_logger.addHandler(console_handler)
    if (output_dir is not None) and du.is_master_proc(du.get_world_size()):
        file_handler = logging.FileHandler(os.path.join(output_dir, 'stdout.log'))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        root_logger.addHandler(file_handler)
def add_args(parser: ArgumentParser):
    """Register all model/scaling CLI options on ``parser`` and return it."""
    # (flag, add_argument options) in registration order.
    flag_specs = [
        ('--dynamic-linear', {'action': 'store_true'}),
        ('--dynamic-ntk', {'type': float}),
        ('--dynamic-part-ntk', {'action': 'store_true'}),
        ('--dynamic-yarn', {'action': 'store_true'}),
        ('--ntk', {'type': float}),
        ('--part-ntk', {'type': float}),
        ('--linear', {'type': float}),
        ('--yarn', {'type': float}),
        ('--rerope', {'type': float}),
        ('--factor', {'type': float}),
        ('--load-in-8bit', {'action': 'store_true'}),
        ('--load-in-4bit', {'action': 'store_true'}),
        ('--finetuned', {'action': 'store_true'}),
        ('--gpt-neox-max-length', {'type': int}),
        ('--adapter', {'type': str}),
        ('--max-position-embeddings', {'type': int}),
        ('--original-max-position-embeddings', {'type': int}),
        ('--sliding-window-attention', {'type': int}),
        ('--custom-model', {'action': 'store_true'}),
        ('--custom-model-together', {'action': 'store_true'}),
        ('--custom-model-mistral', {'action': 'store_true'}),
        ('--flash-attention', {'action': 'store_true'}),
        ('--no-use-cache', {'action': 'store_true'}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Translate a PyTorch state dict into a flattened Flax parameter tree."""
    pt_state_dict = {name: tensor.numpy() for (name, tensor) in pt_state_dict.items()}
    # Initialize random Flax params to learn the expected keys and shapes.
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    for (pt_key, pt_tensor) in pt_state_dict.items():
        pt_tuple_key = tuple(rename_key(pt_key).split('.'))
        (flax_key, flax_tensor) = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            # Shape mismatch against the randomly-initialized reference means
            # the checkpoint does not match the model architecture.
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape {random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def dense_model():
    """Build a small CNN classifier and compile it to C via NNCG on random images."""
    num_imgs = 10
    nncg = NNCG()
    model = Sequential()
    model.add(Convolution2D(8, (3, 3), input_shape=(70, 50, 1), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Two further conv/pool stages that only differ in filter count.
    for n_filters in (16, 32):
        model.add(Convolution2D(n_filters, (3, 3), padding='valid', activation='relu', bias_initializer='random_uniform'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(2, activation='softmax'))
    images = random_imdb(num_imgs, model.input.shape[1:].as_list())
    nncg.keras_compile(images, model, 'dense_model.c')
    print_success('dense_model')
class SyntheticSimpleVisualizer(object):
    """Generates synthetic images on demand from a labelled dataset.

    Postures are drawn from ``postures_generator`` and rendered onto template
    frames that carry a valid (non-NaN) skeleton annotation.
    """

    def __init__(self, dataset_loader: str, dataset_path: str, postures_generator: Optional[Generator]=None, video_name: str=None, **kwargs):
        resize_options = ResizeOptions(**kwargs)
        dataset = load_dataset(dataset_loader, dataset_path, resize_options=resize_options, **kwargs)
        if (postures_generator is None):
            # Default to the statistical postures model's generator.
            postures_generator = PosturesModel().generate()
        if (video_name is None):
            # Default to the dataset's first video.
            video_name = dataset.video_names[0]
        features = dataset.features_dataset[video_name]
        self.skeletons = features.skeletons
        self.measurements = features.measurements
        self.output_image_shape = dataset.image_shape
        self.synthetic_dataset = SyntheticDataset(frame_preprocessing=dataset.frame_preprocessing, output_image_shape=self.output_image_shape, enable_random_augmentations=False)
        # Usable template frames are those whose skeleton has no NaN entries.
        skel_is_not_nan = (~ np.any(np.isnan(self.skeletons), axis=(1, 2)))
        self.labelled_indexes = np.where(skel_is_not_nan)[0]
        if (len(self.labelled_indexes) == 0):
            raise ValueError("No template frames found in the dataset, can't generate synthetic images.")
        self.frames_dataset = dataset.frames_dataset
        self.video_name = video_name
        self.postures_generator = postures_generator

    def generate(self):
        """Yield (image, theta) pairs indefinitely.

        NOTE(review): the same ``out_image`` buffer is reused and re-yielded
        on every iteration — consumers must copy it if they keep references.
        """
        out_image = np.empty(self.output_image_shape, dtype=np.uint8)
        with self.frames_dataset.open(self.video_name) as frames:
            while True:
                theta = next(self.postures_generator)
                # Pick a random labelled frame as the rendering template.
                random_label_index = np.random.choice(self.labelled_indexes)
                self.synthetic_dataset.generate(theta=theta, template_skeleton=self.skeletons[random_label_index], template_frame=frames[random_label_index], out_image=out_image, template_measurements=self.measurements)
                (yield (out_image, theta))
def softmax_smooth(a, b, smooth=0.0):
    """Smoothed log-sum-exp ("soft maximum") of two tensors.

    Computes ``log(exp((1-t)*a + t*b) + exp((1-t)*b + t*a)) - log(1 + smooth)``
    with ``t = smooth / 2``. With smooth=0 this is plain logsumexp(a, b).

    Fix: uses torch.logaddexp instead of log(exp(.) + exp(.)), which
    overflowed to inf for large inputs; results are numerically identical
    in the non-overflowing range.
    """
    t = smooth / 2.0
    return torch.logaddexp((1.0 - t) * a + b * t, (1.0 - t) * b + t * a) - np.log(1.0 + smooth)
def version_greaterorequal(l1, l2):
    """Return True if version ``l1`` >= version ``l2``.

    Versions are sequences of numeric components compared lexicographically,
    e.g. [1, 2] >= [1, 1] and [1, 0] < [1, 2]. A strict prefix is the
    smaller version ([1] < [1, 2]).

    Fixes two defects in the recursive original:
    - it raised IndexError when ``l1`` was longer than ``l2`` with an equal
      prefix (the recursion ran past the end of ``l2``);
    - it wrongly returned True when ``l1`` was a strict prefix of ``l2``.
    """
    return list(l1) >= list(l2)
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
    """Transformer sentence encoder whose layers use sparse (strided)
    self-attention.

    Builds the dense parent encoder, then replaces its layer stack with
    SparseTransformerSentenceEncoderLayer instances, adding the
    ``is_bidirectional``, ``stride`` and ``expressivity`` options.
    """

    def __init__(self, padding_idx: int, vocab_size: int, num_encoder_layers: int=6, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, max_seq_len: int=256, num_segments: int=2, use_position_embeddings: bool=True, offset_positions_by_padding: bool=True, encoder_normalize_before: bool=False, apply_bert_init: bool=False, activation_fn: str='relu', learned_pos_embedding: bool=True, embed_scale: float=None, freeze_embeddings: bool=False, n_trans_layers_to_freeze: int=0, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
        # Let the parent build embeddings and its (dense) layer stack first.
        super().__init__(padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export)
        # Replace the dense layers with sparse-attention layers.
        self.layers = nn.ModuleList([SparseTransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity) for _ in range(num_encoder_layers)])

        def freeze_module_params(m):
            # Disable gradients for every parameter of module m (if any).
            if (m is not None):
                for p in m.parameters():
                    p.requires_grad = False

        # Re-apply layer freezing: the freshly created sparse layers did not
        # inherit the freezing the parent applied to its own layers.
        for layer in range(n_trans_layers_to_freeze):
            freeze_module_params(self.layers[layer])
def efficientnet_lite4(pretrained=False, **kwargs):
    """Build EfficientNet-Lite4 (channel multiplier 1.4, depth multiplier 1.8)."""
    return _gen_efficientnet_lite('efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
class FlaubertForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder used when the 'torch' backend is unavailable.

    Instantiation calls ``requires_backends`` — presumably raising an
    informative installation error (standard dummy-object convention).
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Vision(BaseWDModelComponent):
    """Vision component: a pretrained torchvision backbone (or a small basic
    CNN) optionally followed by an MLP head.

    NOTE(review): the bare tuple below is a no-op as written — it looks like
    the residue of a stripped decorator (e.g. an argument-alias decorator);
    confirm against the original source.
    """
    ('pretrained_model_setup', ['pretrained_model_name'])

    def __init__(self, pretrained_model_setup: Union[(str, Dict[(str, Union[(str, WeightsEnum)])])]=None, n_trainable: Optional[int]=None, trainable_params: Optional[List[str]]=None, channel_sizes: List[int]=[64, 128, 256, 512], kernel_sizes: Union[(int, List[int])]=[7, 3, 3, 3], strides: Union[(int, List[int])]=[2, 1, 1, 1], head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: Union[(float, List[float])]=0.1, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        # NOTE: list defaults are kept for interface compatibility; they are
        # never mutated in this class.
        super(Vision, self).__init__()
        self._check_pretrained_model_setup(pretrained_model_setup, n_trainable, trainable_params)
        self.pretrained_model_setup = pretrained_model_setup
        self.n_trainable = n_trainable
        self.trainable_params = trainable_params
        self.channel_sizes = channel_sizes
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first
        (self.features, self.backbone_output_dim) = self._get_features()
        if (pretrained_model_setup is not None):
            # Pretrained backbones are (partially) frozen per the trainable args.
            self._freeze(self.features)
        if (self.head_hidden_dims is not None):
            head_hidden_dims = ([self.backbone_output_dim] + self.head_hidden_dims)
            self.vision_mlp = MLP(head_hidden_dims, self.head_activation, self.head_dropout, self.head_batchnorm, self.head_batchnorm_last, self.head_linear_first)

    def forward(self, X: Tensor) -> Tensor:
        """Extract backbone features, flatten, and optionally apply the MLP head."""
        x = self.features(X)
        if (len(x.shape) > 2):
            if (x.shape[2] > 1):
                # Collapse remaining spatial extent before flattening.
                x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
            x = torch.flatten(x, 1)
        if (self.head_hidden_dims is not None):
            x = self.vision_mlp(x)
        return x

    def output_dim(self) -> int:
        # NOTE(review): likely intended as a @property upstream; kept as a
        # plain method to avoid breaking callers that invoke it.
        return (self.head_hidden_dims[(- 1)] if (self.head_hidden_dims is not None) else self.backbone_output_dim)

    def _get_features(self) -> Tuple[(nn.Module, int)]:
        """Build (feature extractor, output feature dim) from the setup."""
        if (self.pretrained_model_setup is not None):
            if isinstance(self.pretrained_model_setup, str):
                if (self.pretrained_model_setup in allowed_pretrained_models.keys()):
                    # Generic family name: map to the concrete default variant.
                    model = allowed_pretrained_models[self.pretrained_model_setup]
                    pretrained_model = torchvision.models.__dict__[model](weights=torchvision.models.get_model_weights(model).DEFAULT)
                    warnings.warn(f'{self.pretrained_model_setup} defaulting to {model}', UserWarning)
                else:
                    pretrained_model = torchvision.models.__dict__[self.pretrained_model_setup](weights='IMAGENET1K_V1')
            elif isinstance(self.pretrained_model_setup, Dict):
                # Dict form: {model_name: weights_spec}.
                model_name = next(iter(self.pretrained_model_setup))
                model_weights = self.pretrained_model_setup[model_name]
                if (model_name in allowed_pretrained_models.keys()):
                    model_name = allowed_pretrained_models[model_name]
                pretrained_model = torchvision.models.__dict__[model_name](weights=model_weights)
            output_dim: int = self.get_backbone_output_dim(pretrained_model)
            # Drop the classification head; keep the feature extractor.
            features = nn.Sequential(*list(pretrained_model.children())[:(- 1)])
        else:
            features = self._basic_cnn()
            output_dim = self.channel_sizes[(- 1)]
        return (features, output_dim)

    def _basic_cnn(self):
        """Stack of conv layers built from channel_sizes/kernel_sizes/strides."""
        channel_sizes = ([3] + self.channel_sizes)
        kernel_sizes = (([self.kernel_sizes] * len(self.channel_sizes)) if isinstance(self.kernel_sizes, int) else self.kernel_sizes)
        strides = (([self.strides] * len(self.channel_sizes)) if isinstance(self.strides, int) else self.strides)
        BasicCNN = nn.Sequential()
        for i in range(1, len(channel_sizes)):
            # Max-pool only after the first conv; adaptive avg-pool on the last.
            BasicCNN.add_module('conv_layer_{}'.format((i - 1)), conv_layer(channel_sizes[(i - 1)], channel_sizes[i], kernel_sizes[(i - 1)], strides[(i - 1)], maxpool=(i == 1), adaptiveavgpool=(i == (len(channel_sizes) - 1))))
        return BasicCNN

    def _freeze(self, features):
        """Freeze backbone parameters per trainable_params/n_trainable."""
        if (self.trainable_params is not None):
            # Only parameters whose name matches one of the patterns train.
            for (name, param) in features.named_parameters():
                for tl in self.trainable_params:
                    param.requires_grad = (tl in name)
        elif (self.n_trainable is not None):
            # Train only the last n_trainable parameters (iterating backwards).
            for (i, (name, param)) in enumerate(reversed(list(features.named_parameters()))):
                param.requires_grad = (i < self.n_trainable)
        else:
            warnings.warn("Both 'trainable_params' and 'n_trainable' are 'None' and the entire network will be trained", UserWarning)

    @staticmethod
    def get_backbone_output_dim(features):
        """Best-effort lookup of a torchvision backbone's output feature dim.

        Fix: decorated with @staticmethod — it was previously called as
        ``self.get_backbone_output_dim(model)``, which raised TypeError.
        """
        try:
            return features.fc.in_features
        except AttributeError:
            try:
                # Fix: the original computed this value but forgot to return it.
                return features.classifier.__dict__['_modules']['0'].in_features
            except AttributeError:
                try:
                    return features.classifier.__dict__['_modules']['1'].in_features
                except AttributeError:
                    return features.classifier.__dict__['_modules']['1'].in_channels

    @staticmethod
    def _check_pretrained_model_setup(pretrained_model_setup, n_trainable, trainable_params):
        """Validate constructor arguments before any state is set.

        Fix: decorated with @staticmethod — it was previously called as
        ``self._check_pretrained_model_setup(a, b, c)``, which raised
        TypeError on every construction.
        """
        if (pretrained_model_setup is not None):
            if isinstance(pretrained_model_setup, str):
                pretrained_model_name = pretrained_model_setup
            elif isinstance(pretrained_model_setup, Dict):
                pretrained_model_name = list(pretrained_model_setup.keys())[0]
            else:
                pretrained_model_name = None
            if (pretrained_model_name is not None):
                valid_pretrained_model_name = any([(name in pretrained_model_name) for name in allowed_pretrained_models])
                if (not valid_pretrained_model_name):
                    raise ValueError(f'{pretrained_model_setup} is not among the allowed pretrained models. These are {allowed_pretrained_models.keys()}. Please choose a variant of these architectures')
        if ((n_trainable is not None) and (trainable_params is not None)):
            # NOTE(review): raising UserWarning aborts construction although the
            # message reads like warnings.warn was intended — kept as-is to
            # avoid a silent behavior change; confirm intent.
            raise UserWarning("Both 'n_trainable' and 'trainable_params' are not None. 'trainable_params' will be used")
class Generator(nn.Module):
    """BigGAN-style conditional generator.

    Maps a latent vector ``z`` and class conditioning ``y`` through a linear
    projection and a stack of class-conditional GBlocks (optionally with
    self-attention) to a 3-channel image squashed by tanh.
    """

    def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128, G_kernel_size=3, G_attn='64', n_classes=1000, num_G_SVs=1, num_G_SV_itrs=1, G_shared=True, shared_dim=0, hier=False, cross_replica=False, mybn=False, G_activation=nn.ReLU(inplace=True), optimizer='Adam', G_lr=5e-05, G_B1=0.0, G_B2=0.999, adam_eps=1e-08, BN_eps=1e-05, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False, G_init='ortho', skip_init=False, no_optim=False, G_param='SN', norm_style='bn', **kwargs):
        super(Generator, self).__init__()
        # Channel width multiplier and general architecture bookkeeping.
        self.ch = G_ch
        self.dim_z = dim_z
        self.bottom_width = bottom_width
        self.resolution = resolution
        self.kernel_size = G_kernel_size
        self.attention = G_attn
        self.n_classes = n_classes
        self.G_shared = G_shared
        # Shared embedding dim defaults to dim_z when not explicitly set.
        self.shared_dim = (shared_dim if (shared_dim > 0) else dim_z)
        self.hier = hier
        self.cross_replica = cross_replica
        self.mybn = mybn
        self.activation = G_activation
        self.init = G_init
        self.G_param = G_param
        self.norm_style = norm_style
        self.BN_eps = BN_eps
        self.SN_eps = SN_eps
        self.fp16 = G_fp16
        self.arch = G_arch(self.ch, self.attention)[resolution]
        # Hierarchical z: split the latent into one chunk per block plus one
        # for the input linear layer; dim_z is rounded down to a multiple.
        if self.hier:
            self.num_slots = (len(self.arch['in_channels']) + 1)
            self.z_chunk_size = (self.dim_z // self.num_slots)
            self.dim_z = (self.z_chunk_size * self.num_slots)
        else:
            self.num_slots = 1
            self.z_chunk_size = 0
        # Choose spectrally-normalized or plain conv/linear constructors.
        if (self.G_param == 'SN'):
            self.which_conv = functools.partial(layers.SNConv2d, kernel_size=3, padding=1, num_svs=num_G_SVs, num_itrs=num_G_SV_itrs, eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear, num_svs=num_G_SVs, num_itrs=num_G_SV_itrs, eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear
        self.which_embedding = nn.Embedding
        bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared else self.which_embedding)
        # Class-conditional batchnorm fed by either the shared embedding
        # (plus the per-block z chunk) or a per-class embedding.
        self.which_bn = functools.partial(layers.ccbn, which_linear=bn_linear, cross_replica=self.cross_replica, mybn=self.mybn, input_size=((self.shared_dim + self.z_chunk_size) if self.G_shared else self.n_classes), norm_style=self.norm_style, eps=self.BN_eps)
        self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared else layers.identity())
        # First linear layer maps (a chunk of) z to the bottom feature map.
        self.linear = self.which_linear((self.dim_z // self.num_slots), (self.arch['in_channels'][0] * (self.bottom_width ** 2)))
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index], out_channels=self.arch['out_channels'][index], which_conv=self.which_conv, which_bn=self.which_bn, activation=self.activation, upsample=(functools.partial(F.interpolate, scale_factor=2) if self.arch['upsample'][index] else None))]]
            # Optionally append self-attention at the configured resolutions.
            if self.arch['attention'][self.arch['resolution'][index]]:
                print(('Adding attention layer in G at resolution %d' % self.arch['resolution'][index]))
                self.blocks[(- 1)] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
        # Final BN -> activation -> 3-channel conv producing the image.
        self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][(- 1)], cross_replica=self.cross_replica, mybn=self.mybn), self.activation, self.which_conv(self.arch['out_channels'][(- 1)], 3))
        if (not skip_init):
            self.init_weights()
        # Skip optimizer construction entirely when requested.
        if no_optim:
            return
        (self.lr, self.B1, self.B2, self.adam_eps) = (G_lr, G_B1, G_B2, adam_eps)
        if G_mixed_precision:
            print('Using fp16 adam in G...')
            import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        elif (optimizer == 'Adam'):
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        elif (optimizer == 'SGD'):
            self.optim = optim.SGD(params=self.parameters(), lr=self.lr, momentum=0.9, weight_decay=0)
        else:
            raise ValueError('optim has to be Adam or SGD, but got {}'.format(optimizer))

    def init_weights(self):
        """Initialize conv/linear/embedding weights per ``self.init`` style."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)):
                if (self.init == 'ortho'):
                    init.orthogonal_(module.weight)
                elif (self.init == 'N02'):
                    init.normal_(module.weight, 0, 0.02)
                elif (self.init in ['glorot', 'xavier']):
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print(('Param count for Gs initialized parameters: %d' % self.param_count))

    def reset_in_init(self):
        """Delegate reset to every GBlock (presumably instance-norm state —
        confirm against GBlock.reset_in_init)."""
        for (index, blocklist) in enumerate(self.blocks):
            for block in blocklist:
                if isinstance(block, layers.GBlock):
                    block.reset_in_init()

    def get_params(self, index=0, update_embed=False):
        """Yield the parameter group selected by ``index``.

        0 -> input linear (plus the shared embedding if ``update_embed``);
        1..len(blocks) -> the corresponding block;
        len(blocks)+1 -> the output layer.
        """
        if (index == 0):
            for param in self.linear.parameters():
                (yield param)
            if update_embed:
                for param in self.shared.parameters():
                    (yield param)
        elif (index < (len(self.blocks) + 1)):
            for param in self.blocks[(index - 1)].parameters():
                (yield param)
        elif (index == (len(self.blocks) + 1)):
            for param in self.output_layer.parameters():
                (yield param)
        else:
            raise ValueError('Index out of range')

    def forward(self, z, y, use_in=False):
        """Generate images from latent ``z`` and class conditioning ``y``."""
        # With hierarchical z, the first chunk feeds the linear layer and the
        # remaining chunks are concatenated onto y, one per block.
        if self.hier:
            zs = torch.split(z, self.z_chunk_size, 1)
            z = zs[0]
            ys = [torch.cat([y, item], 1) for item in zs[1:]]
        else:
            ys = ([y] * len(self.blocks))
        h = self.linear(z)
        h = h.view(h.size(0), (- 1), self.bottom_width, self.bottom_width)
        for (index, blocklist) in enumerate(self.blocks):
            for block in blocklist:
                h = block(h, ys[index], use_in)
        return torch.tanh(self.output_layer(h))
def _find_bn(module):
    """Return the first (possibly synchronized) batch-norm submodule, or None."""
    norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)
    return next((m for m in module.modules() if isinstance(m, norm_types)), None)
class _ParallelDomainDataset(_SynchronizedDataset):
    """Synchronized multi-sensor dataset with optional lidar coalescing.

    When the caller requests the coalesced-lidar datum by name, the individual
    lidar datums are loaded instead and merged back into a single point-cloud
    item per sample (see ``coalesce_pc_data``), preserving the caller's
    requested datum ordering.
    """
    def __init__(self, dataset_metadata, scenes=None, datum_names=None, requested_annotations=None, requested_autolabels=None, forward_context=0, backward_context=0, generate_depth_from_datum=None, only_annotated_datums=False, use_virtual_camera_datums=True, accumulation_context=None, transform_accumulated_box_points=False, autolabel_root=None):
        # Coalescing is enabled only when the coalesced lidar datum is
        # explicitly requested by name.
        self.coalesce_point_cloud = ((datum_names is not None) and (COALESCED_LIDAR_DATUM_NAME in datum_names))
        self.use_virtual_camera_datums = use_virtual_camera_datums
        if self.coalesce_point_cloud:
            # Remember each requested datum's position so coalesced samples
            # can be re-ordered to match the original request (see
            # coalesce_sample).
            self._datum_name_to_index = {datum_name: datum_idx for (datum_idx, datum_name) in enumerate(datum_names)}
            # Replace the virtual coalesced datum with the real lidar datums.
            new_datum_names = [datum_name for datum_name in datum_names if (COALESCED_LIDAR_DATUM_NAME != datum_name)]
            new_datum_names.extend(LIDAR_DATUM_NAMES)
            if use_virtual_camera_datums:
                new_datum_names.extend(VIRTUAL_CAMERA_DATUM_NAMES)
            if ((accumulation_context is not None) and (COALESCED_LIDAR_DATUM_NAME in accumulation_context)):
                # Propagate the coalesced datum's accumulation context to every
                # individual lidar datum that replaces it. NOTE(review): this
                # mutates the caller's accumulation_context dict in place.
                acc_context = accumulation_context.pop(COALESCED_LIDAR_DATUM_NAME)
                updated_acc = {datum_name: acc_context for datum_name in LIDAR_DATUM_NAMES}
                accumulation_context.update(updated_acc)
            logging.info('Datum names with lidar datums={}'.format(new_datum_names))
            datum_names = new_datum_names
        super().__init__(dataset_metadata=dataset_metadata, scenes=scenes, datum_names=datum_names, requested_annotations=requested_annotations, requested_autolabels=requested_autolabels, forward_context=forward_context, backward_context=backward_context, generate_depth_from_datum=generate_depth_from_datum, only_annotated_datums=only_annotated_datums, accumulation_context=accumulation_context, transform_accumulated_box_points=transform_accumulated_box_points, autolabel_root=autolabel_root)
    def coalesce_pc_data(self, items):
        """Merge the individual lidar items in *items* into one coalesced
        point-cloud datum expressed in a common frame.

        Returns an OrderedDict shaped like a regular point-cloud item
        (datum_name, timestamp, points, extra_channels, extrinsics, pose, and
        optionally bounding_box_3d).
        """
        pc_items = [item for item in items if (POINT_CLOUD_KEY in item)]
        assert self.coalesce_point_cloud
        # All configured lidar datums must be present to coalesce.
        assert (len(pc_items) == len(LIDAR_DATUM_NAMES))
        if (len(self.requested_autolabels) > 0):
            logging.warning('autolabels were requested, however point cloud coalesce does not support coalescing autolabels')
        coalesced_pc = OrderedDict()
        (X_V_merged, bbox_3d_V_merged, instance_ids_merged) = ([], [], [])
        total_bounding_box_3d = 0
        for item in pc_items:
            # X_S: points in the sensor frame; p_VS: the item's extrinsics
            # (presumably sensor -> vehicle — TODO confirm frame convention).
            X_S = item[POINT_CLOUD_KEY]
            p_VS = item['extrinsics']
            X_V_merged.append((p_VS * X_S))
            if ('bounding_box_3d' in item):
                total_bounding_box_3d += len(item['bounding_box_3d'])
                # De-duplicate boxes seen by multiple sensors via instance id.
                for bbox_3d in item['bounding_box_3d']:
                    if (bbox_3d.instance_id not in instance_ids_merged):
                        instance_ids_merged.append(bbox_3d.instance_id)
                        bbox_3d_V_merged.append((p_VS * bbox_3d))
        coalesced_pc['datum_name'] = COALESCED_LIDAR_DATUM_NAME
        # Timestamp/pose are taken from the first lidar item.
        coalesced_pc['timestamp'] = pc_items[0]['timestamp']
        coalesced_pc[POINT_CLOUD_KEY] = np.vstack(X_V_merged)
        coalesced_pc['extra_channels'] = np.vstack([item['extra_channels'] for item in pc_items])
        # Points were already transformed by each item's extrinsics above, so
        # the coalesced datum's extrinsics are identity.
        coalesced_pc['extrinsics'] = Pose()
        p_LS = pc_items[0]['pose']
        p_VS = pc_items[0]['extrinsics']
        # Compose the coalesced pose from the first item's pose and the
        # inverse of its extrinsics.
        p_LV = (p_LS * p_VS.inverse())
        coalesced_pc['pose'] = p_LV
        if len(bbox_3d_V_merged):
            ontology = pc_items[0]['bounding_box_3d'].ontology
            coalesced_pc['bounding_box_3d'] = BoundingBox3DAnnotationList(ontology, bbox_3d_V_merged)
        if ('bounding_box_3d' in coalesced_pc.keys()):
            # De-duplication can only shrink the merged box count.
            assert (len(coalesced_pc['bounding_box_3d']) <= total_bounding_box_3d)
        return coalesced_pc
    def coalesce_sample(self, sample):
        """Return *sample*'s items with lidar datums replaced by a single
        coalesced datum, ordered to match the originally requested indices.
        """
        items_dict = OrderedDict()
        # Put the coalesced lidar datum back at the slot the caller asked for.
        items_dict[self._datum_name_to_index[COALESCED_LIDAR_DATUM_NAME]] = self.coalesce_pc_data(sample)
        items_dict.update({self._datum_name_to_index[item['datum_name']]: item for item in sample if ((POINT_CLOUD_KEY not in item) and (item['datum_name'] not in VIRTUAL_CAMERA_DATUM_NAMES))})
        if self.use_virtual_camera_datums:
            # Virtual camera datums were appended by __init__, so place them
            # after all originally requested datums.
            virtual_camera_datums = [item for item in sample if (item['datum_name'] in VIRTUAL_CAMERA_DATUM_NAMES)]
            virtual_camera_datums = {(idx + len(items_dict)): item for (idx, item) in enumerate(virtual_camera_datums)}
            items_dict.update(virtual_camera_datums)
        # Sort by requested index and drop the index keys.
        indices_and_items_sorted = sorted(list(items_dict.items()), key=(lambda tup: tup[0]))
        aligned_items = list(map((lambda tup: tup[1]), indices_and_items_sorted))
        return aligned_items
    def __getitem__(self, idx):
        """Fetch one sample (or a temporal window of samples) and coalesce
        lidar datums when coalescing is enabled.
        """
        sample_data = super().__getitem__(idx)
        if self.coalesce_point_cloud:
            if ((self.forward_context > 0) or (self.backward_context > 0)):
                # With temporal context, sample_data is a list of per-timestep
                # item lists; coalesce each timestep.
                sample_data = [self.coalesce_sample(t_item) for t_item in sample_data]
            else:
                sample_data = [self.coalesce_sample(sample_data[0])]
        return sample_data
class TransitionBlock(nn.Module):
    """DenseNet-style transition: a 1x1 conv block followed by 2x2 average pooling.

    The conv adjusts the channel count; the pool halves the spatial resolution.
    """

    def __init__(self, in_channels, out_channels):
        super(TransitionBlock, self).__init__()
        self.conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels)
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        # Channel projection, then spatial downsampling.
        return self.pool(self.conv(x))
class AttrDict(dict):
    """Dictionary with attribute-style access that is immutable by default.

    Attribute reads fall back to ``None`` for missing keys.  Attribute writes
    are forbidden unless performed inside a ``with obj.unlocked():`` scope.
    """

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Bypass our own __setattr__ guard to install the lock flag.
        super(AttrDict, self).__setattr__('_mutable', False)

    def __getattr__(self, key):
        # Preserve normal protocol behavior for dunder lookups
        # (pickling, copying, etc.) by not resolving them to dict keys.
        if key.startswith('__'):
            raise AttributeError
        return self.get(key, None)

    def __setattr__(self, key, value):
        if (not self._mutable):
            message = "Cannot set attribute '{}'.".format(key)
            message += " Use 'with obj.unlocked:' scope to set attributes."
            raise RuntimeError(message)
        if key.startswith('__'):
            raise AttributeError("Cannot set magic attribute '{}'".format(key))
        self[key] = value

    def unlocked(self):
        """Return a context manager that temporarily allows attribute writes.

        Fix: the original was a bare generator function, so
        ``with obj.unlocked():`` raised (generators have no ``__enter__``) and
        the dict could never be re-locked on an exception.  Wrapping the
        generator with ``contextlib.contextmanager`` and re-locking in a
        ``finally`` makes the documented usage work and exception-safe.
        """
        from contextlib import contextmanager

        @contextmanager
        def _scope():
            super(AttrDict, self).__setattr__('_mutable', True)
            try:
                yield
            finally:
                # Always re-lock, even if the with-body raised.
                super(AttrDict, self).__setattr__('_mutable', False)

        return _scope()

    def copy(self):
        """Return a shallow copy as the same AttrDict subclass."""
        return type(self)(super(AttrDict, self).copy())
def download_dataset(to_folder, dl_dataset, completed_urls=None):
    """Download a dataset's train/valid/test URL lists into *to_folder*.

    Fix: the original used a mutable default argument (``completed_urls={}``),
    which is created once and silently shared across calls.  Callers that want
    to accumulate progress across calls should pass the dict explicitly.

    Args:
        to_folder: destination directory for the downloaded files.
        dl_dataset: object exposing ``train_urls``, ``valid_urls``, ``test_urls``.
        completed_urls: optional dict tracking already-downloaded URLs;
            a fresh dict is created per call when omitted.

    Returns:
        The (possibly updated) ``completed_urls`` dict.
    """
    if completed_urls is None:
        completed_urls = {}
    download_files(to_folder, dl_dataset.train_urls, completed_urls)
    download_files(to_folder, dl_dataset.valid_urls, completed_urls)
    download_files(to_folder, dl_dataset.test_urls, completed_urls)
    print('completed downloading')
    return completed_urls
def read_langs_dial(file_name, ontology, dialog_act, max_line=None, domain_act_flag=False):
    """Placeholder dialog reader; announces the file then always raises.

    Raises:
        NotImplementedError: unconditionally — this reader is not implemented.
    """
    banner = 'Reading from {} for read_langs_dial'.format(file_name)
    print(banner)
    raise NotImplementedError
class Claude(AgentClient):
    """Agent client backed by the Anthropic completions API.

    The API key comes from ``api_args['key']`` or the ``Claude_API_KEY``
    environment variable; the model name must be supplied in ``api_args``.
    """

    def __init__(self, api_args=None, *args, **config):
        super().__init__(*args, **config)
        if (not api_args):
            api_args = {}
        # Deep-copy so the pops below never mutate the caller's dict.
        api_args = deepcopy(api_args)
        self.key = (api_args.pop('key', None) or os.getenv('Claude_API_KEY'))
        api_args['model'] = api_args.pop('model', None)
        if (not self.key):
            # Fix: the original message told users to set OPENAI_API_KEY even
            # though this class reads the Claude_API_KEY variable.
            raise ValueError('Claude API KEY is required, please assign api_args.key or set Claude_API_KEY environment variable.')
        if (not api_args['model']):
            raise ValueError('Claude model is required, please assign api_args.model.')
        self.api_args = api_args
        if (not self.api_args.get('stop_sequences')):
            self.api_args['stop_sequences'] = [anthropic.HUMAN_PROMPT]

    def inference(self, history: List[dict]) -> str:
        """Render *history* into Anthropic's HUMAN/AI prompt format and return the completion text.

        Messages with role ``'user'`` get the HUMAN prompt prefix; everything
        else gets the AI prefix.
        """
        prompt = ''
        for message in history:
            if (message['role'] == 'user'):
                prompt += (anthropic.HUMAN_PROMPT + message['content'])
            else:
                prompt += (anthropic.AI_PROMPT + message['content'])
        # Leave the prompt open for the assistant's next turn.
        prompt += anthropic.AI_PROMPT
        c = anthropic.Client(api_key=self.key)
        resp = c.completions.create(prompt=prompt, **self.api_args)
        return str(resp.completion)
def get():
    """Return the active communicator instance.

    Raises:
        RuntimeError: if crypten has not been initialized yet.
    """
    if __use_threads:
        communicator_cls = InProcessCommunicator
    else:
        communicator_cls = DistributedCommunicator
    if not communicator_cls.is_initialized():
        raise RuntimeError('Crypten not initialized. Please call crypten.init() first.')
    return communicator_cls.get()
def main(args):
    """Convert the train/val splits of the three RefCOCO variants, threading
    globally unique image and annotation ids through every conversion."""
    source_root = Path(args.data_path)
    target_root = Path(args.out_path)
    os.makedirs(str(target_root), exist_ok=True)
    next_img_id = 0
    next_id = 0
    ref_files = ['refcoco/refs(unc).p', 'refcoco+/refs(unc).p', 'refcocog/refs(umd).p']
    for dataset_name in ref_files:
        for split in ('train', 'val'):
            # convert() returns the next free ids so they stay unique
            # across datasets and splits.
            next_img_id, next_id = convert(source_root, dataset_name, split, target_root, args.coco_path, next_img_id=next_img_id, next_id=next_id)
class _TFTrainModelInputTensorsFormer(ModelInputTensorsFormer):
    """Packs and unpacks ReaderInputTensors in the fixed field order the TF
    training model expects: target, path-source, path, path-target, mask."""

    def to_model_input_form(self, input_tensors: ReaderInputTensors):
        """Flatten the named tensors into the model's positional tuple."""
        t = input_tensors
        return (t.target_index,
                t.path_source_token_indices,
                t.path_indices,
                t.path_target_token_indices,
                t.context_valid_mask)

    def from_model_input_form(self, input_row) -> ReaderInputTensors:
        """Rebuild the named tensor struct from the model's positional row."""
        return ReaderInputTensors(
            target_index=input_row[0],
            path_source_token_indices=input_row[1],
            path_indices=input_row[2],
            path_target_token_indices=input_row[3],
            context_valid_mask=input_row[4],
        )
class ModuleProxyWrapper(nn.Module):
    """Transparently forwards attribute access and state-dict calls through a
    wrapper module (e.g. DistributedDataParallel) to the module it wraps,
    so checkpoints carry no wrapper prefix and attributes resolve naturally.
    """

    def __init__(self, module: nn.Module):
        super().__init__()
        assert hasattr(module, 'module'), 'ModuleProxyWrapper expects input to wrap another module'
        self.module = module

    def __getattr__(self, name):
        """Resolve *name* on this wrapper, then the wrapper module, then the inner module."""
        try:
            return super().__getattr__(name)
        except AttributeError:
            pass
        try:
            return getattr(self.module, name)
        except AttributeError:
            pass
        return getattr(self.module.module, name)

    def state_dict(self, *args, **kwargs):
        # Save against the inner module so keys have no wrapper prefix.
        return self.module.module.state_dict(*args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        # Load into the inner module, symmetric with state_dict above.
        return self.module.module.load_state_dict(*args, **kwargs)

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)
def grid_search(model_class, init_args, param_grid, x_unvec, y, num_class, k=3, max_num_sample=10000):
    """Evaluate every combination in *param_grid* and return the best parameter set.

    Delegates enumeration to ``_param_combinations`` and evaluation to
    ``_search``; reports best/worst loss for visibility.
    """
    candidate_param_sets = _param_combinations(param_grid)
    search_result = _search(
        model_class, init_args, candidate_param_sets, x_unvec, y,
        num_class=num_class, k=k, max_num_sample=max_num_sample)
    best_param_set, best_loss, worst_loss = search_result
    print('During parameter tuning, best loss: {:.4f} / Worst loss: {:.4f}'.format(best_loss, worst_loss))
    return best_param_set
# NOTE(review): this bare call is presumably a stripped decorator — upstream
# fairseq registers the class with `@register_model('fconv_self_att')`;
# confirm against the original source.
_model('fconv_self_att')
class FConvModelSelfAtt(FairseqEncoderDecoderModel):
    """Fully-convolutional seq2seq model with (self-)attention, optionally
    initialized from a frozen pretrained encoder/decoder pair.
    """
    def __init__(self, encoder, decoder, pretrained_encoder=None):
        super().__init__(encoder, decoder)
        # Tell the encoder how many decoder layers attend over its output.
        self.encoder.num_attention_layers = sum(((layer is not None) for layer in decoder.attention))
        self.pretrained_encoder = pretrained_encoder
        if (self.pretrained_encoder is None):
            encoders = {'encoder': encoder}
        else:
            encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}
        # Expose the main (and optional pretrained) encoder under one composite.
        self.encoder = CompositeEncoder(encoders)
    # NOTE(review): no `self` parameter — presumably decorated with
    # `@staticmethod` upstream; the decorator appears stripped. Confirm.
    def add_args(parser):
        """Add model-specific arguments to the argument parser."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')
        parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5')
        parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention')
        parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention')
        parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]')
        parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention')
        parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]')
        parser.add_argument('--gated-attention', type=str, metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]')
        parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]')
        parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model')
        parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]')
    # NOTE(review): takes `cls` — presumably a `@classmethod` upstream; the
    # decorator appears stripped. Confirm.
    def build_model(cls, args, task):
        """Build a new FConvModelSelfAtt instance from parsed args and task dictionaries."""
        (trained_encoder, trained_decoder) = (None, None)
        # NOTE: eval() on argument strings mirrors upstream fairseq but is
        # unsafe on untrusted input.
        pretrained = eval(args.pretrained)
        if pretrained:
            print('| loading pretrained model')
            trained_model = checkpoint_utils.load_model_ensemble(filenames=[args.pretrained_checkpoint], task=task)[0][0]
            trained_decoder = list(trained_model.children())[1]
            trained_encoder = list(trained_model.children())[0]
            # Freeze the pretrained sub-models; only the new model trains.
            for param in trained_decoder.parameters():
                param.requires_grad = False
            for param in trained_encoder.parameters():
                param.requires_grad = False
        # NOTE(review): the bare string below is a misplaced docstring — in
        # upstream fairseq it sits at the top of build_model. Kept byte-identical.
        'Build a new model instance.'
        encoder = FConvEncoder(task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads)
        decoder = FConvDecoder(task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder)
        model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
        return model
    # NOTE(review): presumably decorated with `@property` upstream; confirm.
    def pretrained(self):
        """Whether a pretrained encoder is attached to this model."""
        return (self.pretrained_encoder is not None)
def makeVocabulary(filename, size, is_target, char=False):
    """Build a (possibly character-level) vocabulary from one or more text files.

    Fix: the original duplicated the identical file-reading loop in separate
    single-file and list-of-files branches; normalizing the input to a list
    removes the duplication without changing behavior.

    Args:
        filename: a path, or a list of paths, to whitespace-tokenized text files.
        size: maximum vocabulary size after pruning.
        is_target: if True, start from an empty Dict (no special tokens);
            otherwise seed with PAD/UNK/BOS/EOS.
        char: if True, add individual characters instead of words.

    Returns:
        The pruned vocabulary Dict.
    """
    if is_target:
        vocab = dict.Dict([], lower=opt.lower)
    else:
        vocab = dict.Dict([dict.PAD_WORD, dict.UNK_WORD, dict.BOS_WORD, dict.EOS_WORD], lower=opt.lower)
    if char:
        vocab.addSpecial(dict.SPA_WORD)
    lengths = []
    # Normalize to a list so single- and multi-file inputs share one loop.
    filenames = filename if isinstance(filename, list) else [filename]
    for _filename in filenames:
        with open(_filename) as f:
            for sent in f:
                for word in sent.strip().split():
                    lengths.append(len(word))
                    if char:
                        for ch in word:
                            vocab.add(ch)
                    else:
                        # Trailing space preserved from the original word-add form.
                        vocab.add((word + ' '))
    print(('max: %d, min: %d, avg: %.2f' % (max(lengths), min(lengths), (sum(lengths) / len(lengths)))))
    originalSize = vocab.size()
    vocab = vocab.prune(size)
    print(('Created dictionary of size %d (pruned from %d)' % (vocab.size(), originalSize)))
    return vocab
def get_host_info():
    """Return a ``user@hostname`` string, or '' if either lookup fails.

    Fix: the original f-string concatenated user and host with no separator
    (e.g. 'alicebox'); the conventional form — and the upstream one — is
    'user@host'.

    Returns:
        str: ``'<user>@<host>'``, or the empty string when the user or host
        cannot be determined (a warning is emitted in that case).
    """
    host = ''
    try:
        host = f'{getuser()}@{gethostname()}'
    except Exception as e:
        warnings.warn(f'Host or user not found: {str(e)}')
    finally:
        # finally-return guarantees a string result even if warn() itself raises.
        return host
class ZDT2TestCases(unittest.TestCase):
    """Unit tests for the ZDT2 benchmark problem."""

    def _check_dimensions_and_bounds(self, problem, n_vars):
        """Assert variable/objective/constraint counts and [0,1] box bounds."""
        self.assertEqual(n_vars, problem.number_of_variables())
        self.assertEqual(2, problem.number_of_objectives())
        self.assertEqual(0, problem.number_of_constraints())
        self.assertEqual([0.0] * n_vars, problem.lower_bound)
        self.assertEqual([1.0] * n_vars, problem.upper_bound)

    def test_should_constructor_create_a_non_null_object(self) -> None:
        self.assertIsNotNone(ZDT2())

    def test_should_constructor_create_a_valid_problem_with_default_settings(self) -> None:
        self._check_dimensions_and_bounds(ZDT2(), 30)

    def test_should_constructor_create_a_valid_problem_with_7_variables(self) -> None:
        self._check_dimensions_and_bounds(ZDT2(7), 7)

    def test_should_create_solution_create_a_valid_float_solution(self) -> None:
        problem = ZDT2()
        solution = problem.create_solution()
        # The solution mirrors the problem's dimensions.
        self.assertEqual(30, solution.number_of_variables)
        self.assertEqual(30, len(solution.variables))
        self.assertEqual(2, solution.number_of_objectives)
        self.assertEqual(2, len(solution.objectives))
        self.assertEqual(0, problem.number_of_constraints())
        self.assertEqual([0.0] * 30, problem.lower_bound)
        self.assertEqual([1.0] * 30, problem.upper_bound)
        # Every sampled variable must respect the box bounds.
        self.assertTrue(all(value >= 0.0 for value in solution.variables))
        self.assertTrue(all(value <= 1.0 for value in solution.variables))

    def test_should_get_name_return_the_right_name(self):
        self.assertEqual('ZDT2', ZDT2().name())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.