def is_even_matrix(A):
for i in range(A.nrows()):
if (A[(i, i)] % 2):
return (False, i)
    return (True, (- 1))

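# Hedged usage sketch for is_even_matrix above: it only relies on .nrows()
# and (i, i) indexing (a Sage-style matrix interface), so a tiny wrapper over
# a list of lists is enough to exercise it. _ToyMatrix is illustrative only.
class _ToyMatrix:
    def __init__(self, rows):
        self.rows = rows

    def nrows(self):
        return len(self.rows)

    def __getitem__(self, ij):
        i, j = ij
        return self.rows[i][j]

print(is_even_matrix(_ToyMatrix([[2, 1], [7, 5]])))  # (False, 1): entry (1, 1) is odd
print(is_even_matrix(_ToyMatrix([[2, 1], [7, 4]])))  # (True, -1): diagonal all even
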
def showOrigDec(orig, dec, num=10):
import matplotlib.pyplot as plt
n = num
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, (i + 1))
plt.imshow(orig[i].reshape(32, 32, 3))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, ((i + 1) + n))
plt.imshow(dec[i].reshape(32, 32, 3))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
    plt.show()

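# Hedged usage sketch for showOrigDec: any arrays whose rows reshape to
# 32x32x3 work, e.g. random data standing in for CIFAR-like originals and
# autoencoder reconstructions.
import numpy as np

orig = np.random.rand(10, 32 * 32 * 3)
dec = np.random.rand(10, 32 * 32 * 3)
showOrigDec(orig, dec, num=10)
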
def list_of_subfunctions(root, only_local_functions=True):
if inspect.ismodule(root):
ismodule = True
elif inspect.isclass(root):
ismodule = False
superclasses = inspect.getmro(root)[1:]
else:
raise ValueError("'root' must be a module or a class.")
def local_filter(f, name):
if only_local_functions:
if ismodule:
return (inspect.getmodule(root) == inspect.getmodule(f))
else:
return (not any((hasattr(s, name) for s in superclasses)))
else:
return (inspect.isclass(root) or (not (f is gen_rest_table_index)))
def can_import(f):
try:
hasattr(f, 'xyz')
except ImportError:
return False
return True
functions = {getattr(root, name): name for (name, f) in root.__dict__.items() if ((not name.startswith('_')) and can_import(f) and (not hasattr(f, 'issue_number')) and (not inspect.isclass(f)) and callable(getattr(f, '__func__', f)) and local_filter(f, name))}
    return (list(functions.keys()), functions)

def register_Ns3MmWaveMacCschedSapUserCschedLcConfigCnfParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWaveMacCschedSapUser::CschedLcConfigCnfParameters const &', 'arg0')])
cls.add_instance_attribute('m_logicalChannelIdentity', 'std::vector< unsigned char >', is_const=False)
cls.add_instance_attribute('m_result', 'ns3::Result_e', is_const=False)
cls.add_instance_attribute('m_rnti', 'uint16_t', is_const=False)
cls.add_instance_attribute('m_vendorSpecificList', 'std::vector< ns3::VendorSpecificListElement_s >', is_const=False)
    return

class Encoder(abc.ABC):
    # The method bodies were missing in this dump; restored here as abstract
    # stubs so the interface is at least syntactically valid.
    @abc.abstractmethod
    def spec(self):
        raise NotImplementedError

    @abc.abstractmethod
    def input_dim(self):
        raise NotImplementedError

    @abc.abstractmethod
    def output_dim(self):
        raise NotImplementedError

    @abc.abstractmethod
    def reset(self, do_resets=None):
        raise NotImplementedError

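# Hedged sketch of a concrete Encoder: an identity encoder that fills in the
# abstract interface above. The class and its dimensions are illustrative only.
class IdentityEncoder(Encoder):
    def __init__(self, dim):
        self._dim = dim

    def spec(self):
        return (self._dim, self._dim)

    def input_dim(self):
        return self._dim

    def output_dim(self):
        return self._dim

    def reset(self, do_resets=None):
        pass
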
from math import atan2, cos, pi, sin, sqrt

earthradius = 6371.0  # mean Earth radius in km; assumed module-level constant

def getDistanceByHaversine(loc1, loc2):
(lat1, lon1) = loc1
(lat2, lon2) = loc2
lon1 = ((lon1 * pi) / 180.0)
lon2 = ((lon2 * pi) / 180.0)
lat1 = ((lat1 * pi) / 180.0)
lat2 = ((lat2 * pi) / 180.0)
dlon = (lon2 - lon1)
dlat = (lat2 - lat1)
a = ((sin((dlat / 2)) ** 2) + ((cos(lat1) * cos(lat2)) * (sin((dlon / 2.0)) ** 2)))
c = (2.0 * atan2(sqrt(a), sqrt((1.0 - a))))
km = (earthradius * c)
    return km

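# Hedged usage sketch: inputs are (lat, lon) pairs in degrees. With the
# 6371 km Earth radius assumed above, Paris to London comes out around 343 km.
paris = (48.8566, 2.3522)
london = (51.5074, -0.1278)
print(getDistanceByHaversine(paris, london))
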
class Exif(MutableMapping):
endian = '<'
def __init__(self):
self._data = {}
self._ifds = {}
self._info = None
self._loaded_exif = None
def _fixup(self, value):
try:
if ((len(value) == 1) and (not isinstance(value, dict))):
return value[0]
except Exception:
pass
return value
def _fixup_dict(self, src_dict):
return {k: self._fixup(v) for (k, v) in src_dict.items()}
def _get_ifd_dict(self, tag):
try:
self.fp.seek(self[tag])
except (KeyError, TypeError):
pass
else:
from . import TiffImagePlugin
info = TiffImagePlugin.ImageFileDirectory_v1(self.head)
info.load(self.fp)
return self._fixup_dict(info)
def load(self, data):
if (data == self._loaded_exif):
return
self._loaded_exif = data
self._data.clear()
self._ifds.clear()
self._info = None
if (not data):
return
self.fp = io.BytesIO(data[6:])
self.head = self.fp.read(8)
from . import TiffImagePlugin
self._info = TiffImagePlugin.ImageFileDirectory_v1(self.head)
self.endian = self._info._endian
self.fp.seek(self._info.next)
self._info.load(self.fp)
ifd = self._get_ifd_dict(34665)
if ifd:
self._data.update(ifd)
self._ifds[34665] = ifd
def tobytes(self, offset=0):
from . import TiffImagePlugin
if (self.endian == '<'):
head = b'II*\x00\x08\x00\x00\x00'
else:
head = b'MM\x00*\x00\x00\x00\x08'
ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head)
for (tag, value) in self.items():
ifd[tag] = value
return ((b'Exif\x00\x00' + head) + ifd.tobytes(offset))
def get_ifd(self, tag):
if ((tag not in self._ifds) and (tag in self)):
if (tag in [34853, 40965]):
self._ifds[tag] = self._get_ifd_dict(tag)
elif (tag == 37500):
from .TiffImagePlugin import ImageFileDirectory_v2
if (self[37500][:8] == b'FUJIFILM'):
exif_data = self[37500]
ifd_offset = i32le(exif_data[8:12])
ifd_data = exif_data[ifd_offset:]
makernote = {}
for i in range(0, struct.unpack('<H', ifd_data[:2])[0]):
(ifd_tag, typ, count, data) = struct.unpack('<HHL4s', ifd_data[((i * 12) + 2):(((i + 1) * 12) + 2)])
try:
(unit_size, handler) = ImageFileDirectory_v2._load_dispatch[typ]
except KeyError:
continue
size = (count * unit_size)
if (size > 4):
(offset,) = struct.unpack('<L', data)
data = ifd_data[(offset - 12):((offset + size) - 12)]
else:
data = data[:size]
if (len(data) != size):
warnings.warn(('Possibly corrupt EXIF MakerNote data. Expecting to read %d bytes but only got %d. Skipping tag %s' % (size, len(data), ifd_tag)))
continue
if (not data):
continue
makernote[ifd_tag] = handler(ImageFileDirectory_v2(), data, False)
self._ifds[37500] = dict(self._fixup_dict(makernote))
elif (self.get(271) == 'Nintendo'):
ifd_data = self[37500]
makernote = {}
for i in range(0, struct.unpack('>H', ifd_data[:2])[0]):
(ifd_tag, typ, count, data) = struct.unpack('>HHL4s', ifd_data[((i * 12) + 2):(((i + 1) * 12) + 2)])
if (ifd_tag == 4353):
(offset,) = struct.unpack('>L', data)
self.fp.seek(offset)
camerainfo = {'ModelID': self.fp.read(4)}
self.fp.read(4)
camerainfo['TimeStamp'] = i32le(self.fp.read(12))
self.fp.read(4)
camerainfo['InternalSerialNumber'] = self.fp.read(4)
self.fp.read(12)
parallax = self.fp.read(4)
handler = ImageFileDirectory_v2._load_dispatch[TiffTags.FLOAT][1]
camerainfo['Parallax'] = handler(ImageFileDirectory_v2(), parallax, False)
self.fp.read(4)
camerainfo['Category'] = self.fp.read(2)
makernote = {4353: dict(self._fixup_dict(camerainfo))}
self._ifds[37500] = makernote
return self._ifds.get(tag, {})
def __str__(self):
if (self._info is not None):
for tag in self._info.keys():
self[tag]
return str(self._data)
def __len__(self):
keys = set(self._data)
if (self._info is not None):
keys.update(self._info)
return len(keys)
def __getitem__(self, tag):
if ((self._info is not None) and (tag not in self._data) and (tag in self._info)):
self._data[tag] = self._fixup(self._info[tag])
if (tag == 34853):
self._data[tag] = self.get_ifd(tag)
del self._info[tag]
return self._data[tag]
def __contains__(self, tag):
return ((tag in self._data) or ((self._info is not None) and (tag in self._info)))
def __setitem__(self, tag, value):
if ((self._info is not None) and (tag in self._info)):
del self._info[tag]
self._data[tag] = value
def __delitem__(self, tag):
if ((self._info is not None) and (tag in self._info)):
del self._info[tag]
del self._data[tag]
def __iter__(self):
keys = set(self._data)
if (self._info is not None):
keys.update(self._info)
        return iter(keys)

def test_interval_raises():
with pytest.raises(ValueError, match='One must have low <= high; got low=1, high=0.'):
        Interval(1, 0, False, False)

def test_mrmr_classif_without_scores():
selected_features = mrmr.polars.mrmr_classif(df=df_polars, K=4, target_column=target_column_classif, features=features, denominator='mean', only_same_domain=False, return_scores=False, show_progress=True)
    assert (set(selected_features) == set(['some_null', 'feature_a', 'feature_b']))

def getenv(name, default):
    try:
        # strip surrounding whitespace and quote characters from the value
        return os.environ[name].strip(' "\'')
    except KeyError:
        return default

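# Hedged usage sketch: surrounding whitespace and quote characters are
# stripped from the value; the default is returned for unset names. The
# variable names here are illustrative only.
import os

os.environ['DEMO_VAR'] = '"hello"'
assert getenv('DEMO_VAR', 'fallback') == 'hello'
assert getenv('MISSING_VAR', 'fallback') == 'fallback'
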
def get_observed_stats_from_network_attr(edgelist_filename, param_func_list, labels, outcome_bin_filename, binattr_filename=None, contattr_filename=None, catattr_filename=None, directed=False, bipartite=False):
assert (len(param_func_list) == len(labels))
if directed:
if bipartite:
            raise Exception('directed bipartite network not supported')
G = Digraph(edgelist_filename, binattr_filename, contattr_filename, catattr_filename)
elif bipartite:
G = BipartiteGraph(edgelist_filename, binattr_filename, contattr_filename, catattr_filename)
else:
G = Graph(edgelist_filename, binattr_filename, contattr_filename, catattr_filename)
outcome_binvar = list(map(int_or_na, open(outcome_bin_filename).read().split()[1:]))
assert (len(outcome_binvar) == G.numNodes())
A = outcome_binvar
assert all([(x in [0, 1, NA_VALUE]) for x in A])
Zobs = computeObservedStatistics(G, A, param_func_list)
    # NOTE: the degree statistics below are only defined for directed graphs
    labels += ['meanInDegree1', 'varInDegree1', 'meanOutDegree1', 'varOutDegree1', 'meanInDegree0', 'varInDegree0', 'meanOutDegree0', 'varOutDegree0']
indegseq = np.array([G.indegree(v) for v in iter(G.G.keys())])
outdegseq = np.array([G.outdegree(v) for v in iter(G.G.keys())])
A = np.array(outcome_binvar)
meanInDegree1 = np.mean(indegseq[np.nonzero((A == 1))[0]])
varInDegree1 = np.var(indegseq[np.nonzero((A == 1))[0]])
meanOutDegree1 = np.mean(outdegseq[np.nonzero((A == 1))[0]])
varOutDegree1 = np.var(outdegseq[np.nonzero((A == 1))[0]])
meanInDegree0 = np.mean(indegseq[np.nonzero((A == 0))[0]])
varInDegree0 = np.var(indegseq[np.nonzero((A == 0))[0]])
meanOutDegree0 = np.mean(outdegseq[np.nonzero((A == 0))[0]])
varOutDegree0 = np.var(outdegseq[np.nonzero((A == 0))[0]])
Zobs = np.append(Zobs, [meanInDegree1, varInDegree1, meanOutDegree1, varOutDegree1])
Zobs = np.append(Zobs, [meanInDegree0, varInDegree0, meanOutDegree0, varOutDegree0])
sys.stdout.write((' '.join(labels) + '\n'))
    sys.stdout.write((' '.join([str(z) for z in Zobs]) + '\n'))

class BayesLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=0.0, bidirectional=False, prior=None, mu_lower=(- 0.05), mu_upper=0.05, rho_lower=math.log((math.exp((1.0 / 4.0)) - 1.0)), rho_upper=math.log((math.exp((1.0 / 2.0)) - 1.0))):
super().__init__()
self.module = nn.LSTM(input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional)
self.prior = prior
self.kl = None
for (name, w) in self.module.named_parameters():
(mu, rho) = get_bbb_variable(w.shape, mu_lower, mu_upper, rho_lower, rho_upper)
self.register_parameter(f'{name}_mu', mu)
self.register_parameter(f'{name}_rho', rho)
self.module._parameters[name] = mu
def _setweights(self, mean_field_inference=False):
kl = 0
for (name, _) in self.module.named_parameters():
mu = getattr(self, f'{name}_mu')
rho = getattr(self, f'{name}_rho')
sigma = (F.softplus(rho) + 1e-05)
if (mean_field_inference is False):
eps = rho.data.new(rho.size()).normal_(0.0, 1.0)
w = (mu + (sigma * eps))
kl += compute_KL(w, mu, sigma, self.prior)
else:
w = mu
self.module._parameters[name] = w
self.kl = kl
    def forward(self, *args, mean_field_inference=False):
        # propagate the flag so mean-field inference actually skips sampling
        self._setweights(mean_field_inference=mean_field_inference)
        self.module.flatten_parameters()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return self.module.forward(*args)

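# Hedged sketch of the compute_KL helper used by BayesLSTM above (its
# definition is not in this dump): a single-sample Monte Carlo estimate of
# KL(q || p) = E_q[log q(w) - log p(w)]. With prior=None a standard normal
# prior is assumed; otherwise the prior is assumed to expose log_prob(w).
import math
import torch

def compute_KL(w, mu, sigma, prior=None):
    log_q = (-0.5 * math.log(2 * math.pi) - torch.log(sigma)
             - ((w - mu) ** 2) / (2 * sigma ** 2)).sum()
    if prior is None:
        log_p = (-0.5 * math.log(2 * math.pi) - 0.5 * w ** 2).sum()
    else:
        log_p = prior.log_prob(w).sum()
    return log_q - log_p
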
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
super().__init__(embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, add_bias_kv, add_zero_attn, export)
        self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity)

class SimpleTokenizer(object):
def __init__(self, bpe_path: str=default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
merges = merges[1:(((49152 - 256) - 2) + 1)]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = (vocab + [(v + '</w>') for v in vocab])
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE: \p{L}/\p{N} classes require the third-party 'regex' package;
        # the original CLIP source does `import regex as re`.
        self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE)
self.vocab = self.encoder
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
pairs = get_pairs(word)
if (not pairs):
return (token + '</w>')
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` does not occur in the rest of `word`
new_word.extend(word[i:])
break
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
return text
def tokenize(self, text):
tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return tokens
def convert_tokens_to_ids(self, tokens):
        return [self.encoder[bpe_token] for bpe_token in tokens]

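# Hedged usage sketch for SimpleTokenizer: a CLIP-style encode/decode round
# trip, assuming default_bpe() resolves to the BPE vocabulary file.
tokenizer = SimpleTokenizer()
ids = tokenizer.encode('a photo of a cat')
print(ids)
print(tokenizer.decode(ids))  # 'a photo of a cat ' (decode re-adds word-end spaces)
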
def conv2d2(inputs, num_outputs, kernel_size, sn, stride=1, rate=1, data_format='NCHW', activation_fn=tf.nn.relu, normalizer_fn=None, normalizer_params=None, weights_regularizer=None, weights_initializer=ly.xavier_initializer(), biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, scope=None, SPECTRAL_NORM_UPDATE_OPS='spectral_norm_update_ops'):
assert (data_format == 'NCHW')
assert (rate == 1)
if (data_format == 'NCHW'):
channel_axis = 1
stride = [1, 1, stride, stride]
rate = [1, 1, rate, rate]
else:
channel_axis = 3
stride = [1, stride, stride, 1]
rate = [1, rate, rate, 1]
input_dim = inputs.get_shape().as_list()[channel_axis]
with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse) as sc:
inputs = tf.convert_to_tensor(inputs)
weights = tf.get_variable(name='weights', shape=(kernel_size, kernel_size, input_dim, num_outputs), initializer=weights_initializer, regularizer=weights_regularizer, trainable=True, dtype=inputs.dtype.base_dtype)
if sn:
weights = spectral_normed_weight(weights, num_iters=1, update_collection=SPECTRAL_NORM_UPDATE_OPS)
conv_out = tf.nn.conv2d(inputs, weights, strides=stride, padding='SAME', data_format=data_format)
if (biases_initializer is not None):
biases = tf.get_variable(name='biases', shape=(1, num_outputs, 1, 1), initializer=biases_initializer, regularizer=biases_regularizer, trainable=True, dtype=inputs.dtype.base_dtype)
conv_out += biases
if (normalizer_fn is not None):
normalizer_params = (normalizer_params or {})
conv_out = normalizer_fn(conv_out, activation_fn=None, **normalizer_params)
if (activation_fn is not None):
conv_out = activation_fn(conv_out)
        return conv_out

def get_preprocessing(name, is_training=False):
preprocessing_fn_map = {'cifarnet': cifarnet_preprocessing, 'inception': inception_preprocessing, 'inception_v1': inception_preprocessing, 'inception_v2': inception_preprocessing, 'inception_v3': inception_preprocessing, 'inception_v3_bap': inception_preprocessing, 'inception_v4': inception_preprocessing, 'inception_resnet_v2': inception_preprocessing, 'lenet': lenet_preprocessing, 'mobilenet_v1': inception_preprocessing, 'nasnet_mobile': inception_preprocessing, 'nasnet_large': inception_preprocessing, 'resnet_v1_50': vgg_preprocessing, 'resnet_v1_101': vgg_preprocessing, 'resnet_v1_152': vgg_preprocessing, 'resnet_v1_200': vgg_preprocessing, 'resnet_v2_50': vgg_preprocessing, 'resnet_v2_101': vgg_preprocessing, 'resnet_v2_152': vgg_preprocessing, 'resnet_v2_200': vgg_preprocessing, 'vgg': vgg_preprocessing, 'vgg_a': vgg_preprocessing, 'vgg_16': vgg_preprocessing, 'vgg_19': vgg_preprocessing}
if (name not in preprocessing_fn_map):
raise ValueError(('Preprocessing name [%s] was not recognized' % name))
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(image, output_height, output_width, is_training=is_training, **kwargs)
    return preprocessing_fn

class CLI(LightningCLI):
def __init__(self, model_class, run=True, **kwargs):
trainer_defaults = {'default_config_files': [os.path.join('perceiver', 'trainer.yaml')]}
super().__init__(model_class, run=run, save_config_overwrite=True, parser_kwargs={'fit': trainer_defaults, 'test': trainer_defaults, 'validate': trainer_defaults}, **kwargs)
def instantiate_trainer(self, **kwargs: Any) -> Trainer:
if self.subcommand:
cfg = self.config_init[self.subcommand]
else:
cfg = self.config_init
if (cfg['trainer']['strategy'] == 'ddp_static_graph'):
cfg['trainer']['strategy'] = DDPStaticGraphPlugin(find_unused_parameters=False)
return super().instantiate_trainer(logger=cfg['logger'], **kwargs)
def add_default_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
super().add_default_arguments_to_parser(parser)
parser.add_argument('--experiment', default='default')
def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
parser.add_class_arguments(TensorBoardLogger, 'logger')
parser.link_arguments('trainer.default_root_dir', 'logger.save_dir', apply_on='parse')
parser.link_arguments('experiment', 'logger.name', apply_on='parse')
        parser.add_optimizer_args(torch.optim.AdamW, link_to='model.optimizer_init')

def get_all_E_gt_func(Js, Trange):
E_gt = [E_gt_func(j, Js, Trange) for j in range(len(Js))]
    return E_gt

class exponweib_gen(rv_continuous):
def _shape_info(self):
ia = _ShapeInfo('a', False, (0, np.inf), (False, False))
ic = _ShapeInfo('c', False, (0, np.inf), (False, False))
return [ia, ic]
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = (- (x ** c))
exm1c = (- sc.expm1(negxc))
logp = ((((np.log(a) + np.log(c)) + sc.xlogy((a - 1.0), exm1c)) + negxc) + sc.xlogy((c - 1.0), x))
return logp
def _cdf(self, x, a, c):
exm1c = (- sc.expm1((- (x ** c))))
return (exm1c ** a)
def _ppf(self, q, a, c):
return ((- sc.log1p((- (q ** (1.0 / a))))) ** np.asarray((1.0 / c)))
def _sf(self, x, a, c):
return (- _pow1pm1((- np.exp((- (x ** c)))), a))
def _isf(self, p, a, c):
        return ((- np.log((- _pow1pm1((- p), (1 / a))))) ** (1 / c))

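# Hedged numerical sanity check for the exponentiated-Weibull pair above:
# _ppf should invert _cdf. This recomputes both formulas directly with
# scipy.special rather than instantiating the rv_continuous subclass.
import numpy as np
import scipy.special as sc

a, c, x = 2.5, 1.5, 0.7
q = (-sc.expm1(-(x ** c))) ** a                       # matches _cdf(x, a, c)
x_back = (-sc.log1p(-(q ** (1.0 / a)))) ** (1.0 / c)  # matches _ppf(q, a, c)
assert np.isclose(x, x_back)
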
class AdjustedRandScore(EfficientMI):
def _calc_score(self, *args, **kwargs):
return self.calc_ARand(*args, **kwargs)
def calc_ARand(self, last):
N = last['N']
a = last['a']
b = last['b']
n = last['n']
Nc = tensor_calc_combination(N, 2).sum(dim=[(- 1), (- 2)])
ac = tensor_calc_combination(a, 2).sum(dim=(- 1))
bc = tensor_calc_combination(b, 2).sum(dim=(- 1))
nc = tensor_calc_combination(n, 2)
chance_term = ((ac * bc) / nc)
numerator = (Nc - chance_term)
denominator = (((1 / 2) * (ac + bc)) - chance_term)
        return (numerator / denominator)

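# Hedged sketch of the tensor_calc_combination helper used above (not in this
# dump): an elementwise binomial coefficient C(n, k); the adjusted Rand
# computation only needs k=2, i.e. n * (n - 1) / 2.
from math import factorial
import torch

def tensor_calc_combination(n, k=2):
    out = torch.ones_like(n, dtype=torch.float64)
    for i in range(k):
        out = out * (n - i)
    return out / factorial(k)
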
class miniImageNetGenerator(object):
def __init__(self, data_file, nb_classes=5, nb_samples_per_class=15, max_iter=None, xp=np):
super(miniImageNetGenerator, self).__init__()
self.data_file = data_file
self.nb_classes = nb_classes
self.nb_samples_per_class = nb_samples_per_class
self.max_iter = max_iter
self.xp = xp
self.num_iter = 0
self.data = self._load_data(self.data_file)
def _load_data(self, data_file):
dataset = self.load_data(data_file)
data = dataset['data']
labels = dataset['labels']
label2ind = self.buildLabelIndex(labels)
return {key: np.array(data[val]) for (key, val) in label2ind.items()}
def load_data(self, data_file):
try:
with open(data_file, 'rb') as fo:
data = pkl.load(fo)
return data
except:
with open(data_file, 'rb') as f:
u = pkl._Unpickler(f)
u.encoding = 'latin1'
data = u.load()
return data
def buildLabelIndex(self, labels):
label2inds = {}
for (idx, label) in enumerate(labels):
if (label not in label2inds):
label2inds[label] = []
label2inds[label].append(idx)
return label2inds
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if ((self.max_iter is None) or (self.num_iter < self.max_iter)):
self.num_iter += 1
(images, labels) = self.sample(self.nb_classes, self.nb_samples_per_class)
return ((self.num_iter - 1), (images, labels))
else:
raise StopIteration()
    def sample(self, nb_classes, nb_samples_per_class):
        # list() is needed on Python 3, where dict keys are not a sequence
        sampled_characters = random.sample(list(self.data.keys()), nb_classes)
        labels_and_images = []
        for (k, char) in enumerate(sampled_characters):
            _imgs = self.data[char]
            _ind = random.sample(range(len(_imgs)), nb_samples_per_class)
            # NOTE: in the source, .flatten() was applied to the scalar 255 (a
            # no-op after broadcasting); the image itself is left unflattened.
            labels_and_images.extend([(k, self.xp.array(_imgs[i] / np.float32(255))) for i in _ind])
arg_labels_and_images = []
for i in range(self.nb_samples_per_class):
for j in range(self.nb_classes):
arg_labels_and_images.extend([labels_and_images[(i + (j * self.nb_samples_per_class))]])
(labels, images) = zip(*arg_labels_and_images)
        return (images, labels)

class TestMultipleFields(object):
def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary[('f0', 'f1')]
def test_no_tuple(self):
assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
        assert_((res == [(1, 3), (5, 7)]))

@dataclass
class DataTrainingArguments:
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: int = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_val_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'})
max_test_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'})
version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, some of the examples do not have an answer.'})
null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when `version_2_with_negative=True`.'})
doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'})
max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
def __post_init__(self):
if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None) and (self.test_file is None)):
raise ValueError('Need either a dataset name or a training/validation file/test_file.')
else:
if (self.train_file is not None):
extension = self.train_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
if (self.validation_file is not None):
extension = self.validation_file.split('.')[(- 1)]
assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
if (self.test_file is not None):
extension = self.test_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`test_file` should be a csv or a json file.'

class Protocol(object):
_SERIALIZER = ':'
def __init__(self, protocol):
self._name = protocol['name']
self._mode = protocol['mode']
try:
from minicps import __file__
index = __file__.rfind('minicps')
self._minicps_path = (__file__[:(index + 7)] + '/')
except Exception as error:
print('ERROR Protocol __init__ set _minicps_path: ', error)
if (self._mode > 0):
self._server = protocol['server']
else:
self._server = {}
    @classmethod
    def _start_server(cls, address, values):
print('_start_server: please override me.')
    @classmethod
    def _stop_server(cls, address):
print('_stop_server: please override me.')
def _send(self, what, value, address, **kwargs):
print('_send: please override me.')
def _receive(self, what, address, **kwargs):
print('_receive: please override me.')
def _receive_multiple(self, what, address, **kwargs):
print('_receive_multiple: please override me.')
def _send_multiple(self, what, values, address):
        print('_send_multiple: please override me.')

class DialogTracker():
def __init__(self, bot_url):
self._bot = convai_api.ConvApiBot(bot_url)
self._bot_url = bot_url
self._chat_fsm = {}
self._users = {}
self._text = 'God'
self._factoid_qas = []
def start(self):
while True:
try:
res = requests.get(os.path.join(self._bot_url, 'getUpdates'), timeout=5)
if (res.status_code != 200):
logger.warn(res.text)
if (len(res.json()) == 0):
sleep(0.1)
continue
for m in res.json():
logger.info(m)
update = convai_api.ConvUpdate(m)
if m['message']['text'].startswith('/start '):
self._log_user('_start_or_begin_or_test_cmd', update)
greet_user(self._bot, update.effective_chat.id)
self._text = m['message']['text'][len('/start '):]
self._get_qas()
self._add_fsm_and_user(update, True)
fsm = self._chat_fsm[update.effective_chat.id]
fsm.start_convai()
elif (m['message']['text'] == '/end'):
self._log_user('_end_cmd', update)
fsm = self._chat_fsm[update.effective_chat.id]
fsm.return_to_init()
elif (m['message']['text'] == 'version'):
self._log_user('version', update)
self._add_fsm_and_user(update)
fsm = self._chat_fsm[update.effective_chat.id]
fsm._send_message('Version is {}'.format(version))
elif m['message']['text'].startswith('reset'):
self._log_user('reset', update)
self._add_fsm_and_user(update, True)
fsm = self._chat_fsm[update.effective_chat.id]
fsm.return_to_init()
fsm.return_to_start()
fsm._send_message('Hmm....')
else:
self._log_user('_echo_cmd', update)
fsm = self._chat_fsm[update.effective_chat.id]
fsm.set_user_message(update.message.text)
if (not fsm._text):
fsm._send_message('Text is not given. Please try to type /end and /test to reset the state and get text.')
continue
if fsm.is_asked():
fsm.check_user_answer_on_asked()
else:
fsm.classify()
except Exception as e:
logger.exception(str(e))
sleep(0.1)
def _log_user(self, cmd, update):
logger_bot.info('USER[{}]: {}'.format(cmd, update.message.text))
def _add_fsm_and_user(self, update, hard=False):
if (update.effective_chat.id not in self._chat_fsm):
fsm = BotBrain(self._bot, update.effective_user, update.effective_chat, self._text_and_qa())
self._chat_fsm[update.effective_chat.id] = fsm
self._users[update.effective_user.id] = update.effective_user
elif ((update.effective_user.id in self._chat_fsm) and hard):
self._chat_fsm[update.effective_chat.id].clear_all()
del self._chat_fsm[update.effective_chat.id]
fsm = BotBrain(self._bot, update.effective_user, update.effective_chat, self._text_and_qa())
self._chat_fsm[update.effective_chat.id] = fsm
self._users[update.effective_user.id] = update.effective_user
def _get_qas(self):
out = subprocess.check_output(['from_question_generation/get_qnas', self._text])
questions = [line.split('\t') for line in str(out, 'utf-8').split('\n')]
self._factoid_qas = [{'question': e[0], 'answer': e[1], 'score': e[2]} for e in questions if (len(e) == 3)]
def _text_and_qa(self):
        return {'text': self._text, 'qas': self._factoid_qas}

class Convolution2d(Sequential):
def __init__(self, sub_layer, filter_size=(1, 1), stride=(1, 1), *, input_shape=None, padding='valid', border_mode='reflect_101', border_value=0.0, name=None, fw_dtype=bb.DType.FP32, bw_dtype=bb.DType.FP32):
self.fw_dtype = fw_dtype
self.bw_dtype = bw_dtype
self.shapes = []
self.im2col = ConvolutionIm2Col(filter_size=filter_size, stride=stride, padding=padding, border_mode=border_mode, border_value=border_value, fw_dtype=fw_dtype, bw_dtype=bw_dtype)
self.sub_layer = sub_layer
self.col2im = ConvolutionCol2Im(fw_dtype=fw_dtype, bw_dtype=bw_dtype)
model_list = [self.im2col, self.sub_layer, self.col2im]
super(Convolution2d, self).__init__(model_list=model_list, name=name, input_shape=input_shape)
def send_command(self, command, send_to='all'):
self.im2col.send_command(command=command, send_to=send_to)
self.sub_layer.send_command(command=command, send_to=send_to)
self.col2im.send_command(command=command, send_to=send_to)
def get_core(self):
core_creator = search_core_model('Convolution2d', [self.fw_dtype, self.bw_dtype]).create
core_model = core_creator(self.im2col.get_core(), self.sub_layer.get_core(), self.col2im.get_core())
if (self.name is not None):
core_model.set_name(self.name)
return core_model
def get_sub_layer(self):
return self.sub_layer
def set_input_shape(self, shape):
self.input_shape = shape
input_h_size = shape[1]
input_w_size = shape[2]
padding = self.im2col.get_padding()
filter_size = self.im2col.get_filter_size()
stride = self.im2col.get_stride()
if (padding == 'valid'):
output_h_size = ((((input_h_size - filter_size[0]) + 1) + (stride[0] - 1)) // stride[0])
output_w_size = ((((input_w_size - filter_size[1]) + 1) + (stride[1] - 1)) // stride[1])
elif (padding == 'same'):
output_h_size = ((input_h_size + (stride[0] - 1)) // stride[0])
            # width must use stride[1]; the source divided by stride[0]
            output_w_size = ((input_w_size + (stride[1] - 1)) // stride[1])
else:
raise ValueError('illegal padding value')
self.col2im.set_output_size(output_size=[output_h_size, output_w_size])
return super(Convolution2d, self).set_input_shape(shape)
def forward(self, x_buf, train=True):
shape = x_buf.get_node_shape()
self.set_input_shape(shape)
if train:
self.shapes.append(shape)
return super(Convolution2d, self).forward(x_buf, train=train)
def backward(self, dy_buf):
shape = self.shapes.pop()
self.set_input_shape(shape)
return super(Convolution2d, self).backward(dy_buf)
def clear(self):
self.shapes = []
return super(Convolution2d, self).clear()
def get_object_name(self):
return ((('Convolution2d_' + bb.dtype_to_name(self.fw_dtype)) + '_') + bb.dtype_to_name(self.bw_dtype))
def dumps(self):
data = b''
data += core.Object.write_header(self.get_object_name())
ver = 1
data += bb.int_to_bytes(ver)
data += self.im2col.dumps()
data += self.col2im.dumps()
if self.sub_layer:
data += bb.bool_to_bytes(True)
data += self.sub_layer.dumps()
else:
data += bb.bool_to_bytes(False)
return data
def loads(self, data):
(data, name) = bb.load_object_header(data)
type_names = re.match('Convolution2d_(.+)_(.+)', name)
assert type_names
self.fw_dtype = bb.dtype_from_name(type_names[1])
self.bw_dtype = bb.dtype_from_name(type_names[2])
(data, ver) = bb.int_from_bytes(data)
assert (ver == 1)
data = self.im2col.loads(data)
data = self.col2im.loads(data)
(data, has_layer) = bb.bool_from_bytes(data)
if has_layer:
if self.sub_layer:
data = self.sub_layer.loads(data)
else:
(data, model) = bb.object_loads(data)
self.sub_layer = model
self.set_model_list([self.im2col, self.sub_layer, self.col2im])
return data
    @classmethod
    def from_bytes(cls, data):
(_, object_name) = bb.load_object_header(data)
dtypes = object_name.split('_')
new_model = cls(sub_layer=None, fw_dtype=bb.dtype_from_name(dtypes[1]), bw_dtype=bb.dtype_from_name(dtypes[2]))
data = new_model.loads(data)
        return (data, new_model)

class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=0.0, attn_drop=0.0, drop_path=0.0, act_args={'act': 'gelu'}, norm_args={'norm': 'ln'}):
super().__init__()
self.norm1 = create_norm(norm_args, dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
self.norm2 = create_norm(norm_args, dim)
mlp_hidden_dim = int((dim * mlp_ratio))
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_args=act_args, drop=drop)
def forward(self, x):
x = (x + self.drop_path(self.attn(self.norm1(x))))
x = (x + self.drop_path(self.mlp(self.norm2(x))))
        return x

def register_Ns3EpcS11SapMmeModifyBearerResponseMessage_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::EpcS11SapMme::ModifyBearerResponseMessage const &', 'arg0')])
cls.add_instance_attribute('cause', 'ns3::EpcS11SapMme::ModifyBearerResponseMessage::Cause', is_const=False)
    return

def conv_init(conv):
if (conv.weight is not None):
nn.init.kaiming_normal_(conv.weight, mode='fan_out')
if (conv.bias is not None):
        nn.init.constant_(conv.bias, 0)

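# Hedged usage sketch for conv_init on a fresh PyTorch convolution.
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, bias=True)
conv_init(conv)  # Kaiming-normal weights (fan_out mode), zero-initialized bias
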
def test_RecordArray_NumpyArray():
v2a = ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], ['x', 'y'])
roundtrip(v2a)
array = ak.highlevel.Array(v2a)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2)
v2b = ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], None)
roundtrip(v2b)
array = ak.highlevel.Array(v2b)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2)
v2c = ak.contents.recordarray.RecordArray([], [], 10)
roundtrip(v2c)
array = ak.highlevel.Array(v2c)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2)
v2d = ak.contents.recordarray.RecordArray([], None, 10)
roundtrip(v2d)
array = ak.highlevel.Array(v2d)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
    memoryleak(array, digest2)

def load_labelmap(path):
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
    return label_map

def from_dc_to_ip_survey(dc_survey, dim='2.5D'):
source_list = dc_survey.source_list
ip_survey = Survey(source_list)
    return ip_survey

def eval_distinct_detail(hyps_resp):
if (len(hyps_resp) == 0):
print('ERROR, eval_distinct get empty input')
return
if (type(hyps_resp[0]) != list):
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(type(hyps_resp[0])))
return
hyps_resp = [[str(x) for x in l] for l in hyps_resp]
hyps_resp = [' '.join(i).split() for i in hyps_resp]
num_tokens = sum([len(i) for i in hyps_resp])
dist1 = (count_ngram(hyps_resp, 1) / float(num_tokens))
dist2 = (count_ngram(hyps_resp, 2) / float(num_tokens))
    return (dist1, dist2)

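# Hedged sketch of the count_ngram helper referenced above (not in this
# dump): the dist-1/dist-2 ratios divide a count of *distinct* n-grams
# across all tokenized responses by the total token count.
def count_ngram(hyps_resp, n):
    ngrams = set()
    for resp in hyps_resp:
        for i in range(len(resp) - n + 1):
            ngrams.add(tuple(resp[i:i + n]))
    return len(ngrams)
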
def run(args, kwargs):
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, (('vae_' + args.dataset) + '_'))
snap_dir = (snapshots_path + args.flow)
if (args.flow != 'no_flow'):
snap_dir += (('_' + 'num_flows_') + str(args.num_flows))
if (args.flow == 'orthogonal'):
snap_dir = ((snap_dir + '_num_vectors_') + str(args.num_ortho_vecs))
elif (args.flow == 'orthogonalH'):
snap_dir = ((snap_dir + '_num_householder_') + str(args.num_householder))
elif (args.flow == 'iaf'):
snap_dir = ((snap_dir + '_madehsize_') + str(args.made_h_size))
elif (args.flow == 'permutation'):
snap_dir = (((snap_dir + '_') + 'kernelsize_') + str(args.kernel_size))
elif (args.flow == 'mixed'):
snap_dir = (((snap_dir + '_') + 'num_householder_') + str(args.num_householder))
elif (args.flow == 'cnf_rank'):
snap_dir = ((((((snap_dir + '_rank_') + str(args.rank)) + '_') + args.dims) + '_num_blocks_') + str(args.num_blocks))
elif ('cnf' in args.flow):
snap_dir = ((((snap_dir + '_') + args.dims) + '_num_blocks_') + str(args.num_blocks))
if args.retrain_encoder:
snap_dir = (snap_dir + '_retrain-encoder_')
elif args.evaluate:
snap_dir = (snap_dir + '_evaluate_')
snap_dir = (((snap_dir + '__') + args.model_signature) + '/')
args.snap_dir = snap_dir
if (not os.path.exists(snap_dir)):
os.makedirs(snap_dir)
utils.makedirs(args.snap_dir)
logger = utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
torch.save(args, ((snap_dir + args.flow) + '.config'))
(train_loader, val_loader, test_loader, args) = load_dataset(args, **kwargs)
if (not args.evaluate):
if (args.flow == 'no_flow'):
model = VAE.VAE(args)
elif (args.flow == 'planar'):
model = VAE.PlanarVAE(args)
elif (args.flow == 'iaf'):
model = VAE.IAFVAE(args)
elif (args.flow == 'orthogonal'):
model = VAE.OrthogonalSylvesterVAE(args)
elif (args.flow == 'householder'):
model = VAE.HouseholderSylvesterVAE(args)
elif (args.flow == 'triangular'):
model = VAE.TriangularSylvesterVAE(args)
elif (args.flow == 'cnf'):
model = CNFVAE.CNFVAE(args)
elif (args.flow == 'cnf_bias'):
model = CNFVAE.AmortizedBiasCNFVAE(args)
elif (args.flow == 'cnf_hyper'):
model = CNFVAE.HypernetCNFVAE(args)
elif (args.flow == 'cnf_lyper'):
model = CNFVAE.LypernetCNFVAE(args)
elif (args.flow == 'cnf_rank'):
model = CNFVAE.AmortizedLowRankCNFVAE(args)
else:
raise ValueError('Invalid flow choice')
if args.retrain_encoder:
logger.info(f'Initializing decoder from {args.model_path}')
dec_model = torch.load(args.model_path)
dec_sd = {}
for (k, v) in dec_model.state_dict().items():
if ('p_x' in k):
dec_sd[k] = v
model.load_state_dict(dec_sd, strict=False)
if args.cuda:
logger.info('Model on GPU')
model.cuda()
logger.info(model)
if args.retrain_encoder:
parameters = []
logger.info('Optimizing over:')
for (name, param) in model.named_parameters():
if ('p_x' not in name):
logger.info(name)
parameters.append(param)
else:
parameters = model.parameters()
optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1e-07)
train_loss = []
val_loss = []
best_loss = np.inf
best_bpd = np.inf
e = 0
epoch = 0
train_times = []
for epoch in range(1, (args.epochs + 1)):
t_start = time.time()
tr_loss = train(epoch, train_loader, model, optimizer, args, logger)
train_loss.append(tr_loss)
train_times.append((time.time() - t_start))
logger.info(('One training epoch took %.2f seconds' % (time.time() - t_start)))
(v_loss, v_bpd) = evaluate(val_loader, model, args, logger, epoch=epoch)
val_loss.append(v_loss)
if (v_loss < best_loss):
e = 0
best_loss = v_loss
if (args.input_type != 'binary'):
best_bpd = v_bpd
logger.info('->model saved<-')
torch.save(model, ((snap_dir + args.flow) + '.model'))
elif ((args.early_stopping_epochs > 0) and (epoch >= args.warmup)):
e += 1
if (e > args.early_stopping_epochs):
break
if (args.input_type == 'binary'):
logger.info('--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss))
else:
logger.info('--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss, best_bpd))
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_loss = np.hstack(train_loss)
val_loss = np.array(val_loss)
plot_training_curve(train_loss, val_loss, fname=(snap_dir + ('/training_curve_%s.pdf' % args.flow)))
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
logger.info(('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)))
logger.info(args)
logger.info(('Stopped after %d epochs' % epoch))
logger.info(('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)))
final_model = torch.load(((snap_dir + args.flow) + '.model'))
(validation_loss, validation_bpd) = evaluate(val_loader, final_model, args, logger)
else:
validation_loss = 'N/A'
validation_bpd = 'N/A'
logger.info(f'Loading model from {args.model_path}')
final_model = torch.load(args.model_path)
(test_loss, test_bpd) = evaluate(test_loader, final_model, args, logger, testing=True)
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss))
if (args.input_type != 'binary'):
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd))
        logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd))

def apply_template_plan(prefix, template):
from openfl.federated.plan import Plan
from openfl.interface.cli_helper import WORKSPACE
template_plan = Plan.parse((((WORKSPACE / template) / 'plan') / 'plan.yaml'))
    Plan.dump(((prefix / 'plan') / 'plan.yaml'), template_plan.config)

def get_cmd_reg(wb, names, cmd_reg):
for name in names:
sheet = wb[name]
name = name.replace(' ', '')
name = name.split('(')[0].split('(')[0]
        cmd_reg[name] = read_sheet(sheet)

class MultiWozDB(object):
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']
dbs = {}
CUR_DIR = os.path.dirname(__file__)
for domain in domains:
db = os.path.join('utils/multiwoz/db/{}-dbase.db'.format(domain))
conn = sqlite3.connect(db)
c = conn.cursor()
dbs[domain] = c
def queryResultVenues(self, domain, turn, real_belief=False):
sql_query = 'select * from {}'.format(domain)
if (real_belief == True):
items = turn.items()
else:
items = turn['metadata'][domain]['semi'].items()
flag = True
for (key, val) in items:
if ((val == '') or (val == 'dontcare') or (val == 'not mentioned') or (val == "don't care") or (val == 'dont care') or (val == "do n't care")):
pass
elif flag:
sql_query += ' where '
val2 = val.replace("'", "''")
val2 = normalize(val2)
if (key == 'leaveAt'):
sql_query += (((((' ' + key) + ' > ') + "'") + val2) + "'")
elif (key == 'arriveBy'):
sql_query += (((((' ' + key) + ' < ') + "'") + val2) + "'")
else:
sql_query += (((((' ' + key) + '=') + "'") + val2) + "'")
flag = False
else:
val2 = val.replace("'", "''")
val2 = normalize(val2)
if (key == 'leaveAt'):
sql_query += (((((' and ' + key) + ' > ') + "'") + val2) + "'")
elif (key == 'arriveBy'):
sql_query += (((((' and ' + key) + ' < ') + "'") + val2) + "'")
else:
sql_query += (((((' and ' + key) + '=') + "'") + val2) + "'")
        try:
            return self.dbs[domain].execute(sql_query).fetchall()
        except sqlite3.Error:
            return []

class Kernelf(Component):
def __init__(self, ls, context={}):
super().__init__(context=context)
self.ls = ls
def __call__(self, x, z=None, diagonal=False, distance=False):
qmmlpack = import_qmmlpack('use cmlkit.regression.qmml')
kernelf = getattr(qmmlpack, kernelfs[self.kind])
return kernelf(x=x, z=z, theta=self.ls, diagonal=diagonal)
def _get_config(self):
        return {'ls': self.ls}

def main():
args = get_arg()
random.seed(RAND_SEED)
np.random.seed(RAND_SEED)
torch.manual_seed(RAND_SEED)
data = load_stage2_data(datatrack=args.datatrack, feat_type=args.feat_type, i_cv=args.i_cv)
method = args.method
if (method == 'autogp'):
if (args.datatrack == 'phase1-main'):
method = 'svgp'
else:
method = 'exactgp'
if (method == 'svgp'):
model = SVGP(stage='stage2')
elif (method == 'exactgp'):
model = ExactGP(stage='stage2')
elif (method == 'rf'):
model = RandomForest()
else:
if args.use_opt:
param_file = (((Path('../out/ensemble-multidomain/opt_hp_stage2') / args.datatrack) / f'{method}-{args.feat_type}') / 'params.json')
params = json.load(open(param_file, 'rb'))
logger.info('Params: {}'.format(params))
else:
params = {}
if (method == 'ridge'):
model = Ridge(params=params)
elif (method == 'linear_svr'):
model = LinearSVR(params=params)
elif (method == 'kernel_svr'):
model = KernelSVR(params=params)
elif (method == 'lightgbm'):
model = LightGBM(params=params)
else:
raise RuntimeError('Not supported method: "{}"'.format(method))
model.train(data['train']['X'], data['train']['y'], data['val']['X'], data['val']['y'])
df_val = model.predict(data['val']['X'], data['val']['df'])
df_test = model.predict(data['test']['X'], data['test']['df'])
out_dir = (((Path('../out/ensemble-multidomain/stage2') / args.datatrack) / f'{method}-{args.feat_type}') / str(args.i_cv))
os.makedirs(out_dir, exist_ok=True)
df_val.to_csv((out_dir / 'val.csv'))
df_test.to_csv((out_dir / 'test.csv'))
    model.save_model(out_dir)

def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
message = ''
allowed = True
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
for qid in qids_to_ranked_candidate_passages:
duplicate_pids = set([item for (item, count) in Counter(qids_to_ranked_candidate_passages[qid]).items() if (count > 1)])
if (len((duplicate_pids - set([0]))) > 0):
message = 'Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}'.format(qid=qid, pid=list(duplicate_pids)[0])
allowed = False
    return (allowed, message)

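# Hedged usage sketch: a ranking that repeats a passage ID for one query is
# rejected. QIDs/PIDs here are illustrative; Counter is assumed imported in
# the original module.
from collections import Counter

qrels = {1: [100]}
ranking = {1: [100, 100, 7]}
allowed, message = quality_checks_qids(qrels, ranking)
assert not allowed
print(message)
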
@pytest.mark.parametrize('n_neighbors, expected_risk', [(1, 0.25), (2, (5 / 6)), (3, 1), (4, 1)])
def test_baseline(n_neighbors, expected_risk):
    # NOTE: `rng` is assumed to be a module-level numpy Generator elided from
    # this dump, e.g. rng = np.random.default_rng(0)
    ori = pd.DataFrame(rng.choice(['a', 'b'], size=(400, 2)), columns=['c0', 'c1'])
syn = pd.DataFrame([['a', 'a'], ['b', 'b'], ['a', 'a'], ['a', 'a']], columns=['c0', 'c1'])
evaluator = LinkabilityEvaluator(ori=ori, syn=syn, n_attacks=None, n_neighbors=n_neighbors, aux_cols=('c0', 'c1'))
evaluator.evaluate(n_jobs=1)
(baseline_risk, _) = evaluator.risk(confidence_level=0.95, baseline=True)
    np.testing.assert_allclose(baseline_risk, expected_risk, atol=0.05)

@dataclass
class DataTrainingArguments:
max_len: Optional[int] = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
server_ip: Optional[str] = field(default=None, metadata={'help': 'For distant debugging.'})
    server_port: Optional[str] = field(default=None, metadata={'help': 'For distant debugging.'})

@pytest.mark.parametrize('a_shape, b_shape', [([2, 4], [4, 3]), pytest.param([3, 5], [5], marks=pytest.mark.skip('issues in dace')), pytest.param([5], [5, 6], marks=pytest.mark.skip('issues in dace'))])
@pytest.mark.pure
def test_matmul_expansion(a_shape, b_shape, sdfg_name):
blas.Gemm.default_implementation = 'pure'
sdfg = dace.SDFG(sdfg_name)
X = np.random.rand(*a_shape).astype(np.float32)
Z = np.random.rand(*b_shape).astype(np.float32)
    expected_result = (X @ Z)
sdfg.add_array('X', a_shape, dace.float32)
sdfg.add_array('Z', b_shape, dace.float32)
sdfg.add_array('__return', expected_result.shape, dace.float32)
state = sdfg.add_state()
access_X = state.add_access('X')
access_Z = state.add_access('Z')
access_result = state.add_access('__return')
op_node = donnx.ONNXMatMul('Matmul')
state.add_node(op_node)
state.add_edge(access_X, None, op_node, 'A', sdfg.make_array_memlet('X'))
state.add_edge(access_Z, None, op_node, 'B', sdfg.make_array_memlet('Z'))
state.add_edge(op_node, 'Y', access_result, None, sdfg.make_array_memlet('__return'))
with dace.library.change_default(blas, 'pure'):
sdfg.expand_library_nodes()
assert any((isinstance(n, dace.nodes.MapEntry) for (n, _) in sdfg.all_nodes_recursive()))
result = sdfg(X=X, Z=Z)
    assert_allclose(expected_result, result)

class MinimizeDegree(EdgeSelection):
def __call__(self, graph):
degrees = dict(graph.degree_iterator(labels=True))
edges = graph.edges(labels=True, sort=False)
if edges:
return min(edges, key=(lambda x: (degrees[x[0]] + degrees[x[1]])))
        raise RuntimeError('no edges left to select')

class JavascriptProcessor:
    # NOTE: decorators were lost in this dump; the cls-taking methods below
    # are restored as classmethods.
    @classmethod
    def create_dead_for_loop(cls, body):
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
p = np.random.uniform(0, 1)
if (p < 0.5):
prefix = (((((('for ( let ' + control_variable) + ' = 0 ; ') + control_variable) + ' > 0 ; ') + control_variable) + ' ++ ) { ')
loop = ((prefix + body) + ' } ')
return loop
else:
return (('for ( ; false ; ) { ' + body) + '}')
    @classmethod
    def create_dead_while_loop(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('while ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('while ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('while ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
    @classmethod
    def create_dead_if(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('if ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('if ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('if ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
    @classmethod
    def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
if (not isinstance(insert_before_node, list)):
insert_before_node = [insert_before_node]
if isinstance(code_str, str):
code_str = code_str.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return tokens
if ('string' in str(root.type)):
return [code_str[root.start_byte:root.end_byte].decode()]
if (root in insert_before_node):
tokens += insertion_code.split()
children = root.children
if ((len(children) == 0) or (str(root.type) in ['string'])):
tokens.append(code_str[root.start_byte:root.end_byte].decode())
else:
for child in children:
ts = cls.get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
tokens += ts
return tokens
    @classmethod
    def get_tokens(cls, code, root):
if isinstance(code, str):
code = code.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return tokens
if ('string' in str(root.type)):
return [code[root.start_byte:root.end_byte].decode()]
children = root.children
if ((len(children) == 0) or (str(root.type) in ['string'])):
tokens.append(code[root.start_byte:root.end_byte].decode())
else:
for child in children:
ts = cls.get_tokens(code, child)
tokens += ts
return tokens
    @classmethod
    def get_breaking_statements(cls, block):
breakings = ['continue_statement', 'break_statement', 'return_statement']
statements = []
stack = [block]
while (len(stack) > 0):
top = stack.pop()
if (str(top.type) in breakings):
statements.append(top)
else:
for child in top.children:
stack.append(child)
return statements
    @classmethod
    def for_to_while_random(cls, code_string, parser):
root = parser.parse_code(code_string)
loops = cls.extract_for_loops(root)
success = False
try:
while ((not success) and (len(loops) > 0)):
selected_loop = np.random.choice(loops)
loops.remove(selected_loop)
(modified_root, modified_code_string, success) = cls.for_to_while(code_string, root, selected_loop, parser)
if success:
root = modified_root
code_string = modified_code_string
except:
pass
if (not success):
ts = cls.get_tokens(code_string, root)
code_string = cls.beautify_java_code(ts)
return (root, code_string, success)
    @classmethod
    def while_to_for_random(cls, code_string, parser):
root = parser.parse_code(code_string)
loops = cls.extract_while_loops(root)
success = False
try:
while ((not success) and (len(loops) > 0)):
selected_loop = np.random.choice(loops)
loops.remove(selected_loop)
(modified_root, modified_code_string, success) = cls.while_to_for(code_string, root, selected_loop, parser)
if success:
root = modified_root
code_string = modified_code_string
if (not success):
ts = cls.get_tokens(code_string, root)
code_string = cls.beautify_java_code(ts)
except:
pass
return (root, code_string, success)
    @classmethod
    def extract_for_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'for_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
    @classmethod
    def beautify_java_code(cls, tokens):
        code = ' '.join(tokens)
        # rejoin member access: ' . ' -> '.' (the source replaced the match
        # with '', which would drop the dot itself)
        code = re.sub(' \\. ', '.', code)
        code = re.sub(' \\+\\+', '++', code)
        return code
    @classmethod
    def get_tokens_replace_for(cls, code_str, for_node, root, init, cond, update, body):
if isinstance(code_str, str):
code_str = code_str.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return tokens
if ('string' in str(root.type)):
parent = root.parent
return [code_str[root.start_byte:root.end_byte].decode()]
children = root.children
if ((len(children) == 0) or (str(root.type) in ['string'])):
tokens.append(code_str[root.start_byte:root.end_byte].decode())
else:
for child in children:
if (child == for_node):
tokens.extend(((((((init + ['while', '(']) + cond) + [')', '{']) + body) + update) + ['}']))
else:
tokens += cls.get_tokens_replace_for(code_str, for_node, child, init, cond, update, body)
return tokens
    @classmethod
    def for_to_while(cls, code_string, root, fl, parser):
children = fl.children
init = children[2]
init_tokens = cls.get_tokens(code_string, init)
comparison = children[3]
if (str(comparison.type) != ';'):
comp_tokens = cls.get_tokens(code_string, comparison)
if (comp_tokens[(- 1)] == ';'):
comp_tokens = comp_tokens[:(- 1)]
update = children[4]
if (str(update.type) == ')'):
update_tokens = []
body = children[5]
else:
update_tokens = (cls.get_tokens(code_string, update) + [';'])
body = children[6]
breaking_statements = cls.get_breaking_statements(body)
body_tokens = cls.get_tokens_insert_before(code_string, body, ' '.join(update_tokens), breaking_statements)
if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
body_tokens = body_tokens[1:(- 1)]
tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, root=root, init=init_tokens, cond=comp_tokens, update=update_tokens, body=body_tokens)
code = cls.beautify_java_code(tokens)
return (parser.parse_code(code), code, True)
return (root, code_string, False)
    @classmethod
    def extract_while_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'while_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
@classmethod
def while_to_for(cls, code_string, root, wl, parser):
children = wl.children
condition = children[1]
body = children[2]
if (str(condition.type) == 'parenthesized_expression'):
expr_tokens = cls.get_tokens(code_string, condition.children[1])
body_tokens = cls.get_tokens(code_string, body)
if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
body_tokens = body_tokens[1:(- 1)]
tokens = cls.get_tokens_replace_while(code_str=code_string, while_node=wl, root=root, cond=expr_tokens, body=body_tokens)
code = cls.beautify_java_code(tokens)
return (parser.parse_code(code), code, True)
return (root, code_string, False)
@classmethod
def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
if isinstance(code_str, str):
code_str = code_str.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return tokens
if ('string' in str(root.type)):
return [code_str[root.start_byte:root.end_byte].decode()]
children = root.children
if ((len(children) == 0) or (str(root.type) in ['string'])):
tokens.append(code_str[root.start_byte:root.end_byte].decode())
else:
for child in children:
if (child == while_node):
tokens.extend(((((['for', '(', ';'] + cond) + [';', ')', '{']) + body) + ['}']))
else:
tokens += cls.get_tokens_replace_while(code_str, while_node, child, cond, body)
return tokens
@classmethod
def extract_expression(cls, root, code):
expressions = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'binary_expression'):
children_nodes = current_node.children
keep = ['<', '>', '<=', '>=', '==', '!=', '===', '!==']
counter = 0
for w in children_nodes:
if (str(w.type) in keep):
counter = (counter + 1)
if (counter == 1):
expressions.append(current_node)
for child in current_node.children:
queue.append(child)
return expressions
@classmethod
def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
if isinstance(code, str):
code = code.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return (tokens, None)
if ('string' in str(root.type)):
return ([code[root.start_byte:root.end_byte].decode()], None)
children = root.children
if (len(children) == 0):
if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
opt = code[operator.start_byte:operator.end_byte].decode()
if (opt == '<'):
tokens.append('>')
elif (opt == '>'):
tokens.append('<')
elif (opt == '>='):
tokens.append('<=')
elif (opt == '<='):
tokens.append('>=')
elif (opt == '=='):
tokens.append('==')
elif (opt == '!='):
tokens.append('!=')
elif (opt == '==='):
tokens.append('===')
elif (opt == '!=='):
tokens.append('!==')
else:
tokens.append(code[root.start_byte:root.end_byte].decode())
for child in children:
if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
(ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
(ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
else:
(ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
tokens += ts
return (tokens, None)
@classmethod
def operand_swap(cls, code_str, parser):
code = code_str.encode()
root = parser.parse_code(code)
expressions = cls.extract_expression(root, code)
success = False
try:
while ((not success) and (len(expressions) > 0)):
selected_exp = np.random.choice(expressions)
expressions.remove(selected_exp)
bin_exp = selected_exp
condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
bin_exp = bin_exp.children
left_oprd = bin_exp[0]
operator = bin_exp[1]
right_oprd = bin_exp[2]
try:
code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
code_string = ''
for w in code_list:
code_string = ((code_string + w) + ' ')
code_string = code_string.strip()
success = True
except Exception:
success = False
continue
except Exception:
pass
if (not success):
code_string = cls.beautify_java_code(cls.get_tokens(code_str, root))
return (code_string, success)
@classmethod
def extract_if_else(cls, root, code_str, operator_list):
ext_opt_list = ['&&', '&', '||', '|', 'and', 'or']
expressions = []
queue = [root]
not_consider = []
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'if_statement'):
clause = code_str[current_node.start_byte:current_node.end_byte].decode()
des = current_node.children[1]
cond = code_str[des.start_byte:des.end_byte].decode()
stack = [des]
nodes = []
while (len(stack) > 0):
root1 = stack.pop()
if (len(root1.children) == 0):
nodes.append(root1)
for child in root1.children:
stack.append(child)
nodes.reverse()
counter = 0
extra_counter = 0
for w in nodes:
if (str(w.type) in operator_list):
counter = (counter + 1)
if (str(w.type) in ext_opt_list):
extra_counter = (extra_counter + 1)
if (not ((counter == 1) and (extra_counter == 0))):
continue
children_nodes = current_node.children
flagx = 0
flagy = 0
for w in children_nodes:
if ((str(w.type) == 'else') or (str(w.type) == 'else_clause')):
flagx = 1
m = w.children
for x in m:
if (str(x.type) == 'if_statement'):
not_consider.append(x)
flagy = 1
break
if ((flagx == 1) and (flagy == 0)):
expressions.append([current_node, des])
for child in current_node.children:
if (child not in not_consider):
queue.append(child)
return expressions
@classmethod
def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
if isinstance(code, str):
code = code.encode()
assert isinstance(root, Node)
tokens = []
if (root.type == 'comment'):
return (tokens, None)
if ('string' in str(root.type)):
return ([code[root.start_byte:root.end_byte].decode()], None)
children = root.children
if (len(children) == 0):
if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
op = code[root.start_byte:root.end_byte].decode()
if (op == '<'):
tokens.append('>=')
elif (op == '>'):
tokens.append('<=')
elif (op == '>='):
tokens.append('<')
elif (op == '<='):
tokens.append('>')
elif (op == '!='):
tokens.append('==')
elif (op == '=='):
tokens.append('!=')
else:
tokens.append(code[root.start_byte:root.end_byte].decode())
for child in children:
child_type = str(child.type)
if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
flagx = 1
(ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
flagy = 1
(ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
else:
(ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
tokens += ts
return (tokens, None)
@classmethod
def block_swap(cls, code_str, parser):
code = code_str.encode()
root = parser.parse_code(code)
operator_list = ['<', '>', '<=', '>=', '==', '!=']
pair = cls.extract_if_else(root, code, operator_list)
success = False
lst = list(range(0, len(pair)))
try:
while ((not success) and (len(lst) > 0)):
selected = np.random.choice(lst)
lst.remove(selected)
clause = pair[selected][0]
des = pair[selected][1]
st = [des]
nodes = []
while (len(st) > 0):
root1 = st.pop()
if (len(root1.children) == 0):
nodes.append(root1)
if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
opt_node = root1
break
for child in root1.children:
st.append(child)
nodes = clause.children
flag = 0
for current_node in nodes:
if (str(current_node.type) == 'statement_block'):
first_block = current_node
elif (str(current_node.type) == 'else_clause'):
new_list = current_node.children
for w in new_list:
if (str(w.type) == 'statement_block'):
second_block = w
break
flagx = 0
flagy = 0
try:
code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
code_string = ''
for w in code_list:
code_string = ((code_string + w) + ' ')
code_string = code_string.strip()
success = True
except Exception:
success = False
continue
except Exception:
pass
if (not success):
code_string = cls.beautify_java_code(cls.get_tokens(code_str, root))
return (code_string, success) |
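# Editor's note (hedged sketch): the class above rewrites loops by splicing token
# lists around tree-sitter nodes (see get_tokens_replace_for). The self-contained
# example below shows the same token-level splice on hand-written Java tokens; the
# token lists are illustrative assumptions, not output of the real parser.
init = ['int', 'i', '=', '0', ';']
cond = ['i', '<', 'n']
update = ['i', '++', ';']
body = ['sum', '+=', 'i', ';']
tokens = init + ['while', '('] + cond + [')', '{'] + body + update + ['}']
# -> int i = 0 ; while ( i < n ) { sum += i ; i ++ ; }
print(' '.join(tokens)) |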
def spacy_nlp():
if (getattr(spacy_nlp, '_nlp', None) is None):
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_nlp._nlp |
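# Editor's note (hedged): minimal usage of the cached spacy_nlp() accessor above.
# English() is a blank pipeline, so only tokenization is available.
nlp = spacy_nlp()
doc = nlp('Cached singleton pipelines avoid repeated model loading.')
print([token.text for token in doc]) |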
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:6'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.18', 'l_1': 'encoder.19', 'l_2': 'encoder.20'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
t_0 = self.l_1(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
t_0 = self.l_2(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
return list(flatten((x0, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs) |
class f_model(nn.Module):
def __init__(self, freeze_param=False, inter_dim=INTER_DIM, num_classes=CATEGORIES, model_path=None):
super(f_model, self).__init__()
self.backbone = torchvision.models.resnet50(pretrained=True)
state_dict = self.backbone.state_dict()
num_features = self.backbone.fc.in_features
self.backbone = nn.Sequential(*list(self.backbone.children())[:(- 2)])
model_dict = self.backbone.state_dict()
model_dict.update({k: v for (k, v) in state_dict.items() if (k in model_dict)})
self.backbone.load_state_dict(model_dict)
if freeze_param:
for param in self.backbone.parameters():
param.requires_grad = False
self.avg_pooling = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(num_features, inter_dim)
self.fc2 = nn.Linear(inter_dim, num_classes)
state = load_model(model_path)
if state:
new_state = self.state_dict()
new_state.update({k: v for (k, v) in state.items() if (k in new_state)})
self.load_state_dict(new_state)
def forward(self, x):
x = self.backbone(x)
pooled = self.avg_pooling(x)
inter_out = self.fc(pooled.view(pooled.size(0), (- 1)))
out = self.fc2(inter_out)
return (out, inter_out, x) |
def test_epoch_eval_hook():
with pytest.raises(TypeError):
test_dataset = ExampleModel()
data_loader = [DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)]
EvalHook(data_loader, by_epoch=True)
test_dataset = ExampleDataset()
test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
data_loader = DataLoader(test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer = obj_from_dict(optim_cfg, torch.optim, dict(params=model.parameters()))
with tempfile.TemporaryDirectory() as tmpdir:
eval_hook = EvalHook(data_loader, by_epoch=True, interval=2)
runner = mmcv.runner.EpochBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logging.getLogger())
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
test_dataset.evaluate.assert_called_once_with([torch.tensor([1])], logger=runner.logger) |
def get_atoms(molecule):
logger.debug('Entering get_atoms()')
conformer = molecule.GetConformer()
num_atoms = conformer.GetNumAtoms()
list_heavyatoms = []
list_heavyatomnames = []
atoms = np.arange(num_atoms)
for i in np.nditer(atoms):
atom_name = molecule.GetAtomWithIdx(int(atoms[i])).GetSymbol()
if (atom_name != 'H'):
list_heavyatoms.append(atoms[i])
list_heavyatomnames.append(atom_name)
if (len(list_heavyatoms) == 0):
logger.error('No heavy atom found.')
sys.exit(1)
return (list_heavyatoms, list_heavyatomnames) |
class GBasicBlockSig(nn.Module):
def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
super(GBasicBlockSig, self).__init__()
self.body = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad, groups=4), nn.Sigmoid())
init_weights(self.modules)
def forward(self, x):
out = self.body(x)
return out |
def get_base_config():
return tp.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, activation_n_bits=8, weights_n_bits=8, weights_per_channel_threshold=True, enable_weights_quantization=True, enable_activation_quantization=True, quantization_preserving=False, fixed_scale=None, fixed_zero_point=None, weights_multiplier_nbits=None, simd_size=None) |
def _setup_logging(verbosity: int, no_rich: bool) -> (Console | None):
level = logging.WARNING
if (verbosity == 1):
level = logging.INFO
if (verbosity >= 2):
level = logging.DEBUG
console = None
if no_rich:
handler: logging.Handler = logging.StreamHandler()
else:
install()
console = Console(tab_size=4)
handler = RichHandler(rich_tracebacks=True, log_time_format='[%X]', console=console)
handler.setFormatter(logging.Formatter('%(message)s'))
logging.basicConfig(level=level, format='%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): %(message)s', datefmt='[%X]', handlers=[handler])
return console |
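# Editor's note (hedged): example invocation of _setup_logging above, assuming the
# rich package supplies Console, RichHandler and install as the code suggests.
import logging
console = _setup_logging(verbosity=2, no_rich=False)
logging.getLogger(__name__).debug('visible because verbosity >= 2 selects DEBUG')
if (console is not None):
    console.print('rich console is active') |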
class IdiomPreproc(abstract_preproc.AbstractPreproc):
def __init__(self, grammar, save_path, censor_pointers):
self.save_path = save_path
self.censor_pointers = censor_pointers
self.grammar = registry.construct('grammar', grammar)
self.ast_wrapper = self.grammar.ast_wrapper
self.items = collections.defaultdict(list)
def validate_item(self, item, section):
parsed = self.grammar.parse(item.code, section)
if parsed:
self.ast_wrapper.verify_ast(parsed)
return (True, parsed)
return ((section != 'train'), None)
def add_item(self, item, section, validation_info):
converted = AstConverter(self.grammar, self.censor_pointers).convert(validation_info)
self.items[section].append({'text': item.text, 'ast': converted, 'orig': item.orig})
def clear_items(self):
self.items.clear()
def save(self):
os.makedirs(self.save_path, exist_ok=True)
for section in self.items:
with open(os.path.join(self.save_path, '{}.jsonl'.format(section)), 'w') as f:
for item in self.items[section]:
f.write((json.dumps(item) + '\n'))
expected_children = {'Null': [], 'End': []}
field_name_nodes = []
binarizers = []
literals = []
single_child = []
for (name, type_) in self.ast_wrapper.singular_types.items():
expected_children[name] = ['{}-{}'.format(name, field.name) for field in type_.fields]
field_name_nodes.extend(('{}-{}'.format(name, field.name) for field in type_.fields))
for field in type_.fields:
if (len(type_.fields) == 1):
field_name = name
else:
field_name = '{}-{}'.format(name, field.name)
if field.seq:
binarizers.append(field_name)
else:
single_child.append(field_name)
if (field.type in {'identifier', 'int', 'string', 'bytes', 'object', 'singleton'}):
literals.append(field_name)
if (field.type in self.grammar.pointers):
literals.append(field_name)
with open(os.path.join(self.save_path, 'grammar.json'), 'w') as f:
json.dump({'expected_children': expected_children, 'field_name_nodes': field_name_nodes, 'binarizers': binarizers, 'literals': literals, 'single_child': single_child}, f, indent=2, sort_keys=True)
def load(self):
raise NotImplementedError
def dataset(self, section):
raise NotImplementedError |
class SimpleMLPRegressor(Regressor):
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
del args, kwargs
self.model = SimpleMLPModel(output_dim=self._output_dim, name='SimpleMLPModel')
self._ys = None
self._network = None
self._initialize()
def _initialize(self):
input_ph = tf.compat.v1.placeholder(tf.float32, shape=((None,) + self._input_shape))
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(input_ph)
def recurrent(self):
return False
def vectorized(self):
return True
def fit(self, xs, ys):
self._ys = ys
def predict(self, xs):
if (self._ys is None):
outputs = tf.compat.v1.get_default_session().run(self._network.outputs, feed_dict={self._network.input: xs})
self._ys = outputs
return self._ys
def get_params_internal(self):
return self._variable_scope.trainable_variables()
def __getstate__(self):
new_dict = super().__getstate__()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
super().__setstate__(state)
self._initialize() |
class Vidit(BaseDataset):
def __init__(self, config, device):
super().__init__(config, device)
self._root_dir = Path(os.path.expanduser(config['data_path']))
self._paths = {}
np.random.seed(config['seed'])
files = [str(path) for path in self._root_dir.iterdir()]
files = np.sort(files).reshape(300, 40)
self._paths['train'] = files[10:]
self._paths['val'] = files[:10]
self._paths['test'] = []
def get_dataset(self, split):
return _Dataset(self._paths[split], self._config, self._device) |
def print_vocabulary(mylist_freq, filename):
print('Printing vocabulary information to file', filename)
with open((filename + '_freq.txt'), 'w') as f:
f.write('{:>6} {}\n'.format('# occ', 'statement (in alphabetical order)'))
for (key, value) in sorted(mylist_freq.items()):
f.write('{:>6} {}\n'.format(str(value), key))
mylist_families_l1 = rgx.get_list_tag_level_1()
to_iterate1 = list()
for i in range(len(mylist_families_l1)):
to_iterate1.append([mylist_families_l1[i], rgx.get_count(mylist_freq, mylist_families_l1[i], 1)])
to_iterate1.sort(key=(lambda tup: tup[1]), reverse=True)
with open((filename + '_class.txt'), 'w') as f:
f.write('{:>6} {:<30}{:<25}{}\n'.format('# occ', 'tag level 1', 'tag level 2', 'tag level 3'))
for tag1 in to_iterate1:
f.write('{:>6} {:<30}\n'.format(str(tag1[1]), tag1[0]))
mylist_families_l2 = rgx.get_list_tag_level_2(tag1[0])
to_iterate2 = list()
for i in range(len(mylist_families_l2)):
to_iterate2.append([mylist_families_l2[i], rgx.get_count(mylist_freq, mylist_families_l2[i], 2)])
to_iterate2.sort(key=(lambda tup: tup[1]), reverse=True)
for tag2 in to_iterate2:
f.write('{:>6} {:<30}{:<25}\n'.format(str(tag2[1]), '', tag2[0]))
mylist_families_l3 = rgx.get_list_tag_level_3(tag2[0])
to_iterate3 = list()
for i in range(len(mylist_families_l3)):
to_iterate3.append([mylist_families_l3[i], rgx.get_count(mylist_freq, mylist_families_l3[i], 3)])
to_iterate3.sort(key=(lambda tup: tup[1]), reverse=True)
for tag3 in to_iterate3:
f.write('{:>6} {:<30}{:<25}{}\n'.format(str(tag3[1]), '', '', tag3[0])) |
def test_volume(problem):
from sfepy.discrete import FieldVariable
ok = True
field_map = {'u': 'vector', 'p': 'scalar'}
volumes = {}
avg = 0.0
for (key, term) in expressions.items():
var_name = key[(- 1)]
field = problem.fields[field_map[var_name]]
var = FieldVariable(var_name, 'parameter', field, primary_var_name='(set-to-None)')
val = problem.evaluate(term, **{var_name: var})
volumes[key] = val
avg += val
avg /= len(volumes)
for (key, val) in volumes.items():
err = (nm.abs((avg - val)) / nm.abs(avg))
_ok = (err < 1e-12)
tst.report(('"%s" - volume: %e' % (key, val)))
tst.report(('"%s" - relative volume difference: %e -> %s' % (key, err, _ok)))
ok = (ok and _ok)
assert ok |
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict((lambda : len(vocab)))
freqs = collections.defaultdict((lambda : 0))
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
for token in tokens:
freqs[token] += 1
for (token, freq) in viewitems(freqs):
if (freq > unk_threshold):
vocab[token]
return vocab |
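# Editor's note: gen_vocab relies on defaultdict(lambda: len(vocab)), which assigns
# the next free integer id the first time a token is looked up, so merely accessing
# vocab[token] registers it. A minimal demonstration:
import collections
vocab = collections.defaultdict(lambda: len(vocab))
for token in ['the', 'cat', 'the']:
    vocab[token]
# -> {'the': 0, 'cat': 1}
print(dict(vocab)) |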
class Custom(BaseVRMWaveform):
def __init__(self, waveform_function):
self.waveform_function = waveform_function
@property
def waveform_function(self):
return self._waveform_function
@waveform_function.setter
def waveform_function(self, value):
self._waveform_function = validate_callable('waveform_function', value)
def getCharDecay(self, times):
return self.waveform_function(times) |
def get_dcmdjpeg_exe():
fname = ('dcmdjpeg' + ('.exe' * sys.platform.startswith('win')))
for dir in ('c:\\dcmtk', 'c:\\Program Files', 'c:\\Program Files\\dcmtk', 'c:\\Program Files (x86)\\dcmtk'):
filename = os.path.join(dir, fname)
if os.path.isfile(filename):
return filename
try:
subprocess.check_call([fname, '--version'])
return fname
except Exception:
return None |
class HeadNet():
def __init__(self, config, num_outputs, name):
self.num_levels = config.num_levels
self.bn_level_first = getattr(config, 'head_bn_level_first', False)
norm_layer = (config.norm_layer or tf.keras.layers.BatchNormalization)
if config.norm_kwargs:
norm_kwargs = {**config.norm_kwargs}
if ('eps' in norm_kwargs):
eps = norm_kwargs.pop('eps')
norm_kwargs['epsilon'] = eps
norm_layer = partial(norm_layer, **norm_kwargs)
act_type = (config.head_act_type if getattr(config, 'head_act_type', None) else config.act_type)
act_layer = (get_act_layer(act_type) or _ACT_LAYER)
conv_fn = (SeparableConv2d if config.separable_conv else ConvBnAct2d)
conv_kwargs = dict(in_channels=config.fpn_channels, out_channels=config.fpn_channels, kernel_size=3, padding=config.pad_type, bias=config.redundant_bias, act_layer=None, norm_layer=None)
self.conv_rep = [conv_fn(name=f'{name}/conv_rep/{_}', **conv_kwargs) for _ in range(config.box_class_repeats)]
self.bn_rep = []
if self.bn_level_first:
for _ in range(self.num_levels):
self.bn_rep.append([norm_layer(config.fpn_channels, name=f'{name}/bn_rep/{_}/') for _ in range(config.box_class_repeats)])
else:
for _ in range(config.box_class_repeats):
self.bn_rep.append([norm_layer(name=f'{name}/bn_rep/{_}/{_level}/bn') for _level in range(self.num_levels)])
self.act = act_layer
num_anchors = (len(config.aspect_ratios) * config.num_scales)
predict_kwargs = dict(in_channels=config.fpn_channels, out_channels=(num_outputs * num_anchors), kernel_size=3, padding=config.pad_type, bias=True, norm_layer=None, act_layer=None, name=f'{name}/predict')
self.predict = conv_fn(**predict_kwargs)
def toggle_bn_level_first(self):
new_bn_rep = []
for i in range(len(self.bn_rep[0])):
bn_first = []
for r in self.bn_rep:
m = r[i]
bn_first.append((m[0] if isinstance(m, nn.Sequential) else nn.Sequential(OrderedDict([('bn', m)]))))
new_bn_rep.append(bn_first)
self.bn_level_first = (not self.bn_level_first)
self.bn_rep = new_bn_rep
def _forward(self, x: List[tf.Tensor]) -> List[tf.Tensor]:
outputs = []
for level in range(self.num_levels):
x_level = x[level]
for (conv, bn) in zip(self.conv_rep, self.bn_rep):
x_level = conv(x_level)
x_level = bn[level](x_level)
x_level = self.act()(x_level)
outputs.append(self.predict(x_level))
return outputs
def _forward_level_first(self, x: List[tf.Tensor]) -> List[tf.Tensor]:
outputs = []
for (level, bn_rep) in enumerate(self.bn_rep):
x_level = x[level]
for (conv, bn) in zip(self.conv_rep, bn_rep):
x_level = conv(x_level)
x_level = bn(x_level)
x_level = self.act()(x_level)
outputs.append(self.predict(x_level))
return outputs
def __call__(self, x: List[tf.Tensor]) -> List[tf.Tensor]:
if self.bn_level_first:
return self._forward_level_first(x)
else:
return self._forward(x) |
def register_Ns3Ipv6Route_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::Ipv6Route const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetDestination', 'ns3::Ipv6Address', [], is_const=True)
cls.add_method('GetGateway', 'ns3::Ipv6Address', [], is_const=True)
cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
cls.add_method('GetSource', 'ns3::Ipv6Address', [], is_const=True)
cls.add_method('SetDestination', 'void', [param('ns3::Ipv6Address', 'dest')])
cls.add_method('SetGateway', 'void', [param('ns3::Ipv6Address', 'gw')])
cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
cls.add_method('SetSource', 'void', [param('ns3::Ipv6Address', 'src')])
return |
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, (- 0.1), 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores', np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float))
max_output_size = 3
iou_threshold = 1.0
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores', np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float))
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores', np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float))
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores', np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float))
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_ten_identical_boxes(self):
boxes = np.array((10 * [[0, 0, 1, 1]]), dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array((10 * [0.8])))
iou_threshold = 0.5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = 0.4
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.5
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.8
expected_boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(np.array([[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]], dtype=np.float32))
scores = np.array([[(- 0.2), 0.1, 0.5, (- 0.4), 0.3], [0.7, (- 0.7), 0.6, 0.2, (- 0.9)], [0.4, 0.34, (- 0.9), 0.2, 0.31]], dtype=np.float32)
boxlist.add_field('scores', scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = boxlist_clean.get_field('scores')
classes_clean = boxlist_clean.get_field('classes')
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0], [0.6, 0.0, 1.0, 1.0]], dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes) |
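# Editor's note (hedged sketch): a standalone greedy NMS in NumPy illustrating the
# behaviour the tests above exercise; it is not the np_box_list_ops implementation.
# Boxes are [y1, x1, y2, x2], matching the fixtures in setUp.
import numpy as np

def greedy_nms(boxes, scores, max_output_size, iou_threshold):
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while (order.size > 0) and (len(keep) < max_output_size):
        i = order[0]
        keep.append(i)
        # overlap of the selected box with every remaining candidate
        yy1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        xx1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        yy2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        xx2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.maximum(0.0, yy2 - yy1) * np.maximum(0.0, xx2 - xx1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_threshold]
    return boxes[keep] |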
class ContrastCLIPBottleneckBase(AbstractCLIPBottleneck):
def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
super(ContrastCLIPBottleneckBase, self).__init__(feature_dim, num_classes, num_domains, hparams, pretrained, idx2class, DummyBottleneck, use_clip_contrast=True) |
def add_cam_tracking_constraint(camera, lookat):
cam_constraint = camera.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
track_to = bpy.data.objects.new('Empty', None)
track_to.location = lookat
camera.parent = track_to
bpy.context.scene.collection.objects.link(track_to)
bpy.context.view_layer.objects.active = track_to
cam_constraint.target = track_to
return track_to |
class SecMin(Function):
@staticmethod
def forward(ctx, inp, offsets):
nProposal = (offsets.size(0) - 1)
C = inp.size(1)
assert inp.is_contiguous()
assert offsets.is_contiguous()
out = torch.cuda.FloatTensor(nProposal, C).zero_()
pointgroup_ops_ext.sec_min(inp, offsets, out, nProposal, C)
return out
@staticmethod
def backward(ctx, a=None):
return (None, None) |
def _seg_62():
return [(120220, 'M', u'w'), (120221, 'M', u'x'), (120222, 'M', u'y'), (120223, 'M', u'z'), (120224, 'M', u'a'), (120225, 'M', u'b'), (120226, 'M', u'c'), (120227, 'M', u'd'), (120228, 'M', u'e'), (120229, 'M', u'f'), (120230, 'M', u'g'), (120231, 'M', u'h'), (120232, 'M', u'i'), (120233, 'M', u'j'), (120234, 'M', u'k'), (120235, 'M', u'l'), (120236, 'M', u'm'), (120237, 'M', u'n'), (120238, 'M', u'o'), (120239, 'M', u'p'), (120240, 'M', u'q'), (120241, 'M', u'r'), (120242, 'M', u's'), (120243, 'M', u't'), (120244, 'M', u'u'), (120245, 'M', u'v'), (120246, 'M', u'w'), (120247, 'M', u'x'), (120248, 'M', u'y'), (120249, 'M', u'z'), (120250, 'M', u'a'), (120251, 'M', u'b'), (120252, 'M', u'c'), (120253, 'M', u'd'), (120254, 'M', u'e'), (120255, 'M', u'f'), (120256, 'M', u'g'), (120257, 'M', u'h'), (120258, 'M', u'i'), (120259, 'M', u'j'), (120260, 'M', u'k'), (120261, 'M', u'l'), (120262, 'M', u'm'), (120263, 'M', u'n'), (120264, 'M', u'o'), (120265, 'M', u'p'), (120266, 'M', u'q'), (120267, 'M', u'r'), (120268, 'M', u's'), (120269, 'M', u't'), (120270, 'M', u'u'), (120271, 'M', u'v'), (120272, 'M', u'w'), (120273, 'M', u'x'), (120274, 'M', u'y'), (120275, 'M', u'z'), (120276, 'M', u'a'), (120277, 'M', u'b'), (120278, 'M', u'c'), (120279, 'M', u'd'), (120280, 'M', u'e'), (120281, 'M', u'f'), (120282, 'M', u'g'), (120283, 'M', u'h'), (120284, 'M', u'i'), (120285, 'M', u'j'), (120286, 'M', u'k'), (120287, 'M', u'l'), (120288, 'M', u'm'), (120289, 'M', u'n'), (120290, 'M', u'o'), (120291, 'M', u'p'), (120292, 'M', u'q'), (120293, 'M', u'r'), (120294, 'M', u's'), (120295, 'M', u't'), (120296, 'M', u'u'), (120297, 'M', u'v'), (120298, 'M', u'w'), (120299, 'M', u'x'), (120300, 'M', u'y'), (120301, 'M', u'z'), (120302, 'M', u'a'), (120303, 'M', u'b'), (120304, 'M', u'c'), (120305, 'M', u'd'), (120306, 'M', u'e'), (120307, 'M', u'f'), (120308, 'M', u'g'), (120309, 'M', u'h'), (120310, 'M', u'i'), (120311, 'M', u'j'), (120312, 'M', u'k'), (120313, 'M', u'l'), (120314, 'M', u'm'), (120315, 'M', u'n'), (120316, 'M', u'o'), (120317, 'M', u'p'), (120318, 'M', u'q'), (120319, 'M', u'r')] |
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = text_type(value)
except UnicodeError:
value = str(value).decode('utf-8', 'replace')
self.fail(value, param, ctx) |
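# Editor's note (hedged): FuncParamType adapts any one-argument callable into a click
# parameter type. Illustrative usage, assuming click is installed; the option name
# and command below are made up for the example.
import click

@click.command()
@click.option('--ratio', type=FuncParamType(float), default='0.5')
def report(ratio):
    click.echo('ratio squared: {}'.format(ratio * ratio)) |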
class TransfoXLTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = TransfoXLTokenizer
def setUp(self):
super(TransfoXLTokenizationTest, self).setUp()
vocab_tokens = ['<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l']
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
kwargs['lower_case'] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u'<unk> UNwanted , running'
output_text = u'<unk> unwanted, running'
return (input_text, output_text)
def test_full_tokenizer(self):
tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
tokens = tokenizer.tokenize(u'<unk> UNwanted , running')
self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
def test_full_tokenizer_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(tokenizer.tokenize(u' \tHeLLo ! how \n Are yoU ? '), ['hello', '!', 'how', 'are', 'you', '?'])
def test_full_tokenizer_no_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(tokenizer.tokenize(u' \tHeLLo ! how \n Are yoU ? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?']) |
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
return cls |
def discriminator_fill_statedict(statedict, vars, size):
log_size = int(math.log(size, 2))
update(statedict, convert_conv(vars, f'{size}x{size}/FromRGB', 'convs.0'))
conv_i = 1
for i in range((log_size - 2), 0, (- 1)):
reso = (4 * (2 ** i))
update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv0', f'convs.{conv_i}.conv1'))
update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv1_down', f'convs.{conv_i}.conv2', start=1))
update(statedict, convert_conv(vars, f'{reso}x{reso}/Skip', f'convs.{conv_i}.skip', start=1, bias=False))
conv_i += 1
update(statedict, convert_conv(vars, f'4x4/Conv', 'final_conv'))
update(statedict, convert_dense(vars, f'4x4/Dense0', 'final_linear.0'))
update(statedict, convert_dense(vars, f'Output', 'final_linear.1'))
return statedict |
def initialize_compiler_options(cmd):
cmd.fcompiler = None
cmd.f2py = None
cmd.compiler = None
cmd.f77exec = None
cmd.f90exec = None |
def test_dedupe_parameters():
parameters = [{'name': 'SigXsecOverSM', 'bounds': [[0.0, 10.0]]}, {'name': 'SigXsecOverSM', 'bounds': [[0.0, 10.0]]}]
assert (len(pyhf.readxml.dedupe_parameters(parameters)) == 1)
parameters[1]['bounds'] = [[0.0, 2.0]]
with pytest.raises(RuntimeError, match='SigXsecOverSM'):
pyhf.readxml.dedupe_parameters(parameters) |
class XLMRobertaForQuestionAnswering():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if (re.match('(drive|docs)[.]google[.]com', parts.netloc) is None):
return None
match = re.match('/file/d/(?P<id>[^/]*)', parts.path)
if (match is None):
return None
return match.group('id') |
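# Editor's note: quick demonstration of the parser above; both URLs are illustrative
# examples, not real files.
# -> 'abc123' (host and path both match)
print(_get_google_drive_file_id('https://drive.google.com/file/d/abc123/view'))
# -> None (host is not a Google Drive domain)
print(_get_google_drive_file_id('https://example.com/file/d/abc123')) |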
def get_end_date(start_date: datetime) -> datetime:
if (start_date.month == 12):
end_date = start_date.replace(year=(start_date.year + 1), month=1)
else:
end_date = start_date.replace(month=(start_date.month + 1))
return end_date |
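# Editor's note: datetime.replace raises ValueError when the day does not exist in the
# target month (e.g. Jan 31 -> "Feb 31"), so get_end_date above is only safe for day
# values up to 28. A hedged variant that clamps the day to the month's length:
import calendar
from datetime import datetime

def get_end_date_clamped(start_date: datetime) -> datetime:
    (year, month) = ((start_date.year + 1, 1) if (start_date.month == 12) else (start_date.year, start_date.month + 1))
    day = min(start_date.day, calendar.monthrange(year, month)[1])
    return start_date.replace(year=year, month=month, day=day)

# -> 2024-02-29 00:00:00
print(get_end_date_clamped(datetime(2024, 1, 31))) |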
def compile_model(model, learning_rate=0.005):
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss = tf.keras.losses.MeanSquaredError()
metrics = [tf.keras.metrics.MeanSquaredError()]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics) |
def test_hessian_vector_product():
a = torch.tensor([5.0])
x = torch.tensor([10.0], requires_grad=True)
def f():
return (a * (x ** 2))
expected_hessian = (2 * a)
vector = torch.tensor([10.0])
expected_hvp = (expected_hessian * vector).detach()
f_Ax = _build_hessian_vector_product(f, [x])
computed_hvp = f_Ax(vector).detach()
assert np.allclose(computed_hvp, expected_hvp) |
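# Editor's note (hedged sketch): _build_hessian_vector_product is defined elsewhere;
# a standard realisation uses double backpropagation, sketched below under that
# assumption. For f = a * x**2 it returns 2*a*vector, which the test checks.
import torch

def _build_hessian_vector_product_sketch(f, params):
    def f_Ax(vector):
        # first backward pass keeps the graph so we can differentiate again
        grads = torch.autograd.grad(f().sum(), params, create_graph=True)
        flat_grad = torch.cat([g.reshape(-1) for g in grads])
        grad_vector_dot = (flat_grad * vector).sum()
        # second backward pass yields H @ vector
        hvp = torch.autograd.grad(grad_vector_dot, params)
        return torch.cat([h.reshape(-1) for h in hvp])
    return f_Ax |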
def _serialize_json_and_commit(path, obj):
with fsspec.open(f'{path}.tmp', 'w') as file:
file.write(obj.to_json())
fs: AbstractFileSystem = fsspec.core.url_to_fs(path)[0]
fs.mkdirs(os.path.dirname(path), exist_ok=True)
if fs.exists(path):
fs.copy(path, f'{path}.bak')
fs.rename(f'{path}.tmp', path) |
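# Editor's note: the helper above uses a write-temp, back-up, rename sequence so a
# crash mid-write never leaves a truncated file at `path`. A hedged stdlib-only
# equivalent for local filesystems:
import json
import os

def serialize_json_and_commit_local(path, obj):
    tmp = '{}.tmp'.format(path)
    with open(tmp, 'w') as file:
        json.dump(obj, file)
    if os.path.exists(path):
        os.replace(path, '{}.bak'.format(path))  # keep the previous version as a backup
    os.replace(tmp, path)  # atomic rename on POSIX filesystems |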
class LocationTimeAttack(Attack):
def __init__(self, knowledge_length, time_precision='Hour'):
self.time_precision = time_precision
super(LocationTimeAttack, self).__init__(knowledge_length)
@property
def time_precision(self):
return self._time_precision
@time_precision.setter
def time_precision(self, val):
if (val not in constants.PRECISION_LEVELS):
raise ValueError('Possible time precisions are: Year, Month, Day, Hour, Minute, Second')
self._time_precision = val
def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
traj = traj.sort_values(by=[constants.UID, constants.DATETIME])
traj[constants.TEMP] = traj[constants.DATETIME].apply((lambda x: date_time_precision(x, self.time_precision)))
return self._all_risks(traj, targets, force_instances, show_progress)
def _match(self, single_traj, instance):
locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE, constants.TEMP]).size().reset_index(name=constants.COUNT)
inst = pd.DataFrame(data=instance, columns=single_traj.columns)
inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE, constants.TEMP]).size().reset_index(name=(constants.COUNT + 'inst'))
locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP], right_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP])
if (len(locs_inst.index) != len(inst.index)):
return 0
else:
condition = (locs_inst[constants.COUNT] >= locs_inst[(constants.COUNT + 'inst')])
if (len(locs_inst[condition].index) != len(inst.index)):
return 0
else:
return 1 |
def convert_tokens_to_ids(vocab, tokens):
ids = []
for token in tokens:
ids.append(vocab[token])
return ids |
class RNNField(Dense):
def __init__(self, units=1, name=None, rnn_type='SimpleRNN', activation=linear, kernel_initializer=default_kernel_initializer(), recurrent_initializer=default_kernel_initializer(), bias_initializer=default_bias_initializer(), kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, trainable=True, dtype=None):
if (not dtype):
dtype = floatx()
elif (not (dtype == floatx())):
set_floatx(dtype)
assert isinstance(name, str), 'Please provide a string for field name. '
assert callable(activation), 'Please provide a function handle for the activation. '
if isinstance(kernel_initializer, (float, int)):
kernel_initializer = default_constant_initializer(kernel_initializer)
if isinstance(bias_initializer, (float, int)):
bias_initializer = default_constant_initializer(bias_initializer)
kernel_regularizer = default_regularizer(kernel_regularizer)
bias_regularizer = default_regularizer(bias_regularizer)
super(RNNField, self).__init__(units=units, activation=activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, trainable=trainable, dtype=dtype, name=name) |
def create_syncube(modelname, voxelpos):
print('Creating simulated cube data ...')
(xxx, yyy, zzz) = voxelpos
x3 = xxx.reshape(yNcube, xNcube, zNcube)
y3 = yyy.reshape(yNcube, xNcube, zNcube)
z3 = zzz.reshape(yNcube, xNcube, zNcube)
if (modelname == 'layers_2'):
zshift = (((zLcube / 8.0) * 1.0) / (1 + np.exp((2.0 * ((- y3) + (zLcube / 2))))))
layer1 = (4.0 * ((1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.3)) + zshift))))) - (1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.325)) + zshift)))))))
cut1 = np.percentile(layer1, 90)
layer1[(layer1 < cut1)] = 0.0
layer1[(layer1 >= cut1)] = layer1.max()
layer2 = (8.0 * ((1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.25)) + zshift))))) - (1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.275)) + zshift)))))))
cut2 = np.percentile(layer2, 90)
layer2[(layer2 < cut2)] = 0.0
layer2[(layer2 >= cut2)] = layer2.max()
density = ((0.5 + layer1) + layer2)
magsus = (gp_coeff[1] * density)
if (modelname == 'layers_3'):
zshift = (((zLcube / 8.0) * 1.0) / (1 + np.exp((2.0 * ((- y3) + (yLcube / 2.0))))))
layer3 = (6.0 * ((1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.35)) + zshift))))) - (1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.375)) + zshift)))))))
cut3 = np.percentile(layer3, 90)
layer3[(layer3 < cut3)] = 0.0
layer3[(layer3 >= cut3)] = layer3.max()
layer1 = (4.0 * ((1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.3)) + zshift))))) - (1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.325)) + zshift)))))))
cut1 = np.percentile(layer1, 90)
layer1[(layer1 < cut1)] = 0.0
layer1[(layer1 >= cut1)] = layer1.max()
layer2 = (8.0 * ((1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.25)) + zshift))))) - (1.0 / (1 + np.exp(((- 2) * (((- z3) - (zLcube * 0.275)) + zshift)))))))
cut2 = np.percentile(layer2, 90)
layer2[(layer2 < cut2)] = 0.0
layer2[(layer2 >= cut2)] = layer2.max()
density = (((0.5 + layer1) + layer2) + layer3)
magsus = (gp_coeff[1] * density)
if (modelname == 'cylinders'):
rad = (yLcube / 18.0)
rc1 = ((((y3 - (yLcube / 1.3)) - rad) ** 2) + (((z3 + (zLcube / 4)) - rad) ** 2))
rc2 = ((((y3 - (yLcube / 4.0)) - rad) ** 2) + (((z3 + (zLcube / 4)) - rad) ** 2))
density = ((x3 * 0.0) + 0.1)
density[(rc2 <= (rad ** 2))] = 1.0
density[(rc1 <= (rad ** 2))] = 1.0
density[((x3 < (xLcube / 5.0)) | (x3 > ((xLcube * 4.0) / 5.0)))] = 0.1
magsus = (gp_coeff[1] * density)
origin = (voxelpos[0].min(), voxelpos[1].min(), voxelpos[2].min())
voxelsize = (xvoxsize, yvoxsize, zvoxsize)
cs.create_vtkcube(density, origin, voxelsize, fname=os.path.join(inpath, (('simcube_' + modelname) + '.vtk')))
newdf_head = ['x', 'y', 'z', 'DENSITY', 'MAGSUS']
data = np.asarray([x3.flatten(), y3.flatten(), z3.flatten(), density.flatten(), magsus.flatten()])
df = pd.DataFrame(data.T, columns=newdf_head)
df.to_csv(os.path.join(inpath, (('simcube_' + modelname) + '.csv')), index=False)
dfdrill = df.copy()
xdrill = [random.randint(2, (xNcube - 2)) for p in range(2)]
ydrill = [random.randrange(2, (yNcube - 2)) for p in range(2)]
xdrill = ((np.asarray(xdrill) * xvoxsize) + (0.5 * xvoxsize))
ydrill = ((np.asarray(ydrill) * yvoxsize) + (0.5 * yvoxsize))
dfdrill = dfdrill.loc[(dfdrill['x'].isin(xdrill) & dfdrill['y'].isin(ydrill))]
dfdrill['SiteID'] = (('SiteID_' + dfdrill['x'].astype(str)) + dfdrill['y'].astype(str))
dfdrill.to_csv(os.path.join(inpath, (('simdrill_' + modelname) + '.csv')), index=False)
return (density, magsus) |
def _resolve_random_state(random_state: Union[(int, np.random.RandomState)]) -> np.random.RandomState:
if isinstance(random_state, int):
return np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
return random_state
else:
raise NotImplementedError(f'The random_state must be an integer or np.random.RandomState, current type: {type(random_state)}') |
class AveragePooling2D(_Pooling2D):
@interfaces.legacy_pooling2d_support  # decorator prefix garbled in source; Keras legacy-interface decorator assumed
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs):
super(AveragePooling2D, self).__init__(pool_size, strides, padding, data_format, **kwargs)
def _pooling_function(self, inputs, pool_size, strides, padding, data_format):
output = K.pool2d(inputs, pool_size, strides, padding, data_format, pool_mode='avg')
return output |
class SimpleQueue(multiprocessing.queues.SimpleQueue):
def _make_methods(self):
if (not isinstance(self._reader, ConnectionWrapper)):
self._reader = ConnectionWrapper(self._reader)
self._writer = ConnectionWrapper(self._writer)
super(SimpleQueue, self)._make_methods() |
class MultiWozDB(object):
def __init__(self, db_paths):
self.dbs = {}
self.sql_dbs = {}
for domain in all_domains:
with open(db_paths[domain], 'r') as f:
self.dbs[domain] = json.loads(f.read().lower())
def oneHotVector(self, domain, num):
vector = [0, 0, 0, 0]
if (num == ''):
return vector
if (domain != 'train'):
if (num == 0):
vector = [1, 0, 0, 0]
elif (num == 1):
vector = [0, 1, 0, 0]
elif (num <= 3):
vector = [0, 0, 1, 0]
else:
vector = [0, 0, 0, 1]
elif (num == 0):
vector = [1, 0, 0, 0]
elif (num <= 5):
vector = [0, 1, 0, 0]
elif (num <= 10):
vector = [0, 0, 1, 0]
else:
vector = [0, 0, 0, 1]
return vector
def addBookingPointer(self, turn_da):
vector = [0, 0]
if turn_da.get('booking-nobook'):
vector = [1, 0]
if (turn_da.get('booking-book') or turn_da.get('train-offerbooked')):
vector = [0, 1]
return vector
def addDBPointer(self, domain, match_num, return_num=False):
if (domain in db_domains):
vector = self.oneHotVector(domain, match_num)
else:
vector = [0, 0, 0, 0]
return vector
def addDBIndicator(self, domain, match_num, return_num=False):
if (domain in db_domains):
vector = self.oneHotVector(domain, match_num)
else:
vector = [0, 0, 0, 0]
if (vector == [0, 0, 0, 0]):
indicator = '[db_nores]'
else:
indicator = ('[db_%s]' % vector.index(1))
return indicator
def get_match_num(self, constraints, return_entry=False):
match = {'general': ''}
entry = {}
for domain in all_domains:
match[domain] = ''
if ((domain in db_domains) and constraints.get(domain)):
matched_ents = self.queryJsons(domain, constraints[domain])
match[domain] = len(matched_ents)
if return_entry:
entry[domain] = matched_ents
if return_entry:
return entry
return match
def pointerBack(self, vector, domain):
if domain.endswith(']'):
domain = domain[1:(- 1)]
if (domain != 'train'):
nummap = {0: '0', 1: '1', 2: '2-3', 3: '>3'}
else:
nummap = {0: '0', 1: '1-5', 2: '6-10', 3: '>10'}
if (vector[:4] == [0, 0, 0, 0]):
report = ''
else:
num = vector.index(1)
report = (((domain + ': ') + nummap[num]) + '; ')
if ((vector[(- 2)] == 0) and (vector[(- 1)] == 1)):
report += 'booking: ok'
if ((vector[(- 2)] == 1) and (vector[(- 1)] == 0)):
report += 'booking: unable'
return report
def queryJsons(self, domain, constraints, exactly_match=True, return_name=False):
if (domain == 'taxi'):
return [{'taxi_colors': random.choice(self.dbs[domain]['taxi_colors']), 'taxi_types': random.choice(self.dbs[domain]['taxi_types']), 'taxi_phone': [random.randint(1, 9) for _ in range(10)]}]
if (domain == 'police'):
return self.dbs['police']
if (domain == 'hospital'):
if constraints.get('department'):
for entry in self.dbs['hospital']:
if (entry.get('department') == constraints.get('department')):
return [entry]
else:
return []
valid_cons = False
for v in constraints.values():
if (v not in ['not mentioned', '']):
valid_cons = True
if (not valid_cons):
return []
match_result = []
if ('name' in constraints):
for db_ent in self.dbs[domain]:
if ('name' in db_ent):
cons = constraints['name']
dbn = db_ent['name']
if (cons == dbn):
db_ent = (db_ent if (not return_name) else db_ent['name'])
match_result.append(db_ent)
return match_result
for db_ent in self.dbs[domain]:
match = True
for (s, v) in constraints.items():
if (s == 'name'):
continue
if ((s in ['people', 'stay']) or ((domain == 'hotel') and (s == 'day')) or ((domain == 'restaurant') and (s in ['day', 'time']))):
continue
skip_case = {"don't care": 1, "do n't care": 1, 'dont care': 1, 'not mentioned': 1, 'dontcare': 1, '': 1}
if skip_case.get(v):
continue
if (s not in db_ent):
match = False
break
v = ('yes' if (v == 'free') else v)
if (s in ['arrive', 'leave']):
try:
(h, m) = v.split(':')
v = ((int(h) * 60) + int(m))
except Exception:
match = False
break
time = ((int(db_ent[s].split(':')[0]) * 60) + int(db_ent[s].split(':')[1]))
if ((s == 'arrive') and (v > time)):
match = False
if ((s == 'leave') and (v < time)):
match = False
elif (exactly_match and (v != db_ent[s])):
match = False
break
elif (v not in db_ent[s]):
match = False
break
if match:
match_result.append(db_ent)
if (not return_name):
return match_result
else:
if (domain == 'train'):
match_result = [e['id'] for e in match_result]
else:
match_result = [e['name'] for e in match_result]
return match_result
def querySQL(self, domain, constraints):
if (not self.sql_dbs):
for dom in db_domains:
db = 'db/{}-dbase.db'.format(dom)
conn = sqlite3.connect(db)
c = conn.cursor()
self.sql_dbs[dom] = c
sql_query = 'select * from {}'.format(domain)
flag = True
for (key, val) in constraints.items():
if ((val == '') or (val == 'dontcare') or (val == 'not mentioned') or (val == "don't care") or (val == 'dont care') or (val == "do n't care")):
pass
elif flag:
sql_query += ' where '
val2 = val.replace("'", "''")
if (key == 'leaveAt'):
sql_query += (((((' ' + key) + ' > ') + "'") + val2) + "'")
elif (key == 'arriveBy'):
sql_query += (((((' ' + key) + ' < ') + "'") + val2) + "'")
else:
sql_query += (((((' ' + key) + '=') + "'") + val2) + "'")
flag = False
else:
val2 = val.replace("'", "''")
if (key == 'leaveAt'):
sql_query += (((((' and ' + key) + ' > ') + "'") + val2) + "'")
elif (key == 'arriveBy'):
sql_query += (((((' and ' + key) + ' < ') + "'") + val2) + "'")
else:
sql_query += (((((' and ' + key) + '=') + "'") + val2) + "'")
try:
print(sql_query)
return self.sql_dbs[domain].execute(sql_query).fetchall()
except Exception:
return [] |
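# Editor's note (hedged sketch): querySQL above assembles SQL by string concatenation
# and escapes quotes by hand. The sqlite3-idiomatic alternative binds values as
# parameters; this sketch mirrors the same leaveAt/arriveBy comparisons. Column and
# table names cannot be bound, so `domain` and `key` must come from trusted lists.
def query_sql_parameterized(cursor, domain, constraints):
    clauses = []
    values = []
    for (key, val) in constraints.items():
        if (val in ('', 'dontcare', 'not mentioned', "don't care", 'dont care', "do n't care")):
            continue
        op = {'leaveAt': '>', 'arriveBy': '<'}.get(key, '=')
        clauses.append('{} {} ?'.format(key, op))
        values.append(val)
    sql_query = 'select * from {}'.format(domain)
    if clauses:
        sql_query += (' where ' + ' and '.join(clauses))
    return cursor.execute(sql_query, values).fetchall() |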
class Sine(SignalGenerator):
def __init__(self, freq, **kwargs):
super(Sine, self).__init__(**kwargs)
self.freq = freq
def generate(self):
sine_of = (((self.freq * 2) * math.pi) / self.sample_rate)
sample_n = 0
while True:
(yield math.sin((sine_of * sample_n)))
sample_n += 1 |
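# Editor's note: generate() yields one sample per iteration forever, so
# itertools.islice is the natural way to take a fixed-length buffer. The sample_rate
# keyword is assumed to be accepted by the SignalGenerator base class.
import itertools
tone = Sine(freq=440, sample_rate=44100)
samples = list(itertools.islice(tone.generate(), 5))
print(samples) |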
def unzip(zip_path: str, dest_dir: str) -> None:
with ZipFile(zip_path, 'r') as zipObj:
zipObj.extractall(dest_dir) |
def modifies_known_mutable(obj, attr):
for (typespec, unsafe) in _mutable_spec:
if isinstance(obj, typespec):
return (attr in unsafe)
return False |
def filter_roberta_detectors(_, pretrained_name: str):
return ('detector' not in pretrained_name) |
@DATASET_REGISTRY.register()  # decorator prefix garbled in source; registry decorator assumed
class DIVO(ImageDataset):
_junk_pids = [0, (- 1)]
dataset_dir = ''
dataset_url = ''  # URL truncated in source
dataset_name = 'market1501'
def __init__(self, root='datasets', divo=False, **kwargs):
self.root = root
self.dataset_dir = osp.join(self.root, self.dataset_dir)
self.data_dir = self.dataset_dir
self.train_dir = osp.join(self.data_dir, 'bounding_box_train')
self.query_dir = osp.join(self.data_dir, 'bounding_box_test')
self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test')
self.train_dir = '/mnt/sdb/syh/DIVOTrack/datasets/DIVO/images/dets/det_imgs'
self.query_dir = '/mnt/sdb/syh/DIVOTrack/datasets/DIVO/images/dets/det_imgs'
self.gallery_dir = '/mnt/sdb/syh/DIVOTrack/datasets/DIVO/images/dets/det_imgs'
self.extra_gallery_dir = osp.join(self.data_dir, 'images')
self.divo = divo
required_files = [self.data_dir, self.train_dir, self.query_dir, self.gallery_dir]
if self.divo:
required_files.append(self.extra_gallery_dir)
self.check_before_run(required_files)
train = (lambda : self.process_dir(self.train_dir))
query = (lambda : self.process_dir(self.query_dir, is_train=False))
gallery = (lambda : (self.process_dir(self.gallery_dir, is_train=False) + (self.process_dir(self.extra_gallery_dir, is_train=False) if self.divo else [])))
super(DIVO, self).__init__(train, query, gallery, **kwargs)
def process_dir(self, dir_path, is_train=True):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
data = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if (pid == (- 1)):
continue
camid -= 1
if is_train:
pid = ((self.dataset_name + '_') + str(pid))
camid = ((self.dataset_name + '_') + str(camid))
data.append((img_path, pid, camid))
return data |
def get_source_index(scale, dst_index, half_pixel):
return (np.maximum(0, ((scale * (dst_index + 0.5)) - 0.5)) if half_pixel else (scale * dst_index)) |
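# Editor's note: with scale = src_size/dst_size, half_pixel maps destination pixel
# centres to source pixel centres (the ONNX Resize 'half_pixel' mode); the other
# branch is the 'asymmetric' mode. For a 2x upscale (scale = 0.5) the source indices
# are half_pixel: 0.0, 0.25, 0.75, 1.25 and asymmetric: 0.0, 0.5, 1.0, 1.5:
import numpy as np
for dst_index in range(4):
    print(dst_index, get_source_index(0.5, dst_index, True), get_source_index(0.5, dst_index, False)) |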
def convert_example_to_features(example, tokenizer, max_seq_length):
tokens = example['tokens']
segment_ids = example['segment_ids']
is_random_next = example['is_random_next']
masked_lm_positions = example['masked_lm_positions']
masked_lm_labels = example['masked_lm_labels']
assert (len(tokens) == len(segment_ids) <= max_seq_length)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
input_array = np.zeros(max_seq_length, dtype=int)
input_array[:len(input_ids)] = input_ids
mask_array = np.zeros(max_seq_length, dtype=bool)
mask_array[:len(input_ids)] = 1
segment_array = np.zeros(max_seq_length, dtype=bool)
segment_array[:len(segment_ids)] = segment_ids
lm_label_array = np.full(max_seq_length, dtype=int, fill_value=(- 1))
lm_label_array[masked_lm_positions] = masked_label_ids
features = InputFeatures(input_ids=input_array, input_mask=mask_array, segment_ids=segment_array, lm_label_ids=lm_label_array, is_next=is_random_next)
return features |
def view(g, self, size):
if _is_value(size):
shape = size
else:
if self.isTensor():
self_sizes = self.type().sizes()
if (self_sizes and (len(size) == 2) and (self_sizes[0] == size[0])):
return g.op('Flatten', self, axis_i=1)
shape = g.op('Constant', value_t=torch.LongTensor(size))
return g.op('Reshape', self, shape) |
def test_logging(capsys, tmp_path):
config_filename = get_pkg_data_filename('data/test_config.yml')
output_filename = str((tmp_path / 'logging.fits'))
skypy.main([config_filename, output_filename])
(out, err) = capsys.readouterr()
assert (not err)
with pytest.raises(SystemExit):
skypy.main([config_filename, output_filename, '--verbose'])
(out, err) = capsys.readouterr()
config = load_skypy_yaml(config_filename)
cosmology = config.pop('cosmology', None)
tables = config.pop('tables', {})
config.update({k: v.pop('.init', Call(Table)) for (k, v) in tables.items()})
columns = [f'{t}.{c}' for (t, cols) in tables.items() for c in cols]
functions = [f for f in config.values() if isinstance(f, Call)]
functions += [f for (t, cols) in tables.items() for f in cols.values() if isinstance(f, Call)]
for job in ((list(config) + list(tables)) + columns):
log_string = f'[INFO] skypy.pipeline: Generating {job}'
assert (log_string in err)
for f in functions:
log_string = f'[INFO] skypy.pipeline: Calling {f.function.__name__}'
assert (log_string in err)
if cosmology:
assert ('[INFO] skypy.pipeline: Setting cosmology' in err)
assert (f'[INFO] skypy: Writing {output_filename}' in err)
try:
from astropy.utils.misc import NOT_OVERWRITING_MSG
error_string = NOT_OVERWRITING_MSG.format(output_filename)
except ImportError:
error_string = f'[ERROR] skypy: File {output_filename!r} already exists.'
assert (error_string in err)
with pytest.raises(SystemExit):
skypy.main([config_filename, output_filename, '-qq'])
(out, err) = capsys.readouterr()
assert (not err) |
class SAGE(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
super(SAGE, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
for _ in range((num_layers - 2)):
self.convs.append(SAGEConv(hidden_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
self.dropout = dropout
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, x, adj_t):
for conv in self.convs[:(- 1)]:
x = conv(x, adj_t)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.convs[(- 1)](x, adj_t)
return torch.log_softmax(x, dim=(- 1)) |
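# Editor's note (hedged): example forward pass for the SAGE model above, assuming
# SAGEConv comes from torch_geometric (it also accepts an edge_index tensor in place
# of adj_t). The sizes and the random graph are illustrative only.
import torch
model = SAGE(in_channels=16, hidden_channels=32, out_channels=7, num_layers=3, dropout=0.5)
x = torch.randn(100, 16)
edge_index = torch.randint(0, 100, (2, 500))
out = model(x, edge_index)
# -> torch.Size([100, 7]): per-node log-probabilities over 7 classes
print(out.shape) |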
def make_destination_dataset(ws, schema, name=None):
name = (name or 'dst')
dst_init = core.Net('{}_init'.format(name))
with core.NameScope(name):
dst_ds = Dataset(schema, name=name)
dst_ds.init_empty(dst_init)
ws.run(dst_init)
return dst_ds |
class SG2260Context(BModelContext):
device = Target.SG2260
memmap = memmap
dma_sys = dma_sys
tiu_sys = tiu_sys
local_layout_to_stride = local_layout_to_stride
valid_tag = {1: 0, 2: 1}
base_addr = [0, None, GET_LMEM_START_ADDR]  # second base address elided in source; None placeholder
def __init__(self) -> None:
super().__init__()
self.decoder = Decoder(self)
self._runner = None
@_cache()  # decorator prefix garbled in source
def opparam_converter(self):
return get_opparam_converter_with_context(self, opparam_converter)
def MemRef(self) -> Type[MemRef]:
return partial(MemRef, context=self)
def get_memory_type(self, reg_address):
tag = ((reg_address >> 40) & 31)
if (tag in (1, 2)):
return MType.G
elif (tag == 0):
return MType.R
elif (tag == 31):
if (np.binary_repr(reg_address)[(- 27)] == '1'):
return MType.S
else:
return MType.R
elif (tag == 30):
return MType.L
return MType.UNKNOWN
def fix_addr(self, reg_address: int) -> int:
assert (0 <= reg_address < (2 ** 45))
tag = ((reg_address >> 40) & 31)
if (tag == 31):
return (reg_address & ((1 << 40) - 1))  # mask elided in source; low 40 bits assumed from the tag layout
fixed_addr = (self.base_addr[self.valid_tag[tag]] + (reg_address & ((1 << 40) - 1)))  # mask elided in source; low 40 bits assumed
return fixed_addr
@classmethod
def merge_instruction(cls, tiu: List[BaseTpuCmd], dma: List[BaseTpuCmd]) -> List[BaseTpuCmd]:
(main_cmd, inserted_cmd) = (dma, tiu)
def get_end(cmds: List[BaseTpuCmd]):
if (len(cmds) == 0):
return 0
if cls.is_sys(cmds[(- 1)]):
return (- 1)
else:
return len(cmds)
def fix_tgcr_cmd_id_dp(tiu_cmd: List[BaseTpuCmd]):
for (i, v) in enumerate(tiu_cmd):
if isinstance(v.reg, SYS_TR_ACC_reg):
v.cmd_id_dep = (tiu_cmd[(i + 1)].cmd_id_dep if (tiu_cmd[(i + 1)].cmd_id_dep is not None) else tiu_cmd[(i + 2)].cmd_id_dep)
fix_tgcr_cmd_id_dp(inserted_cmd[:get_end(inserted_cmd)])
main_id = [(m.cmd_id, m) for m in main_cmd[:get_end(main_cmd)]]
inserted_id = [(i.cmd_id_dep, i) for i in inserted_cmd[:get_end(inserted_cmd)]]
cmd = (main_id + inserted_id)
cmd_sorted = sorted(cmd, key=(lambda x: x[0]))
return [x[1] for x in cmd_sorted]
@classmethod
def is_sys(cls, cmd: BaseTpuCmd):
return isinstance(cmd.reg, (dma_sys, tiu_sys))
def get_runner(self, memory_size: int) -> CModelRunner:
assert self.using_cmodel, '2260 currently only support cmodel mode'
if (self._cmodel_runner is None):
self._cmodel_runner = SG2260Runner(memory_size, self.base_addr)
return self._cmodel_runner |