code stringlengths 101 5.91M |
|---|
class ArchVariant():
    """A named architecture variant that renders to a '_'-joined identifier."""

    def __init__(self, name, is_custom=False):
        # is_custom appends a 'custom' suffix when rendering.
        self.name = name
        self.is_custom = is_custom

    def render(self):
        """Return the variant identifier, e.g. 'resnet' or 'resnet_custom'."""
        parts = [self.name]
        if self.is_custom:
            parts.append('custom')
        return '_'.join(parts)
def compute_aspect_ratios(dataset, indices=None):
    """Dispatch aspect-ratio computation to the handler matching `dataset`.

    Datasets exposing `get_height_and_width` take priority; known
    torchvision dataset types use their fast metadata-based helpers;
    anything else falls back to the slow generic path.
    """
    if hasattr(dataset, 'get_height_and_width'):
        return _compute_aspect_ratios_custom_dataset(dataset, indices)
    handlers = (
        (torchvision.datasets.CocoDetection, _compute_aspect_ratios_coco_dataset),
        (torchvision.datasets.VOCDetection, _compute_aspect_ratios_voc_dataset),
        (torch.utils.data.Subset, _compute_aspect_ratios_subset_dataset),
    )
    for dataset_type, handler in handlers:
        if isinstance(dataset, dataset_type):
            return handler(dataset, indices)
    # No fast path available: iterate the dataset and load each sample.
    return _compute_aspect_ratios_slow(dataset, indices)
def verbosity_to_loglevel(verbosity: int, extended=True):
    """Map an integer verbosity to a logging level.

    With `extended` enabled, verbosity 2 and 3 map to the custom
    logging.INFO2 / logging.INFO3 levels (registered elsewhere); those
    attributes are only touched in their own branches so the plain
    levels keep working even if the custom ones are absent.
    """
    if verbosity <= 0:
        return logging.ERROR
    if verbosity == 1:
        return logging.INFO
    if not extended:
        return logging.DEBUG
    if verbosity == 2:
        return logging.INFO2
    if verbosity == 3:
        return logging.INFO3
    return logging.DEBUG
def sox_func(inputs):
    """Convert one speaker's mp3 files to 16 kHz mono 16-bit wav via sox.

    `inputs` is a (files, root, out_root, speaker) tuple.  Output files go
    under out_root/<split>, where <split> is the second '-' field of the
    source file name, and are renamed to '<speaker>-<stem>.wav'.
    """
    files, root, out_root, speaker = inputs
    for name in tqdm.tqdm(files, desc=('Process for speaker: ' + speaker)):
        if not name.endswith('.mp3'):
            continue
        split = name.split('-')[1]
        out_dir = os.path.join(out_root, split)
        os.makedirs(out_dir, exist_ok=True)
        orig_file = os.path.join(root, name)
        stem = name.split('/')[-1].split('.')[0]
        new_file = os.path.join(out_dir, f'{speaker}-{stem}.wav')
        bashCommand = f'sox {orig_file} -t wav -c 1 -r 16000 -b 16 -e signed-integer {new_file}'
        # Output of sox is read (and discarded) so the process completes.
        r = os.popen(bashCommand).read()
class MapieCalibrator(BaseEstimator, ClassifierMixin):
    """Top-label calibration wrapper around a classifier.

    Fits one regressor ("calibrator") per predicted label, mapping the
    classifier's maximum predicted probability to a calibrated score.
    Only the 'top_label' method and 'prefit'/'split' cv modes are
    supported.
    """
    # Attributes that must exist for check_is_fitted to pass.
    fit_attributes = ['estimator', 'calibrators']
    # Default calibrators selectable by name.
    named_calibrators = {'sigmoid': _SigmoidCalibration(), 'isotonic': IsotonicRegression(out_of_bounds='clip')}
    valid_methods = ['top_label']
    valid_cv = ['prefit', 'split']
    # Accepted sklearn type_of_target values.
    valid_inputs = ['multiclass', 'binary']

    def __init__(self, estimator: Optional[ClassifierMixin]=None, method: str='top_label', calibrator: Optional[Union[(str, RegressorMixin)]]=None, cv: Optional[str]='split') -> None:
        self.estimator = estimator
        self.method = method
        self.calibrator = calibrator
        self.cv = cv

    def _check_cv(self, cv: Optional[str]) -> str:
        """Validate cv, returning it unchanged or raising ValueError."""
        if (cv in self.valid_cv):
            return cv
        raise ValueError(f'Invalid cv argument. Allowed values are {self.valid_cv}.')

    def _check_calibrator(self, calibrator: Optional[Union[(str, RegressorMixin)]]) -> RegressorMixin:
        """Resolve a calibrator name (default 'sigmoid') to an estimator
        and check it implements fit/predict."""
        if (calibrator is None):
            calibrator = 'sigmoid'
        if isinstance(calibrator, str):
            if (calibrator in self.named_calibrators.keys()):
                calibrator = self.named_calibrators[calibrator]
            else:
                raise ValueError((('Please provide a string in: ' + ', '.join(self.named_calibrators.keys())) + '.'))
        check_estimator_fit_predict(calibrator)
        return calibrator

    def _get_labels(self, X: ArrayLike) -> Tuple[(NDArray, NDArray)]:
        """Return (max class probability as a column vector, predicted labels)."""
        pred = self.single_estimator_.predict_proba(X=X)
        max_class_prob = np.max(pred, axis=1).reshape((- 1), 1)
        y_pred = self.classes_[np.argmax(pred, axis=1)]
        return (max_class_prob, y_pred)

    def _check_method(self) -> None:
        """Raise ValueError unless self.method is supported."""
        if (self.method not in self.valid_methods):
            raise ValueError((('Invalid method, allowed method are: ' + ', '.join(self.valid_methods)) + '.'))

    def _check_type_of_target(self, y: ArrayLike):
        """Raise ValueError unless y is a binary/multiclass target."""
        if (type_of_target(y) not in self.valid_inputs):
            raise ValueError((('Make sure to have one of the allowed targets: ' + ', '.join(self.valid_inputs)) + '.'))

    def _fit_calibrator(self, label: Union[(int, str)], calibrator: RegressorMixin, y_calib: NDArray, top_class_prob: NDArray, y_pred: NDArray, sample_weight: Optional[ArrayLike]) -> RegressorMixin:
        """Fit a clone of `calibrator` on the samples predicted as `label`,
        regressing (top probability -> correctness indicator)."""
        calibrator_ = clone(calibrator)
        sample_weight = cast(NDArray, sample_weight)
        # Restrict to calibration samples whose predicted label is `label`.
        given_label_indices = np.argwhere((y_pred.ravel() == label)).ravel()
        # Binary target: was the prediction correct for those samples?
        y_calib_ = np.equal(y_calib[given_label_indices], label).astype(int)
        top_class_prob_ = top_class_prob[given_label_indices]
        if (sample_weight is not None):
            sample_weight_ = sample_weight[given_label_indices]
            # Drop zero-weight samples before fitting.
            (sample_weight_, top_class_prob_, y_calib_) = check_null_weight(sample_weight_, top_class_prob_, y_calib_)
        else:
            sample_weight_ = sample_weight
        calibrator_ = fit_estimator(calibrator_, top_class_prob_, y_calib_, sample_weight_)
        return calibrator_

    def _fit_calibrators(self, X: ArrayLike, y: ArrayLike, sample_weight: Optional[ArrayLike], calibrator: RegressorMixin) -> Dict[(Union[(int, str)], RegressorMixin)]:
        """Fit one calibrator per label observed in the predictions on X."""
        (X, y) = indexable(X, y)
        y = _check_y(y)
        (max_prob, y_pred) = self._get_labels(X)
        calibrators = {}
        for label in np.unique(y_pred):
            calibrator_ = self._fit_calibrator(label, calibrator, cast(NDArray, y), max_prob, y_pred, sample_weight)
            calibrators[label] = calibrator_
        return calibrators

    def _pred_proba_calib(self, idx: int, label: Union[(int, str)], calibrated_values: NDArray, max_prob: NDArray, y_pred: NDArray) -> None:
        """Write calibrated scores for one label into column `idx` of
        `calibrated_values` (in place).  Labels unseen during calibration
        keep their uncalibrated scores, with a warning."""
        idx_labels = np.where((y_pred.ravel() == label))[0].ravel()
        if (label not in self.calibrators.keys()):
            calibrated_values[(idx_labels, idx)] = max_prob[idx_labels].ravel()
            warnings.warn(((f'WARNING: This predicted label {label} has not been seen ' + ' during the calibration and therefore scores will remain') + ' unchanged.'))
        else:
            calibrator_ = self.calibrators[label]
            preds_ = calibrator_.predict(max_prob[idx_labels])
            calibrated_values[(idx_labels, idx)] = preds_

    def fit(self, X: ArrayLike, y: ArrayLike, sample_weight: Optional[NDArray]=None, calib_size: Optional[float]=0.33, random_state: Optional[Union[(int, np.random.RandomState, None)]]=None, shuffle: Optional[bool]=True, stratify: Optional[ArrayLike]=None) -> MapieCalibrator:
        """Fit the base classifier (unless cv='prefit') and the per-label
        calibrators.

        With cv='split' the data is split into train/calibration parts of
        relative size (1 - calib_size)/calib_size.  Returns self.
        """
        self._check_method()
        cv = self._check_cv(self.cv)
        (X, y) = indexable(X, y)
        y = _check_y(y)
        self._check_type_of_target(y)
        estimator = check_estimator_classification(X, y, cv, self.estimator)
        calibrator = self._check_calibrator(self.calibrator)
        (sample_weight, X, y) = check_null_weight(sample_weight, X, y)
        self.n_features_in_ = check_n_features_in(X, cv, estimator)
        random_state = check_random_state(random_state)
        if (cv == 'prefit'):
            # Estimator is assumed already fitted: calibrate on all of (X, y).
            self.single_estimator_ = estimator
            self.classes_ = self.single_estimator_.classes_
            self.n_classes_ = len(self.classes_)
            self.calibrators = self._fit_calibrators(X, y, sample_weight, calibrator)
        if (cv == 'split'):
            results = get_calib_set(X, y, sample_weight=sample_weight, calib_size=calib_size, random_state=random_state, shuffle=shuffle, stratify=stratify)
            (X_train, y_train, X_calib, y_calib, sw_train, sw_calib) = results
            (X_train, y_train) = indexable(X_train, y_train)
            y_train = _check_y(y_train)
            (sw_train, X_train, y_train) = check_null_weight(sw_train, X_train, y_train)
            estimator = fit_estimator(clone(estimator), X_train, y_train, sw_train)
            self.single_estimator_ = estimator
            self.classes_ = self.single_estimator_.classes_
            self.n_classes_ = len(self.classes_)
            self.calibrators = self._fit_calibrators(X_calib, y_calib, sw_calib, calibrator)
        return self

    def predict_proba(self, X: ArrayLike) -> NDArray:
        """Return an (n_samples, n_classes) array of calibrated top-label
        scores; entries for classes other than the predicted one are NaN."""
        check_is_fitted(self, self.fit_attributes)
        self.uncalib_pred = self.single_estimator_.predict_proba(X=X)
        (max_prob, y_pred) = self._get_labels(X)
        n = _num_samples(max_prob)
        calibrated_test_values = np.full((n, self.n_classes_), np.nan)
        for (idx, label) in enumerate(np.unique(y_pred)):
            self._pred_proba_calib(idx, label, calibrated_test_values, max_prob, y_pred)
        return calibrated_test_values

    def predict(self, X: ArrayLike) -> NDArray:
        """Return the (uncalibrated) class predictions of the base estimator."""
        check_is_fitted(self, self.fit_attributes)
        return self.single_estimator_.predict(X)
@dataclass(frozen=True)
class PerspectiveAPIRequestResult():
    """Immutable result of one Perspective API request.

    BUG FIX: the decorator was a bare `(frozen=True)` (a syntax error);
    the `field(default_factory=dict)` usage shows this is meant to be a
    frozen dataclass, so the `@dataclass` decorator is restored.
    Assumes `dataclass` is imported at module level alongside `field` —
    TODO confirm against the file's imports.
    """
    # Whether the request succeeded.
    success: bool
    # Whether the response was served from a cache rather than the API.
    cached: bool
    # Toxicity attributes keyed by input text.
    text_to_toxicity_attributes: Dict[(str, ToxicityAttributes)] = field(default_factory=dict)
    # Error message when success is False.
    error: Optional[str] = None
def eval_task1(version_dir: Path):
    """Evaluate task-1 (event detection) predictions stored in `version_dir`.

    Reads dev_task1.csv / test_task1.csv, picks the detection threshold
    maximising F1 on the dev split, then reports precision/recall/F1/AUC
    for both splits.  Returns a flat stats dict, or {} when either CSV is
    missing.
    """
    if (not ((version_dir / 'dev_task1.csv').is_file() and (version_dir / 'test_task1.csv').is_file())):
        logging.warning(f'Directory {version_dir} does not contain task 1')
        return {}
    stats = {}
    dev_pred = pd.read_csv((version_dir / 'dev_task1.csv'))
    # Binarise the target: earthquake vs everything else.
    dev_pred['trace_type_bin'] = (dev_pred['trace_type'] == 'earthquake')
    test_pred = pd.read_csv((version_dir / 'test_task1.csv'))
    test_pred['trace_type_bin'] = (test_pred['trace_type'] == 'earthquake')
    (prec, recall, thr) = precision_recall_curve(dev_pred['trace_type_bin'], dev_pred['score_detection'])
    # F1 per PR point; may contain NaN where prec + recall == 0, hence nanargmax.
    f1 = (((2 * prec) * recall) / (prec + recall))
    auc = roc_auc_score(dev_pred['trace_type_bin'], dev_pred['score_detection'])
    opt_index = np.nanargmax(f1)
    opt_thr = thr[opt_index]
    dev_stats = {'dev_det_precision': prec[opt_index], 'dev_det_recall': recall[opt_index], 'dev_det_f1': f1[opt_index], 'dev_det_auc': auc, 'det_threshold': opt_thr}
    stats.update(dev_stats)
    # Test metrics are computed at the dev-optimal threshold.
    (prec, recall, f1, _) = precision_recall_fscore_support(test_pred['trace_type_bin'], (test_pred['score_detection'] > opt_thr), average='binary')
    auc = roc_auc_score(test_pred['trace_type_bin'], test_pred['score_detection'])
    test_stats = {'test_det_precision': prec, 'test_det_recall': recall, 'test_det_f1': f1, 'test_det_auc': auc}
    stats.update(test_stats)
    return stats
class ErrorMetrics():
    """Character and word error rates between a prediction and a transcript."""

    def preprocess(self, text):
        """Collapse all whitespace runs to single spaces and strip the ends."""
        return ' '.join(text.strip().split())

    def calculate_metrics(self, predicted_text, transcript):
        """Return (CER, WER): edit distance normalised by transcript length
        in characters and in words respectively."""
        char_distance = ed.eval(predicted_text, transcript)
        cer = char_distance / float(len(transcript))
        predicted_words = predicted_text.split()
        transcript_words = transcript.split()
        wer = ed.eval(predicted_words, transcript_words) / float(len(transcript_words))
        return (cer, wer)
def plot_mesh(ax, coors, conn, edges, color='k', **plot_kwargs):
    """Plot the edges of a mesh on a matplotlib axes.

    Parameters
    ----------
    ax : axes or None
        Target axes; passed through `_get_axes` (defined elsewhere) —
        presumably creates axes matching `dim` when None; verify there.
    coors : array, shape (n_nodes, dim)
        Node coordinates; `dim` comes from the second axis.
    conn : array-like
        Element connectivity — one row of node indices per element.
    edges : array-like
        Index array selecting each element's edge node pairs.
    color : matplotlib color spec for all edges.
    **plot_kwargs : extra keyword arguments forwarded to `ax.plot`.

    Returns
    -------
    ax : the axes drawn on.
    """
    dim = coors.shape[1]
    ax = _get_axes(ax, dim)
    coors = _to2d(coors)
    for el in conn:
        # Node-index pairs of this element's edges.
        eds = el[edges]
        for ed in eds:
            cc = coors[ed]
            # cc.T unpacks to per-axis coordinate arrays for ax.plot.
            ax.plot(*cc.T, color=color, **plot_kwargs)
    return ax
def test_all_zero_stats():
    """Feed a stream of zeros to each pysad statistic and check that plain,
    absolute and windowed variants all report the expected value, and that
    remove() restores the previous state."""
    import numpy as np
    from pysad.statistics import AbsStatistic
    from pysad.statistics import RunningStatistic
    from pysad.statistics import AverageMeter
    from pysad.statistics import CountMeter
    from pysad.statistics import MaxMeter
    from pysad.statistics import MedianMeter
    from pysad.statistics import MinMeter
    from pysad.statistics import SumMeter
    from pysad.statistics import SumSquaresMeter
    from pysad.statistics import VarianceMeter
    from pysad.utils import fix_seed
    fix_seed(61)
    num_items = 100
    # Expected value for an all-zero stream; 'count' marks the one meter
    # whose expectation depends on the number of items seen.
    stat_classes = {AverageMeter: 0.0, CountMeter: 'count', MaxMeter: 0.0, MedianMeter: 0.0, MinMeter: 0.0, SumMeter: 0.0, SumSquaresMeter: 0.0, VarianceMeter: 0.0}
    for (stat_cls, val) in stat_classes.items():
        stat = stat_cls()
        abs_stat = AbsStatistic(stat_cls)
        window_size = 25
        running_stat = RunningStatistic(stat_cls, window_size=window_size)
        arr = np.zeros(num_items, dtype=np.float64)
        prev_value = 0.0
        for i in range(arr.shape[0]):
            num = arr[i]
            stat.update(num)
            abs_stat.update(num)
            running_stat.update(num)
            # Skip the first couple of updates (e.g. variance needs >1 item).
            if (i > 1):
                assert np.isclose(stat.get(), (val if (val != 'count') else (i + 1)))
                assert np.isclose(abs_stat.get(), (val if (val != 'count') else (i + 1)))
                # The running variant is capped at its window size.
                assert np.isclose(running_stat.get(), (val if (val != 'count') else min((i + 1), window_size)))
                # remove() then update() must round-trip back to the same value.
                stat.remove(num)
                abs_stat.remove(num)
                assert np.isclose(stat.get(), prev_value)
                assert np.isclose(abs_stat.get(), abs(prev_value))
                stat.update(num)
                abs_stat.update(num)
            prev_value = stat.get()
def _seg_29():
    """Return one chunk of a generated code-point mapping table (CJK
    punctuation, kana and hangul-jamo range, U+3001..U+317E area).

    Each tuple is (codepoint, status[, mapping]); statuses seen here are
    'V' (valid), 'X' (disallowed), 'M' (mapped) and '3' (mapped variant).
    NOTE(review): most u'' mapping targets appear empty — presumably the
    non-ASCII replacement characters were lost in extraction; verify
    against the original generated table before relying on the mappings.
    """
    return [(12289, 'V'), (12290, 'M', u'.'), (12291, 'V'), (12342, 'M', u''), (12343, 'V'), (12344, 'M', u''), (12345, 'M', u''), (12346, 'M', u''), (12347, 'V'), (12352, 'X'), (12353, 'V'), (12439, 'X'), (12441, 'V'), (12443, '3', u' '), (12444, '3', u' '), (12445, 'V'), (12447, 'M', u''), (12448, 'V'), (12543, 'M', u''), (12544, 'X'), (12549, 'V'), (12592, 'X'), (12593, 'M', u''), (12594, 'M', u''), (12595, 'M', u''), (12596, 'M', u''), (12597, 'M', u''), (12598, 'M', u''), (12599, 'M', u''), (12600, 'M', u''), (12601, 'M', u''), (12602, 'M', u''), (12603, 'M', u''), (12604, 'M', u''), (12605, 'M', u''), (12606, 'M', u''), (12607, 'M', u''), (12608, 'M', u''), (12609, 'M', u''), (12610, 'M', u''), (12611, 'M', u''), (12612, 'M', u''), (12613, 'M', u''), (12614, 'M', u''), (12615, 'M', u''), (12616, 'M', u''), (12617, 'M', u''), (12618, 'M', u''), (12619, 'M', u''), (12620, 'M', u''), (12621, 'M', u''), (12622, 'M', u''), (12623, 'M', u''), (12624, 'M', u''), (12625, 'M', u''), (12626, 'M', u''), (12627, 'M', u''), (12628, 'M', u''), (12629, 'M', u''), (12630, 'M', u''), (12631, 'M', u''), (12632, 'M', u''), (12633, 'M', u''), (12634, 'M', u''), (12635, 'M', u''), (12636, 'M', u''), (12637, 'M', u''), (12638, 'M', u''), (12639, 'M', u''), (12640, 'M', u''), (12641, 'M', u''), (12642, 'M', u''), (12643, 'M', u''), (12644, 'X'), (12645, 'M', u''), (12646, 'M', u''), (12647, 'M', u''), (12648, 'M', u''), (12649, 'M', u''), (12650, 'M', u''), (12651, 'M', u''), (12652, 'M', u''), (12653, 'M', u''), (12654, 'M', u''), (12655, 'M', u''), (12656, 'M', u''), (12657, 'M', u''), (12658, 'M', u''), (12659, 'M', u''), (12660, 'M', u''), (12661, 'M', u''), (12662, 'M', u''), (12663, 'M', u''), (12664, 'M', u''), (12665, 'M', u''), (12666, 'M', u''), (12667, 'M', u''), (12668, 'M', u''), (12669, 'M', u''), (12670, 'M', u'')]
class CosineSimilarityLoss(nn.Module):
    """MSE loss between the cosine similarity of two sentence embeddings
    and a gold similarity score."""

    def __init__(self, model: SentenceTransformer):
        super(CosineSimilarityLoss, self).__init__()
        self.model = model

    def forward(self, sentence_features: Iterable[Dict[(str, Tensor)]], labels: Tensor):
        """Encode both feature dicts and compare by cosine similarity.

        Returns the MSE loss against `labels` when labels are provided,
        otherwise the pair (embeddings, similarities).
        """
        embeddings = []
        for features in sentence_features:
            embeddings.append(self.model(features)['sentence_embedding'])
        emb_left, emb_right = embeddings
        similarity = torch.cosine_similarity(emb_left, emb_right)
        if labels is None:
            return (embeddings, similarity)
        loss_fct = nn.MSELoss()
        return loss_fct(similarity, labels.view((- 1)))
class TestLayout(unittest.TestCase):
    """Unit tests for the Layout class ('defstr.lay' fixture).

    NOTE(review): several tests use a module-level `L1` — presumably a
    pre-built Layout shared across tests and defined elsewhere in this
    file; confirm it is loaded from 'defstr.lay' with subsegments.
    """

    def test_add_fnod(self):
        """Adding a free node increments the point count (12 -> 13)."""
        L = Layout('defstr.lay')
        L.add_fnod(p=(10, 10))
        self.assertEqual(L.Np, 13)

    def test_add_furniture(self):
        """add_furniture runs without error on the default layout."""
        L = Layout('defstr.lay')
        L.add_furniture(name='R1_C', matname='PARTITION', origin=(5.0, 5.0), zmin=0.0, height=1.0, width=1.0, length=1.0, angle=0.0)

    def test_add_nfpe(self):
        """Adding a node from point/edge increments the point count."""
        L = Layout('defstr.lay')
        L.add_nfpe((- 8), 7, 6)
        self.assertEqual(L.Np, 13)

    def test_angleonlink(self):
        """2D and 3D angle-on-link computations run on sample endpoints."""
        data1 = L1.angleonlink(np.array([2, 2.5]), np.array([8, 4]))
        data2 = L1.angleonlink3(np.array([2, 2.5, 1.5]), np.array([8, 4, 1.5]))
        print(data1)
        print(data2)
        data1 = L1.angleonlink(np.array([2, 2]), np.array([8, 4]))
        data2 = L1.angleonlink3(np.array([2, 2, 1.5]), np.array([8, 4, 1.5]))
        print(data1)
        print(data2)

    def test_boundary(self):
        """boundary() runs without error."""
        L = Layout('defstr.lay')
        L.boundary()

    def test_build(self):
        """build() runs without error."""
        L = Layout('defstr.lay')
        L.build()

    def test_cleanup(self):
        """cleanup() removes the dangling node added by add_fnod."""
        L = Layout('defstr.lay')
        L.add_fnod(p=(10, 10))
        L.cleanup()
        self.assertEqual(L.Np, 12)

    def test_load(self):
        """The default layout has 12 points and 15 segments."""
        L = Layout('defstr.lay')
        self.assertEqual(L.Np, 12)
        self.assertEqual(L.Ns, 15)

    def test_check(self):
        """check() reports a consistent layout."""
        (bc, ds) = L1.check()
        self.assertTrue(bc)

    def test_check2(self):
        """check2()/check_Gi() run after build without error."""
        L = Layout('defstr.lay')
        L.build()
        tseg = L.check2()
        L.build()
        L.check_Gi()

    def test_have_subseg(self):
        """Only segments 1-3 carry subsegments in the fixture."""
        self.assertTrue(L1.have_subseg(1))
        self.assertTrue(L1.have_subseg(2))
        self.assertTrue(L1.have_subseg(3))
        self.assertFalse(L1.have_subseg(4))
        self.assertFalse(L1.have_subseg(5))
        self.assertFalse(L1.have_subseg(6))
        self.assertFalse(L1.have_subseg(7))
        self.assertFalse(L1.have_subseg(8))
        self.assertFalse(L1.have_subseg(9))
        self.assertFalse(L1.have_subseg(10))
        self.assertFalse(L1.have_subseg(11))

    def test_add_pons(self):
        """Adding a point on segment 1 increments the point count."""
        L = Layout('defstr.lay')
        L.add_pons(1, alpha=0.6)
        self.assertEqual(L.Np, 13)

    def test_isseg(self):
        """Points -8 and -7 are joined by a segment."""
        self.assertTrue(L1.isseg((- 8), (- 7)))

    def test_ispoint(self):
        """ispoint returns -1 for the origin in this fixture."""
        pto = np.array([[0], [0]])
        num = L1.ispoint(pto)
        self.assertEqual(num, (- 1))

    def test_seg_intersection(self):
        """seg_intersection accepts tail/head keyword endpoints."""
        pt1 = L1.Gs.pos[(- 8)]
        pt2 = L1.Gs.pos[(- 7)]
        (liseg, lipsh) = L1.seg_intersection(**{'ta': pt1, 'he': pt2})

    def test_clip(self):
        """clip on the sample box selects segments summing to 10."""
        seglist = L1.clip(2, 8, 2, 4)
        self.assertEqual(sum(seglist), 10)

    def test_cy2pt(self):
        """cy2pt returns a point for cycle 2 after build()."""
        L = Layout('defstr.lay')
        L.build()
        pt = L.cy2pt(2)

    def test_geomfile(self):
        """geomfile() runs without error."""
        L1.geomfile()

    def test_DLRosm(self):
        """An OSM layout file can be loaded."""
        L = Layout('DLR.osm')
def hear_scene_kfolds(target_dir: str, cache_dir: str, dataset_root: str, test_fold: int, num_folds: int, get_path_only: bool=False):
    """Prepare k-fold train/valid/test CSVs for a HEAR scene task.

    Fold `test_fold` becomes the test split, the next fold (cyclically)
    the validation split, and all remaining folds the training split.

    Parameters
    ----------
    target_dir: directory where train.csv / valid.csv / test.csv are written.
    cache_dir: unused here; kept for interface compatibility.
    dataset_root: root holding foldNN.json metadata and resampled audio.
    test_fold: index of the fold used for testing; must be < num_folds.
    num_folds: total number of folds.
    get_path_only: when True, return the CSV paths without rebuilding them.

    Returns (train_csv, valid_csv, [test_csv]).
    """
    assert (test_fold < num_folds), f'test_fold id must be smaller than num_folds. get test_fold={test_fold} and num_folds={num_folds}'
    target_dir = Path(target_dir)
    train_csv = (target_dir / 'train.csv')
    valid_csv = (target_dir / 'valid.csv')
    test_csv = (target_dir / 'test.csv')
    if get_path_only:
        return (train_csv, valid_csv, [test_csv])
    resample_hear_corpus(dataset_root, target_sr=16000)
    dataset_root = Path(dataset_root)
    wav_root: Path = (dataset_root / '16000')

    def load_json(filepath):
        # Small helper: parse one fold's metadata file.
        with open(filepath, 'r') as fp:
            return json.load(fp)
    fold_metas = []
    fold_datas = []
    for fold_id in range(num_folds):
        # f'{fold_id:02d}' zero-pads directly, replacing the old
        # f'{fold_id:2d}'.replace(' ', '0') space-then-substitute trick
        # (identical output for all non-negative fold ids).
        meta = load_json((dataset_root / f'fold{fold_id:02d}.json'))
        fold_metas.append(meta)
        data = defaultdict(list)
        for k in list(meta.keys()):
            wav_path = ((wav_root / f'fold{fold_id:02d}') / k)
            labels = meta[k]
            data['id'].append(k)
            data['wav_path'].append(wav_path)
            data['labels'].append(','.join([str(label).strip() for label in labels]))
        df = pd.DataFrame(data=data)
        fold_datas.append(df)
    test_id = test_fold
    valid_id = ((test_fold + 1) % num_folds)
    train_ids = [idx for idx in range(num_folds) if (idx not in [test_id, valid_id])]
    test_data = fold_datas[test_id]
    valid_data = fold_datas[valid_id]
    train_data = pd.concat([fold_datas[idx] for idx in train_ids])
    train_data.to_csv(train_csv, index=False)
    valid_data.to_csv(valid_csv, index=False)
    test_data.to_csv(test_csv, index=False)
    return (train_csv, valid_csv, [test_csv])
class Locator(object):
    """Base class for distribution locators (distlib-style).

    Subclasses implement `_get_project` and `get_distribution_names`;
    this base provides per-project caching, URL scoring/preference and
    requirement resolution via `locate`.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)
    # Optional list of wheel tags restricting which wheels are compatible.
    wheel_tags = None
    downloadable_extensions = (source_extensions + ('.whl',))

    def __init__(self, scheme='default'):
        """Create a locator using the named version scheme."""
        self._cache = {}
        self.scheme = scheme
        self.opener = build_opener(RedirectHandler())
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """Drain and return the list of errors collected on the queue."""
        result = []
        while (not self.errors.empty()):
            try:
                e = self.errors.get(False)
                result.append(e)
            # BUG FIX: was `self.errors.Empty` — Queue instances have no
            # `Empty` attribute (it lives on the queue module), so the
            # except clause itself raised AttributeError when triggered.
            except queue.Empty:
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """Discard any accumulated errors."""
        self.get_errors()

    def clear_cache(self):
        """Forget all cached project lookups."""
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value
    # Version-scheme name used for matching/sorting.
    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """Subclass hook: return the version->distribution mapping for name."""
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """Subclass hook: return all distribution names this locator knows."""
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """Return `_get_project(name)`, memoised in self._cache (a None
        cache disables memoisation)."""
        if (self._cache is None):
            result = self._get_project(name)
        elif (name in self._cache):
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """Return a sortable preference tuple for `url`: https first, then
        pypi.org hosting, downloadability, wheel-ness, compatibility and
        basename."""
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        # BUG FIX: this return statement was garbled; restored to score by
        # scheme ('https') and host ('pypi.org') first.
        return ((t.scheme == 'https'), ('pypi.org' in t.netloc), is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """Return whichever of url1/url2 scores higher (url2 on ties or
        when url1 is falsy)."""
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if (s1 > s2):
                result = url1
            if (result != url2):
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """Split a filename into (name, version, pyver); overridable hook."""
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """Return a download-info dict for `url` if it looks like an
        artifact of `project_name` (or of any project when None),
        else None."""
        def same_project(name1, name2):
            return (normalize_name(name1) == normalize_name(name2))
        result = None
        (scheme, netloc, path, params, query, frag) = urlparse(url)
        if frag.lower().startswith('egg='):
            logger.debug('%s: version hint in fragment: %r', project_name, frag)
        # A '#<algo>=<digest>' fragment carries the artifact checksum.
        m = HASHER_HASH.match(frag)
        if m:
            (algo, digest) = m.groups()
        else:
            (algo, digest) = (None, None)
        origpath = path
        if (path and (path[(- 1)] == '/')):
            path = path[:(- 1)]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if (not is_compatible(wheel, self.wheel_tags)):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if (project_name is None):
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join(['.'.join(list(v[2:])) for v in wheel.pyver])}
            except Exception as e:
                logger.warning('invalid path for wheel: %s', path)
        elif (not path.endswith(self.downloadable_extensions)):
            logger.debug('Not downloadable: %s', path)
        else:
            # Source archive: strip the extension and parse name/version.
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:(- len(ext))]
                    t = self.split_filename(path, project_name)
                    if (not t):
                        logger.debug('No match for project/version: %s', path)
                    else:
                        (name, version, pyver) = t
                        if ((not project_name) or same_project(project_name, name)):
                            result = {'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, ''))}
                            if pyver:
                                result['python-version'] = pyver
                    break
        if (result and algo):
            result[('%s_digest' % algo)] = digest
        return result

    def _get_digest(self, info):
        """Return (algo, digest) from an info dict, preferring the
        'digests' sub-dict and sha256 over md5; None when absent."""
        result = None
        if ('digests' in info):
            digests = info['digests']
            for algo in ('sha256', 'md5'):
                if (algo in digests):
                    result = (algo, digests[algo])
                    break
        if (not result):
            for algo in ('sha256', 'md5'):
                key = ('%s_digest' % algo)
                if (key in info):
                    result = (algo, info[key])
                    break
        return result

    def _update_version_data(self, result, info):
        """Fold one download-info dict into `result` (a version->dist map
        that also carries 'urls' and 'digests' entries)."""
        name = info.pop('name')
        version = info.pop('version')
        if (version in result):
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if (md.source_url != info['url']):
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """Return the newest distribution matching `requirement`, or None.

        Pre-release versions are skipped unless `prereleases` is true.
        Raises DistlibException for an unparsable requirement.
        """
        result = None
        r = parse_requirement(requirement)
        if (r is None):
            raise DistlibException(('Not a valid requirement: %r' % requirement))
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if (len(versions) > 2):
            # More than just the 'urls' and 'digests' bookkeeping keys.
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if (k in ('urls', 'digests')):
                    continue
                try:
                    if (not matcher.match(k)):
                        logger.debug('%s did not match %r', matcher, k)
                    elif (prereleases or (not vcls(k).is_prerelease)):
                        slist.append(k)
                    else:
                        logger.debug('skipping pre-release version %s of %s', k, matcher.name)
                except Exception:
                    logger.warning('error matching %s with %r', matcher, k)
                    pass
            if (len(slist) > 1):
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[(- 1)]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if (url in sd):
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
def class_doc_from_option(arg: Any) -> Optional[str]:
    """Validate the ``class-doc-from`` option value.

    Returns the value unchanged when it is one of 'both', 'class' or
    'init'; raises ValueError otherwise.
    """
    allowed = ('both', 'class', 'init')
    if arg not in allowed:
        raise ValueError((__('invalid value for class-doc-from option: %s') % arg))
    return arg
def test_string_cast():
    """Bytes->unicode casts: trailing NULs are stripped, byte order of the
    unicode dtype does not matter, and bytes/str arrays never compare
    equal on Python 3."""
    str_arr = np.array(['1234', '1234\x00\x00'], dtype='S')
    uni_arr1 = str_arr.astype('>U')
    uni_arr2 = str_arr.astype('<U')
    if (sys.version_info[0] < 3):
        # Python 2: bytes and unicode arrays compare equal.
        assert_array_equal(str_arr, uni_arr1)
        assert_array_equal(str_arr, uni_arr2)
    else:
        # Python 3: comparing S and U arrays yields inequality.
        # NOTE(review): exact semantics of S-vs-U comparison have changed
        # across numpy versions — confirm on the targeted numpy.
        assert_((str_arr != uni_arr1))
        assert_((str_arr != uni_arr2))
    assert_array_equal(uni_arr1, uni_arr2)
def schema_with_payload(empty_open_api_3_schema):
    """Fixture: schema with one POST /data operation taking a required
    plain-text string body and returning 200."""
    request_body = {'required': True, 'content': {'text/plain': {'schema': {'type': 'string'}}}}
    post_operation = {'requestBody': request_body, 'responses': {'200': {'description': 'OK'}}}
    empty_open_api_3_schema['paths'] = {'/data': {'post': post_operation}}
    return schemathesis.from_dict(empty_open_api_3_schema)
class bodypose_model(nn.Module):
    """OpenPose body-pose network: a VGG-style backbone (model0) followed
    by six refinement stages, each with an L1 branch (38 PAF channels)
    and an L2 branch (19 heatmap channels)."""

    def __init__(self):
        super(bodypose_model, self).__init__()
        # Conv layers that must NOT be followed by a ReLU: every stage's
        # final L1/L2 output conv.
        # BUG FIX: the last entry was a duplicated 'Mconv7_stage6_L1',
        # so stage 6's L2 output conv wrongly received a ReLU; the
        # symmetric per-stage pattern shows it must be the L2 layer.
        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
        blocks = {}
        # VGG-style feature extractor; each conv spec is
        # [in_ch, out_ch, kernel, stride, pad], pools are [kernel, stride, pad].
        block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), ('pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), ('conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3, 1, 1]), ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [256, 128, 3, 1, 1])])
        # Stage 1 branches: L1 predicts 38 PAF maps, L2 predicts 19 heatmaps.
        block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
        block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
        blocks['block1_1'] = block1_1
        blocks['block1_2'] = block1_2
        self.model0 = make_layers(block0, no_relu_layers)
        # Stages 2-6 refine on [L1 out, L2 out, backbone features] = 38+19+128 = 185 ch.
        for i in range(2, 7):
            blocks[('block%d_1' % i)] = OrderedDict([(('Mconv1_stage%d_L1' % i), [185, 128, 7, 1, 3]), (('Mconv2_stage%d_L1' % i), [128, 128, 7, 1, 3]), (('Mconv3_stage%d_L1' % i), [128, 128, 7, 1, 3]), (('Mconv4_stage%d_L1' % i), [128, 128, 7, 1, 3]), (('Mconv5_stage%d_L1' % i), [128, 128, 7, 1, 3]), (('Mconv6_stage%d_L1' % i), [128, 128, 1, 1, 0]), (('Mconv7_stage%d_L1' % i), [128, 38, 1, 1, 0])])
            blocks[('block%d_2' % i)] = OrderedDict([(('Mconv1_stage%d_L2' % i), [185, 128, 7, 1, 3]), (('Mconv2_stage%d_L2' % i), [128, 128, 7, 1, 3]), (('Mconv3_stage%d_L2' % i), [128, 128, 7, 1, 3]), (('Mconv4_stage%d_L2' % i), [128, 128, 7, 1, 3]), (('Mconv5_stage%d_L2' % i), [128, 128, 7, 1, 3]), (('Mconv6_stage%d_L2' % i), [128, 128, 1, 1, 0]), (('Mconv7_stage%d_L2' % i), [128, 19, 1, 1, 0])])
        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)
        self.model1_1 = blocks['block1_1']
        self.model2_1 = blocks['block2_1']
        self.model3_1 = blocks['block3_1']
        self.model4_1 = blocks['block4_1']
        self.model5_1 = blocks['block5_1']
        self.model6_1 = blocks['block6_1']
        self.model1_2 = blocks['block1_2']
        self.model2_2 = blocks['block2_2']
        self.model3_2 = blocks['block3_2']
        self.model4_2 = blocks['block4_2']
        self.model5_2 = blocks['block5_2']
        self.model6_2 = blocks['block6_2']

    def forward(self, x):
        """Run all six stages; each stage consumes the previous stage's two
        branch outputs concatenated with the backbone features.

        Returns (PAFs, heatmaps) from stage 6.
        """
        out1 = self.model0(x)
        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        out2 = torch.cat([out1_1, out1_2, out1], 1)
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        return (out6_1, out6_2)
def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True):
close_fd = False
if ((fd is None) and tempfile):
fd = TemporaryFile(mode=mode)
close_fd = True
if isinstance(fd, basestring):
fd = open(fd, mode=mode)
close_fd = True
try:
if isinstance(fd, os.PathLike):
fd = open(fd, mode=mode)
close_fd = True
except AttributeError:
pass
return (fd, close_fd) |
class NNPolicy(Policy, Serializable):
    """Base TF1 policy backed by a graph mapping observations to actions."""

    def __init__(self, env_spec, observation_ph, actions, scope_name=None):
        Serializable.quick_init(self, locals())
        self._observations_ph = observation_ph
        self._actions = actions
        # Default to the variable scope active at construction time.
        self._scope_name = (tf.get_variable_scope().name if (not scope_name) else scope_name)
        super(NNPolicy, self).__init__(env_spec)

    def get_action(self, observation):
        """Return (action, info_dict) for one observation (adds a batch dim)."""
        return (self.get_actions(observation[None])[0], {})

    def get_actions(self, observations):
        """Evaluate the actions tensor on a batch of observations in the
        default TF session."""
        feed_dict = {self._observations_ph: observations}
        actions = tf.get_default_session().run(self._actions, feed_dict)
        return actions

    def log_diagnostics(self, paths):
        """No-op; subclasses may log rollout diagnostics."""
        pass

    def get_params_internal(self, **tags):
        """Return this policy's trainable variables (tag filtering is not
        supported)."""
        if tags:
            raise NotImplementedError
        scope = self._scope_name
        # Append '/' so tf.get_collection matches the exact scope rather
        # than any scope sharing the prefix.
        scope = (scope if (scope == '') else (scope + '/'))
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
class TestPointerStructures():
    """Tests for reading IDL .sav structures containing pointers: values
    must round-trip and pointers to the same heap variable must resolve
    to the same Python object (checked via id / vect_id, defined
    elsewhere in this test module)."""

    def test_scalars(self):
        """Scalar pointers in a structure share one heap object."""
        s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
        assert_identical(s.pointers.g, np.array(np.float32(4.0), dtype=np.object_))
        assert_identical(s.pointers.h, np.array(np.float32(4.0), dtype=np.object_))
        assert_((id(s.pointers.g[0]) == id(s.pointers.h[0])))

    def test_pointers_replicated(self):
        """Replicated structures keep pointer identity across elements."""
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
        assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.0), 5).astype(np.object_))
        assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.0), 5).astype(np.object_))
        assert_(np.all((vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))))

    def test_pointers_replicated_3d(self):
        """Same as above for a (4, 3, 2) replicated structure."""
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
        s_expect = np.repeat(np.float32(4.0), 24).reshape(4, 3, 2).astype(np.object_)
        assert_identical(s.pointers_rep.g, s_expect)
        assert_identical(s.pointers_rep.h, s_expect)
        assert_(np.all((vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h))))

    def test_arrays(self):
        """Pointer arrays inside a structure all reference one heap value."""
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.0), 2).astype(np.object_))
        assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.0), 3).astype(np.object_))
        assert_(np.all((vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0]))))
        assert_(np.all((vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0]))))
        assert_((id(s.arrays.g[0][0]) == id(s.arrays.h[0][0])))

    def test_arrays_replicated(self):
        """Replicated pointer arrays: object dtype, shape, values, identity."""
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
        assert_((s.arrays_rep.g.dtype.type is np.object_))
        assert_((s.arrays_rep.h.dtype.type is np.object_))
        assert_equal(s.arrays_rep.g.shape, (5,))
        assert_equal(s.arrays_rep.h.shape, (5,))
        for i in range(5):
            assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.0), 2).astype(np.object_))
            assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.0), 3).astype(np.object_))
            assert_(np.all((vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0]))))
            assert_(np.all((vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0]))))

    def test_arrays_replicated_3d(self):
        """Same checks over a (4, 3, 2) replicated pointer-array structure."""
        pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
        s = readsav(pth, verbose=False)
        assert_((s.arrays_rep.g.dtype.type is np.object_))
        assert_((s.arrays_rep.h.dtype.type is np.object_))
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[(i, j, k)], np.repeat(np.float32(4.0), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[(i, j, k)], np.repeat(np.float32(4.0), 3).astype(np.object_))
                    # All elements must point at the same heap objects as (0, 0, 0).
                    g0 = vect_id(s.arrays_rep.g[(i, j, k)])
                    g1 = id(s.arrays_rep.g[(0, 0, 0)][0])
                    assert np.all((g0 == g1))
                    h0 = vect_id(s.arrays_rep.h[(i, j, k)])
                    h1 = id(s.arrays_rep.h[(0, 0, 0)][0])
                    assert np.all((h0 == h1))
class RegularPartitionTuples_all(RegularPartitionTuples):
    """The infinite enumerated set of all ell-regular partition tuples
    (any level, any size)."""

    def __init__(self, regular):
        RegularPartitionTuples.__init__(self, regular, category=InfiniteEnumeratedSets())

    def _repr_(self):
        return '{}-Regular partition tuples'.format(self._ell)

    def __iter__(self):
        """Enumerate by the anti-diagonal N = (level - 1) + size so every
        (level, size) combination is eventually reached."""
        for N in NN:
            for size in range((N + 1)):
                for mu in RegularPartitionTuples_level_size(((N - size) + 1), size, self._ell):
                    (yield self.element_class(self, list(mu)))
def top_sources_all(args: Dict[str, Any]) -> List[object]:
    """Build the MongoDB aggregation pipeline for the most-quoted sources.

    ``args`` must provide 'outlets', 'begin_date', 'end_date' (inclusive),
    'sort' (1 or -1) and 'limit'.  Returns the pipeline as a list of stages.
    """
    match_stage = {'$match': {
        'body': {'$ne': ''},
        'quotesUpdated': {'$exists': True},
        'outlet': {'$in': args['outlets']},
        # end_date is inclusive, hence the one-day extension combined with $lt.
        'publishedAt': {'$gte': args['begin_date'],
                        '$lt': (args['end_date'] + timedelta(days=1))},
    }}
    project_stage = {'$project': {
        'outlet': 1,
        'sourcesMale': 1,
        'sourcesFemale': 1,
        # Merge female and male source lists, treating a missing field as [].
        'allSources': {'$concatArrays': [
            {'$ifNull': ['$sourcesFemale', []]},
            {'$ifNull': ['$sourcesMale', []]},
        ]},
    }}
    return [
        match_stage,
        project_stage,
        {'$unwind': {'path': '$allSources', 'preserveNullAndEmptyArrays': False}},
        {'$group': {'_id': '$allSources', 'count': {'$sum': 1.0}}},
        {'$sort': {'count': args['sort']}},
        {'$limit': args['limit']},
    ]
def _mnist_dataset(dtype=np.float32):
    """Fetch MNIST from OpenML, scale features, and make a train/val split.

    Returns ``(X_train, X_val, y_train, y_val)`` with a 10% validation split
    and a fixed random seed for reproducibility.
    """
    data, labels = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
    data = data.astype(dtype, copy=False)
    # Scale each feature by its maximum absolute value.
    data = MaxAbsScaler().fit_transform(data)
    X_train, X_val, y_train, y_val = train_test_split(data, labels, test_size=0.1, random_state=0)
    return (X_train, X_val, y_train, y_val)
def test_date_time_units():
    """datetime64 data with a multiplied unit ('25s') survives the awkward round trip."""
    stamps = ['2020-07-27T10:41:11', '2019-01-01', '2020-01-01']
    seconds = np.array(stamps, 'datetime64[s]')
    coarse = np.array(stamps, 'datetime64[25s]')
    layout_s = ak.highlevel.Array(seconds).layout
    layout_c = ak.highlevel.Array(coarse).layout
    round_s = ak.operations.to_numpy(layout_s)
    round_c = ak.operations.to_numpy(layout_c)
    # The coarser unit may shift the value by at most one 25-second tick.
    if round_s[0] > round_c[0]:
        assert (round_s[0] - np.timedelta64(25, 's')) < round_c[0]
    else:
        assert (round_s[0] + np.timedelta64(25, 's')) >= round_c[0]
def split_supernet(run_manager, args, split_eid, split_crit, split_num, dis_metric='cos'):
    """Split one supernet edge's operator choices into ``split_num`` groups.

    With ``split_crit='grad'`` every candidate edge is scored by the pairwise
    gradient distance between its kernel-size choices (averaged over many
    minibatches) and partitioned by min-cut; the best-scoring edge wins.
    With ``split_crit='fewshot'`` the given edge is split randomly.

    Returns ``(encs_splitted, split_eid)``: one 0/1 encoding tensor per group
    and the id of the edge that was split.
    """
    run_manager.net.train()
    if (split_crit == 'grad'):
        if (split_eid is None):
            # No edge specified: consider every block whose kernel-size
            # encoding is still all ones (i.e. not split yet).
            eids = []
            for i in range(1, len(run_manager.net.blocks)):
                if (run_manager.net.blocks[i].mobile_inverted_conv.kernel_size_enc.sum(dim=(- 1)) == run_manager.net.blocks[i].mobile_inverted_conv.kernel_size_enc.size(0)):
                    eids.append(i)
        else:
            eids = [split_eid]
        (best_edge_score, best_eid, best_groups) = (0, 9999, None)
        for eid in eids:
            # Number of minibatches averaged into the distance matrix.
            repeat = 100
            dist_avg = 0
            n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0)
            for _ in range(repeat):
                encs = [None]  # NOTE(review): appears unused — confirm
                (images, labels) = next(iter(run_manager.run_config.train_loader))
                (images, labels) = (images.cuda(), labels.cuda())
                target = labels
                if (args.kd_ratio > 0):
                    # Knowledge distillation: get soft targets from the teacher.
                    args.teacher_model.train()
                    with torch.no_grad():
                        soft_logits = args.teacher_model(images).detach()
                        soft_label = F.softmax(soft_logits, dim=1)
                run_manager.optimizer.zero_grad()
                subnet_settings = run_manager.net.sample_active_subnet()
                split_op_grads = []
                for opid in range(n_choices):
                    # Activate one kernel-size choice at this edge and collect
                    # the gradients it induces on the shared weights.
                    run_manager.net.blocks[eid].mobile_inverted_conv.active_kernel_size = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_list[opid]
                    output = run_manager.net(images)
                    if (args.kd_ratio == 0):
                        loss = run_manager.train_criterion(output, labels)
                        loss_type = 'ce'
                    else:
                        if (args.kd_type == 'ce'):
                            kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
                        else:
                            kd_loss = F.mse_loss(output, soft_logits)
                        loss = ((args.kd_ratio * kd_loss) + run_manager.train_criterion(output, labels))
                        loss = (loss * (2 / (args.kd_ratio + 1)))
                    # Average the gradient contribution across workers.
                    loss = (loss / distributed.get_world_size())
                    run_manager.net.zero_grad()
                    loss.backward()
                    distributed.sync_grad_sum(run_manager.net)
                    grads = run_manager.net.get_split_gradients(split_eid=eid)
                    grads = [g.clone().detach() for g in grads]
                    split_op_grads.append(grads)
                # Pairwise gradient distances between the operator choices.
                dist_mat = torch.zeros((n_choices, n_choices))
                for opid_i in range(n_choices):
                    for opid_j in range(n_choices):
                        dist_mat[(opid_i, opid_j)] = match_loss(split_op_grads[opid_i], split_op_grads[opid_j], dis_metric=dis_metric)
                dist_avg += dist_mat
            dist_avg /= repeat
            if run_manager.is_root:
                print(((('\n' + 'edge ') + str(eid)) + ' distance matrix:'))
                print(('\n' + str(dist_avg)))
            # Partition this edge's choices by min-cut on the averaged distances.
            (groups, edge_score) = mincut_split_ofa(dist_avg.numpy(), split_num)
            if run_manager.is_root:
                print(('edge ' + str(eid)), groups, edge_score)
            if (edge_score > best_edge_score):
                best_edge_score = edge_score
                best_eid = eid
                best_groups = groups
        split_eid = best_eid
        groups = best_groups
    elif (split_crit == 'fewshot'):
        eid = split_eid
        n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0)
        groups = random_split_ofa(split_num, n_choices)
    else:
        print(f'ERROR: UNRECOGNIZED SPLIT CRITERIA: {split_crit}')
        exit(1)
    # Turn each group of choice indices into a 0/1 encoding vector.
    # NOTE(review): in the 'grad' branch ``eid`` here is the LAST edge
    # iterated, not necessarily ``best_eid`` — verify this is intended.
    encs_splitted = []
    for group in groups:
        n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0)
        enc = torch.zeros(n_choices)
        enc[torch.LongTensor(group)] = 1
        encs_splitted.append(enc)
    return (encs_splitted, split_eid)
class InvertedResidual(nn.Module):
    """MobileNet-style inverted residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None
        self.stride = stride
        assert stride in [1, 2]
        # A residual connection is only valid when spatial size and channel
        # count are unchanged by the block.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # 1x1 pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # 3x3 depthwise convolution (one group per channel)
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # 1x1 pointwise projection (no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
class EmptySlot(FixedSlot):
    """A type slot that is always emitted with the literal value '0'."""

    def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
        # An empty slot is just a FixedSlot pinned to '0'.
        FixedSlot.__init__(self, slot_name, '0', py3=py3, py2=py2, ifdef=ifdef)
class ElementwiseLoss(ElementwiseMetric):
    """Adapts a per-sample loss function into an element-wise metric."""

    def __init__(self, loss_fn, name=None):
        # Store the loss before delegating so base-class hooks can see it.
        self.loss_fn = loss_fn
        super().__init__(name=('loss' if name is None else name))

    def _compute_element_wise(self, y_pred, y_true):
        # Delegate straight to the wrapped loss function.
        return self.loss_fn(y_pred, y_true)

    def worst(self, metrics):
        # For a loss, the worst value is the largest one.
        return maximum(metrics)
def _opti_file_loader(ctx, fileloaders, nnp, filename, ext):
    """Restore optimizer (solver) states from a protobuf or h5 entry in ``nnp``.

    Logs a warning when no optimizer in ``ctx`` matches the file.
    """
    file_type = get_buf_type(filename)
    if file_type == 'protobuf':
        proto = nnabla_pb2.NNablaProtoBuf()
        with get_file_handle_load(nnp, filename, '.protobuf') as f:
            proto.MergeFromString(f.read())
        for p_opti in proto.optimizer:
            matched = ctx.optimizers.get(p_opti.name, None)
            if matched:
                matched.solver.set_states_from_protobuf(p_opti)
            else:
                logger.warning('No matched optimizer is found for {}.'.format(filename))
    elif file_type == 'h5':
        loaded = False
        for o in ctx.optimizers.values():
            # File names embed the optimizer name plus the solver name with
            # any trailing 'Cuda' suffix stripped.
            solver_name = re.sub('(|Cuda)$', '', str(o.solver.name))
            key = '{}_{}_optimizer.h5.optimizer'.format(o.name, solver_name)
            if key == filename:
                o.solver.set_states(load_solve_state_from_h5(nnp, filename))
                loaded = True
        if not loaded:
            logger.warning('No matched optimizer is found for {}.'.format(filename))
def _impl(array, counts, axis, highlevel, behavior, attrs):
    """Implementation of ``ak.unflatten``: regroup a flat array into lists.

    ``counts`` may be a single integer (producing a RegularArray) or a
    one-dimensional integer array, possibly with missing entries (producing a
    ListOffsetArray, masked where counts were missing).
    """
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        (layout, maybe_counts_layout) = ensure_same_backend(ctx.unwrap(array, allow_record=False, primitive_policy='error').to_packed(), ctx.unwrap(counts, allow_record=False, allow_unknown=True, primitive_policy='pass-through', string_policy='error'))
        if is_integer_like(maybe_counts_layout):
            # Scalar counts: every output list has the same length.
            if (is_unknown_scalar(maybe_counts_layout) or (maybe_counts_layout is unknown_length)):
                counts = unknown_length
            else:
                counts = int(counts)
            current_offsets = None
        else:
            if (maybe_counts_layout.is_indexed and (not maybe_counts_layout.is_option)):
                maybe_counts_layout = maybe_counts_layout.project()
            if (maybe_counts_layout.is_option and (maybe_counts_layout.content.is_numpy or maybe_counts_layout.content.is_unknown)):
                # Missing counts become 0-length lists, masked out afterwards.
                mask = maybe_counts_layout.mask_as_bool(valid_when=False)
                counts = ak.operations.fill_none(maybe_counts_layout, 0, axis=(- 1), highlevel=False).to_backend_array()
            elif (maybe_counts_layout.is_numpy or maybe_counts_layout.is_unknown):
                counts = maybe_counts_layout.to_backend_array()
                mask = False
            else:
                raise ValueError('counts must be an integer or a one-dimensional array of integers')
            if (counts.ndim != 1):
                raise ValueError('counts must be one-dimensional')
            if (not np.issubdtype(counts.dtype, np.integer)):
                raise ValueError('counts must be integers')
            # Prefix-sum the counts into offsets: [0, c0, c0+c1, ...].
            current_offsets = maybe_counts_layout.backend.index_nplike.empty((counts.size + 1), dtype=np.int64)
            current_offsets[0] = 0
            maybe_counts_layout.backend.index_nplike.cumsum(counts, maybe_out=current_offsets[1:])

        def unflatten_this_layout(layout):
            # Consumes a prefix of ``current_offsets`` matching this layout's
            # length and leaves the remainder (rebased to 0) for later calls.
            nonlocal current_offsets
            index_nplike = layout.backend.index_nplike
            if (isinstance(counts, int) or (counts is unknown_length)):
                if ((counts is not unknown_length) and (layout.length is not unknown_length) and (not (0 <= counts <= layout.length))):
                    raise ValueError('too large counts for array or negative counts')
                out = ak.contents.RegularArray(layout, counts)
            else:
                # Find how many offsets fit within this layout's length.
                position = (index_nplike.searchsorted(current_offsets, index_nplike.asarray([index_nplike.shape_item_as_index(layout.length)]), side='right')[0] - 1)
                if ((current_offsets.size is not unknown_length) and (layout.length is not unknown_length) and (not is_unknown_scalar(position)) and ((position >= current_offsets.size) or (current_offsets[position] != layout.length))):
                    raise ValueError(f"structure imposed by 'counts' does not fit in the array or partition at axis={axis}")
                offsets = current_offsets[:(position + 1)]
                current_offsets = (current_offsets[position:] - index_nplike.shape_item_as_index(layout.length))
                out = ak.contents.ListOffsetArray(ak.index.Index64(offsets), layout)
                if (not isinstance(mask, (bool, np.bool_))):
                    # Re-apply missingness from the original option-type counts.
                    index = ak.index.Index8(index_nplike.asarray(mask, dtype=np.int8), nplike=index_nplike)
                    out = ak.contents.ByteMaskedArray(index, out, valid_when=False)
            return out

        if ((axis == 0) or (maybe_posaxis(layout, axis, 1) == 0)):
            out = unflatten_this_layout(layout)
        else:
            def apply(layout, depth, backend, **kwargs):
                # Descend to the requested axis, then unflatten the flattened
                # content and re-derive the outer list structure.
                posaxis = maybe_posaxis(layout, axis, depth)
                if ((posaxis == depth) and layout.is_list):
                    listoffsetarray = layout.to_ListOffsetArray64(True)
                    outeroffsets = listoffsetarray.offsets
                    content = unflatten_this_layout(listoffsetarray.content[:outeroffsets[(- 1)]])
                    if isinstance(content, ak.contents.ByteMaskedArray):
                        inneroffsets = content.content.offsets
                    elif isinstance(content, ak.contents.RegularArray):
                        inneroffsets = content.to_ListOffsetArray64(True).offsets
                    else:
                        inneroffsets = content.offsets
                    # Each outer boundary must land exactly on an inner boundary.
                    positions = (backend.index_nplike.searchsorted(inneroffsets.data, outeroffsets.data, side='right') - 1)
                    if (backend.index_nplike.known_data and (not backend.index_nplike.array_equal(inneroffsets.data[positions], outeroffsets))):
                        raise ValueError(f"structure imposed by 'counts' does not fit in the array or partition at axis={axis}")
                    positions[0] = 0
                    return ak.contents.ListOffsetArray(ak.index.Index64(positions), content)
            out = ak._do.recursively_apply(layout, apply)
        # All offsets must be consumed, otherwise counts did not fit the data.
        if ((current_offsets is not None) and (current_offsets.size is not unknown_length) and (not ((current_offsets.size == 1) and (current_offsets[0] == 0)))):
            raise ValueError(f"structure imposed by 'counts' does not fit in the array or partition at axis={axis}")
        return ctx.wrap(out, highlevel=highlevel)
class Issue57ExecutableOnPath(ReBenchTestCase):
    """Regression test for issue #57: executables are resolved via PATH."""

    def setUp(self):
        super(Issue57ExecutableOnPath, self).setUp()
        self._set_path(__file__)

    def test_sleep_gives_results(self):
        store = DataStore(self.ui)
        config = load_config(self._path + '/issue_57.conf')
        cnf = Configurator(config, store, self.ui, data_file=self._tmp_file)
        # Sort by benchmark name so the assertions are order independent.
        runs = sorted(cnf.get_runs(), key=lambda run: run.benchmark.name)
        executor = Executor(runs, False, self.ui, False)
        executor.execute()
        self.assertEqual('Bench1', runs[0].benchmark.name)
        self.assertEqual(10, runs[0].get_number_of_data_points())
class BenchmarkRunner(object):
    """Drives operator micro-benchmarks selected by the command-line args.

    The runner filters registered tests, warms them up, repeatedly times
    forward (and optionally backward) execution until the measurement is
    significant, and reports results either human-readably or as AI-PEP
    JSON observer records.
    """

    def __init__(self, args):
        self.args = args
        self.iters = 100
        self.has_explicit_iteration_count = False
        self.multiplier = 2  # growth factor for the adaptive iteration count
        self.predefined_minimum_secs = 1
        self.max_iters = 1000000.0
        self.use_jit = args.use_jit
        self.num_runs = args.num_runs
        self.print_per_iter = False
        self.operator_range = benchmark_utils.get_operator_range(args.operator_range)
        # -1 means "not specified": fall back to the default warmup count.
        if self.args.warmup_iterations == -1:
            self.args.warmup_iterations = 100
        if self.args.iterations and self.args.iterations != -1:
            self.has_explicit_iteration_count = True
            self.iters = self.args.iterations
        # When a specific test is requested, the tag filter is irrelevant.
        if self.args.test_name is not None:
            self.args.tag_filter = None

    def _print_header(self):
        """Print the banner and, when listing was requested, a list header."""
        DASH_LINE = '-' * 40
        print('# {}\n# PyTorch/Caffe2 Operator Micro-benchmarks\n# {}\n# Tag : {}\n'.format(DASH_LINE, DASH_LINE, self.args.tag_filter))
        if self.args.list_tests:
            print('# List of tests:')
        elif self.args.list_ops:
            print('# List of Operators to run:')
            self.printed_ops_list = set()
            if self.args.operators:
                print('# {}'.format(self.args.operators))

    def _print_perf_result(self, reported_run_time_us, test_case):
        """Print the measured latency for one test case.

        In AI-PEP mode the per-iteration JSON observer records were already
        emitted by ``_measure_time``, so nothing is printed here.  (The old
        code also printed JSON observer lines in the human-readable path —
        dead code after a ``return`` upstream that got resurrected — which is
        removed.)
        """
        if self.args.ai_pep_format:
            return
        if test_case.framework == 'PyTorch':
            print('# Mode: {}'.format('JIT' if self.use_jit else 'Eager'))
        print('# Name: {}\n# Input: {}'.format(test_case.test_config.test_name, test_case.test_config.input_config))
        mode = 'Backward' if test_case.test_config.run_backward else 'Forward'
        if self.num_runs > 1:
            for run in range(self.num_runs):
                print('Run: {}, {} Execution Time (us) : {:.3f}'.format(run, mode, reported_run_time_us[run]))
            print()
        else:
            print('{} Execution Time (us) : {:.3f}\n'.format(mode, reported_run_time_us[0]))

    def _predict_num_iter_needed(self, i):
        """Next iteration count to try in the adaptive timing loop."""
        return i * self.multiplier

    def _iteration_result_is_significant(self, iters, run_time_sec, curr_test_total_time, has_explicit_iteration_count):
        """Whether the timing loop may stop.

        Stops once the iteration budget is exhausted, a single run was long
        enough, or the caller fixed the count — provided the minimum total
        time per test has also elapsed.
        """
        return ((iters > self.max_iters or run_time_sec > self.predefined_minimum_secs or has_explicit_iteration_count)
                and curr_test_total_time > self.args.min_time_per_test)

    def _launch_forward(self, test_case, iters, print_per_iter):
        """Time ``iters`` forward passes; returns elapsed seconds."""
        cuda_sync = 'cuda' in test_case.test_config.test_name
        func = test_case.run_forward
        if self.use_jit:
            func = test_case.run_jit_forward
        return timeit.timeit(functools.partial(func, iters, print_per_iter, cuda_sync), number=1)

    def _launch_backward(self, test_case, iters, print_per_iter=False):
        """Time ``iters`` backward passes (one forward pass prepares state)."""
        test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)
        if test_case.framework == 'PyTorch':
            test_case._output_mean()
        return timeit.timeit(functools.partial(test_case.run_backward, iters, print_per_iter), number=1)

    def _measure_time(self, launch_test, test_case, iters, print_per_iter):
        """Grow the iteration count until significant; return median us/iter."""
        curr_test_total_time = 0
        time_trace = []
        while True:
            run_time_sec = launch_test(test_case, iters, print_per_iter)
            curr_test_total_time += run_time_sec
            results_are_significant = self._iteration_result_is_significant(iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)
            report_run_time = 1000000.0 * run_time_sec / iters
            time_trace.append(report_run_time)
            if self.args.ai_pep_format:
                # One JSON record per measurement for AI-PEP consumers.
                mode = 'JIT' if self.use_jit else 'Eager'
                test_name = '_'.join([test_case.framework, test_case.test_config.test_name, mode])
                print('PyTorchObserver ' + json.dumps({'type': test_name, 'metric': 'latency', 'unit': 'ms', 'value': str(report_run_time / 1000.0)}))
            if results_are_significant:
                break
            # Not significant yet: try a larger iteration count.
            iters = self._predict_num_iter_needed(iters)
        return np.percentile(np.array(time_trace), 50)

    def _check_keep(self, test_flag, cmd_flag):
        """True when no filter is given or the value matches it."""
        return cmd_flag is None or test_flag == cmd_flag

    def _check_operator_first_char(self, test_flag, cmd_flag):
        """True when the operator's first letter falls in the requested range."""
        return cmd_flag is None or test_flag[:1].lower() in cmd_flag

    def _check_keep_list(self, test_flag, cmd_flag_list):
        """True when no list filter is given or the value appears in it."""
        return cmd_flag_list is None or any(test_flag == cmd_flag for cmd_flag in cmd_flag_list)

    def _keep_test(self, test_case):
        """Apply all command-line filters to one test case."""
        op_test_config = test_case.test_config
        # Treat an unset framework filter as "keep all" (previously
        # ``frameworks`` was only assigned when the flag was truthy, risking
        # a NameError below).
        frameworks = benchmark_utils.process_arg_list(self.args.framework) if self.args.framework else None
        operators = benchmark_utils.process_arg_list(self.args.operators) if self.args.operators else None
        if (self._check_keep(op_test_config.test_name, self.args.test_name)
                and self._check_keep_list(test_case.op_bench.module_name(), operators)
                and self._check_keep_list(test_case.framework, frameworks)
                and self._check_operator_first_char(test_case.op_bench.module_name(), self.operator_range)
                and (self.args.tag_filter == 'all' or self._check_keep(op_test_config.tag, self.args.tag_filter))
                and (not self.args.forward_only or op_test_config.run_backward != self.args.forward_only)
                and (self.args.device == 'None' or 'device' not in test_case.test_config.input_config or self.args.device in op_test_config.test_name)):
            return True
        return False

    def _print_test_case_info(self, test_case):
        """Handle --list_tests/--list_ops; True when the test should be skipped."""
        if self.args.list_tests:
            print('# {}'.format(test_case.test_config.test_name))
            return True
        elif self.args.list_ops:
            if self.args.operators is None:
                op_name = test_case.op_bench.module_name()
                if op_name not in self.printed_ops_list:
                    print('# {}'.format(op_name))
                    self.printed_ops_list.add(op_name)
            return True
        return False

    def run(self):
        """Run every selected benchmark and print its result."""
        self._print_header()
        for test_metainfo in BENCHMARK_TESTER:
            for test in _build_test(*test_metainfo):
                (full_test_id, test_case) = test
                op_test_config = test_case.test_config
                if self._print_test_case_info(test_case):
                    continue
                if not self._keep_test(test_case):
                    continue
                # Seed deterministically per test id so inputs are reproducible.
                np.random.seed(seed=hash(full_test_id) & ((1 << 32) - 1))
                print('# Benchmarking {}: {}'.format(test_case.framework, test_case.op_bench.module_name()))
                if op_test_config.run_backward:
                    launch_func = self._launch_backward
                else:
                    launch_func = self._launch_forward
                # Warmup pass — timing is discarded.
                launch_func(test_case, self.args.warmup_iterations, print_per_iter=False)
                reported_time = [self._measure_time(launch_func, test_case, self.iters, self.print_per_iter) for _ in range(self.num_runs)]
                self._print_perf_result(reported_time, test_case)
def check_file_exist(dir_name, file_name, md5=None):
    """Return True if ``file_name`` exists under ``dir_name``.

    When ``md5`` is given, the file must additionally match that checksum.
    ``dir_name`` may start with '~', which is expanded to the user's home.
    """
    file_path = os.path.join(os.path.expanduser(dir_name), file_name)
    exists = os.path.exists(file_path)
    if md5 is None:
        return exists
    # Existence alone is not enough: the content must match the checksum too.
    return exists and check_md5(file_path, md5)
def parallel_workload(x):
    """Fork PARALLEL_TASKS_NUM JIT tasks that repeatedly square ``x``.

    The forks exist purely to generate concurrent work; the original input
    tensor is returned unchanged after all tasks complete.
    """
    def _task(t):
        # Split INTERNAL_ITER total iterations evenly across the tasks.
        for _ in range(int(INTERNAL_ITER / PARALLEL_TASKS_NUM)):
            t = torch.mm(t, t)
        return t

    futures = [torch.jit._fork(_task, x) for _ in range(PARALLEL_TASKS_NUM)]
    for fut in futures:
        torch.jit._wait(fut)
    return x
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('fl', [1, 2, 3, 4, 5])
@pytest.mark.parametrize('fp', [1, 2, 3, 4, 5])
@pytest.mark.parametrize('center', [True, False])
def test_compatibility(device, fl, fp, center, T=20):
    """Frame followed by Unframe must reconstruct the ramp signal.

    ``fl`` is the frame length, ``fp`` the frame period, ``T`` the signal
    length.  (The decorator lines had lost their ``@pytest.mark`` prefix,
    which made this block a syntax error; restored here.)
    """
    if device == 'cuda' and not torch.cuda.is_available():
        return
    # Unframing is only well-defined when frames overlap or tile the signal.
    if fl < fp:
        return
    frame = diffsptk.Frame(fl, fp, center=center)
    unframe = diffsptk.Unframe(fl, fp, center=center)
    x = diffsptk.ramp(T)
    y = frame(x)
    # The reference signal runs up to the largest sample kept by framing.
    x2 = diffsptk.ramp(torch.max(y))
    z = unframe(y, out_length=x2.size(-1))
    assert torch.allclose(x2, z)
def get_call(method_name, func_type, args, kwargs):
    """Render a Python call string for the given torch API flavor.

    ``args`` are already-formatted argument strings; for ``'method'`` calls
    the first entry is the receiver.  Raises TypeError for unknown flavors.
    """
    receiver = args[0]
    positional = args[1:] if func_type == 'method' else args
    pieces = list(positional)
    if kwargs:
        pieces.append(', '.join('{}={}'.format(k, v) for (k, v) in kwargs.items()))
    argument_str = ', '.join(pieces)
    if func_type == 'functional':
        return 'torch.{}({})'.format(method_name, argument_str)
    if func_type == 'method':
        return '{}.{}({})'.format(receiver, method_name, argument_str)
    if func_type == 'nn_functional':
        return 'torch.nn.functional.{}({})'.format(method_name, argument_str)
    raise TypeError('Unsupported function type')
def trpo_step_td(policy_net, value_net, states, actions, next_states, rewards, masks, gamma, advantages, max_kl, damping, lambda_td=0, method_name='TRPO-TD', returns=0, mtd=1):
    """One TRPO policy update with an optional squared-value-error penalty.

    ``method_name`` selects how that penalty is computed: one-step TD targets
    ('TRPO-TD') or externally supplied ``returns`` ('TRPO-RET-MC'/'-GAE');
    plain 'TRPO' uses no penalty.  ``mtd`` toggles mean-centering of the
    penalty before it is scaled by ``lambda_td``.  Returns the line-search
    success flag.
    """
    if (method_name == 'TRPO-TD'):
        # Squared one-step TD error of the current value function (detached:
        # it only weights the policy loss, gradients do not flow through it).
        values_pred = value_net(states)
        next_v = value_net(next_states)
        target_v = (rewards + ((gamma * next_v) * masks))
        td_err2 = (values_pred - target_v).pow(2).detach()
        if mtd:
            td_err2 = ((td_err2 - td_err2.mean()) / td_err2.std())
        else:
            td_err2 = (td_err2 / td_err2.std())
    elif ((method_name == 'TRPO-RET-MC') or (method_name == 'TRPO-RET-GAE')):
        # Squared error against externally supplied returns.
        values_pred = value_net(states)
        target_v = returns.to(device)
        td_err2 = (values_pred - target_v).pow(2).detach()
        if mtd:
            td_err2 = ((td_err2 - td_err2.mean()) / td_err2.std())
        else:
            td_err2 = (td_err2 / td_err2.std())
    'update policy'
    fixed_log_probs = policy_net.get_log_prob(states, actions).data

    def get_loss(volatile=False):
        # Surrogate loss: importance-weighted (negative) advantage, optionally
        # penalized by the TD-error term.
        log_probs = policy_net.get_log_prob(states, actions)
        if ((method_name == 'TRPO-TD') or (method_name == 'TRPO-RET-MC') or (method_name == 'TRPO-RET-GAE')):
            action_loss = (((- advantages) + (lambda_td * td_err2)) * torch.exp((log_probs - fixed_log_probs)))
        elif (method_name == 'TRPO'):
            action_loss = ((- advantages) * torch.exp((log_probs - fixed_log_probs)))
        return action_loss.mean()
    'use fisher information matrix for Hessian*vector'

    def Fvp_fim(v):
        # Fisher-vector product v -> J^T M J v computed with two
        # Jacobian-vector products, avoiding the explicit Fisher matrix.
        (M, mu, info) = policy_net.get_fim(states)
        mu = mu.view((- 1))
        # For Gaussian policies the log-std parameters are handled separately.
        filter_input_ids = (set() if policy_net.is_disc_action else set([info['std_id']]))
        t = ones(mu.size()).requires_grad_()
        mu_t = (mu * t).sum()
        Jt = compute_flat_grad(mu_t, policy_net.parameters(), filter_input_ids=filter_input_ids, create_graph=True)
        Jtv = (Jt * v).sum()
        Jv = torch.autograd.grad(Jtv, t, retain_graph=True)[0]
        MJv = (M * Jv.data)
        mu_MJv = (MJv * mu).sum()
        JTMJv = compute_flat_grad(mu_MJv, policy_net.parameters(), filter_input_ids=filter_input_ids, retain_graph=True).data
        JTMJv /= states.shape[0]
        if (not policy_net.is_disc_action):
            # Add the log-std block's contribution (scaled identity).
            std_index = info['std_index']
            JTMJv[std_index:(std_index + M.shape[0])] += (2 * v[std_index:(std_index + M.shape[0])])
        return (JTMJv + (v * damping))
    loss = get_loss()
    grads = torch.autograd.grad(loss, policy_net.parameters())
    loss_grad = torch.cat([grad.view((- 1)) for grad in grads]).data
    # Natural-gradient direction via conjugate gradients on the FIM.
    stepdir = conjugate_gradients(Fvp_fim, (- loss_grad), 10)
    # Scale the step so the expected KL divergence stays within max_kl.
    shs = (0.5 * stepdir.dot(Fvp_fim(stepdir)))
    lm = math.sqrt((max_kl / shs))
    fullstep = (stepdir * lm)
    expected_improve = (- loss_grad.dot(fullstep))
    prev_params = get_flat_params_from(policy_net)
    (success, new_params) = line_search(policy_net, get_loss, prev_params, fullstep, expected_improve)
    set_flat_params_to(policy_net, new_params)
    return success
class IteratorUtilsTest(tf.test.TestCase):
    """Tests for ``iterator_utils``: batching, sharding and skipping of the
    seq2seq input pipelines.

    All tests use the vocabulary ['a', 'b', 'c', 'eos', 'sos'] mapped to ids
    0..4; out-of-vocabulary tokens map to -1.
    """

    def testGetIterator(self):
        """Train iterator: truncation to src_max_len, sos/eos ids, lengths."""
        tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(tf.constant(['a', 'b', 'c', 'eos', 'sos']))
        src_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['f e a g', 'c c a', 'd', 'c a']))
        tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['c c', 'a b', '', 'b c']))
        hparams = tf.contrib.training.HParams(random_seed=3, num_buckets=5, source_reverse=False, eos='eos', sos='sos')
        batch_size = 2
        src_max_len = 3
        iterator = iterator_utils.get_iterator(src_dataset=src_dataset, tgt_dataset=tgt_dataset, src_vocab_table=src_vocab_table, tgt_vocab_table=tgt_vocab_table, batch_size=batch_size, sos=hparams.sos, eos=hparams.eos, source_reverse=hparams.source_reverse, random_seed=hparams.random_seed, num_buckets=hparams.num_buckets, src_max_len=src_max_len)
        table_initializer = tf.tables_initializer()
        source = iterator.source
        target_input = iterator.target_input
        target_output = iterator.target_output
        src_seq_len = iterator.source_sequence_length
        tgt_seq_len = iterator.target_sequence_length
        # Batch and time dimensions are dynamic.
        self.assertEqual([None, None], source.shape.as_list())
        self.assertEqual([None, None], target_input.shape.as_list())
        self.assertEqual([None, None], target_output.shape.as_list())
        self.assertEqual([None], src_seq_len.shape.as_list())
        self.assertEqual([None], tgt_seq_len.shape.as_list())
        with self.test_session() as sess:
            sess.run(table_initializer)
            sess.run(iterator.initializer)
            # First batch: OOV tokens appear as -1; sos=4 prefixes target
            # input and eos=3 terminates target output.
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[(- 1), (- 1), 0], [2, 0, 3]], source_v)
            self.assertAllEqual([3, 2], src_len_v)
            self.assertAllEqual([[4, 2, 2], [4, 1, 2]], target_input_v)
            self.assertAllEqual([[2, 2, 3], [1, 2, 3]], target_output_v)
            self.assertAllEqual([3, 3], tgt_len_v)
            # Second (final, partial) batch.
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[2, 2, 0]], source_v)
            self.assertAllEqual([3], src_len_v)
            self.assertAllEqual([[4, 0, 1]], target_input_v)
            self.assertAllEqual([[0, 1, 3]], target_output_v)
            self.assertAllEqual([3], tgt_len_v)
            with self.assertRaisesOpError('End of sequence'):
                sess.run(source)

    def testGetIteratorWithShard(self):
        """Sharding (num_shards=2, shard_index=1) yields only that shard."""
        tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(tf.constant(['a', 'b', 'c', 'eos', 'sos']))
        src_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['c c a', 'f e a g', 'd', 'c a']))
        tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['a b', 'c c', '', 'b c']))
        hparams = tf.contrib.training.HParams(random_seed=3, num_buckets=5, source_reverse=False, eos='eos', sos='sos')
        batch_size = 2
        src_max_len = 3
        iterator = iterator_utils.get_iterator(src_dataset=src_dataset, tgt_dataset=tgt_dataset, src_vocab_table=src_vocab_table, tgt_vocab_table=tgt_vocab_table, batch_size=batch_size, sos=hparams.sos, eos=hparams.eos, source_reverse=hparams.source_reverse, random_seed=hparams.random_seed, num_buckets=hparams.num_buckets, src_max_len=src_max_len, num_shards=2, shard_index=1)
        table_initializer = tf.tables_initializer()
        source = iterator.source
        target_input = iterator.target_input
        target_output = iterator.target_output
        src_seq_len = iterator.source_sequence_length
        tgt_seq_len = iterator.target_sequence_length
        self.assertEqual([None, None], source.shape.as_list())
        self.assertEqual([None, None], target_input.shape.as_list())
        self.assertEqual([None, None], target_output.shape.as_list())
        self.assertEqual([None], src_seq_len.shape.as_list())
        self.assertEqual([None], tgt_seq_len.shape.as_list())
        with self.test_session() as sess:
            sess.run(table_initializer)
            sess.run(iterator.initializer)
            # Only one batch: the other shard's examples are excluded.
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[(- 1), (- 1), 0], [2, 0, 3]], source_v)
            self.assertAllEqual([3, 2], src_len_v)
            self.assertAllEqual([[4, 2, 2], [4, 1, 2]], target_input_v)
            self.assertAllEqual([[2, 2, 3], [1, 2, 3]], target_output_v)
            self.assertAllEqual([3, 3], tgt_len_v)
            with self.assertRaisesOpError('End of sequence'):
                sess.run(source)

    def testGetIteratorWithSkipCount(self):
        """skip_count drops leading examples; re-initializing resets it."""
        tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(tf.constant(['a', 'b', 'c', 'eos', 'sos']))
        src_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['c a', 'c c a', 'd', 'f e a g']))
        tgt_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['b c', 'a b', '', 'c c']))
        hparams = tf.contrib.training.HParams(random_seed=3, num_buckets=5, source_reverse=False, eos='eos', sos='sos')
        batch_size = 2
        src_max_len = 3
        skip_count = tf.placeholder(shape=(), dtype=tf.int64)
        iterator = iterator_utils.get_iterator(src_dataset=src_dataset, tgt_dataset=tgt_dataset, src_vocab_table=src_vocab_table, tgt_vocab_table=tgt_vocab_table, batch_size=batch_size, sos=hparams.sos, eos=hparams.eos, source_reverse=hparams.source_reverse, random_seed=hparams.random_seed, num_buckets=hparams.num_buckets, src_max_len=src_max_len, skip_count=skip_count)
        table_initializer = tf.tables_initializer()
        source = iterator.source
        target_input = iterator.target_input
        target_output = iterator.target_output
        src_seq_len = iterator.source_sequence_length
        tgt_seq_len = iterator.target_sequence_length
        self.assertEqual([None, None], source.shape.as_list())
        self.assertEqual([None, None], target_input.shape.as_list())
        self.assertEqual([None, None], target_output.shape.as_list())
        self.assertEqual([None], src_seq_len.shape.as_list())
        self.assertEqual([None], tgt_seq_len.shape.as_list())
        with self.test_session() as sess:
            sess.run(table_initializer)
            # Skip the first 3 examples: only one remains.
            sess.run(iterator.initializer, feed_dict={skip_count: 3})
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[(- 1), (- 1), 0]], source_v)
            self.assertAllEqual([3], src_len_v)
            self.assertAllEqual([[4, 2, 2]], target_input_v)
            self.assertAllEqual([[2, 2, 3]], target_output_v)
            self.assertAllEqual([3], tgt_len_v)
            with self.assertRaisesOpError('End of sequence'):
                sess.run(source)
            # Re-initialize with no skipping: all examples come back.
            sess.run(iterator.initializer, feed_dict={skip_count: 0})
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[2, 0, 3], [(- 1), (- 1), 0]], source_v)
            self.assertAllEqual([2, 3], src_len_v)
            self.assertAllEqual([[4, 1, 2], [4, 2, 2]], target_input_v)
            self.assertAllEqual([[1, 2, 3], [2, 2, 3]], target_output_v)
            self.assertAllEqual([3, 3], tgt_len_v)
            (source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = sess.run((source, src_seq_len, target_input, target_output, tgt_seq_len))
            self.assertAllEqual([[2, 2, 0]], source_v)
            self.assertAllEqual([3], src_len_v)
            self.assertAllEqual([[4, 0, 1]], target_input_v)
            self.assertAllEqual([[0, 1, 3]], target_output_v)
            self.assertAllEqual([3], tgt_len_v)
            with self.assertRaisesOpError('End of sequence'):
                sess.run(source)

    def testGetInferIterator(self):
        """Inference iterator: source only, eos-padded, no targets."""
        src_vocab_table = lookup_ops.index_table_from_tensor(tf.constant(['a', 'b', 'c', 'eos', 'sos']))
        src_dataset = tf.contrib.data.Dataset.from_tensor_slices(tf.constant(['c c a', 'c a', 'd', 'f e a g']))
        hparams = tf.contrib.training.HParams(random_seed=3, source_reverse=False, eos='eos', sos='sos')
        batch_size = 2
        src_max_len = 3
        iterator = iterator_utils.get_infer_iterator(src_dataset=src_dataset, src_vocab_table=src_vocab_table, batch_size=batch_size, eos=hparams.eos, source_reverse=hparams.source_reverse, src_max_len=src_max_len)
        table_initializer = tf.tables_initializer()
        source = iterator.source
        seq_len = iterator.source_sequence_length
        self.assertEqual([None, None], source.shape.as_list())
        self.assertEqual([None], seq_len.shape.as_list())
        with self.test_session() as sess:
            sess.run(table_initializer)
            sess.run(iterator.initializer)
            # Shorter sequences are padded with eos (id 3) within a batch.
            (source_v, seq_len_v) = sess.run((source, seq_len))
            self.assertAllEqual([[2, 2, 0], [2, 0, 3]], source_v)
            self.assertAllEqual([3, 2], seq_len_v)
            (source_v, seq_len_v) = sess.run((source, seq_len))
            self.assertAllEqual([[(- 1), 3, 3], [(- 1), (- 1), 0]], source_v)
            self.assertAllEqual([1, 3], seq_len_v)
            with self.assertRaisesOpError('End of sequence'):
                sess.run((source, seq_len))
def ParseArguments(args):
    """Parse the cpplint-style command line.

    Applies the recognized flags to module state (output format, verbosity,
    filters, counting style, root, line length, extensions) and returns the
    list of file names to lint.  Invalid usage exits via PrintUsage.
    """
    try:
        (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'linelength=', 'extensions='])
    except getopt.GetoptError:
        PrintUsage('Invalid arguments.')
    verbosity = _VerboseLevel()
    output_format = _OutputFormat()
    filters = ''
    counting_style = ''
    for (flag, value) in opts:
        if flag == '--help':
            PrintUsage(None)
        elif flag == '--output':
            if value not in ('emacs', 'vs7', 'eclipse'):
                PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
            output_format = value
        elif flag == '--verbose':
            verbosity = int(value)
        elif flag == '--filter':
            filters = value
            # An explicit empty filter asks for the category listing.
            if not filters:
                PrintCategories()
        elif flag == '--counting':
            if value not in ('total', 'toplevel', 'detailed'):
                PrintUsage('Valid counting options are total, toplevel, and detailed')
            counting_style = value
        elif flag == '--root':
            global _root
            _root = value
        elif flag == '--linelength':
            global _line_length
            try:
                _line_length = int(value)
            except ValueError:
                PrintUsage('Line length must be digits.')
        elif flag == '--extensions':
            global _valid_extensions
            try:
                _valid_extensions = set(value.split(','))
            except ValueError:
                PrintUsage('Extensions must be comma separated list.')
    if not filenames:
        PrintUsage('No files were specified.')
    # Commit the parsed settings to module state.
    _SetOutputFormat(output_format)
    _SetVerboseLevel(verbosity)
    _SetFilters(filters)
    _SetCountingStyle(counting_style)
    return filenames
class SerializedError():
    """Serializable snapshot of a runtime error: its classified type plus
    human-readable title/message/extras and formatted exception text.

    NOTE(review): the bare field annotations and the ``cls``-first
    constructors strongly suggest this class is decorated with
    ``@dataclass`` and the constructors with ``@classmethod`` in the real
    source — confirm upstream; as written here the constructors would need
    to be called on the class object explicitly.
    """
    type: RuntimeErrorType
    title: (str | None)
    message: (str | None)
    extras: list[str]
    exception: str
    exception_with_traceback: str
    def with_exception(cls, type_: RuntimeErrorType, title: (str | None), message: (str | None), extras: list[str], exception: Exception) -> SerializedError:
        """Build a SerializedError, formatting *exception* both with and without traceback."""
        return cls(type=type_, title=title, message=message, extras=extras, exception=format_exception(exception), exception_with_traceback=format_exception(exception, True))
    def from_exception(cls, exception: Exception) -> SerializedError:
        """Classify *exception* into a RuntimeErrorType with a readable title/message/extras."""
        # Imported lazily so the module can be imported without these deps loaded.
        import requests
        import hypothesis.errors
        from hypothesis import HealthCheck
        title = 'Runtime Error'
        message: (str | None)
        # Network-level failures raised by requests.
        if isinstance(exception, requests.RequestException):
            if isinstance(exception, requests.exceptions.SSLError):
                type_ = RuntimeErrorType.CONNECTION_SSL
            elif isinstance(exception, requests.exceptions.ConnectionError):
                type_ = RuntimeErrorType.CONNECTION_OTHER
            else:
                type_ = RuntimeErrorType.NETWORK_OTHER
            (message, extras) = extract_requests_exception_details(exception)
            title = 'Network Error'
        elif isinstance(exception, DeadlineExceeded):
            type_ = RuntimeErrorType.HYPOTHESIS_DEADLINE_EXCEEDED
            message = str(exception).strip()
            extras = []
        # Hypothesis complains about unknown GraphQL scalars via InvalidArgument
        # whose message starts with 'Scalar '.
        elif (isinstance(exception, hypothesis.errors.InvalidArgument) and str(exception).startswith('Scalar ')):
            scalar_name = _scalar_name_from_error(exception)
            type_ = RuntimeErrorType.HYPOTHESIS_UNSUPPORTED_GRAPHQL_SCALAR
            message = f"Scalar type '{scalar_name}' is not recognized"
            extras = []
            title = 'Unknown GraphQL Scalar'
        elif isinstance(exception, hypothesis.errors.Unsatisfiable):
            type_ = RuntimeErrorType.HYPOTHESIS_UNSATISFIABLE
            message = f'{exception}. Possible reasons:'
            extras = ['- Contradictory schema constraints, such as a minimum value exceeding the maximum.', '- Excessive schema complexity, which hinders parameter generation.']
            title = 'Schema Error'
        elif isinstance(exception, hypothesis.errors.FailedHealthCheck):
            # Map known health checks to curated messages; anything else
            # falls back to UNCLASSIFIED with the raw exception text.
            health_check = _health_check_from_error(exception)
            if (health_check is not None):
                (message, type_) = {HealthCheck.data_too_large: (HEALTH_CHECK_MESSAGE_DATA_TOO_LARGE, RuntimeErrorType.HYPOTHESIS_HEALTH_CHECK_DATA_TOO_LARGE), HealthCheck.filter_too_much: (HEALTH_CHECK_MESSAGE_FILTER_TOO_MUCH, RuntimeErrorType.HYPOTHESIS_HEALTH_CHECK_FILTER_TOO_MUCH), HealthCheck.too_slow: (HEALTH_CHECK_MESSAGE_TOO_SLOW, RuntimeErrorType.HYPOTHESIS_HEALTH_CHECK_TOO_SLOW), HealthCheck.large_base_example: (HEALTH_CHECK_MESSAGE_LARGE_BASE_EXAMPLE, RuntimeErrorType.HYPOTHESIS_HEALTH_CHECK_LARGE_BASE_EXAMPLE)}[health_check]
            else:
                type_ = RuntimeErrorType.UNCLASSIFIED
                message = str(exception)
            extras = []
            title = 'Failed Health Check'
        elif isinstance(exception, OperationSchemaError):
            if isinstance(exception, BodyInGetRequestError):
                type_ = RuntimeErrorType.SCHEMA_BODY_IN_GET_REQUEST
            elif isinstance(exception, InvalidRegularExpression):
                type_ = RuntimeErrorType.SCHEMA_INVALID_REGULAR_EXPRESSION
            else:
                type_ = RuntimeErrorType.SCHEMA_GENERIC
            message = exception.message
            extras = []
            title = 'Schema Error'
        elif isinstance(exception, SerializationError):
            if isinstance(exception, UnboundPrefixError):
                type_ = RuntimeErrorType.SERIALIZATION_UNBOUNDED_PREFIX
                title = 'XML serialization error'
            else:
                title = 'Serialization not possible'
                type_ = RuntimeErrorType.SERIALIZATION_NOT_POSSIBLE
            message = str(exception)
            extras = []
        else:
            # Catch-all for anything we do not recognize.
            type_ = RuntimeErrorType.UNCLASSIFIED
            message = str(exception)
            extras = []
        return cls.with_exception(type_=type_, exception=exception, title=title, message=message, extras=extras)
def execute_sql_with_column_info(sql_query, database='restaurants', user='select_user', password='select_user', unprotected=False):
    """Run *sql_query* against a local PostgreSQL instance.

    Returns ``(rows, column_info)`` where ``column_info`` is a list of
    ``(column_name, type_name)`` pairs; array types are rendered as
    ``'<base>[]'``.  On a psycopg2 error, returns ``([], [])`` unless
    *unprotected* is True, in which case the error is re-raised.

    Fixes over the original: the cursor and connection are now closed on
    every exit path (the error path previously leaked both), and the
    exception is re-raised with a bare ``raise`` to preserve the traceback.
    """
    start_time = time.time()
    conn = psycopg2.connect(database=database, user=user, password=password, host='127.0.0.1', port='5432', options='-c statement_timeout=30000 -c client_encoding=UTF8')
    cursor = conn.cursor()
    try:
        # Belt-and-braces: enforce the timeout at the session level too.
        cursor.execute('SET statement_timeout = 30000')
        conn.commit()
        try:
            print('executing SQL {}'.format(sql_query))
            cursor.execute(sql_query)
            results = cursor.fetchall()
            column_names = [desc[0] for desc in cursor.description]
            column_type_oids = [desc[1] for desc in cursor.description]
            # Resolve type OIDs to readable names.  In pg_type a leading
            # underscore marks an array type.
            cursor.execute('SELECT oid, typname FROM pg_type WHERE oid = ANY(%s);', (column_type_oids,))
            type_map = {}
            for (oid, typname) in cursor.fetchall():
                if typname.startswith('_'):
                    type_map[oid] = (typname[1:] + '[]')
                else:
                    type_map[oid] = typname
            column_types = [type_map[oid] for oid in column_type_oids]
            column_info = list(zip(column_names, column_types))
        except psycopg2.Error as e:
            print('Error executing SQL query:', e)
            if unprotected:
                raise
            return ([], [])
    finally:
        # Always release the database resources, even when the query failed.
        cursor.close()
        conn.close()
    end_time = time.time()
    elapsed_time = (end_time - start_time)
    print(elapsed_time)
    return (list(results), column_info)
class BidirectionalGRU(nn.Module):
    """LayerNorm + ELU front-end followed by a single bidirectional GRU layer
    and dropout; output feature size is ``2 * hidden_size``."""

    def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
        super(BidirectionalGRU, self).__init__()
        # Submodules are created in the same order as before so parameter
        # initialization and checkpoint layouts are unchanged.
        self.BiGRU = nn.GRU(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1, batch_first=batch_first, bidirectional=True)
        self.layer_norm = nn.LayerNorm(rnn_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Normalize, activate, run the GRU, and apply dropout."""
        activated = F.elu(self.layer_norm(x))
        rnn_out, _ = self.BiGRU(activated)
        return self.dropout(rnn_out)
class TILGAN():
    """TF1 graph for a Transformer-based story infilling model with a GAN over
    the sentence latent (TILGAN-style).

    One of the five sentences of a story is masked; a Transformer encoder with
    masked and unmasked ("post") paths produces a latent for the missing
    sentence, a generator/discriminator pair shapes the latent space, and a
    projection head reconstructs the tokens.  Graph construction, batching and
    the train/eval/infer session steps all live in this class.

    NOTE(review): all behavior notes below are read off this constructor; the
    helpers (multihead_attention, feedforward, normalize, w_encoder_attention,
    positional_encoding, generator, discriminator) are defined elsewhere.
    """
    def __init__(self, hparams, mode):
        """Build the whole graph. *mode* is a tf.contrib.learn.ModeKeys value;
        TRAIN additionally builds optimizers, INFER swaps the posterior latent
        for a generated one and omits target placeholders."""
        self.hparams = hparams
        self.vocab_size = hparams.from_vocab_size
        self.num_units = hparams.num_units
        self.emb_dim = hparams.emb_dim
        self.num_layers = hparams.num_layers
        self.num_heads = hparams.num_heads
        self.learning_rate = tf.Variable(float(hparams.learning_rate), trainable=False)
        self.clip_value = hparams.clip_value
        # Fixed sequence budgets: 5 sentences of up to 21+4 tokens total.
        self.max_story_length = 105
        self.max_single_length = 25
        self.latent_dim = hparams.latent_dim
        self.dropout_rate = hparams.dropout_rate
        self.init_weight = hparams.init_weight
        self.flag = True
        self.mode = mode
        self.batch_size = hparams.batch_size
        if (self.mode == tf.contrib.learn.ModeKeys.TRAIN):
            self.is_training = True
        else:
            self.is_training = False
        # Placeholders; INFER has no targets/weights since nothing is scored.
        if (self.mode != tf.contrib.learn.ModeKeys.INFER):
            self.input_ids = tf.placeholder(tf.int32, [None, None])
            self.input_scopes = tf.placeholder(tf.int32, [None, None])
            self.input_positions = tf.placeholder(tf.int32, [None, None])
            self.input_masks = tf.placeholder(tf.int32, [None, None, None])
            self.input_lens = tf.placeholder(tf.int32, [None])
            self.targets = tf.placeholder(tf.int32, [None, None])
            self.weights = tf.placeholder(tf.float32, [None, None])
            self.input_windows = tf.placeholder(tf.float32, [None, 4, None])
            self.which = tf.placeholder(tf.int32, [None])
        else:
            self.input_ids = tf.placeholder(tf.int32, [None, None])
            self.input_scopes = tf.placeholder(tf.int32, [None, None])
            self.input_positions = tf.placeholder(tf.int32, [None, None])
            self.input_masks = tf.placeholder(tf.int32, [None, None, None])
            self.input_lens = tf.placeholder(tf.int32, [None])
            self.input_windows = tf.placeholder(tf.float32, [None, 4, None])
            self.which = tf.placeholder(tf.int32, [None])
        # Word embeddings come pre-trained from hparams; scope (sentence id)
        # embeddings are learned from scratch at half the embedding width.
        with tf.variable_scope('embedding') as scope:
            self.word_embeddings = tf.Variable(hparams.embeddings, trainable=True)
            self.scope_embeddings = tf.Variable(self.init_matrix([9, int((self.emb_dim / 2))]))
        with tf.variable_scope('project'):
            self.output_layer = layers_core.Dense(self.vocab_size, use_bias=True)
            self.mid_output_layer = layers_core.Dense(self.vocab_size, use_bias=True)
            self.input_layer = layers_core.Dense(self.num_units, use_bias=False)
        with tf.variable_scope('encoder') as scope:
            # Token representation = word emb ++ scope emb ++ positional emb.
            self.word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.input_ids)
            self.scope_emb = tf.nn.embedding_lookup(self.scope_embeddings, self.input_scopes)
            self.pos_emb = positional_encoding(self.input_positions, self.batch_size, self.max_single_length, int((self.emb_dim / 2)))
            self.embs = tf.concat([self.word_emb, self.scope_emb, self.pos_emb], axis=2)
            inputs = self.input_layer(self.embs)
            self.query = tf.get_variable('w_Q', [1, self.num_units], dtype=tf.float32)
            windows = tf.transpose(self.input_windows, [1, 0, 2])
            layers_outputs = []
            post_inputs = inputs
            # Two parallel Transformer stacks sharing weights (AUTO_REUSE on
            # the "post" path): `inputs` sees the custom masks that hide the
            # target sentence, `post_inputs` attends without masking.
            for i in range(self.num_layers):
                with tf.variable_scope('num_layers_{}'.format(i)):
                    outputs = multihead_attention(queries=inputs, keys=inputs, query_length=self.input_lens, key_length=self.input_lens, num_units=self.num_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, using_mask=True, mymasks=self.input_masks, scope='self_attention')
                    outputs = (outputs + inputs)
                    inputs = normalize(outputs)
                    outputs = feedforward(inputs, [(self.num_units * 2), self.num_units], is_training=self.is_training, dropout_rate=self.dropout_rate, scope='f1')
                    outputs = (outputs + inputs)
                    inputs = normalize(outputs)
                    post_outputs = multihead_attention(queries=post_inputs, keys=post_inputs, query_length=self.input_lens, key_length=self.input_lens, num_units=self.num_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, using_mask=False, mymasks=None, scope='self_attention', reuse=tf.AUTO_REUSE)
                    post_outputs = (post_outputs + post_inputs)
                    post_inputs = normalize(post_outputs)
                    post_outputs = feedforward(post_inputs, [(self.num_units * 2), self.num_units], is_training=self.is_training, dropout_rate=self.dropout_rate, scope='f1', reuse=tf.AUTO_REUSE)
                    post_outputs = (post_outputs + post_inputs)
                    post_inputs = normalize(post_outputs)
            # Union of the four context-sentence windows (everything but the
            # masked sentence) used to pool the prior latent.
            big_window = (((windows[0] + windows[1]) + windows[2]) + windows[3])
            (post_encode, weight) = w_encoder_attention(self.query, post_inputs, self.input_lens, num_units=self.num_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, using_mask=False, mymasks=None, scope='concentrate_attention')
            (prior_encode, weight) = w_encoder_attention(self.query, inputs, self.input_lens, num_units=self.num_units, num_heads=self.num_heads, dropout_rate=self.dropout_rate, is_training=self.is_training, using_mask=True, mymasks=big_window, scope='concentrate_attention', reuse=tf.AUTO_REUSE)
            # Generator maps (prior latent, noise) -> fake posterior latent.
            z = tf.random_normal(tf.shape(prior_encode))
            gen_input = tf.concat([prior_encode, z], axis=1)
            fake_sample = generator(gen_input)
            post_encode = tf.layers.dense(tf.layers.dense(post_encode, 256, activation=tf.nn.tanh, name='ae_1'), 64, use_bias=False, name='ae_2')
            real_result = discriminator(post_encode)
            fake_result = discriminator(fake_sample)
            # Decoder conditioning: posterior latent at train/eval time,
            # generated latent at inference time.
            if (self.mode != tf.contrib.learn.ModeKeys.INFER):
                latent_sample = tf.tile(tf.expand_dims(post_encode, 1), [1, self.max_story_length, 1])
            else:
                latent_sample = tf.tile(tf.expand_dims(fake_sample, 1), [1, self.max_story_length, 1])
            inputs = tf.concat([inputs, latent_sample], axis=2)
            inputs = tf.layers.dense(inputs, self.num_units, activation=tf.tanh, use_bias=False, name='last')
            self.logits = self.output_layer(inputs)
            self.s = self.logits
            self.sample_id = tf.argmax(self.logits, axis=2)
        if (self.mode != tf.contrib.learn.ModeKeys.INFER):
            with tf.variable_scope('loss') as scope:
                self.global_step = tf.Variable(0, trainable=False)
                crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.targets, logits=self.logits)
                # `weights` zeroes out everything except the masked sentence.
                self.total_loss = tf.reduce_sum((crossent * self.weights))
                kl_weights = tf.minimum((tf.to_float(self.global_step) / 20000), 1.0)
                self.loss = tf.reduce_mean((crossent * self.weights))
                # Non-saturating GAN losses, scaled down by 0.02.
                self.disc_loss = ((tf.reduce_mean(tf.nn.softplus(fake_result)) + tf.reduce_mean(tf.nn.softplus((- real_result)))) * 0.02)
                self.gen_loss = (tf.reduce_mean((- (tf.clip_by_value(tf.exp(fake_result), 0.5, 2) * fake_result))) * 0.02)
                self.gan_ae_loss = (tf.reduce_mean(real_result) * 0.02)
        if (self.mode == tf.contrib.learn.ModeKeys.TRAIN):
            with tf.variable_scope('train_op') as scope:
                # One shared Adam instance; separate apply ops per objective so
                # generator / discriminator / AE variables update independently.
                optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.9, beta2=0.99, epsilon=1e-09)
                (gradients, v) = zip(*optimizer.compute_gradients(self.loss))
                (gradients, _) = tf.clip_by_global_norm(gradients, 5.0)
                self.train_op = optimizer.apply_gradients(zip(gradients, v), global_step=self.global_step)
                t_vars = tf.trainable_variables()
                d_vars = [var for var in t_vars if ('discriminator_' in var.name)]
                g_vars = [var for var in t_vars if ('generator_' in var.name)]
                ae_vars = [var for var in t_vars if ('GAN' not in var.name)]
                (gradients_gen, v_gen) = zip(*optimizer.compute_gradients(self.gen_loss, var_list=g_vars))
                (gradients_disc, v_disc) = zip(*optimizer.compute_gradients(self.disc_loss, var_list=d_vars))
                (gradients_gan_ae, v_ae) = zip(*optimizer.compute_gradients(self.gan_ae_loss, var_list=ae_vars))
                (gradients_gen, _gen) = tf.clip_by_global_norm(gradients_gen, 5.0)
                (gradients_disc, _disc) = tf.clip_by_global_norm(gradients_disc, 5.0)
                (gradients_gan_ae, _gan_ae) = tf.clip_by_global_norm(gradients_gan_ae, 5.0)
                self.gen_step = optimizer.apply_gradients(zip(gradients_gen, v_gen))
                self.disc_step = optimizer.apply_gradients(zip(gradients_disc, v_disc))
                self.gan_ae_step = optimizer.apply_gradients(zip(gradients_gan_ae, v_ae))
        self.saver = tf.train.Saver(tf.global_variables())
    def get_batch(self, data, no_random=False, id=0, which=0, position=None):
        """Assemble one batch of stories with one sentence chosen for masking.

        Each story is laid out as 5 sentences of GO + tokens + EOS (+ GO
        separator), padded to max_story_length.  `weights` select the target
        sentence's tokens for the loss, `input_masks` hide it from the masked
        attention path, and `input_windows` mark the four context sentences.
        With no_random=True the story and target sentence are chosen
        deterministically from *id*/*position*.
        """
        hparams = self.hparams
        input_scopes = []
        input_ids = []
        input_positions = []
        input_lens = []
        input_masks = []
        input_which = []
        input_windows = []
        targets = []
        weights = []
        for i in range(hparams.batch_size):
            if no_random:
                if (position != None):
                    x = data[((id + i) % len(data))]
                    which_stn = position
                else:
                    x = data[((id + i) % len(data))]
                    which_stn = ((id + i) % 5)
            else:
                x = random.choice(data)
                which_stn = random.randint(0, 4)
            input_which.append(which_stn)
            mask = []
            input_scope = []
            input_id = []
            input_position = []
            input_mask = []
            target = []
            weight = []
            for j in range(0, 5):
                input_id.append(GO_ID)
                input_scope.append(j)
                input_position.append(0)
                for k in range(0, len(x[j])):
                    input_id.append(x[j][k])
                    input_scope.append(j)
                    input_position.append((k + 1))
                    target.append(x[j][k])
                    if (j == which_stn):
                        weight.append(1.0)
                        mask.append(0)
                    else:
                        weight.append(0.0)
                        mask.append(1)
                target.append(EOS_ID)
                if (j == which_stn):
                    weight.append(1.0)
                    mask.append(0)
                else:
                    weight.append(0.0)
                    mask.append(1)
                input_id.append(EOS_ID)
                input_scope.append(j)
                input_position.append((len(x[j]) + 1))
                target.append(GO_ID)
                if (j == which_stn):
                    weight.append(0.0)
                    mask.append(0)
                else:
                    weight.append(0.0)
                    mask.append(1)
                # The target sentence is padded out to the fixed single-sentence
                # budget so its slot always has the same width.
                if (j == which_stn):
                    for k in range((len(x[j]) + 2), self.max_single_length):
                        input_id.append(PAD_ID)
                        input_scope.append(j)
                        input_position.append(k)
                        target.append(PAD_ID)
                        weight.append(0.0)
                        mask.append(0)
            input_lens.append(len(input_id))
            for k in range(0, (self.max_story_length - input_lens[i])):
                input_id.append(PAD_ID)
                input_scope.append(4)
                input_position.append(0)
                target.append(PAD_ID)
                weight.append(0.0)
                mask.append(0)
            input_ids.append(input_id)
            input_scopes.append(input_scope)
            input_positions.append(input_position)
            targets.append(target)
            weights.append(weight)
            tmp_mask = mask.copy()
            last = 0
            window = []
            # One 0/1 window per context sentence (the masked one is skipped).
            for k in range(0, 5):
                start = last
                if (k != 4):
                    last = input_scope.index((k + 1))
                else:
                    last = self.max_story_length
                if (k != which_stn):
                    window.append(((([0] * start) + ([1] * (last - start))) + ([0] * (self.max_story_length - last))))
            input_windows.append(window)
            # Per-position attention masks: context tokens see everything but
            # the target sentence; target tokens additionally see themselves
            # and previously revealed target positions (causal fill-in).
            for k in range(input_lens[i]):
                if (input_scope[k] != which_stn):
                    input_mask.append(mask)
                else:
                    tmp_mask[k] = 1
                    input_mask.append(tmp_mask.copy())
            for k in range(input_lens[i], self.max_story_length):
                input_mask.append(mask)
            input_mask = np.array(input_mask)
            input_masks.append(input_mask)
        return (input_ids, input_scopes, input_positions, input_masks, input_lens, input_which, targets, weights, input_windows)
    def train_step(self, sess, data):
        """Run one reconstruction step plus one discriminator and one
        generator/AE step; returns losses, step, and target word count."""
        (input_ids, input_scopes, input_positions, input_masks, input_lens, input_which, targets, weights, input_windows) = self.get_batch(data)
        feed = {self.input_ids: input_ids, self.input_scopes: input_scopes, self.input_positions: input_positions, self.input_masks: input_masks, self.input_lens: input_lens, self.weights: weights, self.targets: targets, self.input_windows: input_windows, self.which: input_which}
        word_nums = sum((sum(weight) for weight in weights))
        (loss, global_step, _, total_loss) = sess.run([self.loss, self.global_step, self.train_op, self.total_loss], feed_dict=feed)
        (loss_disc, global_step, _) = sess.run([self.disc_loss, self.global_step, self.disc_step], feed_dict=feed)
        (loss_gen, loss_gan_ae, global_step, _, _) = sess.run([self.gen_loss, self.gan_ae_loss, self.global_step, self.gen_step, self.gan_ae_step], feed_dict=feed)
        return (total_loss, global_step, word_nums, loss_disc, loss_gen, loss_gan_ae)
    def eval_step(self, sess, data, no_random=False, id=0):
        """Compute the summed reconstruction loss and target word count on one batch."""
        (input_ids, input_scopes, input_positions, input_masks, input_lens, input_which, targets, weights, input_windows) = self.get_batch(data, no_random, id)
        feed = {self.input_ids: input_ids, self.input_scopes: input_scopes, self.input_positions: input_positions, self.input_masks: input_masks, self.input_lens: input_lens, self.weights: weights, self.targets: targets, self.input_windows: input_windows, self.which: input_which}
        (loss, logits) = sess.run([self.total_loss, self.logits], feed_dict=feed)
        word_nums = sum((sum(weight) for weight in weights))
        return (loss, word_nums)
    def infer_step(self, sess, data, no_random=False, id=0, which=0, position=None):
        """Greedily decode the masked sentence token by token.

        Returns (context with the target slot blanked to UNK, gold target
        tokens, predicted tokens) per batch element.
        """
        (input_ids, input_scopes, input_positions, input_masks, input_lens, input_which, targets, weights, input_windows) = self.get_batch(data, no_random, id, which=which, position=position)
        start_pos = []
        given = []
        ans = []
        predict = []
        hparams = self.hparams
        for i in range(self.hparams.batch_size):
            start_pos.append(input_scopes[i].index(input_which[i]))
            given.append(((input_ids[i][:start_pos[i]] + ([UNK_ID] * self.max_single_length)) + input_ids[i][(start_pos[i] + self.max_single_length):]))
            ans.append(input_ids[i][start_pos[i]:(start_pos[i] + self.max_single_length)].copy())
            predict.append([])
        # Feed each predicted token back into the input for the next position.
        for i in range((self.max_single_length - 1)):
            feed = {self.input_ids: input_ids, self.input_scopes: input_scopes, self.input_positions: input_positions, self.input_masks: input_masks, self.input_lens: input_lens, self.input_windows: input_windows, self.which: input_which}
            sample_id = sess.run(self.sample_id, feed_dict=feed)
            for batch in range(self.hparams.batch_size):
                input_ids[batch][((start_pos[batch] + i) + 1)] = sample_id[batch][(start_pos[batch] + i)]
                predict[batch].append(sample_id[batch][(start_pos[batch] + i)])
        return (given, ans, predict)
    def init_matrix(self, shape):
        """Gaussian initializer (stddev 0.1) used for the scope embeddings."""
        return tf.random_normal(shape, stddev=0.1)
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition() -> None:
    """optimize() must reject data/model dictionaries keyed by tags the
    default acquisition rule does not recognize."""
    search_space = Box([(- 1)], [1])
    optimizer = BayesianOptimizer((lambda x: x[:1]), search_space)
    datasets = {FOO: empty_dataset([1], [1])}
    models = {FOO: _PseudoTrainableQuadratic()}
    with pytest.raises(ValueError):
        optimizer.optimize(3, datasets, models)
class PointwiseConv1d(nn.Module):
    """1x1 ("pointwise") 1-D convolution: mixes channels at each time step
    without looking across time."""

    def __init__(self, in_channels: int, out_channels: int, stride: int=1, padding: int=0, bias: bool=True) -> None:
        super(PointwiseConv1d, self).__init__()
        # kernel_size is fixed to 1 — that is what makes it pointwise.
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, padding=padding, bias=bias)

    def forward(self, inputs: Tensor) -> Tensor:
        """Apply the channel-mixing convolution to (batch, channels, time) input."""
        return self.conv(inputs)
def create_param_table(params=None, height=100):
    """Build an editable two-column (Parameter / Value) Dash DataTable.

    *params* maps parameter names to specs with a 'default' entry; when it is
    None or empty, a single blank placeholder row is shown.
    """
    if params:
        rows = [{'Parameter': name, 'Value': str(spec['default'])} for (name, spec) in params.items()]
    else:
        rows = [{'Parameter': '', 'Value': ''}]
    columns = [{'id': 'Parameter', 'name': 'Parameter'}, {'id': 'Value', 'name': 'Value'}]
    return dash_table.DataTable(
        data=rows,
        columns=columns,
        editable=True,
        style_header_conditional=[{'textAlign': 'center'}],
        style_cell_conditional=[{'textAlign': 'center'}],
        style_table={'overflowX': 'scroll', 'overflowY': 'scroll', 'height': height},
        style_header=dict(backgroundColor=TABLE_HEADER_COLOR),
        style_data=dict(backgroundColor=TABLE_DATA_COLOR),
    )
def test_shannon_all_unique():
    """Entropy of 64 distinct, equiprobable values is log2(64) = 6 bits."""
    img = np.arange(64)
    expected = np.log(64) / np.log(2)
    assert_almost_equal(shannon_entropy(img, base=2), expected)
class NLTKTokenizer():
    """NLTK-based word/sentence tokenizer that shields real periods from the
    tokenizer by swapping them with DUMMYTOKEN before tokenizing.

    NOTE(review): both methods call ``text.replace('', '.')``, which in Python
    inserts a '.' between every character — almost certainly a mangled
    character literal (the first argument was probably some special glyph).
    Confirm against the original source before trusting this class.
    """
    def word_tokenize(self, text: str) -> List[str]:
        """Tokenize *text* into words, restoring protected periods afterwards."""
        # Protect genuine periods from nltk's tokenizer.
        text = text.replace('.', DUMMYTOKEN)
        # NOTE(review): suspicious — replaces the empty string (see class note).
        text = text.replace('', '.')
        tokens = nltk.word_tokenize(text)
        new_tokens = []
        for token in tokens:
            # Drop the inserted dots, then restore the protected periods.
            token = token.replace('.', '')
            token = token.replace(DUMMYTOKEN, '.')
            new_tokens.append(token)
        return new_tokens
    def sentence_tokenize(self, text: str) -> List[str]:
        """Split *text* into sentences with the same period-protection scheme."""
        text = text.replace('.', DUMMYTOKEN)
        # NOTE(review): suspicious — replaces the empty string (see class note).
        text = text.replace('', '.')
        tokens = nltk.tokenize.sent_tokenize(text)
        new_tokens = []
        for token in tokens:
            token = token.replace('.', '')
            token = token.replace(DUMMYTOKEN, '.')
            new_tokens.append(token)
        return new_tokens
def make_net(genome, config, _batch_size):
    """Build an AdaptiveLinearNet (CPU) for *genome* with fixed 2-D substrate
    coordinates: 4 inputs, 3 outputs, tanh activations everywhere.

    Bug fix: the body previously referenced the undefined name ``batch_size``
    while the parameter is ``_batch_size``, raising NameError at call time.
    """
    input_coords = [[(- 1.0), 0.0], [0.0, 0.0], [1.0, 0.0], [0.0, (- 1.0)]]
    output_coords = [[(- 1.0), 0.0], [0.0, 0.0], [1.0, 0.0]]
    return AdaptiveLinearNet.create(genome, config, input_coords=input_coords, output_coords=output_coords, weight_threshold=0.4, batch_size=_batch_size, activation=tanh_activation, output_activation=tanh_activation, device='cpu')
class _MaxPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'dilation', 'return_indices', 'ceil_mode']
return_indices: bool
ceil_mode: bool
def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t]=None, padding: _size_any_t=0, dilation: _size_any_t=1, return_indices: bool=False, ceil_mode: bool=False) -> None:
super(_MaxPoolNd, self).__init__()
self.kernel_size = kernel_size
self.stride = (stride if (stride is not None) else kernel_size)
self.padding = padding
self.dilation = dilation
self.return_indices = return_indices
self.ceil_mode = ceil_mode
def extra_repr(self) -> str:
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}, dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__) |
def test_matches_datetime_format():
    """A US-style date string should satisfy the %m/%d/%Y format."""
    assert matches_datetime_format('1/1/2020', '%m/%d/%Y') is True
def create_vadd_sdfg(name, array_shape=dace.symbol('n'), map_range=dace.symbol('n')):
    """Build a DaCe SDFG named *name* that computes z = x + y elementwise.

    NOTE(review): ``vadd`` is presumably decorated with ``@dace.program``
    upstream — a plain function has no ``to_sdfg()`` — confirm against the
    original source.
    """
    def vadd(x: dace.float32[array_shape], y: dace.float32[array_shape], z: dace.float32[array_shape]):
        # Parallel map over the range; each tasklet adds one element pair.
        for i in dace.map[0:map_range]:
            with dace.tasklet:
                (xin << x[i])
                (yin << y[i])
                (zout >> z[i])
                zout = (xin + yin)
    sdfg = vadd.to_sdfg()
    sdfg.name = name
    # Strict transformations simplify the graph without changing semantics.
    sdfg.apply_strict_transformations()
    return sdfg
def test_to_categorical_none():
    """Round-trip to/from categorical on an option-typed (None-bearing) string array."""
    arr = ak.Array(['one', 'two', 'three', None, 'one', 'two', 'three', None, 'one', 'two', 'three', None])
    assert not ak.operations.ak_is_categorical.is_categorical(arr)
    cat = ak.str.to_categorical(arr)
    assert ak.operations.ak_is_categorical.is_categorical(cat)
    # Values survive the conversion; only the representation changes.
    assert ak.to_list(arr) == cat.to_list()
    # The dictionary holds each distinct (non-None) value exactly once.
    assert ak.to_list(cat.layout.content) == ['one', 'two', 'three']
    plain = ak.operations.ak_from_categorical.from_categorical(cat)
    assert not ak.operations.ak_is_categorical.is_categorical(plain)
    assert ak.operations.ak_categories.categories(cat).to_list() == ['one', 'two', 'three']
()
class MinMaxRewardScaler(RewardScaler):
    """Scales rewards to [0, multiplier] via min-max normalization:
    ``multiplier * (r - minimum) / (maximum - minimum)``.

    NOTE(review): the bare field annotations, the self-less ``get_type`` and
    the attribute-style ``built`` suggest this class is decorated upstream
    (dataclass-style class decorator, ``@staticmethod`` on get_type,
    ``@property`` on built) — the stray ``()`` on the preceding line looks
    like decorator residue; confirm against the original source.
    """
    minimum: Optional[float] = None
    maximum: Optional[float] = None
    multiplier: float = 1.0
    def fit_with_transition_picker(self, episodes: Sequence[EpisodeBase], transition_picker: TransitionPickerProtocol) -> None:
        """Fit min/max from every transition reward in *episodes*."""
        assert (not self.built)
        rewards = []
        for episode in episodes:
            for i in range(episode.transition_count):
                transition = transition_picker(episode, i)
                rewards.append(transition.reward)
        self.minimum = float(np.min(rewards))
        self.maximum = float(np.max(rewards))
    def fit_with_trajectory_slicer(self, episodes: Sequence[EpisodeBase], trajectory_slicer: TrajectorySlicerProtocol) -> None:
        """Fit min/max from full-episode reward trajectories."""
        assert (not self.built)
        rewards = [trajectory_slicer(episode, (episode.size() - 1), episode.size()).rewards for episode in episodes]
        self.minimum = float(np.min(rewards))
        self.maximum = float(np.max(rewards))
    def transform(self, x: torch.Tensor) -> torch.Tensor:
        """Min-max scale a reward tensor; requires a fitted scaler."""
        assert self.built
        assert ((self.maximum is not None) and (self.minimum is not None))
        base = (self.maximum - self.minimum)
        return ((self.multiplier * (x - self.minimum)) / base)
    def reverse_transform(self, x: torch.Tensor) -> torch.Tensor:
        """Invert ``transform`` on a tensor."""
        assert self.built
        assert ((self.maximum is not None) and (self.minimum is not None))
        base = (self.maximum - self.minimum)
        return (((x * base) / self.multiplier) + self.minimum)
    def transform_numpy(self, x: NDArray) -> NDArray:
        """NumPy twin of ``transform``."""
        assert self.built
        assert ((self.maximum is not None) and (self.minimum is not None))
        base = (self.maximum - self.minimum)
        return ((self.multiplier * (x - self.minimum)) / base)
    def reverse_transform_numpy(self, x: NDArray) -> NDArray:
        """NumPy twin of ``reverse_transform``."""
        assert self.built
        assert ((self.maximum is not None) and (self.minimum is not None))
        base = (self.maximum - self.minimum)
        return (((x * base) / self.multiplier) + self.minimum)
    def get_type() -> str:
        # Registry key for this scaler type (likely a @staticmethod upstream).
        return 'min_max'
    def built(self) -> bool:
        # Fitted iff both bounds are set (likely a @property upstream).
        return ((self.minimum is not None) and (self.maximum is not None))
def test_lm_example_handles_ignore_id():
    """Positions that predict ignore_id must be zeroed out of the loss mask."""
    Pos = hax.Axis('Pos', 10)
    Vocab = hax.Axis('vocab', (Pos.size + 1))
    tokens = hax.arange(Pos, dtype=jnp.int32)
    ignore_id = 6
    masked_ex = LmExample.causal(tokens, ignore_id=ignore_id)
    plain_ex = LmExample.causal(tokens)
    # The position whose next token is the ignored id carries no loss weight.
    assert (masked_ex.loss_mask[(Pos, (ignore_id - 1))] == 0)
    # Penalize the ignored id heavily; only the unmasked example should pay.
    penalty = ((- 100) * hax.nn.one_hot(ignore_id, Vocab)).broadcast_axis(Pos)
    masked_loss = next_token_loss(Pos, Vocab, penalty, tokens, loss_mask=masked_ex.loss_mask)
    plain_loss = next_token_loss(Pos, Vocab, penalty, tokens, loss_mask=plain_ex.loss_mask)
    assert (plain_loss.item() >= (masked_loss.item() + (100 / Pos.size)))
def make_all_rules(operations: list[APIOperation], bundles: dict[(str, CaseInsensitiveDict)], connections: APIOperationConnections) -> dict[(str, Rule)]:
    """Create one state-machine rule per API operation, keyed by
    'rule <verbose_name>'; operations without a rule are skipped."""
    rules = {}
    for operation in operations:
        bundle = bundles[operation.path][operation.method.upper()]
        rule = make_rule(operation, bundle, connections)
        if rule is not None:
            rules[f'rule {operation.verbose_name}'] = rule
    return rules
class Sequential(torch.nn.Module):
    """A sequential module container that also accepts keyword-named children.

    Children may be given as a single OrderedDict, as positional modules
    (named by their index), or as keyword arguments (py36+, where keyword
    order is preserved).  Calling the container pipes the input through
    every child in insertion order.
    """

    def __init__(self, *args, **kwargs):
        super(Sequential, self).__init__()
        if (len(args) == 1) and isinstance(args[0], OrderedDict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)
        for name, module in kwargs.items():
            # Keyword order is only guaranteed from Python 3.6 on.
            if sys.version_info < (3, 6):
                raise ValueError('kwargs only supported in py36+')
            if name in self._modules:
                raise ValueError('name exists.')
            self.add_module(name, module)

    def __getitem__(self, idx):
        """Return the idx-th child module; supports negative indices."""
        if not ((-len(self)) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += len(self)
        return list(self._modules.values())[idx]

    def __len__(self):
        return len(self._modules)

    def add(self, module, name=None):
        """Append *module*; the name defaults to the current child count."""
        if name is None:
            name = str(len(self._modules))
        if name in self._modules:
            raise KeyError('name exists')
        self.add_module(name, module)

    def forward(self, input):
        """Feed *input* through every child module in order."""
        out = input
        for module in self._modules.values():
            out = module(out)
        return out
def options():
    """Return the lazily-parsed options singleton.

    Bug fix: the original assigned ``_options_singelton`` inside the function
    without a ``global`` declaration, which made the name local — so the read
    on the first line raised UnboundLocalError on every call.  The ``global``
    statement restores the intended module-level singleton behavior.
    """
    global _options_singelton
    if _options_singelton is None:
        _options_singelton = _parse_options()
    return _options_singelton
def model_file_has_bert(filename):
    """Return True when the checkpoint's model state dict contains BERT weights."""
    # map_location lambda keeps every tensor on CPU regardless of where it was saved.
    checkpoint = torch.load(filename, (lambda storage, loc: storage))
    model_keys = checkpoint['model'].keys()
    return any(key.startswith('bert_model.') for key in model_keys)
def acc_and_f1(preds, labels):
    """Return accuracy, F1, and their mean for the given predictions.

    Deprecated entry point (emits FutureWarning); requires sklearn.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    combined = (acc + f1) / 2
    return {'acc': acc, 'f1': f1, 'acc_and_f1': combined}
def weights_init(m):
    """Xavier-initialize the weights of Conv2d layers; leave other modules untouched.

    Intended for use with ``model.apply(weights_init)``.
    """
    if not isinstance(m, torch.nn.Conv2d):
        return
    torch.nn.init.xavier_normal_(m.weight)
def get_scorep_config(config_line=None):
    """Query Score-P's config summary.

    With no argument, returns all summary lines; with *config_line*, returns
    the first line containing that substring, or None if there is no match.
    Raises RuntimeError when scorep-info cannot be invoked.
    """
    (return_code, std_out, std_err) = call(['scorep-info', 'config-summary'])
    if return_code != 0:
        raise RuntimeError('Cannot call Score-P, reason {}'.format(std_err))
    lines = std_out.split('\n')
    if config_line is None:
        return lines
    for line in lines:
        if config_line in line:
            return line
    return None
class DWConv(nn.Module):
    """Depthwise 3x3 convolution applied to a flattened (B, N, C) token sequence.

    Callers supply the spatial height/width so the token axis can be folded
    back into an image for the convolution.
    """

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        # groups=dim makes the convolution depthwise (one filter per channel).
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x, H, W):
        """Run the depthwise conv on tokens reshaped to an H x W grid."""
        batch, _, channels = x.shape
        # (B, N, C) -> (B, C, H, W) for the spatial convolution.
        spatial = x.transpose(1, 2).view(batch, channels, H, W)
        spatial = self.dwconv(spatial)
        # Back to the (B, N, C) token layout.
        return spatial.flatten(2).transpose(1, 2)
def __run_shadow(args):
    """Run the shadow simulator, optionally under chrt realtime scheduling.

    Output goes to <prefix>/shadow.log (optionally compressed).  Returns the
    CompletedProcess, or None when a required executable is missing.
    """
    if args.shadow_exe is None:
        logging.warning('Cannot find shadow in your PATH. Do you have shadow installed? Did you update your PATH?')
        logging.warning('Unable to run simulation without shadow.')
        return None
    shadow_cmd_str = f'{args.shadow_exe} {args.shadow_args} {args.shadow_config}'
    if args.use_realtime:
        chrt_exe_path = which('chrt')
        if chrt_exe_path is None:
            logging.warning('Cannot find chrt in your PATH. Do you have chrt installed?')
            logging.warning('Unable to run simulation with realtime scheduling without chrt.')
            return None
        # Prefix with chrt to request the FIFO realtime scheduler at priority 1.
        shadow_cmd_str = f'{chrt_exe_path} --fifo 1 {shadow_cmd_str}'
    with open_writeable_file(f'{args.prefix}/shadow.log', compress=args.do_compress) as outf:
        return subprocess.run(cmdsplit(shadow_cmd_str), cwd=args.prefix, stdout=outf)
def index_class_label(arr: np.ndarray):
    """Map each label in *arr* to its index among the sorted unique labels."""
    return np.unique(arr, return_inverse=True)[1]
class Trainer():
    """Trains a QA model and checkpoints it whenever the chosen metric improves."""

    def __init__(self, dataset, config, _type='qa'):
        """Build the QA model from *config* with the dataset's pretrained embeddings."""
        Model = QA.Model
        self.model = Model(config, pre_embed=dataset.vec.embeddings)
        self.metrics = calc_metrics_qa
        self.display_metrics = True

    def train(self, train_data, test_data, n_iters=20, save_on_metric='accuracy'):
        """Run *n_iters* epochs; after each, evaluate on *test_data* and save
        the model when *save_on_metric* improves on the best seen so far."""
        best_metric = 0.0
        for i in tqdm(range(n_iters)):
            self.model.train(train_data)
            (predictions, attentions) = self.model.evaluate(test_data)
            predictions = np.array(predictions)
            test_metrics = self.metrics(test_data.A, predictions)
            if self.display_metrics:
                print_metrics(test_metrics)
            metric = test_metrics[save_on_metric]
            # i > 0 keeps the very first epoch from freezing the checkpoint.
            if (metric > best_metric) and (i > 0):
                best_metric = metric
                save_model = True
                print('Model Saved on ', save_on_metric, metric)
            else:
                save_model = False
                print('Model not saved on ', save_on_metric, metric)
            dirname = self.model.save_values(save_model=save_model)
            # Bug fix: the original opened this file without a context manager,
            # leaking the handle if the write failed.
            with open(dirname + '/epoch.txt', 'a') as f:
                f.write(str(test_metrics) + '\n')
def _format(val: Any, output_format: str='standard', split: bool=False, errors: str='coarse') -> Any:
    """Validate and reformat an Argentinian CUIT number.

    Always returns a one-element list: the formatted value on success, NaN for
    null/invalid input (or the raw value when errors == 'ignore').  With
    errors == 'raise', invalid input raises ValueError instead.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_ar_cuit(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if (errors == 'ignore') else np.nan]
    formatted: Any = []
    if output_format == 'compact':
        formatted.append(cuit.compact(val))
    elif output_format == 'standard':
        formatted.append(cuit.format(val))
    return formatted
class CyBreak(CythonCommand):
    """gdb command 'cy break': set breakpoints on Cython source lines
    (module:lineno), Cython function names, or — with -p — Python functions.
    """
    name = 'cy break'
    command_class = gdb.COMMAND_BREAKPOINTS
    def _break_pyx(self, name):
        """Set a breakpoint at 'module:lineno' by translating the Cython line
        to the generated C source line."""
        (modulename, _, lineno) = name.partition(':')
        lineno = int(lineno)
        if modulename:
            cython_module = self.cy.cython_namespace[modulename]
        else:
            # No module given: use the module of the currently selected function.
            cython_module = self.get_cython_function().module
        if (lineno in cython_module.lineno_cy2c):
            c_lineno = cython_module.lineno_cy2c[lineno]
            breakpoint = ('%s:%s' % (cython_module.c_filename, c_lineno))
            gdb.execute(('break ' + breakpoint))
        else:
            raise gdb.GdbError('Not a valid line number. Does it contain actual code?')
    def _break_funcname(self, funcname):
        """Set breakpoint(s) on a Cython function name, prompting when the
        unqualified name is ambiguous; falls back to a plain gdb break."""
        func = self.cy.functions_by_qualified_name.get(funcname)
        # Module-init functions are not meaningful breakpoint targets.
        if (func and func.is_initmodule_function):
            func = None
        break_funcs = [func]
        if (not func):
            funcs = (self.cy.functions_by_name.get(funcname) or [])
            funcs = [f for f in funcs if (not f.is_initmodule_function)]
            if (not funcs):
                # Unknown to Cython: let gdb resolve the name itself.
                gdb.execute(('break ' + funcname))
                return
            if (len(funcs) > 1):
                # Ambiguous unqualified name: let the user pick interactively.
                print('There are multiple such functions:')
                for (idx, func) in enumerate(funcs):
                    print(('%3d) %s' % (idx, func.qualified_name)))
                while True:
                    try:
                        result = input("Select a function, press 'a' for all functions or press 'q' or '^D' to quit: ")
                    except EOFError:
                        return
                    else:
                        if (result.lower() == 'q'):
                            return
                        elif (result.lower() == 'a'):
                            break_funcs = funcs
                            break
                        elif (result.isdigit() and (0 <= int(result) < len(funcs))):
                            break_funcs = [funcs[int(result)]]
                            break
                        else:
                            print('Not understood...')
            else:
                break_funcs = [funcs[0]]
        for func in break_funcs:
            gdb.execute(('break %s' % func.cname))
            # Also break on the Python wrapper if one was generated.
            if func.pf_cname:
                gdb.execute(('break %s' % func.pf_cname))
    def invoke(self, function_names, from_tty):
        """Entry point: dispatch each argument to the python / pyx-line /
        function-name breakpoint helper."""
        if isinstance(function_names, BYTES):
            function_names = function_names.decode(_filesystemencoding)
        argv = string_to_argv(function_names)
        if function_names.startswith('-p'):
            argv = argv[1:]
            python_breakpoints = True
        else:
            python_breakpoints = False
        for funcname in argv:
            if python_breakpoints:
                gdb.execute(('py-break %s' % funcname))
            elif (':' in funcname):
                self._break_pyx(funcname)
            else:
                self._break_funcname(funcname)
    # NOTE(review): this bare '_suppress_errors' looks like the residue of a
    # stripped '@_suppress_errors' decorator for complete() — confirm upstream.
    _suppress_errors
    def complete(self, text, word):
        """Tab-completion for function names (qualified, and optionally
        unqualified depending on the complete_unqualified parameter)."""
        names = [n for (n, L) in self.cy.functions_by_name.items() if any(((not f.is_initmodule_function) for f in L))]
        qnames = [n for (n, f) in self.cy.functions_by_qualified_name.items() if (not f.is_initmodule_function)]
        if parameters.complete_unqualified:
            all_names = itertools.chain(qnames, names)
        else:
            all_names = qnames
        words = text.strip().split()
        if ((not words) or ('.' not in words[(- 1)])):
            # Completing a fresh name: offer everything not already typed.
            seen = set(text[:(- len(word))].split())
            return [n for n in all_names if (n.startswith(word) and (n not in seen))]
        # Completing a dotted (qualified) name: strip the already-typed prefix.
        lastword = words[(- 1)]
        compl = [n for n in qnames if n.startswith(lastword)]
        if (len(lastword) > len(word)):
            strip_prefix_length = (len(lastword) - len(word))
            compl = [n[strip_prefix_length:] for n in compl]
        return compl
def _get_average_score(concept, _keywords):
word_list = concept.split()
word_counter = 0
total = 0
for word in word_list:
total += _keywords[word]
word_counter += 1
return (total / word_counter) |
def skip(save_csv_train, save_csv_dev, save_csv_test):
    """Return True iff all three CSV files already exist (work can be skipped)."""
    paths = (save_csv_train, save_csv_dev, save_csv_test)
    return all(os.path.isfile(path) for path in paths)
class Pairs_Y(ParentWithSetFactory, DisjointUnionEnumeratedSets):
    """Enumerated set of pairs ``(a, y)`` with fixed second coordinate *y*,
    realized as a disjoint union of singleton pairs over ``range(MAX)``."""
    def __init__(self, y, policy):
        self._y = y
        # Order matters: ParentWithSetFactory must be initialized first so
        # that self.category() is valid for DisjointUnionEnumeratedSets below.
        ParentWithSetFactory.__init__(self, (None, y), policy=policy, category=EnumeratedSets().Finite())
        DisjointUnionEnumeratedSets.__init__(self, LazyFamily(range(MAX), self.single_pair), facade=True, keepkey=False, category=self.category())
    def _repr_(self):
        return ('{(a, %s) | a in range(%s)}' % (self._y, MAX))
    def an_element(self):
        # (0, y) always belongs to the set, so skip the membership check.
        return self._element_constructor_((0, self._y), check=False)
    def single_pair(self, letter):
        """Return the singleton set {(letter, y)} under the same facade policy."""
        return SingletonPair(letter, self._y, policy=self.facade_policy())
    def check_element(self, el, check):
        """Validate that *el*'s second coordinate matches this set's *y*."""
        (x, y) = el.value
        if (y != self._y):
            raise ValueError('Wrong second coordinate')
class FblasTranspose(aenum.AutoNumberEnum):
    """Transpose-mode flags for fblas-style BLAS calls.

    AutoNumberEnum assigns sequential values; the ``()`` payloads carry no data.
    """
    FblasNoTrans = ((),)
    FblasTrans = ((),)
    FblasTransUndef = ()
def printModels(models):
    """Return the string forms of *models* joined by '|'.

    An empty sequence yields '' (matching the previous behavior).
    """
    # str.join replaces the original quadratic loop of string concatenations
    # followed by a trailing-separator strip.
    return '|'.join(str(m) for m in models)
class InfoGANDiscriminator(Network):
    """InfoGAN discriminator: a shared 4-layer strided-conv trunk feeding two
    heads -- a sigmoid real/fake score ('x') and a latent-code regressor ('q').
    """
    def __init__(self, output_length, stride=2, kernel=5, start_depth=64, scope_name='infoGANDiscriminator', *args, **kwargs):
        # output_length: dimensionality of the latent code predicted by 'q'.
        # stride/kernel: conv geometry shared by all trunk layers.
        # start_depth: channel count of layer0; depth doubles each layer after.
        super(InfoGANDiscriminator, self).__init__(*args, scope_name=scope_name, **kwargs)
        self.output_length = output_length
        self.stride = stride
        self.kernel = kernel
        self.start_depth = start_depth
    def build(self, image, train, sn_op):
        """Build the graph for *image*; returns
        (real/fake probability, latent-code prediction, all tensors).

        sn_op is the spectral-normalization op threaded into conv2d/linear.
        NOTE(review): *train* is accepted but unused in this body -- confirm.
        """
        with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE):
            with tf.variable_scope('shared'):
                # Shared trunk: four conv + leaky-ReLU blocks, depth doubling.
                layers = [image]
                with tf.variable_scope('layer0'):
                    layers.append(conv2d(layers[(- 1)], self.start_depth, d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel, sn_op=sn_op))
                    layers.append(lrelu(layers[(- 1)]))
                with tf.variable_scope('layer1'):
                    layers.append(conv2d(layers[(- 1)], (self.start_depth * 2), d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel, sn_op=sn_op))
                    layers.append(lrelu(layers[(- 1)]))
                with tf.variable_scope('layer2'):
                    layers.append(conv2d(layers[(- 1)], (self.start_depth * 4), d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel, sn_op=sn_op))
                    layers.append(lrelu(layers[(- 1)]))
                with tf.variable_scope('layer3'):
                    layers.append(conv2d(layers[(- 1)], (self.start_depth * 8), d_h=self.stride, d_w=self.stride, k_h=self.kernel, k_w=self.kernel, sn_op=sn_op))
                    layers.append(lrelu(layers[(- 1)]))
            with tf.variable_scope('x'):
                # Real/fake head: linear -> sigmoid probability.
                image_layers = [layers[(- 1)]]
                with tf.variable_scope('layer0'):
                    image_layers.append(linear(image_layers[(- 1)], 1, sn_op=sn_op))
                    image_layers.append(tf.nn.sigmoid(image_layers[(- 1)]))
            with tf.variable_scope('q'):
                # Latent-code head: a single linear projection (no activation).
                q_layers = [layers[(- 1)]]
                with tf.variable_scope('layer0'):
                    q_layers.append(linear(q_layers[(- 1)], self.output_length, sn_op=sn_op))
            layers.extend(image_layers)
            layers.extend(q_layers)
        return (image_layers[(- 1)], q_layers[(- 1)], layers)
def get_documents_statistics(documents: List[Document]):
    """Aggregate structural statistics over *documents*.

    Returns a dict with measures (via ``get_measures``) of text-block counts,
    maximum nesting depth, and per-action label counts and ratios.
    The original one-expression version recomputed each label filter for every
    action; this version shares one counting helper.
    """
    # Actions whose key name matches in both sub-dicts.
    actions = {
        'continuous': ListAction.CONTINUOUS,
        'same_level': ListAction.SAME_LEVEL,
        'down': ListAction.DOWN,
        'up': ListAction.UP,
    }

    def _n(doc, action):
        # Number of labels in *doc* equal to *action*.
        return sum(1 for l in doc.labels if l == action)

    counts = {name: get_measures([_n(d, a) for d in documents]) for (name, a) in actions.items()}
    ratios = {name: get_measures([(_n(d, a) / len(d.labels)) for d in documents]) for (name, a) in actions.items()}
    # NOTE(review): the two sub-dicts historically use different keys for the
    # ELIMINATE action ('eliminated' vs 'eliminate'); kept for compatibility.
    counts['eliminated'] = get_measures([_n(d, ListAction.ELIMINATE) for d in documents])
    ratios['eliminate'] = get_measures([(_n(d, ListAction.ELIMINATE) / len(d.labels)) for d in documents])
    return {
        'n_text_blocks': get_measures([len(d.text_blocks) for d in documents]),
        'max_depth': get_measures([get_max_depth(d) for d in documents]),
        'label_counts': counts,
        'label_ratio': ratios,
    }
# NOTE(review): the decorator's '@pytest.mark' prefix was missing (the line
# began with '.parametrize', a syntax error); restored here.
@pytest.mark.parametrize('test_input', [0, (- 1), None, 'True', 'False', bool, int, 1.5, False])
def test_initialize_bad_background_knowledge_number_of_cycles(test_input):
    """Background must reject invalid ``number_of_cycles`` values."""
    with pytest.raises(ValueError):
        _ = Background(number_of_cycles=test_input)
def test_suite_assertion_minimization():
    """Visiting a suite must dispatch the visitor to every test-case chromosome."""
    minimizer = pp.AssertionMinimization()
    case = MagicMock()
    test_suite = MagicMock(test_case_chromosomes=[case, case])
    minimizer.visit_test_suite_chromosome(test_suite)
    # Each chromosome in the suite receives exactly one accept(visitor) call.
    case.accept.assert_has_calls([call(minimizer), call(minimizer)])
def create_model(model_class):
    """Assemble a two-layer DeepGP with the given Keras model class."""
    layer_a, layer_b, likelihood = create_layers()
    return gpflux.models.DeepGP(
        [layer_a, layer_b], likelihood, default_model_class=model_class
    )
def add_evaluation_args(parser):
    """Register evaluation/validation command-line options on *parser*.

    Adds a 'validation' argument group and returns the same parser so calls
    can be chained. Several help strings in the original were missing spaces
    at string-literal boundaries ('datasets.Defaults', 'evaluationvalidation');
    fixed here — argument names, types, and defaults are unchanged.
    """
    group = parser.add_argument_group('validation', 'validation configurations')
    group.add_argument('--eval-batch-size', type=int, default=None, help='Data Loader batch size for evaluation datasets. Defaults to `--batch-size`')
    group.add_argument('--eval-iters', type=int, default=100, help='number of iterations to run for evaluation validation/test for')
    group.add_argument('--eval-interval', type=int, default=1000, help='interval between running evaluation on validation set')
    group.add_argument('--eval-seq-length', type=int, default=None, help='Maximum sequence length to process for evaluation. Defaults to `--seq-length`')
    group.add_argument('--eval-max-preds-per-seq', type=int, default=None, help='Maximum number of predictions to use for evaluation. Defaults to math.ceil(`--eval-seq-length`*.15/10)*10')
    group.add_argument('--overlapping-eval', type=int, default=32, help='sliding window for overlapping eval ')
    group.add_argument('--cloze-eval', action='store_true', help='Evaluation dataset from `--valid-data` is a cloze task')
    group.add_argument('--strict-lambada', action='store_true', help='use more difficult formulation of lambada')
    group.add_argument('--eval-hf', action='store_true', help='perform evaluation with huggingface openai model. Use `--load` to specify weights path to be loaded')
    group.add_argument('--load-openai', action='store_true', help='load openai weights into our model. Use `--load` to specify weights path to be loaded')
    return parser
# NOTE(review): all four decorators had their '@pytest.mark' prefix stripped
# (lines began with '.parametrize', a syntax error); restored here.
@pytest.mark.parametrize('ctx, func_name', ctxs_rand_beta)
@pytest.mark.parametrize('alpha, beta', [(0.5, 0.5), (5, 1), (1, 3), (2, 5), (2, 2)])
@pytest.mark.parametrize('shape', [[50], [100, 100], [32, 4, 16, 16]])
@pytest.mark.parametrize('seed', [(- 1), 313])
def test_rand_beta_forward(seed, ctx, func_name, alpha, beta, shape):
    """rand_beta samples must match Beta(alpha, beta) moments and recompute OK."""
    with nn.context_scope(ctx):
        o = F.rand_beta(alpha, beta, shape, seed=seed)
        assert (o.shape == tuple(shape))
        assert (o.parent.name == func_name)
        o.forward()
        # Estimate sample moments; draw extra forwards for small outputs.
        if (o.size >= 10000):
            est_mu = o.d.mean()
            est_sigma = o.d.std()
        else:
            data = []
            for i in range(10000):
                o.forward()
                data += [o.d.copy()]
            est_mu = np.mean(np.array(data))
            est_sigma = np.std(np.array(data))
        # Analytic mean and standard deviation of Beta(alpha, beta).
        mu = (alpha / (alpha + beta))
        var = ((alpha * beta) / (((alpha + beta) * (alpha + beta)) * ((alpha + beta) + 1)))
        sigma = np.sqrt(var)
        assert np.isclose(est_mu, mu, atol=0.05)
        assert np.isclose(est_sigma, sigma, atol=0.05)
        func_args = [alpha, beta, shape, seed]
        recomputation_test(rng=None, func=F.rand_beta, vinputs=[], func_args=func_args, func_kwargs={}, ctx=ctx)
class ModelOutput(OrderedDict):
    """Base class for model outputs usable as a dict, a tuple, and by attribute.

    Subclasses are dataclasses; __post_init__ promotes non-None fields into
    ordered-dict entries, keeping attribute and item views synchronized.
    Mutation methods that would desynchronize the two views are disabled.
    """
    def __post_init__(self):
        class_fields = fields(self)
        # A subclass must declare at least one field, and only the first may
        # lack a None default (i.e. only one "required" field is allowed).
        if (not len(class_fields)):
            raise ValueError(f'{self.__class__.__name__} has no fields.')
        if (not all(((field.default is None) for field in class_fields[1:]))):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.')
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(((getattr(self, field.name) is None) for field in class_fields[1:]))
        if (other_fields_are_none and (not is_tensor(first_field))):
            # Only the first field is populated and it is not a tensor: it may
            # itself be a mapping / iterable of (key, value) pairs to splat in.
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            if first_field_iterator:
                for element in iterator:
                    # Stop splatting at the first element that is not a
                    # (str, value) pair.
                    if ((not isinstance(element, (list, tuple))) or (not (len(element) == 2)) or (not isinstance(element[0], str))):
                        break
                    setattr(self, element[0], element[1])
                    if (element[1] is not None):
                        self[element[0]] = element[1]
            elif (first_field is not None):
                self[class_fields[0].name] = first_field
        else:
            # General case: register every non-None field as a dict entry.
            for field in class_fields:
                v = getattr(self, field.name)
                if (v is not None):
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        # Deleting entries would desynchronize items and attributes.
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
    def setdefault(self, *args, **kwargs):
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
    def pop(self, *args, **kwargs):
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
    def update(self, *args, **kwargs):
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
    def __getitem__(self, k):
        # String keys behave like a dict lookup; int/slice keys index the
        # tuple view of the values.
        if isinstance(k, str):
            inner_dict = {k: v for (k, v) in self.items()}
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        # Reassigning an existing key keeps the dict entry in sync.
        if ((name in self.keys()) and (value is not None)):
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        # Setting an item also sets the attribute, keeping both views in sync.
        super().__setitem__(key, value)
        super().__setattr__(key, value)
    def to_tuple(self) -> Tuple[Any]:
        """Return the stored values (keys dropped) as a plain tuple."""
        return tuple((self[k] for k in self.keys()))
def grid(nx=4, ny=2, height=6.0, n_caxes=0, large_margin=0.02, small_margin=0.02, sep=0.02, cbar_width=0.03):
    """Create a matplotlib figure laid out as an nx-by-ny panel grid.

    Margins, panel separation and colorbar width are given as fractions of
    the figure *height*; the figure width is derived from them. Returns
    ``(fig, caxes)`` where ``caxes`` are ``n_caxes`` colorbar axes stacked
    inside the last (bottom-right) grid cell, which is turned off.
    """
    # Margins as fractions of the figure height.
    left = large_margin
    right = small_margin
    top = small_margin
    bottom = large_margin
    # Panel edge length (fraction of figure height) after removing margins
    # and the (ny - 1) inter-panel gaps.
    panel_size = ((((1.0 - top) - bottom) - ((ny - 1) * sep)) / ny)
    # Absolute figure width implied by nx panels plus margins and gaps.
    width = (height * (((left + (nx * panel_size)) + ((nx - 1) * sep)) + right))
    # Average absolute axis sizes, used to convert the gap into the relative
    # wspace/hspace units expected by subplots_adjust.
    avg_width_abs = (((((height * panel_size) * nx) * ny) + ((n_caxes * cbar_width) * height)) / ((nx * ny) + n_caxes))
    avg_height_abs = (height * panel_size)
    wspace = ((sep * height) / avg_width_abs)
    hspace = ((sep * height) / avg_height_abs)
    fig = plt.figure(figsize=(width, height))
    plt.subplots_adjust(left=((left * height) / width), right=(1.0 - ((right * height) / width)), bottom=bottom, top=(1.0 - top), wspace=wspace, hspace=hspace)
    caxes = []
    if (n_caxes > 0):
        # Sacrifice the last grid cell and pack the colorbar axes into it.
        ax = plt.subplot(ny, nx, (nx * ny))
        ax.axis('off')
        pos = ax.get_position()
        cax_total_width = (pos.width / n_caxes)
        cbar_width_ = ((cbar_width * height) / width)
        for i in range(n_caxes):
            cax = fig.add_axes([(pos.x0 + (i * cax_total_width)), pos.y0, cbar_width_, pos.height])
            cax.yaxis.set_ticks_position('right')
            caxes.append(cax)
    return (fig, caxes)
class CamVid(SegmentationDataset):
    """CamVid semantic-segmentation dataset (11 classes).

    Expects ``<root>/<subset>/*.png`` images and ``<root>/<subset>annot/*.png``
    annotation files whose sorted filenames pair up positionally.
    """
    num_classes = 11
    def __init__(self, root, subset='train', transform=None, file_path=False, num_images=None, mode='labeled'):
        self.d_idx = 'CVD'
        self.mode = mode
        self.images_root = f'{root}/{subset}/'
        self.labels_root = f'{root}/{subset}annot/'
        # glob returns files in arbitrary order; sorting is required so the
        # i-th image actually pairs with the i-th annotation (the original
        # unsorted lists could silently mismatch images and labels).
        self.image_paths = sorted(glob.glob(f'{self.images_root}/*.png'))
        self.label_paths = sorted(glob.glob(f'{self.labels_root}/*.png'))
        if num_images is not None:
            self.image_paths = self.image_paths[:num_images]
            self.label_paths = self.label_paths[:num_images]
        self.file_path = file_path
        self.transform = transform
        # Map the 255 'void' value onto the extra class index when transforming.
        self.relabel = Relabel(255, self.num_classes) if transform is not None else None
def get_coco_metrics_from_path(path_to_results):
    """Compute COCO detection metrics per image and for the whole result set.

    Expects ``<path>/groundtruths/`` (``class x y w h`` lines, XYWH) and
    ``<path>/detections/`` (``class conf x1 y1 x2 y2`` lines, XYX2Y2) text
    files with matching names. Returns ``(each_image_metrics,
    all_image_metrics)`` where per-image NaN metrics are encoded as -1.
    """
    all_gt_boxes = []
    all_detection_boxes = []
    each_image_metrics = []
    gt_dir = os.path.join(path_to_results, 'groundtruths')
    det_dir = os.path.join(path_to_results, 'detections')
    for image_name in tqdm(os.listdir(gt_dir)):
        gt_boxes = []
        detected_boxes = []
        # Context managers close the per-image files (the original leaked
        # two open file handles per image).
        with open(os.path.join(gt_dir, image_name), 'r') as gt_file:
            for current_line in gt_file:
                current_line = current_line.split(' ')
                gt_boxes.append(BoundingBox(image_name=image_name, class_id=current_line[0], coordinates=(float(current_line[1]), float(current_line[2]), float(current_line[3]), float(current_line[4])), type_coordinates=CoordinatesType.ABSOLUTE, bb_type=BBType.GROUND_TRUTH, confidence=None, format=BBFormat.XYWH))
        with open(os.path.join(det_dir, image_name), 'r') as detection_file:
            for current_line in detection_file:
                current_line = current_line.split(' ')
                detected_boxes.append(BoundingBox(image_name=image_name, class_id=current_line[0], coordinates=(float(current_line[2]), float(current_line[3]), float(current_line[4]), float(current_line[5])), type_coordinates=CoordinatesType.ABSOLUTE, bb_type=BBType.DETECTED, confidence=float(current_line[1]), format=BBFormat.XYX2Y2))
        all_gt_boxes += gt_boxes
        all_detection_boxes += detected_boxes
        image_metrics = get_coco_summary(gt_boxes, detected_boxes)
        image_metrics_list = [image_name]
        # Encode NaN metrics (e.g. classes with no instances) as -1.
        for v in image_metrics.values():
            image_metrics_list.append(-1 if math.isnan(v) else v)
        each_image_metrics.append(np.array(image_metrics_list))
    all_image_metrics = get_coco_summary(all_gt_boxes, all_detection_boxes)
    return (each_image_metrics, all_image_metrics)
class ParagraphInfo(object):
    """Word-boundary utilities over a token sequence.

    *dictionary* must expose ``is_start_word(token) -> bool`` telling whether
    a token begins a new word (as opposed to being a continuation piece).
    """
    def __init__(self, dictionary):
        self.dictionary = dictionary
    def get_word_piece_map(self, sentence):
        """Return a boolean list: True where a token starts a new word."""
        return [self.dictionary.is_start_word(i) for i in sentence]
    def get_word_at_k(self, sentence, left, right, k, word_piece_map=None):
        """Advance ``(left, right)`` forward over up to *k* whole words,
        returning the span of the k-th word reached (or less at sentence end)."""
        num_words = 0
        while num_words < k and right < len(sentence):
            left = right
            right = self.get_word_end(sentence, right, word_piece_map)
            num_words += 1
        return (left, right)
    def get_word_start(self, sentence, anchor, word_piece_map=None):
        """Index of the word-start token at or before *anchor*."""
        if word_piece_map is None:
            word_piece_map = self.get_word_piece_map(sentence)
        left = anchor
        # Walk left over continuation pieces until a word-start token.
        while left > 0 and not word_piece_map[left]:
            left -= 1
        return left
    def get_word_end(self, sentence, anchor, word_piece_map=None):
        """Index one past the last piece of the word containing *anchor*."""
        if word_piece_map is None:
            word_piece_map = self.get_word_piece_map(sentence)
        right = anchor + 1
        # Walk right while tokens continue the current word.
        while right < len(sentence) and not word_piece_map[right]:
            right += 1
        return right
def resize_images(input_dir, output_dir, size):
    """Resize every image in each sub-directory of *input_dir* to *size* and
    save it under the same sub-directory name below *output_dir*.

    Files PIL cannot read or write are skipped silently (best-effort batch
    job, behavior preserved). Prints progress every 1000 images.
    """
    for idir in os.scandir(input_dir):
        if not idir.is_dir():
            continue
        out_subdir = os.path.join(output_dir, idir.name)
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(out_subdir, exist_ok=True)
        images = os.listdir(idir.path)
        n_images = len(images)
        for iimage, image in enumerate(images):
            try:
                with open(os.path.join(idir.path, image), 'r+b') as f:
                    with Image.open(f) as img:
                        img = resize_image(img, size)
                        # Preserve the original encoder (img.format).
                        img.save(os.path.join(out_subdir, image), img.format)
            except (IOError, SyntaxError):
                # Best-effort: skip unreadable/corrupt files.
                pass
            if (iimage + 1) % 1000 == 0:
                print("[{}/{}] Resized the images and saved into '{}'.".format(iimage + 1, n_images, out_subdir))
def r_stmt(t):
    """Wrap the parsed statement ``t[0]`` in a call-budget guard.

    The returned closure aborts (returns ``(world, n, False)``) once the
    call counter exceeds ``MAX_FUNC_CALL``, guarding against runaway
    recursion; otherwise it runs the statement with the counter incremented.
    """
    inner = t[0]

    def bounded(world, n):
        if n > MAX_FUNC_CALL:
            # Budget exhausted: signal failure without executing further.
            return (world, n, False)
        return inner(world, n + 1)

    return [('stmt', bounded)]
class TChAIn(object):
    """SWIG-generated wrapper for the native SNAP ``TChAIn`` character-array
    input stream; every method forwards to the ``_snap`` extension module."""
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, ChA, _BfC=0):
        _snap.TChAIn_swiginit(self, _snap.new_TChAIn(ChA, _BfC))
    def New(ChA):
        return _snap.TChAIn_New(ChA)
    New = staticmethod(New)
    __swig_destroy__ = _snap.delete_TChAIn
    def Eof(self):
        # True when the stream is exhausted.
        return _snap.TChAIn_Eof(self)
    def Len(self):
        return _snap.TChAIn_Len(self)
    def GetCh(self):
        # Consume and return the next character.
        return _snap.TChAIn_GetCh(self)
    def PeekCh(self):
        # Return the next character without consuming it.
        return _snap.TChAIn_PeekCh(self)
    def GetBf(self, LBf, LBfL):
        return _snap.TChAIn_GetBf(self, LBf, LBfL)
    def Reset(self):
        return _snap.TChAIn_Reset(self)
    def GetNextLnBf(self, LnChA):
        return _snap.TChAIn_GetNextLnBf(self, LnChA)
# NOTE(review): the original first line began with '.overload_attribute(...)',
# i.e. a decorator whose '@<module>' prefix was lost (a syntax error);
# reconstructed as a plain @overload_attribute -- confirm the exact qualified
# decorator name against the upstream source.
@overload_attribute(NumpyType, 'dtype', inline='always')
def Numpy_dtype(builder):
    """Numba typing extension exposing ``.dtype`` on NumpyType objects."""
    def get(builder):
        # Delegate to the wrapped ndarray's dtype.
        return builder._data.dtype
    return get
class IdentityEncoder(Encoder):
    """Encoder whose hidden representation is simply its input embedding."""

    def __init__(self, config: EncoderConfig):
        super().__init__(config)

    def embedded2hidden(self, embedded: torch.FloatTensor, mask: torch.BoolTensor=None):
        # Identity mapping; *mask* is accepted for interface parity but unused.
        return embedded
def add_to_partition(_partition, _setting_str, _log_str):
    """Launch a training run on a Slurm partition via ``srun``.

    Creates ``<out_dir>/<_log_str>``, records the exact command line in
    ``run.cmd`` for reproducibility, then launches the job in the background
    with output redirected to ``train.log``. Relies on module-level ``args``
    (``script``, ``cpu_memory``) and ``out_dir``.
    """
    slurm_cmd = ('srun --gres=gpu:1 --partition=%s --mem=%s' % (_partition, args.cpu_memory))
    log_dir = ('%s/%s' % (out_dir, _log_str))
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(log_dir, exist_ok=True)
    with open(('%s/%s' % (log_dir, 'run.cmd')), 'w') as outf:
        outf.write(('%s %s\n' % (args.script, _setting_str)))
    save_str = ('--save_dir %s' % os.path.join(log_dir, 'model'))
    full_cmd = ('%s %s %s %s' % (slurm_cmd, args.script, _setting_str, save_str))
    # '&>' redirection and '&' backgrounding require a shell. NOTE: building
    # the command from strings is shell-injection-prone if any argument can
    # come from untrusted input -- acceptable only for trusted local tooling.
    bash_cmd = ('%s &> %s/train.log &' % (full_cmd, log_dir))
    print(bash_cmd)
    subprocess.call(bash_cmd, shell=True)
class BasicBlock(nn.Module):
    """Residual block with softplus activations and a squeeze-and-excitation
    channel gate (reduction factor 16; requires ``planes >= 16``).
    """
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes))
        # Squeeze-and-excitation bottleneck implemented as 1x1 convolutions.
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)
    def forward(self, x):
        out = F.softplus(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to 1x1; excite: two 1x1 convs + gate.
        w = F.avg_pool2d(out, out.size(2))
        w = F.softplus(self.fc1(w))
        # F.sigmoid is deprecated; the Tensor.sigmoid method is equivalent.
        w = self.fc2(w).sigmoid()
        out = out * w
        out += self.shortcut(x)
        return F.softplus(out)
# NOTE(review): the bare name below looks like a decorator whose '@' (and
# possibly a prefix, e.g. '@xfail_without_pywt') was stripped -- confirm
# against the upstream test module.
_without_pywt
def test_invariant_denoise():
    """J-invariant denoising must lower MSE versus the noisy input."""
    denoised_img = denoise_invariant(noisy_img, _denoise_wavelet)
    denoised_mse = mse(denoised_img, test_img)
    original_mse = mse(noisy_img, test_img)
    assert_((denoised_mse < original_mse))
class PDEProblem(abc.ABC):
    """Abstract base for PDE problems backed by a simulation database."""
    def __init__(self, db: database.Database) -> None:
        self.db = db
        # Convenience alias for the database's configuration object.
        self.config = db.config
        # Flipped by subclasses once a solution has been computed.
        self.has_solution = False
    def solve(self) -> Union[(fenics.Function, List[fenics.Function])]:
        """Solve the PDE, returning one or several fenics Functions.

        NOTE(review): not decorated @abstractmethod and the base body is a
        no-op returning None -- confirm subclasses always override this.
        """
        pass
class CifarResNeXt(nn.Module):
    """ResNeXt for CIFAR-sized (32x32) inputs: a 3x3 stem, three stages of
    ResNeXtBottleneck blocks (strides 1/2/2), 8x8 average pooling, and a
    linear classifier.
    """
    def __init__(self, cardinality, depth, nlabels, base_width, widen_factor=4):
        """
        Args:
            cardinality: number of convolution groups per bottleneck.
            depth: total depth; (depth - 2) must be divisible by 9.
            nlabels: number of output classes.
            base_width: base channel count per group.
            widen_factor: per-stage channel multiplier.
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        # Each of the three stages holds (depth - 2) / 9 bottleneck blocks.
        self.block_depth = ((self.depth - 2) // 9)
        self.base_width = base_width
        self.widen_factor = widen_factor
        self.nlabels = nlabels
        self.output_size = 64
        self.stages = [64, (64 * self.widen_factor), (128 * self.widen_factor), (256 * self.widen_factor)]
        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.avg_pool = nn.AvgPool2d(8, 1)
        self.classifier = nn.Linear(self.stages[3], nlabels)
        # init.kaiming_normal is deprecated; use the in-place variant.
        init.kaiming_normal_(self.classifier.weight)
        for key in self.state_dict():
            if (key.split('.')[(- 1)] == 'weight'):
                if ('conv' in key):
                    init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
                if ('bn' in key):
                    # BatchNorm scale starts at 1, bias (below) at 0.
                    self.state_dict()[key][...] = 1
            elif (key.split('.')[(- 1)] == 'bias'):
                self.state_dict()[key][...] = 0
    def block(self, name, in_channels, out_channels, pool_stride=2):
        """Build one stage: ``block_depth`` bottlenecks; only the first block
        downsamples / changes the channel count."""
        block = nn.Sequential()
        for bottleneck in range(self.block_depth):
            name_ = ('%s_bottleneck_%d' % (name, bottleneck))
            if (bottleneck == 0):
                block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality, self.base_width, self.widen_factor))
            else:
                block.add_module(name_, ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.base_width, self.widen_factor))
        return block
    def forward(self, x):
        # Call modules directly (module(x)) rather than module.forward(x):
        # the original explicit .forward() calls silently bypassed hooks.
        x = self.relu(self.bn_1(self.conv_1_3x3(x)))
        x = self.stage_1(x)
        x = self.stage_2(x)
        x = self.stage_3(x)
        x = self.avg_pool(x)
        x = x.view((- 1), self.stages[3])
        return self.classifier(x)
def replace_pat2(matched_str):
    """Replace the spelled-out number inside a regex match with its digits.

    Prefers capture group 1; falls back to group 2 when group 1 is empty,
    absent (None), or not parseable by ``w2n.word_to_num``.
    """
    group1 = matched_str.group(1)
    # Truthiness also covers group(1) is None (an optional group that did not
    # participate in the match); the original "!= ''" test would crash on
    # None.strip() in that case.
    if group1:
        num = group1.strip()
    else:
        num = matched_str.group(2).strip()
    try:
        ret = matched_str.group(0).replace(num, str(w2n.word_to_num(num)))
    except ValueError:
        # Group 1 was not a parseable number word; retry with group 2.
        num = matched_str.group(2).strip()
        ret = matched_str.group(0).replace(num, str(w2n.word_to_num(num)))
    return ret
class Flatten(nn.Module):
    """Collapse every non-batch dimension of the input into one."""

    def forward(self, input):
        # Keep dim 0 (batch); merge the remaining dims via a view.
        batch_size = input.size(0)
        return input.view(batch_size, -1)
def init_test_mot15():
    """Configure the module-level ``config`` for a MOT15 test run.

    Same keys and values as before, written as a single settings table and
    applied item-by-item.
    """
    settings = {
        'resume': '/media/ssm/seagate/weights/MOT17/0601-E120-M80-G30-weights/sst300_0712_83000.pth',
        'mot_root': '/media/ssm/seagate/dataset/MOT15/2DMOT2015',
        'log_folder': '/media/ssm/seagate/logs/1005-mot15-test-5',
        'batch_size': 1,
        'write_file': True,
        'tensorboard': True,
        'save_combine': False,
        'type': 'test',
        'dataset_type': 'test',
        'video_name_list': ['ADL-Rundle-1', 'ADL-Rundle-3', 'AVG-TownCentre', 'ETH-Crossing', 'ETH-Jelmoli', 'ETH-Linthescher', 'KITTI-16', 'KITTI-19', 'PETS09-S2L2', 'TUD-Crossing', 'Venice-1'],
    }
    # Item assignment (not dict.update) so any mapping-like config works.
    for key, value in settings.items():
        config[key] = value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.