code stringlengths 101 5.91M |
|---|
class AsyncRenderManager:
    """Manage a render pipeline that runs either synchronously in-process or
    asynchronously in a spawned child process communicating over queues.

    Bug fixes vs. the previous revision:
      * ``if self.is_async:`` tested the truthiness of the bound method
        (always True) instead of calling it -- those sites now call
        ``self.is_async()``.
      * ``_model`` / ``_saliency`` / ``_umap_encoders`` are read as plain
        attributes in ``_set_args_sync``, so their ``@property`` decorators
        are restored (without them the bound method object was assigned).
      * ``_process_fn`` takes no ``self`` (its first parameter is a queue),
        so it is marked ``@staticmethod``.
    """

    def __init__(self):
        self._closed = False
        self._is_async = False
        self._cur_args = None        # args of most recent render (skip duplicates)
        self._cur_result = None      # most recent render result
        self._cur_stamp = 0          # monotonically increasing; stale async results are dropped
        self._renderer_obj = None
        self._args_queue = None
        self._result_queue = None
        self._process = None
        self._model_path = None
        self._live_updates = False
        self.tile_px = None
        self.extract_px = None
        self._addl_render = []       # extra renderers appended to the pipeline (sync mode only)
        self._set_device()

    def _set_device(self) -> None:
        """Select a torch device when torch is available; otherwise None."""
        if sf.util.torch_available:
            from slideflow.model import torch_utils
            self.device = torch_utils.get_device()
        else:
            self.device = None

    def close(self) -> None:
        """Tear down the renderer and terminate any worker process."""
        self._closed = True
        self._renderer_obj = None
        if self._process is not None:
            self._process.terminate()
        self._process = None
        self._args_queue = None
        self._result_queue = None

    def is_async(self) -> bool:
        """Return True when rendering is performed in a separate process."""
        return self._is_async

    def set_renderer(self, renderer_class: type, **kwargs) -> None:
        """Install a renderer of the given class, locally or in the worker."""
        assert not self._closed
        if self.is_async():  # bug fix: was `if self.is_async:` (always truthy)
            self._set_args_async(set_renderer=(renderer_class, kwargs))
        else:
            self._renderer_obj = renderer_class(device=self.device, **kwargs)
            for _renderer in self._addl_render:
                self._renderer_obj.add_renderer(_renderer)

    def close_renderer(self) -> None:
        """Discard the current renderer (locally or in the worker)."""
        if self.is_async():
            self._set_args_async(close_renderer=True)
        else:
            self._renderer_obj = None

    def add_to_render_pipeline(self, renderer: Renderer) -> None:
        """Append an additional renderer to the pipeline (sync mode only)."""
        if self.is_async():
            raise ValueError('Cannot add to rendering pipeline when in asynchronous mode.')
        self._addl_render += [renderer]
        if self._renderer_obj is not None:
            self._renderer_obj.add_renderer(renderer)

    def remove_from_render_pipeline(self, renderer: Renderer) -> None:
        """Remove a previously added renderer (sync mode only)."""
        if self.is_async():
            raise ValueError('Cannot remove rendering pipeline when in asynchronous mode.')
        idx = self._addl_render.index(renderer)
        del self._addl_render[idx]
        if self._renderer_obj is not None:
            self._renderer_obj.remove_renderer(renderer)

    def set_async(self, is_async):
        """Enable or disable asynchronous rendering."""
        self._is_async = is_async

    def set_args(self, **args):
        """Submit render arguments; duplicate submissions are skipped unless
        live updates are enabled."""
        assert not self._closed
        if args != self._cur_args or self._live_updates:
            if self._is_async:
                self._set_args_async(**args)
            else:
                self._set_args_sync(**args)
            if not self._live_updates:
                self._cur_args = args

    def _set_args_async(self, **args):
        """Forward args to the worker process, spawning it on first use."""
        if self._process is None:
            ctx = multiprocessing.get_context('spawn')
            self._args_queue = ctx.Queue()
            self._result_queue = ctx.Queue()
            self._process = ctx.Process(
                target=self._process_fn,
                args=(self._args_queue, self._result_queue, self._model_path, self._live_updates),
                daemon=True)
            self._process.start()
        self._args_queue.put([args, self._cur_stamp])

    def _set_args_sync(self, **args):
        """Render in-process, lazily creating a default Renderer."""
        if self._renderer_obj is None:
            self._renderer_obj = Renderer(device=self.device)
            for _renderer in self._addl_render:
                self._renderer_obj.add_renderer(_renderer)
        self._renderer_obj._model = self._model
        self._renderer_obj._saliency = self._saliency
        self._cur_result = self._renderer_obj.render(**args)

    def get_result(self):
        """Return the latest render result, draining async results and
        keeping only those whose stamp matches the current one."""
        assert not self._closed
        if self._result_queue is not None:
            while self._result_queue.qsize() > 0:
                result, stamp = self._result_queue.get()
                if stamp == self._cur_stamp:
                    self._cur_result = result
        return self._cur_result

    def clear_result(self):
        """Forget current args/result and invalidate in-flight async results."""
        assert not self._closed
        self._cur_args = None
        self._cur_result = None
        self._cur_stamp += 1

    def load_model(self, model_path: str) -> None:
        """Load a model, locally or in the worker process."""
        if self._is_async:
            self._set_args_async(load_model=model_path)
        elif model_path != self._model_path:
            self._model_path = model_path
            if self._renderer_obj is None:
                self._renderer_obj = Renderer(device=self.device)
                for _renderer in self._addl_render:
                    self._renderer_obj.add_renderer(_renderer)
            self._renderer_obj.load_model(model_path, device=self.device)

    def clear_model(self):
        """Unload the model from the in-process renderer."""
        self._model_path = None
        if self._renderer_obj is not None:
            self._renderer_obj._umap_encoders = None
            self._renderer_obj._model = None
            self._renderer_obj._saliency = None

    @property
    def _model(self):
        """Model held by the in-process renderer, or None."""
        if self._renderer_obj is not None:
            return self._renderer_obj._model
        return None

    @property
    def _saliency(self):
        """Saliency object held by the in-process renderer, or None."""
        if self._renderer_obj is not None:
            return self._renderer_obj._saliency
        return None

    @property
    def _umap_encoders(self):
        """UMAP encoders held by the in-process renderer, or None."""
        if self._renderer_obj is not None:
            return self._renderer_obj._umap_encoders
        return None

    @staticmethod
    def _process_fn(args_queue: multiprocessing.Queue,
                    result_queue: multiprocessing.Queue,
                    model_path: Optional[str] = None,
                    live_updates: bool = False):
        """Worker-process loop: consume (args, stamp) pairs from *args_queue*,
        render when live updates are enabled, and push (result, stamp) pairs
        onto *result_queue*.  Returns when a 'quit' command is received."""
        if sf.util.torch_available:
            from slideflow.model import torch_utils
            device = torch_utils.get_device()
        else:
            device = None
        renderer_obj = Renderer(device=device)
        if model_path:
            renderer_obj.load_model(model_path, device=device)
        while True:
            while args_queue.qsize() > 0:
                args, stamp = args_queue.get()
                # Control commands are delivered as special keys in args.
                if 'close_renderer' in args:
                    renderer_obj = Renderer(device=device)
                if 'set_renderer' in args:
                    renderer_class, kwargs = args['set_renderer']
                    renderer_obj = renderer_class(**kwargs)
                if 'load_model' in args:
                    renderer_obj.load_model(args['load_model'], device=device)
                if 'quit' in args:
                    return
                # Render the latest args, but only if the consumer has caught
                # up (an empty result queue), to avoid unbounded backlog.
                if live_updates and not result_queue.qsize():
                    result = renderer_obj.render(**args)
                    if 'error' in result:
                        result.error = CapturedException(result.error)
                    result_queue.put([result, stamp])
class Vocab(defaultdict):
    """Word <-> id vocabulary.  Index 0 is reserved for the unknown token.

    As a ``defaultdict``, looking up an unseen word assigns it the next
    consecutive id (intended for training-time vocabulary growth).
    """

    def __init__(self, train=True):
        # The default factory hands out the next free index for unseen words.
        super().__init__(lambda: len(self))
        self.train = train
        self.UNK = 'UNK'
        self[self.UNK]  # reserve index 0 for the unknown token
        self.idx2w = self.update_idx2w()

    def update_idx2w(self):
        """Rebuild and return the id -> word mapping.

        Bug fix: this method previously returned None while also setting
        ``self.idx2w``, so the assignment in ``__init__`` immediately
        clobbered the freshly built mapping with None.
        """
        self.idx2w = {i: w for w, i in self.items()}
        return self.idx2w

    def ws2ids(self, ws):
        """Map words to ids.  At train time unseen words are added and a
        LongTensor is returned; otherwise unknown words map to 0 and a plain
        list is returned."""
        if self.train:
            return torch.tensor([self[w] for w in ws], dtype=torch.long)
        else:
            return [(self[w] if (w in self) else 0) for w in ws]

    def ids2sent(self, ids):
        """Map ids back to words via the cached ``idx2w`` table (call
        ``update_idx2w`` after growing the vocabulary)."""
        return [self.idx2w[int(i)] for i in ids]
def assert_tensor_eq(real, expected, eps=EPS):
    """Assert every element of *real* is within *eps* of *expected*."""
    close_enough = torch.abs(real - expected) < eps
    assert close_enough.all(), '%s (true) vs %s (expected)' % (real, expected)
class CountingIterator(object):
    """Iterator wrapper that keeps a running count of elements consumed and
    enforces an expected total length.

    Attributes:
        iterable: the wrapped iterable.
        n: number of elements consumed so far.
        total: expected number of elements obtainable from this iterator.
    """
    def __init__(self, iterable, start=None, total=None):
        # Args:
        #   iterable: iterable to wrap.
        #   start: initial consumed-count; if None, resume from the wrapped
        #       iterable's own `n` attribute (0 when absent).
        #   total: expected length; if None, inferred as start + len(iterable).
        self.iterable = iterable
        self.itr = iter(self)  # single shared generator; __next__/skip advance it
        if (start is None):
            self.n = getattr(iterable, 'n', 0)
        else:
            self.n = start
        if (total is None):
            self.total = (self.n + len(iterable))
        else:
            self.total = total
    def __len__(self):
        return self.total
    def __iter__(self):
        # Yields elements while counting them; raises if the underlying
        # iterable yields more than `total` elements.
        for x in self.iterable:
            if (self.n >= self.total):
                raise RuntimeError('Mismatch between actual and expected iterable length. This may be caused by resuming training from a checkpoint using a different number of GPUs, in which case you can try the --reset-dataloader option. Alternatively you may have a train or validation set that is smaller than the number of GPUs. If none of these apply, please report this to the fairseq developers.')
            self.n += 1
            (yield x)
    def __next__(self):
        return next(self.itr)
    def has_next(self):
        """Whether any elements remain (consumed count below total)."""
        return (self.n < len(self))
    def skip(self, num_to_skip):
        """Fast-forward past *num_to_skip* elements; returns self."""
        # islice(..., k, k) consumes k elements and yields nothing.
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self
    def take(self, n):
        """Truncate this iterator to at most *n* total elements."""
        self.total = min(self.total, n)
        # Propagate truncation to the wrapped iterable, accounting for
        # elements already consumed.
        propagated_take = max((n - self.n), 0)
        if hasattr(self.iterable, 'take'):
            self.iterable.take(propagated_take)
        else:
            self.iterable = itertools.islice(self.iterable, propagated_take)
class STL10Tester(DatasetTestcase):
    """Unit tests for ``torchvision.datasets.STL10`` against a mocked,
    locally generated data root (no network downloads).

    Bug fix: the decorators were lost in this copy -- ``mocked_root`` and
    ``mocked_dataset`` are generator functions used in ``with`` statements
    (so they need ``@contextmanager``), and ``test_download_preexisting``
    was preceded by a bare ``.patch(...)`` fragment (a syntax error) that
    is restored as ``mock.patch``.  The imports live in the class body so
    this block is self-contained.
    """
    import contextlib as _contextlib    # for the @contextmanager decorators below
    from unittest import mock as _mock  # for @_mock.patch below

    @_contextlib.contextmanager
    def mocked_root(self):
        """Yield (root, data) for a temporary fake STL10 data root."""
        with stl10_root() as (root, data):
            yield (root, data)

    @_contextlib.contextmanager
    def mocked_dataset(self, pre_extract=False, download=True, **kwargs):
        """Yield (dataset, data), optionally pre-extracting the archive."""
        with self.mocked_root() as (root, data):
            if pre_extract:
                utils.extract_archive(os.path.join(root, data['archive']))
            dataset = torchvision.datasets.STL10(root, download=download, **kwargs)
            yield (dataset, data)

    def test_not_found(self):
        with self.assertRaises(RuntimeError):
            with self.mocked_dataset(download=False):
                pass

    def test_splits(self):
        for split in ('train', 'train+unlabeled', 'unlabeled', 'test'):
            with self.mocked_dataset(split=split) as (dataset, data):
                # '+'-joined splits contain the union of their parts.
                num_images = sum([data['num_images_in_split'][part] for part in split.split('+')])
                self.generic_classification_dataset_test(dataset, num_images=num_images)

    def test_folds(self):
        for fold in range(10):
            with self.mocked_dataset(split='train', folds=fold) as (dataset, data):
                num_images = data['num_images_in_folds'][fold]
                self.assertEqual(len(dataset), num_images)

    def test_invalid_folds1(self):
        with self.assertRaises(ValueError):
            with self.mocked_dataset(folds=10):
                pass

    def test_invalid_folds2(self):
        with self.assertRaises(ValueError):
            with self.mocked_dataset(folds='0'):
                pass

    def test_transforms(self):
        expected_image = 'image'
        expected_target = 'target'

        def transform(image):
            return expected_image

        def target_transform(target):
            return expected_target

        with self.mocked_dataset(transform=transform, target_transform=target_transform) as (dataset, _):
            (actual_image, actual_target) = dataset[0]
            self.assertEqual(actual_image, expected_image)
            self.assertEqual(actual_target, expected_target)

    def test_unlabeled(self):
        with self.mocked_dataset(split='unlabeled') as (dataset, _):
            labels = [dataset[idx][1] for idx in range(len(dataset))]
            # Unlabeled samples carry the sentinel label -1.
            self.assertTrue(all([(label == (- 1)) for label in labels]))

    @_mock.patch('torchvision.datasets.stl10.download_and_extract_archive')
    def test_download_preexisting(self, mock):
        with self.mocked_dataset(pre_extract=True) as (dataset, data):
            mock.assert_not_called()

    def test_repr_smoke(self):
        with self.mocked_dataset() as (dataset, _):
            self.assertIsInstance(repr(dataset), str)
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact (erf-based) Gaussian Error Linear Unit: x * Phi(x), where Phi
    is the standard normal CDF."""
    phi = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * phi
def extract_seconds(input_file, output_file):
    """Parse a training log and write the elapsed seconds of every
    'Iteration' line (relative to the log's start time), one value per line.

    Log timestamps carry no year, so the year inferred from the log file is
    bumped whenever the month decreases (a year rollover).

    Args:
        input_file: path to the log file to parse.
        output_file: path to write newline-separated elapsed seconds.

    Raises:
        AssertionError: if no start time can be found in the log.
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()
    log_created_year = get_log_created_year(input_file)
    start_datetime = get_start_time(lines, log_created_year)
    assert start_datetime, 'Start time not found'
    last_dt = start_datetime
    # Bug fix: use a context manager so the output handle is closed even if
    # an exception is raised mid-loop (it was previously opened bare).
    with open(output_file, 'w') as out:
        for line in lines:
            line = line.strip()
            if line.find('Iteration') != -1:
                dt = extract_datetime_from_line(line, log_created_year)
                if dt.month < last_dt.month:
                    # Month decreased => rolled into a new year; re-parse.
                    log_created_year += 1
                    dt = extract_datetime_from_line(line, log_created_year)
                last_dt = dt
                elapsed_seconds = (dt - start_datetime).total_seconds()
                out.write('%f\n' % elapsed_seconds)
def _non_dist_train(model, dataset, cfg, validate=False):
    """Train *model* on a single machine without distributed data parallel.

    Builds one non-distributed dataloader, wraps the model in MMDataParallel
    over ``cfg.gpus`` GPUs, registers the standard training hooks, resumes
    from or loads a checkpoint when configured, and runs the workflow.

    NOTE(review): the ``validate`` flag is accepted but never used here --
    presumably validation is driven by hooks or the workflow; confirm
    against callers.
    """
    data_loaders = [build_dataloader(dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, cfg.gpus, dist=False)]
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir, cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config)
    # Resume (restores optimizer/epoch state) takes precedence over a plain
    # checkpoint load (weights only).
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def update_moving_average(ema_updater, ma_model, current_model):
    """Update *ma_model*'s parameters in place as a moving average of
    *current_model*'s parameters, using *ema_updater*'s update rule."""
    param_pairs = zip(current_model.parameters(), ma_model.parameters())
    for src_param, ema_param in param_pairs:
        old_weight = ema_param.data
        new_weight = src_param.data
        ema_param.data = ema_updater.update_average(old_weight, new_weight)
def parse_args():
    """Parse command-line options for restricting the data file.

    Returns:
        argparse.Namespace with data_file, out_file, and max_relevant_idx.
    """
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('--data-file', type=str, default='_output/data.pkl')
    arg_parser.add_argument('--out-file', type=str, default='_output/restrict_data.pkl')
    arg_parser.add_argument('--max-relevant-idx', type=int, default=6)
    return arg_parser.parse_args()
class WIKIPEDIA5MProcessor(BaseProcessor):
    """Processor for the WIKIPEDIA5M knowledge-graph dataset.

    Thin subclass that fixes ``data_name`` and forwards the node/relation
    lookup tables to the base processor.
    """
    def __init__(self, node_lut, relation_lut):
        # node_lut / relation_lut: lookup tables for nodes and relations
        # (schema defined by BaseProcessor -- not visible in this file).
        super().__init__(data_name='WIKIPEDIA5M', node_lut=node_lut, relation_lut=relation_lut)
def get_bond_features(mol, mono=False):
    """Build an adjacency-style bond-feature tensor for a molecule.

    Args:
        mol: SMILES string for the molecule.
        mono: if True, each (i, j) entry is a single 0/1 flag marking whether
            a bond exists; otherwise entries are the 4-dim vectors produced
            by ``bond_feature`` (zeros where no bond exists).

    Returns:
        np.ndarray of shape (n_atoms, n_atoms, feature_dim).
    """
    m = Chem.MolFromSmiles(mol)
    n_atoms = len(m.GetAtoms())  # hoisted: was recomputed in both loops
    bond_features = []
    for i in range(n_atoms):
        bond_vector = []
        for j in range(n_atoms):
            # GetBondBetweenAtoms returns None when no bond exists; test that
            # directly (the original probed hasattr(bond, 'GetBondType')).
            bond = m.GetBondBetweenAtoms(i, j)
            if mono:
                bf = [float(bond is not None)]
            elif bond is not None:
                bf = bond_feature(bond)
            else:
                bf = [0.0] * 4
            bond_vector.append(bf)
        bond_features.append(bond_vector)
    return np.array(bond_features)
class AccuracyOfEpochMonitorSegmentation(object):
    """Accumulates per-subepoch segmentation metrics (accuracy, sensitivity,
    precision, specificity, Dice) and reports them per subepoch and per
    epoch: overall, per class, and for whole-foreground vs background.

    Metrics that cannot be computed (e.g. 0/0) are recorded as NA_PATTERN
    and excluded from epoch means by getMeanOfListExclNA.
    """
    NA_PATTERN = 'N/A'  # placeholder for undefined metric values
    def __init__(self, log, training0orValidation1, epoch, numberOfClasses, numberOfSubepochsPerEpoch):
        # Args:
        #   log: logger exposing print3().
        #   training0orValidation1: 0 = training (mean costs tracked too),
        #       1 = validation.
        #   epoch: index of the epoch being monitored.
        #   numberOfClasses: number of classes; class 0 is background, and
        #       its counts double as whole-foreground stats (see below).
        #   numberOfSubepochsPerEpoch: expected number of subepochs.
        self.log = log
        self.training0orValidation1 = training0orValidation1
        self.epoch = epoch
        self.numberOfClasses = numberOfClasses
        self.numberOfSubepochsPerEpoch = numberOfSubepochsPerEpoch
        self.numberOfSubepochsForWhichUpdated = 0
        if (self.training0orValidation1 == 0):
            # Mean cost is only meaningful during training.
            self.meanCostOfEachSubep = []
        self.correctlyPredVoxelsInEachSubep = []
        self.numberOfAllSamplesOfEachSubep = []
        self.meanEmpiricalAccuracyOfEachSubep = []
        # Per-subepoch raw counts and derived metrics, per class.
        self.listPerSubepPerClassRpRnTpTn = []
        self.listPerSubepPerClassMeanAccSensSpecDsc = []
        # Per-subepoch counts/metrics for whole foreground vs background.
        self.listPerSubepForegrRpRnTpTn = []
        self.listPerSubepForegrMeanAccSensSpecDsc = []
    def getMeanEmpiricalAccuracyOfEpoch(self):
        # NOTE(review): any NA_PATTERN entry in the list would break np.mean;
        # presumably callers only use this when all subepochs had samples.
        return np.mean(self.meanEmpiricalAccuracyOfEachSubep)
    def updateMonitorAccuraciesWithNewSubepochEntries(self, meanCostOfSubepoch, perClassRpRnTpTnInSubep):
        """Record one subepoch's counts and derive its metrics.

        perClassRpRnTpTnInSubep: indexed both as [class_i][j] and
        [(class_i, j)] (so presumably a 2-D numpy array) with columns
        j = 0: RealPos, 1: RealNeg, 2: TruePos, 3: TrueNeg.
        """
        if (self.training0orValidation1 == 0):
            self.meanCostOfEachSubep.append(meanCostOfSubepoch)
        correctlyPredVoxelsInSubep = 0
        for class_i in range(self.numberOfClasses):
            correctlyPredVoxelsInSubep += perClassRpRnTpTnInSubep[class_i][2]
        self.correctlyPredVoxelsInEachSubep.append(correctlyPredVoxelsInSubep)
        # Total sample count = class-0 real positives + real negatives.
        numberOfAllSamples = (perClassRpRnTpTnInSubep[(0, 0)] + perClassRpRnTpTnInSubep[(0, 1)])
        self.numberOfAllSamplesOfEachSubep.append(numberOfAllSamples)
        meanAccuracyOfSubepoch = (self.NA_PATTERN if (numberOfAllSamples == 0) else ((correctlyPredVoxelsInSubep * 1.0) / numberOfAllSamples))
        self.meanEmpiricalAccuracyOfEachSubep.append(meanAccuracyOfSubepoch)
        self.listPerSubepPerClassRpRnTpTn.append(perClassRpRnTpTnInSubep)
        listWithPerClassMeanAccSensSpecDscInSubep = []
        for class_i in range(self.numberOfClasses):
            numOfRealPosInSubep = perClassRpRnTpTnInSubep[(class_i, 0)]
            numOfRealNegInSubep = perClassRpRnTpTnInSubep[(class_i, 1)]
            numOfTruePosInSubep = perClassRpRnTpTnInSubep[(class_i, 2)]
            numOfTrueNegInSubep = perClassRpRnTpTnInSubep[(class_i, 3)]
            numOfFalsePosInSubep = (numOfRealNegInSubep - numOfTrueNegInSubep)
            meanAccuracyClassVsAllOfSubep = ((numOfTruePosInSubep + numOfTrueNegInSubep) / float((numOfRealPosInSubep + numOfRealNegInSubep)))
            # Sensitivity (recall): NA when there are no real positives.
            meanAccuracyOnPosOfSubep = (self.NA_PATTERN if (numOfRealPosInSubep == 0) else ((numOfTruePosInSubep * 1.0) / numOfRealPosInSubep))
            meanPrecOfSubep = (self.NA_PATTERN if ((numOfTruePosInSubep + numOfFalsePosInSubep) == 0) else ((numOfTruePosInSubep * 1.0) / (numOfTruePosInSubep + numOfFalsePosInSubep)))
            # Specificity: NA when there are no real negatives.
            meanAccuracyOnNegOfSubep = (self.NA_PATTERN if (numOfRealNegInSubep == 0) else ((numOfTrueNegInSubep * 1.0) / numOfRealNegInSubep))
            numOfPredPosInSubep = ((numOfRealNegInSubep - numOfTrueNegInSubep) + numOfTruePosInSubep)
            meanDiceOfSubep = (self.NA_PATTERN if (numOfRealPosInSubep == 0) else ((2.0 * numOfTruePosInSubep) / (numOfPredPosInSubep + numOfRealPosInSubep)))
            listWithPerClassMeanAccSensSpecDscInSubep.append([meanAccuracyClassVsAllOfSubep, meanAccuracyOnPosOfSubep, meanPrecOfSubep, meanAccuracyOnNegOfSubep, meanDiceOfSubep])
        self.listPerSubepPerClassMeanAccSensSpecDsc.append(listWithPerClassMeanAccSensSpecDscInSubep)
        # Whole-foreground stats are derived from the background class (0) by
        # swapping Pos<->Neg: foreground TruePos = background TrueNeg, etc.
        foregrTp = perClassRpRnTpTnInSubep[0][3]
        foregrTn = perClassRpRnTpTnInSubep[0][2]
        foregrRp = perClassRpRnTpTnInSubep[0][1]
        foregrRn = perClassRpRnTpTnInSubep[0][0]
        foregrFp = (foregrRn - foregrTn)
        self.listPerSubepForegrRpRnTpTn.append([foregrRp, foregrRn, foregrTp, foregrTn])
        foregrMeanAccOfSubep = ((foregrTp + foregrTn) / float((foregrRp + foregrRn)))
        foregrMeanAccOnPosOfSubep = (self.NA_PATTERN if (foregrRp == 0) else ((foregrTp * 1.0) / foregrRp))
        foregrMeanPrecOfSubep = (self.NA_PATTERN if ((foregrTp + foregrFp) == 0) else ((foregrTp * 1.0) / (foregrTp + foregrFp)))
        foregrMeanAccOnNegOfSubep = (self.NA_PATTERN if (foregrRn == 0) else ((foregrTn * 1.0) / foregrRn))
        foregrPredPosInSubep = ((foregrRn - foregrTn) + foregrTp)
        foregrMeanDiceOfSubep = (self.NA_PATTERN if (foregrRp == 0) else ((2.0 * foregrTp) / (foregrPredPosInSubep + foregrRp)))
        self.listPerSubepForegrMeanAccSensSpecDsc.append([foregrMeanAccOfSubep, foregrMeanAccOnPosOfSubep, foregrMeanPrecOfSubep, foregrMeanAccOnNegOfSubep, foregrMeanDiceOfSubep])
        self.numberOfSubepochsForWhichUpdated += 1
    def reportAccuracyForLastSubepoch(self):
        """Log all metrics for the most recently recorded subepoch."""
        trainOrValString = ('TRAINING' if (self.training0orValidation1 == 0) else 'VALIDATION')
        numberOfClasses = self.numberOfClasses
        currSubep = (self.numberOfSubepochsForWhichUpdated - 1)
        logStr = ((((trainOrValString + ': Epoch #') + str(self.epoch)) + ', Subepoch #') + str(currSubep))
        self.log.print3(' Reporting Accuracy over whole subepoch ')
        self.log.print3(((((((logStr + ', Overall:\t mean accuracy: \t') + strFl4fNA(self.meanEmpiricalAccuracyOfEachSubep[currSubep], self.NA_PATTERN)) + '\t=> Correctly-Classified-Voxels/All-Predicted-Voxels = ') + str(self.correctlyPredVoxelsInEachSubep[currSubep])) + '/') + str(self.numberOfAllSamplesOfEachSubep[currSubep])))
        if (self.training0orValidation1 == 0):
            self.log.print3(((logStr + ', Overall:\t mean cost: \t') + strFl5fNA(self.meanCostOfEachSubep[currSubep], self.NA_PATTERN)))
        for class_i in range(self.numberOfClasses):
            classString = ('Class-' + str(class_i))
            # Class 0 is reported as whole-foreground vs background.
            extraDescription = ('[Whole Foreground (Pos) Vs Background (Neg)]' if (class_i == 0) else '[This Class (Pos) Vs All Others (Neg)]')
            self.log.print3(((((' Reporting Accuracy over whole subepoch for ' + classString) + ' ') + extraDescription) + ' '))
            [meanAccClassOfSubep, meanAccOnPosOfSubep, meanPrecOfSubep, meanAccOnNegOfSubep, meanDiceOfSubep] = (self.listPerSubepPerClassMeanAccSensSpecDsc[currSubep][class_i] if (class_i != 0) else self.listPerSubepForegrMeanAccSensSpecDsc[currSubep])
            [numOfRpInSubep, numOfRnInSubep, numOfTpInSubep, numOfTnInSubep] = (self.listPerSubepPerClassRpRnTpTn[currSubep][class_i] if (class_i != 0) else self.listPerSubepForegrRpRnTpTn[currSubep])
            numOfFpInSubep = (numOfRnInSubep - numOfTnInSubep)
            logStrClass = (((logStr + ', ') + classString) + ':')
            self.log.print3(((((((logStrClass + '\t mean accuracy: \t') + strFl4fNA(meanAccClassOfSubep, self.NA_PATTERN)) + '\t=> (TruePos+TrueNeg)/All-Predicted-Voxels = ') + str((numOfTpInSubep + numOfTnInSubep))) + '/') + str((numOfRpInSubep + numOfRnInSubep))))
            self.log.print3(((((((logStrClass + '\t mean sensitivity:\t') + strFl4fNA(meanAccOnPosOfSubep, self.NA_PATTERN)) + '\t=> TruePos/RealPos = ') + str(numOfTpInSubep)) + '/') + str(numOfRpInSubep)))
            self.log.print3(((((((logStrClass + '\t mean precision:\t') + strFl4fNA(meanPrecOfSubep, self.NA_PATTERN)) + '\t=> TruePos/(TruePos+FalsePos) = ') + str(numOfTpInSubep)) + '/') + str((numOfTpInSubep + numOfFpInSubep))))
            self.log.print3(((((((logStrClass + '\t mean specificity:\t') + strFl4fNA(meanAccOnNegOfSubep, self.NA_PATTERN)) + '\t=> TrueNeg/RealNeg = ') + str(numOfTnInSubep)) + '/') + str(numOfRnInSubep)))
            self.log.print3(((logStrClass + '\t mean Dice: \t') + strFl4fNA(meanDiceOfSubep, self.NA_PATTERN)))
    def reportMeanAccyracyOfEpoch(self):
        """Log metrics averaged over all subepochs of the epoch.

        NOTE(review): method name preserves the original 'Accyracy' typo,
        since external callers depend on it.
        """
        trainOrValString = ('TRAINING' if (self.training0orValidation1 == 0) else 'VALIDATION')
        logStr = ((trainOrValString + ': Epoch #') + str(self.epoch))
        self.log.print3('( Reporting Accuracy over whole epoch ')
        meanEmpiricalAccOfEp = getMeanOfListExclNA(self.meanEmpiricalAccuracyOfEachSubep, self.NA_PATTERN)
        self.log.print3((((logStr + ', Overall:\t mean accuracy of epoch:\t') + strFl4fNA(meanEmpiricalAccOfEp, self.NA_PATTERN)) + '\t=> Correctly-Classified-Voxels/All-Predicted-Voxels'))
        if (self.training0orValidation1 == 0):
            meanCostOfEp = getMeanOfListExclNA(self.meanCostOfEachSubep, self.NA_PATTERN)
            self.log.print3(((logStr + ', Overall:\t mean cost of epoch: \t') + strFl5fNA(meanCostOfEp, self.NA_PATTERN)))
        self.log.print3(((logStr + ', Overall:\t mean accuracy of each subepoch:\t') + strListFl4fNA(self.meanEmpiricalAccuracyOfEachSubep, self.NA_PATTERN)))
        if (self.training0orValidation1 == 0):
            self.log.print3(((logStr + ', Overall:\t mean cost of each subepoch: \t') + strListFl5fNA(self.meanCostOfEachSubep, self.NA_PATTERN)))
        for class_i in range(self.numberOfClasses):
            classString = ('Class-' + str(class_i))
            extraDescription = ('[Whole Foreground (Pos) Vs Background (Neg)]' if (class_i == 0) else '[This Class (Pos) Vs All Others (Neg)]')
            self.log.print3(((((' Reporting Accuracy over whole epoch for ' + classString) + ' ') + extraDescription) + ' '))
            # Gather each metric's per-subepoch series for this class (class 0
            # uses the whole-foreground series instead).
            if (class_i != 0):
                meanAccPerSubep = [self.listPerSubepPerClassMeanAccSensSpecDsc[subep_i][class_i][0] for subep_i in range(len(self.listPerSubepPerClassMeanAccSensSpecDsc))]
                meanSensPerSubep = [self.listPerSubepPerClassMeanAccSensSpecDsc[subep_i][class_i][1] for subep_i in range(len(self.listPerSubepPerClassMeanAccSensSpecDsc))]
                meanPrecPerSubep = [self.listPerSubepPerClassMeanAccSensSpecDsc[subep_i][class_i][2] for subep_i in range(len(self.listPerSubepPerClassMeanAccSensSpecDsc))]
                meanSpecPerSubep = [self.listPerSubepPerClassMeanAccSensSpecDsc[subep_i][class_i][3] for subep_i in range(len(self.listPerSubepPerClassMeanAccSensSpecDsc))]
                meanDscPerSubep = [self.listPerSubepPerClassMeanAccSensSpecDsc[subep_i][class_i][4] for subep_i in range(len(self.listPerSubepPerClassMeanAccSensSpecDsc))]
            else:
                meanAccPerSubep = [self.listPerSubepForegrMeanAccSensSpecDsc[subep_i][0] for subep_i in range(len(self.listPerSubepForegrMeanAccSensSpecDsc))]
                meanSensPerSubep = [self.listPerSubepForegrMeanAccSensSpecDsc[subep_i][1] for subep_i in range(len(self.listPerSubepForegrMeanAccSensSpecDsc))]
                meanPrecPerSubep = [self.listPerSubepForegrMeanAccSensSpecDsc[subep_i][2] for subep_i in range(len(self.listPerSubepForegrMeanAccSensSpecDsc))]
                meanSpecPerSubep = [self.listPerSubepForegrMeanAccSensSpecDsc[subep_i][3] for subep_i in range(len(self.listPerSubepForegrMeanAccSensSpecDsc))]
                meanDscPerSubep = [self.listPerSubepForegrMeanAccSensSpecDsc[subep_i][4] for subep_i in range(len(self.listPerSubepForegrMeanAccSensSpecDsc))]
            meanAccOfEp = getMeanOfListExclNA(meanAccPerSubep, self.NA_PATTERN)
            meanSensOfEp = getMeanOfListExclNA(meanSensPerSubep, self.NA_PATTERN)
            meanPrecOfEp = getMeanOfListExclNA(meanPrecPerSubep, self.NA_PATTERN)
            meanSpecOfEp = getMeanOfListExclNA(meanSpecPerSubep, self.NA_PATTERN)
            meanDscOfEp = getMeanOfListExclNA(meanDscPerSubep, self.NA_PATTERN)
            logStrClass = (((logStr + ', ') + classString) + ':')
            self.log.print3((((logStrClass + '\t mean accuracy of epoch:\t') + strFl4fNA(meanAccOfEp, self.NA_PATTERN)) + '\t=> (TruePos+TrueNeg)/All-Predicted-Voxels'))
            self.log.print3((((logStrClass + '\t mean sensitivity of epoch:\t') + strFl4fNA(meanSensOfEp, self.NA_PATTERN)) + '\t=> TruePos/RealPos'))
            self.log.print3((((logStrClass + '\t mean precision of epoch:\t') + strFl4fNA(meanPrecOfEp, self.NA_PATTERN)) + '\t=> TruePos/(TruePos+FalsePos)'))
            self.log.print3((((logStrClass + '\t mean specificity of epoch:\t') + strFl4fNA(meanSpecOfEp, self.NA_PATTERN)) + '\t=> TrueNeg/RealNeg'))
            self.log.print3(((logStrClass + '\t mean Dice of epoch: \t') + strFl4fNA(meanDscOfEp, self.NA_PATTERN)))
            self.log.print3(((logStrClass + '\t mean accuracy of each subepoch:\t') + strListFl4fNA(meanAccPerSubep, self.NA_PATTERN)))
            self.log.print3(((logStrClass + '\t mean sensitivity of each subepoch:\t') + strListFl4fNA(meanSensPerSubep, self.NA_PATTERN)))
            self.log.print3(((logStrClass + '\t mean precision of each subepoch:\t') + strListFl4fNA(meanPrecPerSubep, self.NA_PATTERN)))
            self.log.print3(((logStrClass + '\t mean specificity of each subepoch:\t') + strListFl4fNA(meanSpecPerSubep, self.NA_PATTERN)))
            self.log.print3(((logStrClass + '\t mean Dice of each subepoch: \t') + strListFl4fNA(meanDscPerSubep, self.NA_PATTERN)))
        self.log.print3(' End Of Accuracy Report at the end of Epoch ')
        self.log.print3('')
def adjust_widths_groups_comp(widths, bottle_ratios, groups):
    """Adjust stage widths and group counts for compatibility: every
    bottleneck width is clamped to allow its group count, quantized to be
    divisible by it, and then projected back to a stage width."""
    ws_bot = [int(w * b) for w, b in zip(widths, bottle_ratios)]
    gs = [min(g, wb) for g, wb in zip(groups, ws_bot)]
    ws_bot = [quantize_float(wb, g) for wb, g in zip(ws_bot, gs)]
    ws = [int(wb / b) for wb, b in zip(ws_bot, bottle_ratios)]
    return ws, gs
_module()
class DeepGCN(nn.Module):
    """Deep GCN backbone over point clouds: a graph-conv head on a dilated
    KNN graph, followed by a stack of dynamic graph-conv blocks ('res',
    'dense', or plain), whose concatenated features are fused into an
    embedding of size ``emb_dims``.
    """
    def __init__(self, in_channels=3, channels=64, emb_dims=1024, n_blocks=14, conv='edge', block='res', k=16, epsilon=0.2, use_stochastic=True, use_dilation=True, norm_args={'norm': 'bn'}, act_args={'act': 'relu'}, conv_args={'order': 'conv-norm-act'}, is_seg=False, **kwargs):
        # NOTE(review): mutable default arguments (norm_args/act_args/
        # conv_args) are shared across instances -- safe only as long as they
        # are never mutated; confirm before modifying them in place.
        super(DeepGCN, self).__init__()
        if kwargs:
            logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
        c_growth = channels  # per-block channel growth for the 'dense' variant
        self.n_blocks = n_blocks
        self.knn = DilatedKNN(k, 1, use_stochastic, epsilon)
        self.head = GraphConv(in_channels, channels, conv, bias=False, norm_args=norm_args, act_args=act_args, **conv_args)
        if (block.lower() == 'dense'):
            # Dense connectivity: block i consumes channels + c_growth*i.
            self.backbone = Seq(*[DenseDynBlock((channels + (c_growth * i)), c_growth, conv, k, (1 + i), use_stochastic, epsilon, act_args=act_args, norm_args=norm_args, **conv_args) for i in range((self.n_blocks - 1))])
            fusion_dims = int(((((channels + channels) + (c_growth * (self.n_blocks - 1))) * self.n_blocks) // 2))
        elif (block.lower() == 'res'):
            # Residual blocks; dilation grows with depth when enabled.
            if use_dilation:
                self.backbone = Seq(*[ResDynBlock(channels, conv, k, (1 + i), use_stochastic, epsilon, act_args=act_args, norm_args=norm_args, **conv_args) for i in range((self.n_blocks - 1))])
            else:
                self.backbone = Seq(*[ResDynBlock(channels, conv, k, 1, use_stochastic, epsilon, act_args=act_args, norm_args=norm_args, **conv_args) for i in range((self.n_blocks - 1))])
            fusion_dims = int((channels + (c_growth * (self.n_blocks - 1))))
        else:
            # Plain (non-residual, non-dense) dynamic convolutions.
            stochastic = False
            self.backbone = Seq(*[DynConv(channels, channels, conv, k, 1, stochastic, epsilon, act_args=act_args, norm_args=norm_args, **conv_args) for i in range((self.n_blocks - 1))])
            fusion_dims = int((channels + (c_growth * (self.n_blocks - 1))))
        self.fusion_block = create_convblock1d(fusion_dims, emb_dims, act_args={'act': 'leakyrelu', 'negative_slope': 0.2}, norm_args=norm_args, **conv_args, bias=False)
        self.model_init()
        # Pooling over the last (point) dimension for classification features.
        self.maxpool = (lambda x: torch.max(x, dim=(- 1), keepdim=False)[0])
        self.avgpool = (lambda x: torch.mean(x, dim=(- 1), keepdim=False))
        # Classification concatenates max+avg pooling, doubling the dims.
        self.out_channels = (emb_dims if is_seg else (emb_dims * 2))
    def model_init(self):
        """Kaiming-init conv weights; unit/zero-init norm layers."""
        for m in self.modules():
            if isinstance(m, (torch.nn.Conv2d, torch.nn.Conv1d)):
                torch.nn.init.kaiming_normal_(m.weight)
                m.weight.requires_grad = True
                if (m.bias is not None):
                    m.bias.data.zero_()
                    m.bias.requires_grad = True
            elif isinstance(m, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d, nn.BatchNorm1d)):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
    def forward_seg_feat(self, pts, features=None):
        """Return (points, per-point fused features) for segmentation."""
        fusion = self.forward(pts, features)
        return (pts, fusion)
    def forward_cls_feat(self, pts, features=None):
        """Return a global feature (max-pool || avg-pool) for classification."""
        fusion = self.forward(pts, features)
        return torch.cat((self.maxpool(fusion), self.avgpool(fusion)), dim=1)
    def forward(self, pts, features=None):
        # Accept either a dict-like batch ({'pos': ..., 'x': ...}) or a
        # plain points tensor (features derived by transposing pts).
        if hasattr(pts, 'keys'):
            (pts, features) = (pts['pos'], pts['x'])
        if (features is None):
            features = pts.transpose(1, 2).contiguous()
        features = features.unsqueeze((- 1))
        # Collect the head output plus every backbone block's output, then
        # fuse the channel-wise concatenation.
        feats = [self.head(features, self.knn(pts))]
        for i in range((self.n_blocks - 1)):
            feats.append(self.backbone[i](feats[(- 1)]))
        feats = torch.cat(feats, dim=1).squeeze((- 1))
        fusion = self.fusion_block(feats)
        return fusion
def main(args):
    """Entry point for classification training/evaluation.

    Builds data loaders, model, optimizer, and LR scheduler from *args*;
    optionally wraps the model in apex AMP and/or DistributedDataParallel,
    optionally resumes from a checkpoint, and either evaluates once
    (``--test-only``) or runs the full training loop, checkpointing after
    every epoch.
    """
    if (args.apex and (amp is None)):
        raise RuntimeError('Failed to import apex. Please install apex from to enable mixed-precision training.')
    if args.output_dir:
        utils.mkdir(args.output_dir)
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    train_dir = os.path.join(args.data_path, 'train')
    val_dir = os.path.join(args.data_path, 'val')
    (dataset, dataset_test, train_sampler, test_sampler) = load_data(train_dir, val_dir, args)
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.workers, pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, sampler=test_sampler, num_workers=args.workers, pin_memory=True)
    print('Creating model')
    model = torchvision.models.__dict__[args.model](pretrained=args.pretrained)
    model.to(device)
    if (args.distributed and args.sync_bn):
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    criterion = nn.CrossEntropyLoss()
    opt_name = args.opt.lower()
    if (opt_name == 'sgd'):
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif (opt_name == 'rmsprop'):
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, eps=0.0316, alpha=0.9)
    else:
        raise RuntimeError('Invalid optimizer {}. Only SGD and RMSprop are supported.'.format(args.opt))
    # AMP must wrap model+optimizer before DDP wraps the model.
    if args.apex:
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.apex_opt_level)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    # Keep a handle to the unwrapped model for checkpoint save/load.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = (checkpoint['epoch'] + 1)
    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return
    print('Start training')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards differently each epoch.
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq, args.apex)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            # Save both a per-epoch snapshot and a rolling 'latest' checkpoint.
            checkpoint = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args}
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, 'checkpoint.pth'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
_arg_scope
def fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, scope=None):
    """TF-contrib-style fully connected layer.

    Builds a ``core_layers.Dense`` under a variable scope (mapping Keras
    'kernel'/'bias' variable names to contrib 'weights'/'biases'),
    optionally applies *normalizer_fn* (which replaces the bias) and then
    *activation_fn*, and records variables/outputs in the requested
    collections.

    Returns the layer output tensor.

    Raises:
        ValueError: if *num_outputs* is not an integer.
    """
    if (not isinstance(num_outputs, six.integer_types)):
        raise ValueError(('num_outputs should be int or long, got %s.' % (num_outputs,)))
    # Rename Keras-style variable names to the contrib convention.
    layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
    with variable_scope.variable_scope(scope, 'fully_connected', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # Bias is dropped when a normalizer is used (it would be redundant)
        # or when biases_initializer is falsy.
        layer = core_layers.Dense(units=num_outputs, activation=None, use_bias=((not normalizer_fn) and biases_initializer), kernel_initializer=weights_initializer, bias_initializer=biases_initializer, kernel_regularizer=weights_regularizer, bias_regularizer=biases_regularizer, activity_regularizer=None, trainable=trainable, name=sc.name, dtype=inputs.dtype.base_dtype, _scope=sc, _reuse=reuse)
        outputs = layer.apply(inputs)
        _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
        if (layer.bias is not None):
            _add_variable_to_collections(layer.bias, variables_collections, 'biases')
        # Normalization is applied before the activation, per convention.
        if (normalizer_fn is not None):
            if (not normalizer_params):
                normalizer_params = {}
            outputs = normalizer_fn(outputs, **normalizer_params)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests):
    """Fast tests for StableDiffusionControlNetInpaintPipeline with a 4-channel
    (non-inpaint-specialized) UNet; inherits the test bodies and overrides
    only the dummy components.
    """
    pipeline_class = StableDiffusionControlNetInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # No image params: this variant does not exercise image-batch handling.
    image_params = frozenset([])
    def get_dummy_components(self):
        """Build a minimal, deterministic set of pipeline components.

        The seed is reset before each component so every component's random
        initialization is reproducible independently of the others.
        """
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, 'image_encoder': None}
        return components
class Network(Dot):
    """A simulated network topology rendered as a DOT graph.

    Builds nodes and edges from a network-description dict (`net_desc`),
    allocates interface IPs, and configures traffic between source and
    destination nodes from `norm_desc`.
    """

    def __init__(self):
        Dot.__init__(self, 'SimConf', graph_type='graph')
        self.node_list = []

    def _init_addr_helper(self):
        # Seed the IPv4 address allocator from the configured base CIDR.
        ipv4_net_addr_base = self.net_desc.get('ipv4_net_addr_base', '10.0.7.4/24')
        (addr, network, mask) = CIDR_to_subnet_mask(ipv4_net_addr_base)
        base = get_net_addr(addr, mask)
        self.addr_helper = Ipv4AddressHelper(network, mask, base)

    def init(self, net_desc, norm_desc):
        """Build topology and traffic configuration from the description dicts."""
        self.net_desc = net_desc
        self.norm_desc = norm_desc
        self._init_addr_helper()
        self.Node = node_map[net_desc['node_type']]
        self.create_topology(self.net_desc['topo'])
        self.config_traffic(self.norm_desc['src_nodes'], self.norm_desc['dst_nodes'])

    def config_traffic(self, src_nodes, dst_nodes):
        """Attach traffic generators on each source node towards all destinations."""
        dst_node_list = [self.node_list[i] for i in dst_nodes]
        for i in src_nodes:
            self.node_list[i].init_traffic(self.norm_desc, dst_node_list)

    def create_topology(self, topo):
        """Create nodes and edges; *topo* is an adjacency list-of-lists or matrix."""
        def size(topo):
            return (len(topo) if isinstance(topo, list) else topo.shape[0])
        for i in xrange(size(topo)):
            node = self.Node([], i)
            self.node_list.append(node)
            self.add_node(node)
        def nonzero(topo):
            # (i, j) pairs for every nonzero adjacency entry, for either
            # a square list-of-lists or a numpy-style matrix.
            if isinstance(topo, list):
                n = len(topo)
                assert (n == len(topo[0]))
                return [(i, j) for i in xrange(n) for j in xrange(n) if topo[i][j]]
            else:
                (X, Y) = topo.nonzero()
                return zip(X.reshape((- 1)), Y.reshape((- 1)))
        for (i, j) in nonzero(topo):
            la_dft = self.net_desc['link_attr_default']
            link_attr_list = self.net_desc.get('link_attr', {}).get((i, j), la_dft)
            edge = NEdge(self.node_list[i], self.node_list[j], link_attr_list_to_map(link_attr_list))
            self.assign_link_interface_ip(i, j)
            self.add_edge(edge)

    def assign_link_interface_ip(self, i, j):
        """Assign interface IPs for link (i, j), honouring explicit overrides."""
        if_addr = self.net_desc.get('link_to_ip_map', {}).get((i, j), None)
        if (if_addr is not None):
            # BUG FIX: the original used "is not ''", which compares object
            # identity, not string equality; use != for value comparison.
            if (if_addr[0] != ''):
                self.node_list[i].add_interface_addr(if_addr[0])
            if (if_addr[1] != ''):
                self.node_list[j].add_interface_addr(if_addr[1])
            return
        # No explicit mapping: allocate a fresh subnet for this link.
        node_container = [self.node_list[i], self.node_list[j]]
        self.addr_helper.Assign(node_container)
        self.addr_helper.NewNetwork()

    def write(self, f_name):
        """Sync node state, dump the DOT file, then patch its quoting."""
        for node in self.node_list:
            node.sync()
        Dot.write(self, f_name)
        FixQuoteBug(f_name)

    def inject_anomaly(self, A):
        """Delegate anomaly injection to the anomaly object *A*."""
        A.run(self)
def random_batch(batch_size, train_data, singletons=None):
    """Sample a padded training batch of (words, tags, chars) sequences.

    Sequences are sorted by decreasing length (for packed-RNN consumption).
    Words appearing in *singletons* are replaced by index 1 (presumably the
    UNK id — confirm against the vocabulary) with probability 0.5.

    Args:
        batch_size: number of sentences to sample.
        train_data: list of dicts with 'words', 'tags' and 'chars' keys.
        singletons: optional collection of singleton word ids (default: none).

    Returns:
        (input_padded, input_lengths, target_padded, target_lengths,
         chars2_seqs_padded, chars2_seqs_lengths)
    """
    # Avoid the mutable-default-argument pitfall of the original `singletons=[]`.
    if singletons is None:
        singletons = ()
    input_seqs = []
    target_seqs = []
    chars2_seqs = []
    for i in range(batch_size):
        data = random.choice(train_data)
        words = []
        for word in data['words']:
            if ((word in singletons) and (np.random.uniform() < 0.5)):
                words.append(1)
            else:
                words.append(word)
        # BUG FIX: the original appended data['words'] here, silently
        # discarding the singleton replacement computed above.
        input_seqs.append(words)
        target_seqs.append(data['tags'])
        chars2_seqs.append(data['chars'])
    # Sort jointly by sentence length, longest first.
    seq_pairs = sorted(zip(input_seqs, target_seqs, chars2_seqs), key=(lambda p: len(p[0])), reverse=True)
    (input_seqs, target_seqs, chars2_seqs) = zip(*seq_pairs)
    chars2_seqs_lengths = []
    chars2_seqs_padded = []
    for chars2 in chars2_seqs:
        chars2_lengths = [len(c) for c in chars2]
        chars2_padded = [pad_seq(c, max(chars2_lengths)) for c in chars2]
        chars2_seqs_padded.append(chars2_padded)
        chars2_seqs_lengths.append(chars2_lengths)
    input_lengths = [len(s) for s in input_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
    target_lengths = [len(s) for s in target_seqs]
    assert (target_lengths == input_lengths)
    target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]
    return (input_padded, input_lengths, target_padded, target_lengths, chars2_seqs_padded, chars2_seqs_lengths)
def dwconv3x3(in_channels, out_channels, stride, bias=False):
    """3x3 depthwise convolution with same padding.

    Uses groups == out_channels; this assumes in_channels == out_channels
    for a true depthwise conv — TODO confirm callers always pass equal
    channel counts.
    """
    return nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=out_channels,
        bias=bias,
    )
def make_parser():
    """Build the speech-generation argument parser, adding the generator-type flag."""
    parser = options.get_speech_generation_parser()
    options.add_generation_args(parser)
    generator_choices = ['at_tts', 'at_s2s', 'nat_tts', 'nat_s2s']
    parser.add_argument(
        '--generator-type',
        type=str,
        choices=generator_choices,
        help='which type of generator to use',
    )
    return parser
class TestDenseLayout(QiskitTestCase):
    """Tests for the DenseLayout transpiler pass on a 20-qubit coupling map."""

    def setUp(self):
        # 20-qubit Tokyo device coupling map used by all tests below.
        self.cmap20 = FakeTokyo().configuration().coupling_map

    def test_5q_circuit_20q_coupling(self):
        """A 5-qubit circuit maps to the expected dense region of the 20q device."""
        qr = QuantumRegister(5, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[3])
        circuit.cx(qr[3], qr[4])
        circuit.cx(qr[3], qr[1])
        circuit.cx(qr[0], qr[2])
        dag = circuit_to_dag(circuit)
        pass_ = DenseLayout(CouplingMap(self.cmap20))
        pass_.run(dag)
        layout = pass_.property_set['layout']
        # Expected physical qubits are pinned to DenseLayout's deterministic
        # choice on the FakeTokyo map.
        self.assertEqual(layout[qr[0]], 11)
        self.assertEqual(layout[qr[1]], 10)
        self.assertEqual(layout[qr[2]], 6)
        self.assertEqual(layout[qr[3]], 5)
        self.assertEqual(layout[qr[4]], 0)

    def test_6q_circuit_20q_coupling(self):
        """Two 3-qubit registers map densely onto the 20q device."""
        qr0 = QuantumRegister(3, 'q0')
        qr1 = QuantumRegister(3, 'q1')
        circuit = QuantumCircuit(qr0, qr1)
        circuit.cx(qr0[0], qr1[2])
        circuit.cx(qr1[1], qr0[2])
        dag = circuit_to_dag(circuit)
        pass_ = DenseLayout(CouplingMap(self.cmap20))
        pass_.run(dag)
        layout = pass_.property_set['layout']
        self.assertEqual(layout[qr0[0]], 11)
        self.assertEqual(layout[qr0[1]], 10)
        self.assertEqual(layout[qr0[2]], 6)
        self.assertEqual(layout[qr1[0]], 5)
        self.assertEqual(layout[qr1[1]], 1)
        self.assertEqual(layout[qr1[2]], 0)
def merge_all_modules():
    """Merge every per-module dataset split into the combined ALL module.

    Refuses to run (exits with status 1) when the target directory already
    exists. The original repeated the identical read/merge/write sequence
    three times; collapsed into a loop over the split names.
    """
    modules = os.listdir(DATASET_DIR)
    print('Starting to merge {} modules.'.format(len(modules)))
    target_dir = os.path.join(DATASET_DIR, ALL_MODULE_NAME)
    if os.path.exists(target_dir):
        print('Merge module {} already exists?!'.format(target_dir))
        print('Exiting.')
        sys.exit(1)
    os.makedirs(target_dir)
    for split_name in ('extrapolate', 'interpolate', 'train'):
        print('Merging {} ...'.format(split_name))
        read = get_read_streams(name=split_name, modules=modules)
        write = get_write_stream(name=split_name)
        random_merge(read, write)
# NOTE(review): the bare '.parametrize(...)' below looks like a truncated
# '@pytest.mark.parametrize' decorator — confirm against the original file.
.parametrize('alpha_parameter', [0.5, 0.7, 0.1, 30.0])
def test_gradients_inverted_alpha(alpha_parameter):
    """Gradients through RevGrad should equal the plain network's gradients scaled by -alpha."""
    network = torch.nn.Sequential(torch.nn.Linear(5, 3), torch.nn.Linear(3, 1))
    # Identical weights, with a gradient-reversal layer appended.
    revnetwork = torch.nn.Sequential(copy.deepcopy(network), RevGrad(alpha=alpha_parameter))
    inp = torch.randn(8, 5)
    outp = torch.randn(8, 1)
    criterion = torch.nn.MSELoss()
    criterion(network(inp), outp).backward()
    criterion(revnetwork(inp), outp).backward()
    # Reversed gradient divided by -alpha must match the original gradient.
    for (p1, p2) in zip(network.parameters(), revnetwork.parameters()):
        assert torch.isclose(p1.grad, ((- p2.grad) / alpha_parameter)).all()
def _read_int_rows(path):
    """Read one whitespace-separated int vector per line; the file is closed on exit."""
    with open(path) as fin:
        return [np.array(line.strip().split(), dtype=int) for line in fin]


def evaluate_written_preds(gold_dir, prediction_dir):
    """Score written ABSA predictions against gold label files.

    Reads target/polarity/opinion files from both directories and delegates
    to `score`. The original leaked six file handles (bare `open` in list
    comprehensions); all reads now go through a context-managed helper.

    Returns:
        (f_aspect, f_opinion, acc_s, f_s, f_absa)
    """
    # ae_gold was originally a list of plain lists (not arrays); preserve that.
    ae_gold = [list(row) for row in _read_int_rows(os.path.join(gold_dir, 'target.txt'))]
    ae_pred = _read_int_rows(os.path.join(prediction_dir, 'target.txt'))
    sent_gold = _read_int_rows(os.path.join(gold_dir, 'target_polarity.txt'))
    # Collapse label 4 into 0 — presumably the "conflict"/ignore class; TODO confirm.
    sent_gold = [[(0 if (i == 4) else i) for i in sent] for sent in sent_gold]
    sent_pred = _read_int_rows(os.path.join(prediction_dir, 'target_polarity.txt'))
    opinion_gold = _read_int_rows(os.path.join(gold_dir, 'opinion.txt'))
    opinion_pred = _read_int_rows(os.path.join(prediction_dir, 'opinion.txt'))
    (f_aspect, acc_s, f_s, f_absa) = score(ae_gold, ae_pred, sent_gold, sent_pred, 0)
    (f_opinion, _, _, _) = score(opinion_gold, opinion_pred, sent_gold, sent_pred, 1)
    return (f_aspect, f_opinion, acc_s, f_s, f_absa)
class Human36mSkeleton(Skeleton):
    """17-joint Human3.6M skeleton with named keypoints in dataset order."""

    def __init__(self, parents, joints_left, joints_right):
        super().__init__(parents, joints_left, joints_right)
        # Keypoint names in Human3.6M order; kpt_idx is the identity mapping.
        self.kpt_name = (
            'mid_hip right_hip right_knee right_ankle '
            'left_hip left_knee left_ankle '
            'mid_spine neck chin head '
            'left_shoulder left_elbow left_wrist '
            'right_shoulder right_elbow right_wrist'
        ).split()
        self.kpt_idx = list(range(17))
def get_cmd_prefix(core_list):
    """Build an OMP/numactl prefix pinning the process to *core_list*.

    *core_list* is expected to be a numpy integer array (uses .astype) —
    TODO confirm against callers. Note the returned string ends with a
    trailing space so a command can be appended directly.
    """
    cores = ','.join(core_list.astype(str))
    return 'OMP_NUM_THREADS={} numactl --localalloc --physcpubind={} '.format(len(core_list), cores)
def predict():
    """Run the trained network over the DRIVE images and save each prediction.

    Side effects only: loads the model and dataset via project helpers,
    center-crops each image/label pair, runs inference on CUDA, and writes
    one '<stem>_pred' file per input via save_prediction.
    """
    net = load_net()
    (images, labels) = load_drive()
    transform = transforms.Compose([transforms.ToTensor()])
    with torch.no_grad():
        net.eval()
        for i in range(len(images)):
            print(images[i])
            # Output stem = basename minus its 4-character extension
            # (assumes paths like '.../name.png' — TODO confirm).
            name_list = images[i].split('/')
            index = name_list[(- 1)][:(- 4)]
            image = Image.open(images[i])
            label = Image.open(labels[i])
            (image, label) = center_crop(image, label)
            image = transform(image).cuda()
            image = image.unsqueeze(0)  # add batch dimension
            output = net(image)
            save_prediction(output, filename=(index + '_pred'))
    print('output saving successfully')
def get_apu_version(enable_apu, android_ver, target_soc):
    """Select the APU driver version for a MediaTek SoC / Android combination.

    Returns -1 when APU support is disabled. Otherwise the version depends on
    the Android major version and the (case-insensitive) SoC name:
      * Android <= 10: 1 for mt67xx chips, else 2
      * Android == 11: 4 for mt689x or mt6877, else 3
      * Android >= 12: 4
    """
    if not enable_apu:
        return -1
    version = int(android_ver)
    soc = target_soc.lower()
    if version <= 10:
        return 1 if soc.startswith('mt67') else 2
    if version == 11:
        return 4 if (soc.startswith('mt689') or soc == 'mt6877') else 3
    return 4
class HumanoidRandDirecEnv(MetaEnv, gym.utils.EzPickle, MujocoEnv):
    """Humanoid MuJoCo env whose meta-task is a random run direction (+/-1 along x)."""

    def __init__(self):
        # Sample a task before MujocoEnv.__init__, which may trigger a reset
        # that reads self.goal_direction.
        self.set_task(self.sample_tasks(1)[0])
        MujocoEnv.__init__(self, 'humanoid.xml', 5)
        gym.utils.EzPickle.__init__(self)

    def sample_tasks(self, n_tasks):
        # Each task is a scalar direction: -1.0 or +1.0.
        return np.random.choice(((- 1.0), 1.0), (n_tasks,))

    def set_task(self, task):
        self.goal_direction = task

    def get_task(self):
        return self.goal_direction

    def _get_obs(self):
        # Positions (minus the 2 global x/y coords), velocities, inertias,
        # body velocities, actuator forces and external contact forces.
        data = self.sim.data
        return np.concatenate([data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat, data.qfrc_actuator.flat, data.cfrc_ext.flat])

    def step(self, a):
        pos_before = mass_center(self.model, self.sim)[0]
        self.do_simulation(a, self.frame_skip)
        pos_after = mass_center(self.model, self.sim)[0]
        alive_bonus = 5.0
        data = self.sim.data
        # Velocity reward is signed by the sampled goal direction.
        lin_vel_cost = (((0.25 * self.goal_direction) * (pos_after - pos_before)) / self.model.opt.timestep)
        quad_ctrl_cost = (0.1 * np.square(data.ctrl).sum())
        quad_impact_cost = (5e-07 * np.square(data.cfrc_ext).sum())
        quad_impact_cost = min(quad_impact_cost, 10)  # clip the impact penalty
        reward = (((lin_vel_cost - quad_ctrl_cost) - quad_impact_cost) + alive_bonus)
        qpos = self.sim.data.qpos
        # Episode ends when torso height leaves [1.0, 2.0] (fell over / launched).
        done = bool(((qpos[2] < 1.0) or (qpos[2] > 2.0)))
        return (self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=(- quad_ctrl_cost), reward_alive=alive_bonus, reward_impact=(- quad_impact_cost)))

    def reset_model(self):
        # Small uniform noise around the initial pose and velocities.
        c = 0.01
        self.set_state((self.init_qpos + self.np_random.uniform(low=(- c), high=c, size=self.model.nq)), (self.init_qvel + self.np_random.uniform(low=(- c), high=c, size=self.model.nv)))
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = (self.model.stat.extent * 1.0)
        self.viewer.cam.elevation = (- 20)
def extract_features(in_audios, out_files, deepspeech_pb_path, metainfo_file_path=None):
    """Extract DeepSpeech features for a list of audio files.

    Args:
        in_audios: list of input audio file paths.
        out_files: list of output .npy paths; empty entries are filled in
            place with '<audio stem>.npy'.
        deepspeech_pb_path: path to the DeepSpeech frozen graph.
        metainfo_file_path: optional TSV with a 'Count' column giving the
            frame count per audio file; None means no frame-count constraint.
    """
    if (metainfo_file_path is None):
        num_frames_info = ([None] * len(in_audios))
    else:
        # BUG FIX: np.int / np.unicode were removed from NumPy (1.20
        # deprecation, removed in 1.24); the builtin int/str are what those
        # aliases pointed to anyway.
        train_df = pd.read_csv(metainfo_file_path, sep='\t', index_col=False, dtype={'Id': int, 'File': str, 'Count': int})
        num_frames_info = train_df['Count'].values
        assert (len(num_frames_info) == len(in_audios))
    # Default each missing output path to the audio's stem with .npy.
    for (i, in_audio) in enumerate(in_audios):
        if (not out_files[i]):
            (file_stem, _) = os.path.splitext(in_audio)
            out_files[i] = (file_stem + '.npy')
    conv_audios_to_deepspeech(audios=in_audios, out_files=out_files, num_frames_info=num_frames_info, deepspeech_pb_path=deepspeech_pb_path)
class TestMXNetModel(unittest.TestCase):
    """Round-trip save/load tests for the MXNet model adapter."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass/tearDownClass on the CLASS, so
        # they must be classmethods; the original defined them as instance
        # methods, which raises a TypeError when unittest calls them.
        if (platform.system().lower() == 'windows'):
            # Class-level skip: skipTest is an instance method and is not
            # available here, so raise SkipTest directly.
            raise unittest.SkipTest('not support mxnet on windows yet')
        import mxnet as mx
        import mxnet.gluon.nn as nn
        # Small hybrid MLP, initialized and traced once with fake data so it
        # can be exported symbolically.
        net = nn.HybridSequential()
        net.add(nn.Dense(128, activation='relu'))
        net.add(nn.Dense(64, activation='relu'))
        net.add(nn.Dense(10))
        net.initialize()
        net.hybridize()
        fake_data = mx.random.uniform(shape=(1, 128, 128))
        net(fake_data)
        cls.net = net

    @classmethod
    def tearDownClass(cls):
        # Remove the artifacts written by test_model's two save() calls.
        os.remove('test-symbol.json')
        os.remove('test-0000.params')
        os.remove('test2-symbol.json')
        os.remove('test2-0000.params')

    def test_model(self):
        import mxnet as mx
        self.assertEqual('mxnet', get_model_fwk_name(self.net))
        model = MODELS['mxnet'](self.net)
        self.assertEqual(True, isinstance(model, MXNetModel))
        self.assertEqual(True, isinstance(model.model, mx.gluon.HybridBlock))
        # Save the gluon block, then reload it as a symbol/params pair and
        # make sure the wrapper can save that form too.
        model.save('./test')
        self.assertEqual(True, os.path.exists('test-symbol.json'))
        self.assertEqual(True, os.path.exists('test-0000.params'))
        net = load_mxnet_model('test-symbol.json', 'test-0000.params')
        model.model = net
        self.assertEqual(True, isinstance(model.model[0], mx.symbol.Symbol))
        model.save('./test2')
        self.assertEqual(True, os.path.exists('test2-symbol.json'))
        self.assertEqual(True, os.path.exists('test2-0000.params'))
# NOTE(review): the two bare lines below look like truncated decorators —
# presumably '@pytest.mark.xfail(...)' and '@pytest.mark.parametrize(...)';
# confirm against the original file.
.xfail(reason='torch.as_strided is not supported by ONNX')
.parametrize('training', [True, False, None])
def test_cplx_interleaved_casting_onnx_export(training):
    """Round-trip the interleaved-real <-> complex casting stack through ONNX."""
    module = torch.nn.Sequential(casting.InterleavedRealToCplx(), nn.CplxIdentity(), casting.CplxToInterleavedReal())
    input = torch.randn(2, 16, 256)
    # float32 path: export and inference should work.
    do_onnx_export_test(module.float(), input.float(), training=training)
    do_onnx_inference_test(module.float(), input.float(), training=training)
    # NOTE(review): pytest.xfail() raises immediately and is not a context
    # manager — presumably pytest.raises or pytest.warns was intended; confirm.
    with pytest.xfail(reason='double is not implemented in ONNX'):
        do_onnx_export_test(module.double(), input.double(), training=training)
    with pytest.xfail(reason='double is not implemented in ONNX'):
        do_onnx_inference_test(module.double(), input.double(), training=training)
def data_split(src_list, sample_size=550):
    """Return *sample_size* distinct random indices into *src_list*.

    Generalized from a hard-coded sample of 550 (kept as the default, so
    existing callers are unaffected).

    Raises:
        ValueError: (from random.sample) if len(src_list) < sample_size.
    """
    return random.sample(range(len(src_list)), sample_size)
def extract_file(downloaded_file, extract_folder, get_extract_name=get_extract_name, debug=False):
    """Extract *downloaded_file* into its own folder under *extract_folder*.

    A DONE marker file makes the extraction idempotent: a completed run is
    detected and skipped. Returns the extraction directory.
    """
    extract_name = get_extract_name(downloaded_file)
    extract_to = f'{extract_folder}/{extract_name}'
    os.makedirs(extract_to, exist_ok=True)
    if os.path.exists(f'{extract_to}/DONE'):
        print(f'{downloaded_file} has already been extracted to {extract_to} so skip')
        return extract_to

    def get_extract_cmd(filename):
        # NOTE(review): the archive-path placeholders in these commands were
        # garbled to '(unknown)' in the original; restored as {filename} —
        # confirm against the original source.
        if (filename.endswith('.tgz') or filename.endswith('tar.gz')):
            return f'tar xzfv {filename} -C {extract_to}'
        elif filename.endswith('.gz.tar'):
            return f'tar xfv {filename} -C {extract_to}; (cd {extract_to}; gzip -d *.gz; [ $? -eq 0 ] || gzip -d */*.gz)'
        elif filename.endswith('.tar'):
            return f'tar xfv {filename} -C {extract_to}'
        elif filename.endswith('.gz'):
            return f'cp {filename} {extract_to}; (cd {extract_to}; gzip -d *.gz)'
        elif filename.endswith('.zip'):
            return f'unzip {filename} -d {extract_to}'
        # BUG FIX: the original fell through and returned None, which crashed
        # later inside call(); fail fast with a clear message instead.
        raise ValueError(f'unsupported archive format: {filename}')

    extract_cmd = get_extract_cmd(downloaded_file)
    print(f'extracting {downloaded_file}')
    if isinstance(extract_cmd, list):
        for c in extract_cmd:
            call(c, debug=debug)
    else:
        call(extract_cmd, debug=debug)
    # Mark completion so re-runs skip this archive.
    call(f'echo DONE > {extract_to}/DONE')
    return extract_to
# NOTE(review): the bare call below looks like a truncated decorator —
# presumably '@op_registry(op_types=...)' or similar; confirm against the
# original file.
_registry(op_types='QLinearAdd, QLinearMul')
class QBinaryOperator(QOperator):
    """Rewrites QLinearAdd/QLinearMul as DequantizeLinear -> Add/Mul -> QuantizeLinear."""

    def __init__(self, onnx_node, children, initializers):
        super().__init__(onnx_node, children, initializers)

    def convert(self):
        """Return (success, nodes_to_add, initializers_to_add) for the rewrite."""
        node = self.node
        add_nodes = []
        inits = []
        # Inputs 0-2 and 3-5 are (tensor, scale, zero_point) for operands A and B.
        in_dq1 = onnx.helper.make_node('DequantizeLinear', node.input[:3], [(node.name + '_in_dequant1')], (node.name + '_in_dequant1'))
        in_dq2 = onnx.helper.make_node('DequantizeLinear', node.input[3:6], [(node.name + '_in_dequant2')], (node.name + '_in_dequant2'))
        inputs = [(node.name + '_in_dequant1'), (node.name + '_in_dequant2')]
        add_nodes.extend([in_dq1, in_dq2])
        # Inputs 6-7 are the output scale / zero_point; re-quantize the float result.
        out_q = onnx.helper.make_node('QuantizeLinear', [(node.name + '_out'), node.input[6], node.input[7]], node.output, (node.name + '_out_quant'))
        outputs = [(node.name + '_out')]
        add_nodes.append(out_q)
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(attribute_to_kwarg(attribute))
        # 'QLinearAdd' -> 'Add', 'QLinearMul' -> 'Mul'.
        binary_node = onnx.helper.make_node(node.op_type.split('QLinear')[(- 1)], inputs, outputs, (node.name + '_convert'), **kwargs)
        add_nodes.append(binary_node)
        return (True, add_nodes, inits)
def _set_object(world, pos, player, tunnels):
    """Decide which object (if any) to place at *pos* in the world.

    NOTE(review): the body appears truncated — it computes the player
    distance and local material, then does nothing with them (and `uniform`
    and `tunnels` are unused); confirm against the original source.
    """
    (x, y) = pos
    uniform = world.random.uniform  # unused here; presumably used by the missing tail
    dist = np.sqrt((((x - player.pos[0]) ** 2) + ((y - player.pos[1]) ** 2)))
    (material, _) = world[(x, y)]
    if (material not in constants.walkable):
        pass
def extract_instruction_tokens(observations: List[Dict], instruction_sensor_uuid: str, tokens_uuid: str = 'tokens') -> Dict[str, Any]:
    """Replace each observation's instruction dict with its token list, in place.

    Returns *observations* unchanged when the sensor is absent from the first
    observation or is the pointgoal sensor. Stops at the first observation
    whose entry is not a dict containing *tokens_uuid* (presumably meaning the
    batch was already unpacked).
    """
    if (instruction_sensor_uuid not in observations[0]) or (instruction_sensor_uuid == 'pointgoal_with_gps_compass'):
        return observations
    for obs in observations:
        entry = obs[instruction_sensor_uuid]
        if isinstance(entry, dict) and tokens_uuid in entry:
            obs[instruction_sensor_uuid] = entry['tokens']
        else:
            break
    return observations
def export_split(split, src, dst, overwrite=False):
    """Export every sequence of *split* from *src* to *dst* with 8 workers.

    Each worker receives one (seq_dir, dst_dir, overwrite) triple via
    export_seq; tqdm only tracks completion. Returns an empty dict
    (presumably for a caller that aggregates per-split stats — confirm).
    """
    print(f'-> Exporting "{split}" split...')
    dst = (dst / split)
    io.mkdirs(dst)
    seqs = io.get_dirs((src / split))
    # Parallel argument lists: one destination and one overwrite flag per sequence.
    dsts = [(dst / s.stem) for s in seqs]
    ovs = [overwrite for _ in seqs]
    with Pool(8) as p:
        for _ in tqdm(p.imap_unordered(export_seq, zip(seqs, dsts, ovs)), total=len(seqs)):
            pass
    return {}
class MCmodel(nn.Module):
    """Stacked (bi)LSTM tagger with MC-dropout uncertainty sampling.

    Dropout (via add_dropout) is applied on every forward pass — including
    eval mode — so repeated passes in MC_sampling form a Monte-Carlo estimate
    of the predictive distribution.
    """

    def __init__(self, data):
        super(MCmodel, self).__init__()
        self.gpu = data.HP_gpu
        self.use_char = data.use_char
        # Dropout rates: before the final FC projection, and before each LSTM layer.
        self.model1_fc_dropout = data.HP_model1_dropout
        self.model1_in_dropout = data.HP_bayesian_lstm_dropout[0]
        self.bilstm_flag = data.HP_bilstm
        self.hidden_dim = data.HP_hidden_dim
        self.wordrep = WordRep(data)
        self.input_size = self.wordrep.total_size
        # A biLSTM splits the hidden size across its two directions.
        if self.bilstm_flag:
            lstm_hidden = (data.HP_hidden_dim // 2)
        else:
            lstm_hidden = data.HP_hidden_dim
        # First layer consumes the word representation; later layers consume
        # the (direction-concatenated) hidden_dim output of the previous one.
        self.lstms = nn.ModuleList([nn.LSTM(self.input_size, lstm_hidden, num_layers=1, batch_first=True, bidirectional=self.bilstm_flag)])
        for _ in range((data.HP_model1_layer - 1)):
            self.lstms.append(nn.LSTM(data.HP_hidden_dim, lstm_hidden, num_layers=1, batch_first=True, bidirectional=self.bilstm_flag))
        self.hidden2tag = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
        if self.gpu:
            self.lstms = self.lstms.cuda()
            self.hidden2tag = self.hidden2tag.cuda()

    def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """Full pass: embed words, then run the LSTM stack and tag projection."""
        word_represent = self.forward_word(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        return self.forward_rest(word_represent, word_seq_lengths)

    def forward_word(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """Embedding stage only: word/char/feature representation from WordRep."""
        word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        return word_represent

    def forward_rest(self, word_represent, word_seq_lengths):
        """LSTM stack + tag scores.

        Returns (softmax probs, lstm hidden states, raw logits, input
        representation). pack_padded_sequence needs length-sorted batches:
        at eval time we sort here and undo the sort afterwards; at train time
        the caller presumably supplies pre-sorted batches — confirm.
        """
        if (not self.training):
            (ordered_lens, index) = word_seq_lengths.sort(descending=True)
            ordered_x = word_represent[index]
        else:
            (ordered_x, ordered_lens) = (word_represent, word_seq_lengths)
        for (i, lstm) in enumerate(self.lstms):
            # Dropout before every layer, applied regardless of train/eval
            # mode (this is what makes MC sampling stochastic).
            ordered_x = add_dropout(ordered_x, self.model1_in_dropout)
            pack_input = pack_padded_sequence(ordered_x, ordered_lens, batch_first=True)
            (pack_output, _) = lstm(pack_input)
            (ordered_x, _) = pad_packed_sequence(pack_output, batch_first=True)
        if (not self.training):
            # Restore the original batch order.
            recover_index = index.argsort()
            lstm_out = ordered_x[recover_index]
        else:
            lstm_out = ordered_x
        h2t_in = add_dropout(lstm_out, self.model1_fc_dropout)
        outs = self.hidden2tag(h2t_in)
        p = F.softmax(outs, (- 1))
        return (p, lstm_out, outs, word_represent)

    def MC_sampling(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, mc_steps):
        """Monte-Carlo dropout: run forward_rest mc_steps times and average.

        The batch is tiled mc_steps times along dim 0 so all samples run in a
        single pass, then the outputs are reshaped and averaged over samples.
        """
        word_represent = self.forward_word(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        (batch, max_seq_len) = word_represent.size()[:2]
        _word_represent = word_represent.repeat(([mc_steps] + [1 for _ in range(1, len(word_represent.size()))]))
        _word_seq_lengths = word_seq_lengths.repeat(([mc_steps] + [1 for _ in range(1, len(word_seq_lengths.size()))]))
        (p, lstm_out, outs, _) = self.forward_rest(_word_represent, _word_seq_lengths)
        # Average each output over the mc_steps sample axis.
        p = p.reshape(mc_steps, batch, max_seq_len, (- 1)).mean(0)
        lstm_out = lstm_out.reshape(mc_steps, batch, max_seq_len, (- 1)).mean(0)
        outs = outs.reshape(mc_steps, batch, max_seq_len, (- 1)).mean(0)
        return (p, lstm_out, outs, word_represent)
class ClicEdmSinglePi0HitsPf(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for the CLIC EDM4HEP single-pi0 calorimeter-hits PF dataset."""
    VERSION = tfds.core.Version('1.5.0')
    RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
    MANUAL_DOWNLOAD_INSTRUCTIONS = '\n    For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n    The processed tensorflow_dataset can also be downloaded from:\n    FIXME\n    '

    def __init__(self, *args, **kwargs):
        # Force ARRAY_RECORD storage regardless of the caller's preference.
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(ClicEdmSinglePi0HitsPf, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        # X rows are padded to the wider of the track/calohit feature sets so
        # both element types share one tensor.
        return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        # Manual download: samples live under <manual_dir>/pi0/.
        path = dl_manager.manual_dir
        return split_sample(Path((path / 'pi0/')))

    def _generate_examples(self, files):
        return generate_examples(files)
def make_roi_mask_predictor(cfg, in_channels):
    """Instantiate the mask predictor named in the config from the registry."""
    predictor_cls = registry.ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
class BaselineImageImputer(ImageImputer):
    """Imputes masked-out superpixels with a fixed baseline image before model evaluation."""

    def __init__(self, model, baseline, width, height, superpixel_size, link=None):
        super().__init__(width, height, superpixel_size)
        self.model = model
        self.baseline = baseline
        # The link maps raw model outputs into the evaluation space; identity
        # by default, otherwise any nn.Module supplied by the caller.
        if link is None:
            self.link = nn.Identity()
        elif isinstance(link, nn.Module):
            self.link = link
        else:
            raise ValueError('unsupported link function: {}'.format(link))

    def __call__(self, x, S):
        # Blend: keep x where the (resized) mask is 1, baseline where it is 0.
        mask = self.resize(S)
        blended = mask * x + (1 - mask) * self.baseline
        return self.link(self.model(blended))
def get_key_to_ground_truth(data):
    """Map QuestionId -> Answer for Wikipedia-domain data; defer to get_qd_to_answer otherwise."""
    if data['Domain'] != 'Wikipedia':
        return get_qd_to_answer(data)
    return {item['QuestionId']: item['Answer'] for item in data['Data']}
class tracker():
    """Running-average tracker for a fixed set of named scalar metrics."""
    # NOTE(review): the bare '_init_args' below looks like a truncated
    # decorator on __init__ (presumably one that stores the constructor
    # arguments). As written, self.names is never assigned, so reset() would
    # raise AttributeError — confirm against the original source.
    _init_args
    def __init__(self, names):
        assert (len(names) > 0)
        self.reset()

    def __getitem__(self, name):
        # Running mean of *name*; 0 when nothing has been recorded yet.
        return ((self.values.get(name, 0) / self.counter) if self.counter else 0)

    def __len__(self):
        return len(self.names)

    def reset(self):
        """Zero all accumulators and restart the wall-clock timer."""
        self.values = dict({name: 0.0 for name in self.names})
        self.counter = 0
        self.create_time = time.time()

    def update(self, named_values, count):
        """Accumulate a batch of values, each weighted by *count*.

        NOTE(review): `value.data.cpu().numpy()[0]` is the pre-0.4 PyTorch
        idiom for reading a scalar tensor; modern tensors would need .item().
        """
        self.counter += count
        for (name, value) in named_values.items():
            self.values[name] += (value.data.cpu().numpy()[0] * count)

    def summarize(self, output=''):
        """Append 'name: mean' pairs and elapsed time to *output* and return it."""
        if output:
            output += ', '
        for name in self.names:
            output += '{}: {:.3f}, '.format(name, ((self.values[name] / self.counter) if self.counter else 0))
        output += 'elapsed time: {:.1f}(s)'.format((time.time() - self.create_time))
        return output

    def stats(self):
        """Return {name: running mean} for all tracked metrics."""
        return {n: ((v / self.counter) if self.counter else 0) for (n, v) in self.values.items()}
# NOTE(review): the bare call below looks like a truncated decorator —
# presumably '@ROI_HEADS_REGISTRY.register()'; confirm against the original.
_HEADS_REGISTRY.register()
class PointRendROIHeads(StandardROIHeads):
    """StandardROIHeads variant with backward-compat shims for old PointRend checkpoints/configs."""
    _version = 2

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Rename pre-v2 checkpoint keys in place (mask_point_head/mask_coarse_head moved under mask_head)."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            logger = logging.getLogger(__name__)
            logger.warning('Weight format of PointRend models have changed! Please upgrade your models. Applying automatic conversion now ...')
            for k in list(state_dict.keys()):
                newk = k
                if k.startswith((prefix + 'mask_point_head')):
                    newk = k.replace((prefix + 'mask_point_head'), (prefix + 'mask_head.point_head'))
                if k.startswith((prefix + 'mask_coarse_head')):
                    newk = k.replace((prefix + 'mask_coarse_head'), (prefix + 'mask_head.coarse_head'))
                if (newk != k):
                    # Move, don't copy: the old key must disappear.
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]

    # NOTE(review): this takes `cls` but no '@classmethod' decorator is
    # visible — presumably truncated like the registry decorator above.
    def _init_mask_head(cls, cfg, input_shape):
        """Rewrite old CoarseMaskHead configs to the merged PointRendMaskHead before init."""
        if (cfg.MODEL.MASK_ON and (cfg.MODEL.ROI_MASK_HEAD.NAME != 'PointRendMaskHead')):
            logger = logging.getLogger(__name__)
            logger.warning('Config of PointRend models have changed! Please upgrade your models. Applying automatic conversion now ...')
            assert (cfg.MODEL.ROI_MASK_HEAD.NAME == 'CoarseMaskHead')
            # Configs are frozen by default; unlock, patch, re-freeze.
            cfg.defrost()
            cfg.MODEL.ROI_MASK_HEAD.NAME = 'PointRendMaskHead'
            cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE = ''
            cfg.freeze()
        return super()._init_mask_head(cfg, input_shape)
class Block35(nn.Module):
    """Inception-ResNet 35x35 residual block.

    Three parallel branches (1x1; 1x1->3x3; 1x1->3x3->3x3) are concatenated,
    projected back to 256 channels, scaled, added to the input, and ReLU'd.
    """

    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = scale  # residual scaling factor
        self.branch0 = BasicConv2d(256, 32, kernel_size=1, stride=1)
        self.branch1 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv2d(256, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
        )
        # 96 = 3 branches x 32 channels each.
        self.conv2d = nn.Conv2d(96, 256, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        branches = torch.cat((self.branch0(x), self.branch1(x), self.branch2(x)), 1)
        residual = self.conv2d(branches)
        return self.relu(residual * self.scale + x)
def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):
    """Assemble an LR-ASPP segmentation head on a dilated MobileNetV3 backbone."""
    backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, _dilated=True).features
    # Stage boundaries: input layer, every block flagged as a C-n reduction
    # (_is_cn), and the final layer.
    stage_indices = [0]
    stage_indices += [idx for idx, block in enumerate(backbone) if getattr(block, '_is_cn', False)]
    stage_indices.append(len(backbone) - 1)
    low_pos = stage_indices[-4]   # low-level feature stage
    high_pos = stage_indices[-1]  # high-level feature stage
    low_channels = backbone[low_pos].out_channels
    high_channels = backbone[high_pos].out_channels
    backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'})
    return LRASPP(backbone, low_channels, high_channels, num_classes)
class AbstractDataManager(metaclass=abc.ABCMeta):
    """Abstract container bundling datasets, metadata and preprocessing state.

    BUG FIX: the original had Python-2 style `__metaclass__ = abc.ABCMeta`
    (a no-op in Python 3) and its accessor methods had evidently lost their
    `@property`/setter decorators (the bare `_type.setter` line and the
    duplicate `encoder` definition, plus `__repr__` concatenating `self.name`
    as a string, all point to stripped decorators); both restored here.
    """

    def __init__(self, name: str):
        self._data = dict()   # subset name -> array-like data
        self._info = dict()   # free-form metadata
        self._name = name

    @property
    def name(self) -> str:
        return self._name

    @property
    def data(self) -> Dict[str, np.ndarray]:
        return self._data

    @property
    def info(self) -> Dict[str, Any]:
        return self._info

    @property
    def feat_type(self) -> Dict[Union[str, int], str]:
        return self._feat_type

    @feat_type.setter
    def feat_type(self, value: Dict[Union[str, int], str]) -> None:
        self._feat_type = value

    @property
    def encoder(self) -> 'DataPreprocessor':
        return self._encoder

    @encoder.setter
    def encoder(self, value: 'DataPreprocessor') -> None:
        self._encoder = value

    def __repr__(self) -> str:
        return ('DataManager : ' + self.name)

    def __str__(self) -> str:
        val = (('DataManager : ' + self.name) + '\ninfo:\n')
        for item in self.info:
            val = (((((val + '\t') + item) + ' = ') + str(self.info[item])) + '\n')
        val = (val + 'data:\n')
        for subset in self.data:
            val = (val + ('\t%s = %s %s %s\n' % (subset, type(self.data[subset]), str(self.data[subset].shape), str(self.data[subset].dtype))))
            if isinstance(self.data[subset], scipy.sparse.spmatrix):
                val = (val + ('\tdensity: %f\n' % ((float(len(self.data[subset].data)) / self.data[subset].shape[0]) / self.data[subset].shape[1])))
        val = (((val + 'feat_type:\t') + str(self.feat_type)) + '\n')
        return val
def main():
    """CLI entry point: convert official JAX ViT weights (.npz) into an MMSegmentation checkpoint."""
    parser = argparse.ArgumentParser(description='Convert keys from jax official pretrained vit models to MMSegmentation style.')
    parser.add_argument('src', help='src model path or url')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    jax_weights = np.load(args.src)
    # npz entries -> torch tensors, keyed by the original JAX names.
    jax_weights_tensor = {name: torch.from_numpy(jax_weights[name]) for name in jax_weights.files}
    # ViT-L/16 checkpoints have 24 transformer layers; others handled here have 12.
    num_layer = 24 if ('L_16-i21k' in args.src) else 12
    torch_weights = vit_jax_to_torch(jax_weights_tensor, num_layer)
    mmcv.mkdir_or_exist(osp.dirname(args.dst))
    torch.save(torch_weights, args.dst)
class Convolution(nn.Module):
    """3x3 same-padding convolution followed by an in-place ReLU."""

    def __init__(self, c_in, c_out):
        super().__init__()
        self.conv = nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        features = self.conv(x)
        return self.relu(features)
def test_grid_to_int_index_wrong_shape(data):
    """grid_to_int_index must raise ValueError for index arrays of the wrong shape.

    *data* is presumably a fixture exposing an archive and valid grid_indices.
    """
    with pytest.raises(ValueError):
        # Drop the last coordinate so the shape no longer matches the grid rank.
        data.archive.grid_to_int_index([data.grid_indices[:(- 1)]])
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from *new_eval* into *main_eval* under '<prefix>_<key>' (in place)."""
    main_eval.update({f'{prefix}_{key}': value for key, value in new_eval.items()})
class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject):
    """Placeholder raised when torch is unavailable (transformers-style auto-generated dummy).

    Any instantiation defers to requires_backends, which raises an error
    naming the missing 'torch' backend.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """HalfCheetah variant whose forward reward uses finite-difference x-velocity.

    Uses the pre-1.0 mujoco-py API (self.model.data, _step) — this code
    presumably targets old gym/mujoco-py versions.
    """

    def __init__(self):
        self.prev_qpos = None
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mujoco_env.MujocoEnv.__init__(self, ('%s/assets/half_cheetah.xml' % dir_path), 5)
        utils.EzPickle.__init__(self)

    def _step(self, action):
        # Remember qpos before stepping so _get_obs can finite-difference it.
        self.prev_qpos = np.copy(self.model.data.qpos.flat)
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        reward_ctrl = ((- 0.1) * np.square(action).sum())
        # ob[0] is the finite-difference forward velocity; the 0.0 term keeps
        # a disabled pitch penalty visible in the formula.
        reward_run = (ob[0] - (0.0 * np.square(ob[2])))
        reward = (reward_run + reward_ctrl)
        done = False
        return (ob, reward, done, {})

    def _get_obs(self):
        # [x-velocity (finite difference), remaining positions, velocities].
        return np.concatenate([((self.model.data.qpos.flat[:1] - self.prev_qpos[:1]) / self.dt), self.model.data.qpos.flat[1:], self.model.data.qvel.flat])

    def reset_model(self):
        # NOTE(review): uses the global np.random rather than self.np_random,
        # so env seeding does not control reset noise — confirm if intended.
        qpos = (self.init_qpos + np.random.normal(loc=0, scale=0.001, size=self.model.nq))
        qvel = (self.init_qvel + np.random.normal(loc=0, scale=0.001, size=self.model.nv))
        self.set_state(qpos, qvel)
        self.prev_qpos = np.copy(self.model.data.qpos.flat)
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.distance = (self.model.stat.extent * 0.25)
        self.viewer.cam.elevation = (- 55)
def Brightness(img, v):
    """Adjust image brightness by factor *v* in [0.1, 1.9] (1.0 = unchanged)."""
    assert (0.1 <= v <= 1.9)
    enhancer = PIL.ImageEnhance.Brightness(img)
    return enhancer.enhance(v)
class GradientAggregationOptimizer(tf.train.Optimizer):
    """TF1 optimizer wrapper that accumulates gradients over ``grad_steps``
    mini-batches and applies them once per cycle (gradient accumulation).

    NOTE(review): there are two distinct apply paths — a TPU path gated on
    ``use_tpu`` (private step counter) and a distribution-strategy
    ``merge_call`` path — confirm both are exercised by callers.
    """

    def __init__(self, opt: tf.train.Optimizer, grad_steps: int, apply_crs_to_grad=False, xla_num_partitions=None, use_tpu=False):
        # The wrapped inner optimizer that performs the real parameter update.
        self._opt = opt
        # Number of accumulation steps between applications.
        self._grad_steps = grad_steps
        # Lazily created TPU step counter (see _create_slots).
        self._counter = None
        self._use_tpu = use_tpu
        # Whether to cross-replica-sum the accumulated grads before applying.
        self._apply_crs_to_grad = apply_crs_to_grad
        # When set, accumulator reads are annotated with XLA sharding.
        self._xla_num_partitions = xla_num_partitions
        self.strategy = tf.distribute.get_strategy()

    def _create_slots(self, var_list):
        # One 'grad_accum' slot per variable holds the running gradient sum.
        if (self._use_tpu and (not self._counter)):
            self._counter = tf.get_variable(shape=[], initializer=tf.zeros_initializer, name='update_count')
        for v in var_list:
            self._opt._zeros_slot(v, 'grad_accum', 'GradientAccumulator')

    def compute_gradients(self, loss, var_list, **kwargs):
        # Delegate straight to the wrapped optimizer.
        return self._opt.compute_gradients(loss, var_list, **kwargs)

    def _sharding(self, x):
        """Annotate *x* with an XLA split sharding when partitioning is on."""
        if self._xla_num_partitions:
            # Rank-3 tensors are split along axis 1.
            if (len(x.get_shape()) == 3):
                x = xla_sharding.split(x, 1, self._xla_num_partitions, use_sharding_op=True)
            # Rank-2 tensors are split along their larger axis.
            if (len(x.get_shape()) == 2):
                if (x.get_shape().as_list()[0] < x.get_shape().as_list()[1]):
                    x = xla_sharding.split(x, 1, self._xla_num_partitions, use_sharding_op=True)
                else:
                    x = xla_sharding.split(x, 0, self._xla_num_partitions, use_sharding_op=True)
        return x

    def _apply_and_zero_for_each_replica(self, global_step, accums, var_list):
        """Apply the accumulated grads on one replica, then reset accumulators."""
        normalized_accums = accums
        if self._apply_crs_to_grad:
            # Sum accumulators across replicas before the update.
            normalized_accums = [tf.tpu.cross_replica_sum(accum.read_value()) for accum in accums]
        apply_op = self._opt.apply_gradients(list(zip(normalized_accums, var_list)))
        with tf.control_dependencies([apply_op]):
            zero_op = [tf.assign(accum, tf.zeros_like(accum)) for accum in accums]
        with tf.control_dependencies([tf.group(zero_op)]):
            return tf.add(global_step, 1)

    def _apply_and_zero(self, distribution, global_step, accums, var_list):
        # Fan the apply+zero out to every replica, then bump global_step once.
        call_return = distribution.extended.call_for_each_replica(self._apply_and_zero_for_each_replica, args=(global_step, accums, var_list))
        reduced_call_return = distribution.reduce(tf.distribute.ReduceOp.MEAN, call_return, axis=None)
        with tf.control_dependencies([reduced_call_return]):
            return tf.assign_add(global_step, 1)

    def _accum(self, global_step):
        # Accumulation-only step: just advance global_step.
        return tf.assign_add(global_step, 1)

    def _maybe_apply_grads_and_zero(self, distribution, global_step, accum_grads, var_list):
        # Apply once every grad_steps steps, otherwise only accumulate.
        cond_return = tf.cond(tf.equal(tf.mod(global_step, self._grad_steps), (self._grad_steps - 1)), (lambda : self._apply_and_zero(distribution, global_step, accum_grads, var_list)), (lambda : self._accum(global_step)))
        return cond_return

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """Add grads into the accumulators; periodically apply and reset them."""
        grad_list = []
        var_list = []
        for (g, v) in grads_and_vars:
            grad_list.append(g)
            var_list.append(v)
        with tf.init_scope():
            self._create_slots(var_list)
        accums = []
        for (g, v) in zip(grad_list, var_list):
            accum = self.get_slot(v, 'grad_accum')
            # Gradients are pre-divided by grad_steps so the final applied
            # gradient is the mean over the accumulation window.
            if isinstance(g, tf.IndexedSlices):
                scaled_grad = tf.IndexedSlices((g.values / self._grad_steps), g.indices, dense_shape=g.dense_shape)
                accums.append(accum.assign((self._sharding(accum.read_value()) + scaled_grad)))
            else:
                accums.append(accum.assign((self._sharding(accum.read_value()) + (g / self._grad_steps))))
        if self._use_tpu:
            # TPU path: gate the apply on the private _counter variable.
            def _apply_and_zero_tpu2():
                normalized_accums = accums
                if self._apply_crs_to_grad:
                    normalized_accums = [tf.tpu.cross_replica_sum(accum.read_value()) for accum in accums]
                apply_op = self._opt.apply_gradients(list(zip(normalized_accums, var_list)))
                with tf.control_dependencies([apply_op]):
                    zero_op = [tf.assign(accum, tf.zeros_like(accum)) for accum in accums]
                return tf.group(zero_op, tf.assign_add(global_step, 1))
            def _accum_tpu2():
                return tf.group(tf.no_op(), tf.assign_add(global_step, 1))
            accum_step = tf.cond(tf.equal(tf.mod(self._counter, self._grad_steps), (self._grad_steps - 1)), _apply_and_zero_tpu2, _accum_tpu2)
            with tf.control_dependencies([tf.group(accums)]):
                return tf.group(accum_step, tf.assign_add(self._counter, 1))
        # Non-TPU path: cross-replica coordination via merge_call.
        with tf.control_dependencies([tf.group(accums)]):
            merge_return = tf.distribute.get_replica_context().merge_call(self._maybe_apply_grads_and_zero, args=(global_step, accums, var_list))
        return merge_return

    def get_slot(self, *args, **kwargs):
        return self._opt.get_slot(*args, **kwargs)

    def get_slot_names(self, *args, **kwargs):
        return self._opt.get_slot_names(*args, **kwargs)

    def variables(self):
        return self._opt.variables()
def plot_pies(data, color_mapping, pie_labels, subdirectory_names):
    """Draw a grid of pie charts (one per key of *data*) and save it as pie.png.

    NOTE(review): ``color_mapping`` and ``pie_labels`` are currently unused —
    labels and colors are hard-coded to DRL/TEB below; confirm intent.
    """
    # Grid: one row per subdirectory, remaining keys spread across columns.
    nrow = len(subdirectory_names)
    ncol = int((len(data) / len(subdirectory_names)))
    (fig, axs) = plt.subplots(nrow, ncol)
    # Keys are sorted so chart placement is deterministic.
    for (i, key) in enumerate(sorted(data.keys())):
        axs[((i // ncol), (i % ncol))].pie(data[key], labels=['DRL', 'TEB'], startangle=90, colors=['tab:green', 'tab:blue'], autopct='%1.0f%%', pctdistance=0.5, labeldistance=1.2)
    plt.legend(loc='lower right')
    fig.suptitle('DRL-TEB distribution across maps and obstacles numbers', fontsize=16)
    plt.tight_layout()
    plt.savefig('pie.png')
    plt.show()
def torch_distributed_zero_first(*args, **kwargs):
    # Stub kept for API compatibility: delegates to requires_pytorch, which
    # raises a helpful error when PyTorch is not available.
    requires_pytorch(torch_distributed_zero_first)
def example_TEBD_gs_finite(L, J, g):
    """Ground-state search for the finite TFI chain via imaginary-time TEBD.

    Runs TEBD with a sequence of shrinking time steps to reduce the Trotter
    error, prints energies along the way, and (for small L) compares against
    exact diagonalization.  Returns ``(E, psi, model)``.
    """
    print('finite TEBD, (imaginary time evolution)')
    print('L={L:d}, J={J:.1f}, g={g:.2f}'.format(L=L, J=J, g=g))
    import a_mps
    import b_model
    model = b_model.TFIModel(L, J=J, g=g)
    psi = a_mps.init_spinup_MPS(L)
    # Successively smaller steps: coarse convergence first, then refinement.
    energy = None
    for delta_t in (0.1, 0.01, 0.001, 0.0001, 1e-05):
        bond_ops = calc_U_bonds(model, delta_t)
        run_TEBD(psi, bond_ops, N_steps=500, chi_max=30, eps=1e-10)
        energy = model.energy(psi)
        print('dt = {dt:.5f}: E = {E:.13f}'.format(dt=delta_t, E=energy))
    print('final bond dimensions: ', psi.get_chi())
    if L < 20:
        # Small systems admit an exact benchmark energy.
        E_exact = tfi_exact.finite_gs_energy(L, 1.0, g)
        print('Exact diagonalization: E = {E:.13f}'.format(E=E_exact))
        print('relative error: ', abs((energy - E_exact) / E_exact))
    return (energy, psi, model)
def generate_statistics(dataset_directory_path):
    """Thin wrapper: generate the statistics file for a dataset directory."""
    generate_statistics_file(dataset_directory_path)
def add_nnet_context_info(config_dir, nnet_edits=None, existing_model=None):
    """Compute the model's left/right context and store it in ``<config_dir>/vars``.

    Initializes a reference nnet3 model from ``ref.config`` (optionally on top
    of *existing_model*), optionally applies *nnet_edits* through nnet3-copy,
    queries ``nnet3-info`` for the context values, and writes them out as
    shell-style ``model_left_context``/``model_right_context`` variables.
    """
    common_lib.execute_command('nnet3-init {0} {1}/ref.config {1}/ref.raw'.format((existing_model if (existing_model is not None) else ''), config_dir))
    model = '{0}/ref.raw'.format(config_dir)
    if nnet_edits is not None:
        # Pipe through nnet3-copy so the edits are applied before inspection.
        model = "nnet3-copy --edits='{0}' {1} - |".format(nnet_edits, model)
    out = common_lib.get_command_stdout('nnet3-info "{0}"'.format(model))
    # Only the first few lines of nnet3-info output are "key: value" pairs.
    info = {}
    for line in out.split('\n')[:4]:
        parts = line.split(':')
        if len(parts) != 2:
            continue
        info[parts[0].strip()] = int(parts[1].strip())
    # Fix: use a context manager so the file is closed even if a context key
    # is missing (the original open/close pair leaked on KeyError).
    with open('{0}/vars'.format(config_dir), 'w') as vf:
        vf.write('model_left_context={0}\n'.format(info['left-context']))
        vf.write('model_right_context={0}\n'.format(info['right-context']))
def init_logger(log_file, log_file_level=logging.NOTSET, log_level=logging.INFO):
    """Configure and return the root logger.

    Level arguments may be numeric logging levels or level names such as
    ``"INFO"``.  A single console handler always replaces any existing
    handlers; a file handler is added only when *log_file* is non-empty.
    """
    # Accept level names as well as numeric levels.
    log_file_level = getattr(logging, log_file_level) if isinstance(log_file_level, str) else log_file_level
    log_level = getattr(logging, log_level) if isinstance(log_level, str) else log_level
    formatter = logging.Formatter('[\x1b[032m%(asctime)s\x1b[0m %(levelname)s] %(module)s.%(funcName)s %(message)s')
    root = logging.getLogger()
    root.setLevel(log_level)
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    # Replace (not append) so repeated calls never duplicate console output.
    root.handlers = [console]
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(log_file_level)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
    return root
def get_learning_rate_multipliers(model, alpha=0):
    """Build per-layer learning-rate multipliers for *model*'s kernel layers.

    For ``alpha > 0`` later layers get larger multipliers; for ``alpha <= 0``
    the ordering is reversed (``alpha == 0`` yields all-ones).
    NOTE(review): the constant 5 in the exponent base is an unexplained
    magic number — presumably a tuning choice; confirm with the author.
    """
    layer_names = get_kernel_layer_names(model)
    if (alpha > 0.0):
        mult = ((1 - alpha) ** (5 / (len(layer_names) - 1)))
        # Earlier layers receive larger exponents, hence smaller multipliers.
        multipliers = dict(zip(layer_names, [(mult ** ((len(layer_names) - 1) - i)) for i in range(len(layer_names))]))
    elif (alpha <= 0.0):
        mult = ((alpha + 1) ** (5 / (len(layer_names) - 1)))
        multipliers = dict(zip(layer_names, [(mult ** i) for i in range(len(layer_names))]))
    return multipliers
class NAG(Optimizer):
    """Nesterov Accelerated Gradient optimizer (fairseq-style, fp16-friendly).

    NOTE(review): ``supports_memory_efficient_fp16`` and
    ``supports_flat_params`` read like @property definitions upstream —
    decorators may have been stripped during extraction; confirm.
    """

    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        # lr_old tracks the previous learning rate so the momentum buffer can
        # be rescaled when the LR schedule changes (see lr_correct in step).
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    def supports_memory_efficient_fp16(self):
        return True

    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            # Ratio of current to previous LR, used to rescale the buffer.
            lr_correct = ((lr / lr_old) if (lr_old > 0) else lr)
            for p in group['params']:
                if (p.grad is None):
                    continue
                p_data_fp32 = p.data
                # Perform the update in fp32 even for fp16/bf16 parameters.
                if (p_data_fp32.dtype in {torch.float16, torch.bfloat16}):
                    p_data_fp32 = p_data_fp32.float()
                d_p = p.grad.data.float()
                param_state = self.state[p]
                if ('momentum_buffer' not in param_state):
                    param_state['momentum_buffer'] = torch.zeros_like(d_p)
                else:
                    # Keep the buffer on the gradient's device/dtype.
                    param_state['momentum_buffer'] = param_state['momentum_buffer'].to(d_p)
                buf = param_state['momentum_buffer']
                if (weight_decay != 0):
                    # Weight decay folded in as a multiplicative shrink.
                    p_data_fp32.mul_((1 - (lr * weight_decay)))
                # Nesterov update expressed through the rescaled buffer.
                p_data_fp32.add_(buf, alpha=((momentum * momentum) * lr_correct))
                p_data_fp32.add_(d_p, alpha=((- (1 + momentum)) * lr))
                buf.mul_((momentum * lr_correct)).add_(d_p, alpha=(- lr))
                # Copy the fp32 result back into low-precision storage.
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p.data.copy_(p_data_fp32)
            # Remember this LR for the next step's correction factor.
            group['lr_old'] = lr
        return loss
class Kernel(MeasOpts):
    """Measurement-options container for a kernel; behavior lives in MeasOpts."""

    def __init__(self, name=None, **params):
        super().__init__(name, **params)
def particle_has_track(g, particle):
    """Return True when any edge leaving *particle* ends at a 'track' node.

    Graph nodes are assumed to be ``(kind, ...)`` tuples; ``edge[1]`` is the
    edge's target node.
    """
    return any(edge[1][0] == 'track' for edge in g.edges(particle))
def main(unused_argv):
    """Convert per-fold image lists into sharded TFRecords plus JSON metadata.

    For each fold directory under ``FLAGS.fold_dir``, processes the
    validation/train/test lists into TFRecord shards and writes
    ``mdage.json`` / ``mdgender.json`` with shard counts, label counts and a
    timestamp.
    """
    # Shard counts must divide evenly across worker threads.
    assert (not (FLAGS.train_shards % FLAGS.num_threads)), 'please make the FLAGS.num_threads commersurate with FLAGS.train_shards'
    assert (not (FLAGS.valid_shards % FLAGS.num_threads)), 'please make the FLAGS.num_threads commensurate with FLAGS.valid_shards'
    assert (not (FLAGS.test_shards % FLAGS.num_threads)), 'please make the FLAGS.num_threads commensurate with FLAGS.test_shards'
    folddirlist = FLAGS.fold_dir.split(os.sep)
    # Mirror the fold directory's basename under the output directory.
    savefolder = ((FLAGS.tf_output_dir + os.sep) + folddirlist[(- 1)])
    print(('saving results to %s ' % savefolder))
    if (os.path.exists(FLAGS.tf_output_dir) is False):
        print(('Creating %s' % FLAGS.tf_output_dir))
        os.makedirs(FLAGS.tf_output_dir)
    if (os.path.exists(savefolder) is False):
        print(('Creating %s ' % savefolder))
        os.makedirs(savefolder)
    folddirlist = os.listdir(FLAGS.fold_dir)
    for folddirname in folddirlist:
        subfolddir = ((FLAGS.fold_dir + os.sep) + folddirname)
        subsavefolder = ((savefolder + os.sep) + folddirname)
        if (os.path.exists(subsavefolder) is False):
            print(('Creating %s ' % subsavefolder))
            os.makedirs(subsavefolder)
        (agevalid, agevalid_outcomes, gendervalid, gendervalid_outcomes) = _process_dataset('validation', ('%s/%s' % (subfolddir, FLAGS.valid_list)), FLAGS.data_dir, FLAGS.valid_shards, subsavefolder)
        (agetrain, agetrain_outcomes, gendertrain, gendertrain_outcomes) = _process_dataset('train', ('%s/%s' % (subfolddir, FLAGS.train_list)), FLAGS.data_dir, FLAGS.train_shards, subsavefolder)
        (agetest, agetest_outcomes, gendertest, gendertest_outcomes) = _process_dataset('test', ('%s/%s' % (subfolddir, FLAGS.test_list)), FLAGS.data_dir, FLAGS.test_shards, subsavefolder)
        if ((len(agevalid_outcomes) != len((agevalid_outcomes | agetrain_outcomes))) or (len(gendervalid_outcomes) != len((gendervalid_outcomes | gendertrain_outcomes)))):
            # Bug fix: the set difference must be computed BEFORE joining.
            # Previously ', '.join(...) produced a str and then `str - set`
            # raised TypeError whenever this warning branch was reached.
            print(('Warning: age unattested labels in training data [%s]' % ', '.join(((agevalid_outcomes | agetrain_outcomes) - agevalid_outcomes))))
            print(('Warning: gender unattested labels in training data [%s]' % ', '.join(((gendervalid_outcomes | gendertrain_outcomes) - gendervalid_outcomes))))
        output_file_age = os.path.join(subsavefolder, 'mdage.json')
        output_file_gender = os.path.join(subsavefolder, 'mdgender.json')
        mdage = {'num_valid_shards': FLAGS.valid_shards, 'num_train_shards': FLAGS.train_shards, 'num_test_shards': FLAGS.test_shards, 'valid_counts': agevalid, 'train_counts': agetrain, 'test_counts': agetest, 'timestamp': str(datetime.now()), 'nlabels': len(agetrain_outcomes)}
        with open(output_file_age, 'w') as f:
            json.dump(mdage, f)
        mdgender = {'num_valid_shards': FLAGS.valid_shards, 'num_train_shards': FLAGS.train_shards, 'num_test_shards': FLAGS.test_shards, 'valid_counts': gendervalid, 'train_counts': gendertrain, 'test_counts': gendertest, 'timestamp': str(datetime.now()), 'nlabels': len(gendertrain_outcomes)}
        with open(output_file_gender, 'w') as f:
            json.dump(mdgender, f)
def group_weight(model):
    """Partition *model*'s parameters into weight-decay and no-decay groups.

    Transformer bias and norm parameters are exempted from weight decay;
    every other parameter goes into the regular decay group.  A sanity
    assert verifies every parameter was assigned to exactly one group.
    """
    decay, no_decay = [], []
    for name, tensor in model.named_parameters():
        exempt = ('transformer' in name) and (('bias' in name) or ('norm' in name))
        if exempt:
            no_decay.append(tensor)
        else:
            decay.append(tensor)
    # Every parameter must have landed in one of the two groups.
    assert (len(list(model.parameters())) == (len(decay) + len(no_decay)))
    return [dict(params=decay), dict(params=no_decay, weight_decay=0.0)]
class BaseSampler():
    """Base class for samplers that draw named variables from a data dict.

    Subclasses must implement :meth:`sample`, returning a dict that maps
    each variable name to a tensor of draws.
    """

    def __init__(self, data, n_samples=1, device='cpu'):
        assert isinstance(data, dict), 'you must pass a dict with your data'
        self.device = device
        self.data = data
        # Fixed variable ordering used when stacking samples in _sample.
        self.vars = tuple(data.keys())
        self.n_samples = n_samples

    def _sample(self, n_samples=None):
        """Draw samples and stack them into one tensor (variables last)."""
        count = n_samples or self.n_samples
        drawn = self.sample(count)
        columns = [drawn[name] for name in self.vars]
        return torch.stack(columns, axis=(- 1))

    def sample(self, n_samples=None):
        """Subclass hook: return a dict of samples keyed by variable name."""
        raise ValueError('you must implement this method')
class VarianceThreshold(AutotabularPreprocessingAlgorithm):
    """Remove constant (zero-variance) features via sklearn's VarianceThreshold.

    NOTE(review): ``get_properties`` and ``get_hyperparameter_search_space``
    take no ``self`` — @staticmethod decorators were presumably lost during
    extraction; confirm against the upstream source.
    """

    def __init__(self, random_state: Optional[np.random.RandomState]=None):
        # Accepted for interface uniformity; this transform is deterministic.
        self.random_state = random_state

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE]=None) -> 'VarianceThreshold':
        # threshold=0.0 removes only features that are exactly constant.
        self.preprocessor = sklearn.feature_selection.VarianceThreshold(threshold=0.0)
        self.preprocessor = self.preprocessor.fit(X)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        # Guard: transform before fit is a usage error.
        if (self.preprocessor is None):
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        # Static capability metadata consumed by the pipeline framework.
        return {'shortname': 'Variance Threshold', 'name': 'Variance Threshold (constant feature removal)', 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'is_deterministic': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        # No tunable hyperparameters: an empty configuration space.
        cs = ConfigurationSpace()
        return cs
def tryCreatePartition(numCoresPerAxis, coreShape, postLayerPartition, layer, logdir):
    """Attempt to build a valid core partition for *layer*.

    Returns the compiled ``Layer`` partition candidate, or None (implicit)
    when any core exceeds its compartment budget or compilation fails.
    """
    output_shape = (layer._output_shape3D if hasattr(layer, '_output_shape3D') else layer.output_shape)
    # Drop the batch dimension.
    outputShape = output_shape[1:]
    if hasattr(layer, 'signed'):
        if layer.signed:
            # Signed outputs double the size of the last (channel) axis.
            outputShape = (outputShape[:(- 1)] + ((2 * outputShape[(- 1)]),))
    coreIdMap = getCoreIdMapFromCoreShape(coreShape, outputShape, numCoresPerAxis)
    coreOccupancy = getCoreOccupancy(coreIdMap, numCoresPerAxis)
    # Reject partitions where any core exceeds the compartment limit.
    if np.any((coreOccupancy > layer.maxNumCompartments)):
        return
    multiplicityMap = layer.getMultiplicityMap(coreIdMap)
    partitionCandidate = Layer(layer.name, layer.__class__.__name__, layer.compartmentKwargs, layer.connectionKwargs, coreIdMap, multiplicityMap, postLayerPartition)
    partitionCandidate.coreOccupancy = coreOccupancy
    partitionCandidate = layer.compile(partitionCandidate)
    if (partitionCandidate is None):
        # Progress marker: '.' = compilation failed for this candidate.
        print('.', end='', flush=True)
        return
    layer.validatePartition(partitionCandidate)
    layer.visualizePartition(logdir, partitionCandidate, coreIdMap, coreOccupancy, multiplicityMap=multiplicityMap)
    # Progress marker: 'x' = a valid partition was produced.
    print('x', end='', flush=True)
    return partitionCandidate
class TestPytorchAdaptor(unittest.TestCase):
    """Integration tests for the neural_compressor PyTorch adaptor.

    NOTE(review): ``setUpClass``/``tearDownClass`` take ``self`` and lack
    @classmethod decorators, and the bare tuple before
    ``test_non_quant_module`` looks like a stripped
    ``@unittest.skipIf(IPEX, ...)`` decorator — these appear to have been
    mangled during extraction; confirm against the original file.
    """
    # Shared fixtures created once at class-definition time.
    framework_specific_info = {'device': 'cpu', 'approach': 'post_training_static_quant', 'random_seed': 1234, 'q_dataloader': None, 'workspace_path': './'}
    framework = 'pytorch'
    adaptor = FRAMEWORKS[framework](framework_specific_info)
    model = q_resnet18()
    nc_model = MODELS['pytorch'](model)

    def setUpClass(self):
        # Generate the YAML configs the tests below consume.
        build_pytorch_yaml()
        build_dump_tensors_yaml()

    def tearDownClass(self):
        # Remove generated configs and on-disk artifacts.
        os.remove('ptq_yaml.yaml')
        os.remove('dynamic_yaml.yaml')
        os.remove('qat_yaml.yaml')
        os.remove('dump_yaml.yaml')
        os.remove('auto_yaml.yaml')
        shutil.rmtree('./saved', ignore_errors=True)
        shutil.rmtree('runs', ignore_errors=True)

    def test_get_all_weight_name(self):
        # ResNet-18 exposes 62 named weights through the wrapper.
        assert (len(list(self.nc_model.get_all_weight_names())) == 62)

    def test_get_weight(self):
        # Mutate two parameters directly, then read them back via the wrapper.
        for (name, param) in self.model.named_parameters():
            if (name == 'layer4.1.conv2.weight'):
                param.data.fill_(0.0)
            if (name == 'fc.bias'):
                param.data.fill_(0.1)
        assert (int(torch.sum(self.nc_model.get_weight('layer4.1.conv2.weight'))) == 0)
        assert torch.allclose(torch.sum(self.nc_model.get_weight('fc.bias')), torch.tensor(100.0))

    def test_get_input(self):
        # A forward pre-hook should capture the exact input tensor.
        model = MODELS['pytorch'](q_resnet18())
        model.model.eval().fuse_model()
        model.register_forward_pre_hook()
        rand_input = torch.rand(100, 3, 224, 224).float()
        model.model(rand_input)
        assert torch.equal(model.get_inputs('x'), rand_input)
        model.remove_hooks()

    def test_update_weights(self):
        self.nc_model.update_weights('fc.bias', torch.zeros([1000]))
        assert (int(torch.sum(self.nc_model.get_weight('fc.bias'))) == 0)

    def test_get_gradient(self):
        # Gradients are undefined until .grad is populated -> expect assert.
        with self.assertRaises(AssertionError):
            self.nc_model.get_gradient('fc.bias')
        for (name, tensor) in self.nc_model._model.named_parameters():
            if (name == 'fc.bias'):
                tensor.grad = torch.zeros_like(tensor)
                break
        assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))
        # Gradients can also be fetched from a raw tensor's .grad attribute.
        rand_input = torch.rand(100, 3, 224, 224).float()
        rand_input.grad = torch.ones_like(rand_input)
        assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)), torch.ones_like(rand_input))

    def test_report_sparsity(self):
        (df, total_sparsity) = self.nc_model.report_sparsity()
        self.assertTrue((total_sparsity > 0))
        self.assertTrue((len(df) == 22))

    def test_quantization_saved(self):
        # Quantize with each approach, then save / reload / recover and
        # check the reloaded model matches the history-recovered one.
        for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
            model = M()
            quantizer = Quantization(fake_yaml)
            quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
            dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
            quantizer.model = model
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            q_model = quantizer.fit()
            eval_func(q_model)
            q_model.save('./saved')
            saved_model = load('./saved', model)
            eval_func(saved_model)
            history_file = './saved/history.snapshot'
            model_recover = recover(model, history_file, 0)
            eval_func(model_recover)
            self.assertEqual(type(saved_model.conv), type(model_recover.conv))
            shutil.rmtree('./saved', ignore_errors=True)
        from neural_compressor.experimental import Benchmark
        evaluator = Benchmark('ptq_yaml.yaml')
        evaluator.model = model
        evaluator.b_dataloader = common.DataLoader(dataset)
        evaluator.fit('accuracy')
        # Second pass: quantize deep copies of the shared ResNet fixture.
        for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
            model = copy.deepcopy(self.model)
            if (fake_yaml == 'ptq_yaml.yaml'):
                model.eval().fuse_model()
            conf = QuantConf(fake_yaml)
            quantizer = Quantization(conf)
            dataset = quantizer.dataset('dummy', (100, 3, 224, 224))
            quantizer.model = model
            if (fake_yaml == 'qat_yaml.yaml'):
                quantizer.q_func = q_func
            else:
                quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_func = eval_func
            q_model = quantizer.fit()
            q_model.save('./saved')
            saved_model = load('./saved', model)
            eval_func(saved_model)
            shutil.rmtree('./saved', ignore_errors=True)

    def test_quantization_new_saved(self):
        # Save/restore through quantized_state_dict instead of q_model.save.
        for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
            model = M()
            quantizer = Quantization(fake_yaml)
            quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
            dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
            quantizer.model = model
            quantizer.calib_dataloader = common.DataLoader(dataset)
            quantizer.eval_dataloader = common.DataLoader(dataset)
            q_model = quantizer.fit()
            eval_func(q_model)
            torch.save(q_model.quantized_state_dict(), './saved/model.pt')
            from neural_compressor.experimental.common import Model
            common_model = Model(model)
            common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))
            eval_func(common_model)
            self.assertEqual(type(q_model._model.linear), type(common_model._model.linear))
            shutil.rmtree('./saved', ignore_errors=True)

    # NOTE(review): bare tuple below is almost certainly a mangled
    # @unittest.skipIf(IPEX, '...') decorator; as written it has no effect.
    (IPEX, 'this function is affected by IPEX, Fixing now.')
    def test_non_quant_module(self):
        # Modules named/classed in non_quant_dict must stay float.
        for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
            model = PartialQuantModel()
            conf = QuantConf(fake_yaml)
            quantizer = Quantization(conf)
            dataset = quantizer.dataset('dummy', (1, 3, 224, 224))
            non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], 'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}
            quantizer.model = common.Model(model, **non_quant_dict)
            if (fake_yaml == 'qat_yaml.yaml'):
                quantizer.q_func = q_func
            else:
                quantizer.calib_func = eval_func
            quantizer.eval_func = eval_func
            q_model = quantizer.fit()
            self.assertTrue(isinstance(q_model.model.conv, torch.nn.Conv2d))
            self.assertTrue(('quantize' in str(q_model.model.conv2.__class__)))
            q_model.save('./saved')
            saved_model = load('./saved', model, **non_quant_dict)
            eval_func(saved_model)
            shutil.rmtree('./saved', ignore_errors=True)

    def test_auto_quant(self):
        # Auto-strategy quantization of an LSTM language model.
        def eval_func(model):
            return 1
        model_origin = LSTMModel(ntoken=10, ninp=512, nhid=256, nlayers=2)
        quantizer = Quantization('auto_yaml.yaml')
        dataset = quantizer.dataset('dummy', (3, 10), label=True)
        quantizer.eval_func = eval_func
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = common.Model(model_origin)
        q_model = quantizer.fit()
        self.assertNotEqual(q_model, None)

    def test_workspace_path(self):
        # Setting workspace_path should load the quantized weights from disk.
        model = M()
        quantizer = Quantization('ptq_yaml.yaml')
        quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
        dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
        quantizer.model = model
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        q_model = quantizer.fit()
        eval_func(q_model)
        torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')
        from neural_compressor.experimental.common import Model
        common_model = Model(model)
        common_model.workspace_path = './saved'
        eval_func(common_model)
        self.assertEqual(type(q_model._model.linear), type(common_model._model.linear))
        shutil.rmtree('./saved', ignore_errors=True)

    def test_get_graph_info(self):
        from neural_compressor.model.torch_model import PyTorchModel
        model = PyTorchModel(self.model)
        op_map = model.graph_info
        self.assertTrue((op_map['conv1'] == 'Conv2d'))

    def test_tensorboard(self):
        # Baseline accuracy dumps must appear for both eval_func and
        # eval_dataloader evaluation modes.
        model = copy.deepcopy(self.nc_model)
        model.model.eval().fuse_model()
        quantizer = Quantization('dump_yaml.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
        quantizer.model = model.model
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.eval_func = eval_func
        quantizer.fit()
        self.assertTrue((True if os.path.exists('runs/eval/baseline_acc0.0') else False))
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.eval_func = None
        quantizer.fit()
        self.assertTrue((True if os.path.exists('runs/eval/baseline_acc0.0') else False))

    def test_tensor_dump_and_set(self):
        # Inspect weight/activation tensors, overwrite one weight, and check
        # the dequantized result tracks the injected data.
        model = copy.deepcopy(self.nc_model)
        model.model.eval().fuse_model()
        quantizer = Quantization('ptq_yaml.yaml')
        dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
        dataloader = common.DataLoader(dataset)
        dataloader = common._generate_common_dataloader(dataloader, 'pytorch')
        quantizer.eval_dataloader = dataloader
        quantizer.calib_dataloader = dataloader
        quantizer.model = model.model
        q_model = quantizer.fit()
        quantizer.strategy.adaptor.inspect_tensor(model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'], iteration_list=[1, 2], inspect_type='all', save_to_disk=True)
        with open('saved/inspect_result.pkl', 'rb') as fp:
            tensor_dict = pickle.load(fp)
        a = tensor_dict['activation'][0]
        w = tensor_dict['weight']
        # Output tensor naming changed around torch 1.8.
        if (PT_VERSION >= Version('1.8.0').release):
            self.assertTrue((w['conv1.0']['conv1.0.weight'].shape[0] == a['conv1.0']['conv1.0.output0'].shape[1]))
        else:
            self.assertTrue((w['conv1.0']['conv1.0.weight'].shape[0] == a['conv1.0']['conv1.1.output0'].shape[1]))
        data = np.random.random(w['conv1.0']['conv1.0.weight'].shape).astype(np.float32)
        quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})
        changed_tensor = q_model.get_weight('conv1.weight')
        scales = changed_tensor.q_per_channel_scales()
        changed_tensor_fp32 = torch.dequantize(changed_tensor)
        # Tolerance scales with the per-channel quantization step.
        self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=(2 / np.min(scales.numpy()))))
        quantizer.strategy.adaptor.inspect_tensor(q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'], iteration_list=[1, 2], inspect_type='all', save_to_disk=False)

    def test_forward_wrapper(self):
        # Keyword-only forward signatures must survive quantization.
        vision_model = resnet18()
        class dummymodel(torch.nn.Module):
            def __init__(self, model):
                super(dummymodel, self).__init__()
                self._model = model
            def forward(self, input=None):
                return self._model(input)
        data = [[{'input': torch.rand(3, 224, 224)}, torch.ones(1, 1)]]
        dataloader = common.DataLoader(data, batch_size=1)
        quantizer = Quantization('dynamic_yaml.yaml')
        model = dummymodel(vision_model)
        quantizer.model = model
        quantizer.calib_dataloader = dataloader
        quantizer.eval_dataloader = dataloader
        model = quantizer.fit()
        self.assertTrue(isinstance(model, torch.nn.Module))

    def test_floatfunctions_fallback(self):
        # FloatFunctional ops (cat/add/mul/...) are forced to stay float and
        # the converted model must still agree with the float output.
        class ModelWithFunctionals(torch.nn.Module):
            def __init__(self):
                super(ModelWithFunctionals, self).__init__()
                self.mycat = nnq.FloatFunctional()
                self.myadd = nnq.FloatFunctional()
                self.myadd_relu = nnq.FloatFunctional()
                self.my_scalar_add = nnq.FloatFunctional()
                self.mymul = nnq.FloatFunctional()
                self.my_scalar_mul = nnq.FloatFunctional()
                self.quant = QuantStub()
                self.dequant = DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                y = self.mycat.cat([x, x, x])
                z = self.myadd.add(y, y)
                w = self.myadd_relu.add_relu(z, z)
                w = self.my_scalar_add.add_scalar(w, (- 0.5))
                w = self.mymul.mul(w, w)
                w = self.my_scalar_mul.mul_scalar(w, 0.5)
                w = self.dequant(w)
                return w
        model = ModelWithFunctionals()
        model = MODELS['pytorch'](model)
        x = torch.rand(10, 1, dtype=torch.float)
        y = model.model(x)
        fallback_ops = []
        q_capability = self.adaptor.query_fw_capability(model)
        for (k, v) in q_capability['opwise'].items():
            if ((k[0] != 'quant') and (k[0] != 'dequant')):
                fallback_ops.append(k[0])
        model.model.qconfig = torch.quantization.default_qconfig
        model.model.quant.qconfig = torch.quantization.default_qconfig
        if (PT_VERSION >= Version('1.8.0').release):
            model.model.dequant.qconfig = torch.quantization.default_qconfig
        nc_torch._fallback_quantizable_ops_recursively(model.model, '', fallback_ops, op_qcfgs={})
        # add_observer_ was made private in torch 2.0.
        if (PT_VERSION >= Version('2.0.0').release):
            from torch.quantization.quantize import _add_observer_ as add_observer_
        else:
            from torch.quantization.quantize import add_observer_
        add_observer_(model.model)
        model.model(x)
        torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
        qy = model.model(x)
        tol = {'atol': 0.1, 'rtol': 0.001}
        self.assertTrue(np.allclose(y, qy, **tol))
class MemoryEfficientFP16Optimizer(_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer):
    """fairseq optimizer wrapper adding dynamic loss scaling for fp16 training.

    NOTE(review): ``build_optimizer`` takes ``cls`` and the two consecutive
    ``optimizer`` defs look like a @classmethod and a @property getter/setter
    pair whose decorators were stripped during extraction — as written the
    second ``optimizer`` def simply shadows the first.  Confirm upstream.
    """

    def __init__(self, cfg: DictConfig, params, optimizer, **kwargs):
        # Only optimizers that advertise fp16 support may be wrapped.
        if (not optimizer.supports_memory_efficient_fp16):
            raise ValueError('Unsupported optimizer: {}'.format(optimizer.__class__.__name__))
        super().__init__(cfg.optimizer)
        self.wrapped_optimizer = optimizer
        if (getattr(cfg.common, 'fp16_scale_window', None) is None):
            if (len(cfg.optimization.update_freq) > 1):
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            data_parallel_size = int((cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size))
            # Default scale window shrinks with replica count / update freq.
            scale_window = int((((2 ** 14) / data_parallel_size) / cfg.optimization.update_freq[0]))
        else:
            scale_window = cfg.common.fp16_scale_window
        if (not getattr(cfg.common, 'bf16', False)):
            self.scaler = DynamicLossScaler(init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale)
        else:
            # bf16 has enough dynamic range; no loss scaling needed.
            self.scaler = None

    def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
        # Factory: build the inner optimizer, then wrap it.
        fp16_optimizer = optim.build_optimizer(cfg.optimizer, params)
        return cls(cfg, params, fp16_optimizer, **kwargs)

    def optimizer(self):
        return self.wrapped_optimizer.optimizer

    def optimizer(self, optimizer):
        self.wrapped_optimizer.optimizer = optimizer

    def optimizer_config(self):
        return self.wrapped_optimizer.optimizer_config

    def lr_scheduler(self):
        return getattr(self.wrapped_optimizer, 'lr_scheduler', None)

    def get_lr(self):
        return self.wrapped_optimizer.get_lr()

    def set_lr(self, lr):
        self.wrapped_optimizer.set_lr(lr)

    def all_reduce_grads(self, module):
        self.wrapped_optimizer.all_reduce_grads(module)
def test_watershed_saddle_basin():
    """Watershed on a saddle landscape must place a dam between the basins."""
    saddle_landscape = np.array([[0, 0, 3], [2, 1, 2], [0, 0, 3]])
    # Expected labels: basin 1 (top row), dam (0, middle), basin 2 (bottom).
    saddle_result = np.array([[1, 1, 1], [0, 0, 0], [2, 2, 2]])
    saddle_ws = morpho.watershed(saddle_landscape, dams=True)
    assert_array_equal(saddle_ws, saddle_result)
class PreResNet(nn.Module):
    """Pre-activation ResNet for small (CIFAR-style, 32x32) inputs.

    ``depth`` must be 6n+2 for BasicBlock or 9n+2 for Bottleneck; the final
    BN/ReLU pair sits after the last residual stage (pre-activation layout).
    """

    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(PreResNet, self).__init__()
        kind = block_name.lower()
        if kind == 'basicblock':
            assert (((depth - 2) % 6) == 0), 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif kind == 'bottleneck':
            assert (((depth - 2) % 9) == 0), 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
        # Three stages at 16/32/64 base channels; stages 2-3 downsample by 2.
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style init for convolutions; BatchNorm starts as identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* residual blocks; the first may downsample/project."""
        downsample = None
        needs_projection = (stride != 1) or (self.inplanes != planes * block.expansion)
        if needs_projection:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False))
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Pre-activation: final normalization/activation before pooling.
        out = self.bn(out)
        out = self.relu(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
def test_d0_conf_note_report_number():
    """A D0 CONF-note citation must yield a normalized report number."""
    ref_line = u'[4] D0 Collaboration, D0 Note 6417-CONF (2015)'
    res = get_references(ref_line)
    references = res[0]
    # Report number is hyphen-normalized; line marker comes from "[4]".
    assert (references[0]['reportnumber'] == [u'D0-Note-6417-CONF'])
    assert (references[0]['linemarker'] == [u'4'])
class ColorDenseCRFLoss(nn.Module):
    """Dense-CRF regularization loss driven by RGB appearance kernels.

    Inputs are downscaled by ``scale_factor`` before the (expensive) CRF
    term is evaluated; the result is scaled by ``weight``.
    """

    def __init__(self, weight, sigma_rgb, scale_factor):
        super(ColorDenseCRFLoss, self).__init__()
        # Loss weight, RGB kernel bandwidth, and spatial downscale factor.
        self.weight = weight
        self.sigma_rgb = sigma_rgb
        self.scale_factor = scale_factor

    def forward(self, images, segmentations):
        # Expect a batched NCHW image tensor.
        assert (images.ndim == 4)
        small_images = F.interpolate(images, scale_factor=self.scale_factor, mode='nearest', recompute_scale_factor=False)
        small_segs = F.interpolate(segmentations, scale_factor=self.scale_factor, mode='bilinear', recompute_scale_factor=False, align_corners=False)
        crf_term = ColorDenseCRFLossFunction.apply(small_images, small_segs, self.sigma_rgb)
        return self.weight * crf_term

    def extra_repr(self):
        return 'sigma_rgb={}, weight={}, scale_factor={}'.format(self.sigma_rgb, self.weight, self.scale_factor)
class Resize(object):
    """Video transform: resize a clip to ``size`` via the module-level helper."""

    def __init__(self, size):
        # Target size forwarded verbatim to ``resize``.
        self.size = size

    def __call__(self, vid):
        return resize(vid, self.size)
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy loss with uniform label smoothing.

    The one-hot target is blended with the uniform distribution:
    ``q = (1 - epsilon) * onehot + epsilon / num_classes``; the loss is the
    smoothed negative log-likelihood averaged over the batch.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = ((1 - self.epsilon) * one_hot) + (self.epsilon / self.num_classes)
        # Per-class batch mean, then summed over classes.
        return ((- smoothed) * log_probs).mean(0).sum()
def test_result_of_conf_dict_is_not_dogmatic(conf_dict):
    """A config built from a plain dict must not be flagged as dogmatic."""
    config = conf_dict({'e': [1, 1, 1]})
    assert not is_dogmatic(config)
def extract_vel_from_state(state: X) -> float:
    """Return the velocity ``vx`` stored on ``state``.

    Raises:
        ZValueError: if the state object carries no ``vx`` attribute.
    """
    try:
        return state.vx
    except AttributeError:
        raise ZValueError(
            msg='Unable to extract vel from state',
            state=state,
            state_type=type(state),
        )
@torch.no_grad()
def inference(weight, name, img):
    """Load a recognition model and print the embedding for one image.

    Bug fix: the line above the function was a truncated decorator —
    ``_grad()`` alone raises NameError at import; it was clearly meant to
    be ``@torch.no_grad()`` (inference without gradient tracking).

    Args:
        weight: path to a saved state dict for the model.
        name: model architecture name passed to ``get_model``.
        img: image file path, or None to use a random 112x112 image.
    """
    if img is None:
        # Fall back to random noise so the pipeline can be smoke-tested.
        img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
    else:
        img = cv2.imread(img)
        img = cv2.resize(img, (112, 112))
    # BGR -> RGB, HWC -> CHW, then normalize to [-1, 1].
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.transpose(img, (2, 0, 1))
    img = torch.from_numpy(img).unsqueeze(0).float()
    img.div_(255).sub_(0.5).div_(0.5)
    net = get_model(name, fp16=False)
    net.load_state_dict(torch.load(weight))
    net.eval()
    feat = net(img).numpy()
    print(feat)
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Persist a training checkpoint under ``output_dir/ckpt``.

    No-op when ``args.no_save`` is set. Saves step, train_size,
    batch_size, the model state dict, and the optimizer state dict.

    Bug fix: ``model_state_dict`` was previously computed and ignored
    while ``model.state_dict()`` was called a second time inside
    ``torch.save`` — the precomputed dict is now used.
    """
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        # Unwrap DataParallel so keys are saved without the 'module.' prefix.
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({'step': step,
                'train_size': train_size,
                'batch_size': args.batch_size,
                'model': model_state_dict,
                'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
def FPN(backbone_name='vgg16', input_shape=(None, None, None, 3), classes=21, activation='softmax', weights=None, encoder_weights='imagenet', encoder_freeze=False, encoder_features='default', pyramid_block_filters=256, pyramid_use_batchnorm=True, pyramid_aggregation='concat', pyramid_dropout=None, **kwargs):
    """Build a Feature Pyramid Network segmentation model on a classification backbone.

    Args:
        backbone_name: name of the classification backbone to use as encoder.
        input_shape: shape of the input tensor (batch, H, W, C).
        classes: number of output segmentation classes.
        activation: final activation applied to the output.
        weights: optional path to model weights loaded after construction.
        encoder_weights: pretrained weights spec for the backbone.
        encoder_freeze: when True, freeze the backbone's layers.
        encoder_features: skip-connection layer names, or 'default' to use
            the backbone's standard four feature layers.
        pyramid_block_filters: filters in each pyramid block (segmentation
            blocks use half of this).
        pyramid_use_batchnorm: whether pyramid blocks use batch norm.
        pyramid_aggregation: how pyramid outputs are merged ('concat', ...).
        pyramid_dropout: optional dropout applied in the pyramid.
        **kwargs: forwarded to the keras submodule resolver and builders.

    Returns:
        The assembled FPN keras model.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
    base = Backbones.get_backbone(
        backbone_name,
        input_shape=input_shape,
        weights=encoder_weights,
        include_top=False,
        **kwargs)
    if encoder_features == 'default':
        encoder_features = Backbones.get_feature_layers(backbone_name, n=4)
    fpn_model = build_fpn(
        backbone=base,
        skip_connection_layers=encoder_features,
        pyramid_filters=pyramid_block_filters,
        segmentation_filters=pyramid_block_filters // 2,
        use_batchnorm=pyramid_use_batchnorm,
        dropout=pyramid_dropout,
        activation=activation,
        classes=classes,
        aggregation=pyramid_aggregation)
    if encoder_freeze:
        freeze_model(base, **kwargs)
    if weights is not None:
        fpn_model.load_weights(weights)
    return fpn_model
def plotgeneral(fig):
    """Draw a demo scene on ``fig``: a circle at (3, 2), a red diagonal line,
    and two green markers, with axes fixed to [0, 5] x [0, 4]."""
    axes = fig.gca()
    outline = plt.Circle((3, 2), 1, edgecolor='blue', facecolor='none')
    axes.add_artist(outline)
    plt.plot([0, 4], [0, 4], 'r')
    plt.plot([2, 3], [2, 3], 'go')
    plt.axis([0, 5, 0, 4])
class cLSTM(nn.Module):
    """Thin LSTM wrapper that returns the final hidden state of the last layer."""

    def __init__(self, input_size, hidden_size, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)

    def forward(self, features, init_hidden=None):
        # Compact weights for (multi-GPU) contiguous memory before the pass.
        self.lstm.flatten_parameters()
        _, (hidden, _) = self.lstm(features, init_hidden)
        # Hidden state of the top LSTM layer, shape (batch, hidden_size).
        return hidden[-1]
class GmmPolicy(AbstractPolicy):
    """Stub GMM policy; construction and invocation are not implemented yet.

    Fix: the original had a bare string literal sitting at class level
    between the two methods — a misplaced docstring. It is now attached
    to ``__call__`` where it belongs.
    """

    def __init__(self, dataset):
        # TODO: consume ``dataset`` to fit the mixture model.
        pass

    def __call__(self, world, state, actor):
        """Compute features if actor id is nonzero; use world."""
        # TODO: implement feature computation.
        pass
def test_statcast_catcher_poptime() -> None:
    """2019 pop-time data should be non-empty, 14 columns wide, and respect attempt minimums."""
    min_2b, min_3b = 5, 0
    frame: pd.DataFrame = statcast_catcher_poptime(2019, min_2b, min_3b)
    assert frame is not None
    assert not frame.empty
    assert len(frame.columns) == 14
    assert len(frame) > 0
    # Rows below the requested attempt thresholds must be filtered out.
    assert len(frame.loc[frame.pop_2b_sba_count < min_2b]) == 0
    assert len(frame.loc[frame.pop_3b_sba_count < min_3b]) == 0
def test_global_var():
    """A `global` write inside a function body must be visible to later cells."""
    for cell in ('x = 0',
                 'def f(): global x; x = 42',
                 'f()',
                 'assert x == 42'):
        run_cell(cell)
class OcoGradEstimation():
    """Online convex optimization style tuner for an integer parameter k.

    Maintains a feasible range [k_min, k_max] that is periodically widened
    or narrowed from an observed window of visited values, and proposes the
    next (k, k_aux) pair from finite-difference gradient estimates.

    Bug fix: ``stochastic_rounding`` was declared without ``self`` even
    though every call site uses ``self.stochastic_rounding(...)``, which
    raised ``TypeError``. The instance parameter is restored.
    """

    def __init__(self, k_min, k_max):
        # Original (hard) bounds; the working bounds may be adapted later.
        self.k_min_orig = k_min
        self.k_max_orig = k_max
        self.k_min = k_min
        self.k_max = k_max
        self.d = (k_max - k_min)
        # Fraction of k_min used as a floor for the auxiliary point.
        self.delta = 0.1
        # Re-evaluate the working range every this many updates.
        self.min_max_update_window = 20
        # Window widening factor.
        self.alpha = 1.5
        self.min_max_update_count = 0
        # Window extrema start inverted so the first update overwrites them.
        self.k_min_window = self.k_max_orig
        self.k_max_window = self.k_min_orig
        self.reference_iter = 0
        self.m_prev = 0
        self.timer = 0
        self.grad_value_prev = 0

    def tuning_k_grad_sign(self, k, k_aux, cost, cost_aux, time):
        """Sign-of-gradient update; returns the next ``(k, k_aux)`` pair."""
        # Step size decays with iterations since the last range change.
        eta = (self.d / np.sqrt((2 * (time - self.reference_iter))))
        k_unchanged_due_to_cost_none = False
        if (cost_aux is None):
            # No auxiliary cost yet: hold k for a few rounds, then explore up.
            if (self.timer <= 10):
                k_next = k
                self.timer += 1
                k_unchanged_due_to_cost_none = True
            else:
                self.timer = 0
                k_next = self.stochastic_rounding((k + eta))
        else:
            self.timer = 0
            # Move opposite to the sign of the finite-difference gradient.
            k_next = self.stochastic_rounding((k - (eta * np.sign(((cost - cost_aux) / (k - k_aux))))))
        if (k_next < self.k_min):
            k_next = self.k_min
        elif (k_next > self.k_max):
            k_next = self.k_max
        # Auxiliary probe sits half a step below k_next, floored and kept strictly smaller.
        k_aux_next = self.stochastic_rounding((k_next - (eta / 2.0)))
        if (k_aux_next < (self.k_min * self.delta)):
            k_aux_next = int(np.ceil((self.k_min * self.delta)))
        if (k_aux_next >= k_next):
            k_aux_next = (k_next - 1)
        if (not k_unchanged_due_to_cost_none):
            # Track the range of values actually visited in this window.
            self.k_min_window = min(self.k_min_window, k_next)
            self.k_max_window = max(self.k_max_window, k_next)
            self.min_max_update_count += 1
            if (self.min_max_update_count >= self.min_max_update_window):
                self.min_max_update_count = 0
                # Candidate widened range, clipped to the original bounds.
                k_min_window_change = (self.k_min_window / self.alpha)
                k_max_window_change = (self.k_max_window * self.alpha)
                k_min_window_change = int(np.round(max(k_min_window_change, self.k_min_orig)))
                k_max_window_change = int(np.round(min(k_max_window_change, self.k_max_orig)))
                b_new = (k_max_window_change - k_min_window_change)
                b_orig = (self.k_max - self.k_min)
                m_current = (time - self.reference_iter)
                # Accept the new range only if it is non-degenerate, the
                # epoch is long enough, and regret growth stays bounded.
                if ((b_new > 0) and (m_current >= self.m_prev) and ((b_orig + b_new) <= (b_orig * np.sqrt(2)))):
                    self.k_min = k_min_window_change
                    self.k_max = k_max_window_change
                    self.d = (self.k_max - self.k_min)
                    self.reference_iter = time
                    self.m_prev = m_current
                    print(' New k_min:', self.k_min, 'new k_max:', self.k_max)
                else:
                    print(' Same range - New k_min_window:', self.k_min, 'new k_max_window:', self.k_max, 'b_orig:', b_orig, 'b_new:', (self.k_max - self.k_min))
                    print('m_current:', m_current, 'self.m_prev:', self.m_prev)
                    print('b_orig * np.sqrt(m_current) + b_new * np.sqrt(self.min_max_update_window) =', ((b_orig * np.sqrt(m_current)) + (b_new * np.sqrt(self.min_max_update_window))))
                    print('b_orig * np.sqrt(m_current + self.min_max_update_window) =', (b_orig * np.sqrt((m_current + self.min_max_update_window))))
                # Reset the window extrema (inverted) for the next window.
                self.k_min_window = self.k_max_orig
                self.k_max_window = self.k_min_orig
        return (k_next, k_aux_next)

    def tuning_k_grad_value(self, k, k_aux, cost, cost_aux, time):
        """Gradient-value update; returns the next ``(k, k_aux)`` pair."""
        eta = (self.d / np.sqrt((2 * time)))
        grad_value = None
        if (cost_aux is None):
            # No auxiliary cost yet: hold briefly, then reuse the last gradient.
            if (self.timer <= 10):
                k_next = k
                self.timer += 1
            else:
                self.timer = 0
                k_next = self.stochastic_rounding((k + (eta * self.grad_value_prev)))
        else:
            self.timer = 0
            grad_value = ((cost - cost_aux) / (k - k_aux))
            k_next = self.stochastic_rounding((k - (eta * grad_value)))
            self.grad_value_prev = grad_value
        if (k_next < self.k_min):
            k_next = self.k_min
        elif (k_next > self.k_max):
            k_next = self.k_max
        if (grad_value is None):
            grad_value = self.grad_value_prev
        # Auxiliary probe scaled by the gradient magnitude, floored and kept strictly smaller.
        k_aux_next = self.stochastic_rounding((k_next - ((eta * np.abs(grad_value)) / 2.0)))
        if (k_aux_next < (self.k_min * self.delta)):
            k_aux_next = int(np.ceil((self.k_min * self.delta)))
        if (k_aux_next >= k_next):
            k_aux_next = (k_next - 1)
        return (k_next, k_aux_next)

    def stochastic_rounding(self, x):
        """Round ``x`` to an int, rounding up with probability ``x - floor(x)``.

        Bug fix: previously declared without ``self``, so the method was
        uncallable through an instance.
        """
        floor_x = int(np.floor(x))
        prob = random.random()
        if (prob < (x - floor_x)):
            x = (floor_x + 1)
        else:
            x = floor_x
        return x
def build(region_similarity_calculator_config):
    """Construct a region similarity calculator from its proto config.

    Args:
        region_similarity_calculator_config: a
            RegionSimilarityCalculator proto message selecting one of the
            supported similarity types.

    Returns:
        The matching calculator instance.

    Raises:
        ValueError: on a wrong config type or an unknown similarity choice.
    """
    if not isinstance(region_similarity_calculator_config,
                      region_similarity_calculator_pb2.RegionSimilarityCalculator):
        raise ValueError('region_similarity_calculator_config not of type region_similarity_calculator_pb2.RegionsSimilarityCalculator')
    choice = region_similarity_calculator_config.WhichOneof('region_similarity')
    # Dispatch table replaces the original if/elif chain.
    factories = {
        'iou_similarity': region_similarity_calculator.IouSimilarity,
        'ioa_similarity': region_similarity_calculator.IoaSimilarity,
        'neg_sq_dist_similarity': region_similarity_calculator.NegSqDistSimilarity,
    }
    if choice in factories:
        return factories[choice]()
    raise ValueError('Unknown region similarity calculator.')
def rescale_centercrop_resize(output_size, dtype=np.float32):
    """Return a thunk building a center-crop + resize + rescale pipeline.

    The thunk maps an observation space to ``(transform, Box)`` where the
    Box spans [-1, 1] with shape ``output_size``.
    """
    def _rescale_centercrop_resize_thunk(obs_space):
        in_shape = obs_space.shape
        # Crop to a square on the shorter of the first two dims (H, W).
        short_side = min(in_shape[:2])
        assert short_side > 10, 'are you sure your data format is correct? is your min wh really < 10?'
        target_wh = output_size[-2:]
        pipeline = vision.transforms.Compose([
            vision.transforms.ToPILImage(),
            vision.transforms.CenterCrop([short_side, short_side]),
            vision.transforms.Resize(target_wh),
            vision.transforms.ToTensor(),
            RESCALE_0_1_NEG1_POS1,
        ])
        return (pipeline, spaces.Box(-1, 1, output_size, dtype))
    return _rescale_centercrop_resize_thunk
def get_generic_path_information(paths, stat_prefix=''):
    """Aggregate reward, return, and action statistics over rollout paths.

    Args:
        paths: list of dicts each carrying 'rewards' and 'actions' arrays.
        stat_prefix: optional prefix prepended to every statistic key.

    Returns:
        OrderedDict of summary statistics plus 'Num Paths'.
    """
    stats = OrderedDict()
    returns = [sum(p['rewards']) for p in paths]
    rewards = np.vstack([p['rewards'] for p in paths])
    stats.update(create_stats_ordered_dict('Rewards', rewards, stat_prefix=stat_prefix))
    stats.update(create_stats_ordered_dict('Returns', returns, stat_prefix=stat_prefix))
    # 1-D action arrays are concatenated; multi-dim actions are stacked row-wise.
    if len(paths[0]['actions'].shape) == 1:
        actions = np.hstack([p['actions'] for p in paths])
    else:
        actions = np.vstack([p['actions'] for p in paths])
    stats.update(create_stats_ordered_dict('Actions', actions, stat_prefix=stat_prefix))
    stats['Num Paths'] = len(paths)
    return stats
def compute_em_score(prediction, ground_truth):
    """Exact-match score: 1.0 when prediction equals ground truth, else 0.0."""
    return float(prediction == ground_truth)
def load_json_file(fileName: str) -> DataInstance:
    """Parse the first layout from a JSON file into a DataInstance.

    Reads the file, takes ``layouts[0]``, converts it, and records the
    file's base name (without extension) on ``inputFile``.
    """
    with open(fileName, 'r') as handle:
        first_layout = json.load(handle).get('layouts')[0]
    base_name = os.path.basename(fileName)
    data = dict_to_datainstance(first_layout)
    data.inputFile = os.path.splitext(base_name)[0]
    print('Loaded ', data.element_count, ' elements data from ', fileName)
    return data
def __gather_predictions(predictions_list: list, labels: list) -> list:
    """Decode every batch of RNN-T predictions and concatenate the results."""
    gathered = []
    for batch in predictions_list:
        gathered.extend(__rnnt_decoder_predictions_tensor(batch, labels=labels))
    return gathered
def visualize():
    """Load demo voxel predictions from a .mat file and plot input, prediction, and ground truth."""
    result_path = 'demo_result.mat'
    mat = scipy.io.loadmat(result_path)
    x_sample = mat['X_test']
    y_pred = mat['Y_test_pred']
    y_true = mat['Y_test_true']
    # Binarize predictions in place at the 0.5 threshold.
    threshold = 0.5
    y_pred[y_pred >= threshold] = 1
    y_pred[y_pred < threshold] = 0
    tools.Data.plotFromVoxels(x_sample, title='x_sample')
    tools.Data.plotFromVoxels(y_pred, title='y_pred')
    tools.Data.plotFromVoxels(y_true, title='y_true')
    from matplotlib.pyplot import show
    show()
class joint_set():
    # Index groups over a 24-joint skeleton.
    # NOTE(review): index semantics below look like the SMPL joint layout
    # (e.g. leaf = hands/feet/head) — confirm against the dataset's
    # skeleton definition before relying on these labels.
    leaf = [7, 8, 12, 20, 21]
    # All joints except the root (index 0).
    full = list(range(1, 24))
    # Subset kept after dropping ignored joints.
    reduced = [1, 2, 3, 4, 5, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19]
    # Joints excluded from the reduced set (root, leaves, extremities).
    ignored = [0, 7, 8, 10, 11, 20, 21, 22, 23]
    # Lower-body joint indices and, per entry, the parent's position
    # within the lower_body list itself (None marks the root).
    lower_body = [0, 1, 2, 4, 5, 7, 8, 10, 11]
    lower_body_parent = [None, 0, 0, 1, 2, 3, 4, 5, 6]
    # Cached group sizes.
    n_leaf = len(leaf)
    n_full = len(full)
    n_reduced = len(reduced)
    n_ignored = len(ignored)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.