code stringlengths 101 5.91M |
|---|
def replace_example_docstring(example_docstring):
    """Decorator factory that substitutes *example_docstring* for the
    placeholder ``Examples:`` line in a function's docstring.

    Raises:
        ValueError: if the decorated function's docstring has no
            ``Examples:`` (or ``Example:``) placeholder line.
    """
    placeholder = re.compile('^\\s*Examples?:\\s*$')

    def docstring_decorator(fn):
        func_doc = fn.__doc__
        lines = func_doc.split('\n')
        # Locate the first placeholder line, or None when absent.
        idx = next(
            (pos for pos, text in enumerate(lines) if placeholder.search(text) is not None),
            None,
        )
        if idx is None:
            raise ValueError(f'''The function {fn} should have an empty 'Examples:' in its docstring as placeholder, current docstring is:
{func_doc}''')
        lines[idx] = example_docstring
        fn.__doc__ = '\n'.join(lines)
        return fn
    return docstring_decorator
def add_content_and_label(file_location, output_sentences, output_labels, label):
    """Append one file's whitespace-normalised text and its label.

    The file at *file_location* is flattened to a single space-separated
    line written to *output_sentences*; *label* is written on its own line
    to *output_labels*.
    """
    with open(file_location) as source:
        raw = ' '.join(source.readlines())
    # Drop newlines/carriage returns, then collapse runs of whitespace.
    flattened = raw.replace('\n', ' ').replace('\r', '')
    normalised = ' '.join(flattened.split())
    output_sentences.write(normalised + '\n')
    output_labels.write(str(label) + '\n')
def get_learning_rate_schedules(specs):
    """Build learning-rate schedule objects from a specification dict.

    Args:
        specs: dict whose 'LearningRateSchedule' entry is a list of dicts,
            each carrying a 'Type' key ('Step', 'Warmup' or 'Constant')
            plus the parameters that type requires.

    Returns:
        list of schedule objects, one per specification entry.

    Raises:
        Exception: if an entry has an unrecognised 'Type'.
    """
    schedules = []
    # Fixed: the original loop variable shadowed the list it iterated
    # (`for schedule_specs in schedule_specs`); behaviour is unchanged.
    for schedule_spec in specs['LearningRateSchedule']:
        if (schedule_spec['Type'] == 'Step'):
            schedules.append(StepLearningRateSchedule(schedule_spec['Initial'], schedule_spec['Interval'], schedule_spec['Factor']))
        elif (schedule_spec['Type'] == 'Warmup'):
            schedules.append(WarmupLearningRateSchedule(schedule_spec['Initial'], schedule_spec['Final'], schedule_spec['Length']))
        elif (schedule_spec['Type'] == 'Constant'):
            schedules.append(ConstantLearningRateSchedule(schedule_spec['Value']))
        else:
            raise Exception('no known learning rate schedule of type "{}"'.format(schedule_spec['Type']))
    return schedules
class RandAugment(torch.nn.Module):
    """RandAugment data-augmentation policy.

    Applies ``num_ops`` randomly chosen transforms to the input image, each
    at strength ``magnitude`` (an index into ``num_magnitude_bins`` discrete
    strength levels).
    """
    def __init__(self, num_ops: int=2, magnitude: int=9, num_magnitude_bins: int=31, interpolation: InterpolationMode=InterpolationMode.NEAREST, fill: Optional[List[float]]=None) -> None:
        super().__init__()
        # num_ops: how many transforms are applied per image.
        self.num_ops = num_ops
        # magnitude: index into the per-op magnitude bins (see forward()).
        self.magnitude = magnitude
        self.num_magnitude_bins = num_magnitude_bins
        self.interpolation = interpolation
        self.fill = fill

    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[(str, Tuple[(Tensor, bool)])]:
        # Maps op name -> (magnitude bins, signed?).  The bool says whether
        # the sampled magnitude may be negated in forward().
        # NOTE(review): `image_size` is unused here — presumably kept for
        # interface parity with ops (shear/translate) absent from this
        # space; confirm against the original source.
        return {'Identity': (torch.tensor(0.0), False), 'Brightness': (torch.linspace(0.0, 0.9, num_bins), True), 'Color': (torch.linspace(0.0, 0.9, num_bins), True), 'Contrast': (torch.linspace(0.0, 0.9, num_bins), True), 'Sharpness': (torch.linspace(0.0, 0.9, num_bins), True), 'Posterize': ((8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int()), False), 'Solarize': (torch.linspace(255.0, 0.0, num_bins), False), 'AutoContrast': (torch.tensor(0.0), False), 'Equalize': (torch.tensor(0.0), False)}

    def forward(self, img: Tensor) -> Tensor:
        """Apply `num_ops` randomly selected transforms to *img* and
        return the transformed image."""
        fill = self.fill
        if isinstance(img, Tensor):
            # Normalise fill to one float per channel for tensor inputs.
            if isinstance(fill, (int, float)):
                fill = ([float(fill)] * F.get_image_num_channels(img))
            elif (fill is not None):
                fill = [float(f) for f in fill]
        for _ in range(self.num_ops):
            op_meta = self._augmentation_space(self.num_magnitude_bins, F.get_image_size(img))
            # Uniformly pick one op from the augmentation space.
            op_index = int(torch.randint(len(op_meta), (1,)).item())
            op_name = list(op_meta.keys())[op_index]
            (magnitudes, signed) = op_meta[op_name]
            # 0-dim bins (Identity/AutoContrast/Equalize) have no magnitude.
            magnitude = (float(magnitudes[self.magnitude].item()) if (magnitudes.ndim > 0) else 0.0)
            # Signed ops are negated with probability 1/2.
            if (signed and torch.randint(2, (1,))):
                magnitude *= (- 1.0)
            img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img

    def __repr__(self) -> str:
        # Placeholders are filled from instance attributes via __dict__.
        s = (self.__class__.__name__ + '(')
        s += 'num_ops={num_ops}'
        s += ', magnitude={magnitude}'
        s += ', num_magnitude_bins={num_magnitude_bins}'
        s += ', interpolation={interpolation}'
        s += ', fill={fill}'
        s += ')'
        return s.format(**self.__dict__)
def find_first_non_zero_pixel(points, instance_image):
    """Return the first point whose pixel in *instance_image* is non-zero.

    Each candidate's (x, y) is clamped to the image bounds before the
    lookup (x against width, y against height).  When no candidate hits a
    non-zero pixel, the last clamped candidate is returned.
    """
    candidates = list(points)
    coord = candidates[0]
    max_x = instance_image.shape[1] - 1
    max_y = instance_image.shape[0] - 1
    for candidate in candidates:
        clipped = list(candidate)
        clipped[0] = np.clip(clipped[0], 0, max_x)
        clipped[1] = np.clip(clipped[1], 0, max_y)
        coord = clipped
        # Image is indexed row-major: [y, x].
        if instance_image[(clipped[1], clipped[0])] > 0:
            break
    return coord
def sample_and_group_all(xyz, points, use_xyz=True):
    """PointNet++-style "group all": treat every point as one group.

    Args:
        xyz: (batch_size, ndataset, 3) TF tensor of point coordinates;
            the second dimension must be statically known.
        points: (batch_size, ndataset, channel) feature tensor, or None.
        use_xyz: when True and *points* is given, concatenate coordinates
            with the features for the grouped output.

    Returns:
        (new_xyz, new_points, idx, grouped_xyz): a single origin centroid
        per batch element, the grouped features, per-point indices, and the
        coordinates reshaped into one group.
    """
    batch_size = tf.shape(xyz)[0]
    # TF1-style static-shape access (.value); needs a known point count.
    nsample = xyz.get_shape()[1].value
    # One centroid at the origin for every batch element: (B, 1, 3).
    new_xyz = tf.tile(np.array([0, 0, 0], dtype=np.float32).reshape((1, 1, 3)), (batch_size, 1, 1))
    # NOTE(review): indices are built with dtype float32 — verify this is
    # intentional; downstream gather-style ops usually expect integers.
    idx = tf.tile(np.array(range(nsample), dtype=np.float32).reshape((1, 1, nsample)), (batch_size, 1, 1))
    grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3))
    if (points is not None):
        if use_xyz:
            # Grouped features become [coordinates ‖ original features].
            new_points = tf.concat([xyz, points], axis=2)
        else:
            new_points = points
        new_points = tf.expand_dims(new_points, 1)
    else:
        new_points = grouped_xyz
    return (new_xyz, new_points, idx, grouped_xyz)
# NOTE(review): this bare call looks like a mangled
# `@register_task('cross_lingual_lm')` decorator — confirm against the
# original source before relying on it.
_task('cross_lingual_lm')
class CrossLingualLMTask(FairseqTask):
    """Task for training a cross-lingual (XLM-style) masked language model
    over several monolingual corpora, one dataset per language."""

    # NOTE(review): takes only `parser` — presumably @staticmethod upstream.
    def add_args(parser):
        """Register this task's command-line arguments."""
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample')
        parser.add_argument('--monolingual-langs', default='en', type=str, help='comma separated list of languages for which we want to train XLM on')
        parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset')
        parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily')
        parser.add_argument('--shuffle', action='store_true', help='shuffle each monolingual dataset while training')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        self.distributed_world_size = args.distributed_world_size
        # Stable language -> integer segment-id mapping.
        self.langs2id = self._lang_to_id(args.monolingual_langs)

    def _lang_to_id(self, languages: str):
        """Build {language: index} from a comma-separated language string."""
        lang2id = {}
        langs = [l.strip() for l in languages.split(',')]
        for (id, lang) in enumerate(langs):
            lang2id[lang] = id
        return lang2id

    # NOTE(review): first parameter is `cls` — presumably @classmethod.
    def load_dictionary(cls, filename):
        """Load a MaskedLMDictionary from *filename*."""
        return MaskedLMDictionary.load(filename)

    # NOTE(review): first parameter is `cls` — presumably @classmethod.
    def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8):
        """Build and finalize a MaskedLMDictionary from raw text files."""
        d = MaskedLMDictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    # NOTE(review): presumably a @property in the original source.
    def target_dictionary(self):
        return self.dictionary

    # NOTE(review): first parameter is `cls` — presumably @classmethod.
    def setup_task(cls, args, **kwargs):
        """Load the shared dictionary and construct the task instance."""
        dictionary = MaskedLMDictionary.load(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def _load_single_lang_dataset(self, split, epoch):
        """Load one language's split, concatenating shards when present.

        Returns (dataset, sizes); raises FileNotFoundError when the base
        shard of the split is missing.
        """
        loaded_datasets = []
        paths = self.args.data.split(':')
        assert (len(paths) > 0)
        # Round-robin over the configured data directories by epoch.
        data_path = paths[(epoch % len(paths))]
        for k in itertools.count():
            # Shards are named: split, split1, split2, ...
            split_k = (split + (str(k) if (k > 0) else ''))
            path = os.path.join(data_path, split_k)
            ds = indexed_dataset.make_dataset(path, impl=self.args.dataset_impl, fix_lua_indexing=True, dictionary=self.dictionary)
            if (ds is None):
                if (k > 0):
                    break
                else:
                    raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
            # tokens_per_sample - 1 reserves one position per block.
            loaded_datasets.append(TokenBlockDataset(ds, ds.sizes, (self.args.tokens_per_sample - 1), pad=self.dictionary.pad(), eos=self.dictionary.eos()))
            print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)])))
        if (len(loaded_datasets) == 1):
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
        return (dataset, sizes)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load *split* for every configured language and wrap the
        per-language masked-LM datasets in a multi-corpus sampler."""
        dataset_map = OrderedDict()
        for lang in self.langs2id.keys():
            # Per-language split naming convention: "<split>.<lang>".
            language_split = '{}.{}'.format(split, lang)
            (block_dataset, sizes) = self._load_single_lang_dataset(split=language_split, epoch=epoch)
            dataset_map[lang] = MaskedLMDataset(dataset=block_dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.eos(), sep_token_idx=self.dictionary.eos(), shuffle=getattr(self.args, 'shuffle', False), has_pairs=False, segment_id=self.langs2id[lang], seed=self.seed)
        self.datasets[split] = MultiCorpusSampledDataset(dataset_map)
        # NOTE(review): indexing the path list directly with `epoch` (not
        # epoch % len) can raise IndexError for large epochs — confirm.
        print('| {} {} {} examples'.format(self.args.data.split(':')[epoch], split, len(self.datasets[split])))
def test_graph_gnm():
    """graph_Gnm must honour the requested vertex and edge counts."""
    num_vertices, num_edges = 100, 500
    generated = graph_Gnm(num_vertices, num_edges)
    assert generated.num_v == num_vertices
    assert generated.num_e == num_edges
# NOTE(review): the bare tuple-call below looks like a mangled
# `@runway.command('translate', ...)` decorator — confirm against the
# original source.
('translate', inputs={'source_imgs': runway.image(description='input image to be translated'), 'Strokes': runway.number(min=100, max=700, default=100, description='number of strokes')}, outputs={'image': runway.image(description='output image containing the translated result')})
def translate(learn, inputs):
    """Runway command: re-paint the input image with a stroke renderer.

    Args:
        learn: model handle supplied by the runway runtime (unused here).
        inputs: dict with 'source_imgs' (a PIL image) and 'Strokes' (int
            number of strokes for the painter).

    Returns:
        PIL image of the progressively painted result.
    """
    os.makedirs('images', exist_ok=True)
    # Persist the uploaded image so the painter can read it from disk.
    inputs['source_imgs'].save('images/temp.jpg')
    paths = os.path.join('images', 'temp.jpg')
    # The painter is configured through the module-level `args` object.
    args.img_path = paths
    args.max_m_strokes = inputs['Strokes']
    pt = ProgressivePainter(args=args)
    final_rendered_image = optimize_x(pt)
    # Normalise to 0-255 and convert to uint8 for image output.
    formatted = ((final_rendered_image * 255) / np.max(final_rendered_image)).astype('uint8')
    img = Image.fromarray(formatted)
    return img
def perfect_plot(ax, xarr, yarr, label):
    """Plot *yarr* against *xarr* on *ax*, dropping the final x value when
    the arrays differ in length (e.g. bin edges vs. bin counts)."""
    xs = xarr if len(xarr) == len(yarr) else xarr[:-1]
    ax.plot(xs, yarr, label=label)
class TestTensorboardXWriter(unittest.TestCase):
    """Checks TensorboardXWriter's lazy event-file creation."""

    def test_no_files_created(self) -> None:
        """Closing an unused writer must leave the log directory empty."""
        with tempfile.TemporaryDirectory() as log_dir:
            writer = TensorboardXWriter(log_dir)
            writer.close()
            self.assertFalse(os.listdir(log_dir))

    def test_single_write(self) -> None:
        """A single scalar write must materialize an event file."""
        with tempfile.TemporaryDirectory() as log_dir:
            writer = TensorboardXWriter(log_dir)
            writer._writer.add_scalar('testing', 1, 1)
            writer.close()
            self.assertTrue(os.listdir(log_dir))
def attach_head_and_body(root):
    """Append hOCR-style <head> and <body> elements to *root*.

    The head is populated with the two standard hOCR meta tags describing
    the producing system and its OCR capabilities.

    Returns:
        (head, body) element pair.
    """
    head = ET.Element('head')
    body = ET.Element('body')
    root.append(head)
    root.append(body)
    meta_specs = (
        ('ocr-system', 'eperiodica_fulltext'),
        ('ocr-capabilities', 'ocr_page ocr_author ocr_carea ocr_photo ocr_caption ocr_linear ocr_footer ocr_header ocr_pageno ocr_table ocr_section ocrx_block ocrx_word'),
    )
    for meta_name, meta_content in meta_specs:
        meta = ET.Element('meta')
        meta.set('name', meta_name)
        meta.set('content', meta_content)
        head.append(meta)
    return (head, body)
class PathBuffer():
    """Fixed-capacity ring buffer of trajectory ("path") data.

    Transitions are stored in flat 2-D arrays keyed by name.  A path that
    wraps past the end of the ring occupies two segments (tail + head);
    older paths whose storage gets overwritten are evicted.
    """

    def __init__(self, capacity_in_transitions):
        self._capacity = capacity_in_transitions
        self._transitions_stored = 0
        self._first_idx_of_next_path = 0
        # Each entry is a (first_segment, second_segment) pair of ranges
        # into the ring, oldest path first.
        self._path_segments = collections.deque()
        self._buffer = {}

    def add_path(self, path):
        """Append one path (a dict of equal-length 2-D arrays).

        Raises:
            ValueError: if a previously stored key is missing, an array has
                the wrong shape, the path is empty, or it exceeds capacity.
        """
        # Validate the new path against keys already stored.
        for (key, buf_arr) in self._buffer.items():
            path_array = path.get(key, None)
            if (path_array is None):
                raise ValueError('Key {} missing from path.'.format(key))
            if ((len(path_array.shape) != 2) or (path_array.shape[1] != buf_arr.shape[1])):
                raise ValueError('Array {} has wrong shape.'.format(key))
        path_len = self._get_path_length(path)
        (first_seg, second_seg) = self._next_path_segments(path_len)
        # Evict stored paths whose leading segment we are about to overwrite.
        while (self._path_segments and self._segments_overlap(first_seg, self._path_segments[0][0])):
            self._path_segments.popleft()
        while (self._path_segments and self._segments_overlap(second_seg, self._path_segments[0][0])):
            self._path_segments.popleft()
        self._path_segments.append((first_seg, second_seg))
        for (key, array) in path.items():
            buf_arr = self._get_or_allocate_key(key, array)
            # Split the path data across the (possibly wrapped) segments.
            buf_arr[first_seg.start:first_seg.stop] = array[:len(first_seg)]
            buf_arr[second_seg.start:second_seg.stop] = array[len(first_seg):]
        if (second_seg.stop != 0):
            self._first_idx_of_next_path = second_seg.stop
        else:
            self._first_idx_of_next_path = first_seg.stop
        self._transitions_stored = min(self._capacity, (self._transitions_stored + path_len))

    def sample_path(self):
        """Return one uniformly sampled stored path as a dict of arrays."""
        path_idx = np.random.randint(len(self._path_segments))
        (first_seg, second_seg) = self._path_segments[path_idx]
        first_seg_indices = np.arange(first_seg.start, first_seg.stop)
        second_seg_indices = np.arange(second_seg.start, second_seg.stop)
        indices = np.concatenate([first_seg_indices, second_seg_indices])
        path = {key: buf_arr[indices] for (key, buf_arr) in self._buffer.items()}
        return path

    def sample_transitions(self, batch_size):
        """Return *batch_size* uniformly sampled transitions (with replacement)."""
        idx = np.random.choice(self._transitions_stored, batch_size)
        return {key: buf_arr[idx] for (key, buf_arr) in self._buffer.items()}

    def _next_path_segments(self, n_indices):
        """Compute the ring segments the next path of length *n_indices*
        will occupy; the second segment is empty unless the path wraps."""
        if (n_indices > self._capacity):
            raise ValueError('Path is too long to store in buffer.')
        start = self._first_idx_of_next_path
        end = (start + n_indices)
        if (end > self._capacity):
            second_end = (end - self._capacity)
            return (range(start, self._capacity), range(0, second_end))
        else:
            return (range(start, end), range(0, 0))

    def _get_or_allocate_key(self, key, array):
        """Return the storage array for *key*, allocating it on first use."""
        buf_arr = self._buffer.get(key, None)
        if (buf_arr is None):
            buf_arr = np.zeros((self._capacity, array.shape[1]), array.dtype)
            self._buffer[key] = buf_arr
        return buf_arr

    def clear(self):
        """Drop all stored paths and transition data."""
        self._transitions_stored = 0
        self._first_idx_of_next_path = 0
        self._path_segments.clear()
        self._buffer.clear()

    # Fixed: this helper takes no `self` but was invoked as
    # `self._get_path_length(path)`, which passed the instance as `path`
    # and raised TypeError.  Declaring it @staticmethod makes that call
    # pattern correct.
    @staticmethod
    def _get_path_length(path):
        """Return the common length of all arrays in *path*."""
        length_key = None
        length = None
        for (key, value) in path.items():
            if (length is None):
                length = len(value)
                length_key = key
            elif (len(value) != length):
                raise ValueError('path has inconsistent lengths between {!r} and {!r}.'.format(length_key, key))
        if (not length):
            raise ValueError('Nothing in path')
        return length

    # Fixed: same missing-@staticmethod defect as _get_path_length above.
    @staticmethod
    def _segments_overlap(seg_a, seg_b):
        """Return True when the two ranges share at least one index."""
        if ((not seg_a) or (not seg_b)):
            return False
        first = seg_a
        second = seg_b
        if (seg_b.start < seg_a.start):
            (first, second) = (seg_b, seg_a)
        assert (first.start <= second.start)
        return (first.stop > second.start)

    def n_transitions_stored(self):
        """Return the number of transitions currently in the buffer."""
        return int(self._transitions_stored)
def load_obj_data(filename):
    """Parse a Wavefront .obj file into numpy arrays.

    Handles 'v' records (with optional per-vertex RGB), 'vt', 'vn', and
    triangular 'f' records; face indices are converted to 0-based.

    Args:
        filename: path to the .obj file.

    Returns:
        dict with keys 'v', 'vt', 'vc', 'vn', 'f', 'ft', 'fn' mapping to
        numpy arrays (empty arrays for record types absent from the file).
    """
    v_list = []
    vt_list = []
    vc_list = []
    vn_list = []
    f_list = []
    fn_list = []
    ft_list = []

    def _parse_face_element(ele_str):
        # Decode one face corner: 'v', 'v/t' or 'v/t/n' (t may be empty).
        # Returns 0-based (vertex, texture, normal); None for absent parts.
        fv = None
        ft = None
        fn = None
        eles = ele_str.strip().split('/')
        if (len(eles) == 1):
            fv = (int(eles[0]) - 1)
        elif (len(eles) == 2):
            fv = (int(eles[0]) - 1)
            ft = (int(eles[1]) - 1)
        elif (len(eles) == 3):
            fv = (int(eles[0]) - 1)
            fn = (int(eles[2]) - 1)
            ft = (None if (eles[1] == '') else (int(eles[1]) - 1))
        return (fv, ft, fn)

    # Context manager replaces the original open/readlines/close triple,
    # guaranteeing the handle is closed even if parsing raises.
    with open(filename, 'r') as fp:
        lines = fp.readlines()
    for line in lines:
        if (len(line) < 2):
            continue
        line_data = line.strip().split(' ')
        if (line_data[0] == 'v'):
            v_list.append((float(line_data[1]), float(line_data[2]), float(line_data[3])))
            if (len(line_data) == 7):
                # 'v x y z r g b': per-vertex colour is present.
                vc_list.append((float(line_data[4]), float(line_data[5]), float(line_data[6])))
            else:
                # Default mid-grey when no colour is given.
                vc_list.append((0.5, 0.5, 0.5))
        if (line_data[0] == 'vt'):
            vt_list.append((float(line_data[1]), float(line_data[2])))
        if (line_data[0] == 'vn'):
            vn_list.append((float(line_data[1]), float(line_data[2]), float(line_data[3])))
        if (line_data[0] == 'f'):
            (fv0, ft0, fn0) = _parse_face_element(line_data[1])
            (fv1, ft1, fn1) = _parse_face_element(line_data[2])
            (fv2, ft2, fn2) = _parse_face_element(line_data[3])
            f_list.append((fv0, fv1, fv2))
            # Keep texture/normal triangles only when every corner has one.
            if ((ft0 is not None) and (ft1 is not None) and (ft2 is not None)):
                ft_list.append((ft0, ft1, ft2))
            if ((fn0 is not None) and (fn1 is not None) and (fn2 is not None)):
                fn_list.append((fn0, fn1, fn2))
    model = {'v': np.asarray(v_list), 'vt': np.asarray(vt_list), 'vc': np.asarray(vc_list), 'vn': np.asarray(vn_list), 'f': np.asarray(f_list), 'ft': np.asarray(ft_list), 'fn': np.asarray(fn_list)}
    return model
def test_potential_paramunits_1d():
    """1-D disc potentials must evaluate identically whether their
    parameters carry astropy units or are pre-converted to galpy's
    internal (natural) units."""
    from galpy import potential
    from galpy.util import conversion
    (ro, vo) = (10.5, 195.0)
    # KGPotential: K is a surface density, F a volume density, D a height.
    pot = potential.KGPotential(amp=1.0, K=((40.0 * units.Msun) / (units.pc ** 2)), F=((0.02 * units.Msun) / (units.pc ** 3)), D=(200 * units.pc), ro=ro, vo=vo)
    # Same parameters converted by hand.  NOTE(review): the 2*pi / 4*pi
    # factors presumably reflect KGPotential's convention when G is not
    # folded into K and F — confirm against the galpy documentation.
    pot_nounits = potential.KGPotential(amp=1.0, K=(((40.0 / conversion.surfdens_in_msolpc2(vo, ro)) * 2.0) * numpy.pi), F=(((0.02 / conversion.dens_in_msolpc3(vo, ro)) * 4.0) * numpy.pi), D=(0.2 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, use_physical=False) - pot_nounits(1.5, use_physical=False))) < (10.0 ** (- 8.0))), 'KGPotential w/ parameters w/ units does not behave as expected'
    # Same check with K and F already multiplied by G; no pi factors then.
    pot = potential.KGPotential(amp=1.0, K=(((40.0 * units.Msun) / (units.pc ** 2)) * constants.G), F=(((0.02 * units.Msun) / (units.pc ** 3)) * constants.G), D=(200 * units.pc), ro=ro, vo=vo)
    pot_nounits = potential.KGPotential(amp=1.0, K=(40.0 / conversion.surfdens_in_msolpc2(vo, ro)), F=(0.02 / conversion.dens_in_msolpc3(vo, ro)), D=(0.2 / ro), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, use_physical=False) - pot_nounits(1.5, use_physical=False))) < (10.0 ** (- 8.0))), 'KGPotential w/ parameters w/ units does not behave as expected'
    # IsothermalDiskPotential: sigma is a velocity dispersion.
    pot = potential.IsothermalDiskPotential(amp=1.2, sigma=((30.0 * units.km) / units.s), ro=ro, vo=vo)
    pot_nounits = potential.IsothermalDiskPotential(amp=1.2, sigma=(30.0 / vo), ro=ro, vo=vo)
    assert (numpy.fabs((pot(1.5, use_physical=False) - pot_nounits(1.5, use_physical=False))) < (10.0 ** (- 8.0))), 'IsothermalDiskPotential w/ parameters w/ units does not behave as expected'
    return None
class TFRemBertPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for the TensorFlow RemBERT model, used when
    the 'tf' backend is not installed."""
    # Backend(s) checked before this class may be instantiated.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Delegates to requires_backends, which is expected to fail with an
        # instructive message when TensorFlow is unavailable.
        requires_backends(self, ['tf'])
class DistEvalHook(EvalHook):
    """Distributed evaluation hook: runs multi-GPU inference every
    `interval` epochs and, on rank 0, tracks and saves the best model."""

    def after_train_epoch(self, runner):
        """Evaluate after an epoch; update best.json/best.pth on improvement."""
        if (not self.every_n_epochs(runner, self.interval)):
            return
        current_ckpt_path = osp.join(runner.work_dir, f'epoch_{(runner.epoch + 1)}.pth')
        json_path = osp.join(runner.work_dir, 'best.json')
        # Resume best-so-far state from disk on the first evaluation.
        if (osp.exists(json_path) and (len(self.best_json) == 0)):
            self.best_json = mmcv.load(json_path)
            self.best_score = self.best_json['best_score']
            self.best_ckpt = self.best_json['best_ckpt']
            self.key_indicator = self.best_json['key_indicator']
        from mmpose.apis import multi_gpu_test
        # Every rank participates in inference; results gather on rank 0.
        results = multi_gpu_test(runner.model, self.dataloader, tmpdir=osp.join(runner.work_dir, '.eval_hook'), gpu_collect=self.gpu_collect)
        if (runner.rank == 0):
            print('\n')
            key_score = self.evaluate(runner, results)
            if (self.save_best and self.compare_func(key_score, self.best_score)):
                self.best_score = key_score
                self.logger.info(f'Now best checkpoint is epoch_{(runner.epoch + 1)}.pth')
                self.best_json['best_score'] = self.best_score
                self.best_json['best_ckpt'] = current_ckpt_path
                self.best_json['key_indicator'] = self.key_indicator
                # NOTE(review): best.pth is saved from the in-memory model,
                # while best_ckpt records the epoch checkpoint path — both
                # refer to the same epoch but are distinct artifacts.
                save_checkpoint(runner.model, osp.join(runner.work_dir, 'best.pth'))
                mmcv.dump(self.best_json, json_path)
def test_digits_cosine_lazy_init():
    """SumRedundancySelection (lazy optimizer) seeded with the first five
    reference selections must reproduce the remaining reference ranking,
    gains and chosen subset on the digits data."""
    model = SumRedundancySelection(100, 'cosine', optimizer='lazy', initial_subset=digits_cosine_ranking[:5])
    model.fit(X_digits)
    # All but the final five entries of model.ranking are compared against
    # the reference ranking with its five-element seed prefix removed.
    assert_array_equal(model.ranking[:(- 5)], digits_cosine_ranking[5:])
    assert_array_almost_equal(model.gains[:(- 5)], digits_cosine_gains[5:], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim, 400)
self.l2 = nn.Linear((400 + action_dim), 300)
self.l3_additional = nn.Linear(300, 300)
self.l3 = nn.Linear(300, 1)
def forward(self, x, u):
x = F.relu(self.l1(x))
x = F.relu(self.l2(torch.cat([x, u], 1)))
x = self.l3_additional(x)
x = self.l3(x)
return x |
def get_split_subset(args, ds, split):
    """Deterministically split *ds* and return the portion named by *split*.

    The RNG is seeded with args.split_seed so repeated calls yield the same
    permutation; the held-out portion is the last
    round(len(ds) * args.split_ratio) indices of that permutation.  The
    held-out split is called 'val' for cifar10 and 'test' for the imagenet
    variants.  Unrecognised datasets (or splits) return *ds* unchanged.
    """
    manual_seed(args.split_seed)
    indices = randperm(len(ds))
    valid_size = round(len(ds) * args.split_ratio)
    holdout_name = 'val' if args.dataset == 'cifar10' else 'test'
    if args.dataset in ('cifar10', 'imagenet', 'imagenet-mini'):
        if split == 'train':
            ds = Subset(ds, indices[:-valid_size])
        elif split == holdout_name:
            ds = Subset(ds, indices[-valid_size:])
    return ds
class AsyncMultiGPUTrainer(MultiGPUTrainer, SingleCostFeedfreeTrainer, MultiPredictorTowerTrainer):
    """Multi-GPU trainer with asynchronous updates: tower 0 is stepped by
    the main loop while each remaining tower runs in a background thread."""

    def __init__(self, config, input_queue=None, average_gradient=True, predict_tower=None):
        # Accept both the legacy `config.dataset` and newer `config.data`.
        if hasattr(config, 'dataset'):
            self._input_method = QueueInput(config.dataset, input_queue)
        else:
            self._input_method = config.data
            assert isinstance(self._input_method, QueueInput)
        super(AsyncMultiGPUTrainer, self).__init__(config)
        if (predict_tower is not None):
            logger.warn('[Deprecated] Argument `predict_tower` is deprecated for trainer. Use TrainConfig.predict_tower instead!')
            config.predict_tower = predict_tower
        self._setup_predictor_factory(config.predict_tower)
        self._average_gradient = average_gradient
        assert tf.test.is_gpu_available()

    def _setup(self):
        """Build per-tower gradients, the tower-0 train op, and one
        asynchronous update thread per remaining tower."""
        super(AsyncMultiGPUTrainer, self)._setup()
        grad_list = MultiGPUTrainer._multi_tower_grads(self.config.tower, (lambda : self._get_cost_and_grad()[1]))
        gradprocs = self.model.get_gradient_processor()
        if (self._average_gradient and (self.config.nr_tower > 1)):
            # Scale gradients down so async updates approximate the
            # synchronous per-tower average.
            gradprocs.insert(0, ScaleGradient(('.*', (1.0 / self.config.nr_tower)), log=False))
        grad_list = [apply_grad_processors(g, gradprocs) for g in grad_list]
        # Only tower 0 advances the global step and moving-average summary.
        self.train_op = tf.group(self.config.optimizer.apply_gradients(grad_list[0], get_global_step_var()), summary_moving_average(), name='train_op')
        self._start_async_threads(grad_list)

    def _start_async_threads(self, grad_list):
        self.async_step_counter = itertools.count()
        self.training_threads = []
        for k in range(1, len(self.config.tower)):
            train_op = self.config.optimizer.apply_gradients(grad_list[k])
            def f(op=train_op):
                # Default-argument binding captures this tower's op,
                # avoiding the late-binding closure pitfall.
                self.sess.run([op])
                next(self.async_step_counter)
            th = LoopThread(f)
            th.pause()
            th.start()
            self.training_threads.append(th)
        # Threads stay paused until the first run_step resumes them.
        self.async_running = False

    def run_step(self):
        """Resume async towers (first call only), then step tower 0."""
        if (not self.async_running):
            self.async_running = True
            for th in self.training_threads:
                th.resume()
        next(self.async_step_counter)
        self.sess.run(self.train_op)

    def _trigger_epoch(self):
        # Pause async towers so epoch callbacks see a stable model.
        self.async_running = False
        for th in self.training_threads:
            th.pause()
        try:
            if (self.config.tower > 1):
                # NOTE(review): `self.config.tower` is used as a list in
                # _setup (len()/indexing); comparing it to an int here would
                # raise TypeError on Python 3 and be swallowed by the bare
                # except below — confirm the intended condition.
                # The total async step count is scraped from the itertools
                # counter's repr, e.g. "count(1234)".
                async_step_total_cnt = int(re.findall('[0-9]+', self.async_step_counter.__str__())[0])
                self.write_scalar_summary('async_global_step', async_step_total_cnt)
        except:
            logger.exception('Cannot log async_global_step')
        super(AsyncMultiGPUTrainer, self)._trigger_epoch()
def stanford_pre(path_bf, path_af, path_root='/home/cc', data_name='nyt', flist='before-parse-problems-flist.txt'):
    """Shard the files under *path_bf* into per-CPU list files and run the
    Stanford NLP pre-processing step (run_snlp) over them in parallel.

    Args:
        path_bf: directory of input files to process.
        path_af: output directory handed to run_snlp.
        path_root: directory in which the shard-list files are written.
        data_name: prefix used when naming the shard-list files.
        flist: suffix used when naming the shard-list files.
    """
    files = [os.path.join(path_bf, f) for f in os.listdir(path_bf)]
    num_shards = multiprocessing.cpu_count()
    # Renamed from `slice`, which shadowed the builtin of the same name.
    shard_size = (len(files) // num_shards)
    for i in range(num_shards):
        # The final shard absorbs any remainder files.
        if (i == (num_shards - 1)):
            shard = files[(i * shard_size):]
        else:
            shard = files[(i * shard_size):((i + 1) * shard_size)]
        with open(os.path.join(path_root, ((data_name + str(i)) + flist)), 'w') as fd:
            fd.write('\n'.join(shard))
    file_lists = [os.path.join(path_root, ((data_name + str(i)) + flist)) for i in range(num_shards)]
    # Each worker receives a (shard-list path, output dir) pair.
    inp = zip(file_lists, ([path_af] * len(file_lists)))
    pool = multiprocessing.Pool(processes=num_shards)
    pool.map(run_snlp, inp)
    pool.close()
    pool.join()
class TestBoxIOU(unittest.TestCase):
    """Verifies pairwise_iou on axis-aligned boxes against hand-computed
    intersection-over-union values."""

    def test_pairwise_iou(self):
        # Two identical unit boxes compared against six probe boxes.
        boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])
        boxes2 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 1.0, 0.5], [0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.5, 0.5, 1.5, 1.5]])
        # Last column: intersection 0.25, union 1 + 1 - 0.25 = 1.75.
        expected_ious = torch.tensor([[1.0, 0.5, 0.5, 0.25, 0.25, (0.25 / (2 - 0.25))], [1.0, 0.5, 0.5, 0.25, 0.25, (0.25 / (2 - 0.25))]])
        ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2))
        assert torch.allclose(ious, expected_ious)
def regadv():
    """Write an adversarial-training job configuration as JSON to the file
    named by the first command-line argument."""
    filename = sys.argv[1]
    model_list = ['ResNet18']
    batch_size = [(512, 32)]
    jobs = []
    for i, modelname in enumerate(model_list):
        train_bs, test_bs = batch_size[i]
        jobs.append({
            'eps': 0.047,
            'alpha': 0.01,
            'model': {'name': modelname},
            'adv_train': {'attack': 'ml'},
            'logfilename': './log/SVHN/{}_adv.log'.format(modelname),
            'savename': './models/SVHN/{}_adv.pth'.format(modelname),
            'epoch': 100,
            'epoch1': 60,
            'epoch2': 80,
            'lr': 0.1,
            'momentum': 0.9,
            'epoch1_lr': 0.01,
            'epoch2_lr': 0.001,
            'test_attack': ['untarg1', 'untarg2'],
            'n_test_adv': 1000,
            'train_batch_size': train_bs,
            'test_batch_size': test_bs,
        })
    config = {'jobs': jobs}
    with open(filename, 'w+') as f:
        f.write(json.dumps(config, indent=4))
def unwrap_if_singleton(x):
    """Return ``x['single_element']`` when *x* is a one-entry dict holding
    exactly that key; otherwise return *x* unchanged."""
    is_singleton_dict = (
        isinstance(x, dict)
        and len(x) == 1
        and 'single_element' in x
    )
    return x['single_element'] if is_singleton_dict else x
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one epoch of contrastive (MoCo-style) training.

    Args:
        train_loader: yields ((view0, view1) image pairs, ignored label).
        model: returns contrastive logits/targets for query/key views.
        criterion: loss applied to the contrastive logits.
        optimizer: stepped once per batch.
        epoch: current epoch number (logging only).
        args: options object; uses gpu, method, alpha, print_freq.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    # NOTE(review): the accuracy meters are created with empty names —
    # confirm this is intentional for the progress display.
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (images, _)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            images[0] = images[0].cuda(args.gpu, non_blocking=True)
            images[1] = images[1].cuda(args.gpu, non_blocking=True)
        # NOTE(review): the branches below assume args.method is one of
        # None / 'ifm' / 'ifm_only'; any other value leaves `output` and
        # `loss` unbound so accuracy() would raise NameError — confirm.
        if (args.method is None):
            (output, target) = model(im_q=images[0], im_k=images[1])
            loss = criterion(output, target)
        if (args.method == 'ifm'):
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_orig = criterion(output, target)
            loss_adv = criterion(output_adv, target)
            # Blend clean and adversarial-feature losses, renormalised so
            # the total weight stays 1.
            loss = (loss_orig + (args.alpha * loss_adv))
            loss /= (1 + args.alpha)
        if (args.method == 'ifm_only'):
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_adv = criterion(output_adv, target)
            loss = loss_adv
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images[0].size(0))
        top1.update(acc1[0], images[0].size(0))
        top5.update(acc5[0], images[0].size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
def get_post_fmean(blm, X, Psi=None, w=None):
    """Posterior predictive mean of a Bayesian linear model.

    Args:
        blm: model whose ``lik.linear`` supplies the basis and bias.
        X: inputs used to evaluate the basis when *Psi* is not given.
        Psi: optional precomputed design matrix.
        w: optional weight vector; defaults to the posterior mean weights.

    Returns:
        ``Psi @ w + bias``.
    """
    design = blm.lik.linear.basis.get_basis(X) if Psi is None else Psi
    weights = get_post_params_mean(blm) if w is None else w
    return design.dot(weights) + blm.lik.linear.bias
# NOTE(review): the bare `_model` below looks like a mangled
# `@register_model`-style decorator line — confirm against the original
# source; as written it is a no-op name reference.
_model
def resnet12_wide(pretrained=False, **kwargs):
    """ResNet-12 variant with doubled base width (wide version).

    Args:
        pretrained: forwarded to _create_resnet to load pretrained weights.
        **kwargs: extra model arguments merged into the defaults.
    """
    model_args = dict(block=BasicBlock, layers=[1, 1, 1, 2], base_width=(64 * 2), our_ver=True, **kwargs)
    return _create_resnet('resnet12_wide', pretrained, **model_args)
def run_model_on_fold(name, max_len, embed_size, embed, bulid_fun):
    """Cross-validate a text model and append averaged metrics to setC.txt.

    Relies on module-level ``folds`` (splitter), ``X`` and ``y``.

    Args:
        name: label used in the printed/recorded summary row.
        max_len: sequence length to pad/truncate token sequences to.
        embed_size: embedding dimensionality handed to the model builder.
        embed: embedding source for create_embedding_matrix.
        bulid_fun: model-building/training callable (parameter name kept
            for interface compatibility).
    """
    max_features = 50000
    metric_keys = ('fit_time', 'score_time', 'test_F1', 'test_Precision',
                   'test_Recall', 'test_Accuracy', 'test_Specificity',
                   'test_Sensitivity')
    scores = {key: [] for key in metric_keys}
    for (fold_n, (train_index, valid_index)) in enumerate(folds.split(X, y)):
        print('Fold', fold_n, 'started at', time.ctime())
        (X_train, X_valid) = (X[train_index], X[valid_index])
        (y_train, y_valid) = (y[train_index], y[valid_index])
        # Fit the tokenizer only on the training fold to avoid leakage.
        tk = Tokenizer(lower=True, filters='', num_words=max_features, oov_token=True)
        tk.fit_on_texts(X_train)
        train_tokenized = tk.texts_to_sequences(X_train)
        valid_tokenized = tk.texts_to_sequences(X_valid)
        X_train = pad_sequences(train_tokenized, maxlen=max_len)
        X_valid = pad_sequences(valid_tokenized, maxlen=max_len)
        embedding_matrix = create_embedding_matrix(embed, tk, max_features)
        model = bulid_fun(X_train, y_train, X_valid, y_valid, max_len, max_features, embed_size, embedding_matrix, lr=0.001, lr_d=0, spatial_dr=0.1, dense_units=128, conv_size=128, dr=0.1, patience=4, fold_id=fold_n)
        # Threshold sigmoid outputs at 0.5 to obtain hard predictions.
        y_preds = [1 if pred[0] >= 0.5 else 0 for pred in model.predict(X_valid)]
        print(accuracy_score(y_valid, y_preds))
        scores['test_F1'].append(f1_score(y_valid, y_preds, average='macro'))
        scores['test_Precision'].append(precision_score(y_valid, y_preds, average='macro'))
        scores['test_Recall'].append(recall_score(y_valid, y_preds, average='macro'))
        scores['test_Accuracy'].append(accuracy_score(y_valid, y_preds))
        scores['test_Specificity'].append(specificity(y_valid, y_preds))
        scores['test_Sensitivity'].append(sensitivity(y_valid, y_preds))

    def _avg(key):
        # Divisor of 10 matches the fold count assumed by the original code.
        return str('%.4f' % (sum(scores[key]) / 10))

    # Build the summary row once instead of duplicating the format call
    # for print and file output (the original repeated it verbatim).
    summary = '{:<10} | {:<7} {:<7} {:<7} {:<7} {:<7} {:<7}'.format(
        str(name)[:7], _avg('test_F1'), _avg('test_Precision'), _avg('test_Recall'),
        _avg('test_Accuracy'), _avg('test_Specificity'), _avg('test_Sensitivity'))
    print(summary)
    # Context manager replaces the original open/close pair.
    with open('setC.txt', 'a+') as f:
        f.write(summary + '\n')
class Lossless(RateEstimator):
    """Rate estimator that measures the exact cost of losslessly
    compressing the latent with numpy's zip compression."""

    def forward_help(self, z, _, parent=None):
        """Return (z_hat, per-sample rate in nats, logs, other).

        The latent passes through unchanged; the rate is the compressed
        byte size converted to nats and spread over the batch.
        """
        (batch_size, z_dim) = z.shape
        z_hat = z
        with closing(io.BytesIO()) as buffer:
            np.savez_compressed(buffer, to_numpy(z_hat))
            compressed_bits = buffer.getbuffer().nbytes * 8
            bit_rate = compressed_bits / batch_size
        nats_rate = bit_rate * math.log(2)
        # Adding a zero-scaled mean keeps the rate attached to z's batch
        # shape (and autograd graph) without changing its value.
        rates = nats_rate + (z.mean(dim=(- 1)) * 0)
        logs = dict()
        other = dict()
        return (z_hat, rates, logs, other)

    def aux_parameters(self):
        """This estimator has no auxiliary parameters."""
        return iter(())
class NLIDataset(torch.utils.data.Dataset):
    """Natural-language-inference dataset of (premise, hypothesis[, label])
    token-id arrays with optional memory and attention side inputs.

    Items are 8-tuples (premise, hypothesis, memory_key, memory_value,
    attention, cove_premise, cove_hypothesis, label); slots without data
    are filled with 0 so the tuple layout stays stable for collation.
    """

    def __init__(self, premise, hypothesis, label, memory_key=None, memory_value=None, attention=None):
        assert (len(premise) == len(hypothesis))
        # Fixed: np.long was removed in NumPy 1.24; np.int64 is the
        # equivalent integer dtype expected by torch embedding lookups.
        self.premise = premise.astype(np.int64)
        self.hypothesis = hypothesis.astype(np.int64)
        if (label is not None):
            assert (len(hypothesis) == len(label))
            self.label = label.astype(np.int64)
        else:
            self.label = None
        self.memory_key = memory_key
        self.memory_value = memory_value
        self.attention = attention

    def __getitem__(self, index):
        """Return the index-th example as an 8-tuple (0 for absent parts)."""
        prem = self.premise[index]
        hyp = self.hypothesis[index]
        # Memory key/value are only meaningful as a pair.
        if ((self.memory_key is not None) and (self.memory_value is not None)):
            mk = self.memory_key[index]
            mv = self.memory_value[index]
        else:
            mk = 0
            mv = 0
        if (self.attention is not None):
            att = self.attention[index]
        else:
            att = 0
        # CoVe embeddings are not precomputed here; zero placeholders keep
        # the tuple layout expected downstream.
        cove_prem = 0
        cove_hyp = 0
        if (self.label is not None):
            lab = self.label[index]
        else:
            lab = 0
        return (prem, hyp, mk, mv, att, cove_prem, cove_hyp, lab)

    def __len__(self):
        return len(self.hypothesis)
class MetadataField(Field[DataArray]):
    """A Field holding arbitrary metadata that is never padded or converted
    to a tensor; ``as_tensor`` simply returns the stored object."""

    def __init__(self, metadata: Any) -> None:
        self.metadata = metadata

    def get_padding_lengths(self) -> Dict[(str, int)]:
        # Metadata has no padded dimensions.
        return {}

    def as_tensor(self, padding_lengths: Dict[(str, int)]) -> DataArray:
        # Passed through untouched — consumers receive the raw metadata.
        return self.metadata

    def empty_field(self) -> 'MetadataField':
        return MetadataField(None)

    # Fixed: the `cls` first parameter indicates this was meant to be a
    # classmethod; without the decorator, class-level calls would receive
    # the tensor list as `cls`.
    @classmethod
    def batch_tensors(cls, tensor_list: List[DataArray]) -> DataArray:
        # Batching metadata just collects the per-instance objects.
        return tensor_list

    def __str__(self) -> str:
        return f'MetadataField (print field.metadata to see specific information).'
class First_order_chemical_synapse(SynapseModel):
    """First-order chemical synapse: the post-synaptic response R follows
    the weighted pre-synaptic input through a first-order linear filter
    with time constant tau, for a fully connected pre/post pair."""

    def __init__(self, conn, **kwargs):
        super(First_order_chemical_synapse, self).__init__(conn)
        from .Connections import FullConnection
        # This synapse model is only defined for all-to-all connections.
        assert isinstance(conn, FullConnection)
        # Response decay time constant (kwarg 'tau', default 2.0).
        self._syn_tau_variables['tau[link]'] = kwargs.get('tau', 2.0)
        self._syn_variables['R[link]'] = np.zeros([1, conn.post_num])
        self._syn_variables['WgtSum[link]'] = np.zeros([1, conn.post_num])
        # Complex-valued post populations need the specialised weighted-sum op.
        if (conn.post.model_name == 'complex'):
            self._syn_operations.append(['WgtSum[link]', 'mat_mult_weight_complex', (conn.pre_var_name + '[input][updated]'), 'weight[link]', 'complex_beta[post]'])
        else:
            self._syn_operations.append(['WgtSum[link]', 'mat_mult_weight', '[input]', 'weight[link]'])
        # R is the first-order filtered weighted sum, then assigned to the
        # post population's input variable.
        self._syn_operations.append(['R[link]', 'var_linear', 'tau[link]', 'R[link]', 'WgtSum[link][updated]'])
        self._syn_operations.append([(conn.post_var_name + '[post]'), 'assign', 'R[link][updated]'])
class WordSequence(nn.Module):
    """Word-level sequence encoder with task/domain-conditioned parameters.

    Combines word (and optional char / extra-feature) representations and
    feeds them through a CNN, a GRU, or a hypernetwork-parameterized LSTM,
    selected by ``data.word_feature_extractor``.
    """

    def __init__(self, data):
        super(WordSequence, self).__init__()
        print('build word sequence feature extractor: %s...' % data.word_feature_extractor)
        self.gpu = data.HP_gpu
        self.use_char = data.use_char
        self.droplstm = nn.Dropout(data.HP_dropout)
        self.bilstm_flag = data.HP_bilstm
        self.lstm_layer = data.HP_lstm_layer
        self.wordrep = WordRep(data)
        self.input_size = data.word_emb_dim
        self.task_num = data.task_number
        self.domain_num = data.domain_number
        self.task_emb_size = data.task_emb_dim
        self.domain_emb_size = data.domain_emb_dim
        self.pretrain_task_emb = None
        self.pretrain_domain_emb = None
        # Per-model (task, domain) index pairs used to generate LSTM params.
        self.model1_task_idx = data.task_alphabet.get_index(data.model1_task_name)
        self.model1_domain_idx = data.domain_alphabet.get_index(data.model1_domain_name)
        self.model2_task_idx = data.task_alphabet.get_index(data.model2_task_name)
        self.model2_domain_idx = data.domain_alphabet.get_index(data.model2_domain_name)
        self.model3_task_idx = data.task_alphabet.get_index(data.model3_task_name)
        self.model3_domain_idx = data.domain_alphabet.get_index(data.model3_domain_name)
        self.model4_task_idx = data.task_alphabet.get_index(data.model4_task_name)
        self.model4_domain_idx = data.domain_alphabet.get_index(data.model4_domain_name)
        if self.use_char:
            self.input_size += data.HP_char_hidden_dim
            if (data.char_feature_extractor == 'ALL'):
                self.input_size += data.HP_char_hidden_dim
        for idx in range(data.feature_num):
            self.input_size += data.feature_emb_dims[idx]
        if self.bilstm_flag:
            # Bidirectional: split the hidden size across the two directions.
            lstm_hidden = data.HP_hidden_dim // 2
        else:
            lstm_hidden = data.HP_hidden_dim
        self.word_feature_extractor = data.word_feature_extractor
        self.LSTM_param_generator = Network_param_generater(self.input_size, lstm_hidden, data)
        if (self.word_feature_extractor == 'GRU'):
            self.lstm = nn.GRU(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
        elif (self.word_feature_extractor == 'LSTM'):
            self.lstm = LSTM(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
        elif (self.word_feature_extractor == 'CNN'):
            self.word2cnn = nn.Linear(self.input_size, data.HP_hidden_dim)
            self.cnn_layer = data.HP_cnn_layer
            print('CNN layer: ', self.cnn_layer)
            self.cnn_list = nn.ModuleList()
            self.cnn_drop_list = nn.ModuleList()
            self.cnn_batchnorm_list = nn.ModuleList()
            kernel = 3
            # BUG FIX: Conv1d padding must be an int; `(kernel - 1) / 2` is a
            # float in Python 3 and makes nn.Conv1d raise a TypeError.
            pad_size = (kernel - 1) // 2
            for idx in range(self.cnn_layer):
                self.cnn_list.append(nn.Conv1d(data.HP_hidden_dim, data.HP_hidden_dim, kernel_size=kernel, padding=pad_size))
                self.cnn_drop_list.append(nn.Dropout(data.HP_dropout))
                self.cnn_batchnorm_list.append(nn.BatchNorm1d(data.HP_hidden_dim))
        if self.gpu:
            self.droplstm = self.droplstm.cuda()
            if (self.word_feature_extractor == 'CNN'):
                self.word2cnn = self.word2cnn.cuda()
                for idx in range(self.cnn_layer):
                    self.cnn_list[idx] = self.cnn_list[idx].cuda()
                    self.cnn_drop_list[idx] = self.cnn_drop_list[idx].cuda()
                    self.cnn_batchnorm_list[idx] = self.cnn_batchnorm_list[idx].cuda()
            else:
                self.lstm = self.lstm.cuda()
                self.LSTM_param_generator = self.LSTM_param_generator.cuda()

    def forward(self, mode, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """Encode a batch of word sequences.

        Args:
            mode: one of 'model1'..'model4'; selects the (task, domain)
                indices fed to the LSTM parameter generator.

        Returns:
            (outputs_forward, outputs_backward, outputs): for model1/model3
            the first two are the direction-split features (outputs is None);
            otherwise only `outputs` is set.

        Raises:
            ValueError: for an unknown `mode` (previously this surfaced as an
            UnboundLocalError further down).
        """
        word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        if (self.word_feature_extractor == 'CNN'):
            # F.tanh is deprecated/removed; torch.tanh is the supported form.
            word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2, 1).contiguous()
            for idx in range(self.cnn_layer):
                if (idx == 0):
                    cnn_feature = F.relu(self.cnn_list[idx](word_in))
                else:
                    cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
                cnn_feature = self.cnn_drop_list[idx](cnn_feature)
                cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
            feature_out = cnn_feature.transpose(2, 1).contiguous()
        else:
            packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
            hidden = None
            if (mode == 'model1'):
                task_idx = self.model1_task_idx
                domain_idx = self.model1_domain_idx
            elif (mode == 'model2'):
                task_idx = self.model2_task_idx
                domain_idx = self.model2_domain_idx
            elif (mode == 'model3'):
                task_idx = self.model3_task_idx
                domain_idx = self.model3_domain_idx
            elif (mode == 'model4'):
                task_idx = self.model4_task_idx
                domain_idx = self.model4_domain_idx
            else:
                # Fail fast with a clear error instead of UnboundLocalError.
                raise ValueError('unknown mode "{}"'.format(mode))
            lstm_param = self.LSTM_param_generator(task_idx, domain_idx)
            (outputs_forward, outputs_backward, outputs) = (None, None, None)
            if ((mode == 'model1') or (mode == 'model3')):
                (lstm_out, hidden) = self.lstm(packed_words, lstm_param, hidden)
                (lstm_out, _) = pad_packed_sequence(lstm_out)
                feature_out = self.droplstm(lstm_out.transpose(1, 0))
                # Split bidirectional features into forward/backward halves.
                (outputs_forward, outputs_backward) = feature_out.chunk(2, -1)
            else:
                (lstm_out, hidden) = self.lstm(packed_words, lstm_param, hidden)
                (lstm_out, _) = pad_packed_sequence(lstm_out)
                outputs = self.droplstm(lstm_out.transpose(1, 0))
        return (outputs_forward, outputs_backward, outputs)
class IMSATHeader(nn.Module):
    """Multi-sub-head clustering head: each sub-head maps a 1200-d feature
    vector to a softmax distribution over ``output_k`` clusters."""

    def __init__(self, output_k=10, num_sub_heads=5):
        super().__init__()
        self.output_k = output_k
        self.num_sub_heads = num_sub_heads
        # One independent linear + softmax classifier per sub-head.
        self.heads = nn.ModuleList(
            nn.Sequential(nn.Linear(1200, self.output_k), nn.Softmax(dim=1))
            for _ in range(self.num_sub_heads)
        )

    def forward(self, input):
        """Return a list with each sub-head's softmax output for *input*."""
        return [head(input) for head in self.heads]
def chunk_list(examples, chunk_size=2, pad_to_divisible=True):
    """Split *examples* into consecutive chunks of *chunk_size*.

    When *pad_to_divisible* is True and the length is not a multiple of
    *chunk_size*, the list is first padded with randomly re-sampled
    examples so every chunk is full; otherwise the last chunk may be short.
    """
    total = len(examples)
    leftover = total % chunk_size
    if pad_to_divisible and leftover:
        # Pad with random re-samples so the final chunk is complete.
        examples = examples + random.choices(examples, k=chunk_size - leftover)
        total = len(examples)
        leftover = 0
    n_chunks = total // chunk_size + (1 if leftover else 0)
    return [examples[idx * chunk_size:(idx + 1) * chunk_size] for idx in range(n_chunks)]
def get_arrow_hex_str(batched_data, names):
    """Serialize *batched_data* (columns named by *names*) to an Arrow IPC
    stream and return it as a hex-encoded string."""
    import pyarrow as pa
    buffer_sink = pa.BufferOutputStream()
    batch = pa.record_batch(batched_data, names=names)
    with pa.ipc.new_stream(buffer_sink, batch.schema) as stream_writer:
        stream_writer.write_batch(batch)
    # Buffer.hex() yields bytes; decode to a plain str before returning.
    hex_bytes = buffer_sink.getvalue().hex()
    hex_str = hex_bytes.decode('utf-8')
    buffer_sink.close()
    return hex_str
class MetricLogger(object):
    """Collects named meters and prints periodic progress with ETA/timing.

    Unknown meter names are lazily created as SmoothedValue instances;
    meters are also exposed as attributes (``logger.loss``).
    """

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update meters from keyword args; None values are skipped and
        tensors are converted to Python scalars first."""
        for (k, v) in kwargs.items():
            if (v is None):
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Meters shadow regular attributes so `logger.loss` works.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = ['{}: {}'.format(name, str(meter)) for (name, meter) in self.meters.items()]
        return self.delimiter.join(parts)

    def synchronize_between_processes(self):
        """Synchronize every meter across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing progress every *print_freq*
        iterations (and on the final one), with ETA, per-iter timing, data
        loading time and (on CUDA) peak memory.
        """
        i = 0
        if (not header):
            header = ''
        num_items = len(iterable)
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f} ({min:.4f} -- {max:.4f})')
        data_time = SmoothedValue(fmt='{avg:.4f} ({min:.4f} -- {max:.4f})')
        space_fmt = ':' + str(len(str(num_items))) + 'd'
        log_msg = [header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if ((i % print_freq) == 0) or (i == (num_items - 1)):
                eta_seconds = iter_time.global_avg * (num_items - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, num_items, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, num_items, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        # BUG FIX: guard against ZeroDivisionError for an empty iterable.
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, total_time / max(num_items, 1)))
class ResNetPerfCallback(MainCallback):
    """Benchmark callback for ResNet validation: optional IPEX int8
    calibration, dummy-input generation, warmup handling and timed forwards.
    """
    def before_val_epoch(self, runner):
        """Prepare the epoch: run int8 calibration if requested, resolve the
        step count, and pre-build dummy inputs when config['dummy'] is set."""
        if (runner.config['ipex'] and runner.config['int8'] and runner.config['calibration']):
            print('running int8 calibration step\n')
            import intel_extension_for_pytorch as ipex
            from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig
            # Symmetric per-tensor activations / per-channel weights.
            qconfig = QConfig(activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_symmetric, dtype=torch.qint8), weight=PerChannelMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric))
            x = torch.randn(1, 3, 224, 224)
            prepared_model = ipex.quantization.prepare(runner.model, qconfig, x, inplace=True)
            with torch.no_grad():
                # Observe 5 real batches (i = 0..4) to calibrate value ranges.
                for (i, (images, target)) in enumerate(runner.val_loader):
                    images = images.contiguous(memory_format=torch.channels_last)
                    prepared_model(images)
                    if (i == 4):
                        print(i)
                        break
                prepared_model.save_qconf_summary(runner.config['configure_dir'])
                print('calibration step done')
        if (not runner.num_steps):
            runner.num_steps = len(runner.val_loader)
        invalidInputError((runner.num_steps > runner.config['warmup_iterations']), 'total steps should be larger than warmup iterations')
        if runner.config['dummy']:
            # Synthetic ImageNet-shaped batch reused on every step.
            images = torch.randn(runner.config['batch'], 3, 224, 224)
            target = torch.arange(1, (runner.config['batch'] + 1)).long()
            if runner.config['ipex']:
                images = images.contiguous(memory_format=torch.channels_last)
            if runner.config['bf16']:
                images = images.to(torch.bfloat16)
            runner.put('images', images)
            runner.put('target', target)
        if (runner.config['warmup_iterations'] > 0):
            print('running warmup iterations')
    def on_val_forward(self, runner):
        """Run one validation forward; steps before `warmup_iterations` use
        the untimed warmup path."""
        if runner.config['dummy']:
            images = runner.get('images')
            target = runner.get('target')
        else:
            # NOTE(review): next(iter(...)) re-creates the iterator each call,
            # so this always reads the loader's first batch -- confirm intent.
            (images, target) = next(iter(runner.val_loader))
            if runner.config['ipex']:
                images = images.contiguous(memory_format=torch.channels_last)
            if runner.config['bf16']:
                images = images.to(torch.bfloat16)
        runner.batch = (images, target)
        if (runner.batch_idx < runner.config['warmup_iterations']):
            (output, target, loss) = self.forward(runner, images, target, warmup=True)
        else:
            (output, target, loss) = self.forward(runner, images, target, warmup=False)
        runner.output = output
        runner.loss = loss
        runner.target = target
    def forward(self, runner, images, target, warmup=False):
        """Forward pass plus loss; only non-warmup passes run under the
        'non_warmup_eval_fwd' timer. bf16 outputs are upcast before the loss."""
        if warmup:
            if ((not runner.config['jit']) and runner.config['bf16']):
                with torch.cpu.amp.autocast():
                    output = runner.model(images)
            else:
                output = runner.model(images)
        else:
            with runner.timers.record('non_warmup_eval_fwd'):
                if ((not runner.config['jit']) and runner.config['bf16']):
                    with torch.cpu.amp.autocast():
                        output = runner.model(images)
                else:
                    output = runner.model(images)
        if runner.config['bf16']:
            output = output.to(torch.float32)
        loss = runner.criterion(output, target)
        return (output, target, loss)
def _add_category(context, name: str=None, color: Tuple[float]=None) -> None:
    """Register a new category on the scene, skipping duplicates and picking
    a random float-RGB color when none is supplied."""
    if name in context.scene.categories.keys():
        log.warning(f'Skipping duplicate category {name}.')
        return
    if color is None:
        color = zpy.color.random_color(output_style='frgb')
        log.info(f'Choosing random color for category {name}: {color}')
    category = context.scene.categories.add()
    category.name = name
    category.color = color
class Interpolation():
    """Parallel trajectory interpolation (linear / cubic / kinematic /
    random-walk), fanning dataframe chunks out over multiprocessing workers.
    """

    @staticmethod
    def interpolate_position(dataframe: NumTrajDF, sampling_rate: float, ip_type: Optional[Text]='linear', class_label_col: Optional[Text]=''):
        """Interpolate positions at *sampling_rate* using *ip_type*.

        Returns:
            A new NumTrajDF with the interpolated points.

        Raises:
            ValueError: if *ip_type* is not a supported method.
        """
        df = dataframe.reset_index()
        df_chunks = helper._df_split_helper(df)
        manager = mlp.Manager()
        return_list = manager.list()
        ip_type = ip_type.lower().strip()
        # Dispatch table replaces four copy-pasted per-type branches.
        workers = {
            'linear': Interpolation._linear_ip,
            'cubic': Interpolation._cubic_ip,
            'kinematic': Interpolation._kinematic_ip,
            'random-walk': Interpolation._random_walk_ip,
        }
        if ip_type not in workers:
            raise ValueError(f'Interpolation type: {ip_type} specified does not exist. Please check the interpolation type specified and type again.')
        processes = [mlp.Process(target=workers[ip_type], args=(chunk, sampling_rate, return_list, class_label_col)) for chunk in df_chunks]
        for proc in processes:
            proc.start()
        for proc in processes:
            proc.join()
        return NumTrajDF(pd.concat(return_list).reset_index(), const.LAT, const.LONG, const.DateTime, const.TRAJECTORY_ID)

    @staticmethod
    def _run_chunked_pool(help_fn, dataframe, sampling_rate, return_list, class_label_col):
        """Shared worker body: split *dataframe* by trajectory id, map
        *help_fn* over the pieces in a process pool, and append the merged
        result to *return_list*."""
        cols = [const.DateTime, const.TRAJECTORY_ID, const.LAT, const.LONG]
        if class_label_col != '':
            cols.append(class_label_col)
        dataframe = dataframe.reset_index()[cols].set_index(const.DateTime)
        ids_ = list(dataframe[const.TRAJECTORY_ID].value_counts().keys())
        traj_chunks = [dataframe.loc[dataframe[const.TRAJECTORY_ID] == traj_id] for traj_id in ids_]
        small_pool = mlp.Pool(NUM_CPU)
        final = small_pool.starmap(help_fn, zip(traj_chunks, ids_, itertools.repeat(sampling_rate), itertools.repeat(class_label_col)))
        small_pool.close()
        small_pool.join()
        return_list.append(pd.concat(final))

    @staticmethod
    def _linear_ip(dataframe: Union[(pd.DataFrame, NumTrajDF)], sampling_rate: float, return_list: list, class_label_col):
        """Linear interpolation worker."""
        Interpolation._run_chunked_pool(helper.linear_help, dataframe, sampling_rate, return_list, class_label_col)

    @staticmethod
    def _cubic_ip(dataframe: Union[(pd.DataFrame, NumTrajDF)], sampling_rate: float, return_list: list, class_label_col):
        """Cubic interpolation worker.

        BUG FIX: the original wrapped this body in ``except ValueError:
        raise ValueError``, which re-raised a *new* ValueError and discarded
        the message; errors now propagate unchanged.
        """
        Interpolation._run_chunked_pool(helper.cubic_help, dataframe, sampling_rate, return_list, class_label_col)

    @staticmethod
    def _kinematic_ip(dataframe: Union[(pd.DataFrame, NumTrajDF)], sampling_rate, return_list, class_label_col):
        """Kinematic interpolation worker."""
        Interpolation._run_chunked_pool(helper.kinematic_help, dataframe, sampling_rate, return_list, class_label_col)

    @staticmethod
    def _random_walk_ip(dataframe: Union[(pd.DataFrame, NumTrajDF)], sampling_rate, return_list, class_label_col):
        """Random-walk interpolation worker."""
        Interpolation._run_chunked_pool(helper.random_walk_help, dataframe, sampling_rate, return_list, class_label_col)
def sample_vectors(samples, num):
    """Draw *num* rows from *samples*: without replacement when enough rows
    exist, otherwise uniformly with replacement."""
    count = samples.shape[0]
    device = samples.device
    if count >= num:
        chosen = torch.randperm(count, device=device)[:num]
    else:
        chosen = torch.randint(0, count, (num,), device=device)
    return samples[chosen]
def get_figure_properties(fig):
    """Return a matplotlib figure's width/height (inches) and dpi as a dict."""
    props = {}
    props['figwidth'] = fig.get_figwidth()
    props['figheight'] = fig.get_figheight()
    props['dpi'] = fig.dpi
    return props
def evaluate(Net, config, load_data, train, test, optim_func):
    """Train and evaluate `Net` for `config['num_trials_eval']` independent
    trials, logging per-epoch loss / accuracy / learning rate to CSV.

    Args:
        Net: model class, constructed as ``Net(config)``.
        config: experiment dict (seed, batch size, epochs, flags, ...).
        load_data: callable(config) -> (trainset, testset).
        train: callable running one epoch, returns (loss_list, lr_list).
        test: callable(config, net, loader, device) -> accuracy.
        optim_func: callable(net, config) -> (optimizer, scheduler, loss_dependent).
    """
    file_name = config['exp_name']
    for trial in range(config['num_trials_eval']):
        csv_name = file_name + '_t' + str(trial) + '.csv'
        model_name = file_name + '_t' + str(trial) + '.pt'
        num_epochs = config['num_epochs_eval']
        # Distinct but reproducible seed per trial.
        set_all_seeds(config['seed'] + trial)
        df_train_loss = pd.DataFrame()
        df_test_acc = pd.DataFrame(columns=['epoch', 'test_acc', 'train_time'])
        df_lr = pd.DataFrame()
        net = Net(config)
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        net.to(device)
        criterion = SF.mse_count_loss(correct_rate=config['correct_rate'], incorrect_rate=config['incorrect_rate'])
        (optimizer, scheduler, loss_dependent) = optim_func(net, config)
        if config['early_stopping']:
            early_stopping = EarlyStopping_acc(patience=config['patience'], verbose=True, path=model_name)
            early_stopping.early_stop = False
            early_stopping.best_score = None
        (trainset, testset) = load_data(config)
        config['dataset_length'] = len(trainset)
        trainloader = DataLoader(trainset, batch_size=int(config['batch_size']), shuffle=True)
        testloader = DataLoader(testset, batch_size=int(config['batch_size']), shuffle=False)
        if loss_dependent:
            old_loss_hist = float('inf')
        print(f"=======Trial: {trial}, Batch: {config['batch_size']}, beta: {config['beta']:.3f}, threshold: {config['threshold']:.2f}, slope: {config['slope']}, lr: {config['lr']:.3e}======")
        for epoch in range(num_epochs):
            start_time = time.time()
            (loss_list, lr_list) = train(config, net, trainloader, criterion, optimizer, device, scheduler)
            epoch_time = time.time() - start_time
            if loss_dependent:
                # Halve the learning rate whenever the mean epoch loss got worse.
                avg_loss_hist = sum(loss_list) / len(loss_list)
                if avg_loss_hist > old_loss_hist:
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = param_group['lr'] * 0.5
                else:
                    old_loss_hist = avg_loss_hist
            test_accuracy = test(config, net, testloader, device)
            print(f'Epoch: {epoch} Test Accuracy: {test_accuracy}')
            # BUG FIX: DataFrame.append was removed in pandas 2.0; use
            # pd.concat. loss_list / lr_list are assumed to be flat lists of
            # per-batch values -- TODO confirm against `train`.
            df_lr = pd.concat([df_lr, pd.DataFrame(lr_list)], ignore_index=True)
            df_train_loss = pd.concat([df_train_loss, pd.DataFrame(loss_list)], ignore_index=True)
            df_test_acc = pd.concat([df_test_acc, pd.DataFrame([{'epoch': epoch, 'test_acc': test_accuracy, 'train_time': epoch_time}])], ignore_index=True)
            if config['save_csv']:
                df_train_loss.to_csv('loss_' + csv_name, index=False)
                df_test_acc.to_csv('acc_' + csv_name, index=False)
                df_lr.to_csv('lr_' + csv_name, index=False)
            if config['early_stopping']:
                early_stopping(test_accuracy, net)
                if early_stopping.early_stop:
                    print('Early stopping')
                    early_stopping.early_stop = False
                    early_stopping.best_score = None
                    break
def replaceRule(expr, repls):
    """Apply each (pattern -> replacement) pair in *repls* to *expr* via
    ``expr.replace`` (non-exact, simultaneous, no map) and return the
    rewritten expression."""
    rewritten = expr
    for pattern, replacement in repls.items():
        rewritten = rewritten.replace(pattern, replacement, map=False, simultaneous=True, exact=False)
    return rewritten
class Walker2dFullObsEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Walker2d variant with full observations: complete qpos (including the
    root x position) concatenated with qvel."""

    def __init__(self):
        xml_path = os.path.join(os.path.dirname(__file__), 'assets/walker2d.xml')
        mujoco_env.MujocoEnv.__init__(self, xml_path, 4)
        utils.EzPickle.__init__(self)

    def step(self, a):
        """Advance the sim by one action; reward = forward velocity
        + alive bonus - control cost; done when the torso leaves the
        healthy height/angle range."""
        x_before = self.sim.data.qpos[0]
        self.do_simulation(a, self.frame_skip)
        (x_after, torso_height, torso_angle) = self.sim.data.qpos[0:3]
        reward = (x_after - x_before) / self.dt
        reward += 1.0
        reward -= 0.001 * np.square(a).sum()
        healthy = (0.8 < torso_height < 2.0) and (-1.0 < torso_angle < 1.0)
        return (self._get_obs(), reward, not healthy, {})

    def _get_obs(self):
        """Full state observation: all generalized positions and velocities."""
        state = self.sim.data
        return np.concatenate([state.qpos, state.qvel]).ravel()

    def reset_model(self):
        """Reset to the initial pose plus small uniform noise."""
        qpos = self.init_qpos + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.uniform(low=-0.005, high=0.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        cam = self.viewer.cam
        cam.trackbodyid = 2
        cam.distance = self.model.stat.extent * 0.5
        cam.lookat[2] = 1.15
        cam.elevation = -20
class TupleTensorOutputModel(nn.Module):
    """Toy two-branch model whose forward returns both the final score and a
    tuple of intermediate activations."""

    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(28 * 28, 12)
        self.layer_2 = nn.Linear(28 * 28, 12)
        self.layer_3 = nn.Linear(24, 1)

    def forward(self, x1, x2):
        """Encode each input, concatenate the branches, and score them."""
        branch_a = self.layer_1(x1)
        branch_b = self.layer_2(x2)
        joint = torch.cat([branch_a, branch_b], dim=1)
        score = self.layer_3(joint)
        return (score, (branch_a, branch_b, joint))
_criterion('cross_entropy')
# NOTE(review): the call above looks like a decorator-factory invocation that
# lost its '@' (cf. fairseq's @register_criterion('cross_entropy')); as
# written its return value is discarded and the class is never registered.
# Left unchanged pending confirmation against the upstream source.
class CrossEntropyCriterion(FairseqCriterion):
    """Token-level cross-entropy (negative log-likelihood) criterion."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for one sample.

        Returns:
            (loss, sample_size, logging_output); sample_size is sentences
            when ``--sentence-avg`` is set, otherwise tokens.
        """
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        # FIX: the `size_average`/`reduce` kwargs were deprecated and then
        # removed from F.nll_loss; `reduction` expresses the same behavior
        # (sum == old size_average=False + reduce=True; 'none' == reduce=False).
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        if not reduce:
            logging_output['model_out'] = net_output
        return (loss, sample_size, logging_output)

    # FIX: declared without @staticmethod although it takes no self/cls;
    # the decorator keeps class-level calls working and fixes instance calls.
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs across workers; losses are reported in
        bits (divided by log 2)."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        agg_output = {'loss': ((loss_sum / sample_size) / math.log(2)), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        if sample_size != ntokens:
            # Also report per-token NLL when sample_size is sentence-based.
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        return agg_output
def create_dir(dir_path):
    """Create *dir_path* (including parents) if needed and return it.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    exists-check and the creation (the original LBYL check could raise
    FileExistsError under contention).
    """
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
class SpatialTemporalEnsemble(nn.Module):
    """Test-time ensemble over the eight spatial flip/transpose variants
    (and optionally the temporal flip), averaging the model outputs after
    mapping each one back to the original orientation.
    """
    def __init__(self, is_temporal_ensemble=False):
        super().__init__()
        # When True, inputs are 5-D (with a time axis at dim 1) and the
        # time-reversed sequence is also evaluated and averaged in.
        self.is_temporal_ensemble = is_temporal_ensemble
    def _transform(self, imgs, mode):
        """Apply one spatial transform ('vertical'/'horizontal'/'transpose')
        to a 4-D image or 5-D video batch.

        Each transform is its own inverse, so applying the same mode again
        undoes it.
        """
        is_single_image = False
        if (imgs.ndim == 4):
            if self.is_temporal_ensemble:
                raise ValueError('"is_temporal_ensemble" must be False if the input is an image.')
            # Add a singleton time axis so all branches index dims identically.
            is_single_image = True
            imgs = imgs.unsqueeze(1)
        if (mode == 'vertical'):
            imgs = imgs.flip(4).clone()
        elif (mode == 'horizontal'):
            imgs = imgs.flip(3).clone()
        elif (mode == 'transpose'):
            imgs = imgs.permute(0, 1, 2, 4, 3).clone()
        if is_single_image:
            imgs = imgs.squeeze(1)
        return imgs
    def spatial_ensemble(self, imgs, model):
        """Run `model` on all 8 flip/transpose variants of `imgs`, undo the
        transform on each output, and return the mean prediction."""
        # Doubling trick: after the loop img_list holds the 8 combinations
        # in index order i = 0..7: identity, v, h, hv, t, tv, th, thv.
        img_list = [imgs.cpu()]
        for mode in ['vertical', 'horizontal', 'transpose']:
            img_list.extend([self._transform(t, mode) for t in img_list])
        output_list = [model(t.to(imgs.device)).cpu() for t in img_list]
        # Invert each output: transpose first (indices 4..7), then
        # horizontal (i % 4 in {2, 3}), then vertical (odd i % 4).
        for i in range(len(output_list)):
            if (i > 3):
                output_list[i] = self._transform(output_list[i], 'transpose')
            if ((i % 4) > 1):
                output_list[i] = self._transform(output_list[i], 'horizontal')
            if (((i % 4) % 2) == 1):
                output_list[i] = self._transform(output_list[i], 'vertical')
        outputs = torch.stack(output_list, dim=0)
        outputs = outputs.mean(dim=0, keepdim=False)
        return outputs.to(imgs.device)
    def forward(self, imgs, model):
        """Spatially (and optionally temporally) ensembled prediction."""
        outputs = self.spatial_ensemble(imgs, model)
        if self.is_temporal_ensemble:
            # Average with the prediction on the time-reversed clip.
            outputs += self.spatial_ensemble(imgs.flip(1), model).flip(1)
            outputs *= 0.5
        return outputs
class HiLAMParallel(BaseHiGraphModel):
    """Hierarchical graph model variant where same-level, up and down mesh
    edges are merged into one joint edge set so all message passing runs in
    a single (parallel) processor.
    """
    def __init__(self, args):
        super().__init__(args)
        # Concatenate every mesh edge set into one big edge_index and
        # remember each set's size so representations can be split back.
        total_edge_index_list = ((list(self.m2m_edge_index) + list(self.mesh_up_edge_index)) + list(self.mesh_down_edge_index))
        total_edge_index = torch.cat(total_edge_index_list, dim=1)
        self.edge_split_sections = [ei.shape[1] for ei in total_edge_index_list]
        if (args.processor_layers == 0):
            # No processor layers: identity on node and edge representations.
            self.processor = (lambda x, edge_attr: (x, edge_attr))
        else:
            processor_nets = [InteractionNet(total_edge_index, args.hidden_dim, hidden_layers=args.hidden_layers, edge_chunk_sizes=self.edge_split_sections, aggr_chunk_sizes=self.N_mesh_levels) for _ in range(args.processor_layers)]
            self.processor = pyg.nn.Sequential('mesh_rep, edge_rep', [(net, 'mesh_rep, mesh_rep, edge_rep -> mesh_rep, edge_rep') for net in processor_nets])
    def hi_processor_step(self, mesh_rep_levels, mesh_same_rep, mesh_up_rep, mesh_down_rep):
        """Run the joint processor once and split the results back into
        per-level node representations and same/up/down edge groups."""
        mesh_rep = torch.cat(mesh_rep_levels, dim=1)
        mesh_edge_rep = torch.cat(((mesh_same_rep + mesh_up_rep) + mesh_down_rep), axis=1)
        (mesh_rep, mesh_edge_rep) = self.processor(mesh_rep, mesh_edge_rep)
        # Split nodes back into levels, edges back into the three groups
        # (same-level first, then up, then down -- matching __init__ order).
        mesh_rep_levels = list(torch.split(mesh_rep, self.N_mesh_levels, dim=1))
        mesh_edge_rep_sections = torch.split(mesh_edge_rep, self.edge_split_sections, dim=1)
        mesh_same_rep = mesh_edge_rep_sections[:self.N_levels]
        mesh_up_rep = mesh_edge_rep_sections[self.N_levels:(self.N_levels + (self.N_levels - 1))]
        mesh_down_rep = mesh_edge_rep_sections[(self.N_levels + (self.N_levels - 1)):]
        return (mesh_rep_levels, mesh_same_rep, mesh_up_rep, mesh_down_rep)
def init_weights(net, init_type='kaiming', scale=1.0, std=0.02):
    """Initialize *net*'s weights in place.

    Args:
        init_type: 'normal' (uses *std*), 'kaiming' (uses *scale*) or
            'orthogonal'.

    Raises:
        NotImplementedError: for an unrecognized *init_type*.
    """
    print('initialization method [{:s}]'.format(init_type))
    if init_type == 'normal':
        net.apply(functools.partial(weights_init_normal, std=std))
        return
    if init_type == 'kaiming':
        net.apply(functools.partial(weights_init_kaiming, scale=scale))
        return
    if init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
        return
    raise NotImplementedError('initialization method [{:s}] not implemented'.format(init_type))
class World(object):
    def __init__(self, name, args, timeout):
        """Hold all state for the CARLA map visualizer; heavyweight setup
        (server connection, surfaces, hero actor) happens in start()."""
        self.client = None
        self.name = name
        self.args = args
        self.timeout = timeout
        # Server-side stats, updated by the on_world_tick callback.
        self.server_fps = 0.0
        self.simulation_time = 0
        self.server_clock = pygame.time.Clock()
        # World / map data, filled in by start().
        self.world = None
        self.town_map = None
        self.actors_with_transforms = []
        self._hud = None
        self._input = None
        # Map surface size / zoom bookkeeping.
        self.surface_size = [0, 0]
        self.prev_scaled_size = 0
        self.scaled_size = 0
        # Hero actor state (spawned_hero is set only when we spawn it ourselves).
        self.hero_actor = None
        self.spawned_hero = None
        self.hero_transform = None
        self.scale_offset = [0, 0]
        # Render surfaces, created in start().
        self.vehicle_id_surface = None
        self.result_surface = None
        self.traffic_light_surfaces = TrafficLightSurfaces()
        self.affected_traffic_light = None
        self.map_image = None
        self.border_round_surface = None
        self.original_surface_size = None
        self.hero_surface = None
        self.actors_surface = None
    def _get_data_from_carla(self):
        """Connect to the CARLA server and return (world, town_map).

        Attaches to the currently running world unless ``args.map`` is set,
        in which case that map is loaded. On connection failure the error is
        logged and the game exits.
        """
        try:
            self.client = carla.Client(self.args.host, self.args.port)
            self.client.set_timeout(self.timeout)
            if (self.args.map is None):
                world = self.client.get_world()
            else:
                world = self.client.load_world(self.args.map)
            town_map = world.get_map()
            return (world, town_map)
        except RuntimeError as ex:
            logging.error(ex)
            exit_game()
    def start(self, hud, input_control):
        """Second-stage initialization: connect to CARLA, render the map
        image, build all pygame surfaces and select/spawn the hero actor."""
        (self.world, self.town_map) = self._get_data_from_carla()
        settings = self.world.get_settings()
        settings.no_rendering_mode = self.args.no_rendering
        self.world.apply_settings(settings)
        self.map_image = MapImage(carla_world=self.world, carla_map=self.town_map, pixels_per_meter=PIXELS_PER_METER, show_triggers=self.args.show_triggers, show_connections=self.args.show_connections, show_spawn_points=self.args.show_spawn_points)
        self._hud = hud
        self._input = input_control
        self.original_surface_size = min(self._hud.dim[0], self._hud.dim[1])
        self.surface_size = self.map_image.big_map_surface.get_width()
        self.scaled_size = int(self.surface_size)
        self.prev_scaled_size = int(self.surface_size)
        # Overlay surfaces use black as the transparent colorkey.
        self.actors_surface = pygame.Surface((self.map_image.surface.get_width(), self.map_image.surface.get_height()))
        self.actors_surface.set_colorkey(COLOR_BLACK)
        self.vehicle_id_surface = pygame.Surface((self.surface_size, self.surface_size)).convert()
        self.vehicle_id_surface.set_colorkey(COLOR_BLACK)
        # Round border mask drawn around the hero-mode view.
        self.border_round_surface = pygame.Surface(self._hud.dim, pygame.SRCALPHA).convert()
        self.border_round_surface.set_colorkey(COLOR_WHITE)
        self.border_round_surface.fill(COLOR_BLACK)
        center_offset = (int((self._hud.dim[0] / 2)), int((self._hud.dim[1] / 2)))
        pygame.draw.circle(self.border_round_surface, COLOR_ALUMINIUM_1, center_offset, int((self._hud.dim[1] / 2)))
        pygame.draw.circle(self.border_round_surface, COLOR_WHITE, center_offset, int(((self._hud.dim[1] - 8) / 2)))
        # Hero view surface is 1/0.9 of the HUD size (extra margin).
        scaled_original_size = (self.original_surface_size * (1.0 / 0.9))
        self.hero_surface = pygame.Surface((scaled_original_size, scaled_original_size)).convert()
        self.result_surface = pygame.Surface((self.surface_size, self.surface_size)).convert()
        self.result_surface.set_colorkey(COLOR_BLACK)
        self.select_hero_actor()
        self.hero_actor.set_autopilot(False)
        self._input.wheel_offset = HERO_DEFAULT_SCALE
        self._input.control = carla.VehicleControl()
        # Weakref avoids the server callback keeping this World alive.
        weak_self = weakref.ref(self)
        self.world.on_tick((lambda timestamp: World.on_world_tick(weak_self, timestamp)))
    def select_hero_actor(self):
        """Adopt an existing vehicle whose role_name is 'hero' if one exists,
        otherwise spawn a new hero vehicle."""
        hero_vehicles = [actor for actor in self.world.get_actors() if (('vehicle' in actor.type_id) and (actor.attributes['role_name'] == 'hero'))]
        if (len(hero_vehicles) > 0):
            self.hero_actor = random.choice(hero_vehicles)
            self.hero_transform = self.hero_actor.get_transform()
        else:
            self._spawn_hero()
    def _spawn_hero(self):
        """Spawn a hero vehicle from the configured blueprint filter,
        retrying random spawn points until one succeeds."""
        blueprint = random.choice(self.world.get_blueprint_library().filter(self.args.filter))
        blueprint.set_attribute('role_name', 'hero')
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        # try_spawn_actor returns None on a blocked point; keep trying.
        while (self.hero_actor is None):
            spawn_points = self.world.get_map().get_spawn_points()
            spawn_point = (random.choice(spawn_points) if spawn_points else carla.Transform())
            self.hero_actor = self.world.try_spawn_actor(blueprint, spawn_point)
        self.hero_transform = self.hero_actor.get_transform()
        # Record that this hero was spawned by us -- presumably so it can be
        # cleaned up on shutdown; TODO confirm where it is destroyed.
        self.spawned_hero = self.hero_actor
    def tick(self, clock):
        """Per-frame update: cache every actor's transform and refresh the HUD."""
        actors = self.world.get_actors()
        self.actors_with_transforms = [(actor, actor.get_transform()) for actor in actors]
        if (self.hero_actor is not None):
            self.hero_transform = self.hero_actor.get_transform()
        self.update_hud_info(clock)
    def update_hud_info(self, clock):
        """Refresh HUD panels: server/client FPS, simulation time and map
        name; plus the hero's speed, traffic light and speed limit when a
        hero actor exists."""
        hero_mode_text = []
        if (self.hero_actor is not None):
            hero_speed = self.hero_actor.get_velocity()
            # |v| converted from m/s to km/h.
            hero_speed_text = (3.6 * math.sqrt((((hero_speed.x ** 2) + (hero_speed.y ** 2)) + (hero_speed.z ** 2))))
            affected_traffic_light_text = 'None'
            if (self.affected_traffic_light is not None):
                state = self.affected_traffic_light.state
                if (state == carla.TrafficLightState.Green):
                    affected_traffic_light_text = 'GREEN'
                elif (state == carla.TrafficLightState.Yellow):
                    affected_traffic_light_text = 'YELLOW'
                else:
                    affected_traffic_light_text = 'RED'
            affected_speed_limit_text = self.hero_actor.get_speed_limit()
            # Speed limit can come back NaN; display 0 instead.
            if math.isnan(affected_speed_limit_text):
                affected_speed_limit_text = 0.0
            hero_mode_text = ['Hero Mode: ON', ('Hero ID: %7d' % self.hero_actor.id), ('Hero Vehicle: %14s' % get_actor_display_name(self.hero_actor, truncate=14)), ('Hero Speed: %3d km/h' % hero_speed_text), 'Hero Affected by:', (' Traffic Light: %12s' % affected_traffic_light_text), (' Speed Limit: %3d km/h' % affected_speed_limit_text)]
        else:
            hero_mode_text = ['Hero Mode: OFF']
        self.server_fps = self.server_clock.get_fps()
        self.server_fps = ('inf' if (self.server_fps == float('inf')) else round(self.server_fps))
        info_text = [('Server: % 16s FPS' % self.server_fps), ('Client: % 16s FPS' % round(clock.get_fps())), ('Simulation Time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time))), ('Map Name: %10s' % self.town_map.name)]
        self._hud.add_info(self.name, info_text)
        self._hud.add_info('HERO', hero_mode_text)
def on_world_tick(weak_self, timestamp):
self = weak_self()
if (not self):
return
self.server_clock.tick()
self.server_fps = self.server_clock.get_fps()
self.simulation_time = timestamp.elapsed_seconds
    def _show_nearby_vehicles(self, vehicles):
        """Fill the HUD's 'NEARBY VEHICLES' panel with up to 16 vehicles closest
        to the hero (empty when there is no hero or no other vehicle)."""
        info_text = []
        if ((self.hero_actor is not None) and (len(vehicles) > 1)):
            location = self.hero_transform.location
            # All vehicles except the hero itself.
            vehicle_list = [x[0] for x in vehicles if (x[0].id != self.hero_actor.id)]
            def distance(v):
                return location.distance(v.get_location())
            # Sorted by distance to the hero; cut off after 16 entries.
            for (n, vehicle) in enumerate(sorted(vehicle_list, key=distance)):
                if (n > 15):
                    break
                vehicle_type = get_actor_display_name(vehicle, truncate=22)
                info_text.append(('% 5d %s' % (vehicle.id, vehicle_type)))
        self._hud.add_info('NEARBY VEHICLES', info_text)
def _split_actors(self):
vehicles = []
traffic_lights = []
speed_limits = []
walkers = []
for actor_with_transform in self.actors_with_transforms:
actor = actor_with_transform[0]
if ('vehicle' in actor.type_id):
vehicles.append(actor_with_transform)
elif ('traffic_light' in actor.type_id):
traffic_lights.append(actor_with_transform)
elif ('speed_limit' in actor.type_id):
speed_limits.append(actor_with_transform)
elif ('walker.pedestrian' in actor.type_id):
walkers.append(actor_with_transform)
return (vehicles, traffic_lights, speed_limits, walkers)
    def _render_traffic_lights(self, surface, list_tl, world_to_pixel):
        """Draw every traffic light on `surface` and record in
        `self.affected_traffic_light` the light (if any) whose trigger volume
        currently reaches the hero."""
        self.affected_traffic_light = None
        for tl in list_tl:
            world_pos = tl.get_location()
            pos = world_to_pixel(world_pos)
            if self.args.show_triggers:
                # Debug outline of the light's trigger volume.
                corners = Util.get_bounding_box(tl)
                corners = [world_to_pixel(p) for p in corners]
                pygame.draw.lines(surface, COLOR_BUTTER_1, True, corners, 2)
            if (self.hero_actor is not None):
                corners = Util.get_bounding_box(tl)
                corners = [world_to_pixel(p) for p in corners]
                tl_t = tl.get_transform()
                # Trigger-volume centre transformed into world coordinates.
                transformed_tv = tl_t.transform(tl.trigger_volume.location)
                hero_location = self.hero_actor.get_location()
                d = hero_location.distance(transformed_tv)
                # Coarse proximity test: sum of both boxes' extent lengths.
                s = (Util.length(tl.trigger_volume.extent) + Util.length(self.hero_actor.bounding_box.extent))
                if (d <= s):
                    # This light governs the hero: remember it and blit the
                    # highlight sprite underneath the state sprite.
                    self.affected_traffic_light = tl
                    srf = self.traffic_light_surfaces.surfaces['h']
                    surface.blit(srf, srf.get_rect(center=pos))
            # Sprite keyed by the light's current state (green/yellow/red...).
            srf = self.traffic_light_surfaces.surfaces[tl.state]
            surface.blit(srf, srf.get_rect(center=pos))
    def _render_speed_limits(self, surface, list_sl, world_to_pixel, world_to_pixel_width):
        """Draw each speed-limit sign as a red ring with the limit value inside."""
        font_size = world_to_pixel_width(2)
        radius = world_to_pixel_width(2)
        font = pygame.font.SysFont('Arial', font_size)
        for sl in list_sl:
            (x, y) = world_to_pixel(sl.get_location())
            # Red disc with a smaller white disc on top gives the ring look.
            white_circle_radius = int((radius * 0.75))
            pygame.draw.circle(surface, COLOR_SCARLET_RED_1, (x, y), radius)
            pygame.draw.circle(surface, COLOR_ALUMINIUM_0, (x, y), white_circle_radius)
            # type_id looks like 'traffic.speed_limit.<N>' -> numeric part.
            limit = sl.type_id.split('.')[2]
            font_surface = font.render(limit, True, COLOR_ALUMINIUM_5)
            if self.args.show_triggers:
                # Debug outline of the sign's trigger volume.
                corners = Util.get_bounding_box(sl)
                corners = [world_to_pixel(p) for p in corners]
                pygame.draw.lines(surface, COLOR_PLUM_2, True, corners, 2)
            if (self.hero_actor is not None):
                # Hero view rotates the whole map, so counter-rotate the text
                # to keep it upright on screen.
                angle = ((- self.hero_transform.rotation.yaw) - 90.0)
                font_surface = pygame.transform.rotate(font_surface, angle)
                offset = font_surface.get_rect(center=(x, y))
                surface.blit(font_surface, offset)
            else:
                surface.blit(font_surface, ((x - (radius / 2)), (y - (radius / 2))))
def _render_walkers(self, surface, list_w, world_to_pixel):
for w in list_w:
color = COLOR_PLUM_0
bb = w[0].bounding_box.extent
corners = [carla.Location(x=(- bb.x), y=(- bb.y)), carla.Location(x=bb.x, y=(- bb.y)), carla.Location(x=bb.x, y=bb.y), carla.Location(x=(- bb.x), y=bb.y)]
w[1].transform(corners)
corners = [world_to_pixel(p) for p in corners]
pygame.draw.polygon(surface, color, corners)
    def _render_vehicles(self, surface, list_v, world_to_pixel):
        """Draw each vehicle as an arrow-like outline; colour encodes the kind:
        two-wheelers chocolate, the hero chameleon green, all others sky blue."""
        for v in list_v:
            color = COLOR_SKY_BLUE_0
            if (int(v[0].attributes['number_of_wheels']) == 2):
                color = COLOR_CHOCOLATE_1
            if (v[0].attributes['role_name'] == 'hero'):
                color = COLOR_CHAMELEON_0
            bb = v[0].bounding_box.extent
            # Closed arrow-shaped polyline in the vehicle's local frame; the
            # notch at +x marks the heading.
            corners = [carla.Location(x=(- bb.x), y=(- bb.y)), carla.Location(x=(bb.x - 0.8), y=(- bb.y)), carla.Location(x=bb.x, y=0), carla.Location(x=(bb.x - 0.8), y=bb.y), carla.Location(x=(- bb.x), y=bb.y), carla.Location(x=(- bb.x), y=(- bb.y))]
            # Local -> world (mutates the list in place).
            v[1].transform(corners)
            corners = [world_to_pixel(p) for p in corners]
            # Line width scales with the current map zoom.
            pygame.draw.lines(surface, color, False, corners, int(math.ceil((4.0 * self.map_image.scale))))
def render_actors(self, surface, vehicles, traffic_lights, speed_limits, walkers):
self._render_traffic_lights(surface, [tl[0] for tl in traffic_lights], self.map_image.world_to_pixel)
self._render_speed_limits(surface, [sl[0] for sl in speed_limits], self.map_image.world_to_pixel, self.map_image.world_to_pixel_width)
self._render_vehicles(surface, vehicles, self.map_image.world_to_pixel)
self._render_walkers(surface, walkers, self.map_image.world_to_pixel)
def clip_surfaces(self, clipping_rect):
self.actors_surface.set_clip(clipping_rect)
self.vehicle_id_surface.set_clip(clipping_rect)
self.result_surface.set_clip(clipping_rect)
    def _compute_scale(self, scale_factor):
        """Rescale the map while keeping the point under the mouse cursor fixed.

        Adjusts `scale_offset` so the zoom appears anchored at the cursor, then
        applies the new scale factor to the map image.
        """
        m = self._input.mouse_pos
        # Cursor position as a fraction of the previously scaled map size.
        px = ((m[0] - self.scale_offset[0]) / float(self.prev_scaled_size))
        py = ((m[1] - self.scale_offset[1]) / float(self.prev_scaled_size))
        # Translation needed so the anchored fraction (px, py) stays put after
        # the map grows/shrinks to `scaled_size`.
        diff_between_scales = (((float(self.prev_scaled_size) * px) - (float(self.scaled_size) * px)), ((float(self.prev_scaled_size) * py) - (float(self.scaled_size) * py)))
        self.scale_offset = ((self.scale_offset[0] + diff_between_scales[0]), (self.scale_offset[1] + diff_between_scales[1]))
        self.prev_scaled_size = self.scaled_size
        self.map_image.scale_map(scale_factor)
    def render(self, display):
        """Draw one full frame (map, actors, overlays) onto `display`.

        With a hero actor the view is a rotated, hero-centred inset clipped to
        a round border; without one it is a free camera driven by mouse
        pan/zoom.
        """
        if (self.actors_with_transforms is None):
            # No tick() has happened yet; nothing to draw.
            return
        self.result_surface.fill(COLOR_BLACK)
        (vehicles, traffic_lights, speed_limits, walkers) = self._split_actors()
        # Mouse-wheel zoom; the expensive rescale runs only when it changed.
        scale_factor = self._input.wheel_offset
        self.scaled_size = int((self.map_image.width * scale_factor))
        if (self.scaled_size != self.prev_scaled_size):
            self._compute_scale(scale_factor)
        self.actors_surface.fill(COLOR_BLACK)
        self.render_actors(self.actors_surface, vehicles, traffic_lights, speed_limits, walkers)
        self._hud.render_vehicles_ids(self.vehicle_id_surface, vehicles, self.map_image.world_to_pixel, self.hero_actor, self.hero_transform)
        self._show_nearby_vehicles(vehicles)
        # Layers composited bottom-to-top onto result_surface.
        surfaces = ((self.map_image.surface, (0, 0)), (self.actors_surface, (0, 0)), (self.vehicle_id_surface, (0, 0)))
        # Rotate the world so the hero always faces up (yaw + 90 degrees).
        angle = (0.0 if (self.hero_actor is None) else (self.hero_transform.rotation.yaw + 90.0))
        self.traffic_light_surfaces.rotozoom((- angle), self.map_image.scale)
        center_offset = (0, 0)
        if (self.hero_actor is not None):
            hero_location_screen = self.map_image.world_to_pixel(self.hero_transform.location)
            hero_front = self.hero_transform.get_forward_vector()
            # Centre the viewport ahead of the hero along its heading.
            translation_offset = (((hero_location_screen[0] - (self.hero_surface.get_width() / 2)) + (hero_front.x * PIXELS_AHEAD_VEHICLE)), ((hero_location_screen[1] - (self.hero_surface.get_height() / 2)) + (hero_front.y * PIXELS_AHEAD_VEHICLE)))
            # Only composite what falls inside the hero viewport.
            clipping_rect = pygame.Rect(translation_offset[0], translation_offset[1], self.hero_surface.get_width(), self.hero_surface.get_height())
            self.clip_surfaces(clipping_rect)
            Util.blits(self.result_surface, surfaces)
            self.border_round_surface.set_clip(clipping_rect)
            self.hero_surface.fill(COLOR_ALUMINIUM_4)
            self.hero_surface.blit(self.result_surface, ((- translation_offset[0]), (- translation_offset[1])))
            # 0.9 zoom-out so rotating never exposes the surface's corners.
            rotated_result_surface = pygame.transform.rotozoom(self.hero_surface, angle, 0.9).convert()
            center = ((display.get_width() / 2), (display.get_height() / 2))
            rotation_pivot = rotated_result_surface.get_rect(center=center)
            display.blit(rotated_result_surface, rotation_pivot)
            display.blit(self.border_round_surface, (0, 0))
        else:
            # Free camera: pan with the mouse, keep the map horizontally centred.
            translation_offset = (((self._input.mouse_offset[0] * scale_factor) + self.scale_offset[0]), ((self._input.mouse_offset[1] * scale_factor) + self.scale_offset[1]))
            center_offset = (((abs((display.get_width() - self.surface_size)) / 2) * scale_factor), 0)
            clipping_rect = pygame.Rect(((- translation_offset[0]) - center_offset[0]), (- translation_offset[1]), self._hud.dim[0], self._hud.dim[1])
            self.clip_surfaces(clipping_rect)
            Util.blits(self.result_surface, surfaces)
            display.blit(self.result_surface, ((translation_offset[0] + center_offset[0]), translation_offset[1]))
def destroy(self):
if (self.spawned_hero is not None):
self.spawned_hero.destroy() |
class TestGraphMatMulFusion(unittest.TestCase):
    """End-to-end checks of neural_compressor's TensorFlow MatMul fusion.

    Each test builds a tiny TF1-style graph around a MatMul, quantizes it with
    the shared 'fake_yaml.yaml' config, and then inspects the produced graph
    for the expected fused (or deliberately unfused) quantized kernels.
    """

    @classmethod
    def setUpClass(cls):
        # setUpClass/tearDownClass are class-level unittest hooks and must be
        # classmethods; the original instance-method form raises TypeError when
        # unittest invokes them on the class.
        build_fake_yaml()
        cls.op_wise_sequences = TensorflowQuery(local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), 'adaptor/tensorflow.yaml')).get_eightbit_patterns()

    @classmethod
    def tearDownClass(cls):
        os.remove('fake_yaml.yaml')

    # NOTE(review): the bare `_random()` calls at class scope below look like
    # the mangled remains of a per-test decorator (e.g. @disable_random()) —
    # confirm against the upstream test source before relying on them.
    _random()

    def test_matmul_biasadd_relu_requantize_fusion(self):
        """MatMul + BiasAdd + Relu fuses into the requantized quantized kernel."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y)
            z = tf.nn.bias_add(z, [1, 2])
            z = tf.nn.relu(z, name='op_to_store')
            found_quantized_matmul = False
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'QuantizedMatMulWithBiasAndReluAndRequantize'):
                        found_quantized_matmul = True
                        break
                self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_first_matmul_biasadd_relu_fusion(self):
        """The first MatMul's input is quantized to quint8 via QuantizeV2."""
        x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
        y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
        x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
        y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
        z = tf.matmul(x, y)
        z = tf.nn.bias_add(z, [1, 2])
        z = tf.nn.relu(z, name='op_to_store')
        with tf.Session() as sess:
            sess.run(z, feed_dict={x: x_data, y: y_data})
            float_graph_def = sess.graph.as_graph_def()
            from neural_compressor.experimental import Quantization, common
            quantizer = Quantization('fake_yaml.yaml')
            dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
            quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
            quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
            quantizer.model = float_graph_def
            output_graph = quantizer.fit()
            found_quantized_matmul = False
            for i in output_graph.graph_def.node:
                if ((i.op == 'QuantizeV2') and (i.name == 'MatMul_eightbit_quantize_x') and (i.attr['T'].type == dtypes.quint8)):
                    found_quantized_matmul = True
                    break
            self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_matmul_biasadd_requantize_dequantize_fusion(self):
        """MatMul + BiasAdd fuses into QuantizedMatMulWithBiasAndDequantize (tf >= 2.2)."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y)
            z = tf.nn.bias_add(z, [1, 2])
            z = tf.identity(z, name='op_to_store')
            found_quantized_matmul = False
            if (tf.version.VERSION < '2.2.0'):
                # Fusion unsupported on old TF: pass trivially.
                found_quantized_matmul = True
            else:
                with tf.Session() as sess:
                    sess.run(z, feed_dict={x: x_data, y: y_data})
                    float_graph_def = sess.graph.as_graph_def()
                    from neural_compressor.experimental import Quantization, common
                    quantizer = Quantization('fake_yaml.yaml')
                    dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.model = float_graph_def
                    output_graph = quantizer.fit()
                    for i in output_graph.graph_def.node:
                        if (i.op == 'QuantizedMatMulWithBiasAndDequantize'):
                            found_quantized_matmul = True
                            break
            self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_matmul_biasadd_requantize_dequantize_last_fusion(self):
        """Fused MatMul as the graph's last node keeps the 'op_to_store' name."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y)
            z = tf.nn.bias_add(z, [1, 2], name='op_to_store')
            found_quantized_matmul = False
            if (tf.version.VERSION < '2.2.0'):
                # Fusion unsupported on old TF: pass trivially.
                found_quantized_matmul = True
            else:
                with tf.Session() as sess:
                    sess.run(z, feed_dict={x: x_data, y: y_data})
                    float_graph_def = sess.graph.as_graph_def()
                    from neural_compressor.experimental import Quantization, common
                    quantizer = Quantization('fake_yaml.yaml')
                    dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.model = float_graph_def
                    output_graph = quantizer.fit()
                    for i in output_graph.graph_def.node:
                        if ((i.op == 'QuantizedMatMulWithBiasAndDequantize') and (i.name == 'op_to_store')):
                            found_quantized_matmul = True
                            break
            self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_disable_matmul_fusion(self):
        """A MatMul named 'no_quant_matmul' must NOT be fused/quantized."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y, name='no_quant_matmul')
            z = tf.nn.relu6(z, name='op_to_store')
            found_quantized_matmul = False
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if ((i.op == 'QuantizedMatMulWithBiasAndDequantize') and (i.name == 'op_to_store')):
                        found_quantized_matmul = True
                        break
                self.assertEqual(found_quantized_matmul, False)
    _random()

    def test_disable_matmul_fusion_with_transpose_b_true(self):
        """Fusion is also skipped for a disabled MatMul with transpose_b=True."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y, name='no_quant_matmul', transpose_b=True)
            z = tf.nn.relu6(z, name='op_to_store')
            found_quantized_matmul = False
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if ((i.op == 'QuantizedMatMulWithBiasAndDequantize') and (i.name == 'op_to_store')):
                        found_quantized_matmul = True
                        break
                self.assertEqual(found_quantized_matmul, False)
    _random()

    # Restored from a mangled bare-tuple statement in the original source,
    # which evaluated the condition and message as a no-op expression.
    @unittest.skipIf(float(tf.__version__[:3]) > 2.7, 'only tf lower than 2.8 enable dummy biasadd')
    def test_matmul_with_dummy_biasadd(self):
        """A bias-less MatMul gets a dummy BiasAdd injected and is quantized."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y, name='no_quant_matmul')
            z = tf.identity(z, name='op_to_store')
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                # Any surviving float MatMul means quantization did not happen.
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, True)
    _random()

    # Restored from a mangled bare-tuple statement in the original source.
    @unittest.skipIf(float(tf.__version__[:3]) > 2.7, 'only tf lower than 2.8 enable dummy biasadd')
    def test_matmul_with_nan(self):
        """A NaN-weight MatMul is still converted away from the float MatMul op."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            nan_array = np.empty((2, 2), dtype=np.float32)
            nan_array[:] = np.NaN
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            z = tf.matmul(x, nan_array, name='no_quant_matmul')
            z = tf.identity(z, name='op_to_store')
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_matmul_with_reshape_transpose(self):
        """MatMul fed through transpose+reshape of a constant is still quantized."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            transpose = tf.transpose(y, perm=[1, 0])
            reshape = tf.reshape(transpose, [2, 2])
            z = tf.matmul(x, reshape, name='no_quant_matmul')
            z = tf.nn.bias_add(z, [1, 2], name='op_to_store')
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_matmul_with_add(self):
        """MatMul followed by tf.math.add (not BiasAdd) is still quantized."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            transpose = tf.transpose(y, perm=[1, 0])
            reshape = tf.reshape(transpose, [2, 2])
            z = tf.matmul(x, reshape, name='no_quant_matmul')
            z = tf.math.add(z, [1, 2], name='op_to_store')
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data, y: y_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, True)
    _random()

    def test_matmul_biasadd_requantize_dequantize_fusion_with_softmax(self):
        """Two stacked MatMul+BiasAdd blocks ending in softmax: on tf >= 2.6
        more than one QuantizedMatMulWithBiasAndDequantize node is expected."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            y_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2])
            z = tf.matmul(x, y)
            biasadd = tf.nn.bias_add(z, [1, 2])
            biasadd1 = tf.nn.bias_add(biasadd, [1, 1])
            y1 = tf.constant(x_data, dtype=tf.float32, shape=[2, 2])
            matmul1 = tf.matmul(biasadd1, y1)
            biasadd2 = tf.nn.bias_add(matmul1, [1, 1])
            z = tf.nn.softmax(biasadd2, name='op_to_store')
            found_quantized_matmul = False
            if (tf.version.VERSION < '2.2.0'):
                found_quantized_matmul = False
            else:
                with tf.Session() as sess:
                    sess.run(z, feed_dict={x: x_data, y: y_data})
                    float_graph_def = sess.graph.as_graph_def()
                    from neural_compressor.experimental import Quantization, common
                    quantizer = Quantization('fake_yaml.yaml')
                    dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                    quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                    quantizer.model = float_graph_def
                    output_graph = quantizer.fit()
                    count = 0
                    for i in output_graph.model.as_graph_def().node:
                        if (i.op == 'QuantizedMatMulWithBiasAndDequantize'):
                            count += 1
                    found_quantized_matmul = bool((count > 1))
            if (tf.__version__ < '2.6.0'):
                self.assertEqual(found_quantized_matmul, False)
            else:
                self.assertEqual(found_quantized_matmul, True)

    def test_matmul_biasadd_relu_non_const_weight(self):
        """A MatMul with a non-constant weight must keep its float MatMul op."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.matmul(x, x, name='no_quant_matmul')
            biasadd = tf.nn.bias_add(y, [1, 2])
            z = tf.nn.relu(biasadd)
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, False)

    def test_matmul_biasadd_non_const_weight(self):
        """Same as above without the Relu: non-const weight stays unquantized."""
        g = tf.Graph()
        with g.as_default():
            x_data = np.array([[0.1, 0.2], [0.2, 0.3]])
            x = tf.placeholder(tf.float32, shape=[2, 2], name='x')
            y = tf.matmul(x, x, name='no_quant_matmul')
            z = tf.nn.bias_add(y, [1, 2])
            found_quantized_matmul = True
            with tf.Session() as sess:
                sess.run(z, feed_dict={x: x_data})
                float_graph_def = sess.graph.as_graph_def()
                from neural_compressor.experimental import Quantization, common
                quantizer = Quantization('fake_yaml.yaml')
                dataset = quantizer.dataset('dummy', shape=(2, 2), label=True)
                quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2)
                quantizer.model = float_graph_def
                output_graph = quantizer.fit()
                for i in output_graph.graph_def.node:
                    if (i.op == 'MatMul'):
                        found_quantized_matmul = False
                        break
                self.assertEqual(found_quantized_matmul, False)
def reveal_fog_of_war(top_down_map: np.ndarray, current_fog_of_war_mask: np.ndarray, current_point: np.ndarray, current_angle: float, fov: float=90, max_line_len: float=100) -> np.ndarray:
    """Return a copy of the fog-of-war mask with the area visible from
    `current_point` revealed.

    Rays are cast from the agent's position facing `current_angle`, spanning a
    `fov`-degree field of view with length `max_line_len` map cells.
    """
    half_fov_rad = np.deg2rad(fov) / 2
    # One ray per 1/max_line_len radians, so neighbouring rays stay at most
    # about one cell apart at maximum range.
    ray_angles = np.arange(-half_fov_rad, half_fov_rad, step=(1.0 / max_line_len), dtype=np.float32)
    revealed_mask = current_fog_of_war_mask.copy()
    _draw_loop(top_down_map, revealed_mask, current_point, current_angle, max_line_len, ray_angles)
    return revealed_mask
def save_episode_result(environment, test_result):
    """Append one episode's summary metrics as a row of `test_result`.

    The row is indexed by "<day>_<episode index>" and holds, in order:
    pnl, nd_pnl, avg_abs_position, profit_ratio, volume.
    """
    summary = environment.get_final_result()
    row_key = '{}_{}'.format(environment.day, environment.episode_idx)
    test_result.loc[row_key] = [
        summary['pnl'],
        summary['nd_pnl'],
        summary['avg_abs_position'],
        summary['profit_ratio'],
        summary['volume'],
    ]
def main():
    """Fuzzing entry point: hand argv to atheris and start the fuzz loop."""
    argv = sys.argv
    # TestOneInput is the harness invoked once per generated input.
    atheris.Setup(argv, TestOneInput, enable_python_coverage=True)
    atheris.Fuzz()
def _first_env_int(var_names, default):
    """Return the int value of the first set environment variable in
    `var_names`, or `default` if none is present."""
    for name in var_names:
        if name in os.environ:
            return int(os.environ[name])
    return default


def world_info_from_env():
    """Read the distributed-training topology from environment variables.

    Supports the conventions of torchrun/PyTorch (LOCAL_RANK/RANK/WORLD_SIZE),
    MPICH/Intel MPI (MPI_LOCALRANKID, PMI_*), SLURM, and Open MPI — checked in
    that order of precedence.  The triplicated scan loop of the original is
    factored into `_first_env_int`.

    Returns:
        (local_rank, global_rank, world_size); defaults to (0, 0, 1) for a
        plain single-process run.
    """
    local_rank = _first_env_int(('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'), 0)
    global_rank = _first_env_int(('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'), 0)
    world_size = _first_env_int(('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'), 1)
    return (local_rank, global_rank, world_size)
class SkipProofDatasetCreator(DatasetCreator):
    """Writes deduplicated skip-proof datapoints to `self.fp` as JSON lines."""

    def __init__(self, fp):
        super().__init__(fp)
        # (result, proof_term) pairs already written, to drop exact duplicates.
        self.seen = set()

    def process_dp(self, dp):
        """Convert `dp` into a skip-proof example and append it to the output,
        skipping examples without a 'PREDICT' token and exact duplicates."""
        (result, proof_term) = get_skip_proof_datapoint(dp)
        if 'PREDICT' not in result:
            return
        key = (result, proof_term)
        if key in self.seen:
            return
        self.seen.add(key)
        record = {'skip_proof': result, 'proof_term': proof_term}
        self.fp.write(json.dumps(record) + '\n')
def convert(model, qconfig_mapping):
    """Swap FP8-quantizable submodules of `model` per `qconfig_mapping`.

    Args:
        model: torch model, modified in place.
        qconfig_mapping: maps (op_name, op_type) -> qconfig.  Entries whose
            weight dtype is not in FP8_DTYPE are skipped entirely.

    Returns:
        The same model object, with matching submodules replaced.
    """
    for ((op_name, op_type), qconfig) in qconfig_mapping.items():
        # Only FP8 weight dtypes are handled by this converter.
        if (qconfig.weight_dtype not in FP8_DTYPE):
            continue
        module = fetch_module(model, op_name)
        if (module is None):
            # Config may reference ops pruned from this model variant.
            logger.info(f'{op_name} is not found in model.')
            continue
        # Static (non-dynamic) quantization attached calibration observers
        # that must be stripped before replacement.
        if (qconfig.approach != 'dynamic'):
            _remove_observer(module, qconfig)
        module = _replace_module(module, qconfig)
        set_module(model, op_name, module)
    # NOTE(review): presumably flushes pending ops on the Habana device after
    # module surgery — confirm against habana_frameworks docs.
    htcore.mark_step()
    return model
def get_sumo_binary():
    """Pick the SUMO executable name for the configured platform and GUI mode.

    Any platform other than Windows/Linux falls back to plain 'sumo'.
    """
    binaries = {
        ('Windows', True): 'sumo-gui.exe',
        ('Linux', True): 'sumo-gui',
        ('Windows', False): 'sumo.exe',
        ('Linux', False): 'sumo',
    }
    return binaries.get((Settings.SYSTEM, bool(Settings.USE_GUI)), 'sumo')
def insert_receptors(path_db, name, receptors, max_cdr3_length=32):
    """Store TCR receptors and their per-label frequencies as an HDF5 dataset.

    Args:
        path_db: path to the HDF5 file; appended to if it already exists.
        name: name of the dataset to create inside the file.
        receptors: mapping 'tra_v:tra_cdr3:tra_j:trb_v:trb_cdr3:trb_j' ->
            {label: frequency}.
        max_cdr3_length: fixed byte width reserved for the CDR3 strings.
    """
    # Union of all labels so every record gets one frequency column per label.
    labels = set()
    for quantities in receptors.values():
        labels.update(quantities.keys())
    labels = sorted(list(labels))
    # Structured dtype: six fixed-width byte strings plus one f8 per label.
    dtype_receptor = ([('tra_vgene', 'S16'), ('tra_cdr3', ('S' + str(max_cdr3_length))), ('tra_jgene', 'S16'), ('trb_vgene', 'S16'), ('trb_cdr3', ('S' + str(max_cdr3_length))), ('trb_jgene', 'S16')] + [(('frequency_' + label), 'f8') for label in labels])
    rs = np.zeros(len(receptors), dtype=dtype_receptor)
    for (i, (receptor, quantities)) in enumerate(receptors.items()):
        # The receptor key encodes all six gene/CDR3 fields, colon-separated.
        (tra_vgene, tra_cdr3, tra_jgene, trb_vgene, trb_cdr3, trb_jgene) = receptor.split(':')
        rs[i]['tra_vgene'] = tra_vgene
        rs[i]['tra_cdr3'] = tra_cdr3
        rs[i]['tra_jgene'] = tra_jgene
        rs[i]['trb_vgene'] = trb_vgene
        rs[i]['trb_cdr3'] = trb_cdr3
        rs[i]['trb_jgene'] = trb_jgene
        # Labels missing for this receptor keep the np.zeros default of 0.0.
        for label in quantities.keys():
            rs[i][('frequency_' + label)] = quantities[label]
    # Append to an existing database file, otherwise create a new one.
    flag = ('r+' if os.path.isfile(path_db) else 'w')
    with h5py.File(path_db, flag) as db:
        rs_db = db.create_dataset(name, (rs.size,), dtype_receptor)
        rs_db[:] = rs
def read_non_scored_words(non_scored_words_file):
    """Load one non-scored word per line from the open file into the
    module-level `_global_non_scored_words` set, then close the file.

    Raises:
        RuntimeError: if a line does not contain exactly one token.
    """
    for line in non_scored_words_file.readlines():
        tokens = line.split()
        if len(tokens) != 1:
            raise RuntimeError('segment_ctm_edits.py: bad line in non-scored-words file {0}: {1}'.format(non_scored_words_file, line))
        _global_non_scored_words.add(tokens[0])
    non_scored_words_file.close()
def _at_least_version(actual_version, required_version):
actual = [int(v) for v in actual_version.split('.')]
required = [int(v) for v in required_version.split('.')]
return (actual >= required) |
def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None):
    """Conv2D optionally followed by BatchNormalization and an activation.

    When `name` is given, the derived layers use the '<name>_bn' and
    '<name>_ac' suffix convention; otherwise all layers stay unnamed.
    """
    def _derived_name(suffix):
        return None if name is None else name + suffix

    x = Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, name=name)(x)
    if not use_bias:
        # BN takes over the bias role; its axis follows the backend layout.
        channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
        x = BatchNormalization(axis=channel_axis, scale=False, name=_derived_name('_bn'))(x)
    if activation is not None:
        x = Activation(activation, name=_derived_name('_ac'))(x)
    return x
class LeakyClamp(torch.autograd.Function):
    """Clamp with a "leaky" backward pass.

    Forward is a hard clamp of `x` to [min, max].  Backward passes the
    gradient through unchanged where `x` was inside the range and scales it by
    the module-level `eps` where it was clamped, instead of zeroing it as a
    plain clamp would — keeping a small training signal for saturated values.
    """

    # autograd.Function hooks must be static: torch invokes them through the
    # class (LeakyClamp.apply), never on an instance.  The originals were
    # missing @staticmethod.
    @staticmethod
    def forward(ctx: Any, x: torch.Tensor, min: float, max: float) -> torch.Tensor:
        # Remember which elements were inside [min, max] for the backward pass.
        ctx.save_for_backward((x.ge(min) * x.le(max)))
        return torch.clamp(x, min=min, max=max)

    @staticmethod
    def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[(torch.Tensor, None, None)]:
        (mask,) = ctx.saved_tensors
        mask = mask.type_as(grad_output)
        # Full gradient inside the range, eps-scaled gradient outside; None
        # for the two non-tensor inputs (min, max).
        return (((grad_output * mask) + ((grad_output * (1 - mask)) * eps)), None, None)
class AvgStatistic(Statistic):
    """Running (optionally decayed and debiased) average statistic.

    `decay` switches updates to exponential-moving-average form; `debias`
    divides by (1 - param**step), the Adam-style EMA startup-bias correction.
    """
    decay: bool = False
    debias: bool = False

    def new_step(self):
        # Reset the per-step accumulator used when no finer scope applies.
        (self.val, self.count) = (0.0, 0)

    def accumulate(self, val):
        self.count += 1
        self.val += self._get_val1(val)

    def _get_val1(self, val):
        # Scalar summary of a tensor: plain mean over all elements.
        return val.mean()

    def _get_val2(self, state, val, param):
        # Per-weight update; `state` was already scaled by `param` by update().
        # Use the keyword `alpha` form: the positional-alpha overload
        # add_(alpha, tensor) is deprecated and removed in current torch.
        return (state.add_(val, alpha=(1 - param)) if self.decay else state.add_(val))

    def _get_val3(self, state, val, param):
        # Per-channel update: mean over all dimensions but the first.
        v = val.view(val.size(0), (- 1)).mean(1)
        return (state.add_(v, alpha=(1 - param)) if self.decay else state.add_(v))

    def update(self, state, param, val=None, step=None):
        """Blend `val` (or the step accumulator) into `state` with weight `param`."""
        if (self.scope == StatScope.Weight):
            res = self._get_val2(state.mul_(param), val, param)
        elif (self.scope == StatScope.Channel):
            res = self._get_val3(state.mul_(param), val, param)
        elif (self.scope == StatScope.Layer):
            res = ((state * param) + (self._get_val1(val) * ((1 - param) if self.decay else 1.0)))
        elif (self.count != 0):
            res = ((state * param) + ((self.val / self.count) * ((1 - param) if self.decay else 1.0)))
        else:
            # Nothing accumulated this step: leave state untouched.
            return state
        if (self.debias and (step is not None)):
            res /= (1 - (param ** step))
        return res
class LinearFilter(nn.Module):
    """Target classification head: predicts a filter from training frames and
    applies it to test-frame features to produce target score maps."""

    def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None):
        """
        Args:
            filter_size: spatial size of the correlation filter.
            filter_initializer: module producing initial filter weights from
                features and target boxes.
            filter_optimizer: optional module that iteratively refines the
                initial filter.
            feature_extractor: optional head extracting classification features.
        """
        super().__init__()
        self.filter_size = filter_size
        self.filter_initializer = filter_initializer
        self.filter_optimizer = filter_optimizer
        self.feature_extractor = feature_extractor
        # He-style init for convs, constant init for batch norms in the head.
        for m in self.feature_extractor.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, train_feat, test_feat, train_bb, *args, **kwargs):
        """Learn a filter from `train_feat`/`train_bb` and score `test_feat`.

        Returns one score map per filter-optimizer iteration.
        """
        # train_bb is expected as (images_per_sequence, sequences, 4).
        assert (train_bb.dim() == 3)
        num_sequences = train_bb.shape[1]
        # Collapse (images, sequences) into the batch dimension for the head.
        if (train_feat.dim() == 5):
            train_feat = train_feat.view((- 1), *train_feat.shape[(- 3):])
        if (test_feat.dim() == 5):
            test_feat = test_feat.view((- 1), *test_feat.shape[(- 3):])
        train_feat = self.extract_classification_feat(train_feat, num_sequences)
        test_feat = self.extract_classification_feat(test_feat, num_sequences)
        (filter, filter_iter, losses) = self.get_filter(train_feat, train_bb, *args, **kwargs)
        # Score the test features with every intermediate filter, not just the final one.
        test_scores = [self.classify(f, test_feat) for f in filter_iter]
        return test_scores

    def extract_classification_feat(self, feat, num_sequences=None):
        """Run the feature head; reshape back to (images, sequences, C, H, W)
        when `num_sequences` is given."""
        if (self.feature_extractor is None):
            return feat
        if (num_sequences is None):
            return self.feature_extractor(feat)
        output = self.feature_extractor(feat)
        return output.view((- 1), num_sequences, *output.shape[(- 3):])

    def classify(self, weights, feat):
        """Correlate `feat` with filter `weights`, returning score maps."""
        scores = filter_layer.apply_filter(feat, weights)
        return scores

    def get_filter(self, feat, bb, *args, **kwargs):
        """Initialize a filter from `feat`/`bb` and optionally refine it.

        Returns:
            (final_weights, per-iteration weights, optimizer losses or None).
        """
        weights = self.filter_initializer(feat, bb)
        if (self.filter_optimizer is not None):
            (weights, weights_iter, losses) = self.filter_optimizer(weights, *args, feat=feat, bb=bb, **kwargs)
        else:
            weights_iter = [weights]
            losses = None
        return (weights, weights_iter, losses)
class PhotoAIAPIRouter(APIRouter):
    """FastAPI router that proxies Photo AI requests to a shared chatbot."""

    def __init__(self) -> None:
        super().__init__()
        # Populated later through set_chatbot(); requests fail until then.
        self.chatbot = None

    def set_chatbot(self, chatbot, use_deepspeed, world_size, host, port) -> None:
        """Attach the chatbot backend and remember its serving configuration."""
        self.chatbot = chatbot
        self.use_deepspeed = use_deepspeed
        self.world_size = world_size
        self.host = host
        self.port = port

    def get_chatbot(self):
        """Return the configured chatbot, raising if none has been attached."""
        if self.chatbot is None:
            logger.error('Chatbot instance is not found.')
            raise RuntimeError('Chatbot instance has not been set.')
        return self.chatbot

    async def handle_voice_chat_request(self, prompt: str, audio_output_path: Optional[str]=None) -> str:
        """Run a TTS-enabled chat for `prompt` and stream the generated audio
        files back as base64-encoded server-sent events."""
        chatbot = self.get_chatbot()
        try:
            # Voice chat needs TTS on and retrieval off for this request.
            plugins['tts']['enable'] = True
            plugins['retrieval']['enable'] = False
            config = GenerationConfig(audio_output_path=audio_output_path)
            (result, link) = chatbot.chat_stream(query=prompt, config=config)

            def audio_file_generate(result):
                # One SSE "data:" frame per audio file, then a DONE marker.
                # `encoded` avoids shadowing the builtin `bytes`.
                for path in result:
                    with open(path, mode='rb') as audio_file:
                        encoded = base64.b64encode(audio_file.read())
                        yield f'data: {encoded}\n'
                yield 'data: [DONE]\n'

            return StreamingResponse(audio_file_generate(result), media_type='text/event-stream')
        except Exception as e:
            # NOTE(review): re-wrapping as bare Exception loses the original
            # type/traceback; kept for behavioural parity.
            raise Exception(e)
class SchedulerBaseTests(unittest.TestCase):
    """Config round-trip tests for scheduler save/load and cross-class conversion."""

    def test_save_load_from_different_config(self):
        """Loading one scheduler's config into another warns only on unexpected keys."""
        obj = SchedulerObject()
        # Register the class on the diffusers module so config lookup can find it.
        setattr(diffusers, 'SchedulerObject', SchedulerObject)
        logger = logging.get_logger('diffusers.configuration_utils')
        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            with CaptureLogger(logger) as cap_logger_1:
                config = SchedulerObject2.load_config(tmpdirname)
                new_obj_1 = SchedulerObject2.from_config(config)
            # Inject an unexpected key into the saved config on disk.
            with open(os.path.join(tmpdirname, SchedulerObject.config_name), 'r') as f:
                data = json.load(f)
                data['unexpected'] = True
            with open(os.path.join(tmpdirname, SchedulerObject.config_name), 'w') as f:
                json.dump(data, f)
            with CaptureLogger(logger) as cap_logger_2:
                config = SchedulerObject.load_config(tmpdirname)
                new_obj_2 = SchedulerObject.from_config(config)
            with CaptureLogger(logger) as cap_logger_3:
                config = SchedulerObject2.load_config(tmpdirname)
                new_obj_3 = SchedulerObject2.from_config(config)
        assert (new_obj_1.__class__ == SchedulerObject2)
        assert (new_obj_2.__class__ == SchedulerObject)
        assert (new_obj_3.__class__ == SchedulerObject2)
        # Only the unexpected-key loads should have produced a warning.
        assert (cap_logger_1.out == '')
        assert (cap_logger_2.out == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and will be ignored. Please verify your config.json configuration file.\n")
        assert (cap_logger_2.out.replace('SchedulerObject', 'SchedulerObject2') == cap_logger_3.out)

    def test_save_load_compatible_schedulers(self):
        """Keys known to a compatible scheduler are accepted silently; unknown keys warn."""
        SchedulerObject2._compatibles = ['SchedulerObject']
        SchedulerObject._compatibles = ['SchedulerObject2']
        obj = SchedulerObject()
        setattr(diffusers, 'SchedulerObject', SchedulerObject)
        setattr(diffusers, 'SchedulerObject2', SchedulerObject2)
        logger = logging.get_logger('diffusers.configuration_utils')
        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            # 'f' belongs to the compatible class; 'unexpected' belongs to neither.
            with open(os.path.join(tmpdirname, SchedulerObject.config_name), 'r') as f:
                data = json.load(f)
                data['f'] = [0, 0]
                data['unexpected'] = True
            with open(os.path.join(tmpdirname, SchedulerObject.config_name), 'w') as f:
                json.dump(data, f)
            with CaptureLogger(logger) as cap_logger:
                config = SchedulerObject.load_config(tmpdirname)
                new_obj = SchedulerObject.from_config(config)
        assert (new_obj.__class__ == SchedulerObject)
        assert (cap_logger.out == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and will be ignored. Please verify your config.json configuration file.\n")

    def test_save_load_from_different_config_comp_schedulers(self):
        """Compatible schedulers missing a config key log an INFO about default init."""
        SchedulerObject3._compatibles = ['SchedulerObject', 'SchedulerObject2']
        SchedulerObject2._compatibles = ['SchedulerObject', 'SchedulerObject3']
        SchedulerObject._compatibles = ['SchedulerObject2', 'SchedulerObject3']
        obj = SchedulerObject()
        setattr(diffusers, 'SchedulerObject', SchedulerObject)
        setattr(diffusers, 'SchedulerObject2', SchedulerObject2)
        setattr(diffusers, 'SchedulerObject3', SchedulerObject3)
        logger = logging.get_logger('diffusers.configuration_utils')
        # INFO level needed so the "not found in config" messages are captured.
        logger.setLevel(diffusers.logging.INFO)
        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            with CaptureLogger(logger) as cap_logger_1:
                config = SchedulerObject.load_config(tmpdirname)
                new_obj_1 = SchedulerObject.from_config(config)
            with CaptureLogger(logger) as cap_logger_2:
                config = SchedulerObject2.load_config(tmpdirname)
                new_obj_2 = SchedulerObject2.from_config(config)
            with CaptureLogger(logger) as cap_logger_3:
                config = SchedulerObject3.load_config(tmpdirname)
                new_obj_3 = SchedulerObject3.from_config(config)
        assert (new_obj_1.__class__ == SchedulerObject)
        assert (new_obj_2.__class__ == SchedulerObject2)
        assert (new_obj_3.__class__ == SchedulerObject3)
        assert (cap_logger_1.out == '')
        assert (cap_logger_2.out == "{'f'} was not found in config. Values will be initialized to default values.\n")
        assert (cap_logger_3.out == "{'f'} was not found in config. Values will be initialized to default values.\n")

    def test_default_arguments_not_in_config(self):
        """Defaults not stored in the config follow the new scheduler class on switch."""
        pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', torch_dtype=torch.float16)
        assert (pipe.scheduler.__class__ == DDIMScheduler)
        assert (pipe.scheduler.config.timestep_spacing == 'leading')
        # Switching classes picks up the new class's default spacing.
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        assert (pipe.scheduler.config.timestep_spacing == 'linspace')
        # An explicit override is kept ...
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing='trailing')
        assert (pipe.scheduler.config.timestep_spacing == 'trailing')
        # ... and survives further scheduler switches.
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        assert (pipe.scheduler.config.timestep_spacing == 'trailing')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        assert (pipe.scheduler.config.timestep_spacing == 'trailing')

    def test_default_solver_type_after_switch(self):
        """solver_type falls back to the new scheduler's supported default on switch."""
        pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', torch_dtype=torch.float16)
        assert (pipe.scheduler.__class__ == DDIMScheduler)
        pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
        assert (pipe.scheduler.config.solver_type == 'logrho')
        # UniPC does not support 'logrho'; it must reset to its own default.
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        assert (pipe.scheduler.config.solver_type == 'bh2')
def load_dataset_config(cfg_path):
    """Load a config file and return the config of its first dataset entry."""
    datasets_cfg = OmegaConf.load(cfg_path).datasets
    first_key = list(datasets_cfg.keys())[0]
    return datasets_cfg[first_key]
class DummyModel(EztorchBaseModule, ABC):
    """Minimal one-layer binary classifier used for pipeline/transform testing.

    Args:
        input_shape: shape of a single input sample; flattened into a vector.
        transform: optional hydra config for an input transform, instantiated at init.
    """

    def __init__(self, input_shape: int, transform: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.transform = hydra.utils.instantiate(transform) if transform is not None else None
        self.save_hyperparameters()
        input_dim = math.prod(input_shape)
        self.layer = nn.Linear(input_dim, 1)

    def configure_optimizers(self) -> Dict[(Any, Any)]:
        # NOTE(review): the annotation says Dict but an Optimizer is returned;
        # Lightning accepts either — annotation kept for interface stability.
        return torch.optim.Adam(self.parameters(), 0.0001)

    def forward(self, x: Tensor) -> Tensor:
        """Flatten the input and apply the linear head."""
        x = torch.flatten(x, 1)
        x = self.layer(x)
        return x

    def training_step(self, batch: Iterable[Any], batch_idx: int):
        """Apply the optional transform and return a BCE loss against all-ones targets."""
        x = batch['input']
        if self.transform is not None:
            # Transforms run without grad and outside autocast.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.transform(x)
        # Fix: was `type(x) is list`; isinstance also accepts list subclasses
        # (e.g. multi-view transform outputs).
        if isinstance(x, list):
            pred = self.forward(x[0])
        else:
            pred = self.forward(x)
        return torch.nn.functional.binary_cross_entropy_with_logits(pred, torch.ones_like(pred))
def flavour_compair(d1, d2):
    """Cross-classify each parser's true positives against the other's buckets.

    Each of d1/d2 maps bucket names ('tp', 'fp', 'fn', 'fl') to lists of arcs.
    For every tp arc of one parser, record which bucket of the other parser it
    landed in ('None' when absent from all buckets).

    Returns:
        (diffs1, diffs2): bucketed d1-tp arcs vs d2, and d2-tp arcs vs d1.
    """
    diffs1 = _bucket_arcs(d1['tp'], d2)
    diffs2 = _bucket_arcs(d2['tp'], d1)
    print([(x, len(y)) for (x, y) in diffs1.items()])
    print([(x, len(y)) for (x, y) in diffs2.items()])
    return (diffs1, diffs2)


def _bucket_arcs(arcs, other):
    """Assign each arc to the first bucket of `other` containing it ('None' if absent).

    Deduplicates the two copy-pasted elif chains of the original implementation.
    """
    buckets = {x: [] for x in 'tp fp fn fl None'.split()}
    for arc in arcs:
        for key in ('tp', 'fp', 'fn', 'fl'):
            if arc in other[key]:
                buckets[key].append(arc)
                break
        else:
            buckets['None'].append(arc)
    return buckets
class NormsMethods(Generic[T_co], ExtensionMethods):
    """Extension-method bundle exposing the norm helpers from the `norms` module.

    Each attribute is the corresponding norm function; they are declared as
    generic callables so subclasses fix the concrete tensor type T_co.
    """
    l0: Callable[(..., T_co)] = norms.l0
    l1: Callable[(..., T_co)] = norms.l1
    l2: Callable[(..., T_co)] = norms.l2
    linf: Callable[(..., T_co)] = norms.linf
    lp: Callable[(..., T_co)] = norms.lp
def gradients_collection(ys, xs, grad_ys=None, **kwargs):
    """Compute gradients of `ys` w.r.t. `xs` using the 'collection' checkpoint strategy.

    Thin wrapper around `gradients`; all extra keyword args are forwarded unchanged.
    """
    return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)
class RPNModule(torch.nn.Module):
    """RPN variant that scores proposals with an attention map.

    `forward` pools the channel-averaged attention logits over each anchor box
    to obtain objectness, then NMS-filters the proposals. The standard
    train/test dispatch helpers are kept for interface compatibility even
    though the current forward path does not reach them.
    """

    def __init__(self, cfg, in_channels):
        super(RPNModule, self).__init__()
        self.cfg = cfg.clone()
        anchor_generator = make_anchor_generator(cfg)
        rpn_head = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD]
        head = rpn_head(cfg, in_channels, anchor_generator.num_anchors_per_location()[0])
        rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        self.rpn_box_coder = rpn_box_coder
        box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)
        box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False)
        loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder)
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_selector_train = box_selector_train
        self.box_selector_test = box_selector_test
        self.loss_evaluator = loss_evaluator
        # Pooler used to average attention logits inside each candidate box.
        resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(output_size=(resolution, resolution), scales=scales, sampling_ratio=sampling_ratio)
        self.pooler = pooler

    def forward(self, images, features, atten_logits, atten_map, targets=None):
        """Build attention-scored proposals.

        Returns:
            (boxlists, {}): one NMS-filtered BoxList per image and an empty loss dict.
        """
        result = []
        anchors = self.anchor_generator(images, features, atten_logits)
        for (anchor, atten_logit) in zip(anchors, atten_logits.mean(1)):
            boxes = anchor[0].bbox
            # Anchors live on the feature grid; resize them back to image scale.
            boxlist = BoxList(boxes, features[0].shape[(- 2):], mode='xyxy')
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = boxlist.resize(anchor[0].size)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, 20)
            # Objectness = sigmoid of mean attention pooled inside each box.
            objectness = self.pooler([atten_logit.unsqueeze(0).unsqueeze(1)], [boxlist]).mean(3).mean(2).squeeze().sigmoid()
            boxlist.add_field('objectness', objectness)
            boxlist = boxlist_nms(boxlist, 0.7, max_proposals=2000, score_field='objectness')
            result.append(boxlist)
        # Fix: removed the unreachable train/test dispatch that followed this
        # return — it referenced undefined names (rpn_box_regression) and could
        # never execute. Also dropped the unused `image_size` local.
        return (result, {})

    def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
        """Select training proposals (kept for interface compatibility)."""
        if self.cfg.MODEL.RPN_ONLY:
            boxes = anchors
        else:
            # Proposal selection needs no gradients.
            with torch.no_grad():
                boxes = self.box_selector_train(anchors, objectness, rpn_box_regression, targets)
        losses = {}
        return (boxes, losses)

    def _forward_test(self, anchors, objectness, rpn_box_regression):
        """Select inference proposals, sorted by objectness for RPN-only models."""
        boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
        if self.cfg.MODEL.RPN_ONLY:
            inds = [box.get_field('objectness').sort(descending=True)[1] for box in boxes]
            boxes = [box[ind] for (box, ind) in zip(boxes, inds)]
        return (boxes, {})
def main():
    """Entry point: parse CLI args and launch single- or multi-process training."""
    args = parser.parse_args()
    print(args)
    if (args.seed is not None):
        # Deterministic runs: seed both RNGs and force deterministic cuDNN kernels.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if (args.gpu is not None):
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if ((args.dist_url == 'env://') and (args.world_size == (- 1))):
        # World size comes from the launcher environment when using env:// rendezvous.
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = ((args.world_size > 1) or args.multiprocessing_distributed)
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # One process per GPU; world_size becomes the total process count.
        args.world_size = (ngpus_per_node * args.world_size)
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
class TanhTransformedDistribution(tfd.TransformedDistribution):
    """Distribution of tanh(X) for X ~ `distribution` (e.g. squashed Gaussian policies)."""

    def __init__(self, distribution: tfd.Distribution, validate_args: bool=False):
        super().__init__(distribution=distribution, bijector=tfb.Tanh(), validate_args=validate_args)

    def mode(self) -> jnp.ndarray:
        """Mode of the transformed distribution: tanh of the base mode (tanh is monotone)."""
        return self.bijector.forward(self.distribution.mode())

    @classmethod
    def _parameter_properties(cls, dtype: Optional[Any], num_classes=None):
        """Parameter properties without the fixed bijector.

        Fix: added the missing @classmethod decorator — TFP invokes this on the
        class, so without it the first positional argument (dtype) would have
        been bound to `cls`.
        """
        td_properties = super()._parameter_properties(dtype, num_classes=num_classes)
        # The Tanh bijector is fixed, so it is not a tunable parameter.
        del td_properties['bijector']
        return td_properties
class Cell(nn.Module):
    """DARTS-style cell assembled from a fixed genotype (normal or reduction)."""

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)
        # A factorized reduce is needed when the previous cell halved resolution.
        self.preprocess0 = (FactorizedReduce(C_prev_prev, C) if reduction_prev
                            else ReLUConvBN(C_prev_prev, C, (1, 1), (1, 1), (0, 0)))
        self.preprocess1 = ReLUConvBN(C_prev, C, (1, 1), (1, 1), (0, 0))
        spec = genotype.reduce if reduction else genotype.normal
        concat = genotype.reduce_concat if reduction else genotype.normal_concat
        op_names, indices = zip(*spec)
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        """Materialize the genotype's (op, input-index) pairs as modules."""
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2  # two incoming ops per intermediate node
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # Reduction cells stride only the ops reading the two cell inputs.
            stride = 2 if (reduction and index < 2) else 1
            self._ops.append(OPS[name](C, stride, True))
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        """Run the cell; concat the chosen intermediate states along channels."""
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for step in range(self._steps):
            a, b = 2 * step, 2 * step + 1
            op1, op2 = self._ops[a], self._ops[b]
            h1 = op1(states[self._indices[a]])
            h2 = op2(states[self._indices[b]])
            if self.training and drop_prob > 0.0:
                # Identity edges are never dropped.
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            states.append(h1 + h2)
        return torch.cat([states[i] for i in self._concat], dim=1)
class TimeLimit(EnvWrapper):
    """Environment wrapper that terminates an episode after a fixed step budget."""

    def __init__(self, env, duration):
        super().__init__(env)
        self._duration = duration
        self._step = None  # None means "reset required"

    def step(self, action):
        """Step the wrapped env; flag the episode done once the budget is spent."""
        assert self._step is not None, 'Must reset environment.'
        obs, reward, done, info = self.env.step(action)
        self._step += 1
        expired = self._step >= self._duration
        if expired:
            if isinstance(info, EnvInfo):
                # Preserve discount/score but mark the time-limit termination.
                info = EnvInfo(info.discount, info.game_score, True)
            self._step = None
        return EnvStep(obs, reward, done, info)

    def reset(self):
        """Reset the wrapped env and restart the step counter."""
        self._step = 0
        return self.env.reset()
def start_server(args):
    """Create the evaluation web app and block serving it on args.port."""
    scorer = Scorer(args)
    routes = [
        ('/result', ResultHandler, dict(scorer=scorer)),
        ('/src', SourceHandler, dict(scorer=scorer)),
        ('/hypo', HypothesisHandler, dict(scorer=scorer)),
        ('/', EvalSessionHandler, dict(scorer=scorer)),
    ]
    app = web.Application(routes, debug=False)
    # 1 GiB buffer to accommodate large evaluation payloads.
    app.listen(args.port, max_buffer_size=(1024 ** 3))
    logger.info(f'Evaluation Server Started (process id {os.getpid()}). Listening to port {args.port} ')
    ioloop.IOLoop.current().start()
class DatasetSplit(Dataset):
    """View of `dataset` restricted to `idxs`, optionally with substituted labels.

    When `Y` is given (the "malicious" labels), items are returned as
    (image, substituted_label, true_label); otherwise as (image, label).
    """

    def __init__(self, dataset, idxs, Y=None):
        self.dataset = dataset
        self.idxs = [int(i) for i in idxs]
        self.mal = Y is not None
        if self.mal:
            self.mal_Y = Y

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, item):
        image, label = self.dataset[self.idxs[item]]
        if self.mal:
            return (torch.tensor(image), torch.tensor(self.mal_Y[item]), torch.tensor(label))
        return (torch.tensor(image), torch.tensor(label))
def setup(rank, world_size, port):
    """Initialize this rank's NCCL process group using env:// rendezvous."""
    rendezvous_env = {'MASTER_ADDR': 'localhost', 'MASTER_PORT': str(port)}
    os.environ.update(rendezvous_env)
    dist.init_process_group('nccl', init_method='env://', rank=rank, world_size=world_size)
class Txt2ImgIterableBaseDataset(IterableDataset):
    """Base class for iterable txt2img datasets; subclasses implement __iter__."""

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.num_records = num_records
        self.valid_ids = valid_ids
        self.sample_ids = valid_ids  # starts identical to valid_ids; may be resharded later
        self.size = size
        print(f'{type(self).__name__} dataset contains {len(self)} examples.')

    def __len__(self):
        return self.num_records

    def __iter__(self):
        # Intentionally empty; subclasses provide the actual record iterator.
        pass
def update(config, args):
    """Merge selected CLI overrides into `config` via `get_value`.

    NOTE(review): `get_value` is defined elsewhere; presumably it prefers the
    CLI value when provided and keeps the config value otherwise — confirm.
    Mutates and returns the same `config` dict.
    """
    config['model_dir'] = get_value(config['model_dir'], args.model_dir)
    config['training_opt']['batch_size'] = get_value(config['training_opt']['batch_size'], args.batch_size)
    return config
def get_metric(y_true_aspect, y_predict_aspect, y_true_opinion, y_predict_opinion, y_true_sentiment, y_predict_sentiment, mask, train_op):
    """Compute aspect/opinion/sentiment scores for ABSA predictions.

    Args:
        y_true_*/y_predict_*: padded label sequences for each subtask.
        mask: validity mask for the padded sequences.
        train_op: when truthy, also score the opinion predictions; otherwise
            the opinion F-score defaults to 0.

    Returns:
        (f_aspect, f_opinion, acc_s, f_s, f_absa)
    """
    # Bug fix: f_opinion was previously unbound (NameError on return) whenever
    # train_op was falsy; the original's unused (f_a, f_o) defaults are dropped.
    f_opinion = 0
    (true_aspect, true_sentiment) = convert_to_list(y_true_aspect, y_true_sentiment, mask)
    (predict_aspect, predict_sentiment) = convert_to_list(y_predict_aspect, y_predict_sentiment, mask)
    (true_opinion, _) = convert_to_list(y_true_opinion, y_true_sentiment, mask)
    (predict_opinion, _) = convert_to_list(y_predict_opinion, y_predict_sentiment, mask)
    (f_aspect, acc_s, f_s, f_absa) = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, 0)
    if train_op:
        (f_opinion, _, _, _) = score(true_opinion, predict_opinion, true_sentiment, predict_sentiment, 1)
    return (f_aspect, f_opinion, acc_s, f_s, f_absa)
class KIEDecoderTRIE(nn.Module):
    """TRIE-style key-information-extraction decoder.

    Fuses per-character OCR logits with a per-box multi-modal context vector,
    merges boxes into document-level sequences (UnionLayer), tags them with a
    BiLSTM, and scores entity tags with either a CRF or cross-entropy.
    """

    def __init__(self, ocr_dict, entity_dict, use_crf=False, ins_lvl_mean=False, d_model=(- 1), lstm_args=None):
        """Build the decoder.

        Args:
            ocr_dict: char -> id mapping; must contain '<PAD>' and '<END>'.
            entity_dict: entity tag -> id mapping; must contain '<PAD>'.
            use_crf: score tags with a CRF instead of per-token cross-entropy.
            ins_lvl_mean: normalize the CRF log-likelihood by the number of
                boxes instead of the number of valid tokens.
            d_model: feature dim; the BiLSTM input is 2*d_model (logits+context).
            lstm_args: dict with 'bilstm_kwargs', 'mlp_kwargs', optional 'apply_norm'.
        """
        super(KIEDecoderTRIE, self).__init__()
        self.debug_mode = False
        self.ins_lvl_mean = ins_lvl_mean
        self.ocr_dict = ocr_dict
        # Reverse map id -> char, used to reconstruct document strings (debug path).
        self.rev_ocr_dict = dict()
        for (key, val) in ocr_dict.items():
            self.rev_ocr_dict[val] = key
        self.entity_dict = entity_dict
        bilstm_kwargs = lstm_args.get('bilstm_kwargs', dict())
        mlp_kwargs = lstm_args.get('mlp_kwargs', dict())
        # Input is the concatenation of OCR logits and multi-modal context.
        bilstm_kwargs.update(input_size=(2 * d_model))
        if bilstm_kwargs['bidirectional']:
            mlp_kwargs.update(in_dim=(2 * bilstm_kwargs['hidden_size']))
        else:
            mlp_kwargs.update(in_dim=bilstm_kwargs['hidden_size'])
        mlp_kwargs.update(out_dim=len(self.entity_dict))
        self.bilstm_layer = BiLSTMLayer(bilstm_kwargs, mlp_kwargs, pad_val=self.ocr_dict['<PAD>'], apply_norm=lstm_args.get('apply_norm', False))
        self.union_layer = UnionLayer(debug_mode=self.debug_mode)
        self.soft_max_func = nn.Softmax(dim=(- 1))
        self.use_crf = use_crf
        if use_crf:
            self.crf = CRF(len(entity_dict), batch_first=True)
        else:
            self.criterion = nn.CrossEntropyLoss(ignore_index=self.entity_dict['<PAD>'], reduction='mean')

    def forward(self, ocr_logits, multi_modal_context, texts=None, tags=None, logits_logger=None, sorted_idx=None):
        """Training: append the tagging loss to logits_logger['CRF'].

        Inference: stash logits/context in logits_logger for a later crf_decode().
        """
        if self.training:
            (B, N, L) = texts.shape
            C = ocr_logits.shape[(- 1)]
            # Broadcast each box's context vector across its L character slots.
            multi_modal_context = multi_modal_context.reshape((B * N), C).unsqueeze(1).expand((B * N), L, C)
            if (not self.debug_mode):
                (new_kie_logits, new_mask, new_tags, mask) = self.union_layer(torch.cat((ocr_logits, multi_modal_context), dim=(- 1)).reshape(B, N, L, (- 1)), texts, self.ocr_dict['<END>'], tags_=tags, tag_pad=self.entity_dict['<PAD>'], sorted_idx=sorted_idx)
            else:
                raise NotImplementedError
                # NOTE(review): the debug-mode code below is unreachable (after
                # the raise) and references `kie_logits`, which is not defined
                # in this scope — confirm intent before re-enabling.
                (new_kie_logits, new_mask, new_tags, mask, doc_texts) = self.union_layer(kie_logits.reshape(B, N, L, (- 1)), texts, self.ocr_dict['<END>'], tags_=tags, tag_pad=self.entity_dict['<PAD>'], sorted_idx=sorted_idx)
                docs = []
                for i in range(B):
                    cur_doc = ''
                    for j in doc_texts[i]:
                        cur_idx = j.item()
                        if (cur_idx == self.ocr_dict['<END>']):
                            break
                        cur_doc += self.rev_ocr_dict[cur_idx]
                    docs.append(cur_doc)
            new_kie_logits = self.bilstm_layer(new_kie_logits, new_mask.sum(dim=(- 1)), (None, None))
            if self.use_crf:
                if self.ins_lvl_mean:
                    log_likelihood = self.crf(new_kie_logits, new_tags, mask=new_mask)
                    if sorted_idx:
                        # Normalize by the total number of boxes in the batch.
                        total_num_box = 0
                        for idx_set in sorted_idx:
                            total_num_box += len(idx_set)
                        log_likelihood /= total_num_box
                    else:
                        raise ValueError(f'sorted_idx is required for calculating the num of boxes')
                else:
                    # Token-level normalization over all valid positions.
                    log_likelihood = self.crf(new_kie_logits, new_tags, mask=new_mask)
                    log_likelihood /= new_mask.sum()
                if ('CRF' not in logits_logger):
                    logits_logger['CRF'] = [(- log_likelihood.reshape(1))]
                else:
                    logits_logger['CRF'].append((- log_likelihood.reshape(1)))
            elif ('CRF' not in logits_logger):
                logits_logger['CRF'] = [self.criterion(new_kie_logits.reshape((- 1), new_kie_logits.shape[(- 1)]), new_tags.reshape((- 1)))]
            else:
                logits_logger['CRF'].append(self.criterion(new_kie_logits.reshape((- 1), new_kie_logits.shape[(- 1)]), new_tags.reshape((- 1))))
        else:
            # Inference path: keep what crf_decode() needs.
            logits_logger['ocr_logits'] = ocr_logits
            logits_logger['multi_modal_context'] = multi_modal_context

    def crf_decode(self, logits_logger, shape_, sorted_idx, gt_texts=None):
        """Decode entity tags at inference from the logits stashed by forward().

        Appends per-instance (per-box) tags or logits to logits_logger['CRF'].

        Raises:
            RuntimeError: if called while the module is in training mode.
        """
        if self.training:
            raise RuntimeError('crf_decode should be inference only')
        (B, N) = shape_
        (L, C) = logits_logger['ocr_logits'].shape[1:]
        multi_modal_context = logits_logger['multi_modal_context'].reshape((B * N), C).unsqueeze(1).expand((B * N), L, C)
        ocr_logits = logits_logger['ocr_logits']
        # Use ground-truth texts when provided, otherwise greedy OCR predictions.
        if (not isinstance(gt_texts, type(None))):
            pred_texts = gt_texts
        else:
            (_, pred_texts) = torch.max(self.soft_max_func(logits_logger['REC'][(- 1)]), dim=(- 1))
        (new_kie_logits, new_mask, mask) = self.union_layer(torch.cat((ocr_logits, multi_modal_context), dim=(- 1)).reshape(B, N, L, (- 1)), pred_texts.reshape(B, N, (- 1)), self.ocr_dict['<END>'], sorted_idx=sorted_idx)
        new_kie_logits = self.bilstm_layer(new_kie_logits, new_mask.sum(dim=(- 1)), (None, None))
        if self.use_crf:
            best_paths = self.crf.decode(new_kie_logits, new_mask)
            # Split document-level paths back into per-box tag sequences.
            ins_lvl_tags = self.union_layer.split_out(best_paths, mask, sorted_idx=sorted_idx)
            if ('CRF' not in logits_logger):
                logits_logger['CRF'] = [ins_lvl_tags]
            else:
                logits_logger['CRF'].append(ins_lvl_tags)
        else:
            ins_lvl_logits = self.union_layer.split_logits_out(new_kie_logits, mask, sorted_idx=sorted_idx)
            if ('CRF' not in logits_logger):
                logits_logger['CRF'] = [ins_lvl_logits]
            else:
                logits_logger['CRF'].append(ins_lvl_logits)
class SharedMLP(nn.Sequential):
    """Stack of shared Conv2d layers; BN/activation are skipped on layer 0 of a
    first pre-activation block (there is nothing preceding it to pre-activate)."""

    def __init__(self, args: List[int], *, bn: bool=False, activation=nn.ReLU(inplace=True), preact: bool=False, first: bool=False, name: str=''):
        super().__init__()
        for i in range(len(args) - 1):
            # Pre-norm/activation applies everywhere except the very first
            # layer of the first pre-activation block.
            apply_pre = (not first) or (not preact) or (i != 0)
            layer = Conv2d(
                args[i],
                args[i + 1],
                bn=(apply_pre and bn),
                activation=(activation if apply_pre else None),
                preact=preact,
            )
            self.add_module(name + 'layer{}'.format(i), layer)
def parse(opt_path, root_path, is_train=True, debug=False):
    """Parse a YAML option file and fill in derived paths and settings.

    Args:
        opt_path: path to the YAML options file.
        root_path: project root under which experiment/result dirs are created.
        is_train: build training paths when True, result paths otherwise.
        debug: prefix the run name with 'debug_' and shorten logging intervals.

    Returns:
        The fully-populated options dict.
    """
    with open(opt_path, mode='r') as f:
        Loader, _ = ordered_yaml()
        opt = yaml.load(f, Loader=Loader)
    if debug and not opt['name'].startswith('debug'):
        opt['name'] = 'debug_' + opt['name']
    opt['is_train'] = is_train
    if opt['num_gpu'] == 'auto':
        opt['num_gpu'] = torch.cuda.device_count()
    # Per-dataset settings: phase tag, scale, and user-expanded data roots.
    for phase_key, dataset in opt['datasets'].items():
        phase = phase_key.split('_')[0]
        dataset['phase'] = phase
        if 'scale' in opt:
            dataset['scale'] = opt['scale']
        for root_key in ('dataroot_gt', 'dataroot_lq'):
            if dataset.get(root_key) is not None:
                dataset[root_key] = osp.expanduser(dataset[root_key])
    if opt.get('folder_suffix') is None:
        opt['folder_suffix'] = ''
    for key, val in opt['path'].items():
        if val is not None and ('resume_state' in key or 'pretrain_network' in key):
            opt['path'][key] = osp.expanduser(val)
    if is_train:
        experiments_root = osp.join(root_path, 'experiments', opt['folder_suffix'], opt['name'])
        opt['path']['experiments_root'] = experiments_root
        opt['path']['models'] = osp.join(experiments_root, 'models')
        opt['path']['training_states'] = osp.join(experiments_root, 'training_states')
        opt['path']['log'] = experiments_root
        opt['path']['visualization'] = osp.join(experiments_root, 'visualization')
        if 'debug' in opt['name']:
            # Shorten validation/logging intervals for quick debug runs.
            if 'val' in opt:
                opt['val']['val_freq'] = 8
            opt['logger']['print_freq'] = 1
            opt['logger']['save_checkpoint_freq'] = 8
    else:
        results_root = osp.join(root_path, 'results', opt['folder_suffix'], opt['name'])
        opt['path']['results_root'] = results_root
        opt['path']['log'] = results_root
        opt['path']['visualization'] = osp.join(results_root, 'visualization')
    return opt
class Model(nn.Module):
    """Spherical CNN: one S2 convolution, then SO3 convolutions, then an
    SO3-integrated linear head with log-softmax output."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 100, 100, nclasses]
        self.bandwidths = [64, 16, 10]
        assert len(self.bandwidths) == len(self.features) - 1
        layers = []
        # First layer maps S2 signals to SO3 signals.
        grid_s2 = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid_s2))
        for l in range(1, len(self.features) - 2):
            nfeature_in = self.features[l]
            nfeature_out = self.features[l + 1]
            b_in = self.bandwidths[l]
            b_out = self.bandwidths[l + 1]
            layers.append(nn.BatchNorm3d(nfeature_in, affine=True))
            layers.append(nn.ReLU())
            grid_so3 = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(nfeature_in, nfeature_out, b_in, b_out, grid_so3))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Linear(self.features[-2], self.features[-1])

    def forward(self, x):
        x = self.sequential(x)
        x = so3_integrate(x)  # integrate over SO(3) for rotation invariance
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
def test_fs_observer_resource_event(dir_obs, sample_run, tmpfile):
    """resource_event should copy the file into _resources and record it in run.json."""
    (basedir, obs) = dir_obs
    _id = obs.started_event(**sample_run)
    run_dir = basedir.join(_id)
    obs.resource_event(tmpfile.name)
    # The resource is stored exactly once, with its original content.
    res_dir = basedir.join('_resources')
    assert res_dir.exists()
    assert (len(res_dir.listdir()) == 1)
    assert (res_dir.listdir()[0].read() == tmpfile.content)
    # run.json records the (source path, stored path) pair.
    run = json.loads(run_dir.join('run.json').read())
    assert (len(run['resources']) == 1)
    assert (run['resources'][0] == [tmpfile.name, res_dir.listdir()[0].strpath])
def train(args, trainer, task, epoch_itr):
    """Train for one epoch, with optional mid-epoch validation and checkpointing."""
    # Per-epoch update frequency; fall back to the last configured entry.
    update_freq = (args.update_freq[(epoch_itr.epoch - 1)] if (epoch_itr.epoch <= len(args.update_freq)) else args.update_freq[(- 1)])
    itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum))
    # Group batches so each train_step accumulates `update_freq` of them.
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
    extra_meters = collections.defaultdict((lambda : AverageMeter()))
    valid_subsets = args.valid_subset.split(',')
    max_update = (args.max_update or math.inf)
    for (i, samples) in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if (log_output is None):
            continue
        stats = get_training_stats(trainer)
        for (k, v) in log_output.items():
            # The core meters are tracked by the trainer itself; skip them here.
            if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']):
                continue
            if ('loss' in k):
                # Loss-like meters are weighted by the sample size.
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])
        if (i == 0):
            # Ignore the first step's words-per-second (includes warmup overhead).
            trainer.get_meter('wps').reset()
        num_updates = trainer.get_num_updates()
        # Mid-epoch validation + checkpoint every save_interval_updates steps.
        if ((not args.disable_validation) and (args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if (num_updates >= max_update):
            break
    # Log end-of-epoch stats, then reset per-epoch meters.
    stats = get_training_stats(trainer)
    for (k, meter) in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])
    for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip']:
        meter = trainer.get_meter(k)
        if (meter is not None):
            meter.reset()
def create_oracles_with_ilp(dataname, path_read, path_wt_distributed):
    """Run ILP oracle extraction for a fixed set of example ids.

    Bug fix: the body previously referenced the undefined name `data_name`
    instead of the `dataname` parameter, raising NameError at call time
    (unless a same-named global happened to exist).
    """
    example_ids = (
        '6b43f5e79b3cdf1c4a6debad958d5d70358f4269',
        'e1dc607107cc484c4bc1515cfa414a45088d4048',
        'b3f9fa06492bed4394ca521100b10e87431d0ddb',
        '8e03f1cc2061dbfb947d4e790395bddbf070db53',
    )
    for example_id in example_ids:
        process_one_example_with_ilp(path_read, path_wt_distributed, example_id, 30, dataname)
def data_reader():
    """Load CASIA face images into memory and return a batched reader generator.

    The returned `reader` yields NCHW float batches normalized to [-1, 1].
    """
    data_root = 'data/Casia_maxpy_clean'
    images_data = []
    images_label = []
    batch_size = 128
    count_value = 0
    class_file_list = os.listdir(data_root)
    for i in tqdm.tqdm(range(len(class_file_list)), ncols=80):
        class_dir = os.path.join(data_root, class_file_list[i])
        for image_path in os.listdir(class_dir):
            full_path = data_root + '/' + class_file_list[i] + '/' + image_path
            images_data.append(vision.image_load(full_path, backend='cv2'))
            images_label.append(i)  # class index is the label
            count_value += 1
            print(count_value)
    assert len(images_data) == len(images_label)

    def reader():
        # Drop the final partial batch (and one extra, as in the original).
        num_batches = int(len(images_data) / batch_size) - 1
        for b in range(num_batches):
            batch = np.array(images_data[b * batch_size:(b + 1) * batch_size])
            batch = np.transpose(batch, (0, 3, 1, 2))  # HWC -> CHW
            batch = (batch - 127.5) / 127.5
            yield batch

    return reader
class LoadGinConfigOperator(bpy.types.Operator):
    """Blender operator that loads the scene's gin config file into a text datablock."""

    bl_idname = 'scene.zpy_load_gin_config'
    bl_label = 'Load gin config from file.'
    bl_description = 'Load gin config from file.'
    bl_category = 'ZPY'
    bl_options = {'REGISTER'}
    # Name of the Blender text datablock the config is loaded into.
    DEFAULT_TEXT_NAME = 'config'

    def execute(self, context):
        """Read the file at scene.zpy_gin_config_path into the text datablock."""
        zpy.blender.load_text_from_file(bpy.path.abspath(context.scene.zpy_gin_config_path), text_name=self.DEFAULT_TEXT_NAME)
        return {'FINISHED'}
def complete_device(device):
    """Normalize `device` to a concrete torch.device.

    Falls back to CPU when CUDA is unavailable; fills in the current CUDA
    device index when given a bare 'cuda' device.

    Args:
        device: a device string or torch.device.

    Returns:
        A torch.device (possibly the input, unchanged).
    """
    if not torch.cuda.is_available():
        return torch.device('cpu')
    # Fix: was `type(device) == str`, which rejected str subclasses.
    if isinstance(device, str):
        device = torch.device(device)
    if device.type == 'cuda' and device.index is None:
        return torch.device(device.type, torch.cuda.current_device())
    return device
class EvolutionStrategyEmitter(EmitterBase):
    """Emitter that adapts an evolution strategy (e.g. CMA-ES) using archive feedback.

    NOTE(review): x0/batch_size/restarts/itrs read like @property accessors
    whose decorators were stripped upstream — confirm against the original API.
    """

    def __init__(self, archive, *, x0, sigma0, ranker='2imp', es='cma_es', es_kwargs=None, selection_rule='filter', restart_rule='no_improvement', bounds=None, batch_size=None, seed=None):
        """Set up the ES optimizer, the ranker, and restart bookkeeping.

        Raises:
            ValueError: for an invalid selection_rule or restart_rule.
        """
        EmitterBase.__init__(self, archive, solution_dim=archive.solution_dim, bounds=bounds)
        # Derive independent seeds for the optimizer and the ranker.
        seed_sequence = (seed if isinstance(seed, np.random.SeedSequence) else np.random.SeedSequence(seed))
        (opt_seed, ranker_seed) = seed_sequence.spawn(2)
        self._x0 = np.array(x0, dtype=archive.dtype)
        check_shape(self._x0, 'x0', archive.solution_dim, 'archive.solution_dim')
        self._sigma0 = sigma0
        if (selection_rule not in ['mu', 'filter']):
            raise ValueError(f'Invalid selection_rule {selection_rule}')
        self._selection_rule = selection_rule
        self._restart_rule = restart_rule
        self._restarts = 0
        self._itrs = 0
        # Validate restart_rule eagerly (raises on an unknown rule).
        _ = self._check_restart(0)
        self._opt = _get_es(es, sigma0=sigma0, batch_size=batch_size, solution_dim=self._solution_dim, seed=opt_seed, dtype=self.archive.dtype, lower_bounds=self.lower_bounds, upper_bounds=self.upper_bounds, **(es_kwargs if (es_kwargs is not None) else {}))
        self._opt.reset(self._x0)
        self._ranker = _get_ranker(ranker, ranker_seed)
        self._ranker.reset(self, archive)
        self._batch_size = self._opt.batch_size

    def x0(self):
        # Initial solution the ES starts from.
        return self._x0

    def batch_size(self):
        # Number of solutions produced per ask().
        return self._batch_size

    def restarts(self):
        # Number of ES restarts performed so far.
        return self._restarts

    def itrs(self):
        # Number of tell() iterations completed so far.
        return self._itrs

    def ask(self):
        """Sample a batch of candidate solutions from the ES."""
        return self._opt.ask()

    def _check_restart(self, num_parents):
        """Return True when the ES should restart this iteration.

        Integral rules restart every N iterations; 'no_improvement' restarts
        when nothing was added to the archive; 'basic' never restarts.

        Raises:
            ValueError: for an unknown restart rule.
        """
        if isinstance(self._restart_rule, numbers.Integral):
            return ((self._itrs % self._restart_rule) == 0)
        if (self._restart_rule == 'no_improvement'):
            return (num_parents == 0)
        if (self._restart_rule == 'basic'):
            return False
        raise ValueError(f'Invalid restart_rule {self._restart_rule}')

    def tell(self, solution, objective, measures, add_info, **fields):
        """Rank evaluated solutions, update the ES, and restart it if needed."""
        (data, add_info) = validate_batch(self.archive, {'solution': solution, 'objective': objective, 'measures': measures, **fields}, add_info)
        self._itrs += 1
        # Count solutions that were actually added to the archive.
        new_sols = add_info['status'].astype(bool).sum()
        (indices, ranking_values) = self._ranker.rank(self, self.archive, data, add_info)
        # 'filter' keeps all added solutions as parents; 'mu' keeps the top half.
        num_parents = (new_sols if (self._selection_rule == 'filter') else (self._batch_size // 2))
        self._opt.tell(indices, ranking_values, num_parents)
        if (self._opt.check_stop(ranking_values[indices]) or self._check_restart(new_sols)):
            # Restart the ES from a randomly sampled elite in the archive.
            new_x0 = self.archive.sample_elites(1)['solution'][0]
            self._opt.reset(new_x0)
            self._ranker.reset(self, self.archive)
            self._restarts += 1
def integer_mixed_cell(dim, nbr, idx, verbose=True):
    """Return (inner normal, mixed volume, supports) of mixed cell `idx`.

    Args:
        dim: ambient dimension of the points.
        nbr: number of distinct supports.
        idx: index of the mixed cell in the cells container.
        verbose: print the retrieved data when True.

    Returns:
        (normal, mv, supports): the inner normal, the mixed volume, and a
        tuple of per-support point lists.
    """
    from ast import literal_eval
    from phcpy.phcpy2c3 import py2c_intcelcon_get_inner_normal as getnormal
    from phcpy.phcpy2c3 import py2c_intcelcon_mixed_volume as mixvol
    from phcpy.phcpy2c3 import py2c_intcelcon_number_of_points_in_cell as npts
    from phcpy.phcpy2c3 import py2c_intcelcon_get_point_in_cell as getpoint
    normal = literal_eval(getnormal(dim, idx))
    mv = mixvol(idx)
    lenpts = literal_eval(npts(idx, nbr))
    if verbose:
        print('inner normal :', normal)
        print('mixed volume :', mv)
        print('number of points :', lenpts)
    supp = [[] for _ in range(nbr)]
    for i in range(nbr):
        if verbose:
            print('points in support', (i + 1), ':')
        for j in range(lenpts[i]):
            # Fix: use literal_eval (already imported) instead of eval — the
            # point string is a plain literal and must not be executed as code.
            point = literal_eval(getpoint(dim, idx, (i + 1), (j + 1)))
            if verbose:
                print(point)
            supp[i].append(point)
    return (normal, mv, tuple(supp))
class MasterServicer(elastic_training_pb2_grpc.MasterServicer):
    """gRPC servicer of the job master.

    Nodes talk to the master through two generic RPCs:

    * ``get``: read-only queries (dataset tasks, cluster versions,
      rendezvous state, key-value pairs, parallelism configs, ...).
    * ``report``: state pushed by nodes (resource usage, task results,
      failures, sync barriers, rendezvous parameters, ...).

    The concrete type of the deserialized message selects the handler.
    """

    def __init__(self, task_manager, job_manager, speed_monitor: SpeedMonitor,
                 rdzv_managers: Dict[str, RendezvousManager],
                 job_metric_collector=None, elastic_ps_service=None,
                 sync_service=None):
        # Managers for dataset sharding and node lifecycle.
        self._task_manager: TaskManager = task_manager
        self._job_manager: JobManager = job_manager
        self._speed_monitor = speed_monitor
        self._rdzv_managers = rdzv_managers
        # In-memory key-value store shared by all nodes.
        self._kv_store = KVStoreService()
        self._job_metric_collector: JobMetricCollector = job_metric_collector
        self._elastic_ps_service: ElasticPsService = elastic_ps_service
        self._sync_service: SyncService = sync_service
        self._lock = threading.Lock()
        self._version = 0
        # Timestamp (seconds) of the first task request; 0 until training
        # actually starts.
        self._start_training_time = 0
        self._start_autoscale = False

    def get(self, request, _):
        """Dispatch a read-only query to its handler.

        Returns an empty response when the request carries no message or
        the message type is unknown.
        """
        node_type = request.node_type
        node_id = request.node_id
        req_message = grpc.deserialize_message(request.data)
        response = elastic_training_pb2.Message()
        if not req_message:
            return response
        message = None
        if isinstance(req_message, grpc.TaskRequest):
            message = self._get_task(node_type, node_id, req_message)
        elif isinstance(req_message, grpc.ShardCheckpointRequest):
            message = self._get_shard_checkpoint(req_message)
        elif isinstance(req_message, grpc.ClusterVersionRequest):
            message = self._get_cluster_version(req_message)
        elif isinstance(req_message, grpc.RunningNodesRequest):
            message = self._get_running_nodes()
        elif isinstance(req_message, grpc.JoinRendezvousRequest):
            message = self._join_rendezvous(req_message)
        elif isinstance(req_message, grpc.WaitingNodeNumRequest):
            message = self._num_nodes_waiting(req_message.rdzv_name)
        elif isinstance(req_message, grpc.NetworkReadyRequest):
            message = self._check_fault_node()
        elif isinstance(req_message, grpc.StragglerExistRequest):
            message = self._check_straggler()
        # Fix: removed a duplicated, unreachable elif branch for
        # JoinRendezvousRequest (already handled above).
        elif isinstance(req_message, grpc.CommWorldRequest):
            message = self._get_comm_world(req_message)
        elif isinstance(req_message, grpc.KeyValuePair):
            message = self._kv_store_get(req_message)
        elif isinstance(req_message, grpc.PsNodesRequest):
            message = self._query_ps_nodes()
        elif isinstance(req_message, grpc.TrainingStatusRequest):
            message = self._get_training_status()
        elif isinstance(req_message, grpc.ParallelConfigRequest):
            message = self._get_paral_config()
        elif isinstance(req_message, grpc.CheckHardwareResetRequest):
            message = self._need_to_restart_training(node_type, node_id)
        if message:
            response.data = message.serialize()
        return response

    def _get_task(self, node_type, node_id, request: grpc.TaskRequest):
        """Hand out the next dataset shard task for a worker.

        Returns a WAIT task when the dataset still has pending shards but
        none is available for this worker right now.
        """
        if not self._start_training_time:
            self._start_training_time = int(time.time())
        shard = grpc.Shard()
        res = grpc.Task(shard=shard)
        ds_name = request.dataset_name
        dataset = self._task_manager.get_dataset(ds_name)
        if not dataset:
            return res
        task = self._task_manager.get_dataset_task(node_type, node_id, ds_name)
        if task:
            res.task_id = task.task_id
            res.type = task.task_type
            res.shard.name = task.shard.name
            res.shard.start = task.shard.start
            res.shard.end = task.shard.end
            if task.shard.record_indices:
                res.shard.indices = task.shard.record_indices
        elif not dataset.completed():
            # No task available yet, but the dataset is not finished:
            # tell the worker to wait and retry.
            res.type = elastic_training_pb2.WAIT
            with self._lock:
                self._task_manager.reset_worker_start_task_time(node_id)
        return res

    def _get_shard_checkpoint(self, request: grpc.ShardCheckpointRequest):
        """Return the JSON checkpoint of a dataset's shard progress."""
        response = grpc.ShardCheckpoint()
        dataset = self._task_manager.get_dataset(request.dataset_name)
        checkpoint = dataset.checkpoint()
        if checkpoint:
            response.content = checkpoint.to_json()
        return response

    def _get_cluster_version(self, request: grpc.ClusterVersionRequest):
        """Return the cluster version seen by a worker or PS node."""
        message = grpc.ClusterVersion()
        if not self._elastic_ps_service:
            return message
        if request.task_type == NodeType.WORKER:
            message.version = self._elastic_ps_service.get_worker_version(
                request.version_type, request.task_id)
        elif request.task_type == NodeType.PS:
            message.version = self._elastic_ps_service.get_ps_version(
                request.version_type, request.task_id)
        return message

    def _query_ps_nodes(self):
        """Return metadata of the next PS cluster workers should connect to."""
        res = grpc.PsNodes(nodes=[])
        training_ps: List[Node] = self._job_manager.get_next_cluster_ps()
        ready = self._job_manager.ready_for_new_ps_cluster()
        ps_failure = self._job_manager.has_ps_failure()
        for ps in training_ps:
            ps_meta = grpc.NodeMeta()
            ps_meta.type = NodeType.PS
            ps_meta.addr = ps.service_addr
            ps_meta.cpu = ps.config_resource.cpu
            ps_meta.memory = int(ps.config_resource.memory)
            res.nodes.append(ps_meta)
        # Fix: log once after the loop instead of logging the partially
        # built result on every iteration.
        logger.info('PS nodes : %s', res)
        res.new_ps_ready = ready
        res.ps_failure = ps_failure
        return res

    def _get_running_nodes(self):
        """Return metadata (addr/cpu/memory/gpu) of all running nodes."""
        res = grpc.RunningNodes(nodes=[])
        nodes: List[Node] = self._job_manager.get_running_nodes()
        for node in nodes:
            meta = grpc.NodeMeta()
            meta.type = node.type
            meta.addr = node.service_addr
            meta.cpu = node.config_resource.cpu
            meta.memory = node.config_resource.memory
            if node.config_resource.gpu_type:
                meta.gpu_type = node.config_resource.gpu_type
                meta.gpu = node.config_resource.gpu_num
            res.nodes.append(meta)
        return res

    def _get_training_status(self):
        """Report whether the training loop has started or is pending."""
        res = grpc.TrainingStatus()
        if self._task_manager.training_started():
            res.status = TrainingLoopStatus.START
        else:
            res.status = TrainingLoopStatus.PENDING
        return res

    def _check_fault_node(self):
        """Return nodes deemed faulty by the network-check rendezvous."""
        rdzv_manager: NetworkCheckRendezvousManager = self._rdzv_managers[
            RendezvousName.NETWORK_CHECK]
        nodes, reason = rdzv_manager.check_fault_node()
        res = grpc.NetworkCheckResult(nodes=nodes, reason=reason)
        return res

    def _check_straggler(self):
        """Return straggler nodes detected by the network-check rendezvous."""
        rdzv_manager: NetworkCheckRendezvousManager = self._rdzv_managers[
            RendezvousName.NETWORK_CHECK]
        nodes, reason = rdzv_manager.get_straggler()
        res = grpc.NetworkCheckResult(nodes=nodes, reason=reason)
        return res

    def _join_rendezvous(self, request: grpc.JoinRendezvousRequest):
        """Add a node to a rendezvous round and return the round number."""
        rdzv_manager = self._rdzv_managers[request.rdzv_name]
        round = rdzv_manager.join_rendezvous(
            request.node_id, request.local_world_size)
        if request.rdzv_name == RendezvousName.NETWORK_CHECK:
            # Joining a network check invalidates waiting state of the
            # elastic-training rendezvous.
            training_manager = self._rdzv_managers[
                RendezvousName.ELASTIC_TRAINING]
            training_manager.clear_waiting_nodes()
        res = grpc.RendezvousState(round=round)
        return res

    def _num_nodes_waiting(self, rdzv_name):
        """Return how many nodes are waiting in the given rendezvous."""
        waiting_num = self._rdzv_managers[rdzv_name].num_nodes_waiting()
        res = grpc.RendezvousState(waiting_num=waiting_num)
        return res

    def _get_comm_world(self, request: grpc.CommWorldRequest):
        """Return the communication world (round, group, rank map) for a node."""
        rdzv_manager = self._rdzv_managers[request.rdzv_name]
        rdzv_round, group, nodes = rdzv_manager.get_comm_world(request.node_id)
        res = grpc.RendezvousState(world={})
        res.group = group
        res.round = rdzv_round
        for rank_id, worker_num in nodes.items():
            res.world[rank_id] = worker_num
        return res

    def _kv_store_get(self, request: grpc.KeyValuePair):
        """Look up a key in the master's key-value store."""
        value = self._kv_store.get(request.key)
        res = grpc.KeyValuePair(request.key, value)
        return res

    def _get_paral_config(self):
        """Return the current parallelism strategy, or an empty config."""
        res = self._job_manager.get_opt_strategy()
        if not res:
            res = grpc.ParallelConfig()
        return res

    def _need_to_restart_training(self, node_type, node_id):
        """Tell a worker whether it should restart training after a reset."""
        restart = self._job_manager.verify_restarting_worker_training(
            node_type, node_id)
        res = grpc.ParallelConfig()
        res.restart = restart
        return res

    def report(self, request, _):
        """Dispatch a state update pushed by a node to its handler.

        Returns a response whose ``success`` flag reflects the handler
        result; unknown or empty messages yield ``success=False``.
        """
        node_type = request.node_type
        node_id = request.node_id
        message = grpc.deserialize_message(request.data)
        response = elastic_training_pb2.Response()
        if not message:
            return response
        success = False
        if isinstance(message, grpc.DatasetShardParams):
            success = self._collect_dataset_shard_params(message)
        elif isinstance(message, grpc.ResourceStats):
            success = self._update_node_resource_usage(
                node_type, node_id, message)
        elif isinstance(message, grpc.ModelInfo):
            success = self._collect_model_info(message)
        elif isinstance(message, grpc.GlobalStep):
            success = self._collect_global_step(message)
        elif isinstance(message, grpc.ShardCheckpoint):
            success = self._restore_shard_checkpoint(message)
        elif isinstance(message, grpc.TaskResult):
            success = self._report_task_result(message)
        elif isinstance(message, grpc.ClusterVersion):
            success = self._update_cluster_version(message)
        elif isinstance(message, grpc.NodeAddress):
            success = self._update_node_address(message)
        elif isinstance(message, grpc.NetworkStatus):
            success = self._update_node_status(message)
        elif isinstance(message, grpc.NodeEvent):
            success = self._update_node_event(message)
        elif isinstance(message, grpc.SyncJoin):
            success = self._join_sync(node_type, node_id, message)
        elif isinstance(message, grpc.SyncFinish):
            success = self._sync_finished(message)
        elif isinstance(message, grpc.SyncBarrier):
            success = self._barrier(message)
        elif isinstance(message, grpc.NodeFailure):
            success = self._report_failure(node_type, node_id, message)
        elif isinstance(message, grpc.RendezvousParams):
            success = self._report_rdzv_params(message)
        elif isinstance(message, grpc.PsReady):
            success = self._ready_for_ps_relaunch()
        elif isinstance(message, grpc.KeyValuePair):
            success = self._kv_store_set(message)
        elif isinstance(message, grpc.ParallelConfig):
            success = self._report_paral_config(node_type, node_id, message)
        response.success = success
        return response

    def _ready_for_ps_relaunch(self):
        """Notify the job manager that PS nodes may be relaunched."""
        self._job_manager.post_ps_ready()
        return True

    def _collect_dataset_shard_params(self, metrics: grpc.DatasetShardParams):
        """Create a new sharded dataset from worker-reported parameters."""
        num_minibatches_per_task = (
            metrics.num_minibatches_per_shard
            or _DEFAULT_NUM_MINIBATCHES_PER_SHARD)
        shard_size = metrics.batch_size * num_minibatches_per_task
        splitter = new_dataset_splitter(
            metrics.shuffle, shard_size, metrics.dataset_size,
            metrics.num_epochs, metrics.dataset_name, metrics.storage_type)
        self._task_manager.new_dataset(
            metrics.batch_size, metrics.dataset_size, metrics.dataset_name,
            splitter, metrics.task_type)
        if self._job_metric_collector:
            self._job_metric_collector.collect_dataset_metric(
                metrics.dataset_name, metrics.dataset_size,
                metrics.storage_type)
            if metrics.task_type == elastic_training_pb2.TRAINING:
                self._job_metric_collector.collect_training_hyper_params(
                    metrics.num_epochs, metrics.batch_size)
        return True

    def _update_node_resource_usage(self, node_type, node_id,
                                    metrics: grpc.ResourceStats):
        """Record the cpu/memory/gpu usage reported by a node."""
        logger.debug(
            f'Update resource usage for {node_type}-{node_id},'
            f'cpu={metrics.cpu}, memory={metrics.memory},'
            f'gpu_stats={metrics.gpu_stats}')
        if self._job_manager:
            self._job_manager.update_node_resource_usage(
                node_type, node_id, metrics.cpu, metrics.memory,
                metrics.gpu_stats)
        return True

    def _collect_model_info(self, metrics: grpc.ModelInfo):
        """Forward model metrics to the metric collector, if configured."""
        if self._job_metric_collector:
            self._job_metric_collector.collect_model_metric(metrics)
        return True

    def _collect_global_step(self, metrics: grpc.GlobalStep):
        """Record a reported global step and evaluate autoscaling triggers."""
        self._speed_monitor.collect_global_step(
            metrics.step, metrics.timestamp)
        self._collect_runtime_stats()
        self._check_start_auto_scale_worker()
        return True

    def _restore_shard_checkpoint(self, message: grpc.ShardCheckpoint):
        """Restore dataset shard progress from a checkpoint payload."""
        success = self._task_manager.restore_dataset_from_checkpoint(
            message.content)
        return success

    def _collect_runtime_stats(self):
        """Collect runtime stats of running nodes for the metric collector."""
        if self._job_metric_collector and self._job_manager:
            nodes = self._job_manager.get_running_nodes()
            self._job_metric_collector.collect_runtime_stats(
                self._speed_monitor, nodes)

    def _report_task_result(self, request: grpc.TaskResult):
        """Record the completion (or failure) of a dataset shard task."""
        success = True
        if request.err_message:
            logger.warning('Worker reported error: ' + request.err_message)
            success = False
        task, _ = self._task_manager.report_dataset_task(request, success)

        # Non-training jobs never report global steps, so start
        # autoscaling on a timeout instead.
        if (not self._start_autoscale
                and self._job_manager
                and self._speed_monitor.completed_global_step == 0
                and (int(time.time()) - self._start_training_time
                     > _dlrover_context.seconds_to_autoscale_worker)):
            logger.info('Start autoscale for non-training jobs')
            self._job_manager.start_auto_scaling()
            self._start_autoscale = True

        if (self._job_metric_collector and task
                and task.task_type == elastic_training_pb2.PREDICTION):
            self._collect_runtime_stats()
            self._check_start_auto_scale_worker()
        return success

    def _check_start_auto_scale_worker(self):
        """Start worker autoscaling once enough speed samples were collected."""
        sample_count = self._speed_monitor.get_sample_count()
        if (not self._start_autoscale
                and sample_count >= _dlrover_context.sample_count_to_adjust_worker):
            logger.info('Start autoscale with %s stats samples', sample_count)
            self._job_manager.start_auto_scaling()
            self._start_autoscale = True

    def _update_cluster_version(self, message: grpc.ClusterVersion):
        """Update the cluster version of a worker or PS node."""
        if not self._elastic_ps_service:
            return False
        if message.task_type == NodeType.WORKER:
            self._elastic_ps_service.update_worker_version(
                message.task_id, message.version_type, message.version)
        elif message.task_type == NodeType.PS:
            self._elastic_ps_service.update_ps_version(
                message.task_id, message.version_type, message.version)
        return True

    def _update_node_address(self, message: grpc.NodeAddress):
        """Record the service address a node registered itself with."""
        self._job_manager.update_node_service_addr(
            node_type=message.type, node_id=message.id,
            service_addr=message.addr)
        return True

    def _update_node_status(self, message: grpc.NetworkStatus):
        """Record a node's network-check result, when the manager exists."""
        net_rdzv_manager = self._rdzv_managers.get(
            RendezvousName.NETWORK_CHECK, None)
        if net_rdzv_manager:
            succeed = message.status == NodeStatus.SUCCEEDED
            # NOTE(review): 'elasped_time' is the field name defined on the
            # message; fix the spelling together with the message schema.
            net_rdzv_manager.report_network_check_result(
                message.rank, succeed, message.elasped_time)
        return True

    def _update_node_event(self, message: grpc.NodeEvent):
        """Queue an exit event for a node (used by the Ray backend)."""
        # NOTE(review): the event type is passed as the Node *type* here --
        # confirm against the Node constructor that this is intended.
        node = Node(message.event_type, message.node.id)
        event = NodeEvent('exit', node)
        ray_event_queue.put(event)
        return True

    def _join_sync(self, node_type, node_id, message: grpc.SyncJoin):
        """Add a node to a named synchronization group."""
        success = False
        if self._sync_service:
            success = self._sync_service.join_sync(
                message.sync_name, node_type, node_id)
        return success

    def _sync_finished(self, message: grpc.SyncFinish):
        """Mark a named synchronization as finished."""
        success = False
        if self._sync_service:
            success = self._sync_service.sync_finished(message.sync_name)
        return success

    def _barrier(self, message: grpc.SyncBarrier):
        """Enter or notify a named barrier, depending on the message."""
        if not self._sync_service:
            return False
        if message.notify:
            success = self._sync_service.notify_barrier(message.barrier_name)
        else:
            success = self._sync_service.barrier(message.barrier_name)
        return success

    def _report_rdzv_params(self, message: grpc.RendezvousParams):
        """Propagate rendezvous parameters to all rendezvous managers."""
        for manager in self._rdzv_managers.values():
            # NOTE(review): 'max_ndoes' mirrors the manager's keyword
            # argument name; if it is a typo, fix both sides together.
            manager.update_rdzv_params(
                min_nodes=message.min_nodes,
                max_ndoes=message.max_nodes,
                waiting_timeout=message.waiting_timeout,
                node_unit=message.node_unit)
        return True

    def _report_failure(self, node_type, node_id, message: grpc.NodeFailure):
        """Forward a node's training failure to the job manager."""
        self._job_manager.handle_training_failure(
            node_type, node_id, message.restart_count, message.error_data,
            message.level)
        return True

    def _kv_store_set(self, message: grpc.KeyValuePair):
        """Store a key-value pair in the master's key-value store."""
        self._kv_store.set(message.key, message.value)
        return True

    def _report_paral_config(self, node_type, node_id,
                             message: grpc.ParallelConfig):
        """Record the parallelism config reported by a node."""
        if self._job_manager:
            logger.debug(
                'Update parallel config for %s-%s: %s',
                node_type, node_id, message)
            self._job_manager.update_node_paral_config(
                node_type, node_id, message)
        return True
class _UpConv(nn.Module):
def __init__(self, in_ch, out_ch, momentum=0.1):
super(_UpConv, self).__init__()
self.up = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv2d(in_ch, out_ch, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False), nn.BatchNorm2d(out_ch, momentum=momentum), nn.ReLU(inplace=True))
def forward(self, x):
x = self.up(x)
return x |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.