code stringlengths 101 5.91M |
|---|
('download_file', 'Download File', '"url": "<url>", "filename": "<filename>"', (lambda config: config.allow_downloads), 'Error: You do not have user authorization to download files locally.')
def download_file(url, filename, agent: Agent):
    """Download `url` to the local path `filename`, streaming in chunks.

    Shows a spinner with live progress, and retries transient gateway
    errors (502/503/504) up to three times with exponential backoff.

    Returns a human-readable status string in every case (success,
    HTTP error, or any other failure) rather than raising.
    """
    try:
        directory = os.path.dirname(filename)
        os.makedirs(directory, exist_ok=True)
        message = f'{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}'
        with Spinner(message, plain_output=agent.config.plain_output) as spinner:
            session = requests.Session()
            retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
            adapter = HTTPAdapter(max_retries=retry)
            # Bug fix: the two mount calls were corrupted; the retrying
            # adapter must be mounted for both plain and TLS schemes.
            session.mount('http://', adapter)
            session.mount('https://', adapter)
            total_size = 0
            downloaded_size = 0
            with session.get(url, allow_redirects=True, stream=True) as r:
                r.raise_for_status()
                # Content-Length may be absent; fall back to 0 so the
                # progress line still renders.
                total_size = int(r.headers.get('Content-Length', 0))
                downloaded_size = 0
                with open(filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                        downloaded_size += len(chunk)
                        progress = f'{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}'
                        spinner.update_message(f'{message} {progress}')
            # Bug fix: report the actual filename instead of "(unknown)".
            return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(downloaded_size)})'
    except requests.HTTPError as err:
        return f'Got an HTTP Error whilst trying to download file: {err}'
    except Exception as err:
        return f'Error: {err}'
class Square(BaseFunction):
    """Elementwise square scaled by ``self.norm``, one method per backend."""

    def tf(self, x):
        # TensorFlow backend.
        return tf.square(x) / self.norm

    def sp(self, x):
        # SymPy / scalar backend.
        return x ** 2 / self.norm

    def np(self, x):
        # NumPy backend.
        return np.square(x) / self.norm
class SpanBox(TextBlock):
    """A positioned text span used to reassemble lines from PDF-style boxes.

    `bbox` is (x0, y0, x1, y1). The merge logic sorts by `-bbox[1]` and
    compares bbox[1]/bbox[3] as the vertical extent, so y appears to grow
    upward (PDF coordinates) -- TODO confirm against TextBlock's convention.
    """

    def __init__(self, text: str, bbox: Tuple[(float, float, float, float)], block_ids: Set[str], cell_size: Tuple[(float, float)]):
        super(SpanBox, self).__init__(text)
        # Bounding box of this span.
        self.bbox: Tuple[(float, float, float, float)] = bbox
        # Identifiers of the source blocks this span came from.
        self.blocks: Set[str] = block_ids
        # Cell dimensions must be non-negative.
        assert ((cell_size[0] >= 0) and (cell_size[1] >= 0))
        self.cell_size: Tuple[(float, float)] = cell_size

    # NOTE(review): takes `cls` but carries no @classmethod decorator in
    # this copy -- the decorator may have been lost; verify with callers.
    def merge_continuous_lines(cls, text_blocks: List['SpanBox'], threshold=0.5, space_size=4):
        """Merge spans sharing a text line into single SpanBoxes.

        Spans whose vertical overlap ratio exceeds `threshold` are treated
        as one line; horizontal gaps between them are filled with one
        space per `space_size` units of gap.
        """
        # All inputs must be the same concrete type.
        assert (len(set(map(type, text_blocks))) == 1)
        if (len(text_blocks) <= 1):
            return text_blocks
        # Sort top-to-bottom, then left-to-right.
        text_blocks = sorted(text_blocks, key=(lambda b: ((- b.bbox[1]), b.bbox[0])))
        merged_text_blocks = []
        i = 0
        while (i < (len(text_blocks) - 1)):
            tbi = text_blocks[i]
            same_line_boxes = [tbi]
            for j in range((i + 1), len(text_blocks)):
                tbj = text_blocks[j]
                # Vertical span of the union and of the overlap of the
                # two boxes' y-extents.
                span = (max(tbi.bbox[3], tbj.bbox[3]) - tbj.bbox[1])
                overlap = (min(tbi.bbox[3], tbj.bbox[3]) - tbi.bbox[1])
                if ((overlap / span) > threshold):
                    same_line_boxes.append(tbj)
                    continue
                else:
                    break
            if (len(same_line_boxes) > 1):
                # Order the line's boxes left-to-right and join text,
                # padding horizontal gaps with spaces.
                same_line_boxes = sorted(same_line_boxes, key=(lambda b: b.bbox[0]))
                text = same_line_boxes[0].text.strip('\n')
                bbox = same_line_boxes[0].bbox
                for tbk in same_line_boxes[1:]:
                    spaces = max((tbk.bbox[0] - bbox[2]), 0)
                    text += (int((spaces // space_size)) * ' ')
                    text += tbk.text.strip('\n')
                    # Grow the running bbox to cover the merged boxes.
                    bbox = [bbox[0], min(bbox[1], tbk.bbox[1]), max(bbox[2], tbk.bbox[2]), max(bbox[3], tbk.bbox[3])]
                # Union of all contributing block-id sets.
                blocks = reduce(operator.or_, (b.blocks for b in same_line_boxes))
                merged_text_blocks.append(SpanBox(text, bbox, blocks, tbi.cell_size))
            else:
                merged_text_blocks.append(tbi)
            # Resume scanning after the last box examined.
            i = j
        # NOTE(review): the trailing block is appended only when the last
        # scan found a single-box line; confirm the final element is never
        # dropped or duplicated for your inputs.
        if (len(same_line_boxes) == 1):
            merged_text_blocks.append(text_blocks[(- 1)])
        return merged_text_blocks
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Materialize a LlamaTokenizer directory at `tokenizer_path`."""
    os.makedirs(tokenizer_path, exist_ok=True)
    # Empty special-tokens map; the config pins the tokenizer class and
    # an effectively unlimited max length.
    write_json({}, os.path.join(tokenizer_path, 'special_tokens_map.json'))
    tokenizer_config = {
        'bos_token': '',
        'eos_token': '',
        'model_max_length': int(1e+30),
        'tokenizer_class': 'LlamaTokenizer',
        'unk_token': '',
    }
    write_json(tokenizer_config, os.path.join(tokenizer_path, 'tokenizer_config.json'))
    shutil.copyfile(input_tokenizer_path, os.path.join(tokenizer_path, 'tokenizer.model'))
class TSBase(object):
    """SWIG-generated Python proxy for the native snap `TSBase` class."""
    # Ownership flag of the underlying C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, Nm):
        # Construct the wrapped native object with name `Nm`.
        _snap.TSBase_swiginit(self, _snap.new_TSBase(Nm))
    __swig_destroy__ = _snap.delete_TSBase

    def GetSNm(self):
        """Return this TSBase's name from the native layer."""
        return _snap.TSBase_GetSNm(self)
def local_prediction_rigidity(X_train, X_test, alpha):
    """Compute the local prediction rigidity (LPR) for each test atom.

    Parameters
    ----------
    X_train : list of (n_atoms_i, n_features) arrays, one per structure.
    X_test : list of (n_atoms_j, n_features) arrays, one per structure.
    alpha : regularization strength added to the covariance diagonal.

    Returns
    -------
    (LPR, rank_diff) : `LPR` is a list of 1-D arrays (one per test
    structure, one value per atom); `rank_diff` is the feature count
    minus the rank of the regularized covariance (0 when full rank).
    """
    X_atom = np.vstack(X_train)
    # Global scale factor so features are O(1) on average.
    sfactor = np.sqrt(np.mean(X_atom ** 2, axis=0).sum())
    X_struc = []
    for X_i in X_train:
        X_struc.append(np.mean(X_i / sfactor, axis=0))
    X_struc = np.vstack(X_struc)
    # Bug fix: the matmul operators were lost in this copy
    # ("X_struc.T X_struc"); restore `@`.
    XX = X_struc.T @ X_struc
    Xprime = XX + alpha * np.eye(XX.shape[0])
    rank_diff = X_struc.shape[1] - np.linalg.matrix_rank(Xprime)
    Xinv = np.linalg.pinv(Xprime)
    lens = [len(X) for X in X_test]
    test_idxs = np.cumsum([0] + lens)
    num_test = len(X_test)
    X_test = np.vstack(X_test)
    atom_count = X_test.shape[0]
    LPR_np = np.zeros(atom_count)
    for ai in range(atom_count):
        Xi = X_test[ai].reshape(1, -1) / sfactor
        # LPR is the inverse predictive variance Xi @ Xprime^-1 @ Xi.T;
        # .item() extracts the scalar from the (1, 1) result (assigning a
        # size-1 array into a scalar slot is deprecated in NumPy).
        quad = (Xi @ Xinv @ Xi.T).item()
        LPR_np[ai] = 1 / quad
    # Split the flat per-atom values back into per-structure arrays.
    LPR = [LPR_np[test_idxs[i]:test_idxs[i + 1]] for i in range(num_test)]
    return (LPR, rank_diff)
def _impl(array):
    """Return True when `array`'s outermost layout is a tuple record."""
    layout = ak.operations.ak_to_layout._impl(
        array,
        allow_record=True,
        allow_unknown=False,
        none_policy='error',
        regulararray=True,
        use_from_iter=True,
        primitive_policy='error',
        string_policy='as-characters',
    )
    return layout.is_tuple
def test_option_numpy_1():
    """Round-trip an optional-int64 type string through deduce_type."""
    text = '?int64'
    parsed = deduce_type(text)
    assert isinstance(parsed, ak.types.OptionType)
    assert str(parsed) == text
_module()
class LinearLrUpdaterHook(LrUpdaterHook):
    """Linearly anneal the learning rate from base_lr toward target_lr.

    Annealing begins at `start` (epochs or iterations, per `by_epoch`),
    and the interpolation factor is quantized to `interval`-sized steps.
    """

    def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
        super().__init__(**kwargs)
        self.target_lr = target_lr
        self.start = start
        self.interval = interval

    def get_lr(self, runner, base_lr):
        """Interpolate between base_lr and target_lr by training progress."""
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        assert max_progress >= self.start
        if max_progress == self.start:
            # No annealing range; keep the base learning rate.
            return base_lr
        # Quantize elapsed and total progress to `interval` chunks so the
        # lr only changes once per interval.
        elapsed = max(0, progress - self.start) // self.interval
        total = (max_progress - self.start) // self.interval
        factor = elapsed / total
        return base_lr + (self.target_lr - base_lr) * factor
class SentenceLevelScorer(scorer.Scorer):
    """Abstract scorer accumulating loss, labels and predictions per example."""
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        super(SentenceLevelScorer, self).__init__()
        self._total_loss = 0
        self._true_labels = []
        self._preds = []

    def update(self, results):
        """Fold one batch of results into the running statistics."""
        super(SentenceLevelScorer, self).update(results)
        self._total_loss += results['loss']
        # Some tasks report gold labels as 'label_ids', others as 'targets'.
        if 'label_ids' in results:
            self._true_labels.append(results['label_ids'])
        else:
            self._true_labels.append(results['targets'])
        self._preds.append(results['predictions'])

    def get_loss(self):
        # Mean loss over every example seen so far.
        return self._total_loss / len(self._true_labels)
def _seg_51():
    """Return one segment of the IDNA/UTS-46 codepoint mapping table.

    Each tuple is (codepoint, status, mapping): 'M' = mapped, '3' =
    disallowed_STD3_valid.  NOTE(review): many mapping targets appear
    here as empty strings; the upstream table maps these fullwidth /
    halfwidth codepoints to non-ASCII characters, so the blanks look
    like an encoding loss in this copy -- verify against the original
    `idna` package data before relying on this segment.
    """
    return [(65317, 'M', u'e'), (65318, 'M', u'f'), (65319, 'M', u'g'), (65320, 'M', u'h'), (65321, 'M', u'i'), (65322, 'M', u'j'), (65323, 'M', u'k'), (65324, 'M', u'l'), (65325, 'M', u'm'), (65326, 'M', u'n'), (65327, 'M', u'o'), (65328, 'M', u'p'), (65329, 'M', u'q'), (65330, 'M', u'r'), (65331, 'M', u's'), (65332, 'M', u't'), (65333, 'M', u'u'), (65334, 'M', u'v'), (65335, 'M', u'w'), (65336, 'M', u'x'), (65337, 'M', u'y'), (65338, 'M', u'z'), (65339, '3', u'['), (65340, '3', u'\\'), (65341, '3', u']'), (65342, '3', u'^'), (65343, '3', u'_'), (65344, '3', u'`'), (65345, 'M', u'a'), (65346, 'M', u'b'), (65347, 'M', u'c'), (65348, 'M', u'd'), (65349, 'M', u'e'), (65350, 'M', u'f'), (65351, 'M', u'g'), (65352, 'M', u'h'), (65353, 'M', u'i'), (65354, 'M', u'j'), (65355, 'M', u'k'), (65356, 'M', u'l'), (65357, 'M', u'm'), (65358, 'M', u'n'), (65359, 'M', u'o'), (65360, 'M', u'p'), (65361, 'M', u'q'), (65362, 'M', u'r'), (65363, 'M', u's'), (65364, 'M', u't'), (65365, 'M', u'u'), (65366, 'M', u'v'), (65367, 'M', u'w'), (65368, 'M', u'x'), (65369, 'M', u'y'), (65370, 'M', u'z'), (65371, '3', u'{'), (65372, '3', u'|'), (65373, '3', u'}'), (65374, '3', u'~'), (65375, 'M', u''), (65376, 'M', u''), (65377, 'M', u'.'), (65378, 'M', u''), (65379, 'M', u''), (65380, 'M', u''), (65381, 'M', u''), (65382, 'M', u''), (65383, 'M', u''), (65384, 'M', u''), (65385, 'M', u''), (65386, 'M', u''), (65387, 'M', u''), (65388, 'M', u''), (65389, 'M', u''), (65390, 'M', u''), (65391, 'M', u''), (65392, 'M', u''), (65393, 'M', u''), (65394, 'M', u''), (65395, 'M', u''), (65396, 'M', u''), (65397, 'M', u''), (65398, 'M', u''), (65399, 'M', u''), (65400, 'M', u''), (65401, 'M', u''), (65402, 'M', u''), (65403, 'M', u''), (65404, 'M', u''), (65405, 'M', u''), (65406, 'M', u''), (65407, 'M', u''), (65408, 'M', u''), (65409, 'M', u''), (65410, 'M', u''), (65411, 'M', u''), (65412, 'M', u''), (65413, 'M', u''), (65414, 'M', u''), (65415, 'M', u''), (65416, 'M', u'')]
def register_Ns3ApplicationContainer_methods(root_module, cls):
    """Register pybindgen bindings for ns3::ApplicationContainer.

    Generated-style code: declares the container's constructors and the
    Add/Begin/End/Get/GetN/Start/StartWithJitter/Stop API.
    """
    cls.add_constructor([param('ns3::ApplicationContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_constructor([param('std::string', 'name')])
    # Three Add overloads: container, pointer, and name lookup.
    cls.add_method('Add', 'void', [param('ns3::ApplicationContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_method('Add', 'void', [param('std::string', 'name')])
    cls.add_method('Begin', 'ns3::ApplicationContainer::Iterator', [], is_const=True)
    cls.add_method('End', 'ns3::ApplicationContainer::Iterator', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('Start', 'void', [param('ns3::Time', 'start')])
    cls.add_method('StartWithJitter', 'void', [param('ns3::Time', 'start'), param('ns3::Ptr< ns3::RandomVariableStream >', 'rv')])
    cls.add_method('Stop', 'void', [param('ns3::Time', 'stop')])
    return
class RunDirectiveNotAllowedInUserRules(ShowyourworkException):
    """Raised when a user-defined rule uses the forbidden `run` directive."""

    def __init__(self, name):
        message = (
            f'The `run` directive is not allowed in user-defined rules. '
            f'Please use `script` or `shell` instead in rule {name}.'
        )
        super().__init__(message)
def env_0():
    """Build a small Warehouse env with agent 0 carrying the requested shelf."""
    env = Warehouse(3, 8, 3, 1, 0, 1, 5, 10, None, RewardType.GLOBAL)
    env.reset()
    agent = env.agents[0]
    shelf = env.shelfs[0]
    # Co-locate the agent and the shelf at (4, 27), agent facing down.
    agent.x, agent.y = 4, 27
    agent.dir = Direction.DOWN
    shelf.x, shelf.y = 4, 27
    agent.carrying_shelf = shelf
    # Make the carried shelf the first requested one.
    env.request_queue[0] = shelf
    env._recalc_grid()
    return env
def test_psf_estimation(psf_data, true_psf_file, kernel=None, metric='mean'):
    """Score estimated PSF images against the ground truth stored in a file."""
    true_psf = read_file(true_psf_file)
    # Estimated and true stacks must be the same shape to be comparable.
    if psf_data.shape != true_psf.shape:
        raise ValueError('The number of true PSF images must match the number estimated PSF images.')
    return test_images(psf_data, true_psf, kernel, metric)
def average_checkpoints(inputs):
    """Average the 'model' parameters of several checkpoint files.

    Args:
        inputs: paths of checkpoints to load; all must contain the same
            parameter keys in the same order.

    Returns:
        The first checkpoint's state dict with its 'model' entry replaced
        by the element-wise average over all checkpoints.

    Raises:
        KeyError: if a checkpoint's parameter keys differ from the first's.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for fpath in inputs:
        with PathManager.open(fpath, 'rb') as f:
            # Force every tensor onto CPU regardless of where it was saved.
            state = torch.load(f, map_location=(lambda s, _: torch.serialization.default_restore_location(s, 'cpu')))
        if (new_state is None):
            # Keep the first checkpoint as the template for the output.
            new_state = state
        model_params = state['model']
        model_params_keys = list(model_params.keys())
        if (params_keys is None):
            params_keys = model_params_keys
        elif (params_keys != model_params_keys):
            raise KeyError('For checkpoint {}, expected list of params: {}, but found: {}'.format(f, params_keys, model_params_keys))
        for k in params_keys:
            p = model_params[k]
            if isinstance(p, torch.HalfTensor):
                # Accumulate fp16 weights in fp32 to limit rounding error.
                p = p.float()
            if (k not in params_dict):
                params_dict[k] = p.clone()
            else:
                params_dict[k] += p
    averaged_params = collections.OrderedDict()
    for (k, v) in params_dict.items():
        averaged_params[k] = v
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            # Integer tensors (e.g. step counters) use floor division.
            averaged_params[k] //= num_models
    new_state['model'] = averaged_params
    return new_state
class ImageTextPairInstructDataset(ImageTextPairDataset):
    """Image-text pair dataset variant for instruction tuning.

    Moves the caption into `text_output` and replaces `text_input` with
    the processed empty prompt.
    """

    def __getitem__(self, index):
        data = super().__getitem__(index)
        # Idiom fix: `is not None` instead of `!= None` (avoids invoking
        # __eq__ on the sample dict).
        if data is not None:
            data['text_output'] = data['text_input']
            data['text_input'] = self.text_processor('')
        return data
def skip_if_no_gpu(func):
    """Decorator: exit with a skip code unless enough CUDA devices exist.

    The wrapped test requires CUDA and at least WORLD_SIZE visible
    devices; otherwise the process exits with the matching TEST_SKIPS
    exit code. The bare `(func)` line in the original was a stripped
    `@wraps(func)` decorator; restored here so the wrapper keeps the
    wrapped function's metadata.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS['no_cuda'].exit_code)
        if torch.cuda.device_count() < int(os.environ['WORLD_SIZE']):
            message = 'Need at least {} CUDA devices'.format(os.environ['WORLD_SIZE'])
            TEST_SKIPS['multi-gpu'] = TestSkip(75, message)
            sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
        return func(*args, **kwargs)
    return wrapper
def eval_loop(preprocess_fn, network_factory, data_x, data_y, camera_indices, log_dir, eval_log_dir, image_shape=None, run_id=None, loss_mode='cosine-softmax', num_galleries=10, random_seed=4321):
    """Continuously evaluate CMC re-identification metrics on checkpoints.

    Builds `num_galleries` probe/gallery splits from (data_y,
    camera_indices), embeds probe and gallery images with the network
    from `network_factory`, and reports streaming CMC top-k for
    k in {1, 5, 10, 20} every 60 seconds via the queued trainer.

    `data_x` may be an ndarray of images or a sequence of image file
    paths (then `image_shape` is required and images are decoded and
    resized on the fly).
    """
    if (image_shape is None):
        # Without an explicit shape we must be able to read it off the array.
        assert (type(data_x) == np.ndarray)
        image_shape = data_x.shape[1:]
    elif (type(data_x) == np.ndarray):
        assert (data_x.shape[1:] == image_shape)
    # Non-array data means we got filenames and must decode images.
    read_from_file = (type(data_x) != np.ndarray)
    (probes, galleries) = ([], [])
    # One probe/gallery split per evaluation round, each with its own seed.
    for i in range(num_galleries):
        (probe_indices, gallery_indices) = util.create_cmc_probe_and_gallery(data_y, camera_indices, seed=(random_seed + i))
        probes.append(probe_indices)
        galleries.append(gallery_indices)
    (probes, galleries) = (np.asarray(probes), np.asarray(galleries))
    # Keep the index-feeding placeholders on CPU.
    with tf.device('/cpu:0'):
        (num_probes, num_gallery_images) = (probes.shape[1], galleries.shape[1])
        probe_idx_var = tf.placeholder(tf.int64, (None, num_probes))
        gallery_idx_var = tf.placeholder(tf.int64, (None, num_gallery_images))
    trainer = queued_trainer.QueuedTrainer([probe_idx_var, gallery_idx_var])
    data_x_var = tf.constant(data_x)
    data_y_var = tf.constant(data_y)
    (probe_idx_var, gallery_idx_var) = trainer.get_input_vars(batch_size=1)
    probe_idx_var = tf.squeeze(probe_idx_var)
    gallery_idx_var = tf.squeeze(gallery_idx_var)
    # Gather the probe images/labels for the current split.
    probe_x_var = tf.gather(data_x_var, probe_idx_var)
    if read_from_file:
        # Decode JPEGs from disk and resize to the working shape.
        num_channels = (image_shape[(- 1)] if (len(image_shape) == 3) else 1)
        probe_x_var = tf.map_fn((lambda x: tf.image.decode_jpeg(tf.read_file(x), channels=num_channels)), probe_x_var, dtype=tf.uint8)
        probe_x_var = tf.image.resize_images(probe_x_var, image_shape[:2])
    probe_x_var = tf.map_fn((lambda x: preprocess_fn(x, is_training=False)), probe_x_var, back_prop=False, dtype=tf.float32)
    probe_y_var = tf.gather(data_y_var, probe_idx_var)
    # Same pipeline for the gallery images/labels.
    gallery_x_var = tf.gather(data_x_var, gallery_idx_var)
    if read_from_file:
        num_channels = (image_shape[(- 1)] if (len(image_shape) == 3) else 1)
        gallery_x_var = tf.map_fn((lambda x: tf.image.decode_jpeg(tf.read_file(x), channels=num_channels)), gallery_x_var, dtype=tf.uint8)
        gallery_x_var = tf.image.resize_images(gallery_x_var, image_shape[:2])
    gallery_x_var = tf.map_fn((lambda x: preprocess_fn(x, is_training=False)), gallery_x_var, back_prop=False, dtype=tf.float32)
    gallery_y_var = tf.gather(data_y_var, gallery_idx_var)
    # Embed probes and gallery in one forward pass, then split again.
    probe_and_gallery_x_var = tf.concat(axis=0, values=[probe_x_var, gallery_x_var])
    (probe_and_gallery_x_var, _) = network_factory(probe_and_gallery_x_var)
    num_probe = tf.shape(probe_x_var)[0]
    probe_x_var = tf.slice(probe_and_gallery_x_var, [0, 0], [num_probe, (- 1)])
    gallery_x_var = tf.slice(probe_and_gallery_x_var, [num_probe, 0], [(- 1), (- 1)])
    # Cosine distance for the cosine-softmax loss, Euclidean otherwise.
    distance_measure = (metrics.cosine_distance if (loss_mode == 'cosine-softmax') else metrics.pdist)

    def cmc_metric_at_k(k):
        # Streaming CMC top-k over successive probe/gallery splits.
        return metrics.streaming_mean_cmc_at_k(probe_x_var, probe_y_var, gallery_x_var, gallery_y_var, k=k, measure=distance_measure)
    (names_to_values, names_to_updates) = slim.metrics.aggregate_metric_map({('%d' % k): cmc_metric_at_k(k) for k in [1, 5, 10, 20]})
    for (metric_name, metric_value) in names_to_values.items():
        tf.summary.scalar(metric_name, metric_value)
    trainer.evaluate((probes, galleries), log_dir, eval_log_dir, run_id=run_id, eval_op=list(names_to_updates.values()), eval_interval_secs=60)
def compile_time_env_variables():
    """Expose interpreter facts (platform, hexversion, major version) as a dict."""
    return {
        'PY_PLATFORM': sys.platform,
        'PY_VERSION_HEX': sys.hexversion,
        'PY_MAJOR_VERSION': sys.version_info[0],
    }
def test_record_to_arrow():
    """Round-trip a RecordArray (plain, unmasked and optional fields) through Arrow."""
    x_content = ak.highlevel.Array([1.1, 2.2, 3.3, 4.4, 5.5]).layout
    z_content = ak.highlevel.Array([1, 2, 3, None, 5]).layout
    record = ak.contents.RecordArray(
        [x_content, ak.contents.UnmaskedArray(x_content), z_content],
        ['x', 'y', 'z'],
    )
    arrow_array = record.to_arrow()
    assert isinstance(arrow_array.type.storage_type, pyarrow.StructType)
    expected = [
        {'x': 1.1, 'y': 1.1, 'z': 1},
        {'x': 2.2, 'y': 2.2, 'z': 2},
        {'x': 3.3, 'y': 3.3, 'z': 3},
        {'x': 4.4, 'y': 4.4, 'z': None},
        {'x': 5.5, 'y': 5.5, 'z': 5},
    ]
    assert arrow_array.to_pylist() == expected
class MultiPassSieveModel():
    """Apply sieve models in order; later sieves only fill undecided rows.

    A row counts as undecided while both `a_coref` and `b_coref` are
    still False.
    """

    def __init__(self, *models):
        self.models = models

    def predict(self, df):
        """Return per-row [a_coref, b_coref] predictions as a list of lists."""
        # Start with every row predicted (False, False).
        preds = pd.DataFrame([[False] * 2] * len(df), columns=['a_coref', 'b_coref'])
        for model in self.models:
            stage = pd.DataFrame(model.predict(df), columns=['a_coref', 'b_coref'])
            # Only overwrite rows that no earlier sieve has decided.
            undecided = (~preds['a_coref']) & (~preds['b_coref'])
            preds[undecided] = stage[undecided]
        return preds.values.tolist()
def make_matrix(arr, dt=None):
    """Build a Taichi matrix/vector expression from a (possibly nested) list.

    An empty list yields a zero-length i32 vector; a list of iterables
    becomes a matrix (rows flattened in row-major order); a flat list
    becomes a vector. When `dt` is None the element dtype is inferred
    from the values, otherwise it is normalized via `cook_dtype`.
    """
    if (len(arr) == 0):
        shape = [0]
        # Placeholder dtype for the empty case.
        dt = primitive_types.i32
    else:
        if isinstance(arr[0], Iterable):
            # 2-D input: record (rows, cols) and flatten the elements.
            shape = [len(arr), len(arr[0])]
            arr = [elt for row in arr for elt in row]
        else:
            shape = [len(arr)]
        if (dt is None):
            dt = _infer_array_dt(arr)
        else:
            dt = cook_dtype(dt)
    # Emit the matrix expression through the currently compiling kernel's
    # AST builder, tagging it with the caller's source location.
    return expr.Expr(impl.get_runtime().compiling_callable.ast_builder().make_matrix_expr(shape, dt, [expr.Expr(elt).ptr for elt in arr], ti_python_core.DebugInfo(impl.get_runtime().get_current_src_info())))
def ManualLLVM(inputs, *outputs):
    """Compile symbolic `outputs` as functions of `inputs` via SymEngine's LLVM backend.

    Returns a callable taking numeric arrays (one per input block) and
    returning a list of arrays, one per output, each reshaped to its
    original symbolic shape.
    """
    outputs_ravel = list(itertools.chain(*outputs))
    cb = se.Lambdify(inputs, outputs_ravel, backend='llvm')

    def func(*args):
        result = []
        # Flat output buffer filled in place by unsafe_real.
        n = np.empty(len(outputs_ravel))
        # NOTE(review): the return value `t` is unused; results appear to
        # be written into `n` -- confirm against symengine's Lambdify docs.
        t = cb.unsafe_real(np.concatenate([arg.ravel() for arg in args]), n)
        start = 0
        # Slice the flat buffer back into per-output shaped arrays.
        for output in outputs:
            elems = reduce(mul, output.shape)
            result.append(n[start:(start + elems)].reshape(output.shape))
            start += elems
        return result
    return func
class Ego4DDataset(BaseDataset):
    """Ego4D video-caption dataset with train/val/test splits."""

    def __init__(self, *args, split='', **kwargs):
        assert split in ['train', 'val', 'test']
        self.split = split
        if split == 'train':
            names = ['ego4d_train']
        elif split == 'val':
            names = ['ego4d_val']
        elif split == 'test':
            names = ['ego4d_test']
        super().__init__(*args, **kwargs, names=names, text_column_name='caption')
        self._load_metadata()

    def _load_metadata(self):
        """Load this split's clip metadata CSV (tab-separated, no header)."""
        metadata_dir = './meta_data/ego4d'
        # val and test share the same cleaned annotation file.
        split_files = {'train': 'ego4d_train_subset.csv', 'val': 'ego4d_val_ts_clean.csv', 'test': 'ego4d_val_ts_clean.csv'}
        target_split_fp = split_files[self.split]
        self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, error_bad_lines=False)

    def _get_video_path(self, sample):
        """Return (absolute, relative) paths of the sample's video file."""
        rel_video_fp = (sample[0] + '.mp4')
        full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp)
        if not os.path.exists(full_video_fp):
            # Bug fix: the original built `Exception(IOError)` without
            # raising it, silently returning a missing path.
            raise IOError(f'Video file does not exist: {full_video_fp}')
        return (full_video_fp, rel_video_fp)

    def _get_caption(self, sample):
        # Caption text lives in column 6 of the metadata row.
        return sample[6]

    def get_raw_video(self, sample):
        """Decode `self.num_frames` frames from the sample's clip window."""
        (abs_fp, rel_fp) = self._get_video_path(sample)
        (frame_end, frame_loc) = (int(sample[3]), int(sample[5]))
        imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames)
        if imgs is None:
            raise Exception('Invalid video!', rel_fp)
        else:
            return imgs
def prepare_dataset(path, built_vocab=None, user_only=False):
    """Read a `|||`-separated dialogue corpus into model-ready tensors.

    Each non-blank line is either a bot turn (one field) or a user turn
    (`utterance|||tags|||intent`); blank lines reset the dialogue
    history. When `built_vocab` is None, vocabularies are built from the
    data; otherwise the given (word2index, slot2index, intent2index)
    triple is reused.

    Returns (p_data, word2index, slot2index, intent2index) when building
    vocabularies, else just p_data. Each p_data entry is
    [history_tensors, user_tensor, tag_tensor, intent_tensor].
    """
    data = open(path, 'r', encoding='utf-8').readlines()
    p_data = []
    history = [['<null>']]
    for d in data:
        if (d == '\n'):
            # Blank line: a new dialogue starts, reset the history.
            history = [['<null>']]
            continue
        dd = d.replace('\n', '').split('|||')
        if (len(dd) == 1):
            # Bot turn: optionally excluded from the history.
            if not user_only:
                bot = dd[0].split()
                history.append(bot)
        else:
            user = dd[0].split()
            tag = dd[1].split()
            intent = dd[2]
            # Snapshot the history before appending the current turn.
            temp = deepcopy(history)
            p_data.append([temp, user, tag, intent])
            history.append(user)
    if (built_vocab is None):
        (historys, currents, slots, intents) = list(zip(*p_data))
        vocab = list(set(flatten(currents)))
        slot_vocab = list(set(flatten(slots)))
        intent_vocab = list(set(intents))
        word2index = {'<pad>': 0, '<unk>': 1, '<null>': 2, '<s>': 3, '</s>': 4}
        for vo in vocab:
            # Idiom fix: membership tests instead of `.get(vo) == None`.
            if vo not in word2index:
                word2index[vo] = len(word2index)
        slot2index = {'<pad>': 0}
        for vo in slot_vocab:
            if vo not in slot2index:
                slot2index[vo] = len(slot2index)
        intent2index = {}
        for vo in intent_vocab:
            if vo not in intent2index:
                intent2index[vo] = len(intent2index)
    else:
        (word2index, slot2index, intent2index) = built_vocab
    # Convert every example's token lists into index tensors in place.
    for t in tqdm(p_data):
        for (i, history) in enumerate(t[0]):
            t[0][i] = prepare_sequence(history, word2index).view(1, (- 1))
        t[1] = prepare_sequence(t[1], word2index).view(1, (- 1))
        t[2] = prepare_sequence(t[2], slot2index).view(1, (- 1))
        t[3] = torch.LongTensor([intent2index[t[3]]]).view(1, (- 1))
    if (built_vocab is None):
        return (p_data, word2index, slot2index, intent2index)
    else:
        return p_data
def extract_convsdae_yale(slope=0.0):
    """Build the conv-SDAE feature extractor configured for the Yale dataset."""
    layer_dims = [1, 50, 50, 50, 50, 50, 10]
    out_padding = [(0, 0), (1, 1), (1, 1), (0, 1), (0, 1)]
    return extractconvSDAE(dim=layer_dims, output_padding=out_padding, numpen=6, slope=slope)
def check_labels(labels, dataset):
    """Raise if `dataset` contains labels the model's `labels` don't cover."""
    unknown = [label for label in dataset_labels(dataset) if label not in labels]
    if unknown:
        raise RuntimeError('Dataset contains labels which the model does not know about:' + str(unknown))
def test_load_SpatialEvents():
    """Check tau2020sse_nigens spatial-event loading and annotation validators."""
    dataset = tau2020sse_nigens.Dataset(TEST_DATA_HOME)
    clip = dataset.clip('foa_dev/fold1_room1_mix001_ov1')
    annotations_path = clip.csv_path
    tau2020_annotations = tau2020sse_nigens.load_spatialevents(annotations_path)
    # Expected fixture values for the six events in the test clip.
    confidence = ([1.0] * 6)
    intervals = [[[0, 0], [0.8, 1.0]], [[0.3, 0.5]], [[0.8, 0.9]], [[0.9, 0.9]], [[0.9, 2.8], [3.1, 4.8], [6.5, 7.6]], [[11.0, 11.1]]]
    azimuths = [[[10], [10, 11, 12]], [[5]], [[6, 5]], [[(- 5)]], [[25, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, 1, 3, 5, 7, 9], [15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49], [83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105]], [[(- 47)]]]
    elevations = [[[2], [2, 2, 2]], [[4]], [[7, 6]], [[6]], [[1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 4, 4, 5, 5, 5], [5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 4, 4, 4, 5, 5], [6, 5, 5, 5, 4, 5, 5, 5, 4, 4, 4, 4]], [[9]]]
    # Distances are unlabeled in this dataset: arrays of None per frame.
    distances = [[np.array(([None] * len(azimuth))) for azimuth in event_azimuths] for event_azimuths in azimuths]
    labels = ['1', '2', '4', '4', '5', '6']
    clip_number_indices = ['0', '0', '0', '1', '0', '0']
    assert np.allclose(tau2020_annotations.time_step, 0.1)
    assert np.allclose(confidence, tau2020_annotations.confidence)
    for pair in [zip(elevations, tau2020_annotations.elevations), zip(azimuths, tau2020_annotations.azimuths)]:
        for (event_test_data, event_data) in pair:
            for (test_data, data) in zip(event_test_data, event_data):
                assert np.allclose(test_data, data)
    for pair in [zip(distances, tau2020_annotations.distances)]:
        for (event_test_data, event_data) in pair:
            for (test_data, data) in zip(event_test_data, event_data):
                # NOTE(review): this comparison's result is discarded (no
                # assert) -- probably meant to be asserted, but the None
                # arrays make equality semantics unclear; verify intent.
                (test_data == data)
    for (test_label, label) in zip(labels, tau2020_annotations.labels):
        assert (test_label == label)
    for (test_clip_index, clip_index) in zip(clip_number_indices, tau2020_annotations.clip_number_index):
        assert (test_clip_index == clip_index)
    # Validators must reject mismatched lengths and out-of-range locations.
    with pytest.raises(ValueError):
        annotations.validate_time_steps(0.1, [4, 5, 7], [2, 1])
    with pytest.raises(ValueError):
        annotations.validate_time_steps(0.1, np.array([[4, 5, 7], [1, 2, 3]]), [0.0, 0.2])
    with pytest.raises(ValueError):
        annotations.validate_locations(np.array([[4, 5], [2, 3]]))
    with pytest.raises(ValueError):
        annotations.validate_locations(np.array([[90, 5, None], [2, 3, 4]]))
    with pytest.raises(ValueError):
        annotations.validate_locations(np.array([[91, 5, None], [2, 3, None]]))
    with pytest.raises(ValueError):
        annotations.validate_locations(np.array([[90, 181, None], [2, 3, None]]))
.torch
def test_prediction_sasrec(item_user_sequential_dataset, train_sasrec_loader):
    """A trained SasRec should emit one (1, 6) prediction per dataset item."""
    pred_dataset = SasRecPredictionDataset(item_user_sequential_dataset, max_sequence_length=5)
    pred_loader = torch.utils.data.DataLoader(pred_dataset)
    model = SasRec(tensor_schema=item_user_sequential_dataset._tensor_schema, max_seq_len=5, hidden_size=64)
    trainer = L.Trainer(max_epochs=1)
    trainer.fit(model, train_sasrec_loader)
    predicted = trainer.predict(model, pred_loader)
    assert len(predicted) == len(pred_dataset)
    assert predicted[0].size() == (1, 6)
def pure_graph(dtype, transposed, expansion, veclen, alpha, beta, expansion_args=None):
    """Build a standalone DaCe SDFG computing a vectorized GEMV.

    Constructs y = alpha*op(A)@x + beta*y with op(A) = A or A.T depending
    on `transposed`, using the given BLAS `expansion` implementation.
    `veclen` packs that many `dtype` elements per vector element along
    the column axis.
    """
    sdfg = dace.SDFG(f'gemv_{expansion}_{dtype}_{transposed}_w{veclen}')
    m = dace.symbol('m')
    n = dace.symbol('n')
    # Columns are counted in vectors of `veclen` elements.
    # NOTE(review): this rebinds `n` to the symbolic expression n/veclen.
    n /= veclen
    vtype = dace.vector(dtype, veclen)
    state = sdfg.add_state('gemv_compute')
    A_rows = m
    A_cols = n
    # Input/output lengths swap when operating on A transposed.
    x_size = (n if (not transposed) else m)
    y_size = (m if (not transposed) else n)
    sdfg.add_array('A', shape=[A_rows, A_cols], dtype=vtype)
    sdfg.add_array('x', shape=[x_size], dtype=(dtype if transposed else vtype))
    sdfg.add_array('y', shape=[y_size], dtype=(vtype if transposed else dtype))
    A = state.add_read('A')
    x = state.add_read('x')
    result = state.add_write('y')
    gemv_node = blas.Gemv('gemv', transA=transposed, alpha=alpha, beta=beta)
    gemv_node.implementation = expansion
    # Wire full-array memlets into/out of the library node.
    state.add_memlet_path(A, gemv_node, dst_conn='_A', memlet=Memlet(f'A[0:{A_rows}, 0:{A_cols}]'))
    state.add_memlet_path(x, gemv_node, dst_conn='_x', memlet=Memlet(f'x[0:{x_size}]'))
    state.add_memlet_path(gemv_node, result, src_conn='_y', memlet=Memlet(f'y[0:{y_size}]'))
    if (expansion_args is not None):
        # Expand the library node immediately with caller-provided options.
        gemv_node.expand(sdfg, state, **expansion_args)
    return sdfg
def before_record_request(request: Any) -> Any:
    """Sanitize a request before recording: rewrite the hostname, drop disallowed hosts, strip timestamps."""
    rewritten = replace_request_hostname(request, ORIGINAL_URL, NEW_URL)
    filtered = filter_hostnames(rewritten)
    return replace_timestamp_in_request(filtered)
def GetIOU(Pred, GT, NumClasses, ClassNames=(), DisplyResults=False):
    """Per-class intersection-over-union between predicted and GT label maps.

    Parameters
    ----------
    Pred, GT : integer label arrays of identical shape.
    NumClasses : number of label classes to evaluate.
    ClassNames : optional class names, only used when DisplyResults is
        True. (Default changed from a mutable `[]` to an immutable
        tuple; read-only use, so behavior is unchanged.)
    DisplyResults : print per-class IoU, mean IoU and pixel accuracy.

    Returns
    -------
    (ClassIOU, ClassWeight) : per-class IoU and union pixel counts;
    classes absent from both maps keep 0 in both arrays.
    """
    ClassIOU = np.zeros(NumClasses)
    ClassWeight = np.zeros(NumClasses)
    for i in range(NumClasses):
        # Pixels where both maps agree on class i.
        Intersection = np.float32(np.sum((Pred == GT) * (GT == i)))
        Union = np.sum(GT == i) + np.sum(Pred == i) - Intersection
        if Union > 0:
            ClassIOU[i] = Intersection / Union
            ClassWeight[i] = Union
    if DisplyResults:
        for i in range(len(ClassNames)):
            print(ClassNames[i] + ') ' + str(ClassIOU[i]))
        print('Mean Classes IOU) ' + str(np.mean(ClassIOU)))
        print('Image Predicition Accuracy)' + str(np.float32(np.sum(Pred == GT)) / GT.size))
    return (ClassIOU, ClassWeight)
def simSetBoolParameter(parameter, value):
    """Set a boolean simulator parameter, validating the native return code."""
    result = lib.simSetBoolParameter(parameter, value)
    _check_return(result)
class BitModel(metaclass=DummyObject):
    """Placeholder for the real BitModel when its backend is unavailable."""
    # The real implementation requires the torch backend.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error if torch is not installed.
        requires_backends(self, ['torch'])
def get_installed_apt_pkgs() -> set:
    """Parse dpkg's status database into the set of installed package names."""
    with open('/var/lib/dpkg/status') as status_file:
        contents = status_file.read()
    return set(RE_DPKG_STATUS.findall(contents))
def read_in_para_lengths(corpus_dir: str, output_dir: str):
    """Walk `corpus_dir` and parse every file into paragraphs and length stats.

    Returns (lengths, dict_paragraphs, failed_files): `lengths` maps
    document id -> intro/summary/paragraph-length info, `dict_paragraphs`
    maps document id -> its paragraphs, and `failed_files` lists files
    that could not be parsed. (`output_dir` is currently unused --
    kept for interface compatibility.)
    """
    lengths = {}
    dict_paragraphs = {}
    failed_files = []
    for (root, dirs, files) in os.walk(corpus_dir):
        for file in files:
            # Bug fix: join against `root` (the walked directory), not
            # `corpus_dir`, so files in subdirectories resolve correctly.
            with open(os.path.join(root, file), 'r') as f:
                lines = f.readlines()
            # Bug fix: the original compared strings with `is not`, which
            # tests identity; use `!=` for value comparison.
            lines = [line.strip() for line in lines if (line.strip('\n') != ' ') and (line.strip() != '')]
            paragraphs = lines_to_paragraphs(lines)
            if paragraphs:
                paragraphs = only_english(paragraphs)
                paragraphs = only_string_in_dict(paragraphs)
                (no_intro, no_summ, lengths_para) = count_doc(paragraphs)
                doc_id = file.split('.')[0]
                lengths[doc_id] = {'intro': no_intro, 'summary': no_summ, 'lengths_paragraphs': lengths_para}
                dict_paragraphs[doc_id] = paragraphs
            else:
                print('reading in of file {} doesnt work'.format(file))
                failed_files.append(file)
    return (lengths, dict_paragraphs, failed_files)
class PublicSingleton(PrivateSingleton, Protocol):
    """Public access point for the singleton instance.

    NOTE(review): `instance` takes `cls` but has no @classmethod
    decorator in this copy -- the decorator was likely lost in
    transcription; verify against callers before relying on it.
    """

    def instance(cls) -> Self:
        # Delegate to the private base to lazily create/fetch the instance.
        return cls._ensure_instance()
class Identity(nn.Module):
    """Pass-through module: returns its single argument, or the tuple of all."""

    def forward(self, *args):
        # A lone argument is unwrapped; multiple arguments stay a tuple.
        return args[0] if len(args) == 1 else args
def collate_fn(batch):
    """Collate samples (skipping None entries) into (frames, targets, times).

    Frames are stacked then permuted so the last axis of each sample
    moves to position 1 (presumably channels-first -- confirm against
    the dataset's frame layout); targets collect [items[2], items[5],
    items[6]] per sample; times are stacked as floats. Returns
    (None, None, None) when every sample is None.
    """
    if all(sample is None for sample in batch):
        return (None, None, None)
    frames, target, times = [], [], []
    for items in batch:
        if items is None:
            continue
        frames.append(items[3])
        target.append([items[2], items[5], items[6]])
        times.append(items[4].float())
    stacked_frames = torch.stack(frames, 0).permute(0, 4, 1, 2, 3)
    return (stacked_frames, target, torch.stack(times, 0))
def plot3d_cubie(cnt, clrs):
    """Plot the three visible faces (F, U, R) of a unit cubie.

    `cnt` is the cubie's center; `clrs` supplies the face colors in
    (F, U, R) order. Returns the combined 3-D polygon plot with axes
    hidden.
    """
    half = QQ((1, 2))
    # Shift from the center to the cubie's minimal-coordinate corner.
    x = (cnt[0] - half)
    y = (cnt[1] - half)
    z = (cnt[2] - half)
    # Closed 5-point outlines of the front, up and right faces.
    ptsF = [[(x + 1), (y + 0), (0 + z)], [(x + 1), (y + 1), (0 + z)], [(x + 1), (y + 1), (1 + z)], [(x + 1), (y + 0), (1 + z)], [(x + 1), (y + 0), (0 + z)]]
    ptsU = [[(x + 0), (y + 0), (1 + z)], [(x + 1), (y + 0), (1 + z)], [(x + 1), (y + 1), (1 + z)], [(x + 0), (y + 1), (1 + z)], [(x + 0), (y + 0), (1 + z)]]
    ptsR = [[(x + 0), (y + 1), (0 + z)], [(x + 1), (y + 1), (0 + z)], [(x + 1), (y + 1), (1 + z)], [(x + 0), (y + 1), (1 + z)], [(x + 0), (y + 1), (0 + z)]]
    # Draw back-to-front: right, up, then front.
    P = polygon_plot3d(ptsR, rgbcolor=clrs[2])
    P += polygon_plot3d(ptsU, rgbcolor=clrs[1])
    P += polygon_plot3d(ptsF, rgbcolor=clrs[0])
    P.axes(show=False)
    return P
def test_plot_dt() -> None:
    """Smoke-test plotting a datetime column against a numeric column."""
    date_strings = pd.Series(['3/11/2001', '3/12/2002', '3/12/2003', '3/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003', '4/13/2003'])
    df = pd.DataFrame()
    df['dt'] = pd.to_datetime(date_strings, infer_datetime_format=True)
    df['num'] = [1.0, 2.1, 3.5, 4.5, 2.5, 1.5, 2.3, 6.1, 8.1, 1.0, 3, 10.6, 7.8, 9.1, 20.6]
    plot(df, 'dt', 'num')
class WriteDataCallback(Callback):
    """Callback that persists each subject's per-category data via a Writer."""

    def __init__(self, writer: wr.Writer) -> None:
        self.writer = writer

    def on_subject(self, params: dict):
        """Write every category's data array for the current subject."""
        subject_files = params[defs.KEY_SUBJECT_FILES]
        index_str = defs.subject_index_to_str(params[defs.KEY_SUBJECT_INDEX], len(subject_files))
        for category in params[defs.KEY_CATEGORIES]:
            data = params[category]
            location = '{}/{}'.format(defs.LOC_DATA_PLACEHOLDER.format(category), index_str)
            self.writer.write(location, data, dtype=data.dtype)
def time_funcs(funcs, name='', func_names=None, num_iters=100, warmups=5, launch_wait=True):
    """Time each callable in `funcs` over `num_iters` CUDA iterations.

    Runs `warmups` untimed passes first (inside an NVTX range), then
    gathers per-function timings with CUDA profiling enabled. Returns a
    list of per-function lists of iteration times.
    """
    timer = CudaTimer(len(funcs))
    pycudaprof.start_cuda_profiling()
    # Warm up all functions without recording timings.
    torch.cuda.nvtx.range_push(name + ' Warmup')
    for _ in range(warmups):
        for fn in funcs:
            fn()
    torch.cuda.nvtx.range_pop()
    times = [[] for _ in funcs]
    for _ in range(num_iters):
        iter_times = time_one(funcs, launch_wait=launch_wait, timer=timer, name=name, func_names=func_names)
        for idx, elapsed in enumerate(iter_times):
            times[idx].append(elapsed)
    pycudaprof.stop_cuda_profiling()
    return times
class Classifier():
    def __init__(self, path: str, label_list, args):
        """Load tokenizer, config and a sequence-classification model.

        If `path` is given, weights are restored from
        `<path>/pytorch_model.bin`; otherwise the pretrained
        `args.bert_model` weights are used.
        """
        self.args = args
        # Prefer GPU unless unavailable or explicitly disabled.
        self.device = torch.device(('cuda:0' if (torch.cuda.is_available() and (not self.args.no_cuda)) else 'cpu'))
        self.label_list = label_list
        self.num_labels = len(self.label_list)
        self.config = AutoConfig.from_pretrained(self.args.bert_model, num_labels=self.num_labels)
        self.tokenizer = AutoTokenizer.from_pretrained(self.args.bert_model)
        if (path is not None):
            # Restore fine-tuned weights saved by `save`.
            state_dict = torch.load((path + '/pytorch_model.bin'))
            self.model = AutoModelForSequenceClassification.from_pretrained(path, state_dict=state_dict, config=self.config)
        else:
            self.model = AutoModelForSequenceClassification.from_pretrained(self.args.bert_model, config=self.config)
        self.model.to(self.device)
    def save(self, dir_path: str):
        """Save model weights (unwrapping DataParallel) to `dir_path`/pytorch_model.bin."""
        model_to_save = (self.model.module if hasattr(self.model, 'module') else self.model)
        torch.save(model_to_save.state_dict(), '{}/pytorch_model.bin'.format(dir_path))
    def convert_examples_to_features(self, examples, train):
        """Tokenize and pad `examples` into InputFeatures.

        When `train` is True, also returns the normalized label
        distribution (used later for label smoothing):
        (features, label_distribution); otherwise returns just features.
        """
        label_map = {label: i for (i, label) in enumerate(self.label_list)}
        # NOTE(review): `if_roberta` is computed but not used in this
        # visible code -- confirm whether downstream code relies on it.
        if_roberta = (True if ('roberta' in self.config.architectures[0].lower()) else False)
        if train:
            label_distribution = torch.FloatTensor(len(label_map)).zero_()
        else:
            label_distribution = None
        features = []
        for (ex_index, example) in enumerate(examples):
            tokens_a = self.tokenizer.tokenize(example.text_a)
            # Truncate to leave room for the [CLS] and [SEP] tokens.
            if (len(tokens_a) > (self.args.max_seq_length - 2)):
                tokens_a = tokens_a[:(self.args.max_seq_length - 2)]
            tokens = (([self.tokenizer.cls_token] + tokens_a) + [self.tokenizer.sep_token])
            segment_ids = ([0] * len(tokens))
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = ([1] * len(input_ids))
            # Zero-pad ids, mask and segments up to max_seq_length.
            padding = ([0] * (self.args.max_seq_length - len(input_ids)))
            input_ids += padding
            input_mask += padding
            segment_ids += padding
            assert (len(input_ids) == self.args.max_seq_length)
            assert (len(input_mask) == self.args.max_seq_length)
            assert (len(segment_ids) == self.args.max_seq_length)
            if (example.label is not None):
                label_id = label_map[example.label]
            else:
                # Unlabeled example sentinel.
                label_id = (- 1)
            if train:
                label_distribution[label_id] += 1.0
            features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
        if train:
            # Normalize counts into a probability distribution.
            label_distribution = (label_distribution / label_distribution.sum())
            return (features, label_distribution)
        else:
            return features
def train(self, train_examples):
    """Fine-tune the classifier on ``train_examples``.

    Seeds all RNGs from ``self.args.seed``, builds the optimizer/scheduler,
    then runs a standard gradient-accumulation training loop using a
    label-smoothed cross-entropy loss.
    """
    # Micro-batch size: the configured batch is split across accumulation steps.
    train_batch_size = int((self.args.train_batch_size / self.args.gradient_accumulation_steps))
    random.seed(self.args.seed)
    np.random.seed(self.args.seed)
    torch.manual_seed(self.args.seed)
    torch.cuda.manual_seed(self.args.seed)
    # Total optimizer steps over all epochs (feeds the LR scheduler).
    num_train_steps = int((((len(train_examples) / train_batch_size) / self.args.gradient_accumulation_steps) * self.args.num_train_epochs))
    (optimizer, scheduler) = get_optimizer(self.model, num_train_steps, self.args)
    # Label distribution is computed alongside feature conversion for smoothing.
    (train_features, label_distribution) = self.convert_examples_to_features(train_examples, train=True)
    train_dataloader = get_train_dataloader(train_features, train_batch_size)
    logger.info('***** Label distribution for label smoothing *****')
    logger.info(str(label_distribution))
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_examples))
    logger.info(' Batch size = %d', self.args.train_batch_size)
    logger.info(' Num steps = %d', num_train_steps)
    self.model.zero_grad()
    self.model.train()
    for _ in trange(int(self.args.num_train_epochs), desc='Epoch'):
        for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
            (input_ids, input_mask, segment_ids, label_ids) = process_train_batch(batch, self.device)
            outputs = self.model(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
            logits = outputs[0]
            loss = loss_with_label_smoothing(label_ids, logits, label_distribution, self.args.label_smoothing, self.device)
            # Scale so accumulated gradients average over the full batch.
            if (self.args.gradient_accumulation_steps > 1):
                loss = (loss / self.args.gradient_accumulation_steps)
            loss.backward()
            # Step the optimizer only once every gradient_accumulation_steps micro-batches.
            if (((step + 1) % self.args.gradient_accumulation_steps) == 0):
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                self.model.zero_grad()
                self.model.train()
def evaluate(self, eval_examples):
    """Predict a label for every example in ``eval_examples``.

    Returns a list of ``(confidence, label)`` pairs, one per example, where
    ``confidence`` is the maximum softmax probability and ``label`` the
    corresponding entry of ``self.label_list``. Returns ``[]`` when there
    is no evaluation data.
    """
    if len(eval_examples) == 0:
        logger.info('\n No eval data!')
        return []
    features = self.convert_examples_to_features(eval_examples, train=False)
    dataloader = get_eval_dataloader(features, self.args.eval_batch_size)
    self.model.eval()
    predictions = []
    for input_ids, input_mask, segment_ids, label_ids in tqdm(dataloader, desc='Evaluating'):
        input_ids = input_ids.to(self.device)
        input_mask = input_mask.to(self.device)
        segment_ids = segment_ids.to(self.device)
        with torch.no_grad():
            logits = self.model(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)[0]
            probabilities = torch.softmax(logits, dim=1)
        probabilities = probabilities.detach().cpu()
        # One (max-probability, label) pair per row of the batch.
        for row in probabilities:
            confidence, label_index = row.max(dim=0)
            predictions.append((confidence.item(), self.label_list[label_index.item()]))
    return predictions
def _pretrain_generator(generator, train_examples):
    """Pre-train the generator, keeping only the best-dev-loss checkpoint.

    Runs ``args.generator_pretrain_epochs`` epochs, checkpointing model and
    optimizer state to /tmp whenever the dev loss improves; afterwards the
    best checkpoint is restored and the temporary files removed.

    Note: ``train_examples`` is accepted for interface consistency; the
    epoch loop relies entirely on ``generator.train_epoch()``.
    """
    print(('=' * 60), '\n', 'Generator Pre-training', '\n', ('=' * 60), sep='')
    # BUG FIX: the running best must start at +infinity. The original
    # initialized it to 0.0, so `dev_loss < best_dev_loss` was never true
    # for a non-negative loss, no checkpoint was ever written, and the
    # torch.load below crashed with FileNotFoundError.
    best_dev_loss = float('inf')
    tag = time.time()
    for epoch in range(args.generator_pretrain_epochs):
        dev_loss = generator.train_epoch()
        if dev_loss < best_dev_loss:
            torch.save(generator.model.state_dict(), '/tmp/generator_model_{}.pt'.format(tag))
            torch.save(generator.optimizer.state_dict(), '/tmp/generator_optimizer_{}.pt'.format(tag))
            best_dev_loss = dev_loss
        print('Epoch {}, Dev Loss: {:.4f}; Best Dev Loss: {:.4f}'.format(epoch, dev_loss, best_dev_loss))
    # Restore the best checkpoint, then clean up the temporary files.
    generator.model.load_state_dict(torch.load('/tmp/generator_model_{}.pt'.format(tag)))
    generator.optimizer.load_state_dict(torch.load('/tmp/generator_optimizer_{}.pt'.format(tag)))
    os.remove(os.path.join('/tmp', 'generator_model_{}.pt'.format(tag)))
    os.remove(os.path.join('/tmp', 'generator_optimizer_{}.pt'.format(tag)))
class WeylLieConformalAlgebra(LieConformalAlgebraWithStructureCoefficients):
    """The Weyl Lie conformal algebra over ``R``.

    Generators ``alpha_0, ..., alpha_{ngens-1}`` and a central element ``K``,
    with the 0-th lambda-bracket coefficient of a pair of generators given by
    the corresponding entry of a non-degenerate skew-symmetric Gram matrix
    (the canonical symplectic block matrix by default).
    """

    def __init__(self, R, ngens=None, gram_matrix=None, names=None, index_set=None):
        """Initialize; ``ngens`` must be an even positive integer, and
        ``gram_matrix`` (when given) a non-degenerate skew-symmetric
        ``ngens x ngens`` matrix over ``R``."""
        from sage.matrix.matrix_space import MatrixSpace
        if ngens:
            from sage.rings.integer_ring import ZZ
            # ngens must be an even integer (pairs of conjugate generators).
            if (not ((ngens in ZZ) and (not (ngens % 2)))):
                raise ValueError(f'ngens needs to be an even positive Integer, got {ngens}')
        if (gram_matrix is not None):
            # Infer ngens from the matrix when not given explicitly.
            if (ngens is None):
                ngens = gram_matrix.dimensions()[0]
            try:
                assert (gram_matrix in MatrixSpace(R, ngens, ngens))
            except AssertionError:
                raise ValueError('The gram_matrix should be a skew-symmetric {0} x {0} matrix, got {1}'.format(ngens, gram_matrix))
            if ((not gram_matrix.is_skew_symmetric()) or gram_matrix.is_singular()):
                raise ValueError('The gram_matrix should be a non degenerate skew-symmetric {0} x {0} matrix, got {1}'.format(ngens, gram_matrix))
        elif (gram_matrix is None):
            if (ngens is None):
                ngens = 2
            # Default: the standard symplectic form [[0, I], [-I, 0]].
            A = identity_matrix(R, (ngens // 2))
            from sage.matrix.special import block_matrix
            gram_matrix = block_matrix([[R.zero(), A], [(- A), R.zero()]])
        latex_names = None
        if ((names is None) and (index_set is None)):
            names = 'alpha'
            # LaTeX names for the generators plus the central element K.
            latex_names = (tuple((('\\alpha_{%d}' % i) for i in range(ngens))) + ('K',))
        (names, index_set) = standardize_names_index_set(names=names, index_set=index_set, ngens=ngens)
        # Structure coefficients: [alpha_i lambda alpha_j] has 0-th coefficient
        # gram_matrix[i, j] * K (K is central).
        weyldict = {(i, j): {0: {('K', 0): gram_matrix[(index_set.rank(i), index_set.rank(j))]}} for i in index_set for j in index_set}
        super().__init__(R, weyldict, names=names, latex_names=latex_names, index_set=index_set, central_elements=('K',))
        self._gram_matrix = gram_matrix

    def _repr_(self):
        """String representation shown in the Sage REPL."""
        return 'The Weyl Lie conformal algebra with generators {} over {}'.format(self.gens(), self.base_ring())

    def gram_matrix(self):
        """Return the Gram matrix defining this algebra's structure coefficients."""
        return self._gram_matrix
class _Pretty(Doc):
    """Doc node that defers its rendering to the wrapped object's ``pretty()``."""

    __slots__ = ('obj',)

    def __init__(self, obj):
        # obj is expected to expose a pretty() method returning a Doc-like
        # object with send_to().
        self.obj = obj

    def send_to(self, out, indent):
        """Render by delegating to the document produced by ``obj.pretty()``."""
        document = self.obj.pretty()
        document.send_to(out, indent)
def scatter_plot(dataset, data_range, title, axis_name, file_path):
    """Draw an ERA5-vs-ENS10 scatter plot and save it to ``file_path``.

    Note: ``title`` is currently unused; it is kept for interface
    compatibility with callers.
    """
    matplotlib.use('agg')
    plt.figure(figsize=(10, 8), dpi=300)
    # Point size scales with the value range of the plotted variable.
    point_size = ((data_range[1] - data_range[0]) / 1024) * 0.5
    for ens10, era5, _, _ in tqdm(dataset, desc=f'[Plot]', unit='Batch', total=len(dataset)):
        # Broadcast the ERA5 ground truth across the ENS10 ensemble dimension.
        era5_expanded = era5.unsqueeze(-1).expand(ens10.shape)
        plt.scatter(era5_expanded.numpy().ravel(), ens10.numpy().ravel(), color='black', s=point_size)
    plt.xlabel(f'{axis_name} from ERA5')
    plt.ylabel(f'{axis_name} from ENS10 with 48h lead time')
    plt.savefig(file_path)
class Result:
    """Tracks correct/total counts and reports accuracy as a percentage."""

    def __init__(self, correct, total):
        # Raw counts; total is assumed non-zero when accuracy is computed.
        self.correct = correct
        self.total = total

    def report_accuracy(self):
        """Print the accuracy as a percentage with two decimal places."""
        print('Accuracy: %.2f%%' % ((100.0 * self.correct) / self.total))

    def accuracy(self):
        """Return the accuracy percentage, rounded to two decimal places."""
        percentage = (100.0 * self.correct) / self.total
        return float('%.2f' % percentage)
def test_redundant_array_success():
    """RedundantArray should apply exactly once on the success-case SDFG."""
    sdfg = _make_sdfg_1(succeed=True)
    sdfg.save('test2.sdfg')
    applied = sdfg.apply_transformations(RedundantArray)
    assert applied == 1
class StoreOpsTests(object):
    """Shared test logic for Caffe2 StoreSet/StoreGet operators.

    NOTE(review): methods take ``cls`` as first argument but are invoked as
    instance methods (``self`` by convention) — presumably a naming quirk in
    the original test harness; confirm against the mixin's usage.
    """

    def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):
        """Worker body: last process sets the blob, every process gets it.

        Assertion failures are reported back through ``queue`` so the parent
        process can re-raise them.
        """
        store_handler = create_store_handler_fn()
        blob = 'blob'
        value = np.full(1, 1, np.float32)
        # Only the last-ranked process publishes the blob to the store.
        if (index == (num_procs - 1)):
            workspace.FeedBlob(blob, value)
            workspace.RunOperatorOnce(core.CreateOperator('StoreSet', [store_handler, blob], [], blob_name=blob))
        output_blob = 'output_blob'
        # Every process (including the setter) fetches it back.
        workspace.RunOperatorOnce(core.CreateOperator('StoreGet', [store_handler], [output_blob], blob_name=blob))
        try:
            np.testing.assert_array_equal(workspace.FetchBlob(output_blob), 1)
        except AssertionError as err:
            # Forward the failure to the parent instead of dying silently.
            queue.put(err)
        workspace.ResetWorkspace()

    def test_set_get(cls, create_store_handler_fn):
        """Spawn num_procs workers and re-raise any assertion they report."""
        queue = Queue()
        num_procs = 4
        procs = []
        for index in range(num_procs):
            proc = Process(target=cls._test_set_get, args=(queue, create_store_handler_fn, index, num_procs))
            proc.start()
            procs.append(proc)
        for proc in procs:
            proc.join()
        if (not queue.empty()):
            raise queue.get()

    def test_get_timeout(cls, create_store_handler_fn):
        """StoreGet on a blob that was never set (expected to time out)."""
        store_handler = create_store_handler_fn()
        net = core.Net('get_missing_blob')
        net.StoreGet([store_handler], 1, blob_name='blob')
        workspace.RunNetOnce(net)
class NoOpBuildEnvironment(BuildEnvironment):
    """A no-op drop-in replacement for BuildEnvironment.

    Used when build isolation is disabled: setup, teardown, and cleanup do
    nothing, and installing requirements is explicitly unsupported.
    """

    def __init__(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def cleanup(self):
        pass

    def install_requirements(self, finder, requirements, prefix_as_string, message):
        # Installing into a no-op environment is a programming error.
        raise NotImplementedError()
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward attribute lookups on this
    # deprecated public module ('odr.models') to the private '_models'
    # implementation — the helper presumably also emits a deprecation warning.
    return _sub_module_deprecation(sub_package='odr', module='models', private_modules=['_models'], all=__all__, attribute=name)
class BasicResBlock(nn.Module):
    """Residual block: 3x3 conv then 1x1 projection on the main path, a 1x1
    projection on the shortcut path, summed and passed through ReLU.

    Args:
        in_channels: channels of the input feature map.
        out_channels: channels of the output feature map.
        conv_cfg: conv layer config forwarded to ConvModule.
        norm_cfg: normalization config forwarded to ConvModule (BN by default).
    """

    def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN')):
        super(BasicResBlock, self).__init__()
        # Main branch: 3x3 conv keeping the width, then 1x1 projection.
        self.conv1 = ConvModule(in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        self.conv2 = ConvModule(in_channels, out_channels, kernel_size=1, bias=False, activation=None, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        # Shortcut branch: 1x1 projection so channel counts match for the sum.
        self.conv_identity = ConvModule(in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=None)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        shortcut = self.conv_identity(x)
        main = self.conv2(self.conv1(x))
        return self.relu(main + shortcut)
class MultiInputController(GeneralController):
    """RNN architecture-search controller that additionally samples, for each
    layer, which of ``num_input_blocks`` model inputs connect to it.

    Extends GeneralController with an extra "input block" decision per layer
    (a binary choice per input block) alongside the usual operation-type and
    skip-connection decisions. Built on TensorFlow 1.x graph-mode ops.
    """

    def __init__(self, model_space, buffer_type='ordinal', with_skip_connection=True, with_input_blocks=True, share_embedding=None, use_ppo_loss=False, kl_threshold=0.05, num_input_blocks=2, input_block_unique_connection=True, skip_connection_unique_connection=False, buffer_size=15, batch_size=5, session=None, train_pi_iter=20, lstm_size=32, lstm_num_layers=2, lstm_keep_prob=1.0, tanh_constant=None, temperature=None, skip_target=0.8, skip_weight=0.5, optim_algo='adam', name='controller', *args, **kwargs):
        """Store the input-block settings, then defer to GeneralController.

        input_block_unique_connection: when True, each input block may be
        consumed by at most one layer (enforced by masking during sampling).
        """
        self.with_input_blocks = with_input_blocks
        self.num_input_blocks = num_input_blocks
        self.input_block_unique_connection = input_block_unique_connection
        super().__init__(model_space=model_space, buffer_type=buffer_type, with_skip_connection=with_skip_connection, share_embedding=share_embedding, use_ppo_loss=use_ppo_loss, kl_threshold=kl_threshold, skip_connection_unique_connection=skip_connection_unique_connection, buffer_size=buffer_size, batch_size=batch_size, session=session, train_pi_iter=train_pi_iter, lstm_size=lstm_size, lstm_num_layers=lstm_num_layers, lstm_keep_prob=lstm_keep_prob, tanh_constant=tanh_constant, temperature=temperature, optim_algo=optim_algo, skip_target=skip_target, skip_weight=skip_weight, name=name, **kwargs)

    def _create_weight(self):
        """Create base weights, plus the input-block embedding and softmax head."""
        super()._create_weight()
        if self.with_input_blocks:
            with tf.variable_scope('input', initializer=tf.random_uniform_initializer(minval=(- 0.1), maxval=0.1)):
                # Embedding fed back as LSTM input after an input-block decision.
                self.input_emb = tf.get_variable('inp_emb', [self.num_input_blocks, self.lstm_size])
                # Projects the LSTM hidden state to one logit per input block.
                self.w_soft['input'] = tf.get_variable('w_input', [self.lstm_size, self.num_input_blocks])

    def _build_sampler(self):
        """Build the stochastic sampling graph (batch size 1).

        Per layer, samples: (1) the operation type, (2) optionally which
        input blocks connect to the layer, (3) optionally skip connections
        to earlier layers. Exposes sample_arc, sample_log_prob,
        sample_entropy, skip_count, skip_penaltys, and sample_probs.
        """
        anchors = []
        anchors_w_1 = []
        arc_seq = []
        hidden_states = []
        entropys = []
        probs_ = []
        log_probs = []
        skip_count = []
        skip_penaltys = []
        prev_c = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        prev_h = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        inputs = self.g_emb
        # Target Bernoulli distribution for skip connections (for the KL penalty).
        skip_targets = tf.constant([(1.0 - self.skip_target), self.skip_target], dtype=tf.float32)
        input_block_record = []
        skip_conn_record = []
        for layer_id in range(self.num_layers):
            # --- Decision 1: operation type for this layer ---
            (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
            (prev_c, prev_h) = (next_c, next_h)
            hidden_states.append(prev_h)
            logit = tf.matmul(next_h[(- 1)], self.w_soft['start'][layer_id])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            probs_.append(tf.nn.softmax(logit))
            start = tf.multinomial(logit, 1)
            start = tf.to_int32(start)
            start = tf.reshape(start, [1])
            arc_seq.append(start)
            # sparse-softmax-CE of the sampled token == its negative log-prob.
            log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=start)
            log_probs.append(log_prob)
            entropy = tf.stop_gradient((log_prob * tf.exp((- log_prob))))
            entropys.append(entropy)
            inputs = tf.nn.embedding_lookup(self.w_emb['start'][layer_id], start)
            if self.with_input_blocks:
                # --- Decision 2: which input blocks feed this layer ---
                (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                (prev_c, prev_h) = (next_c, next_h)
                hidden_states.append(prev_h)
                block_query = tf.reshape(tf.matmul(next_h[(- 1)], self.w_soft['input']), (self.num_input_blocks, 1))
                if (layer_id != (self.num_layers - 1)):
                    if (self.input_block_unique_connection and (layer_id > 0)):
                        # Mask out blocks already consumed by earlier layers.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                        mask = tf.reshape(mask, [self.num_input_blocks, 1])
                        mask1 = tf.greater(mask, 0)
                        block_query = tf.where(mask1, y=block_query, x=tf.fill(tf.shape(block_query), (- 10000.0)))
                else:
                    # Last layer: force-connect every still-unused input block.
                    mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                    mask = tf.reshape(mask, [self.num_input_blocks, 1])
                    mask2 = tf.equal(mask, 0)
                    block_query = tf.where(mask2, y=block_query, x=tf.fill(tf.shape(block_query), 10000.0))
                # Two-class logits (no-connect / connect) per input block.
                logit = tf.concat([(- block_query), block_query], axis=1)
                if (self.temperature is not None):
                    logit /= self.temperature
                if (self.tanh_constant is not None):
                    logit = (self.tanh_constant * tf.tanh(logit))
                probs_.append(tf.expand_dims(tf.nn.softmax(logit), axis=0))
                input_block = tf.multinomial(logit, 1)
                input_block = tf.to_int32(input_block)
                input_block = tf.reshape(input_block, [self.num_input_blocks])
                arc_seq.append(input_block)
                input_block_record.append(input_block)
                log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=input_block)
                log_probs.append(tf.reshape(tf.reduce_sum(log_prob), [(- 1)]))
                entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob * tf.exp((- log_prob)))), [(- 1)]))
                entropys.append(entropy)
                # Average the chosen block embeddings as the next LSTM input.
                inputs = tf.cast(tf.reshape(input_block, (1, self.num_input_blocks)), tf.float32)
                inputs /= (1.0 + tf.cast(tf.reduce_sum(input_block), tf.float32))
                inputs = tf.matmul(inputs, self.input_emb)
            if self.with_skip_connection:
                if (layer_id > 0):
                    # --- Decision 3: skip connections to previous layers ---
                    (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                    (prev_c, prev_h) = (next_c, next_h)
                    hidden_states.append(prev_h)
                    # Attention query against anchors of all previous layers.
                    query = tf.concat(anchors_w_1, axis=0)
                    query = tf.tanh((query + tf.matmul(next_h[(- 1)], self.w_attn_2)))
                    query = tf.matmul(query, self.v_attn)
                    if self.skip_connection_unique_connection:
                        # Mask previous layers already used as skip sources.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(skip_conn_record), axis=0))
                        mask = tf.slice(mask, begin=[0], size=[layer_id])
                        mask1 = tf.greater(mask, 0)
                        query = tf.where(mask1, y=query, x=tf.fill(tf.shape(query), (- 10000.0)))
                    logit = tf.concat([(- query), query], axis=1)
                    if (self.temperature is not None):
                        logit /= self.temperature
                    if (self.tanh_constant is not None):
                        logit = (self.tanh_constant * tf.tanh(logit))
                    probs_.append(tf.expand_dims(tf.nn.softmax(logit), axis=0))
                    skip = tf.multinomial(logit, 1)
                    skip = tf.to_int32(skip)
                    skip = tf.reshape(skip, [layer_id])
                    arc_seq.append(skip)
                    skip_conn_record.append(tf.concat([tf.cast(skip, tf.float32), tf.zeros((self.num_layers - layer_id))], axis=0))
                    # KL(skip distribution || skip_targets) penalty term.
                    skip_prob = tf.sigmoid(logit)
                    kl = (skip_prob * tf.log((skip_prob / skip_targets)))
                    kl = tf.reduce_sum(kl)
                    skip_penaltys.append(kl)
                    log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=skip)
                    log_probs.append(tf.reshape(tf.reduce_sum(log_prob), [(- 1)]))
                    entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob * tf.exp((- log_prob)))), [(- 1)]))
                    entropys.append(entropy)
                    # Average the anchors of the chosen skip sources as next input.
                    skip = tf.to_float(skip)
                    skip = tf.reshape(skip, [1, layer_id])
                    skip_count.append(tf.reduce_sum(skip))
                    inputs = tf.matmul(skip, tf.concat(anchors, axis=0))
                    inputs /= (1.0 + tf.reduce_sum(skip))
                else:
                    skip_conn_record.append(tf.zeros(self.num_layers, 1))
                anchors.append(next_h[(- 1)])
                anchors_w_1.append(tf.matmul(next_h[(- 1)], self.w_attn_1))
        self.anchors = anchors
        self.anchors_w_1 = anchors_w_1
        self.sample_hidden_states = hidden_states
        # Flattened architecture token sequence and its summary statistics.
        arc_seq = tf.concat(arc_seq, axis=0)
        self.sample_arc = tf.reshape(arc_seq, [(- 1)])
        entropys = tf.stack(entropys)
        self.sample_entropy = tf.reduce_sum(entropys)
        log_probs = tf.stack(log_probs)
        self.sample_log_prob = tf.reduce_sum(log_probs)
        skip_count = tf.stack(skip_count)
        self.skip_count = tf.reduce_sum(skip_count)
        skip_penaltys = tf.stack(skip_penaltys)
        self.skip_penaltys = tf.reduce_mean(skip_penaltys)
        self.sample_probs = probs_

    def _build_trainer(self):
        """Build the training graph: scores a BATCH of given architectures.

        Mirrors _build_sampler, but instead of sampling each decision it
        reads it from the `input_arc` placeholders and accumulates the
        log-probabilities/entropies of those fixed decisions (used for the
        policy-gradient / PPO update).
        """
        anchors = []
        anchors_w_1 = []
        probs_ = []
        ops_each_layer = 1
        # Tokens per layer: 1 op + num_input_blocks (if enabled) + layer_id
        # skip decisions (if enabled) — summed over all layers.
        total_arc_len = sum(([(ops_each_layer + (self.num_input_blocks * self.with_input_blocks))] + [((ops_each_layer + (self.num_input_blocks * self.with_input_blocks)) + (i * self.with_skip_connection)) for i in range(1, self.num_layers)]))
        self.total_arc_len = total_arc_len
        self.input_arc = [tf.placeholder(shape=(None, 1), dtype=tf.int32, name='arc_{}'.format(i)) for i in range(total_arc_len)]
        batch_size = tf.shape(self.input_arc[0])[0]
        entropys = []
        log_probs = []
        skip_count = []
        skip_penaltys = []
        prev_c = [tf.zeros([batch_size, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        prev_h = [tf.zeros([batch_size, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        # Broadcast the start embedding across the batch.
        inputs = tf.matmul(tf.ones((batch_size, 1)), self.g_emb)
        skip_targets = tf.constant([(1.0 - self.skip_target), self.skip_target], dtype=tf.float32)
        arc_pointer = 0
        input_block_record = []
        skip_conn_record = []
        hidden_states = []
        for layer_id in range(self.num_layers):
            # --- Operation-type decision (read from input_arc) ---
            (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
            (prev_c, prev_h) = (next_c, next_h)
            hidden_states.append(prev_h)
            logit = tf.matmul(next_h[(- 1)], self.w_soft['start'][layer_id])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            start = self.input_arc[arc_pointer]
            start = tf.reshape(start, [batch_size])
            probs_.append(tf.nn.softmax(logit))
            log_prob1 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=start)
            log_probs.append(log_prob1)
            entropy = tf.stop_gradient((log_prob1 * tf.exp((- log_prob1))))
            entropys.append(entropy)
            inputs = tf.nn.embedding_lookup(self.w_emb['start'][layer_id], start)
            if self.with_input_blocks:
                # --- Input-block decisions (read from input_arc) ---
                (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                (prev_c, prev_h) = (next_c, next_h)
                hidden_states.append(prev_h)
                block_query = tf.reshape(tf.matmul(next_h[(- 1)], self.w_soft['input']), ((self.num_input_blocks * batch_size), 1))
                if (layer_id != (self.num_layers - 1)):
                    if (self.input_block_unique_connection and (layer_id > 0)):
                        # Mask out blocks already consumed by earlier layers.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                        mask = tf.reshape(mask, [(self.num_input_blocks * batch_size), 1])
                        mask1 = tf.greater(mask, 0)
                        block_query = tf.where(mask1, y=block_query, x=tf.fill(tf.shape(block_query), (- 10000.0)))
                else:
                    # Last layer: force-connect the still-unused input blocks.
                    mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                    mask = tf.reshape(mask, [(self.num_input_blocks * batch_size), 1])
                    mask2 = tf.equal(mask, 0)
                    block_query = tf.where(mask2, y=block_query, x=tf.fill(tf.shape(block_query), 10000.0))
                logit = tf.concat([(- block_query), block_query], axis=1)
                if (self.temperature is not None):
                    logit /= self.temperature
                if (self.tanh_constant is not None):
                    logit = (self.tanh_constant * tf.tanh(logit))
                probs_.append(tf.reshape(tf.nn.softmax(logit), [batch_size, self.num_input_blocks, 2]))
                input_block = self.input_arc[(arc_pointer + ops_each_layer):((arc_pointer + ops_each_layer) + self.num_input_blocks)]
                input_block = tf.reshape(tf.transpose(input_block), [(batch_size * self.num_input_blocks)])
                input_block_record.append(input_block)
                log_prob2 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=input_block)
                log_prob2 = tf.reshape(log_prob2, [batch_size, (- 1)])
                log_probs.append(tf.reduce_sum(log_prob2, axis=1))
                entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob2 * tf.exp((- log_prob2)))), [(- 1)]))
                entropys.append(entropy)
                # Per-sample averaged embedding of the selected input blocks.
                inputs = tf.cast(tf.reshape(input_block, (batch_size, self.num_input_blocks)), tf.float32)
                inputs /= tf.matmul(tf.reshape((1.0 + tf.cast(tf.reduce_sum(tf.reshape(input_block, (batch_size, self.num_input_blocks)), axis=1), tf.float32)), ((- 1), 1)), tf.ones((1, self.num_input_blocks), dtype=tf.float32))
                inputs = tf.matmul(inputs, self.input_emb)
            if self.with_skip_connection:
                if (layer_id > 0):
                    # --- Skip-connection decisions (read from input_arc) ---
                    (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                    (prev_c, prev_h) = (next_c, next_h)
                    hidden_states.append(prev_h)
                    # Batched attention over the anchors of previous layers.
                    query = tf.transpose(tf.stack(anchors_w_1), [1, 0, 2])
                    query = tf.tanh((query + tf.expand_dims(tf.matmul(next_h[(- 1)], self.w_attn_2), axis=1)))
                    query = tf.reshape(query, ((batch_size * layer_id), self.lstm_size))
                    query = tf.matmul(query, self.v_attn)
                    if self.skip_connection_unique_connection:
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(skip_conn_record), axis=0))
                        mask = tf.slice(mask, begin=[0, 0], size=[batch_size, layer_id])
                        mask = tf.reshape(mask, ((batch_size * layer_id), 1))
                        mask1 = tf.greater(mask, 0)
                        query = tf.where(mask1, y=query, x=tf.fill(tf.shape(query), (- 10000.0)))
                    logit = tf.concat([(- query), query], axis=1)
                    if (self.temperature is not None):
                        logit /= self.temperature
                    if (self.tanh_constant is not None):
                        logit = (self.tanh_constant * tf.tanh(logit))
                    probs_.append(tf.reshape(tf.nn.softmax(logit), [batch_size, layer_id, 2]))
                    # Skip tokens follow the op (and input-block) tokens in the arc.
                    if self.with_input_blocks:
                        skip = self.input_arc[((arc_pointer + ops_each_layer) + self.num_input_blocks):(((arc_pointer + ops_each_layer) + self.num_input_blocks) + layer_id)]
                    else:
                        skip = self.input_arc[(arc_pointer + ops_each_layer):((arc_pointer + ops_each_layer) + layer_id)]
                    skip = tf.reshape(tf.transpose(skip), [(batch_size * layer_id)])
                    skip = tf.to_int32(skip)
                    skip_prob = tf.sigmoid(logit)
                    kl = (skip_prob * tf.log((skip_prob / skip_targets)))
                    kl = tf.reduce_sum(kl, axis=1)
                    kl = tf.reshape(kl, [batch_size, (- 1)])
                    skip_penaltys.append(kl)
                    log_prob3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=skip)
                    log_prob3 = tf.reshape(log_prob3, [batch_size, (- 1)])
                    log_probs.append(tf.reduce_sum(log_prob3, axis=1))
                    entropy = tf.stop_gradient(tf.reduce_sum((log_prob3 * tf.exp((- log_prob3))), axis=1))
                    entropys.append(entropy)
                    # Averaged anchors of the selected skip sources as next input.
                    skip = tf.to_float(skip)
                    skip = tf.reshape(skip, [batch_size, 1, layer_id])
                    skip_count.append(tf.reduce_sum(skip, axis=2))
                    anchors_ = tf.stack(anchors)
                    anchors_ = tf.transpose(anchors_, [1, 0, 2])
                    inputs = tf.matmul(skip, anchors_)
                    inputs = tf.squeeze(inputs, axis=1)
                    inputs /= (1.0 + tf.reduce_sum(skip, axis=2))
                else:
                    skip_conn_record.append(tf.zeros((batch_size, self.num_layers)))
                anchors.append(next_h[(- 1)])
                anchors_w_1.append(tf.matmul(next_h[(- 1)], self.w_attn_1))
            # Advance to the next layer's segment of the flattened arc.
            arc_pointer += ((ops_each_layer + (layer_id * self.with_skip_connection)) + (self.num_input_blocks * self.with_input_blocks))
        self.train_hidden_states = hidden_states
        self.entropys = tf.stack(entropys)
        self.onehot_probs = probs_
        log_probs = tf.stack(log_probs)
        self.onehot_log_prob = tf.reduce_sum(log_probs, axis=0)
        skip_count = tf.stack(skip_count)
        self.onehot_skip_count = tf.reduce_sum(skip_count, axis=0)
        skip_penaltys_flat = [tf.reduce_mean(x, axis=1) for x in skip_penaltys]
        self.onehot_skip_penaltys = tf.reduce_mean(skip_penaltys_flat, axis=0)
class TestLayerFusing(unittest.TestCase):
    """Checks that graph preparation fuses the expected layer sequences."""

    def _compare(self, fused_nodes, expected_fusions):
        # Check count first, then the operator-type sequence of every fusion.
        self.assertTrue(len(fused_nodes) == len(expected_fusions), msg=f'Number of fusions is not as expected!')
        for idx, fusion in enumerate(fused_nodes):
            self.assertTrue(get_type(fusion) == expected_fusions[idx], msg=f'Miss-match fusion compared to expected!')

    def test_layer_fusing_1(self):
        model = create_network_1(INPUT_SHAPE)
        graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO, representative_dataset, lambda name, _tp: get_tpc_1())
        self._compare(graph.fused_nodes, [[Conv2D, Activation]])

    def test_layer_fusing_2(self):
        model = create_network_2(INPUT_SHAPE)
        graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO, representative_dataset, lambda name, _tp: get_tpc_2())
        self._compare(graph.fused_nodes, [[Conv2D, Activation], [Conv2D, ReLU], [Conv2D, tf.nn.sigmoid], [Conv2D, Activation]])

    def test_layer_fusing_3(self):
        model = create_network_3(INPUT_SHAPE)
        graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO, representative_dataset, lambda name, _tp: get_tpc_3())
        self._compare(graph.fused_nodes, [[Conv2D, Activation]])

    def test_layer_fusing_4(self):
        model = create_network_4(INPUT_SHAPE)
        graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO, representative_dataset, lambda name, _tp: get_tpc_4())
        self._compare(graph.fused_nodes, [[Conv2D, Activation, Add], [Conv2D, Activation, Add], [Conv2D, Activation], [Conv2D, ReLU, Add], [Dense, tf.nn.silu], [Dense, Activation]])
def test_issue_1864():
    """Regression test: is_none on a typetracer array keeps the right form types."""
    layout = ak.from_iter([[None, 1], None, [1, 2]]).layout
    tracer = ak.Array(layout.to_typetracer())
    outer = ak.is_none(tracer, axis=0)
    inner = ak.is_none(tracer, axis=1)
    assert str(outer.layout.form.type) == 'bool'
    assert str(inner.layout.form.type) == 'option[var * bool]'
class EchoChamberDynamics(object):
    """Simulation driver for opinion dynamics on a social network.

    Couples a population of Agent objects with a SocialMedia network and
    records opinions, screen diversity, message logs, and periodic network
    snapshots under ``data_dir``.
    """

    def __init__(self, num_agents, num_links, epsilon, sns_seed, l, data_dir):
        # epsilon: confidence bound used by agents and by the stationarity
        # test; l: parameter forwarded to SocialMedia (semantics defined there).
        self.num_agents = num_agents
        self.l = l
        self.epsilon = epsilon
        self.set_agents(num_agents, epsilon)
        self.social_media = SocialMedia(num_agents, num_links, l, sns_seed)
        self.data_dir = data_dir
        self.opinion_data = []
        self.screen_diversity_data = []
        # NOTE(review): subdirectories are created only when data_dir itself
        # is missing; if data_dir exists without 'data'/'network_data' inside,
        # later exports would fail — confirm this is intended.
        if (not os.path.isdir(data_dir)):
            os.makedirs(os.path.join(data_dir, 'data'))
            os.makedirs(os.path.join(data_dir, 'network_data'))

    def set_agents(self, num_agents, epsilon):
        """Create the agent population, seeded with an empty-screen diversity."""
        screen_diversity = analysis.screen_diversity([], bins=10)
        self.agents = [Agent(i, epsilon, screen_diversity) for i in range(num_agents)]

    def total_discordant_messages(self):
        """Total count of discordant messages accumulated across all agents."""
        total_discordant_msgs = 0
        for a in self.agents:
            total_discordant_msgs += len(a.discordant_msgs)
        return total_discordant_msgs

    def is_stationary_state(self, G):
        """Return True when every weakly connected component has converged.

        A component counts as converged when its opinion spread (max - min)
        is within epsilon. Requires at least two components; with a single
        component the count comparison below is always False.
        """
        num_clusters = len([G.subgraph(c) for c in nx.weakly_connected_components(G)])
        num_coverged_clusters = 0
        if (num_clusters >= 2):
            for C in [G.subgraph(c) for c in nx.weakly_connected_components(G)]:
                _agents = [self.agents[i] for i in list(C.nodes())]
                opinions = np.array([a.opinion for a in _agents])
                opi_diff = (np.max(opinions) - np.min(opinions))
                if (opi_diff <= self.epsilon):
                    num_coverged_clusters += 1
        if (num_coverged_clusters == num_clusters):
            return True
        else:
            return False

    def export_csv(self, data_dic, ofname):
        """Write data_dic as an xz-compressed CSV under <data_dir>/data."""
        dir_path = os.path.join(self.data_dir, 'data')
        file_path = os.path.join(dir_path, ofname)
        pd.DataFrame(data_dic).to_csv(file_path, compression='xz')

    def export_gexf(self, t):
        """Snapshot the network at step t as a bz2-compressed GEXF file."""
        network_dir_path = os.path.join(self.data_dir, 'network_data')
        file_path = os.path.join(network_dir_path, (('G_' + str(t).zfill(7)) + '.gexf.bz2'))
        # Color nodes by current opinion before writing.
        cls = [float(a.opinion) for a in self.agents]
        self.social_media.set_node_colors(cls)
        nx.write_gexf(self.social_media.G, file_path)

    def final_exports(self, t):
        """Flush all collected data: opinions, diversity, messages, network."""
        self.export_csv(self.opinion_data, 'opinions.csv.xz')
        self.export_csv(self.screen_diversity_data, 'screen_diversity.csv.xz')
        self.social_media.message_df.to_csv(os.path.join((self.data_dir + '/data'), 'messages.csv.xz'))
        self.export_gexf(t)

    def evolve(self, t_max, mu, p, q, rewiring_methods):
        """Run the simulation for at most t_max steps.

        Each step one random agent reads its screen, updates its opinion
        (rate mu), rewires a link with probability q, and posts a message
        (p is forwarded to Agent.post_message — presumably a posting
        probability; confirm in Agent). Stops early once the network is
        stationary; data is exported on termination either way.
        """
        for t in range(t_max):
            self.opinion_data.append([a.opinion for a in self.agents])
            self.screen_diversity_data.append([a.screen_diversity for a in self.agents])
            # Periodic network snapshot every 10000 steps.
            if ((t % 10000) == 0):
                self.export_gexf(t)
            user_id = np.random.choice(self.num_agents)
            screen = self.social_media.show_screen(user_id)
            self.agents[user_id].evaluate_messages(screen)
            self.agents[user_id].screen_diversity = analysis.screen_diversity(screen.content.values, bins=10)
            unfollow_id = None
            follow_id = None
            self.agents[user_id].update_opinion(mu)
            # With probability q the agent may rewire one of its links.
            if (np.random.random() < q):
                (unfollow_id, follow_id) = self.agents[user_id].decide_to_rewire(self.social_media, rewiring_methods)
                if ((unfollow_id is not None) and (follow_id is not None)):
                    self.social_media.rewire_users(user_id, unfollow_id, follow_id)
            msg = self.agents[user_id].post_message(t, p)
            self.social_media.update_message_db(t, msg)
            if self.is_stationary_state(self.social_media.G):
                self.final_exports(t)
                break
            elif (t >= (t_max - 1)):
                self.final_exports(t)
                break
def bernoulli(n, algorithm='default', num_threads=1):
    """Return the n-th Bernoulli number as a rational, via the chosen backend.

    ``algorithm`` may be 'default', 'arb', 'flint', 'pari'/'gp', 'gap',
    'magma', or 'bernmm'; 'default' picks a backend based on the size of n.
    ``num_threads`` is used only by the 'bernmm' backend. Raises ValueError
    for an unrecognized algorithm. Backend imports are kept lazy, inside
    each branch, so unused interfaces are never loaded.
    """
    n = ZZ(n)
    # Resolve 'default' to a concrete backend based on the size of n.
    if algorithm == 'default':
        if n <= 20000:
            algorithm = 'flint'
        elif n <= 300000:
            algorithm = 'arb'
        else:
            algorithm = 'bernmm'
    if algorithm == 'arb':
        import sage.libs.arb.arith as arb_arith
        return arb_arith.bernoulli(n)
    if algorithm == 'flint':
        if n >= 100000:
            from warnings import warn
            warn('flint is known to not be accurate for large Bernoulli numbers')
        from sage.libs.flint.arith import bernoulli_number as flint_bernoulli
        return flint_bernoulli(n)
    if algorithm in ('pari', 'gp'):
        from sage.libs.pari.all import pari
        return Rational(pari(n).bernfrac())
    if algorithm == 'gap':
        from sage.libs.gap.libgap import libgap
        return Rational(libgap.Bernoulli(n).sage())
    if algorithm == 'magma':
        import sage.interfaces.magma
        return Rational(sage.interfaces.magma.magma('Bernoulli(%s)' % n))
    if algorithm == 'bernmm':
        import sage.rings.bernmm
        return sage.rings.bernmm.bernmm_bern_rat(n, num_threads)
    raise ValueError('invalid choice of algorithm')
def average_state_dicts(state_dicts):
    """Return the element-wise average of an iterable of model state dicts.

    All state dicts must share the same keys and tensor shapes.

    Args:
        state_dicts: iterable of parameter-name -> tensor mappings.

    Returns:
        A new dict mapping each parameter name to the averaged tensor.

    Raises:
        ValueError: if ``state_dicts`` is empty.
    """
    iterator = iter(state_dicts)
    try:
        first = next(iterator)
    except StopIteration:
        raise ValueError('No state dicts to average.')
    num_dicts = 1
    with torch.no_grad():
        # BUG FIX: clone the first state dict's tensors. The original
        # accumulated with `+=` directly into the first dict, silently
        # mutating the caller's tensors in place.
        running_sum = {pname: param.data.clone() for (pname, param) in first.items()}
        for state_dict in iterator:
            for (pname, param) in state_dict.items():
                running_sum[pname] += param.data
            num_dicts += 1
        for (pname, param) in running_sum.items():
            running_sum[pname] = param / float(num_dicts)
    return running_sum
class Slim(nn.Module):
    """Lightweight SSD-style face detector with multi-scale detection heads.

    A depthwise-separable backbone produces four feature maps of increasing
    depth; each feeds parallel heads for box regression (4 values per
    anchor), 2-class classification, and 5-point landmarks (10 values per
    anchor). In 'train' phase raw classification logits are returned;
    otherwise softmax probabilities.
    """

    def __init__(self, cfg=None, phase='train'):
        # cfg is accepted for interface compatibility but not used here.
        super(Slim, self).__init__()
        self.phase = phase
        self.num_classes = 2
        # Backbone: conv_bn / conv_dw blocks (args: in_ch, out_ch, and a
        # third integer — presumably the stride; confirm in conv_dw/conv_bn).
        self.conv1 = conv_bn(3, 16, 2)
        self.conv2 = conv_dw(16, 32, 1)
        self.conv3 = conv_dw(32, 32, 2)
        self.conv4 = conv_dw(32, 32, 1)
        self.conv5 = conv_dw(32, 64, 2)
        self.conv6 = conv_dw(64, 64, 1)
        self.conv7 = conv_dw(64, 64, 1)
        self.conv8 = conv_dw(64, 64, 1)
        self.conv9 = conv_dw(64, 128, 2)
        self.conv10 = conv_dw(128, 128, 1)
        self.conv11 = conv_dw(128, 128, 1)
        self.conv12 = conv_dw(128, 256, 2)
        self.conv13 = conv_dw(256, 256, 1)
        # Extra downsampling stage producing the fourth detection feature map.
        self.conv14 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1), nn.ReLU(inplace=True), depth_conv2d(64, 256, kernel=3, stride=2, pad=1), nn.ReLU(inplace=True))
        (self.loc, self.conf, self.landm) = self.multibox(self.num_classes)

    def multibox(self, num_classes):
        """Build per-scale head layers (3/2/2/3 anchors at the four scales).

        Returns (loc, conf, landm) as nn.Sequential containers indexed in
        the same order as the four detection feature maps.
        """
        loc_layers = []
        conf_layers = []
        landm_layers = []
        # Scale 1 (64 channels, 3 anchors).
        loc_layers += [depth_conv2d(64, (3 * 4), kernel=3, pad=1)]
        conf_layers += [depth_conv2d(64, (3 * num_classes), kernel=3, pad=1)]
        landm_layers += [depth_conv2d(64, (3 * 10), kernel=3, pad=1)]
        # Scale 2 (128 channels, 2 anchors).
        loc_layers += [depth_conv2d(128, (2 * 4), kernel=3, pad=1)]
        conf_layers += [depth_conv2d(128, (2 * num_classes), kernel=3, pad=1)]
        landm_layers += [depth_conv2d(128, (2 * 10), kernel=3, pad=1)]
        # Scale 3 (256 channels, 2 anchors).
        loc_layers += [depth_conv2d(256, (2 * 4), kernel=3, pad=1)]
        conf_layers += [depth_conv2d(256, (2 * num_classes), kernel=3, pad=1)]
        landm_layers += [depth_conv2d(256, (2 * 10), kernel=3, pad=1)]
        # Scale 4 (256 channels, 3 anchors) uses plain convolutions.
        loc_layers += [nn.Conv2d(256, (3 * 4), kernel_size=3, padding=1)]
        conf_layers += [nn.Conv2d(256, (3 * num_classes), kernel_size=3, padding=1)]
        landm_layers += [nn.Conv2d(256, (3 * 10), kernel_size=3, padding=1)]
        return (nn.Sequential(*loc_layers), nn.Sequential(*conf_layers), nn.Sequential(*landm_layers))

    def forward(self, inputs):
        """Run the backbone, apply per-scale heads, and concatenate outputs.

        Returns (bbox_regressions, classifications, ldm_regressions); the
        classification output is raw logits in 'train' phase and softmax
        probabilities otherwise.
        """
        detections = list()
        loc = list()
        conf = list()
        landm = list()
        x1 = self.conv1(inputs)
        x2 = self.conv2(x1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        x6 = self.conv6(x5)
        x7 = self.conv7(x6)
        x8 = self.conv8(x7)
        detections.append(x8)
        x9 = self.conv9(x8)
        x10 = self.conv10(x9)
        x11 = self.conv11(x10)
        detections.append(x11)
        x12 = self.conv12(x11)
        x13 = self.conv13(x12)
        detections.append(x13)
        x14 = self.conv14(x13)
        detections.append(x14)
        # Apply each scale's heads; move channels last before flattening.
        for (x, l, c, lam) in zip(detections, self.loc, self.conf, self.landm):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
            landm.append(lam(x).permute(0, 2, 3, 1).contiguous())
        # Flatten per-scale predictions into (batch, num_anchors, values).
        bbox_regressions = torch.cat([o.view(o.size(0), (- 1), 4) for o in loc], 1)
        classifications = torch.cat([o.view(o.size(0), (- 1), 2) for o in conf], 1)
        ldm_regressions = torch.cat([o.view(o.size(0), (- 1), 10) for o in landm], 1)
        if (self.phase == 'train'):
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=(- 1)), ldm_regressions)
        return output
def quadratic(x: tf.Tensor) -> tf.Tensor:
    """Return the sum of squares of ``x`` along its trailing axis (kept as size 1)."""
    # Scalars and zero-width trailing axes have no meaningful reduction here.
    has_empty_trailing = (x.shape == []) or (x.shape[(- 1)] == 0)
    if has_empty_trailing:
        raise ValueError(f'x must have non-empty trailing dimension, got shape {x.shape}')
    return tf.reduce_sum((x ** 2), axis=(- 1), keepdims=True)
def min(*args):
    """Fold ``min_impl`` over ``args`` and return the result.

    NOTE: intentionally shadows the builtin ``min`` (DSL-style helper);
    the pairwise comparison itself is delegated to ``min_impl``.

    Raises:
        TypeError: if called with no arguments.
    """
    if not args:
        # Explicit raise instead of ``assert``: asserts disappear under -O.
        raise TypeError('min expected at least 1 argument, got 0')
    # Iterative right fold, equivalent to the previous recursive
    # min_impl(a0, min_impl(a1, ...)) but immune to the recursion limit.
    result = args[-1]
    for value in reversed(args[:-1]):
        result = min_impl(value, result)
    return result
def squash(vectors, axis=(- 1)):
    """Capsule-network squashing nonlinearity.

    Rescales ``vectors`` so the norm along ``axis`` lies in [0, 1) while the
    direction is preserved.
    """
    squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    # ||v||^2 / (1 + ||v||^2) attenuates short vectors toward zero ...
    attenuation = squared_norm / (1 + squared_norm)
    # ... and dividing by the (epsilon-stabilised) norm normalises direction.
    scale = attenuation / K.sqrt(squared_norm + K.epsilon())
    return scale * vectors
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Register Python-binding methods for ``SimpleRefCount<Ipv4MulticastRoute>``.

    Auto-generated PyBindGen registration: exposes the default and copy
    constructors of the ns-3 reference-counting base class.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    return
def valid_port(port):
    """argparse ``type=`` validator for TCP/UDP port numbers.

    Args:
        port: the raw command-line value (string or int).

    Returns:
        The port as an ``int`` when it lies in the valid range 1-65535.

    Raises:
        argparse.ArgumentTypeError: for non-numeric input or out-of-range ports.
    """
    try:
        value = int(port)
    except (TypeError, ValueError):
        # Surface a clean CLI error instead of a raw ValueError traceback.
        raise argparse.ArgumentTypeError('%r is not a valid port number' % (port,)) from None
    if 1 <= value <= 65535:
        return value
    raise argparse.ArgumentTypeError('%d is not a valid port number' % value)
def get_unetw(x_train, pretrained_weights=None):
    """Build the "wide" U-Net (base width 38, doubled per level).

    Args:
        x_train: training array; only its per-sample shape is used for Input.
        pretrained_weights: optional weights file to load into the model.

    Returns:
        A compiled-ready Keras Model with a single sigmoid output channel.
    """
    print('Begining UNet Wide')
    base_width = 38
    filters = [base_width * (2 ** level) for level in range(5)]

    def double_conv(tensor, n_filters):
        # Two stacked 3x3 relu convolutions, as used at every U-Net level.
        t = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(tensor)
        return Conv2D(n_filters, (3, 3), activation='relu', padding='same')(t)

    def up_block(tensor, skip, up_filters, n_filters):
        # Upsample via transposed conv, concatenate the encoder skip, convolve.
        upsampled = Conv2DTranspose(up_filters, (2, 2), strides=(2, 2), padding='same')(tensor)
        merged = concatenate([upsampled, skip], axis=3)
        return double_conv(merged, n_filters)

    inputs = Input(shape=x_train.shape[1:])
    # Encoder.
    enc1 = double_conv(inputs, filters[0])
    down1 = MaxPooling2D(pool_size=(2, 2))(enc1)
    enc2 = double_conv(down1, filters[1])
    down2 = MaxPooling2D(pool_size=(2, 2))(enc2)
    enc3 = double_conv(down2, filters[2])
    down3 = MaxPooling2D(pool_size=(2, 2))(enc3)
    enc4 = double_conv(down3, filters[3])
    down4 = MaxPooling2D(pool_size=(2, 2))(enc4)
    bottleneck = double_conv(down4, filters[4])
    # Decoder (transpose-conv widths match the original fixed 256/128/64/32).
    dec4 = up_block(bottleneck, enc4, 256, filters[3])
    dec3 = up_block(dec4, enc3, 128, filters[2])
    dec2 = up_block(dec3, enc2, 64, filters[1])
    dec1 = up_block(dec2, enc1, 32, filters[0])
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec1)
    model = Model(inputs=[inputs], outputs=[outputs])
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
def compute_data_representations_only(net, data, device, has_features=True):
    """Run ``net`` once over ``data`` (no grad) and return its output on ``device``.

    When ``has_features`` is False the graph's node features are replaced by
    the network's own embedding table before the forward pass.
    """
    net.eval()
    if not has_features:
        if data.x is not None:
            log.warn('[WARNING] features overidden in adj matrix')
        data.x = net.get_node_feats().weight.data
    with torch.no_grad():
        representation = net(data)
    # torch.cat over a single chunk keeps parity with the original list-append form.
    return torch.cat([representation], dim=0).to(device)
def mv_txt(dataset, aug_name):
    """Copy the latest ``test_label*.txt`` of ``dataset`` into the augmented tree.

    Reads the lexicographically last matching label file (decoding a UTF-8 BOM
    if present) and rewrites it to ``<aug_name>/<dataset>/test_label.txt``.
    """
    label_files = glob.glob('datasets/NewVersion/{}/test_label*.txt'.format(dataset))
    dst_path = '{}/{}/test_label.txt'.format(aug_name, dataset)
    with open(label_files[-1], 'r', encoding='UTF-8-sig') as src:
        lines = src.readlines()
    # Write UTF-8 explicitly: the original relied on the platform default
    # encoding, which corrupts non-ASCII labels on some systems.
    with open(dst_path, 'w', encoding='utf-8') as dst:
        dst.writelines(lines)
class DistributedSamplerV2(Sampler):
    """Strided distributed sampler.

    Each of ``num_replicas`` ranks draws an equally sized, wrap-padded slice
    of the dataset (rank r takes indices r, r+num_replicas, ...). Shuffling is
    deterministic per epoch; call :meth:`set_epoch` each epoch so all ranks
    agree on the permutation.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Every rank yields the same (ceil-divided) number of samples.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Epoch-seeded generator keeps the permutation identical across ranks.
            generator = torch.Generator()
            generator.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=generator).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()
        # Wrap-around padding so the index list divides evenly among replicas.
        order += order[:self.total_size - len(order)]
        assert len(order) == self.total_size
        # Strided split: this rank takes every num_replicas-th index.
        mine = order[self.rank:self.total_size:self.num_replicas]
        assert len(mine) == self.num_samples
        return iter(mine)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
# NOTE(review): this bare '()' looks like the residue of a stripped decorator
# (the function below reads like a pytest fixture) — confirm against VCS.
()
def type4py_data():
    """Canned Type4Py ``/predict`` JSON response, parsed fresh on every call."""
    return json.loads('{\n   "error":null,\n   "response":{\n      "classes":[\n         {\n            "cls_var_ln":{\n               "cls_var_name":[\n                  [\n                     4,\n                     4\n                  ],\n                  [\n                     4,\n                     13\n                  ]\n               ]\n            },\n            "cls_var_occur":{\n               "cls_var_name":[\n                  [\n                     "token",\n                     "var",\n                     "name"\n                  ]\n               ]\n            },\n            "funcs":[\n               {\n                  "docstring":{\n                     "func":"None",\n                     "long_descr":"None",\n                     "ret":"None"\n                  },\n                  "fn_lc":[\n                     [\n                        5,\n                        4\n                     ],\n                     [\n                        8,\n                        28\n                     ]\n                  ],\n                  "fn_var_ln":{\n                     "var_name":[\n                        [\n                           7,\n                           8\n                        ],\n                        [\n                           7,\n                           16\n                        ]\n                     ]\n                  },\n                  "fn_var_occur":{\n                     "var_name":[\n                        [\n                           "token",\n                           "var",\n                           "name"\n                        ]\n                     ]\n                  },\n                  "name":"__init__",\n                  "params":{\n                     "age":"int"\n                  },\n                  "params_descr":{\n                     "age":"comment"\n                  },\n                  "params_occur":{\n                     "age":[\n                        [\n                           "self",\n                           "age",\n                           "age"\n                        ]\n                     ]\n                  },\n                  "params_p":{\n                     "age":[\n                        [\n                           "int",\n                           0.\n                        ],\n                        [\n                           "str",\n                           2.e-10\n                        ]\n                     ]\n                  },\n                  "q_name":"Person.__init__",\n                  "ret_exprs":[\n                     ""\n                  ],\n                  "ret_type":"None",\n                  "variables":{\n                     "age":""\n                  },\n                  "variables_p":{\n                     "age":[\n                        [\n                           "int",\n                           0.\n                        ]\n                     ]\n                  }\n               },\n               {\n                  "docstring":{\n                     "func":"None",\n                     "long_descr":"None",\n                     "ret":"None"\n                  },\n                  "fn_lc":[\n                     [\n                        10,\n                        4\n                     ],\n                     [\n                        11,\n                        24\n                     ]\n                  ],\n                  "fn_var_ln":{\n\n                  },\n                  "fn_var_occur":{\n\n                  },\n                  "name":"get_name",\n                  "params":{\n\n                  },\n                  "params_descr":{\n\n                  },\n                  "params_occur":{\n\n                  },\n                  "params_p":{\n\n                  },\n                  "q_name":"Person.get_name",\n                  "ret_exprs":[\n                     "return self.name"\n                  ],\n                  "ret_type":"",\n                  "ret_type_p":[\n                     [\n                        "str",\n                        0.\n                     ]\n                  ],\n                  "variables":{\n\n                  },\n                  "variables_p":{\n\n                  }\n               }\n            ],\n            "name":"Person",\n            "q_name":"Person",\n            "variables":{\n               "person_id":""\n            },\n            "variables_p":{\n               "person_id":[\n                  [\n                     "str",\n                     0.\n                  ]\n               ]\n            }\n         }\n      ],\n      "funcs":[\n         {\n            "docstring":{\n               "func":"None",\n               "long_descr":"None",\n               "ret":"None"\n            },\n            "fn_lc":[\n               [\n                  18,\n                  0\n               ],\n               [\n                  25,\n                  18\n               ]\n            ],\n            "fn_var_ln":{\n               "leave_hours":[\n                  [\n                     19,\n                     4\n                  ],\n                  [\n                     19,\n                     15\n                  ]\n               ]\n            },\n            "fn_var_occur":{\n               "leave_hours":[\n                  [\n                     "no_hours",\n                     "leave_hours"\n                  ]\n               ]\n            },\n            "name":"work",\n            "params":{\n               "no_hours":""\n            },\n            "params_descr":{\n               "no_hours":""\n            },\n            "params_occur":{\n               "no_hours":[\n                  [\n                     "no_hours",\n                     "leave_hours"\n                  ]\n               ]\n            },\n            "params_p":{\n               "no_hours":[\n                  [\n                     "Type",\n                     0.0999\n                  ]\n               ]\n            },\n            "q_name":"work",\n            "ret_exprs":[\n               "return \'Done!\'"\n            ],\n            "ret_type":"",\n            "ret_type_p":[\n               [\n                  "str",\n                  0.\n               ]\n            ],\n            "variables":{\n               "leave_hours":""\n            },\n            "variables_p":{\n               "leave_hours":[\n                  [\n                     "int",\n                     0.2\n                  ]\n               ]\n            }\n         }\n      ],\n      "imports":[\n         "os"\n      ],\n      "mod_var_ln":{\n         "A_GLOBAL_VAR":[\n            [\n               1,\n               0\n            ],\n            [\n               1,\n               12\n            ]\n         ]\n      },\n      "mod_var_occur":{\n         "A_GLOBAL_VAR":[\n            "token"\n         ]\n      },\n      "no_types_annot":{\n         "D":1,\n         "I":0,\n         "U":14\n      },\n      "session_id":"a0bvkdCC8utA35r8JrOho07FrDpV9qaLr2lccFzoXB4",\n      "set":"None",\n      "tc":[\n         false,\n         "None"\n      ],\n      "type_annot_cove":0.07,\n      "typed_seq":"",\n      "untyped_seq":"",\n      "variables":{\n         "A_GLOBAL_VAR":""\n      },\n      "variables_p":{\n         "A_GLOBAL_VAR":[\n            [\n               "str",\n               0.\n            ]\n         ]\n      }\n   }\n}')
def test_complex_dependencies():
    """The complex-dependencies fixture must expose exactly one object under test."""
    test_cluster = generate_test_cluster('tests.fixtures.cluster.complex_dependencies')
    assert test_cluster.num_accessible_objects_under_test() == 1
def bulgarian_solitaire(n):
    """Return the Bulgarian-solitaire dynamical system on partitions of ``n``.

    One move takes a card from every pile and stacks the removed cards into a
    new pile; ``phi`` implements that map on ``Partitions(n)``.
    """
    from sage.combinat.partition import Partition, Partitions
    X = Partitions(n)
    def phi(lam):
        # Decrement every part. The ``p > 0`` guard is vacuous for genuine
        # partitions (parts are >= 1), so parts equal to 1 become 0 here and
        # are presumably normalised away by ``Partition`` — TODO confirm.
        mu = [(p - 1) for p in lam if (p > 0)]
        # The removed cards form one new pile of size len(lam).
        nu = sorted((mu + [len(lam)]), reverse=True)
        return Partition(nu)
    return FiniteDynamicalSystem(X, phi)
def main(action, savedir, data_to_use, n_images, model_dir, batch_size):
    """Entry point: render uncurated figure grids or dump samples to a folder.

    ``action`` selects the mode ('figures' or 'tofolder'); models are fetched
    from Google Drive on first use when ``model_dir`` is 'download_from_web'.
    """
    if model_dir == 'download_from_web':
        model_dir = './denoising_student_models'
        if not os.path.exists(model_dir):
            load_models_from_gdrive('./', False)
    has_gpu = bool(tf.config.list_physical_devices('GPU'))
    device = '/GPU:0' if has_gpu else '/CPU:0'
    print('Running on device {}'.format(device))
    print('TPU and Multi-GPU setups are not supported in evaluation.')
    if not os.path.isdir(savedir):
        os.mkdir(savedir)
    if action == 'figures':
        finished_ok = get_uncurated_samples(data_to_use, model_dir, savedir, device, n_images)
    elif action == 'tofolder':
        student = getmodel(data_to_use, model_dir)
        finished_ok = write_images_to_folder(student, device, savedir, batch_size, n_images)
    else:
        raise NotImplementedError("action must be 'figures' or 'tofolder'. ")
    if finished_ok:
        print('Finished execution properly.')
def test_write_file_logs_checksum(test_file_path: Path, agent: Agent):
    """Writing a file must append a 'write: <path> #<checksum>' line to the log."""
    new_content = 'This is new content.\n'
    new_checksum = file_ops.text_checksum(new_content)
    file_ops.write_to_file(str(test_file_path), new_content, agent=agent)
    with open(agent.config.file_logger_path, 'r', encoding='utf-8') as log_file:
        logged = log_file.read()
    assert logged == f'write: {test_file_path} #{new_checksum}\n'
def pushout(R, S):
    """Return a common parent ("pushout") into which both ``R`` and ``S`` coerce.

    Strategy: decompose R and S into towers of construction functors over a
    common base Z, then interleave/apply the remaining functors by rank to
    rebuild the smallest compatible parent.

    Raises:
        CoercionException: if no compatible common construction exists.
    """
    # Trivial case: identical parents.
    if ((R is S) or (R == S)):
        return R
    # Give each side a chance to supply a custom pushout.
    if hasattr(R, '_pushout_'):
        P = R._pushout_(S)
        if (P is not None):
            return P
    if hasattr(S, '_pushout_'):
        P = S._pushout_(R)
        if (P is not None):
            return P
    if isinstance(R, type):
        R = type_to_parent(R)
    if isinstance(S, type):
        S = type_to_parent(S)
    # Towers of (construction functor, partial parent) pairs down to a base.
    R_tower = construction_tower(R)
    S_tower = construction_tower(S)
    Rs = [c[1] for c in R_tower]
    Ss = [c[1] for c in S_tower]
    from sage.structure.parent import Parent
    if (not isinstance(Rs[(- 1)], Parent)):
        Rs = Rs[:(- 1)]
    if (not isinstance(Ss[(- 1)], Parent)):
        Ss = Ss[:(- 1)]
    # One parent already appears in the other's tower (and no reversed
    # coercions interfere): that tower's top is the pushout.
    if (R in Ss):
        if (not any((c[0].coercion_reversed for c in S_tower[1:]))):
            return S
    elif (S in Rs):
        if (not any((c[0].coercion_reversed for c in R_tower[1:]))):
            return R
    if (Rs[(- 1)] in Ss):
        (Rs, Ss) = (Ss, Rs)
        (R_tower, S_tower) = (S_tower, R_tower)
    # Locate the common base Z of both towers.
    Z = None
    if (Ss[(- 1)] in Rs):
        if (Rs[(- 1)] == Ss[(- 1)]):
            while (Rs and Ss and (Rs[(- 1)] == Ss[(- 1)])):
                Rs.pop()
                Z = Ss.pop()
        else:
            Rs = Rs[:Rs.index(Ss[(- 1)])]
            Z = Ss.pop()
    elif S.has_coerce_map_from(Rs[(- 1)]):
        while (not Ss[(- 1)].has_coerce_map_from(Rs[(- 1)])):
            Ss.pop()
        while (Rs and Ss[(- 1)].has_coerce_map_from(Rs[(- 1)])):
            Rs.pop()
        Z = Ss.pop()
    elif R.has_coerce_map_from(Ss[(- 1)]):
        while (not Rs[(- 1)].has_coerce_map_from(Ss[(- 1)])):
            Rs.pop()
        while (Ss and Rs[(- 1)].has_coerce_map_from(Ss[(- 1)])):
            Ss.pop()
        Z = Rs.pop()
    if ((Z is None) and (R_tower[(- 1)][0] is not None)):
        # No direct common base: let the bottom functors negotiate one.
        Z = R_tower[(- 1)][0].common_base(S_tower[(- 1)][0], R_tower[(- 1)][1], S_tower[(- 1)][1])
        R_tower = expand_tower(R_tower[:len(Rs)])
        S_tower = expand_tower(S_tower[:len(Ss)])
    else:
        R_tower = expand_tower(R_tower[:(len(Rs) + 1)])
        S_tower = expand_tower(S_tower[:(len(Ss) + 1)])
    # Functors remaining above Z on each side, lowest rank last.
    Rc = [c[0] for c in R_tower[1:]]
    Sc = [c[0] for c in S_tower[1:]]
    # NOTE: ``all`` shadows the builtin here; it accumulates the composition.
    all = IdentityConstructionFunctor()
    def apply_from(Xc):
        # Compose the next functor from tower Xc, honouring reversed coercions.
        c = Xc.pop()
        if c.coercion_reversed:
            Yc = (Sc if (Xc is Rc) else Rc)
            Y_tower = (S_tower if (Xc is Rc) else R_tower)
            Y_partial = Y_tower[len(Yc)][1]
            if (not (c * all)(Z).has_coerce_map_from(Y_partial)):
                return all
        return (c * all)
    try:
        # Merge the two functor stacks by rank until both are exhausted.
        while (Rc or Sc):
            if (not Sc):
                all = apply_from(Rc)
            elif (not Rc):
                all = apply_from(Sc)
            elif (Rc[(- 1)].rank < Sc[(- 1)].rank):
                all = apply_from(Rc)
            elif (Sc[(- 1)].rank < Rc[(- 1)].rank):
                all = apply_from(Sc)
            elif (Rc[(- 1)] == Sc[(- 1)]):
                # Equal-rank, equal functors: merge them into one step.
                cR = Rc.pop()
                cS = Sc.pop()
                c = (cR.merge(cS) or cS.merge(cR))
                if c:
                    all = (c * all)
                else:
                    raise CoercionException(('Incompatible Base Extension %r, %r (on %r, %r)' % (R, S, cR, cS)))
            elif (Rc[(- 1)] in Sc):
                if (Sc[(- 1)] in Rc):
                    raise CoercionException('Ambiguous Base Extension', R, S)
                else:
                    all = apply_from(Sc)
            elif (Sc[(- 1)] in Rc):
                all = apply_from(Rc)
            elif (Rc[(- 1)].commutes(Sc[(- 1)]) or Sc[(- 1)].commutes(Rc[(- 1)])):
                all = ((Sc.pop() * Rc.pop()) * all)
            else:
                cR = Rc.pop()
                cS = Sc.pop()
                c = (cR.merge(cS) or cS.merge(cR))
                if (c is not None):
                    all = (c * all)
                else:
                    raise CoercionException('Ambiguous Base Extension', R, S)
        # Apply the accumulated construction to the common base.
        return all(Z)
    except CoercionException:
        raise
    except (TypeError, ValueError, AttributeError, NotImplementedError) as ex:
        # Wrap unexpected failures so callers see a single exception type.
        raise CoercionException(ex)
class PromptGenerator():
    """Accumulates constraints, commands, resources and performance
    evaluations, then renders them into a numbered system prompt."""

    def __init__(self) -> None:
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        self.goals = []
        # Optional registry whose enabled commands get merged into the list.
        self.command_registry: (CommandRegistry | None) = None
        self.name = 'Bob'
        self.role = 'AI'

    def add_constraint(self, constraint: str) -> None:
        """Append one constraint line to the prompt."""
        self.constraints.append(constraint)

    def add_command(self, command_label: str, command_name: str, args=None, function: Optional[Callable]=None) -> None:
        """Register an ad-hoc command; ``args`` maps argument names to descriptions."""
        if (args is None):
            args = {}
        # Shallow-copy the caller's dict so later mutation can't leak in.
        command_args = {arg_key: arg_value for (arg_key, arg_value) in args.items()}
        command = {'label': command_label, 'name': command_name, 'args': command_args, 'function': function}
        self.commands.append(command)

    def _generate_command_string(self, command: Dict[(str, Any)]) -> str:
        """Render one command dict as ``label: "name", args: "k": "v", ...``."""
        args_string = ', '.join((f'"{key}": "{value}"' for (key, value) in command['args'].items()))
        return f"""{command['label']}: "{command['name']}", args: {args_string}"""

    def add_resource(self, resource: str) -> None:
        """Append one resource line to the prompt."""
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """Append one performance-evaluation line to the prompt."""
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: List[Any], item_type='list') -> str:
        """Number ``items``; for item_type 'command', prepend enabled registry commands."""
        if (item_type == 'command'):
            command_strings = []
            if self.command_registry:
                command_strings += [str(item) for item in self.command_registry.commands.values() if item.enabled]
            command_strings += [self._generate_command_string(item) for item in items]
            return '\n'.join((f'{(i + 1)}. {item}' for (i, item) in enumerate(command_strings)))
        else:
            return '\n'.join((f'{(i + 1)}. {item}' for (i, item) in enumerate(items)))

    def generate_prompt_string(self) -> str:
        """Assemble the full prompt: all sections plus the response-schema footer."""
        return f'''Constraints:
{self._generate_numbered_list(self.constraints)}
Commands:
{self._generate_numbered_list(self.commands, item_type='command')}
Resources:
{self._generate_numbered_list(self.resources)}
Performance Evaluation:
{self._generate_numbered_list(self.performance_evaluation)}
Respond with only valid JSON conforming to the following schema:
{llm_response_schema()}
'''
class SequentialCond(torch.nn.Sequential):
    """``nn.Sequential`` variant that forwards conditioning ``*args``/``**kwargs``
    only to submodules of the conditional layer types; all other submodules
    receive just the running activation."""

    def forward(self, input, *args, **kwargs):
        out = input
        for submodule in self:
            takes_condition = isinstance(submodule, (AdaptiveLayerNorm1D, SequentialCond, ResidualMLPBlock))
            if takes_condition:
                out = submodule(out, *args, **kwargs)
            else:
                out = submodule(out)
        return out
class Algorithm():
    """One table row describing an algorithm's capability matrix.

    Every keyword flag is normalised to display text via ``T.textify`` and
    stored under its column-name constant. NOTE(review): ``interpretability_nc``
    is accepted but never stored — confirm whether that is intended.
    """

    def __init__(self, algorithm, *, heterogeneous=None, directed=None, weighted=None, temporal=None, features=None, nc=None, interpretability_nc=None, lp=None, rl=None, inductive=None, gc=None):
        raw_columns = {
            ALGORITHM: algorithm,
            HETEROGENEOUS: heterogeneous,
            DIRECTED: directed,
            WEIGHTED: weighted,
            TEMPORAL: temporal,
            FEATURES: features,
            NC: nc,
            LP: lp,
            RL: rl,
            INDUCTIVE: inductive,
            GC: gc,
        }
        self.columns = {column: T.textify(flag) for (column, flag) in raw_columns.items()}
def get_visualizers(visualizers_config):
    """Build one visualizer instance per renderer entry in the config."""
    return [
        get_visualizer(entry['renderer_name'], entry['renderer_config'])
        for entry in visualizers_config
    ]
def test_gcn_lstm_save_load(tmpdir, arange_graph):
    """A GCN_LSTM model built from a sliding-window generator must survive save/load."""
    generator = SlidingFeaturesNodeGenerator(arange_graph, 2, batch_size=3)
    model = GCN_LSTM(None, None, [2], [4], generator=generator)
    test_utils.model_save_load(tmpdir, model)
def extract_sdae_reuters(slope=0.0, dim=10):
    """SDAE feature extractor for Reuters with a 2000-500-500-2000-``dim`` stack."""
    layer_dims = [2000, 500, 500, 2000, dim]
    return extractSDAE(dim=layer_dims, slope=slope)
def register_Ns3TcpLedbat_methods(root_module, cls):
    """Register Python-binding methods for the ns-3 TcpLedbat congestion control.

    Auto-generated PyBindGen registration: constructors plus the virtual
    TcpCongestionOps interface (Fork/GetName/IncreaseWindow/PktsAcked, etc.).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TcpLedbat const &', 'sock')])
    cls.add_method('Fork', 'ns3::Ptr< ns3::TcpCongestionOps >', [], is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IncreaseWindow', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked')], is_virtual=True)
    cls.add_method('PktsAcked', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked'), param('ns3::Time const &', 'rtt')], is_virtual=True)
    cls.add_method('SetDoSs', 'void', [param('ns3::TcpLedbat::SlowStartType', 'doSS')])
    cls.add_method('CongestionAvoidance', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked')], visibility='protected', is_virtual=True)
    return
class MLARAM(MLClassifierBase):
    """Multi-label ARAM (Adaptive Resonance Associative Map) classifier.

    Online fuzzy-ART-style learner: each ``Neuron`` stores a prototype
    (complement-coded input) plus an accumulated label vector. ``vigilance``
    controls how readily new prototypes are created; ``threshold`` controls
    how many neurons vote during prediction.
    """

    def __init__(self, vigilance=0.9, threshold=0.02, neurons=None):
        super(MLARAM, self).__init__()
        if (neurons is not None):
            self.neurons = neurons
        else:
            self.neurons = []
        self.vigilance = vigilance
        self.threshold = threshold
        self.copyable_attrs += ['neurons', 'vigilance', 'threshold']

    def reset(self):
        """Discard all learned prototypes and cached labels."""
        self._labels = []
        self.neurons = []

    def fit(self, X, y):
        """Learn neuron prototypes online from (X, y); returns ``self``."""
        self._labels = []
        self._allneu = ''
        self._online = 1
        self._alpha = 1e-13
        is_sparse_x = issparse(X)
        label_combination_to_class_map = {}
        if isinstance(X, numpy.matrix):
            X = numpy.asarray(X)
        if isinstance(y, numpy.matrix):
            y = numpy.asarray(y)
        is_more_dimensional = int((len(X[0].shape) != 1))
        X = _normalize_input_space(X)
        y_0 = _get_label_vector(y, 0)
        # Seed the network with the first sample when starting from scratch.
        if (len(self.neurons) == 0):
            neuron_vc = _concatenate_with_negation(X[0])
            self.neurons.append(Neuron(neuron_vc, y_0))
            start_index = 1
            label_combination_to_class_map[_get_label_combination_representation(y_0)] = [0]
        else:
            start_index = 0
        last_used_label_combination_class_id = 0
        for (row_no, input_vector) in enumerate(X[start_index:], start_index):
            label_assignment_vector = _get_label_vector(y, row_no)
            fc = _concatenate_with_negation(input_vector)
            activationn = ([0] * len(self.neurons))
            activationi = ([0] * len(self.neurons))
            label_combination = _get_label_combination_representation(label_assignment_vector)
            # Only neurons that share this exact label combination compete.
            if (label_combination in label_combination_to_class_map):
                fcs = fc.sum()
                for class_number in label_combination_to_class_map[label_combination]:
                    if issparse(self.neurons[class_number].vc):
                        minnfs = self.neurons[class_number].vc.minimum(fc).sum()
                    else:
                        minnfs = umath.minimum(self.neurons[class_number].vc, fc).sum()
                    activationi[class_number] = (minnfs / fcs)
                    activationn[class_number] = (minnfs / self.neurons[class_number].vc.sum())
            # No competitor resonates at all: allocate a fresh neuron.
            if (numpy.max(activationn) == 0):
                last_used_label_combination_class_id += 1
                self.neurons.append(Neuron(fc, label_assignment_vector))
                label_combination_to_class_map.setdefault(label_combination, []).append((len(self.neurons) - 1))
                continue
            inds = numpy.argsort(activationn)
            indc = numpy.where((numpy.array(activationi)[inds[::(- 1)]] > self.vigilance))[0]
            # Nothing passes the vigilance test: also allocate a new neuron.
            if (indc.shape[0] == 0):
                self.neurons.append(Neuron(fc, label_assignment_vector))
                label_combination_to_class_map.setdefault(label_combination, []).append((len(self.neurons) - 1))
                continue
            winner = inds[::(- 1)][indc[0]]
            # Resonance: shrink the winner's prototype toward the input ...
            if issparse(self.neurons[winner].vc):
                self.neurons[winner].vc = self.neurons[winner].vc.minimum(fc)
            else:
                self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc, fc)
            # ... and reinforce the labels this sample carries.
            labels_won_indicator = numpy.zeros(y_0.shape, dtype=y_0.dtype)
            labels_won_indicator[label_assignment_vector.nonzero()] = 1
            self.neurons[winner].label += labels_won_indicator
        return self

    def predict(self, X):
        """Binarise the ``predict_proba`` rankings at the largest score gap."""
        result = []
        if isinstance(X, numpy.matrix):
            X = numpy.asarray(X)
        ranks = self.predict_proba(X)
        for rank in ranks:
            sorted_rank_arg = numpy.argsort((- rank))
            diffs = (- numpy.diff([rank[k] for k in sorted_rank_arg]))
            # Cut just after the biggest drop in the sorted scores.
            indcutt = numpy.where((diffs == diffs.max()))[0]
            if (len(indcutt.shape) == 1):
                indcut = (indcutt[0] + 1)
            else:
                indcut = (indcutt[(0, (- 1))] + 1)
            label = numpy.zeros(rank.shape)
            label[sorted_rank_arg[0:indcut]] = 1
            result.append(label)
        return numpy.array(numpy.matrix(result))

    def predict_proba(self, X):
        """Label-ranking scores: activity-weighted vote of the top neurons."""
        if isinstance(X, numpy.matrix):
            X = numpy.asarray(X)
        if issparse(X):
            if (X.getnnz() == 0):
                return
        elif (len(X) == 0):
            return
        is_matrix = int((len(X[0].shape) != 1))
        X = _normalize_input_space(X)
        all_ranks = []
        neuron_vectors = [n1.vc for n1 in self.neurons]
        if any(map(issparse, neuron_vectors)):
            all_neurons = scipy.sparse.vstack(neuron_vectors)
            all_neurons_sum = all_neurons.sum(1).A
        else:
            all_neurons = numpy.vstack(neuron_vectors)
            all_neurons_sum = all_neurons.sum(1)
        # Small additive constant avoids division by zero below.
        all_neurons_sum += self._alpha
        for (row_number, input_vector) in enumerate(X):
            fc = _concatenate_with_negation(input_vector)
            if issparse(fc):
                activity = (fc.minimum(all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()
            else:
                activity = (umath.minimum(fc, all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()
            if is_matrix:
                activity = activity[0]
            sorted_activity = numpy.argsort(activity)[::(- 1)]
            winner = sorted_activity[0]
            activity_difference = (activity[winner] - activity[sorted_activity[(- 1)]])
            largest_activity = 1
            par_t = self.threshold
            # Runners-up join the vote until the relative drop from the winner
            # exceeds threshold * (max - min) activity.
            for i in range(1, len(self.neurons)):
                activity_change = ((activity[winner] - activity[sorted_activity[i]]) / activity[winner])
                if (activity_change > (par_t * activity_difference)):
                    break
                largest_activity += 1
            rbsum = sum([activity[k] for k in sorted_activity[0:largest_activity]])
            rank = (activity[winner] * self.neurons[winner].label)
            activated = []
            activity_among_activated = []
            activated.append(winner)
            activity_among_activated.append(activity[winner])
            for i in range(1, largest_activity):
                rank += (activity[sorted_activity[i]] * self.neurons[sorted_activity[i]].label)
                activated.append(sorted_activity[i])
                activity_among_activated.append(activity[sorted_activity[i]])
            rank /= rbsum
            all_ranks.append(rank)
        return numpy.array(numpy.matrix(all_ranks))
def length_of_broadcast(inputs: Sequence) -> (int | type[unknown_length]):
    """Return the broadcast length over all ``Content`` items in ``inputs``.

    The longest known length wins; if only unknown lengths are present the
    ``unknown_length`` sentinel is returned, and 1 if no Content contributes
    a length at all.
    """
    longest: (int | None) = None
    saw_unknown = False
    for candidate in inputs:
        if not isinstance(candidate, Content):
            continue
        if candidate.length is unknown_length:
            saw_unknown = True
        elif longest is None:
            longest = candidate.length
        else:
            longest = max(longest, candidate.length)
    # Any concrete length dominates the unknown sentinel.
    if longest is not None:
        return longest
    return unknown_length if saw_unknown else 1
def register_Ns3EpcS1apSapEnbProvider_methods(root_module, cls):
    """Register Python-binding methods for the ns-3 EpcS1apSapEnbProvider SAP.

    Auto-generated PyBindGen registration: constructors plus the pure-virtual
    S1-AP provider interface (ERAB release/setup, initial UE message, path
    switch request).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcS1apSapEnbProvider const &', 'arg0')])
    cls.add_method('SendErabReleaseIndication', 'void', [param('uint64_t', 'mmeUeS1Id'), param('uint16_t', 'enbUeS1Id'), param('std::list< ns3::EpcS1apSap::ErabToBeReleasedIndication >', 'erabToBeReleaseIndication')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendInitialContextSetupResponse', 'void', [param('uint64_t', 'mmeUeS1Id'), param('uint16_t', 'enbUeS1Id'), param('std::list< ns3::EpcS1apSap::ErabSetupItem >', 'erabSetupList')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendInitialUeMessage', 'void', [param('uint64_t', 'mmeUeS1Id'), param('uint16_t', 'enbUeS1Id'), param('uint64_t', 'stmsi'), param('uint16_t', 'ecgi')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendPathSwitchRequest', 'void', [param('uint64_t', 'enbUeS1Id'), param('uint64_t', 'mmeUeS1Id'), param('uint16_t', 'gci'), param('std::list< ns3::EpcS1apSap::ErabSwitchedInDownlinkItem >', 'erabToBeSwitchedInDownlinkList')], is_pure_virtual=True, is_virtual=True)
    return
def assert_identical(a, b):
    """Assert ``a`` equals ``b`` and that their types/dtypes agree.

    Strings are compared by exact Python type; everything else is coerced
    through ``np.asarray`` and compared by dtype scalar type.
    """
    assert_equal(a, b)
    if type(b) is str:
        assert_equal(type(a), type(b))
        return
    assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def create_pipeline_configuration(DEBUG=False, batch_size=4):
    """Return the two-stage BERT-QA pipeline-partition configuration.

    The literal below was emitted by an automatic model partitioner; the loops
    afterwards rewrite every batched shape to the requested ``batch_size``.
    When ``DEBUG`` is true all stages are pinned to CPU instead of cuda:0/1.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Softmax, LayerNorm, Dropout, Linear, Embedding, Gelu, Tanh), 'model_inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([4, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 1}, 1: {'stage_cls': Partition1, 'inputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([4, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([4, 384, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([4, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']
    # Rewrite the batch axis of every batched model-level input/output shape.
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Same rewrite for every per-stage input/output shape.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
def test_brentq_full_output():
    """full_output_example must report the known root, call counts, and no error."""
    result = _zeros.full_output_example((A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
    npt.assert_allclose(EXPECTED[0], result['root'], rtol=RTOL, atol=XTOL)
    npt.assert_equal(6, result['iterations'])
    npt.assert_equal(7, result['funcalls'])
    npt.assert_equal(0, result['error_num'])
class CADData(torch.utils.data.Dataset):
    def __init__(self, cad_path, solid_path, profile_path, loop_path, mode, is_training=True):
        """Load pickled CAD/solid/profile/loop codebooks, assemble per-model
        token sequences, and keep only samples within the MAX_* limits.

        NOTE(review): the inputs are pickle files produced upstream — only
        load trusted data (pickle executes arbitrary code on load).
        """
        with open(cad_path, 'rb') as f:
            cad_data = pickle.load(f)
        with open(solid_path, 'rb') as f:
            solid_data = pickle.load(f)
        self.solid_code = solid_data['content']
        with open(profile_path, 'rb') as f:
            profile_data = pickle.load(f)
        self.profile_code = profile_data['content']
        with open(loop_path, 'rb') as f:
            loop_data = pickle.load(f)
        self.loop_code = loop_data['content']
        self.solid_unique_num = solid_data['unique_num']
        self.profile_unique_num = profile_data['unique_num']
        self.loop_unique_num = loop_data['unique_num']
        self.mode = mode
        self.is_training = is_training
        self.data = []
        print('Loading dataset...')
        for cad in tqdm(cad_data):
            solid_uid = cad['name'].split('/')[(- 1)]
            if (solid_uid not in self.solid_code):
                continue
            # Solid tokens are shifted above the loop and profile codebook ranges.
            solid_code = ((self.solid_code[solid_uid] + self.loop_unique_num) + self.profile_unique_num)
            num_se = len(cad['cad_ext'])
            # Conditional mode needs at least two sketch-extrude steps.
            if ((self.mode == 'cond') and (num_se == 1)):
                continue
            sketchProfileCode = []
            sketchLoopCode = []
            valid = True
            for idx_se in range(num_se):
                profile_uid = ((solid_uid + '_') + str(idx_se))
                if (profile_uid not in self.profile_code):
                    valid = False
                    continue
                profile_code = (self.profile_code[profile_uid] + self.loop_unique_num)
                sketchProfileCode.append(profile_code)
                loop_codes = []
                # Command id 3 marks a loop in the sketch command stream.
                num_loop = len(np.where((cad['cad_cmd'][idx_se] == 3))[0])
                for idx_loop in range(num_loop):
                    loop_uid = ((profile_uid + '_') + str(idx_loop))
                    if (loop_uid not in self.loop_code):
                        valid = False
                        continue
                    loop_code = self.loop_code[loop_uid]
                    loop_codes.append(loop_code)
                sketchLoopCode.append(loop_codes)
            if (not valid):
                continue
            (pixel_full, coord_full, ext_full) = self.param2pix(cad)
            # Interleave separator sentinels (-1..-4) with profile/loop/solid codes.
            total_code = []
            for (bbox_code, loops) in zip(sketchProfileCode, sketchLoopCode):
                total_code += [(- 1)]
                total_code += [bbox_code]
                total_code += [(- 2)]
                total_code += loops
                total_code += [(- 3)]
            total_code += [solid_code]
            total_code += [(- 4)]
            total_code = (np.array(total_code) + CODE_PAD)
            # Drop samples that exceed the fixed sequence budgets.
            if ((len(pixel_full) > MAX_CAD) or (len(total_code) > MAX_CODE)):
                continue
            (pixels, sketch_mask) = self.pad_pixel(pixel_full)
            coords = self.pad_coord(coord_full)
            (exts, ext_mask) = self.pad_ext(ext_full)
            (total_code, code_mask) = self.pad_code(total_code)
            vec_data = {}
            vec_data['pixel'] = pixels
            vec_data['coord'] = coords
            vec_data['ext'] = exts
            vec_data['sketch_mask'] = sketch_mask
            vec_data['ext_mask'] = ext_mask
            vec_data['code'] = total_code
            vec_data['code_mask'] = code_mask
            vec_data['num_se'] = num_se
            vec_data['cad'] = cad
            self.data.append(vec_data)
        print(f'Post-Filter: {len(self.data)}, Keep Ratio: {((100 * len(self.data)) / len(cad_data)):.2f}%')
    def param2pix_par(self, cmd_seq, param_seq, ext_seq):
        """Tokenise sketch commands/params and extrude params into pixel,
        coordinate and extrude sequences.

        Negative values are structural sentinels; all values are shifted
        non-negative by the module-level *_PAD constants before returning.
        """
        pixel_full = []
        coord_full = []
        ext_full = []
        for (cmd, param, ext) in zip(cmd_seq, param_seq, ext_seq):
            ext_full.append(ext)
            ext_full.append(np.array([(- 1)]))
            coords = []
            pixels = []
            # Command ids 6/5/4 carry 4/2/1 coordinate pairs respectively;
            # 3/2/1 are structural markers — TODO confirm exact semantics.
            for (cc, pp) in zip(cmd, param):
                if (cc == 6):
                    coords.append(pp[0:2])
                    coords.append(pp[2:4])
                    coords.append(pp[4:6])
                    coords.append(pp[6:8])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 5):
                    coords.append(pp[0:2])
                    coords.append(pp[2:4])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 4):
                    coords.append(pp[0:2])
                    coords.append(np.array([(- 1), (- 1)]))
                elif (cc == 3):
                    coords.append(np.array([(- 2), (- 2)]))
                elif (cc == 2):
                    coords.append(np.array([(- 3), (- 3)]))
                elif (cc == 1):
                    coords.append(np.array([(- 4), (- 4)]))
            # Flatten (x, y) into one pixel index; sentinels pass through as-is.
            for xy in coords:
                if (xy[0] < 0):
                    pixels.append(xy[0])
                else:
                    pixels.append(((xy[1] * (2 ** CAD_BIT)) + xy[0]))
            pixel_full.append(pixels)
            coord_full.append(coords)
            ext_full.append(np.array([(- 2)]))
        # Final end-of-sequence sentinel for the whole model.
        coord_full.append(np.array([(- 5), (- 5)]))
        pixel_full += [(- 5)]
        ext_full = (np.hstack(ext_full) + EXT_PAD)
        coord_full = (np.vstack(coord_full) + SKETCH_PAD)
        pixel_full = (np.hstack(pixel_full) + SKETCH_PAD)
        return (pixel_full, coord_full, ext_full)
def param2pix(self, cad):
    """Vectorize a full CAD record into (pixel, coord, ext) token streams.

    FIX: the body was a line-for-line duplicate of param2pix_par applied to
    the three sequences stored in the CAD record; delegate to it instead so
    the two encodings cannot drift apart.
    """
    return self.param2pix_par(cad['cad_cmd'], cad['cad_param'], cad['cad_ext'])
def pad_pixel(self, tokens):
    """Right-pad a pixel token sequence to MAX_CAD; return (tokens, pad_mask).

    The mask is True over padded positions, False over real tokens.
    """
    fill = np.zeros(MAX_CAD - len(tokens)).astype(int)
    pad_mask = (1 - np.concatenate([np.ones(len(tokens)), fill])) == 1
    padded = np.concatenate([tokens, fill], axis=0)
    return (padded, pad_mask)
def pad_coord(self, tokens):
    """Right-pad an (N, 2) coordinate array with zero rows up to MAX_CAD rows."""
    fill = np.zeros((MAX_CAD - len(tokens), 2)).astype(int)
    return np.concatenate([tokens, fill], axis=0)
def pad_code(self, total_code):
    """Right-pad a code token sequence to MAX_CODE; return (tokens, pad_mask).

    The mask is True over padded positions, False over real tokens.
    """
    fill = np.zeros(MAX_CODE - len(total_code)).astype(int)
    padded = np.concatenate([total_code, fill], axis=0)
    pad_mask = (1 - np.concatenate([np.ones(len(total_code)), fill])) == 1
    return (padded, pad_mask)
def pad_ext(self, tokens):
    """Right-pad an extrude token sequence to MAX_EXT; return (tokens, pad_mask).

    The mask is True over padded positions, False over real tokens.
    """
    fill = np.zeros(MAX_EXT - len(tokens)).astype(int)
    pad_mask = (1 - np.concatenate([np.ones(len(tokens)), fill])) == 1
    padded = np.concatenate([tokens, fill], axis=0)
    return (padded, pad_mask)
def __len__(self):
    """Number of vectorized CAD samples kept after filtering."""
    return len(self.data)
def __getitem__(self, index):
    """Return one sample; the tuple layout depends on self.mode.

    'uncond': (pixels, coords, sketch_mask, pixels_aug, coords_aug, exts,
               ext_mask, code, code_mask) — pixels_aug/coords_aug are a
               coordinate-jittered copy of the sketch for augmentation.
    'cond':   partial (masked) CAD streams followed by the full streams
              and the code tokens.
    """
    vec_data = self.data[index]
    sketch_mask = vec_data['sketch_mask']
    code = vec_data['code']
    code_mask = vec_data['code_mask']
    exts = vec_data['ext']
    ext_mask = vec_data['ext_mask']
    if (self.mode == 'uncond'):
        # Jitter every real coordinate by up to ±AUG_RANGE; values below
        # SKETCH_PAD are sentinel tokens and pass through unchanged.
        aug_xys = []
        for xy in vec_data['coord']:
            if (xy[0] < SKETCH_PAD):
                aug_xys.append((xy - SKETCH_PAD))
            else:
                new_xy = (xy - SKETCH_PAD)
                new_xy[0] = (new_xy[0] + random.randint((- AUG_RANGE), (+ AUG_RANGE)))
                new_xy[1] = (new_xy[1] + random.randint((- AUG_RANGE), (+ AUG_RANGE)))
                # keep jittered points inside the CAD_BIT quantization grid
                new_xy = np.clip(new_xy, a_min=0, a_max=((2 ** CAD_BIT) - 1))
                aug_xys.append(new_xy)
        coords_aug = (np.vstack(aug_xys) + SKETCH_PAD)
        # Re-derive row-major pixel ids from the jittered coordinates;
        # negative x marks a sentinel and is kept as-is.
        aug_pix = []
        for xy in aug_xys:
            if ((xy[0] >= 0) and (xy[1] >= 0)):
                aug_pix.append(((xy[1] * (2 ** CAD_BIT)) + xy[0]))
            else:
                aug_pix.append(xy[0])
        pixels_aug = (np.hstack(aug_pix) + SKETCH_PAD)
        (pixels_aug, _) = self.pad_pixel(pixels_aug)
        coords_aug = self.pad_coord(coords_aug)
        pixels = vec_data['pixel']
        coords = vec_data['coord']
        return (pixels, coords, sketch_mask, pixels_aug, coords_aug, exts, ext_mask, code, code_mask)
    else:
        assert (self.mode == 'cond')
        cad = vec_data['cad']
        if self.is_training:
            # Randomly keep a strict subset of the extrude steps, masking
            # between MASK_RATIO_LOW and MASK_RATIO_HIGH of them (at least
            # one kept, at least one masked).
            num_token = len(cad['cad_cmd'])
            masked_ratio = random.uniform(MASK_RATIO_LOW, MASK_RATIO_HIGH)
            len_keep = np.clip(round((num_token * (1 - masked_ratio))), a_min=1, a_max=(num_token - 1))
            noise = np.random.random(num_token)
            ids_shuffle = np.argsort(noise)
            # sorted() restores the original step ordering of the kept ids
            ids_keep = sorted(ids_shuffle[:len_keep])
        else:
            # evaluation: condition on the first extrude step only
            ids_keep = [0]
        cmd_partial = [cad['cad_cmd'][id] for id in ids_keep]
        param_partial = [cad['cad_param'][id] for id in ids_keep]
        ext_partial = [cad['cad_ext'][id] for id in ids_keep]
        (pixel_partial, coord_partial, ext_partial) = self.param2pix_par(cmd_partial, param_partial, ext_partial)
        (pixels_par, sketch_mask_par) = self.pad_pixel(pixel_partial)
        coords_par = self.pad_coord(coord_partial)
        (exts_par, ext_mask_par) = self.pad_ext(ext_partial)
        pixels = vec_data['pixel']
        coords = vec_data['coord']
        return (pixels_par, coords_par, sketch_mask_par, exts_par, ext_mask_par, pixels, coords, sketch_mask, exts, ext_mask, code, code_mask)
def plot_cur_mem_spk(cur, mem, spk, thr_line=False, vline=False, title=False, ylim_max2=1.25):
    """Plot input current, membrane potential and a spike raster in one figure."""
    fig, (ax_cur, ax_mem, ax_spk) = plt.subplots(
        3, figsize=(8, 6), sharex=True, gridspec_kw={'height_ratios': [1, 1, 0.4]})
    # input current trace
    ax_cur.plot(cur, c='tab:orange')
    ax_cur.set_ylim([0, 0.4])
    ax_cur.set_xlim([0, 200])
    ax_cur.set_ylabel('Input Current ($I_{in}$)')
    if title:
        ax_cur.set_title(title)
    # membrane potential trace with optional threshold line
    ax_mem.plot(mem)
    ax_mem.set_ylim([0, ylim_max2])
    ax_mem.set_ylabel('Membrane Potential ($U_{mem}$)')
    if thr_line:
        ax_mem.axhline(y=thr_line, alpha=0.25, linestyle='dashed', c='black', linewidth=2)
    plt.xlabel('Time step')
    # spike raster with optional vertical event marker
    splt.raster(spk, ax_spk, s=400, c='black', marker='|')
    if vline:
        ax_spk.axvline(x=vline, ymin=0, ymax=6.75, alpha=0.15, linestyle='dashed',
                       c='black', linewidth=2, zorder=0, clip_on=False)
    plt.ylabel('Output spikes')
    plt.yticks([])
    plt.show()
def c_type(tensor):
    """Map a CUDA tensor's element type to the matching ctypes scalar type.

    Half tensors map to c_float (there is no ctypes half type); raises
    ValueError for any tensor class outside the three handled CUDA types.
    """
    mapping = (
        (torch.cuda.HalfTensor, ctypes.c_float),
        (torch.cuda.FloatTensor, ctypes.c_float),
        (torch.cuda.DoubleTensor, ctypes.c_double),
    )
    for tensor_cls, ctype in mapping:
        if isinstance(tensor, tensor_cls):
            return ctype
    raise ValueError("unknown type '{}'".format(type(tensor)))
class DatasetDeepglobe(Dataset):
    """Few-shot segmentation episodes over the Deepglobe test split.

    Each item is an episode: one query image/mask plus `shot` support
    image/mask pairs drawn from the same land-cover category. The dataset
    has a fixed virtual length of `num` episodes.
    """

    def __init__(self, datapath, fold, transform, split, shot, num=600):
        # `fold` is accepted for interface parity with the other benchmarks
        # but is unused here (Deepglobe uses a fixed category list).
        self.split = split
        self.benchmark = 'deepglobe'
        self.shot = shot
        self.num = num  # episodes per epoch (virtual dataset length)
        self.base_path = os.path.join(datapath, 'Deepglobe')
        self.categories = ['1', '2', '3', '4', '5', '6']
        self.class_ids = range(0, 6)
        self.img_metadata_classwise = self.build_img_metadata_classwise()
        self.transform = transform

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        (query_name, support_names, class_sample) = self.sample_episode(idx)
        (query_img, query_mask, support_imgs, support_masks) = self.load_frame(query_name, support_names)
        query_img = self.transform(query_img)
        # resize masks (nearest) to the transformed image resolution
        query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(),
                                   query_img.size()[-2:], mode='nearest').squeeze()
        support_imgs = torch.stack([self.transform(img) for img in support_imgs])
        support_masks = torch.stack([
            F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(),
                          support_imgs.size()[-2:], mode='nearest').squeeze()
            for smask in support_masks])
        return {'query_img': query_img,
                'query_mask': query_mask,
                'query_name': query_name,
                'support_imgs': support_imgs,
                'support_masks': support_masks,
                'support_names': support_names,
                'class_id': torch.tensor(class_sample)}

    def load_frame(self, query_name, support_names):
        """Load query/support RGB images and their ground-truth masks.

        Masks live in <category>/test/groundtruth/<image_id>.png next to the
        <category>/test/origin images the names point at.
        """
        query_img = Image.open(query_name).convert('RGB')
        support_imgs = [Image.open(name).convert('RGB') for name in support_names]
        query_id = query_name.split('/')[-1].split('.')[0]
        ann_path = os.path.join(self.base_path, query_name.split('/')[-4], 'test', 'groundtruth')
        query_name = os.path.join(ann_path, query_id) + '.png'
        support_ids = [name.split('/')[-1].split('.')[0] for name in support_names]
        # FIX: the original zipped support_names back in only to discard them
        support_names = [os.path.join(ann_path, sid) + '.png' for sid in support_ids]
        query_mask = self.read_mask(query_name)
        support_masks = [self.read_mask(name) for name in support_names]
        return (query_img, query_mask, support_imgs, support_masks)

    def read_mask(self, img_name):
        """Load a grayscale mask and binarize it at threshold 128."""
        mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
        mask[mask < 128] = 0
        mask[mask >= 128] = 1
        return mask

    def sample_episode(self, idx):
        """Pick a class round-robin by idx, then one query and `shot` supports.

        Supports are resampled until `shot` names distinct from the query are
        collected (duplicates among supports are allowed, as before).
        """
        class_id = idx % len(self.class_ids)
        class_sample = self.categories[class_id]
        pool = self.img_metadata_classwise[class_sample]
        query_name = np.random.choice(pool, 1, replace=False)[0]
        support_names = []
        while len(support_names) < self.shot:
            support_name = np.random.choice(pool, 1, replace=False)[0]
            if query_name != support_name:
                support_names.append(support_name)
        return (query_name, support_names, class_id)

    def _category_images(self, cat):
        """Sorted .jpg image paths under <cat>/test/origin."""
        pattern = '%s/*' % os.path.join(self.base_path, cat, 'test', 'origin')
        # FIX: endswith is robust to extension-less filenames; the original
        # indexed split('.')[1], which raises IndexError on names with no dot.
        return sorted(p for p in glob.glob(pattern)
                      if os.path.basename(p).endswith('.jpg'))

    def build_img_metadata(self):
        """Flat list of all category images (FIX: dropped a dead
        os.path.join statement whose result was discarded)."""
        img_metadata = []
        for cat in self.categories:
            img_metadata.extend(self._category_images(cat))
        return img_metadata

    def build_img_metadata_classwise(self):
        """Map each category name to its sorted list of image paths."""
        return {cat: self._category_images(cat) for cat in self.categories}
def generate_switch_batch_size():
    """Return a Python source snippet (as a string) that rewrites the batch
    dimension of every batched input/output shape in a partitioned-model
    config to a new ``batch_size``.

    NOTE(review): the snippet references ``batch_size``, ``config``,
    ``chain`` and ``torch`` from the scope it is pasted into — presumably
    exec'd or templated into generated code by the caller; confirm.
    """
    # The string must stay byte-identical: its internal indentation matches
    # the surrounding generated code it is spliced into.
    s = "batch_dim = config['batch_dim']\n    for d in chain(config['model_inputs'].values(),config['model_outputs'].values()):\n        if d['is_batched']:\n            shape = d['shape']\n            d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim+1:])\n    \n    for s in config['stages'].values():\n        for d in chain(s['inputs'].values(),s['outputs'].values()):\n            if d['is_batched']:\n                shape = d['shape']\n                d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim+1:])"
    return s
def tens2image(im):
    """Convert a torch tensor to a numpy image: squeeze singleton dims and,
    for non-2-D results, move channels last (CHW -> HWC)."""
    arr = np.squeeze(im.numpy())
    if arr.ndim != 2:
        arr = arr.transpose((1, 2, 0))
    return arr
# NOTE(review): the bare `_metric` below looks like a truncated decorator
# line (e.g. `@registry._metric`) lost in extraction — confirm against the
# original source before relying on this block.
_metric
def rendering_train(opts):
    """Render the full training set (no size cap, no x-flip) via
    rendering_utils.render_train, returning a completion marker dict."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    rendering_utils.render_train(opts, max_items=None)
    return dict(rendering_train=1)
def get_acronyms(entity):
    """Return the full first-letter acronym of *entity* followed by all its
    proper prefixes of length >= 2 (shortest first)."""
    initials = ''.join(word[0] for word in entity.split())
    return [initials] + [initials[:k] for k in range(2, len(initials))]
def wmd_distance(model, sent1_cut_list, sent2_cut_list):
    """Word Mover's Distance between two tokenized sentences under *model*
    (thin wrapper over the model's wmdistance method)."""
    return model.wmdistance(sent1_cut_list, sent2_cut_list)
def filter_genre_edgelist(fname, genres_dict):
    """Filter a raw last.fm edge list down to rows whose genre is known.

    Reads `fname` (CSV with a header row; columns [?, user_id, timestamp,
    tags, weight, ...]) and writes matching rows to
    'lastfm_edgelist_clean.csv', canonicalizing genre names through the
    module-level `similarity_dict` when a mapping exists.

    NOTE(review): rows are parsed with a naive split(',') exactly like the
    original — tags containing commas would shift the columns; confirm the
    input format before hardening the parse.
    """
    # FIX: context-manage the input file (the original leaked the handle if
    # readlines() raised) and drop the redundant list() around readlines().
    with open(fname, 'r') as edgelist:
        lines = edgelist.readlines()
    with open('lastfm_edgelist_clean.csv', 'w') as f:
        write = csv.writer(f)
        write.writerow(['user_id', 'timestamp', 'tags', 'weight'])
        for line in lines[1:]:  # skip the header row
            vals = line.split(',')
            user_id = vals[1]
            time = vals[2]
            # strip the list/quote markup the raw dump wraps genres in
            genre = vals[3].strip('"').strip("['")
            w = vals[4][:-3]  # drop the trailing markup characters
            if genre in genres_dict:
                if genre in similarity_dict:
                    genre = similarity_dict[genre]
                write.writerow([user_id, time, genre, w])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.